diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/CNAME b/CNAME
new file mode 100644
index 00000000000..f5515ee7595
--- /dev/null
+++ b/CNAME
@@ -0,0 +1 @@
+kframework.org
diff --git a/assets/css/Firefly-Download-Icon-Inverted.32636383.png b/assets/css/Firefly-Download-Icon-Inverted.32636383.png
new file mode 100644
index 00000000000..c87605a3d70
Binary files /dev/null and b/assets/css/Firefly-Download-Icon-Inverted.32636383.png differ
diff --git a/assets/css/Firefly-Download-Icon.8094a4f0.png b/assets/css/Firefly-Download-Icon.8094a4f0.png
new file mode 100644
index 00000000000..e0fdca7b4c4
Binary files /dev/null and b/assets/css/Firefly-Download-Icon.8094a4f0.png differ
diff --git a/assets/css/fontawesome-webfont.0caf0c90.ttf b/assets/css/fontawesome-webfont.0caf0c90.ttf
new file mode 100644
index 00000000000..35acda2fa11
Binary files /dev/null and b/assets/css/fontawesome-webfont.0caf0c90.ttf differ
diff --git a/assets/css/fontawesome-webfont.3981e506.eot b/assets/css/fontawesome-webfont.3981e506.eot
new file mode 100644
index 00000000000..e9f60ca953f
Binary files /dev/null and b/assets/css/fontawesome-webfont.3981e506.eot differ
diff --git a/assets/css/fontawesome-webfont.58488e7e.woff2 b/assets/css/fontawesome-webfont.58488e7e.woff2
new file mode 100644
index 00000000000..4d13fc60404
Binary files /dev/null and b/assets/css/fontawesome-webfont.58488e7e.woff2 differ
diff --git a/assets/css/fontawesome-webfont.a9323ae9.svg b/assets/css/fontawesome-webfont.a9323ae9.svg
new file mode 100644
index 00000000000..756bf0896c7
--- /dev/null
+++ b/assets/css/fontawesome-webfont.a9323ae9.svg
@@ -0,0 +1 @@
+ 
\ No newline at end of file
diff --git a/assets/css/fontawesome-webfont.ed962b83.woff b/assets/css/fontawesome-webfont.ed962b83.woff
new file mode 100644
index 00000000000..400014a4b06
Binary files /dev/null and b/assets/css/fontawesome-webfont.ed962b83.woff differ
diff --git a/assets/css/iconfont.960c72b2.eot b/assets/css/iconfont.960c72b2.eot
new file mode 100644
index 00000000000..63c34745265
Binary files /dev/null and b/assets/css/iconfont.960c72b2.eot differ
diff --git a/assets/css/iconfont.a07d77e0.woff2 b/assets/css/iconfont.a07d77e0.woff2
new file mode 100644
index 00000000000..02fbbae4194
Binary files /dev/null and b/assets/css/iconfont.a07d77e0.woff2 differ
diff --git a/assets/css/iconfont.b2a06094.woff b/assets/css/iconfont.b2a06094.woff
new file mode 100644
index 00000000000..5c0b2e3a596
Binary files /dev/null and b/assets/css/iconfont.b2a06094.woff differ
diff --git a/assets/css/index.css b/assets/css/index.css
new file mode 100644
index 00000000000..5fdaff40711
--- /dev/null
+++ b/assets/css/index.css
@@ -0,0 +1 @@
+:root{--blue:#007bff;--indigo:#6610f2;--purple:#6f42c1;--pink:#e83e8c;--red:#dc3545;--orange:#fd7e14;--yellow:#ffc107;--green:#28a745;--teal:#20c997;--cyan:#17a2b8;--white:#fff;--gray:#6c757d;--gray-dark:#343a40;--primary:#007bff;--secondary:#6c757d;--success:#28a745;--info:#17a2b8;--warning:#ffc107;--danger:#dc3545;--light:#f8f9fa;--dark:#343a40;--breakpoint-xs:0;--breakpoint-sm:576px;--breakpoint-md:768px;--breakpoint-lg:992px;--breakpoint-xl:1200px;--font-family-sans-serif:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans","Liberation Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-family-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier 
New",monospace}*,:after,:before{box-sizing:border-box}html{-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent;font-family:sans-serif;line-height:1.15}article,aside,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}body{color:#212529;text-align:left;background-color:#fff;margin:0;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,Liberation Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;font-size:1rem;font-weight:400;line-height:1.5}[tabindex="-1"]:focus:not(:focus-visible){outline:0!important}hr{box-sizing:content-box;height:0;overflow:visible}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem}p{margin-top:0;margin-bottom:1rem}abbr[data-original-title],abbr[title]{cursor:help;-webkit-text-decoration-skip-ink:none;text-decoration-skip-ink:none;text-decoration:underline;border-bottom:0;-webkit-text-decoration:underline dotted;text-decoration:underline dotted}address{font-style:normal;line-height:inherit;margin-bottom:1rem}dl,ol,ul{margin-top:0;margin-bottom:1rem}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}a{color:#007bff;background-color:#0000;text-decoration:none}a:hover{color:#0056b3;text-decoration:underline}a:not([href]):not([class]){color:inherit;text-decoration:none}a:not([href]):not([class]):hover{color:inherit;text-decoration:none}code,kbd,pre,samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}pre{-ms-overflow-style:scrollbar;margin-top:0;margin-bottom:1rem;overflow:auto}figure{margin:0 0 
1rem}img{vertical-align:middle;border-style:none}svg{vertical-align:middle;overflow:hidden}table{border-collapse:collapse}caption{color:#6c757d;text-align:left;caption-side:bottom;padding-top:.75rem;padding-bottom:.75rem}th{text-align:inherit;text-align:-webkit-match-parent}label{margin-bottom:.5rem;display:inline-block}button{border-radius:0}button:focus:not(:focus-visible){outline:0}button,input,optgroup,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit;margin:0}button,input{overflow:visible}button,select{text-transform:none}[role=button]{cursor:pointer}select{word-wrap:normal}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled),button:not(:disabled){cursor:pointer}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}textarea{resize:vertical;overflow:auto}fieldset{min-width:0;border:0;margin:0;padding:0}legend{width:100%;max-width:100%;font-size:1.5rem;line-height:inherit;color:inherit;white-space:normal;margin-bottom:.5rem;padding:0;display:block}progress{vertical-align:baseline}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:none}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}summary{cursor:pointer;display:list-item}template{display:none}[hidden]{display:none!important}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{margin-bottom:.5rem;font-weight:500;line-height:1.2}.h1,h1{font-size:2.5rem}.h2,h2{font-size:2rem}.h3,h3{font-size:1.75rem}.h4,h4{font-size:1.5rem}.h5,h5{font-size:1.25rem}.h6,h6{font-size:1rem}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:6rem;font-weight:300;line-height:1.2}.display-2{font-size:5.5rem;font-weight:300;line-height:1.2}.display-3{font-size:4.5rem;font-weight:300;line-height:1.2}.display-4{font-size:3.5rem;font-weight:300;line-height:1.2}hr{border:0;border-top:1px solid #0000001a;margin-top:1rem;margin-bottom:1rem}.small,small{font-size:.875em;font-weight:400}.mark,mark{background-color:#fcf8e3;padding:.2em}.list-unstyled,.list-inline{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{text-transform:uppercase;font-size:90%}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote-footer{color:#6c757d;font-size:.875em;display:block}.blockquote-footer:before{content:"— "}.img-fluid{max-width:100%;height:auto}.img-thumbnail{max-width:100%;height:auto;background-color:#fff;border:1px solid #dee2e6;border-radius:.25rem;padding:.25rem}.figure{display:inline-block}.figure-img{margin-bottom:.5rem;line-height:1}.figure-caption{color:#6c757d;font-size:90%}code{color:#e83e8c;word-wrap:break-word;font-size:87.5%}a>code{color:inherit}kbd{color:#fff;background-color:#212529;border-radius:.2rem;padding:.2rem .4rem;font-size:87.5%}kbd kbd{padding:0;font-size:100%;font-weight:700}pre{color:#212529;font-size:87.5%;display:block}pre 
code{font-size:inherit;color:inherit;word-break:normal}.pre-scrollable{max-height:340px;overflow-y:scroll}.container,.container-fluid,.container-lg,.container-md,.container-sm,.container-xl{width:100%;margin-left:auto;margin-right:auto;padding-left:15px;padding-right:15px}@media (min-width:576px){.container,.container-sm{max-width:540px}}@media (min-width:768px){.container,.container-md,.container-sm{max-width:720px}}@media (min-width:992px){.container,.container-lg,.container-md,.container-sm{max-width:960px}}@media (min-width:1200px){.container,.container-lg,.container-md,.container-sm,.container-xl{max-width:1140px}}.row{-ms-flex-wrap:wrap;flex-wrap:wrap;margin-left:-15px;margin-right:-15px;display:-ms-flexbox;display:flex}.no-gutters{margin-left:0;margin-right:0}.no-gutters>.col,.no-gutters>[class*=col-]{padding-left:0;padding-right:0}.col,.col-1,.col-10,.col-11,.col-12,.col-2,.col-3,.col-4,.col-5,.col-6,.col-7,.col-8,.col-9,.col-auto,.col-lg,.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-auto,.col-md,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-auto,.col-sm,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-auto,.col-xl,.col-xl-1,.col-xl-10,.col-xl-11,.col-xl-12,.col-xl-2,.col-xl-3,.col-xl-4,.col-xl-5,.col-xl-6,.col-xl-7,.col-xl-8,.col-xl-9,.col-xl-auto{width:100%;padding-left:15px;padding-right:15px;position:relative}.col{max-width:100%;-ms-flex-positive:1;-ms-flex-preferred-size:0;flex-grow:1;flex-basis:0}.row-cols-1>*{max-width:100%;-ms-flex:0 0 100%;flex:0 0 100%}.row-cols-2>*{max-width:50%;-ms-flex:0 0 50%;flex:0 0 50%}.row-cols-3>*{max-width:33.3333%;-ms-flex:0 0 33.3333%;flex:0 0 33.3333%}.row-cols-4>*{max-width:25%;-ms-flex:0 0 25%;flex:0 0 25%}.row-cols-5>*{max-width:20%;-ms-flex:0 0 20%;flex:0 0 20%}.row-cols-6>*{max-width:16.6667%;-ms-flex:0 0 16.6667%;flex:0 0 16.6667%}.col-auto{width:auto;max-width:100%;-ms-flex:none;flex:none}.col-1{max-width:8.33333%;-ms-flex:0 0 8.33333%;flex:0 0 8.33333%}.col-2{max-width:16.6667%;-ms-flex:0 0 16.6667%;flex:0 0 16.6667%}.col-3{max-width:25%;-ms-flex:0 0 25%;flex:0 0 25%}.col-4{max-width:33.3333%;-ms-flex:0 0 33.3333%;flex:0 0 33.3333%}.col-5{max-width:41.6667%;-ms-flex:0 0 41.6667%;flex:0 0 41.6667%}.col-6{max-width:50%;-ms-flex:0 0 50%;flex:0 0 50%}.col-7{max-width:58.3333%;-ms-flex:0 0 58.3333%;flex:0 0 58.3333%}.col-8{max-width:66.6667%;-ms-flex:0 0 66.6667%;flex:0 0 66.6667%}.col-9{max-width:75%;-ms-flex:0 0 75%;flex:0 0 75%}.col-10{max-width:83.3333%;-ms-flex:0 0 83.3333%;flex:0 0 83.3333%}.col-11{max-width:91.6667%;-ms-flex:0 0 91.6667%;flex:0 0 91.6667%}.col-12{max-width:100%;-ms-flex:0 0 100%;flex:0 0 
100%}.order-first{-ms-flex-order:-1;order:-1}.order-last{-ms-flex-order:13;order:13}.order-0{-ms-flex-order:0;order:0}.order-1{-ms-flex-order:1;order:1}.order-2{-ms-flex-order:2;order:2}.order-3{-ms-flex-order:3;order:3}.order-4{-ms-flex-order:4;order:4}.order-5{-ms-flex-order:5;order:5}.order-6{-ms-flex-order:6;order:6}.order-7{-ms-flex-order:7;order:7}.order-8{-ms-flex-order:8;order:8}.order-9{-ms-flex-order:9;order:9}.order-10{-ms-flex-order:10;order:10}.order-11{-ms-flex-order:11;order:11}.order-12{-ms-flex-order:12;order:12}.offset-1{margin-left:8.33333%}.offset-2{margin-left:16.6667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.3333%}.offset-5{margin-left:41.6667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.3333%}.offset-8{margin-left:66.6667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.3333%}.offset-11{margin-left:91.6667%}@media (min-width:576px){.col-sm{max-width:100%;-ms-flex-positive:1;-ms-flex-preferred-size:0;flex-grow:1;flex-basis:0}.row-cols-sm-1>*{max-width:100%;-ms-flex:0 0 100%;flex:0 0 100%}.row-cols-sm-2>*{max-width:50%;-ms-flex:0 0 50%;flex:0 0 50%}.row-cols-sm-3>*{max-width:33.3333%;-ms-flex:0 0 33.3333%;flex:0 0 33.3333%}.row-cols-sm-4>*{max-width:25%;-ms-flex:0 0 25%;flex:0 0 25%}.row-cols-sm-5>*{max-width:20%;-ms-flex:0 0 20%;flex:0 0 20%}.row-cols-sm-6>*{max-width:16.6667%;-ms-flex:0 0 16.6667%;flex:0 0 16.6667%}.col-sm-auto{width:auto;max-width:100%;-ms-flex:none;flex:none}.col-sm-1{max-width:8.33333%;-ms-flex:0 0 8.33333%;flex:0 0 8.33333%}.col-sm-2{max-width:16.6667%;-ms-flex:0 0 16.6667%;flex:0 0 16.6667%}.col-sm-3{max-width:25%;-ms-flex:0 0 25%;flex:0 0 25%}.col-sm-4{max-width:33.3333%;-ms-flex:0 0 33.3333%;flex:0 0 33.3333%}.col-sm-5{max-width:41.6667%;-ms-flex:0 0 41.6667%;flex:0 0 41.6667%}.col-sm-6{max-width:50%;-ms-flex:0 0 50%;flex:0 0 50%}.col-sm-7{max-width:58.3333%;-ms-flex:0 0 58.3333%;flex:0 0 58.3333%}.col-sm-8{max-width:66.6667%;-ms-flex:0 0 66.6667%;flex:0 0 66.6667%}.col-sm-9{max-width:75%;-ms-flex:0 0 75%;flex:0 0 75%}.col-sm-10{max-width:83.3333%;-ms-flex:0 0 83.3333%;flex:0 0 83.3333%}.col-sm-11{max-width:91.6667%;-ms-flex:0 0 91.6667%;flex:0 0 91.6667%}.col-sm-12{max-width:100%;-ms-flex:0 0 100%;flex:0 0 100%}.order-sm-first{-ms-flex-order:-1;order:-1}.order-sm-last{-ms-flex-order:13;order:13}.order-sm-0{-ms-flex-order:0;order:0}.order-sm-1{-ms-flex-order:1;order:1}.order-sm-2{-ms-flex-order:2;order:2}.order-sm-3{-ms-flex-order:3;order:3}.order-sm-4{-ms-flex-order:4;order:4}.order-sm-5{-ms-flex-order:5;order:5}.order-sm-6{-ms-flex-order:6;order:6}.order-sm-7{-ms-flex-order:7;order:7}.order-sm-8{-ms-flex-order:8;order:8}.order-sm-9{-ms-flex-order:9;order:9}.order-sm-10{-ms-flex-order:10;order:10}.order-sm-11{-ms-flex-order:11;order:11}.order-sm-12{-ms-flex-order:12;order:12}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.33333%}.offset-sm-2{margin-left:16.6667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.3333%}.offset-sm-5{margin-left:41.6667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.3333%}.offset-sm-8{margin-left:66.6667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.3333%}.offset-sm-11{margin-left:91.6667%}}@media (min-width:768px){.col-md{max-width:100%;-ms-flex-positive:1;-ms-flex-preferred-size:0;flex-grow:1;flex-basis:0}.row-cols-md-1>*{max-width:100%;-ms-flex:0 0 100%;flex:0 0 100%}.row-cols-md-2>*{max-width:50%;-ms-flex:0 0 50%;flex:0 0 50%}.row-cols-md-3>*{max-width:33.3333%;-ms-flex:0 0 33.3333%;flex:0 0 33.3333%}.row-cols-md-4>*{max-width:25%;-ms-flex:0 0 
25%;flex:0 0 25%}.row-cols-md-5>*{max-width:20%;-ms-flex:0 0 20%;flex:0 0 20%}.row-cols-md-6>*{max-width:16.6667%;-ms-flex:0 0 16.6667%;flex:0 0 16.6667%}.col-md-auto{width:auto;max-width:100%;-ms-flex:none;flex:none}.col-md-1{max-width:8.33333%;-ms-flex:0 0 8.33333%;flex:0 0 8.33333%}.col-md-2{max-width:16.6667%;-ms-flex:0 0 16.6667%;flex:0 0 16.6667%}.col-md-3{max-width:25%;-ms-flex:0 0 25%;flex:0 0 25%}.col-md-4{max-width:33.3333%;-ms-flex:0 0 33.3333%;flex:0 0 33.3333%}.col-md-5{max-width:41.6667%;-ms-flex:0 0 41.6667%;flex:0 0 41.6667%}.col-md-6{max-width:50%;-ms-flex:0 0 50%;flex:0 0 50%}.col-md-7{max-width:58.3333%;-ms-flex:0 0 58.3333%;flex:0 0 58.3333%}.col-md-8{max-width:66.6667%;-ms-flex:0 0 66.6667%;flex:0 0 66.6667%}.col-md-9{max-width:75%;-ms-flex:0 0 75%;flex:0 0 75%}.col-md-10{max-width:83.3333%;-ms-flex:0 0 83.3333%;flex:0 0 83.3333%}.col-md-11{max-width:91.6667%;-ms-flex:0 0 91.6667%;flex:0 0 91.6667%}.col-md-12{max-width:100%;-ms-flex:0 0 100%;flex:0 0 100%}.order-md-first{-ms-flex-order:-1;order:-1}.order-md-last{-ms-flex-order:13;order:13}.order-md-0{-ms-flex-order:0;order:0}.order-md-1{-ms-flex-order:1;order:1}.order-md-2{-ms-flex-order:2;order:2}.order-md-3{-ms-flex-order:3;order:3}.order-md-4{-ms-flex-order:4;order:4}.order-md-5{-ms-flex-order:5;order:5}.order-md-6{-ms-flex-order:6;order:6}.order-md-7{-ms-flex-order:7;order:7}.order-md-8{-ms-flex-order:8;order:8}.order-md-9{-ms-flex-order:9;order:9}.order-md-10{-ms-flex-order:10;order:10}.order-md-11{-ms-flex-order:11;order:11}.order-md-12{-ms-flex-order:12;order:12}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.33333%}.offset-md-2{margin-left:16.6667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.3333%}.offset-md-5{margin-left:41.6667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.3333%}.offset-md-8{margin-left:66.6667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.3333%}.offset-md-11{margin-left:91.6667%}}@media (min-width:992px){.col-lg{max-width:100%;-ms-flex-positive:1;-ms-flex-preferred-size:0;flex-grow:1;flex-basis:0}.row-cols-lg-1>*{max-width:100%;-ms-flex:0 0 100%;flex:0 0 100%}.row-cols-lg-2>*{max-width:50%;-ms-flex:0 0 50%;flex:0 0 50%}.row-cols-lg-3>*{max-width:33.3333%;-ms-flex:0 0 33.3333%;flex:0 0 33.3333%}.row-cols-lg-4>*{max-width:25%;-ms-flex:0 0 25%;flex:0 0 25%}.row-cols-lg-5>*{max-width:20%;-ms-flex:0 0 20%;flex:0 0 20%}.row-cols-lg-6>*{max-width:16.6667%;-ms-flex:0 0 16.6667%;flex:0 0 16.6667%}.col-lg-auto{width:auto;max-width:100%;-ms-flex:none;flex:none}.col-lg-1{max-width:8.33333%;-ms-flex:0 0 8.33333%;flex:0 0 8.33333%}.col-lg-2{max-width:16.6667%;-ms-flex:0 0 16.6667%;flex:0 0 16.6667%}.col-lg-3{max-width:25%;-ms-flex:0 0 25%;flex:0 0 25%}.col-lg-4{max-width:33.3333%;-ms-flex:0 0 33.3333%;flex:0 0 33.3333%}.col-lg-5{max-width:41.6667%;-ms-flex:0 0 41.6667%;flex:0 0 41.6667%}.col-lg-6{max-width:50%;-ms-flex:0 0 50%;flex:0 0 50%}.col-lg-7{max-width:58.3333%;-ms-flex:0 0 58.3333%;flex:0 0 58.3333%}.col-lg-8{max-width:66.6667%;-ms-flex:0 0 66.6667%;flex:0 0 66.6667%}.col-lg-9{max-width:75%;-ms-flex:0 0 75%;flex:0 0 75%}.col-lg-10{max-width:83.3333%;-ms-flex:0 0 83.3333%;flex:0 0 83.3333%}.col-lg-11{max-width:91.6667%;-ms-flex:0 0 91.6667%;flex:0 0 91.6667%}.col-lg-12{max-width:100%;-ms-flex:0 0 100%;flex:0 0 
100%}.order-lg-first{-ms-flex-order:-1;order:-1}.order-lg-last{-ms-flex-order:13;order:13}.order-lg-0{-ms-flex-order:0;order:0}.order-lg-1{-ms-flex-order:1;order:1}.order-lg-2{-ms-flex-order:2;order:2}.order-lg-3{-ms-flex-order:3;order:3}.order-lg-4{-ms-flex-order:4;order:4}.order-lg-5{-ms-flex-order:5;order:5}.order-lg-6{-ms-flex-order:6;order:6}.order-lg-7{-ms-flex-order:7;order:7}.order-lg-8{-ms-flex-order:8;order:8}.order-lg-9{-ms-flex-order:9;order:9}.order-lg-10{-ms-flex-order:10;order:10}.order-lg-11{-ms-flex-order:11;order:11}.order-lg-12{-ms-flex-order:12;order:12}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.33333%}.offset-lg-2{margin-left:16.6667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.3333%}.offset-lg-5{margin-left:41.6667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.3333%}.offset-lg-8{margin-left:66.6667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.3333%}.offset-lg-11{margin-left:91.6667%}}@media (min-width:1200px){.col-xl{max-width:100%;-ms-flex-positive:1;-ms-flex-preferred-size:0;flex-grow:1;flex-basis:0}.row-cols-xl-1>*{max-width:100%;-ms-flex:0 0 100%;flex:0 0 100%}.row-cols-xl-2>*{max-width:50%;-ms-flex:0 0 50%;flex:0 0 50%}.row-cols-xl-3>*{max-width:33.3333%;-ms-flex:0 0 33.3333%;flex:0 0 33.3333%}.row-cols-xl-4>*{max-width:25%;-ms-flex:0 0 25%;flex:0 0 25%}.row-cols-xl-5>*{max-width:20%;-ms-flex:0 0 20%;flex:0 0 20%}.row-cols-xl-6>*{max-width:16.6667%;-ms-flex:0 0 16.6667%;flex:0 0 16.6667%}.col-xl-auto{width:auto;max-width:100%;-ms-flex:none;flex:none}.col-xl-1{max-width:8.33333%;-ms-flex:0 0 8.33333%;flex:0 0 8.33333%}.col-xl-2{max-width:16.6667%;-ms-flex:0 0 16.6667%;flex:0 0 16.6667%}.col-xl-3{max-width:25%;-ms-flex:0 0 25%;flex:0 0 25%}.col-xl-4{max-width:33.3333%;-ms-flex:0 0 33.3333%;flex:0 0 33.3333%}.col-xl-5{max-width:41.6667%;-ms-flex:0 0 41.6667%;flex:0 0 41.6667%}.col-xl-6{max-width:50%;-ms-flex:0 0 50%;flex:0 0 50%}.col-xl-7{max-width:58.3333%;-ms-flex:0 0 58.3333%;flex:0 0 58.3333%}.col-xl-8{max-width:66.6667%;-ms-flex:0 0 66.6667%;flex:0 0 66.6667%}.col-xl-9{max-width:75%;-ms-flex:0 0 75%;flex:0 0 75%}.col-xl-10{max-width:83.3333%;-ms-flex:0 0 83.3333%;flex:0 0 83.3333%}.col-xl-11{max-width:91.6667%;-ms-flex:0 0 91.6667%;flex:0 0 91.6667%}.col-xl-12{max-width:100%;-ms-flex:0 0 100%;flex:0 0 100%}.order-xl-first{-ms-flex-order:-1;order:-1}.order-xl-last{-ms-flex-order:13;order:13}.order-xl-0{-ms-flex-order:0;order:0}.order-xl-1{-ms-flex-order:1;order:1}.order-xl-2{-ms-flex-order:2;order:2}.order-xl-3{-ms-flex-order:3;order:3}.order-xl-4{-ms-flex-order:4;order:4}.order-xl-5{-ms-flex-order:5;order:5}.order-xl-6{-ms-flex-order:6;order:6}.order-xl-7{-ms-flex-order:7;order:7}.order-xl-8{-ms-flex-order:8;order:8}.order-xl-9{-ms-flex-order:9;order:9}.order-xl-10{-ms-flex-order:10;order:10}.order-xl-11{-ms-flex-order:11;order:11}.order-xl-12{-ms-flex-order:12;order:12}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.33333%}.offset-xl-2{margin-left:16.6667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.3333%}.offset-xl-5{margin-left:41.6667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.3333%}.offset-xl-8{margin-left:66.6667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.3333%}.offset-xl-11{margin-left:91.6667%}}.table{width:100%;color:#212529;margin-bottom:1rem}.table td,.table th{vertical-align:top;border-top:1px solid #dee2e6;padding:.75rem}.table thead th{vertical-align:bottom;border-bottom:2px solid #dee2e6}.table tbody+tbody{border-top:2px solid #dee2e6}.table-sm 
td,.table-sm th{padding:.3rem}.table-bordered,.table-bordered td,.table-bordered th{border:1px solid #dee2e6}.table-bordered thead td,.table-bordered thead th{border-bottom-width:2px}.table-borderless tbody+tbody,.table-borderless td,.table-borderless th,.table-borderless thead th{border:0}.table-striped tbody tr:nth-of-type(2n+1){background-color:#0000000d}.table-hover tbody tr:hover{color:#212529;background-color:#00000013}.table-primary,.table-primary>td,.table-primary>th{background-color:#b8daff}.table-primary tbody+tbody,.table-primary td,.table-primary th,.table-primary thead th{border-color:#7abaff}.table-hover .table-primary:hover{background-color:#9fcdff}.table-hover .table-primary:hover>td,.table-hover .table-primary:hover>th{background-color:#9fcdff}.table-secondary,.table-secondary>td,.table-secondary>th{background-color:#d6d8db}.table-secondary tbody+tbody,.table-secondary td,.table-secondary th,.table-secondary thead th{border-color:#b3b7bb}.table-hover .table-secondary:hover{background-color:#c8cbcf}.table-hover .table-secondary:hover>td,.table-hover .table-secondary:hover>th{background-color:#c8cbcf}.table-success,.table-success>td,.table-success>th{background-color:#c3e6cb}.table-success tbody+tbody,.table-success td,.table-success th,.table-success thead th{border-color:#8fd19e}.table-hover .table-success:hover{background-color:#b1dfbb}.table-hover .table-success:hover>td,.table-hover .table-success:hover>th{background-color:#b1dfbb}.table-info,.table-info>td,.table-info>th{background-color:#bee5eb}.table-info tbody+tbody,.table-info td,.table-info th,.table-info thead th{border-color:#86cfda}.table-hover .table-info:hover{background-color:#abdde5}.table-hover .table-info:hover>td,.table-hover .table-info:hover>th{background-color:#abdde5}.table-warning,.table-warning>td,.table-warning>th{background-color:#ffeeba}.table-warning tbody+tbody,.table-warning td,.table-warning th,.table-warning thead th{border-color:#ffdf7e}.table-hover .table-warning:hover{background-color:#ffe8a1}.table-hover .table-warning:hover>td,.table-hover .table-warning:hover>th{background-color:#ffe8a1}.table-danger,.table-danger>td,.table-danger>th{background-color:#f5c6cb}.table-danger tbody+tbody,.table-danger td,.table-danger th,.table-danger thead th{border-color:#ed969e}.table-hover .table-danger:hover{background-color:#f1b0b7}.table-hover .table-danger:hover>td,.table-hover .table-danger:hover>th{background-color:#f1b0b7}.table-light,.table-light>td,.table-light>th{background-color:#fdfdfe}.table-light tbody+tbody,.table-light td,.table-light th,.table-light thead th{border-color:#fbfcfc}.table-hover .table-light:hover{background-color:#ececf6}.table-hover .table-light:hover>td,.table-hover .table-light:hover>th{background-color:#ececf6}.table-dark,.table-dark>td,.table-dark>th{background-color:#c6c8ca}.table-dark tbody+tbody,.table-dark td,.table-dark th,.table-dark thead th{border-color:#95999c}.table-hover .table-dark:hover{background-color:#b9bbbe}.table-hover .table-dark:hover>td,.table-hover .table-dark:hover>th{background-color:#b9bbbe}.table-active,.table-active>td,.table-active>th{background-color:#00000013}.table-hover .table-active:hover{background-color:#00000013}.table-hover .table-active:hover>td,.table-hover .table-active:hover>th{background-color:#00000013}.table .thead-dark th{color:#fff;background-color:#343a40;border-color:#454d55}.table .thead-light th{color:#495057;background-color:#e9ecef;border-color:#dee2e6}.table-dark{color:#fff;background-color:#343a40}.table-dark 
td,.table-dark th,.table-dark thead th{border-color:#454d55}.table-dark.table-bordered{border:0}.table-dark.table-striped tbody tr:nth-of-type(2n+1){background-color:#ffffff0d}.table-dark.table-hover tbody tr:hover{color:#fff;background-color:#ffffff13}@media (max-width:575.98px){.table-responsive-sm{width:100%;-webkit-overflow-scrolling:touch;display:block;overflow-x:auto}.table-responsive-sm>.table-bordered{border:0}}@media (max-width:767.98px){.table-responsive-md{width:100%;-webkit-overflow-scrolling:touch;display:block;overflow-x:auto}.table-responsive-md>.table-bordered{border:0}}@media (max-width:991.98px){.table-responsive-lg{width:100%;-webkit-overflow-scrolling:touch;display:block;overflow-x:auto}.table-responsive-lg>.table-bordered{border:0}}@media (max-width:1199.98px){.table-responsive-xl{width:100%;-webkit-overflow-scrolling:touch;display:block;overflow-x:auto}.table-responsive-xl>.table-bordered{border:0}}.table-responsive{width:100%;-webkit-overflow-scrolling:touch;display:block;overflow-x:auto}.table-responsive>.table-bordered{border:0}.form-control{width:100%;height:calc(1.5em + .75rem + 2px);color:#495057;background-color:#fff;background-clip:padding-box;border:1px solid #ced4da;border-radius:.25rem;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out;display:block}@media (prefers-reduced-motion:reduce){.form-control{transition:none}}.form-control::-ms-expand{background-color:#0000;border:0}.form-control:focus{color:#495057;background-color:#fff;border-color:#80bdff;outline:0;box-shadow:0 0 0 .2rem #007bff40}.form-control::-webkit-input-placeholder{color:#6c757d;opacity:1}.form-control::-moz-placeholder{color:#6c757d;opacity:1}.form-control:-ms-input-placeholder{color:#6c757d;opacity:1}.form-control::-moz-placeholder{color:#6c757d;opacity:1}.form-control::placeholder{color:#6c757d;opacity:1}.form-control:disabled,.form-control[readonly]{opacity:1;background-color:#e9ecef}input[type=date].form-control,input[type=datetime-local].form-control,input[type=month].form-control,input[type=time].form-control{-webkit-appearance:none;-moz-appearance:none;appearance:none}select.form-control:-moz-focusring{color:#0000;text-shadow:0 0 #495057}select.form-control:focus::-ms-value{color:#495057;background-color:#fff}.form-control-file,.form-control-range{width:100%;display:block}.col-form-label{font-size:inherit;margin-bottom:0;padding-top:calc(.375rem + 1px);padding-bottom:calc(.375rem + 1px);line-height:1.5}.col-form-label-lg{padding-top:calc(.5rem + 1px);padding-bottom:calc(.5rem + 1px);font-size:1.25rem;line-height:1.5}.col-form-label-sm{padding-top:calc(.25rem + 1px);padding-bottom:calc(.25rem + 1px);font-size:.875rem;line-height:1.5}.form-control-plaintext{width:100%;color:#212529;background-color:#0000;border:1px solid #0000;border-width:1px 0;margin-bottom:0;padding:.375rem 0;font-size:1rem;line-height:1.5;display:block}.form-control-plaintext.form-control-lg,.form-control-plaintext.form-control-sm{padding-left:0;padding-right:0}.form-control-sm{height:calc(1.5em + .5rem + 2px);border-radius:.2rem;padding:.25rem .5rem;font-size:.875rem;line-height:1.5}.form-control-lg{height:calc(1.5em + 1rem + 2px);border-radius:.3rem;padding:.5rem 
1rem;font-size:1.25rem;line-height:1.5}select.form-control[multiple],select.form-control[size]{height:auto}textarea.form-control{height:auto}.form-group{margin-bottom:1rem}.form-text{margin-top:.25rem;display:block}.form-row{-ms-flex-wrap:wrap;flex-wrap:wrap;margin-left:-5px;margin-right:-5px;display:-ms-flexbox;display:flex}.form-row>.col,.form-row>[class*=col-]{padding-left:5px;padding-right:5px}.form-check{padding-left:1.25rem;display:block;position:relative}.form-check-input{margin-top:.3rem;margin-left:-1.25rem;position:absolute}.form-check-input:disabled~.form-check-label,.form-check-input[disabled]~.form-check-label{color:#6c757d}.form-check-label{margin-bottom:0}.form-check-inline{align-items:center;margin-right:.75rem;padding-left:0;display:-ms-inline-flexbox;display:inline-flex}.form-check-inline .form-check-input{margin-top:0;margin-left:0;margin-right:.3125rem;position:static}.valid-feedback{width:100%;color:#28a745;margin-top:.25rem;font-size:.875em;display:none}.valid-tooltip{z-index:5;max-width:100%;color:#fff;background-color:#28a745e6;border-radius:.25rem;margin-top:.1rem;padding:.25rem .5rem;font-size:.875rem;line-height:1.5;display:none;position:absolute;top:100%;left:0}.form-row>.col>.valid-tooltip,.form-row>[class*=col-]>.valid-tooltip{left:5px}.is-valid~.valid-feedback,.is-valid~.valid-tooltip,.was-validated :valid~.valid-feedback,.was-validated :valid~.valid-tooltip{display:block}.form-control.is-valid,.was-validated .form-control:valid{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' width='8' height='8' viewBox='0 0 8 8'%3e%3cpath fill='%2328a745' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");background-position:right calc(.375em + .1875rem) center;background-repeat:no-repeat;background-size:calc(.75em + .375rem) calc(.75em + .375rem);border-color:#28a745;padding-right:calc(1.5em + .75rem)!important}.form-control.is-valid:focus,.was-validated .form-control:valid:focus{border-color:#28a745;box-shadow:0 0 0 .2rem #28a74540}.was-validated select.form-control:valid,select.form-control.is-valid{background-position:right 1.5rem center;padding-right:3rem!important}.was-validated textarea.form-control:valid,textarea.form-control.is-valid{background-position:right calc(.375em + .1875rem) top calc(.375em + .1875rem);padding-right:calc(1.5em + .75rem)}.custom-select.is-valid,.was-validated .custom-select:valid{background:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' width='4' height='5' viewBox='0 0 4 5'%3e%3cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3e%3c/svg%3e") right .75rem center/8px 10px no-repeat,#fff url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' width='8' height='8' viewBox='0 0 8 8'%3e%3cpath fill='%2328a745' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e") right 1.75rem center/calc(.75em + .375rem) calc(.75em + .375rem) no-repeat;border-color:#28a745;padding-right:calc(.75em + 2.3125rem)!important}.custom-select.is-valid:focus,.was-validated .custom-select:valid:focus{border-color:#28a745;box-shadow:0 0 0 .2rem #28a74540}.form-check-input.is-valid~.form-check-label,.was-validated .form-check-input:valid~.form-check-label{color:#28a745}.form-check-input.is-valid~.valid-feedback,.form-check-input.is-valid~.valid-tooltip,.was-validated .form-check-input:valid~.valid-feedback,.was-validated 
.form-check-input:valid~.valid-tooltip{display:block}.custom-control-input.is-valid~.custom-control-label,.was-validated .custom-control-input:valid~.custom-control-label{color:#28a745}.custom-control-input.is-valid~.custom-control-label:before,.was-validated .custom-control-input:valid~.custom-control-label:before{border-color:#28a745}.custom-control-input.is-valid:checked~.custom-control-label:before,.was-validated .custom-control-input:valid:checked~.custom-control-label:before{background-color:#34ce57;border-color:#34ce57}.custom-control-input.is-valid:focus~.custom-control-label:before,.was-validated .custom-control-input:valid:focus~.custom-control-label:before{box-shadow:0 0 0 .2rem #28a74540}.custom-control-input.is-valid:focus:not(:checked)~.custom-control-label:before,.was-validated .custom-control-input:valid:focus:not(:checked)~.custom-control-label:before{border-color:#28a745}.custom-file-input.is-valid~.custom-file-label,.was-validated .custom-file-input:valid~.custom-file-label{border-color:#28a745}.custom-file-input.is-valid:focus~.custom-file-label,.was-validated .custom-file-input:valid:focus~.custom-file-label{border-color:#28a745;box-shadow:0 0 0 .2rem #28a74540}.invalid-feedback{width:100%;color:#dc3545;margin-top:.25rem;font-size:.875em;display:none}.invalid-tooltip{z-index:5;max-width:100%;color:#fff;background-color:#dc3545e6;border-radius:.25rem;margin-top:.1rem;padding:.25rem .5rem;font-size:.875rem;line-height:1.5;display:none;position:absolute;top:100%;left:0}.form-row>.col>.invalid-tooltip,.form-row>[class*=col-]>.invalid-tooltip{left:5px}.is-invalid~.invalid-feedback,.is-invalid~.invalid-tooltip,.was-validated :invalid~.invalid-feedback,.was-validated :invalid~.invalid-tooltip{display:block}.form-control.is-invalid,.was-validated .form-control:invalid{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' fill='none' stroke='%23dc3545' viewBox='0 0 12 12'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3e%3c/svg%3e");background-position:right calc(.375em + .1875rem) center;background-repeat:no-repeat;background-size:calc(.75em + .375rem) calc(.75em + .375rem);border-color:#dc3545;padding-right:calc(1.5em + .75rem)!important}.form-control.is-invalid:focus,.was-validated .form-control:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .2rem #dc354540}.was-validated select.form-control:invalid,select.form-control.is-invalid{background-position:right 1.5rem center;padding-right:3rem!important}.was-validated textarea.form-control:invalid,textarea.form-control.is-invalid{background-position:right calc(.375em + .1875rem) top calc(.375em + .1875rem);padding-right:calc(1.5em + .75rem)}.custom-select.is-invalid,.was-validated .custom-select:invalid{background:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' width='4' height='5' viewBox='0 0 4 5'%3e%3cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3e%3c/svg%3e") right .75rem center/8px 10px no-repeat,#fff url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' fill='none' stroke='%23dc3545' viewBox='0 0 12 12'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3e%3c/svg%3e") right 1.75rem center/calc(.75em + .375rem) calc(.75em + .375rem) no-repeat;border-color:#dc3545;padding-right:calc(.75em + 
2.3125rem)!important}.custom-select.is-invalid:focus,.was-validated .custom-select:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .2rem #dc354540}.form-check-input.is-invalid~.form-check-label,.was-validated .form-check-input:invalid~.form-check-label{color:#dc3545}.form-check-input.is-invalid~.invalid-feedback,.form-check-input.is-invalid~.invalid-tooltip,.was-validated .form-check-input:invalid~.invalid-feedback,.was-validated .form-check-input:invalid~.invalid-tooltip{display:block}.custom-control-input.is-invalid~.custom-control-label,.was-validated .custom-control-input:invalid~.custom-control-label{color:#dc3545}.custom-control-input.is-invalid~.custom-control-label:before,.was-validated .custom-control-input:invalid~.custom-control-label:before{border-color:#dc3545}.custom-control-input.is-invalid:checked~.custom-control-label:before,.was-validated .custom-control-input:invalid:checked~.custom-control-label:before{background-color:#e4606d;border-color:#e4606d}.custom-control-input.is-invalid:focus~.custom-control-label:before,.was-validated .custom-control-input:invalid:focus~.custom-control-label:before{box-shadow:0 0 0 .2rem #dc354540}.custom-control-input.is-invalid:focus:not(:checked)~.custom-control-label:before,.was-validated .custom-control-input:invalid:focus:not(:checked)~.custom-control-label:before{border-color:#dc3545}.custom-file-input.is-invalid~.custom-file-label,.was-validated .custom-file-input:invalid~.custom-file-label{border-color:#dc3545}.custom-file-input.is-invalid:focus~.custom-file-label,.was-validated .custom-file-input:invalid:focus~.custom-file-label{border-color:#dc3545;box-shadow:0 0 0 .2rem #dc354540}.form-inline{-ms-flex-flow:wrap;flex-flow:wrap;align-items:center;display:-ms-flexbox;display:flex}.form-inline .form-check{width:100%}@media (min-width:576px){.form-inline label{justify-content:center;align-items:center;margin-bottom:0;display:-ms-flexbox;display:flex}.form-inline .form-group{-ms-flex-flow:wrap;flex-flow:wrap;-ms-flex:none;flex:none;align-items:center;margin-bottom:0;display:-ms-flexbox;display:flex}.form-inline .form-control{width:auto;vertical-align:middle;display:inline-block}.form-inline .form-control-plaintext{display:inline-block}.form-inline .custom-select,.form-inline .input-group{width:auto}.form-inline .form-check{width:auto;justify-content:center;align-items:center;padding-left:0;display:-ms-flexbox;display:flex}.form-inline .form-check-input{-ms-flex-negative:0;flex-shrink:0;margin-top:0;margin-left:0;margin-right:.25rem;position:relative}.form-inline .custom-control{justify-content:center;align-items:center}.form-inline .custom-control-label{margin-bottom:0}}.btn{color:#212529;text-align:center;vertical-align:middle;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-color:#0000;border:1px solid #0000;border-radius:.25rem;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;display:inline-block}@media (prefers-reduced-motion:reduce){.btn{transition:none}}.btn:hover{color:#212529;text-decoration:none}.btn.focus,.btn:focus{outline:0;box-shadow:0 0 0 .2rem #007bff40}.btn.disabled,.btn:disabled{opacity:.65}.btn:not(:disabled):not(.disabled){cursor:pointer}a.btn.disabled,fieldset:disabled 
a.btn{pointer-events:none}.btn-primary{color:#fff;background-color:#007bff;border-color:#007bff}.btn-primary:hover{color:#fff;background-color:#0069d9;border-color:#0062cc}.btn-primary.focus,.btn-primary:focus{color:#fff;background-color:#0069d9;border-color:#0062cc;box-shadow:0 0 0 .2rem #268fff80}.btn-primary.disabled,.btn-primary:disabled{color:#fff;background-color:#007bff;border-color:#007bff}.btn-primary:not(:disabled):not(.disabled).active,.btn-primary:not(:disabled):not(.disabled):active,.show>.btn-primary.dropdown-toggle{color:#fff;background-color:#0062cc;border-color:#005cbf}.btn-primary:not(:disabled):not(.disabled).active:focus,.btn-primary:not(:disabled):not(.disabled):active:focus,.show>.btn-primary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #268fff80}.btn-secondary{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:hover{color:#fff;background-color:#5a6268;border-color:#545b62}.btn-secondary.focus,.btn-secondary:focus{color:#fff;background-color:#5a6268;border-color:#545b62;box-shadow:0 0 0 .2rem #828a9180}.btn-secondary.disabled,.btn-secondary:disabled{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:not(:disabled):not(.disabled).active,.btn-secondary:not(:disabled):not(.disabled):active,.show>.btn-secondary.dropdown-toggle{color:#fff;background-color:#545b62;border-color:#4e555b}.btn-secondary:not(:disabled):not(.disabled).active:focus,.btn-secondary:not(:disabled):not(.disabled):active:focus,.show>.btn-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #828a9180}.btn-success{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success:hover{color:#fff;background-color:#218838;border-color:#1e7e34}.btn-success.focus,.btn-success:focus{color:#fff;background-color:#218838;border-color:#1e7e34;box-shadow:0 0 0 .2rem #48b46180}.btn-success.disabled,.btn-success:disabled{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success:not(:disabled):not(.disabled).active,.btn-success:not(:disabled):not(.disabled):active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#1e7e34;border-color:#1c7430}.btn-success:not(:disabled):not(.disabled).active:focus,.btn-success:not(:disabled):not(.disabled):active:focus,.show>.btn-success.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #48b46180}.btn-info{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:hover{color:#fff;background-color:#138496;border-color:#117a8b}.btn-info.focus,.btn-info:focus{color:#fff;background-color:#138496;border-color:#117a8b;box-shadow:0 0 0 .2rem #3ab0c380}.btn-info.disabled,.btn-info:disabled{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:not(:disabled):not(.disabled).active,.btn-info:not(:disabled):not(.disabled):active,.show>.btn-info.dropdown-toggle{color:#fff;background-color:#117a8b;border-color:#10707f}.btn-info:not(:disabled):not(.disabled).active:focus,.btn-info:not(:disabled):not(.disabled):active:focus,.show>.btn-info.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #3ab0c380}.btn-warning{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-warning:hover{color:#212529;background-color:#e0a800;border-color:#d39e00}.btn-warning.focus,.btn-warning:focus{color:#212529;background-color:#e0a800;border-color:#d39e00;box-shadow:0 0 0 .2rem 
#deaa0c80}.btn-warning.disabled,.btn-warning:disabled{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-warning:not(:disabled):not(.disabled).active,.btn-warning:not(:disabled):not(.disabled):active,.show>.btn-warning.dropdown-toggle{color:#212529;background-color:#d39e00;border-color:#c69500}.btn-warning:not(:disabled):not(.disabled).active:focus,.btn-warning:not(:disabled):not(.disabled):active:focus,.show>.btn-warning.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #deaa0c80}.btn-danger{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:hover{color:#fff;background-color:#c82333;border-color:#bd2130}.btn-danger.focus,.btn-danger:focus{color:#fff;background-color:#c82333;border-color:#bd2130;box-shadow:0 0 0 .2rem #e1536180}.btn-danger.disabled,.btn-danger:disabled{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:not(:disabled):not(.disabled).active,.btn-danger:not(:disabled):not(.disabled):active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#bd2130;border-color:#b21f2d}.btn-danger:not(:disabled):not(.disabled).active:focus,.btn-danger:not(:disabled):not(.disabled):active:focus,.show>.btn-danger.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #e1536180}.btn-light{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:hover{color:#212529;background-color:#e2e6ea;border-color:#dae0e5}.btn-light.focus,.btn-light:focus{color:#212529;background-color:#e2e6ea;border-color:#dae0e5;box-shadow:0 0 0 .2rem #d8d9db80}.btn-light.disabled,.btn-light:disabled{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:not(:disabled):not(.disabled).active,.btn-light:not(:disabled):not(.disabled):active,.show>.btn-light.dropdown-toggle{color:#212529;background-color:#dae0e5;border-color:#d3d9df}.btn-light:not(:disabled):not(.disabled).active:focus,.btn-light:not(:disabled):not(.disabled):active:focus,.show>.btn-light.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #d8d9db80}.btn-dark{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark:hover{color:#fff;background-color:#23272b;border-color:#1d2124}.btn-dark.focus,.btn-dark:focus{color:#fff;background-color:#23272b;border-color:#1d2124;box-shadow:0 0 0 .2rem #52585d80}.btn-dark.disabled,.btn-dark:disabled{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark:not(:disabled):not(.disabled).active,.btn-dark:not(:disabled):not(.disabled):active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#1d2124;border-color:#171a1d}.btn-dark:not(:disabled):not(.disabled).active:focus,.btn-dark:not(:disabled):not(.disabled):active:focus,.show>.btn-dark.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #52585d80}.btn-outline-primary{color:#007bff;border-color:#007bff}.btn-outline-primary:hover{color:#fff;background-color:#007bff;border-color:#007bff}.btn-outline-primary.focus,.btn-outline-primary:focus{box-shadow:0 0 0 .2rem #007bff80}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#007bff;background-color:#0000}.btn-outline-primary:not(:disabled):not(.disabled).active,.btn-outline-primary:not(:disabled):not(.disabled):active,.show>.btn-outline-primary.dropdown-toggle{color:#fff;background-color:#007bff;border-color:#007bff}.btn-outline-primary:not(:disabled):not(.disabled).active:focus,.btn-outline-primary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-primary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem 
#007bff80}.btn-outline-secondary{color:#6c757d;border-color:#6c757d}.btn-outline-secondary:hover{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-outline-secondary.focus,.btn-outline-secondary:focus{box-shadow:0 0 0 .2rem #6c757d80}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#6c757d;background-color:#0000}.btn-outline-secondary:not(:disabled):not(.disabled).active,.btn-outline-secondary:not(:disabled):not(.disabled):active,.show>.btn-outline-secondary.dropdown-toggle{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-outline-secondary:not(:disabled):not(.disabled).active:focus,.btn-outline-secondary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #6c757d80}.btn-outline-success{color:#28a745;border-color:#28a745}.btn-outline-success:hover{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success.focus,.btn-outline-success:focus{box-shadow:0 0 0 .2rem #28a74580}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#28a745;background-color:#0000}.btn-outline-success:not(:disabled):not(.disabled).active,.btn-outline-success:not(:disabled):not(.disabled):active,.show>.btn-outline-success.dropdown-toggle{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success:not(:disabled):not(.disabled).active:focus,.btn-outline-success:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-success.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #28a74580}.btn-outline-info{color:#17a2b8;border-color:#17a2b8}.btn-outline-info:hover{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info.focus,.btn-outline-info:focus{box-shadow:0 0 0 .2rem #17a2b880}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#17a2b8;background-color:#0000}.btn-outline-info:not(:disabled):not(.disabled).active,.btn-outline-info:not(:disabled):not(.disabled):active,.show>.btn-outline-info.dropdown-toggle{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info:not(:disabled):not(.disabled).active:focus,.btn-outline-info:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-info.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #17a2b880}.btn-outline-warning{color:#ffc107;border-color:#ffc107}.btn-outline-warning:hover{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning.focus,.btn-outline-warning:focus{box-shadow:0 0 0 .2rem #ffc10780}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#ffc107;background-color:#0000}.btn-outline-warning:not(:disabled):not(.disabled).active,.btn-outline-warning:not(:disabled):not(.disabled):active,.show>.btn-outline-warning.dropdown-toggle{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning:not(:disabled):not(.disabled).active:focus,.btn-outline-warning:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-warning.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #ffc10780}.btn-outline-danger{color:#dc3545;border-color:#dc3545}.btn-outline-danger:hover{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger.focus,.btn-outline-danger:focus{box-shadow:0 0 0 .2rem 
#dc354580}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#dc3545;background-color:#0000}.btn-outline-danger:not(:disabled):not(.disabled).active,.btn-outline-danger:not(:disabled):not(.disabled):active,.show>.btn-outline-danger.dropdown-toggle{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger:not(:disabled):not(.disabled).active:focus,.btn-outline-danger:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-danger.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #dc354580}.btn-outline-light{color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:hover{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light.focus,.btn-outline-light:focus{box-shadow:0 0 0 .2rem #f8f9fa80}.btn-outline-light.disabled,.btn-outline-light:disabled{color:#f8f9fa;background-color:#0000}.btn-outline-light:not(:disabled):not(.disabled).active,.btn-outline-light:not(:disabled):not(.disabled):active,.show>.btn-outline-light.dropdown-toggle{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:not(:disabled):not(.disabled).active:focus,.btn-outline-light:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #f8f9fa80}.btn-outline-dark{color:#343a40;border-color:#343a40}.btn-outline-dark:hover{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark.focus,.btn-outline-dark:focus{box-shadow:0 0 0 .2rem #343a4080}.btn-outline-dark.disabled,.btn-outline-dark:disabled{color:#343a40;background-color:#0000}.btn-outline-dark:not(:disabled):not(.disabled).active,.btn-outline-dark:not(:disabled):not(.disabled):active,.show>.btn-outline-dark.dropdown-toggle{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark:not(:disabled):not(.disabled).active:focus,.btn-outline-dark:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-dark.dropdown-toggle:focus{box-shadow:0 0 0 .2rem #343a4080}.btn-link{color:#007bff;font-weight:400;text-decoration:none}.btn-link:hover{color:#0056b3;text-decoration:underline}.btn-link.focus,.btn-link:focus{text-decoration:underline}.btn-link.disabled,.btn-link:disabled{color:#6c757d;pointer-events:none}.btn-group-lg>.btn,.btn-lg{border-radius:.3rem;padding:.5rem 1rem;font-size:1.25rem;line-height:1.5}.btn-group-sm>.btn,.btn-sm{border-radius:.2rem;padding:.25rem .5rem;font-size:.875rem;line-height:1.5}.btn-block{width:100%;display:block}.btn-block+.btn-block{margin-top:.5rem}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{transition:opacity .15s linear}@media (prefers-reduced-motion:reduce){.fade{transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{height:0;transition:height .35s;position:relative;overflow:hidden}@media (prefers-reduced-motion:reduce){.collapsing{transition:none}}.collapsing.width{width:0;height:auto;transition:width .35s}@media (prefers-reduced-motion:reduce){.collapsing.width{transition:none}}.dropdown,.dropleft,.dropright,.dropup{position:relative}.dropdown-toggle{white-space:nowrap}.dropdown-toggle:after{vertical-align:.255em;content:"";border:.3em solid #0000;border-top-color:currentColor;border-bottom:0;margin-left:.255em;display:inline-block}.dropdown-toggle:empty:after{margin-left:0}.dropdown-menu{z-index:1000;float:left;min-width:10rem;color:#212529;text-align:left;background-color:#fff;background-clip:padding-box;border:1px solid #00000026;border-radius:.25rem;margin:.125rem 0 0;padding:.5rem 
0;font-size:1rem;list-style:none;display:none;position:absolute;top:100%;left:0}.dropdown-menu-left{left:0;right:auto}.dropdown-menu-right{left:auto;right:0}@media (min-width:576px){.dropdown-menu-sm-left{left:0;right:auto}.dropdown-menu-sm-right{left:auto;right:0}}@media (min-width:768px){.dropdown-menu-md-left{left:0;right:auto}.dropdown-menu-md-right{left:auto;right:0}}@media (min-width:992px){.dropdown-menu-lg-left{left:0;right:auto}.dropdown-menu-lg-right{left:auto;right:0}}@media (min-width:1200px){.dropdown-menu-xl-left{left:0;right:auto}.dropdown-menu-xl-right{left:auto;right:0}}.dropup .dropdown-menu{margin-top:0;margin-bottom:.125rem;top:auto;bottom:100%}.dropup .dropdown-toggle:after{vertical-align:.255em;content:"";border:.3em solid #0000;border-top:0;border-bottom-color:currentColor;margin-left:.255em;display:inline-block}.dropup .dropdown-toggle:empty:after{margin-left:0}.dropright .dropdown-menu{margin-top:0;margin-left:.125rem;top:0;left:100%;right:auto}.dropright .dropdown-toggle:after{vertical-align:.255em;content:"";border:.3em solid #0000;border-left-color:currentColor;border-right:0;margin-left:.255em;display:inline-block}.dropright .dropdown-toggle:empty:after{margin-left:0}.dropright .dropdown-toggle:after{vertical-align:0}.dropleft .dropdown-menu{margin-top:0;margin-right:.125rem;top:0;left:auto;right:100%}.dropleft .dropdown-toggle:after{vertical-align:.255em;content:"";margin-left:.255em;display:inline-block}.dropleft .dropdown-toggle:after{display:none}.dropleft .dropdown-toggle:before{vertical-align:.255em;content:"";border-top:.3em solid #0000;border-bottom:.3em solid #0000;border-right:.3em solid;margin-right:.255em;display:inline-block}.dropleft .dropdown-toggle:empty:after{margin-left:0}.dropleft .dropdown-toggle:before{vertical-align:0}.dropdown-menu[x-placement^=bottom],.dropdown-menu[x-placement^=left],.dropdown-menu[x-placement^=right],.dropdown-menu[x-placement^=top]{bottom:auto;right:auto}.dropdown-divider{height:0;border-top:1px solid #e9ecef;margin:.5rem 0;overflow:hidden}.dropdown-item{width:100%;clear:both;color:#212529;text-align:inherit;white-space:nowrap;background-color:#0000;border:0;padding:.25rem 1.5rem;font-weight:400;display:block}.dropdown-item:focus,.dropdown-item:hover{color:#16181b;background-color:#e9ecef;text-decoration:none}.dropdown-item.active,.dropdown-item:active{color:#fff;background-color:#007bff;text-decoration:none}.dropdown-item.disabled,.dropdown-item:disabled{color:#adb5bd;pointer-events:none;background-color:#0000}.dropdown-menu.show{display:block}.dropdown-header{color:#6c757d;white-space:nowrap;margin-bottom:0;padding:.5rem 1.5rem;font-size:.875rem;display:block}.dropdown-item-text{color:#212529;padding:.25rem 1.5rem;display:block}.btn-group,.btn-group-vertical{vertical-align:middle;display:-ms-inline-flexbox;display:inline-flex;position:relative}.btn-group-vertical>.btn,.btn-group>.btn{-ms-flex:auto;flex:auto;position:relative}.btn-group-vertical>.btn:hover,.btn-group>.btn:hover{z-index:1}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus{z-index:1}.btn-toolbar{-ms-flex-wrap:wrap;flex-wrap:wrap;justify-content:flex-start;display:-ms-flexbox;display:flex}.btn-toolbar 
.input-group{width:auto}.btn-group>.btn-group:not(:first-child),.btn-group>.btn:not(:first-child){margin-left:-1px}.btn-group>.btn-group:not(:last-child)>.btn,.btn-group>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:not(:first-child)>.btn,.btn-group>.btn:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-left:.5625rem;padding-right:.5625rem}.dropdown-toggle-split:after,.dropright .dropdown-toggle-split:after,.dropup .dropdown-toggle-split:after{margin-left:0}.dropleft .dropdown-toggle-split:before{margin-right:0}.btn-group-sm>.btn+.dropdown-toggle-split,.btn-sm+.dropdown-toggle-split{padding-left:.375rem;padding-right:.375rem}.btn-group-lg>.btn+.dropdown-toggle-split,.btn-lg+.dropdown-toggle-split{padding-left:.75rem;padding-right:.75rem}.btn-group-vertical{-ms-flex-direction:column;flex-direction:column;justify-content:center;align-items:flex-start}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{width:100%}.btn-group-vertical>.btn-group:not(:first-child),.btn-group-vertical>.btn:not(:first-child){margin-top:-1px}.btn-group-vertical>.btn-group:not(:last-child)>.btn,.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle){border-bottom-left-radius:0;border-bottom-right-radius:0}.btn-group-vertical>.btn-group:not(:first-child)>.btn,.btn-group-vertical>.btn:not(:first-child){border-top-left-radius:0;border-top-right-radius:0}.btn-group-toggle>.btn,.btn-group-toggle>.btn-group>.btn{margin-bottom:0}.btn-group-toggle>.btn input[type=checkbox],.btn-group-toggle>.btn input[type=radio],.btn-group-toggle>.btn-group>.btn input[type=checkbox],.btn-group-toggle>.btn-group>.btn input[type=radio]{clip:rect(0,0,0,0);pointer-events:none;position:absolute}.input-group{width:100%;-ms-flex-wrap:wrap;flex-wrap:wrap;align-items:stretch;display:-ms-flexbox;display:flex;position:relative}.input-group>.custom-file,.input-group>.custom-select,.input-group>.form-control,.input-group>.form-control-plaintext{width:1%;min-width:0;-ms-flex:auto;flex:auto;margin-bottom:0;position:relative}.input-group>.custom-file+.custom-file,.input-group>.custom-file+.custom-select,.input-group>.custom-file+.form-control,.input-group>.custom-select+.custom-file,.input-group>.custom-select+.custom-select,.input-group>.custom-select+.form-control,.input-group>.form-control+.custom-file,.input-group>.form-control+.custom-select,.input-group>.form-control+.form-control,.input-group>.form-control-plaintext+.custom-file,.input-group>.form-control-plaintext+.custom-select,.input-group>.form-control-plaintext+.form-control{margin-left:-1px}.input-group>.custom-file .custom-file-input:focus~.custom-file-label,.input-group>.custom-select:focus,.input-group>.form-control:focus{z-index:3}.input-group>.custom-file .custom-file-input:focus{z-index:4}.input-group>.custom-select:not(:first-child),.input-group>.form-control:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.input-group>.custom-file{align-items:center;display:-ms-flexbox;display:flex}.input-group>.custom-file:not(:last-child) .custom-file-label,.input-group>.custom-file:not(:last-child) .custom-file-label:after{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.custom-file:not(:first-child) .custom-file-label{border-top-left-radius:0;border-bottom-left-radius:0}.input-group:not(.has-validation)>.custom-file:not(:last-child) .custom-file-label,.input-group:not(.has-validation)>.custom-file:not(:last-child) 
.custom-file-label:after,.input-group:not(.has-validation)>.custom-select:not(:last-child),.input-group:not(.has-validation)>.form-control:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.input-group.has-validation>.custom-file:nth-last-child(n+3) .custom-file-label,.input-group.has-validation>.custom-file:nth-last-child(n+3) .custom-file-label:after,.input-group.has-validation>.custom-select:nth-last-child(n+3),.input-group.has-validation>.form-control:nth-last-child(n+3){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-append,.input-group-prepend{display:-ms-flexbox;display:flex}.input-group-append .btn,.input-group-prepend .btn{z-index:2;position:relative}.input-group-append .btn:focus,.input-group-prepend .btn:focus{z-index:3}.input-group-append .btn+.btn,.input-group-append .btn+.input-group-text,.input-group-append .input-group-text+.btn,.input-group-append .input-group-text+.input-group-text,.input-group-prepend .btn+.btn,.input-group-prepend .btn+.input-group-text,.input-group-prepend .input-group-text+.btn,.input-group-prepend .input-group-text+.input-group-text{margin-left:-1px}.input-group-prepend{margin-right:-1px}.input-group-append{margin-left:-1px}.input-group-text{color:#495057;text-align:center;white-space:nowrap;background-color:#e9ecef;border:1px solid #ced4da;border-radius:.25rem;align-items:center;margin-bottom:0;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;display:-ms-flexbox;display:flex}.input-group-text input[type=checkbox],.input-group-text input[type=radio]{margin-top:0}.input-group-lg>.custom-select,.input-group-lg>.form-control:not(textarea){height:calc(1.5em + 1rem + 2px)}.input-group-lg>.custom-select,.input-group-lg>.form-control,.input-group-lg>.input-group-append>.btn,.input-group-lg>.input-group-append>.input-group-text,.input-group-lg>.input-group-prepend>.btn,.input-group-lg>.input-group-prepend>.input-group-text{border-radius:.3rem;padding:.5rem 1rem;font-size:1.25rem;line-height:1.5}.input-group-sm>.custom-select,.input-group-sm>.form-control:not(textarea){height:calc(1.5em + .5rem + 2px)}.input-group-sm>.custom-select,.input-group-sm>.form-control,.input-group-sm>.input-group-append>.btn,.input-group-sm>.input-group-append>.input-group-text,.input-group-sm>.input-group-prepend>.btn,.input-group-sm>.input-group-prepend>.input-group-text{border-radius:.2rem;padding:.25rem 
.5rem;font-size:.875rem;line-height:1.5}.input-group-lg>.custom-select,.input-group-sm>.custom-select{padding-right:1.75rem}.input-group.has-validation>.input-group-append:nth-last-child(n+3)>.btn,.input-group.has-validation>.input-group-append:nth-last-child(n+3)>.input-group-text,.input-group:not(.has-validation)>.input-group-append:not(:last-child)>.btn,.input-group:not(.has-validation)>.input-group-append:not(:last-child)>.input-group-text,.input-group>.input-group-append:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group>.input-group-append:last-child>.input-group-text:not(:last-child),.input-group>.input-group-prepend>.btn,.input-group>.input-group-prepend>.input-group-text{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.input-group-append>.btn,.input-group>.input-group-append>.input-group-text,.input-group>.input-group-prepend:first-child>.btn:not(:first-child),.input-group>.input-group-prepend:first-child>.input-group-text:not(:first-child),.input-group>.input-group-prepend:not(:first-child)>.btn,.input-group>.input-group-prepend:not(:first-child)>.input-group-text{border-top-left-radius:0;border-bottom-left-radius:0}.custom-control{z-index:1;min-height:1.5rem;-webkit-print-color-adjust:exact;color-adjust:exact;print-color-adjust:exact;padding-left:1.5rem;display:block;position:relative}.custom-control-inline{margin-right:1rem;display:-ms-inline-flexbox;display:inline-flex}.custom-control-input{z-index:-1;width:1rem;height:1.25rem;opacity:0;position:absolute;left:0}.custom-control-input:checked~.custom-control-label:before{color:#fff;background-color:#007bff;border-color:#007bff}.custom-control-input:focus~.custom-control-label:before{box-shadow:0 0 0 .2rem #007bff40}.custom-control-input:focus:not(:checked)~.custom-control-label:before{border-color:#80bdff}.custom-control-input:not(:disabled):active~.custom-control-label:before{color:#fff;background-color:#b3d7ff;border-color:#b3d7ff}.custom-control-input:disabled~.custom-control-label,.custom-control-input[disabled]~.custom-control-label{color:#6c757d}.custom-control-input:disabled~.custom-control-label:before,.custom-control-input[disabled]~.custom-control-label:before{background-color:#e9ecef}.custom-control-label{vertical-align:top;margin-bottom:0;position:relative}.custom-control-label:before{width:1rem;height:1rem;pointer-events:none;content:"";background-color:#fff;border:1px solid #adb5bd;display:block;position:absolute;top:.25rem;left:-1.5rem}.custom-control-label:after{width:1rem;height:1rem;content:"";background:50%/50% 50% no-repeat;display:block;position:absolute;top:.25rem;left:-1.5rem}.custom-checkbox .custom-control-label:before{border-radius:.25rem}.custom-checkbox .custom-control-input:checked~.custom-control-label:after{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' width='8' height='8' viewBox='0 0 8 8'%3e%3cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26l2.974 2.99L8 2.193z'/%3e%3c/svg%3e")}.custom-checkbox .custom-control-input:indeterminate~.custom-control-label:before{background-color:#007bff;border-color:#007bff}.custom-checkbox .custom-control-input:indeterminate~.custom-control-label:after{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' width='4' height='4' viewBox='0 0 4 4'%3e%3cpath stroke='%23fff' d='M0 2h4'/%3e%3c/svg%3e")}.custom-checkbox .custom-control-input:disabled:checked~.custom-control-label:before{background-color:#007bff80}.custom-checkbox 
.custom-control-input:disabled:indeterminate~.custom-control-label:before{background-color:#007bff80}.custom-radio .custom-control-label:before{border-radius:50%}.custom-radio .custom-control-input:checked~.custom-control-label:after{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%23fff'/%3e%3c/svg%3e")}.custom-radio .custom-control-input:disabled:checked~.custom-control-label:before{background-color:#007bff80}.custom-switch{padding-left:2.25rem}.custom-switch .custom-control-label:before{width:1.75rem;pointer-events:all;border-radius:.5rem;left:-2.25rem}.custom-switch .custom-control-label:after{width:calc(1rem - 4px);height:calc(1rem - 4px);background-color:#adb5bd;border-radius:.5rem;transition:transform .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out,-webkit-transform .15s ease-in-out;top:calc(.25rem + 2px);left:calc(2px - 2.25rem)}@media (prefers-reduced-motion:reduce){.custom-switch .custom-control-label:after{transition:none}}.custom-switch .custom-control-input:checked~.custom-control-label:after{background-color:#fff;-webkit-transform:translate(.75rem);transform:translate(.75rem)}.custom-switch .custom-control-input:disabled:checked~.custom-control-label:before{background-color:#007bff80}.custom-select{width:100%;height:calc(1.5em + .75rem + 2px);color:#495057;vertical-align:middle;-webkit-appearance:none;-moz-appearance:none;appearance:none;background:#fff url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' width='4' height='5' viewBox='0 0 4 5'%3e%3cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3e%3c/svg%3e") right .75rem center/8px 10px no-repeat;border:1px solid #ced4da;border-radius:.25rem;padding:.375rem 1.75rem .375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;display:inline-block}.custom-select:focus{border-color:#80bdff;outline:0;box-shadow:0 0 0 .2rem #007bff40}.custom-select:focus::-ms-value{color:#495057;background-color:#fff}.custom-select[multiple],.custom-select[size]:not([size="1"]){height:auto;background-image:none;padding-right:.75rem}.custom-select:disabled{color:#6c757d;background-color:#e9ecef}.custom-select::-ms-expand{display:none}.custom-select:-moz-focusring{color:#0000;text-shadow:0 0 #495057}.custom-select-sm{height:calc(1.5em + .5rem + 2px);padding-top:.25rem;padding-bottom:.25rem;padding-left:.5rem;font-size:.875rem}.custom-select-lg{height:calc(1.5em + 1rem + 2px);padding-top:.5rem;padding-bottom:.5rem;padding-left:1rem;font-size:1.25rem}.custom-file{width:100%;height:calc(1.5em + .75rem + 2px);margin-bottom:0;display:inline-block;position:relative}.custom-file-input{z-index:2;width:100%;height:calc(1.5em + .75rem + 2px);opacity:0;margin:0;position:relative;overflow:hidden}.custom-file-input:focus~.custom-file-label{border-color:#80bdff;box-shadow:0 0 0 .2rem #007bff40}.custom-file-input:disabled~.custom-file-label,.custom-file-input[disabled]~.custom-file-label{background-color:#e9ecef}.custom-file-input:lang(en)~.custom-file-label:after{content:"Browse"}.custom-file-input~.custom-file-label[data-browse]:after{content:attr(data-browse)}.custom-file-label{z-index:1;height:calc(1.5em + .75rem + 2px);color:#495057;background-color:#fff;border:1px solid #ced4da;border-radius:.25rem;padding:.375rem .75rem;font-weight:400;line-height:1.5;position:absolute;top:0;left:0;right:0;overflow:hidden}.custom-file-label:after{z-index:3;height:calc(1.5em + 
.75rem);color:#495057;content:"Browse";border-left:inherit;background-color:#e9ecef;border-radius:0 .25rem .25rem 0;padding:.375rem .75rem;line-height:1.5;display:block;position:absolute;top:0;bottom:0;right:0}.custom-range{width:100%;height:1.4rem;-webkit-appearance:none;-moz-appearance:none;appearance:none;background-color:#0000;padding:0}.custom-range:focus{outline:0}.custom-range:focus::-webkit-slider-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem #007bff40}.custom-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem #007bff40}.custom-range:focus::-ms-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem #007bff40}.custom-range::-moz-focus-outer{border:0}.custom-range::-webkit-slider-thumb{width:1rem;height:1rem;-webkit-appearance:none;appearance:none;background-color:#007bff;border:0;border-radius:1rem;margin-top:-.25rem;-webkit-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.custom-range::-webkit-slider-thumb{-webkit-transition:none;transition:none}}.custom-range::-webkit-slider-thumb:active{background-color:#b3d7ff}.custom-range::-webkit-slider-runnable-track{width:100%;height:.5rem;color:#0000;cursor:pointer;background-color:#dee2e6;border-color:#0000;border-radius:1rem}.custom-range::-moz-range-thumb{width:1rem;height:1rem;-moz-appearance:none;appearance:none;background-color:#007bff;border:0;border-radius:1rem;-moz-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.custom-range::-moz-range-thumb{-moz-transition:none;transition:none}}.custom-range::-moz-range-thumb:active{background-color:#b3d7ff}.custom-range::-moz-range-track{width:100%;height:.5rem;color:#0000;cursor:pointer;background-color:#dee2e6;border-color:#0000;border-radius:1rem}.custom-range::-ms-thumb{width:1rem;height:1rem;appearance:none;background-color:#007bff;border:0;border-radius:1rem;margin-top:0;margin-left:.2rem;margin-right:.2rem;-ms-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.custom-range::-ms-thumb{-ms-transition:none;transition:none}}.custom-range::-ms-thumb:active{background-color:#b3d7ff}.custom-range::-ms-track{width:100%;height:.5rem;color:#0000;cursor:pointer;background-color:#0000;border-width:.5rem;border-color:#0000}.custom-range::-ms-fill-lower{background-color:#dee2e6;border-radius:1rem}.custom-range::-ms-fill-upper{background-color:#dee2e6;border-radius:1rem;margin-right:15px}.custom-range:disabled::-webkit-slider-thumb{background-color:#adb5bd}.custom-range:disabled::-webkit-slider-runnable-track{cursor:default}.custom-range:disabled::-moz-range-thumb{background-color:#adb5bd}.custom-range:disabled::-moz-range-track{cursor:default}.custom-range:disabled::-ms-thumb{background-color:#adb5bd}.custom-control-label:before,.custom-file-label,.custom-select{transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media 
(prefers-reduced-motion:reduce){.custom-control-label:before,.custom-file-label,.custom-select{transition:none}}.nav{-ms-flex-wrap:wrap;flex-wrap:wrap;margin-bottom:0;padding-left:0;list-style:none;display:-ms-flexbox;display:flex}.nav-link{padding:.5rem 1rem;display:block}.nav-link:focus,.nav-link:hover{text-decoration:none}.nav-link.disabled{color:#6c757d;pointer-events:none;cursor:default}.nav-tabs{border-bottom:1px solid #dee2e6}.nav-tabs .nav-link{background-color:#0000;border:1px solid #0000;border-top-left-radius:.25rem;border-top-right-radius:.25rem;margin-bottom:-1px}.nav-tabs .nav-link:focus,.nav-tabs .nav-link:hover{isolation:isolate;border-color:#e9ecef #e9ecef #dee2e6}.nav-tabs .nav-link.disabled{color:#6c757d;background-color:#0000;border-color:#0000}.nav-tabs .nav-item.show .nav-link,.nav-tabs .nav-link.active{color:#495057;background-color:#fff;border-color:#dee2e6 #dee2e6 #fff}.nav-tabs .dropdown-menu{border-top-left-radius:0;border-top-right-radius:0;margin-top:-1px}.nav-pills .nav-link{background:0 0;border:0;border-radius:.25rem}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:#fff;background-color:#007bff}.nav-fill .nav-item,.nav-fill>.nav-link{text-align:center;-ms-flex:auto;flex:auto}.nav-justified .nav-item,.nav-justified>.nav-link{text-align:center;-ms-flex-positive:1;-ms-flex-preferred-size:0;flex-grow:1;flex-basis:0}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{-ms-flex-wrap:wrap;flex-wrap:wrap;justify-content:space-between;align-items:center;padding:.5rem 1rem;display:-ms-flexbox;display:flex;position:relative}.navbar .container,.navbar .container-fluid,.navbar .container-lg,.navbar .container-md,.navbar .container-sm,.navbar .container-xl{-ms-flex-wrap:wrap;flex-wrap:wrap;justify-content:space-between;align-items:center;display:-ms-flexbox;display:flex}.navbar-brand{font-size:1.25rem;line-height:inherit;white-space:nowrap;margin-right:1rem;padding-top:.3125rem;padding-bottom:.3125rem;display:inline-block}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-nav{-ms-flex-direction:column;flex-direction:column;margin-bottom:0;padding-left:0;list-style:none;display:-ms-flexbox;display:flex}.navbar-nav .nav-link{padding-left:0;padding-right:0}.navbar-nav .dropdown-menu{float:none;position:static}.navbar-text{padding-top:.5rem;padding-bottom:.5rem;display:inline-block}.navbar-collapse{-ms-flex-positive:1;-ms-flex-preferred-size:100%;flex-grow:1;flex-basis:100%;align-items:center}.navbar-toggler{background-color:#0000;border:1px solid #0000;border-radius:.25rem;padding:.25rem .75rem;font-size:1.25rem;line-height:1}.navbar-toggler:focus,.navbar-toggler:hover{text-decoration:none}.navbar-toggler-icon{width:1.5em;height:1.5em;vertical-align:middle;content:"";background:50%/100% 100% no-repeat;display:inline-block}.navbar-nav-scroll{max-height:75vh;overflow-y:auto}@media (max-width:575.98px){.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid,.navbar-expand-sm>.container-lg,.navbar-expand-sm>.container-md,.navbar-expand-sm>.container-sm,.navbar-expand-sm>.container-xl{padding-left:0;padding-right:0}}@media (min-width:576px){.navbar-expand-sm{-ms-flex-flow:row;flex-flow:row;justify-content:flex-start}.navbar-expand-sm .navbar-nav{-ms-flex-direction:row;flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav 
.nav-link{padding-left:.5rem;padding-right:.5rem}.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid,.navbar-expand-sm>.container-lg,.navbar-expand-sm>.container-md,.navbar-expand-sm>.container-sm,.navbar-expand-sm>.container-xl{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-sm .navbar-nav-scroll{overflow:visible}.navbar-expand-sm .navbar-collapse{-ms-flex-preferred-size:auto;flex-basis:auto;display:-ms-flexbox!important;display:flex!important}.navbar-expand-sm .navbar-toggler{display:none}}@media (max-width:767.98px){.navbar-expand-md>.container,.navbar-expand-md>.container-fluid,.navbar-expand-md>.container-lg,.navbar-expand-md>.container-md,.navbar-expand-md>.container-sm,.navbar-expand-md>.container-xl{padding-left:0;padding-right:0}}@media (min-width:768px){.navbar-expand-md{-ms-flex-flow:row;flex-flow:row;justify-content:flex-start}.navbar-expand-md .navbar-nav{-ms-flex-direction:row;flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-left:.5rem;padding-right:.5rem}.navbar-expand-md>.container,.navbar-expand-md>.container-fluid,.navbar-expand-md>.container-lg,.navbar-expand-md>.container-md,.navbar-expand-md>.container-sm,.navbar-expand-md>.container-xl{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-md .navbar-nav-scroll{overflow:visible}.navbar-expand-md .navbar-collapse{-ms-flex-preferred-size:auto;flex-basis:auto;display:-ms-flexbox!important;display:flex!important}.navbar-expand-md .navbar-toggler{display:none}}@media (max-width:991.98px){.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid,.navbar-expand-lg>.container-lg,.navbar-expand-lg>.container-md,.navbar-expand-lg>.container-sm,.navbar-expand-lg>.container-xl{padding-left:0;padding-right:0}}@media (min-width:992px){.navbar-expand-lg{-ms-flex-flow:row;flex-flow:row;justify-content:flex-start}.navbar-expand-lg .navbar-nav{-ms-flex-direction:row;flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .nav-link{padding-left:.5rem;padding-right:.5rem}.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid,.navbar-expand-lg>.container-lg,.navbar-expand-lg>.container-md,.navbar-expand-lg>.container-sm,.navbar-expand-lg>.container-xl{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-lg .navbar-nav-scroll{overflow:visible}.navbar-expand-lg .navbar-collapse{-ms-flex-preferred-size:auto;flex-basis:auto;display:-ms-flexbox!important;display:flex!important}.navbar-expand-lg .navbar-toggler{display:none}}@media (max-width:1199.98px){.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid,.navbar-expand-xl>.container-lg,.navbar-expand-xl>.container-md,.navbar-expand-xl>.container-sm,.navbar-expand-xl>.container-xl{padding-left:0;padding-right:0}}@media (min-width:1200px){.navbar-expand-xl{-ms-flex-flow:row;flex-flow:row;justify-content:flex-start}.navbar-expand-xl .navbar-nav{-ms-flex-direction:row;flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-left:.5rem;padding-right:.5rem}.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid,.navbar-expand-xl>.container-lg,.navbar-expand-xl>.container-md,.navbar-expand-xl>.container-sm,.navbar-expand-xl>.container-xl{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-xl .navbar-nav-scroll{overflow:visible}.navbar-expand-xl 
.navbar-collapse{-ms-flex-preferred-size:auto;flex-basis:auto;display:-ms-flexbox!important;display:flex!important}.navbar-expand-xl .navbar-toggler{display:none}}.navbar-expand{-ms-flex-flow:row;flex-flow:row;justify-content:flex-start}.navbar-expand>.container,.navbar-expand>.container-fluid,.navbar-expand>.container-lg,.navbar-expand>.container-md,.navbar-expand>.container-sm,.navbar-expand>.container-xl{padding-left:0;padding-right:0}.navbar-expand .navbar-nav{-ms-flex-direction:row;flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-left:.5rem;padding-right:.5rem}.navbar-expand>.container,.navbar-expand>.container-fluid,.navbar-expand>.container-lg,.navbar-expand>.container-md,.navbar-expand>.container-sm,.navbar-expand>.container-xl{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand .navbar-nav-scroll{overflow:visible}.navbar-expand .navbar-collapse{-ms-flex-preferred-size:auto;flex-basis:auto;display:-ms-flexbox!important;display:flex!important}.navbar-expand .navbar-toggler{display:none}.navbar-light .navbar-brand{color:#000000e6}.navbar-light .navbar-brand:focus,.navbar-light .navbar-brand:hover{color:#000000e6}.navbar-light .navbar-nav .nav-link{color:#00000080}.navbar-light .navbar-nav .nav-link:focus,.navbar-light .navbar-nav .nav-link:hover{color:#000000b3}.navbar-light .navbar-nav .nav-link.disabled{color:#0000004d}.navbar-light .navbar-nav .active>.nav-link,.navbar-light .navbar-nav .nav-link.active,.navbar-light .navbar-nav .nav-link.show,.navbar-light .navbar-nav .show>.nav-link{color:#000000e6}.navbar-light .navbar-toggler{color:#00000080;border-color:#0000001a}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' width='30' height='30' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%280, 0, 0, 0.5%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}.navbar-light .navbar-text{color:#00000080}.navbar-light .navbar-text a{color:#000000e6}.navbar-light .navbar-text a:focus,.navbar-light .navbar-text a:hover{color:#000000e6}.navbar-dark .navbar-brand{color:#fff}.navbar-dark .navbar-brand:focus,.navbar-dark .navbar-brand:hover{color:#fff}.navbar-dark .navbar-nav .nav-link{color:#ffffff80}.navbar-dark .navbar-nav .nav-link:focus,.navbar-dark .navbar-nav .nav-link:hover{color:#ffffffbf}.navbar-dark .navbar-nav .nav-link.disabled{color:#ffffff40}.navbar-dark .navbar-nav .active>.nav-link,.navbar-dark .navbar-nav .nav-link.active,.navbar-dark .navbar-nav .nav-link.show,.navbar-dark .navbar-nav .show>.nav-link{color:#fff}.navbar-dark .navbar-toggler{color:#ffffff80;border-color:#ffffff1a}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' width='30' height='30' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%28255, 255, 255, 0.5%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}.navbar-dark .navbar-text{color:#ffffff80}.navbar-dark .navbar-text a{color:#fff}.navbar-dark .navbar-text a:focus,.navbar-dark .navbar-text a:hover{color:#fff}.card{min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid 
#00000020;border-radius:.25rem;-ms-flex-direction:column;flex-direction:column;display:-ms-flexbox;display:flex;position:relative}.card>hr{margin-left:0;margin-right:0}.card>.list-group{border-top:inherit;border-bottom:inherit}.card>.list-group:first-child{border-top-width:0;border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card>.list-group:last-child{border-bottom-width:0;border-bottom-left-radius:calc(.25rem - 1px);border-bottom-right-radius:calc(.25rem - 1px)}.card>.card-header+.list-group,.card>.list-group+.card-footer{border-top:0}.card-body{min-height:1px;-ms-flex:auto;flex:auto;padding:1.25rem}.card-title{margin-bottom:.75rem}.card-subtitle{margin-top:-.375rem;margin-bottom:0}.card-text:last-child{margin-bottom:0}.card-link:hover{text-decoration:none}.card-link+.card-link{margin-left:1.25rem}.card-header{background-color:#00000008;border-bottom:1px solid #00000020;margin-bottom:0;padding:.75rem 1.25rem}.card-header:first-child{border-radius:calc(.25rem - 1px) calc(.25rem - 1px) 0 0}.card-footer{background-color:#00000008;border-top:1px solid #00000020;padding:.75rem 1.25rem}.card-footer:last-child{border-radius:0 0 calc(.25rem - 1px) calc(.25rem - 1px)}.card-header-tabs{border-bottom:0;margin-bottom:-.75rem;margin-left:-.625rem;margin-right:-.625rem}.card-header-pills{margin-left:-.625rem;margin-right:-.625rem}.card-img-overlay{border-radius:calc(.25rem - 1px);padding:1.25rem;position:absolute;inset:0}.card-img,.card-img-bottom,.card-img-top{width:100%;-ms-flex-negative:0;flex-shrink:0}.card-img,.card-img-top{border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card-img,.card-img-bottom{border-bottom-left-radius:calc(.25rem - 1px);border-bottom-right-radius:calc(.25rem - 1px)}.card-deck .card{margin-bottom:15px}@media (min-width:576px){.card-deck{-ms-flex-flow:wrap;flex-flow:wrap;margin-left:-15px;margin-right:-15px;display:-ms-flexbox;display:flex}.card-deck .card{-ms-flex:1 0;flex:1 0;margin-bottom:0;margin-left:15px;margin-right:15px}}.card-group>.card{margin-bottom:15px}@media (min-width:576px){.card-group{-ms-flex-flow:wrap;flex-flow:wrap;display:-ms-flexbox;display:flex}.card-group>.card{-ms-flex:1 0;flex:1 0;margin-bottom:0}.card-group>.card+.card{border-left:0;margin-left:0}.card-group>.card:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:not(:last-child) .card-header,.card-group>.card:not(:last-child) .card-img-top{border-top-right-radius:0}.card-group>.card:not(:last-child) .card-footer,.card-group>.card:not(:last-child) .card-img-bottom{border-bottom-right-radius:0}.card-group>.card:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:not(:first-child) .card-header,.card-group>.card:not(:first-child) .card-img-top{border-top-left-radius:0}.card-group>.card:not(:first-child) .card-footer,.card-group>.card:not(:first-child) .card-img-bottom{border-bottom-left-radius:0}}.card-columns .card{margin-bottom:.75rem}@media (min-width:576px){.card-columns{-webkit-column-count:3;-moz-column-count:3;column-count:3;-webkit-column-gap:1.25rem;-moz-column-gap:1.25rem;orphans:1;widows:1;column-gap:1.25rem}.card-columns 
.card{width:100%;display:inline-block}}.accordion{overflow-anchor:none}.accordion>.card{overflow:hidden}.accordion>.card:not(:last-of-type){border-bottom:0;border-bottom-left-radius:0;border-bottom-right-radius:0}.accordion>.card:not(:first-of-type){border-top-left-radius:0;border-top-right-radius:0}.accordion>.card>.card-header{border-radius:0;margin-bottom:-1px}.breadcrumb{background-color:#e9ecef;border-radius:.25rem;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-bottom:1rem;padding:.75rem 1rem;list-style:none;display:-ms-flexbox;display:flex}.breadcrumb-item+.breadcrumb-item{padding-left:.5rem}.breadcrumb-item+.breadcrumb-item:before{float:left;color:#6c757d;content:"/";padding-right:.5rem}.breadcrumb-item+.breadcrumb-item:hover:before{text-decoration:underline}.breadcrumb-item+.breadcrumb-item:hover:before{text-decoration:none}.breadcrumb-item.active{color:#6c757d}.pagination{border-radius:.25rem;padding-left:0;list-style:none;display:-ms-flexbox;display:flex}.page-link{color:#007bff;background-color:#fff;border:1px solid #dee2e6;margin-left:-1px;padding:.5rem .75rem;line-height:1.25;display:block;position:relative}.page-link:hover{z-index:2;color:#0056b3;background-color:#e9ecef;border-color:#dee2e6;text-decoration:none}.page-link:focus{z-index:3;outline:0;box-shadow:0 0 0 .2rem #007bff40}.page-item:first-child .page-link{border-top-left-radius:.25rem;border-bottom-left-radius:.25rem;margin-left:0}.page-item:last-child .page-link{border-top-right-radius:.25rem;border-bottom-right-radius:.25rem}.page-item.active .page-link{z-index:3;color:#fff;background-color:#007bff;border-color:#007bff}.page-item.disabled .page-link{color:#6c757d;pointer-events:none;cursor:auto;background-color:#fff;border-color:#dee2e6}.pagination-lg .page-link{padding:.75rem 1.5rem;font-size:1.25rem;line-height:1.5}.pagination-lg .page-item:first-child .page-link{border-top-left-radius:.3rem;border-bottom-left-radius:.3rem}.pagination-lg .page-item:last-child .page-link{border-top-right-radius:.3rem;border-bottom-right-radius:.3rem}.pagination-sm .page-link{padding:.25rem .5rem;font-size:.875rem;line-height:1.5}.pagination-sm .page-item:first-child .page-link{border-top-left-radius:.2rem;border-bottom-left-radius:.2rem}.pagination-sm .page-item:last-child .page-link{border-top-right-radius:.2rem;border-bottom-right-radius:.2rem}.badge{text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem;padding:.25em .4em;font-size:75%;font-weight:700;line-height:1;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;display:inline-block}@media (prefers-reduced-motion:reduce){.badge{transition:none}}a.badge:focus,a.badge:hover{text-decoration:none}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.badge-pill{border-radius:10rem;padding-left:.6em;padding-right:.6em}.badge-primary{color:#fff;background-color:#007bff}a.badge-primary:focus,a.badge-primary:hover{color:#fff;background-color:#0062cc}a.badge-primary.focus,a.badge-primary:focus{outline:0;box-shadow:0 0 0 .2rem #007bff80}.badge-secondary{color:#fff;background-color:#6c757d}a.badge-secondary:focus,a.badge-secondary:hover{color:#fff;background-color:#545b62}a.badge-secondary.focus,a.badge-secondary:focus{outline:0;box-shadow:0 0 0 .2rem #6c757d80}.badge-success{color:#fff;background-color:#28a745}a.badge-success:focus,a.badge-success:hover{color:#fff;background-color:#1e7e34}a.badge-success.focus,a.badge-success:focus{outline:0;box-shadow:0 0 0 .2rem 
#28a74580}.badge-info{color:#fff;background-color:#17a2b8}a.badge-info:focus,a.badge-info:hover{color:#fff;background-color:#117a8b}a.badge-info.focus,a.badge-info:focus{outline:0;box-shadow:0 0 0 .2rem #17a2b880}.badge-warning{color:#212529;background-color:#ffc107}a.badge-warning:focus,a.badge-warning:hover{color:#212529;background-color:#d39e00}a.badge-warning.focus,a.badge-warning:focus{outline:0;box-shadow:0 0 0 .2rem #ffc10780}.badge-danger{color:#fff;background-color:#dc3545}a.badge-danger:focus,a.badge-danger:hover{color:#fff;background-color:#bd2130}a.badge-danger.focus,a.badge-danger:focus{outline:0;box-shadow:0 0 0 .2rem #dc354580}.badge-light{color:#212529;background-color:#f8f9fa}a.badge-light:focus,a.badge-light:hover{color:#212529;background-color:#dae0e5}a.badge-light.focus,a.badge-light:focus{outline:0;box-shadow:0 0 0 .2rem #f8f9fa80}.badge-dark{color:#fff;background-color:#343a40}a.badge-dark:focus,a.badge-dark:hover{color:#fff;background-color:#1d2124}a.badge-dark.focus,a.badge-dark:focus{outline:0;box-shadow:0 0 0 .2rem #343a4080}.jumbotron{background-color:#e9ecef;border-radius:.3rem;margin-bottom:2rem;padding:2rem 1rem}@media (min-width:576px){.jumbotron{padding:4rem 2rem}}.jumbotron-fluid{border-radius:0;padding-left:0;padding-right:0}.alert{border:1px solid #0000;border-radius:.25rem;margin-bottom:1rem;padding:.75rem 1.25rem;position:relative}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:4rem}.alert-dismissible .close{z-index:2;color:inherit;padding:.75rem 1.25rem;position:absolute;top:0;right:0}.alert-primary{color:#004085;background-color:#cce5ff;border-color:#b8daff}.alert-primary hr{border-top-color:#9fcdff}.alert-primary .alert-link{color:#002752}.alert-secondary{color:#383d41;background-color:#e2e3e5;border-color:#d6d8db}.alert-secondary hr{border-top-color:#c8cbcf}.alert-secondary .alert-link{color:#202326}.alert-success{color:#155724;background-color:#d4edda;border-color:#c3e6cb}.alert-success hr{border-top-color:#b1dfbb}.alert-success .alert-link{color:#0b2e13}.alert-info{color:#0c5460;background-color:#d1ecf1;border-color:#bee5eb}.alert-info hr{border-top-color:#abdde5}.alert-info .alert-link{color:#062c33}.alert-warning{color:#856404;background-color:#fff3cd;border-color:#ffeeba}.alert-warning hr{border-top-color:#ffe8a1}.alert-warning .alert-link{color:#533f03}.alert-danger{color:#721c24;background-color:#f8d7da;border-color:#f5c6cb}.alert-danger hr{border-top-color:#f1b0b7}.alert-danger .alert-link{color:#491217}.alert-light{color:#818182;background-color:#fefefe;border-color:#fdfdfe}.alert-light hr{border-top-color:#ececf6}.alert-light .alert-link{color:#686868}.alert-dark{color:#1b1e21;background-color:#d6d8d9;border-color:#c6c8ca}.alert-dark hr{border-top-color:#b9bbbe}.alert-dark .alert-link{color:#040505}@-webkit-keyframes progress-bar-stripes{0%{background-position:1rem 0}to{background-position:0 0}}@keyframes progress-bar-stripes{0%{background-position:1rem 0}to{background-position:0 0}}.progress{height:1rem;background-color:#e9ecef;border-radius:.25rem;font-size:.75rem;line-height:0;display:-ms-flexbox;display:flex;overflow:hidden}.progress-bar{color:#fff;text-align:center;white-space:nowrap;background-color:#007bff;-ms-flex-direction:column;flex-direction:column;justify-content:center;transition:width .6s;display:-ms-flexbox;display:flex;overflow:hidden}@media (prefers-reduced-motion:reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg,#ffffff26 
25%,#0000 25% 50%,#ffffff26 50% 75%,#0000 75%,#0000);background-size:1rem 1rem}.progress-bar-animated{-webkit-animation:progress-bar-stripes 1s linear infinite;animation:progress-bar-stripes 1s linear infinite}@media (prefers-reduced-motion:reduce){.progress-bar-animated{-webkit-animation:none;animation:none}}.media{align-items:flex-start;display:-ms-flexbox;display:flex}.media-body{-ms-flex:1;flex:1}.list-group{border-radius:.25rem;-ms-flex-direction:column;flex-direction:column;margin-bottom:0;padding-left:0;display:-ms-flexbox;display:flex}.list-group-item-action{width:100%;color:#495057;text-align:inherit}.list-group-item-action:focus,.list-group-item-action:hover{z-index:1;color:#495057;background-color:#f8f9fa;text-decoration:none}.list-group-item-action:active{color:#212529;background-color:#e9ecef}.list-group-item{background-color:#fff;border:1px solid #00000020;padding:.75rem 1.25rem;display:block;position:relative}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item.disabled,.list-group-item:disabled{color:#6c757d;pointer-events:none;background-color:#fff}.list-group-item.active{z-index:2;color:#fff;background-color:#007bff;border-color:#007bff}.list-group-item+.list-group-item{border-top-width:0}.list-group-item+.list-group-item.active{border-top-width:1px;margin-top:-1px}.list-group-horizontal{-ms-flex-direction:row;flex-direction:row}.list-group-horizontal>.list-group-item:first-child{border-top-right-radius:0;border-bottom-left-radius:.25rem}.list-group-horizontal>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal>.list-group-item.active{margin-top:0}.list-group-horizontal>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal>.list-group-item+.list-group-item.active{border-left-width:1px;margin-left:-1px}@media (min-width:576px){.list-group-horizontal-sm{-ms-flex-direction:row;flex-direction:row}.list-group-horizontal-sm>.list-group-item:first-child{border-top-right-radius:0;border-bottom-left-radius:.25rem}.list-group-horizontal-sm>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-sm>.list-group-item.active{margin-top:0}.list-group-horizontal-sm>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-sm>.list-group-item+.list-group-item.active{border-left-width:1px;margin-left:-1px}}@media (min-width:768px){.list-group-horizontal-md{-ms-flex-direction:row;flex-direction:row}.list-group-horizontal-md>.list-group-item:first-child{border-top-right-radius:0;border-bottom-left-radius:.25rem}.list-group-horizontal-md>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-md>.list-group-item.active{margin-top:0}.list-group-horizontal-md>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-md>.list-group-item+.list-group-item.active{border-left-width:1px;margin-left:-1px}}@media 
(min-width:992px){.list-group-horizontal-lg{-ms-flex-direction:row;flex-direction:row}.list-group-horizontal-lg>.list-group-item:first-child{border-top-right-radius:0;border-bottom-left-radius:.25rem}.list-group-horizontal-lg>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-lg>.list-group-item.active{margin-top:0}.list-group-horizontal-lg>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-lg>.list-group-item+.list-group-item.active{border-left-width:1px;margin-left:-1px}}@media (min-width:1200px){.list-group-horizontal-xl{-ms-flex-direction:row;flex-direction:row}.list-group-horizontal-xl>.list-group-item:first-child{border-top-right-radius:0;border-bottom-left-radius:.25rem}.list-group-horizontal-xl>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-xl>.list-group-item.active{margin-top:0}.list-group-horizontal-xl>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xl>.list-group-item+.list-group-item.active{border-left-width:1px;margin-left:-1px}}.list-group-flush{border-radius:0}.list-group-flush>.list-group-item{border-width:0 0 1px}.list-group-flush>.list-group-item:last-child{border-bottom-width:0}.list-group-item-primary{color:#004085;background-color:#b8daff}.list-group-item-primary.list-group-item-action:focus,.list-group-item-primary.list-group-item-action:hover{color:#004085;background-color:#9fcdff}.list-group-item-primary.list-group-item-action.active{color:#fff;background-color:#004085;border-color:#004085}.list-group-item-secondary{color:#383d41;background-color:#d6d8db}.list-group-item-secondary.list-group-item-action:focus,.list-group-item-secondary.list-group-item-action:hover{color:#383d41;background-color:#c8cbcf}.list-group-item-secondary.list-group-item-action.active{color:#fff;background-color:#383d41;border-color:#383d41}.list-group-item-success{color:#155724;background-color:#c3e6cb}.list-group-item-success.list-group-item-action:focus,.list-group-item-success.list-group-item-action:hover{color:#155724;background-color:#b1dfbb}.list-group-item-success.list-group-item-action.active{color:#fff;background-color:#155724;border-color:#155724}.list-group-item-info{color:#0c5460;background-color:#bee5eb}.list-group-item-info.list-group-item-action:focus,.list-group-item-info.list-group-item-action:hover{color:#0c5460;background-color:#abdde5}.list-group-item-info.list-group-item-action.active{color:#fff;background-color:#0c5460;border-color:#0c5460}.list-group-item-warning{color:#856404;background-color:#ffeeba}.list-group-item-warning.list-group-item-action:focus,.list-group-item-warning.list-group-item-action:hover{color:#856404;background-color:#ffe8a1}.list-group-item-warning.list-group-item-action.active{color:#fff;background-color:#856404;border-color:#856404}.list-group-item-danger{color:#721c24;background-color:#f5c6cb}.list-group-item-danger.list-group-item-action:focus,.list-group-item-danger.list-group-item-action:hover{color:#721c24;background-color:#f1b0b7}.list-group-item-danger.list-group-item-action.active{color:#fff;background-color:#721c24;border-color:#721c24}.list-group-item-light{color:#818182;background-color:#fdfdfe}.list-group-item-light.list-group-item-action:focus,.list-group-item-light.list-group-item-action:hover{color:#818182;background-color:#ececf6}.list-group-item-light.list-group-item-action.active{color:#fff;background-color:#8181
82;border-color:#818182}.list-group-item-dark{color:#1b1e21;background-color:#c6c8ca}.list-group-item-dark.list-group-item-action:focus,.list-group-item-dark.list-group-item-action:hover{color:#1b1e21;background-color:#b9bbbe}.list-group-item-dark.list-group-item-action.active{color:#fff;background-color:#1b1e21;border-color:#1b1e21}.close{float:right;color:#000;text-shadow:0 1px #fff;opacity:.5;font-size:1.5rem;font-weight:700;line-height:1}.close:hover{color:#000;text-decoration:none}.close:not(:disabled):not(.disabled):focus,.close:not(:disabled):not(.disabled):hover{opacity:.75}button.close{background-color:#0000;border:0;padding:0}a.close.disabled{pointer-events:none}.toast{max-width:350px;opacity:0;background-color:#ffffffd9;background-clip:padding-box;border:1px solid #0000001a;border-radius:.25rem;-ms-flex-preferred-size:350px;flex-basis:350px;font-size:.875rem;box-shadow:0 .25rem .75rem #0000001a}.toast:not(:last-child){margin-bottom:.75rem}.toast.showing{opacity:1}.toast.show{opacity:1;display:block}.toast.hide{display:none}.toast-header{color:#6c757d;background-color:#ffffffd9;background-clip:padding-box;border-bottom:1px solid #0000000d;border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px);align-items:center;padding:.25rem .75rem;display:-ms-flexbox;display:flex}.toast-body{padding:.75rem}.modal-open{overflow:hidden}.modal-open .modal{overflow:hidden auto}.modal{z-index:1050;width:100%;height:100%;outline:0;display:none;position:fixed;top:0;left:0;overflow:hidden}.modal-dialog{width:auto;pointer-events:none;margin:.5rem;position:relative}.modal.fade .modal-dialog{transition:transform .3s ease-out,-webkit-transform .3s ease-out;-webkit-transform:translateY(-50px);transform:translateY(-50px)}@media (prefers-reduced-motion:reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{-webkit-transform:none;transform:none}.modal.modal-static .modal-dialog{-webkit-transform:scale(1.02);transform:scale(1.02)}.modal-dialog-scrollable{max-height:calc(100% - 1rem);display:-ms-flexbox;display:flex}.modal-dialog-scrollable .modal-content{max-height:calc(100vh - 1rem);overflow:hidden}.modal-dialog-scrollable .modal-footer,.modal-dialog-scrollable .modal-header{-ms-flex-negative:0;flex-shrink:0}.modal-dialog-scrollable .modal-body{overflow-y:auto}.modal-dialog-centered{min-height:calc(100% - 1rem);align-items:center;display:-ms-flexbox;display:flex}.modal-dialog-centered:before{height:calc(100vh - 1rem);height:-webkit-min-content;height:-moz-min-content;height:min-content;content:"";display:block}.modal-dialog-centered.modal-dialog-scrollable{height:100%;-ms-flex-direction:column;flex-direction:column;justify-content:center}.modal-dialog-centered.modal-dialog-scrollable .modal-content{max-height:none}.modal-dialog-centered.modal-dialog-scrollable:before{content:none}.modal-content{width:100%;pointer-events:auto;background-color:#fff;background-clip:padding-box;border:1px solid #0003;border-radius:.3rem;outline:0;-ms-flex-direction:column;flex-direction:column;display:-ms-flexbox;display:flex;position:relative}.modal-backdrop{z-index:1040;width:100vw;height:100vh;background-color:#000;position:fixed;top:0;left:0}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:.5}.modal-header{border-bottom:1px solid #dee2e6;border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px);justify-content:space-between;align-items:flex-start;padding:1rem;display:-ms-flexbox;display:flex}.modal-header .close{margin:-1rem -1rem -1rem 
auto;padding:1rem}.modal-title{margin-bottom:0;line-height:1.5}.modal-body{-ms-flex:auto;flex:auto;padding:1rem;position:relative}.modal-footer{border-top:1px solid #dee2e6;border-bottom-left-radius:calc(.3rem - 1px);border-bottom-right-radius:calc(.3rem - 1px);-ms-flex-wrap:wrap;flex-wrap:wrap;justify-content:flex-end;align-items:center;padding:.75rem;display:-ms-flexbox;display:flex}.modal-footer>*{margin:.25rem}.modal-scrollbar-measure{width:50px;height:50px;position:absolute;top:-9999px;overflow:scroll}@media (min-width:576px){.modal-dialog{max-width:500px;margin:1.75rem auto}.modal-dialog-scrollable{max-height:calc(100% - 3.5rem)}.modal-dialog-scrollable .modal-content{max-height:calc(100vh - 3.5rem)}.modal-dialog-centered{min-height:calc(100% - 3.5rem)}.modal-dialog-centered:before{height:calc(100vh - 3.5rem);height:-webkit-min-content;height:-moz-min-content;height:min-content}.modal-sm{max-width:300px}}@media (min-width:992px){.modal-lg,.modal-xl{max-width:800px}}@media (min-width:1200px){.modal-xl{max-width:1140px}}.tooltip{z-index:1070;text-align:left;text-align:start;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;white-space:normal;word-spacing:normal;line-break:auto;word-wrap:break-word;opacity:0;margin:0;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,Liberation Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;font-size:.875rem;font-style:normal;font-weight:400;line-height:1.5;text-decoration:none;display:block;position:absolute}.tooltip.show{opacity:.9}.tooltip .arrow{width:.8rem;height:.4rem;display:block;position:absolute}.tooltip .arrow:before{content:"";border-style:solid;border-color:#0000;position:absolute}.bs-tooltip-auto[x-placement^=top],.bs-tooltip-top{padding:.4rem 0}.bs-tooltip-auto[x-placement^=top] .arrow,.bs-tooltip-top .arrow{bottom:0}.bs-tooltip-auto[x-placement^=top] .arrow:before,.bs-tooltip-top .arrow:before{border-width:.4rem .4rem 0;border-top-color:#000;top:0}.bs-tooltip-auto[x-placement^=right],.bs-tooltip-right{padding:0 .4rem}.bs-tooltip-auto[x-placement^=right] .arrow,.bs-tooltip-right .arrow{width:.4rem;height:.8rem;left:0}.bs-tooltip-auto[x-placement^=right] .arrow:before,.bs-tooltip-right .arrow:before{border-width:.4rem .4rem .4rem 0;border-right-color:#000;right:0}.bs-tooltip-auto[x-placement^=bottom],.bs-tooltip-bottom{padding:.4rem 0}.bs-tooltip-auto[x-placement^=bottom] .arrow,.bs-tooltip-bottom .arrow{top:0}.bs-tooltip-auto[x-placement^=bottom] .arrow:before,.bs-tooltip-bottom .arrow:before{border-width:0 .4rem .4rem;border-bottom-color:#000;bottom:0}.bs-tooltip-auto[x-placement^=left],.bs-tooltip-left{padding:0 .4rem}.bs-tooltip-auto[x-placement^=left] .arrow,.bs-tooltip-left .arrow{width:.4rem;height:.8rem;right:0}.bs-tooltip-auto[x-placement^=left] .arrow:before,.bs-tooltip-left .arrow:before{border-width:.4rem 0 .4rem .4rem;border-left-color:#000;left:0}.tooltip-inner{max-width:200px;color:#fff;text-align:center;background-color:#000;border-radius:.25rem;padding:.25rem .5rem}.popover{z-index:1060;max-width:276px;text-align:left;text-align:start;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;white-space:normal;word-spacing:normal;line-break:auto;word-wrap:break-word;background-color:#fff;background-clip:padding-box;border:1px solid #0003;border-radius:.3rem;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,Liberation Sans,sans-serif,Apple Color Emoji,Segoe UI 
Emoji,Segoe UI Symbol,Noto Color Emoji;font-size:.875rem;font-style:normal;font-weight:400;line-height:1.5;text-decoration:none;display:block;position:absolute;top:0;left:0}.popover .arrow{width:1rem;height:.5rem;margin:0 .3rem;display:block;position:absolute}.popover .arrow:after,.popover .arrow:before{content:"";border-style:solid;border-color:#0000;display:block;position:absolute}.bs-popover-auto[x-placement^=top],.bs-popover-top{margin-bottom:.5rem}.bs-popover-auto[x-placement^=top]>.arrow,.bs-popover-top>.arrow{bottom:calc(-.5rem - 1px)}.bs-popover-auto[x-placement^=top]>.arrow:before,.bs-popover-top>.arrow:before{border-width:.5rem .5rem 0;border-top-color:#00000040;bottom:0}.bs-popover-auto[x-placement^=top]>.arrow:after,.bs-popover-top>.arrow:after{border-width:.5rem .5rem 0;border-top-color:#fff;bottom:1px}.bs-popover-auto[x-placement^=right],.bs-popover-right{margin-left:.5rem}.bs-popover-auto[x-placement^=right]>.arrow,.bs-popover-right>.arrow{width:.5rem;height:1rem;margin:.3rem 0;left:calc(-.5rem - 1px)}.bs-popover-auto[x-placement^=right]>.arrow:before,.bs-popover-right>.arrow:before{border-width:.5rem .5rem .5rem 0;border-right-color:#00000040;left:0}.bs-popover-auto[x-placement^=right]>.arrow:after,.bs-popover-right>.arrow:after{border-width:.5rem .5rem .5rem 0;border-right-color:#fff;left:1px}.bs-popover-auto[x-placement^=bottom],.bs-popover-bottom{margin-top:.5rem}.bs-popover-auto[x-placement^=bottom]>.arrow,.bs-popover-bottom>.arrow{top:calc(-.5rem - 1px)}.bs-popover-auto[x-placement^=bottom]>.arrow:before,.bs-popover-bottom>.arrow:before{border-width:0 .5rem .5rem;border-bottom-color:#00000040;top:0}.bs-popover-auto[x-placement^=bottom]>.arrow:after,.bs-popover-bottom>.arrow:after{border-width:0 .5rem .5rem;border-bottom-color:#fff;top:1px}.bs-popover-auto[x-placement^=bottom] .popover-header:before,.bs-popover-bottom .popover-header:before{width:1rem;content:"";border-bottom:1px solid #f7f7f7;margin-left:-.5rem;display:block;position:absolute;top:0;left:50%}.bs-popover-auto[x-placement^=left],.bs-popover-left{margin-right:.5rem}.bs-popover-auto[x-placement^=left]>.arrow,.bs-popover-left>.arrow{width:.5rem;height:1rem;margin:.3rem 0;right:calc(-.5rem - 1px)}.bs-popover-auto[x-placement^=left]>.arrow:before,.bs-popover-left>.arrow:before{border-width:.5rem 0 .5rem .5rem;border-left-color:#00000040;right:0}.bs-popover-auto[x-placement^=left]>.arrow:after,.bs-popover-left>.arrow:after{border-width:.5rem 0 .5rem .5rem;border-left-color:#fff;right:1px}.popover-header{background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px);margin-bottom:0;padding:.5rem .75rem;font-size:1rem}.popover-header:empty{display:none}.popover-body{color:#212529;padding:.5rem .75rem}.carousel{position:relative}.carousel.pointer-event{-ms-touch-action:pan-y;touch-action:pan-y}.carousel-inner{width:100%;position:relative;overflow:hidden}.carousel-inner:after{clear:both;content:"";display:block}.carousel-item{float:left;width:100%;-webkit-backface-visibility:hidden;backface-visibility:hidden;margin-right:-100%;transition:transform .6s ease-in-out,-webkit-transform .6s ease-in-out;display:none;position:relative}@media 
(prefers-reduced-motion:reduce){.carousel-item{transition:none}}.carousel-item-next,.carousel-item-prev,.carousel-item.active{display:block}.active.carousel-item-right,.carousel-item-next:not(.carousel-item-left){-webkit-transform:translate(100%);transform:translate(100%)}.active.carousel-item-left,.carousel-item-prev:not(.carousel-item-right){-webkit-transform:translate(-100%);transform:translate(-100%)}.carousel-fade .carousel-item{opacity:0;transition-property:opacity;-webkit-transform:none;transform:none}.carousel-fade .carousel-item-next.carousel-item-left,.carousel-fade .carousel-item-prev.carousel-item-right,.carousel-fade .carousel-item.active{z-index:1;opacity:1}.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-right{z-index:0;opacity:0;transition:opacity 0s .6s}@media (prefers-reduced-motion:reduce){.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-right{transition:none}}.carousel-control-next,.carousel-control-prev{z-index:1;width:15%;color:#fff;text-align:center;opacity:.5;background:0 0;border:0;justify-content:center;align-items:center;padding:0;transition:opacity .15s;display:-ms-flexbox;display:flex;position:absolute;top:0;bottom:0}@media (prefers-reduced-motion:reduce){.carousel-control-next,.carousel-control-prev{transition:none}}.carousel-control-next:focus,.carousel-control-next:hover,.carousel-control-prev:focus,.carousel-control-prev:hover{color:#fff;opacity:.9;outline:0;text-decoration:none}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-next-icon,.carousel-control-prev-icon{width:20px;height:20px;background:50%/100% 100% no-repeat;display:inline-block}.carousel-control-prev-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' width='8' height='8' viewBox='0 0 8 8'%3e%3cpath d='M5.25 0l-4 4 4 4 1.5-1.5L4.25 4l2.5-2.5L5.25 0z'/%3e%3c/svg%3e")}.carousel-control-next-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' width='8' height='8' viewBox='0 0 8 8'%3e%3cpath d='M2.75 0l-1.5 1.5L3.75 4l-2.5 2.5L2.75 8l4-4-4-4z'/%3e%3c/svg%3e")}.carousel-indicators{z-index:15;justify-content:center;margin-left:15%;margin-right:15%;padding-left:0;list-style:none;display:-ms-flexbox;display:flex;position:absolute;bottom:0;left:0;right:0}.carousel-indicators li{box-sizing:content-box;width:30px;height:3px;text-indent:-999px;cursor:pointer;opacity:.5;background-color:#fff;background-clip:padding-box;border-top:10px solid #0000;border-bottom:10px solid #0000;-ms-flex:0 auto;flex:0 auto;margin-left:3px;margin-right:3px;transition:opacity .6s}@media (prefers-reduced-motion:reduce){.carousel-indicators li{transition:none}}.carousel-indicators .active{opacity:1}.carousel-caption{z-index:10;color:#fff;text-align:center;padding-top:20px;padding-bottom:20px;position:absolute;bottom:20px;left:15%;right:15%}@-webkit-keyframes spinner-border{to{-webkit-transform:rotate(360deg);transform:rotate(360deg)}}@keyframes spinner-border{to{-webkit-transform:rotate(360deg);transform:rotate(360deg)}}.spinner-border{width:2rem;height:2rem;vertical-align:-.125em;border:.25em solid;border-right-color:#0000;border-radius:50%;-webkit-animation:spinner-border .75s linear infinite;animation:spinner-border .75s linear infinite;display:inline-block}.spinner-border-sm{width:1rem;height:1rem;border-width:.2em}@-webkit-keyframes 
spinner-grow{0%{-webkit-transform:scale(0);transform:scale(0)}50%{opacity:1;-webkit-transform:none;transform:none}}@keyframes spinner-grow{0%{-webkit-transform:scale(0);transform:scale(0)}50%{opacity:1;-webkit-transform:none;transform:none}}.spinner-grow{width:2rem;height:2rem;vertical-align:-.125em;opacity:0;background-color:currentColor;border-radius:50%;-webkit-animation:spinner-grow .75s linear infinite;animation:spinner-grow .75s linear infinite;display:inline-block}.spinner-grow-sm{width:1rem;height:1rem}@media (prefers-reduced-motion:reduce){.spinner-border,.spinner-grow{-webkit-animation-duration:1.5s;animation-duration:1.5s}}.align-baseline{vertical-align:baseline!important}.align-top{vertical-align:top!important}.align-middle{vertical-align:middle!important}.align-bottom{vertical-align:bottom!important}.align-text-bottom{vertical-align:text-bottom!important}.align-text-top{vertical-align:text-top!important}.bg-primary{background-color:#007bff!important}a.bg-primary:focus,a.bg-primary:hover,button.bg-primary:focus,button.bg-primary:hover{background-color:#0062cc!important}.bg-secondary{background-color:#6c757d!important}a.bg-secondary:focus,a.bg-secondary:hover,button.bg-secondary:focus,button.bg-secondary:hover{background-color:#545b62!important}.bg-success{background-color:#28a745!important}a.bg-success:focus,a.bg-success:hover,button.bg-success:focus,button.bg-success:hover{background-color:#1e7e34!important}.bg-info{background-color:#17a2b8!important}a.bg-info:focus,a.bg-info:hover,button.bg-info:focus,button.bg-info:hover{background-color:#117a8b!important}.bg-warning{background-color:#ffc107!important}a.bg-warning:focus,a.bg-warning:hover,button.bg-warning:focus,button.bg-warning:hover{background-color:#d39e00!important}.bg-danger{background-color:#dc3545!important}a.bg-danger:focus,a.bg-danger:hover,button.bg-danger:focus,button.bg-danger:hover{background-color:#bd2130!important}.bg-light{background-color:#f8f9fa!important}a.bg-light:focus,a.bg-light:hover,button.bg-light:focus,button.bg-light:hover{background-color:#dae0e5!important}.bg-dark{background-color:#343a40!important}a.bg-dark:focus,a.bg-dark:hover,button.bg-dark:focus,button.bg-dark:hover{background-color:#1d2124!important}.bg-white{background-color:#fff!important}.bg-transparent{background-color:#0000!important}.border{border:1px solid #dee2e6!important}.border-top{border-top:1px solid #dee2e6!important}.border-right{border-right:1px solid #dee2e6!important}.border-bottom{border-bottom:1px solid #dee2e6!important}.border-left{border-left:1px solid 
#dee2e6!important}.border-0{border:0!important}.border-top-0{border-top:0!important}.border-right-0{border-right:0!important}.border-bottom-0{border-bottom:0!important}.border-left-0{border-left:0!important}.border-primary{border-color:#007bff!important}.border-secondary{border-color:#6c757d!important}.border-success{border-color:#28a745!important}.border-info{border-color:#17a2b8!important}.border-warning{border-color:#ffc107!important}.border-danger{border-color:#dc3545!important}.border-light{border-color:#f8f9fa!important}.border-dark{border-color:#343a40!important}.border-white{border-color:#fff!important}.rounded-sm{border-radius:.2rem!important}.rounded{border-radius:.25rem!important}.rounded-top{border-top-left-radius:.25rem!important;border-top-right-radius:.25rem!important}.rounded-right{border-top-right-radius:.25rem!important;border-bottom-right-radius:.25rem!important}.rounded-bottom{border-bottom-left-radius:.25rem!important;border-bottom-right-radius:.25rem!important}.rounded-left{border-top-left-radius:.25rem!important;border-bottom-left-radius:.25rem!important}.rounded-lg{border-radius:.3rem!important}.rounded-circle{border-radius:50%!important}.rounded-pill{border-radius:50rem!important}.rounded-0{border-radius:0!important}.clearfix:after{clear:both;content:"";display:block}.d-none{display:none!important}.d-inline{display:inline!important}.d-inline-block{display:inline-block!important}.d-block{display:block!important}.d-table{display:table!important}.d-table-row{display:table-row!important}.d-table-cell{display:table-cell!important}.d-flex{display:-ms-flexbox!important;display:flex!important}.d-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}@media (min-width:576px){.d-sm-none{display:none!important}.d-sm-inline{display:inline!important}.d-sm-inline-block{display:inline-block!important}.d-sm-block{display:block!important}.d-sm-table{display:table!important}.d-sm-table-row{display:table-row!important}.d-sm-table-cell{display:table-cell!important}.d-sm-flex{display:-ms-flexbox!important;display:flex!important}.d-sm-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}@media (min-width:768px){.d-md-none{display:none!important}.d-md-inline{display:inline!important}.d-md-inline-block{display:inline-block!important}.d-md-block{display:block!important}.d-md-table{display:table!important}.d-md-table-row{display:table-row!important}.d-md-table-cell{display:table-cell!important}.d-md-flex{display:-ms-flexbox!important;display:flex!important}.d-md-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}@media (min-width:992px){.d-lg-none{display:none!important}.d-lg-inline{display:inline!important}.d-lg-inline-block{display:inline-block!important}.d-lg-block{display:block!important}.d-lg-table{display:table!important}.d-lg-table-row{display:table-row!important}.d-lg-table-cell{display:table-cell!important}.d-lg-flex{display:-ms-flexbox!important;display:flex!important}.d-lg-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}@media 
(min-width:1200px){.d-xl-none{display:none!important}.d-xl-inline{display:inline!important}.d-xl-inline-block{display:inline-block!important}.d-xl-block{display:block!important}.d-xl-table{display:table!important}.d-xl-table-row{display:table-row!important}.d-xl-table-cell{display:table-cell!important}.d-xl-flex{display:-ms-flexbox!important;display:flex!important}.d-xl-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}@media print{.d-print-none{display:none!important}.d-print-inline{display:inline!important}.d-print-inline-block{display:inline-block!important}.d-print-block{display:block!important}.d-print-table{display:table!important}.d-print-table-row{display:table-row!important}.d-print-table-cell{display:table-cell!important}.d-print-flex{display:-ms-flexbox!important;display:flex!important}.d-print-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}.embed-responsive{width:100%;padding:0;display:block;position:relative;overflow:hidden}.embed-responsive:before{content:"";display:block}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{width:100%;height:100%;border:0;position:absolute;top:0;bottom:0;left:0}.embed-responsive-21by9:before{padding-top:42.8571%}.embed-responsive-16by9:before{padding-top:56.25%}.embed-responsive-4by3:before{padding-top:75%}.embed-responsive-1by1:before{padding-top:100%}.flex-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-fill{-ms-flex:auto!important;flex:auto!important}.flex-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-start{justify-content:flex-start!important}.justify-content-end{justify-content:flex-end!important}.justify-content-center{justify-content:center!important}.justify-content-between{justify-content:space-between!important}.justify-content-around{justify-content:space-around!important}.align-items-start{align-items:flex-start!important}.align-items-end{align-items:flex-end!important}.align-items-center{align-items:center!important}.align-items-baseline{align-items:baseline!important}.align-items-stretch{align-items:stretch!important}.align-content-start{align-content:flex-start!important}.align-content-end{align-content:flex-end!important}.align-content-center{align-content:center!important}.align-content-between{align-content:space-between!important}.align-content-around{align-content:space-around!important}.align-content-stretch{align-content:stretch!important}.align-self-auto{align-self:auto!important}.align-self-start{align-self:flex-start!important}.align-self-end{align-self:flex-end!important}.align-self-center{align-self:center!important}.align-self-baseline{align-self:baseline!important}.align-self-stretch{align-self:stretch!important}@media 
(min-width:576px){.flex-sm-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-sm-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-sm-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-sm-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-sm-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-sm-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-sm-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-sm-fill{-ms-flex:auto!important;flex:auto!important}.flex-sm-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-sm-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-sm-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-sm-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-sm-start{justify-content:flex-start!important}.justify-content-sm-end{justify-content:flex-end!important}.justify-content-sm-center{justify-content:center!important}.justify-content-sm-between{justify-content:space-between!important}.justify-content-sm-around{justify-content:space-around!important}.align-items-sm-start{align-items:flex-start!important}.align-items-sm-end{align-items:flex-end!important}.align-items-sm-center{align-items:center!important}.align-items-sm-baseline{align-items:baseline!important}.align-items-sm-stretch{align-items:stretch!important}.align-content-sm-start{align-content:flex-start!important}.align-content-sm-end{align-content:flex-end!important}.align-content-sm-center{align-content:center!important}.align-content-sm-between{align-content:space-between!important}.align-content-sm-around{align-content:space-around!important}.align-content-sm-stretch{align-content:stretch!important}.align-self-sm-auto{align-self:auto!important}.align-self-sm-start{align-self:flex-start!important}.align-self-sm-end{align-self:flex-end!important}.align-self-sm-center{align-self:center!important}.align-self-sm-baseline{align-self:baseline!important}.align-self-sm-stretch{align-self:stretch!important}}@media 
(min-width:768px){.flex-md-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-md-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-md-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-md-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-md-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-md-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-md-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-md-fill{-ms-flex:auto!important;flex:auto!important}.flex-md-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-md-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-md-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-md-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-md-start{justify-content:flex-start!important}.justify-content-md-end{justify-content:flex-end!important}.justify-content-md-center{justify-content:center!important}.justify-content-md-between{justify-content:space-between!important}.justify-content-md-around{justify-content:space-around!important}.align-items-md-start{align-items:flex-start!important}.align-items-md-end{align-items:flex-end!important}.align-items-md-center{align-items:center!important}.align-items-md-baseline{align-items:baseline!important}.align-items-md-stretch{align-items:stretch!important}.align-content-md-start{align-content:flex-start!important}.align-content-md-end{align-content:flex-end!important}.align-content-md-center{align-content:center!important}.align-content-md-between{align-content:space-between!important}.align-content-md-around{align-content:space-around!important}.align-content-md-stretch{align-content:stretch!important}.align-self-md-auto{align-self:auto!important}.align-self-md-start{align-self:flex-start!important}.align-self-md-end{align-self:flex-end!important}.align-self-md-center{align-self:center!important}.align-self-md-baseline{align-self:baseline!important}.align-self-md-stretch{align-self:stretch!important}}@media 
(min-width:992px){.flex-lg-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-lg-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-lg-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-lg-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-lg-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-lg-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-lg-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-lg-fill{-ms-flex:auto!important;flex:auto!important}.flex-lg-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-lg-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-lg-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-lg-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-lg-start{justify-content:flex-start!important}.justify-content-lg-end{justify-content:flex-end!important}.justify-content-lg-center{justify-content:center!important}.justify-content-lg-between{justify-content:space-between!important}.justify-content-lg-around{justify-content:space-around!important}.align-items-lg-start{align-items:flex-start!important}.align-items-lg-end{align-items:flex-end!important}.align-items-lg-center{align-items:center!important}.align-items-lg-baseline{align-items:baseline!important}.align-items-lg-stretch{align-items:stretch!important}.align-content-lg-start{align-content:flex-start!important}.align-content-lg-end{align-content:flex-end!important}.align-content-lg-center{align-content:center!important}.align-content-lg-between{align-content:space-between!important}.align-content-lg-around{align-content:space-around!important}.align-content-lg-stretch{align-content:stretch!important}.align-self-lg-auto{align-self:auto!important}.align-self-lg-start{align-self:flex-start!important}.align-self-lg-end{align-self:flex-end!important}.align-self-lg-center{align-self:center!important}.align-self-lg-baseline{align-self:baseline!important}.align-self-lg-stretch{align-self:stretch!important}}@media 
(min-width:1200px){.flex-xl-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-xl-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-xl-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-xl-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-xl-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-xl-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-xl-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-xl-fill{-ms-flex:auto!important;flex:auto!important}.flex-xl-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-xl-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-xl-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-xl-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-xl-start{justify-content:flex-start!important}.justify-content-xl-end{justify-content:flex-end!important}.justify-content-xl-center{justify-content:center!important}.justify-content-xl-between{justify-content:space-between!important}.justify-content-xl-around{justify-content:space-around!important}.align-items-xl-start{align-items:flex-start!important}.align-items-xl-end{align-items:flex-end!important}.align-items-xl-center{align-items:center!important}.align-items-xl-baseline{align-items:baseline!important}.align-items-xl-stretch{align-items:stretch!important}.align-content-xl-start{align-content:flex-start!important}.align-content-xl-end{align-content:flex-end!important}.align-content-xl-center{align-content:center!important}.align-content-xl-between{align-content:space-between!important}.align-content-xl-around{align-content:space-around!important}.align-content-xl-stretch{align-content:stretch!important}.align-self-xl-auto{align-self:auto!important}.align-self-xl-start{align-self:flex-start!important}.align-self-xl-end{align-self:flex-end!important}.align-self-xl-center{align-self:center!important}.align-self-xl-baseline{align-self:baseline!important}.align-self-xl-stretch{align-self:stretch!important}}.float-left{float:left!important}.float-right{float:right!important}.float-none{float:none!important}@media (min-width:576px){.float-sm-left{float:left!important}.float-sm-right{float:right!important}.float-sm-none{float:none!important}}@media (min-width:768px){.float-md-left{float:left!important}.float-md-right{float:right!important}.float-md-none{float:none!important}}@media (min-width:992px){.float-lg-left{float:left!important}.float-lg-right{float:right!important}.float-lg-none{float:none!important}}@media 
(min-width:1200px){.float-xl-left{float:left!important}.float-xl-right{float:right!important}.float-xl-none{float:none!important}}.user-select-all{-webkit-user-select:all!important;-moz-user-select:all!important;user-select:all!important}.user-select-auto{-webkit-user-select:auto!important;-moz-user-select:auto!important;-ms-user-select:auto!important;user-select:auto!important}.user-select-none{-webkit-user-select:none!important;-moz-user-select:none!important;-ms-user-select:none!important;user-select:none!important}.overflow-auto{overflow:auto!important}.overflow-hidden{overflow:hidden!important}.position-static{position:static!important}.position-relative{position:relative!important}.position-absolute{position:absolute!important}.position-fixed{position:fixed!important}.position-sticky{position:-webkit-sticky!important;position:sticky!important}.fixed-top{z-index:1030;position:fixed;top:0;left:0;right:0}.fixed-bottom{z-index:1030;position:fixed;bottom:0;left:0;right:0}@supports ((position:-webkit-sticky) or (position:sticky)){.sticky-top{z-index:1020;position:-webkit-sticky;position:sticky;top:0}}.sr-only{width:1px;height:1px;clip:rect(0,0,0,0);white-space:nowrap;border:0;margin:-1px;padding:0;position:absolute;overflow:hidden}.sr-only-focusable:active,.sr-only-focusable:focus{width:auto;height:auto;clip:auto;white-space:normal;position:static;overflow:visible}.shadow-sm{box-shadow:0 .125rem .25rem #00000013!important}.shadow{box-shadow:0 .5rem 1rem #00000026!important}.shadow-lg{box-shadow:0 1rem 3rem #0000002d!important}.shadow-none{box-shadow:none!important}.w-25{width:25%!important}.w-50{width:50%!important}.w-75{width:75%!important}.w-100{width:100%!important}.w-auto{width:auto!important}.h-25{height:25%!important}.h-50{height:50%!important}.h-75{height:75%!important}.h-100{height:100%!important}.h-auto{height:auto!important}.mw-100{max-width:100%!important}.mh-100{max-height:100%!important}.min-vw-100{min-width:100vw!important}.min-vh-100{min-height:100vh!important}.vw-100{width:100vw!important}.vh-100{height:100vh!important}.m-0{margin:0!important}.mt-0,.my-0{margin-top:0!important}.mr-0,.mx-0{margin-right:0!important}.mb-0,.my-0{margin-bottom:0!important}.ml-0,.mx-0{margin-left:0!important}.m-1{margin:.25rem!important}.mt-1,.my-1{margin-top:.25rem!important}.mr-1,.mx-1{margin-right:.25rem!important}.mb-1,.my-1{margin-bottom:.25rem!important}.ml-1,.mx-1{margin-left:.25rem!important}.m-2{margin:.5rem!important}.mt-2,.my-2{margin-top:.5rem!important}.mr-2,.mx-2{margin-right:.5rem!important}.mb-2,.my-2{margin-bottom:.5rem!important}.ml-2,.mx-2{margin-left:.5rem!important}.m-3{margin:1rem!important}.mt-3,.my-3{margin-top:1rem!important}.mr-3,.mx-3{margin-right:1rem!important}.mb-3,.my-3{margin-bottom:1rem!important}.ml-3,.mx-3{margin-left:1rem!important}.m-4{margin:1.5rem!important}.mt-4,.my-4{margin-top:1.5rem!important}.mr-4,.mx-4{margin-right:1.5rem!important}.mb-4,.my-4{margin-bottom:1.5rem!important}.ml-4,.mx-4{margin-left:1.5rem!important}.m-5{margin:3rem!important}.mt-5,.my-5{margin-top:3rem!important}.mr-5,.mx-5{margin-right:3rem!important}.mb-5,.my-5{margin-bottom:3rem!important}.ml-5,.mx-5{margin-left:3rem!important}.p-0{padding:0!important}.pt-0,.py-0{padding-top:0!important}.pr-0,.px-0{padding-right:0!important}.pb-0,.py-0{padding-bottom:0!important}.pl-0,.px-0{padding-left:0!important}.p-1{padding:.25rem!important}.pt-1,.py-1{padding-top:.25rem!important}.pr-1,.px-1{padding-right:.25rem!important}.pb-1,.py-1{padding-bottom:.25rem!important}.pl-1,.px-1{padding-left:.25rem
!important}.p-2{padding:.5rem!important}.pt-2,.py-2{padding-top:.5rem!important}.pr-2,.px-2{padding-right:.5rem!important}.pb-2,.py-2{padding-bottom:.5rem!important}.pl-2,.px-2{padding-left:.5rem!important}.p-3{padding:1rem!important}.pt-3,.py-3{padding-top:1rem!important}.pr-3,.px-3{padding-right:1rem!important}.pb-3,.py-3{padding-bottom:1rem!important}.pl-3,.px-3{padding-left:1rem!important}.p-4{padding:1.5rem!important}.pt-4,.py-4{padding-top:1.5rem!important}.pr-4,.px-4{padding-right:1.5rem!important}.pb-4,.py-4{padding-bottom:1.5rem!important}.pl-4,.px-4{padding-left:1.5rem!important}.p-5{padding:3rem!important}.pt-5,.py-5{padding-top:3rem!important}.pr-5,.px-5{padding-right:3rem!important}.pb-5,.py-5{padding-bottom:3rem!important}.pl-5,.px-5{padding-left:3rem!important}.m-n1{margin:-.25rem!important}.mt-n1,.my-n1{margin-top:-.25rem!important}.mr-n1,.mx-n1{margin-right:-.25rem!important}.mb-n1,.my-n1{margin-bottom:-.25rem!important}.ml-n1,.mx-n1{margin-left:-.25rem!important}.m-n2{margin:-.5rem!important}.mt-n2,.my-n2{margin-top:-.5rem!important}.mr-n2,.mx-n2{margin-right:-.5rem!important}.mb-n2,.my-n2{margin-bottom:-.5rem!important}.ml-n2,.mx-n2{margin-left:-.5rem!important}.m-n3{margin:-1rem!important}.mt-n3,.my-n3{margin-top:-1rem!important}.mr-n3,.mx-n3{margin-right:-1rem!important}.mb-n3,.my-n3{margin-bottom:-1rem!important}.ml-n3,.mx-n3{margin-left:-1rem!important}.m-n4{margin:-1.5rem!important}.mt-n4,.my-n4{margin-top:-1.5rem!important}.mr-n4,.mx-n4{margin-right:-1.5rem!important}.mb-n4,.my-n4{margin-bottom:-1.5rem!important}.ml-n4,.mx-n4{margin-left:-1.5rem!important}.m-n5{margin:-3rem!important}.mt-n5,.my-n5{margin-top:-3rem!important}.mr-n5,.mx-n5{margin-right:-3rem!important}.mb-n5,.my-n5{margin-bottom:-3rem!important}.ml-n5,.mx-n5{margin-left:-3rem!important}.m-auto{margin:auto!important}.mt-auto,.my-auto{margin-top:auto!important}.mr-auto,.mx-auto{margin-right:auto!important}.mb-auto,.my-auto{margin-bottom:auto!important}.ml-auto,.mx-auto{margin-left:auto!important}@media 
(min-width:576px){.m-sm-0{margin:0!important}.mt-sm-0,.my-sm-0{margin-top:0!important}.mr-sm-0,.mx-sm-0{margin-right:0!important}.mb-sm-0,.my-sm-0{margin-bottom:0!important}.ml-sm-0,.mx-sm-0{margin-left:0!important}.m-sm-1{margin:.25rem!important}.mt-sm-1,.my-sm-1{margin-top:.25rem!important}.mr-sm-1,.mx-sm-1{margin-right:.25rem!important}.mb-sm-1,.my-sm-1{margin-bottom:.25rem!important}.ml-sm-1,.mx-sm-1{margin-left:.25rem!important}.m-sm-2{margin:.5rem!important}.mt-sm-2,.my-sm-2{margin-top:.5rem!important}.mr-sm-2,.mx-sm-2{margin-right:.5rem!important}.mb-sm-2,.my-sm-2{margin-bottom:.5rem!important}.ml-sm-2,.mx-sm-2{margin-left:.5rem!important}.m-sm-3{margin:1rem!important}.mt-sm-3,.my-sm-3{margin-top:1rem!important}.mr-sm-3,.mx-sm-3{margin-right:1rem!important}.mb-sm-3,.my-sm-3{margin-bottom:1rem!important}.ml-sm-3,.mx-sm-3{margin-left:1rem!important}.m-sm-4{margin:1.5rem!important}.mt-sm-4,.my-sm-4{margin-top:1.5rem!important}.mr-sm-4,.mx-sm-4{margin-right:1.5rem!important}.mb-sm-4,.my-sm-4{margin-bottom:1.5rem!important}.ml-sm-4,.mx-sm-4{margin-left:1.5rem!important}.m-sm-5{margin:3rem!important}.mt-sm-5,.my-sm-5{margin-top:3rem!important}.mr-sm-5,.mx-sm-5{margin-right:3rem!important}.mb-sm-5,.my-sm-5{margin-bottom:3rem!important}.ml-sm-5,.mx-sm-5{margin-left:3rem!important}.p-sm-0{padding:0!important}.pt-sm-0,.py-sm-0{padding-top:0!important}.pr-sm-0,.px-sm-0{padding-right:0!important}.pb-sm-0,.py-sm-0{padding-bottom:0!important}.pl-sm-0,.px-sm-0{padding-left:0!important}.p-sm-1{padding:.25rem!important}.pt-sm-1,.py-sm-1{padding-top:.25rem!important}.pr-sm-1,.px-sm-1{padding-right:.25rem!important}.pb-sm-1,.py-sm-1{padding-bottom:.25rem!important}.pl-sm-1,.px-sm-1{padding-left:.25rem!important}.p-sm-2{padding:.5rem!important}.pt-sm-2,.py-sm-2{padding-top:.5rem!important}.pr-sm-2,.px-sm-2{padding-right:.5rem!important}.pb-sm-2,.py-sm-2{padding-bottom:.5rem!important}.pl-sm-2,.px-sm-2{padding-left:.5rem!important}.p-sm-3{padding:1rem!important}.pt-sm-3,.py-sm-3{padding-top:1rem!important}.pr-sm-3,.px-sm-3{padding-right:1rem!important}.pb-sm-3,.py-sm-3{padding-bottom:1rem!important}.pl-sm-3,.px-sm-3{padding-left:1rem!important}.p-sm-4{padding:1.5rem!important}.pt-sm-4,.py-sm-4{padding-top:1.5rem!important}.pr-sm-4,.px-sm-4{padding-right:1.5rem!important}.pb-sm-4,.py-sm-4{padding-bottom:1.5rem!important}.pl-sm-4,.px-sm-4{padding-left:1.5rem!important}.p-sm-5{padding:3rem!important}.pt-sm-5,.py-sm-5{padding-top:3rem!important}.pr-sm-5,.px-sm-5{padding-right:3rem!important}.pb-sm-5,.py-sm-5{padding-bottom:3rem!important}.pl-sm-5,.px-sm-5{padding-left:3rem!important}.m-sm-n1{margin:-.25rem!important}.mt-sm-n1,.my-sm-n1{margin-top:-.25rem!important}.mr-sm-n1,.mx-sm-n1{margin-right:-.25rem!important}.mb-sm-n1,.my-sm-n1{margin-bottom:-.25rem!important}.ml-sm-n1,.mx-sm-n1{margin-left:-.25rem!important}.m-sm-n2{margin:-.5rem!important}.mt-sm-n2,.my-sm-n2{margin-top:-.5rem!important}.mr-sm-n2,.mx-sm-n2{margin-right:-.5rem!important}.mb-sm-n2,.my-sm-n2{margin-bottom:-.5rem!important}.ml-sm-n2,.mx-sm-n2{margin-left:-.5rem!important}.m-sm-n3{margin:-1rem!important}.mt-sm-n3,.my-sm-n3{margin-top:-1rem!important}.mr-sm-n3,.mx-sm-n3{margin-right:-1rem!important}.mb-sm-n3,.my-sm-n3{margin-bottom:-1rem!important}.ml-sm-n3,.mx-sm-n3{margin-left:-1rem!important}.m-sm-n4{margin:-1.5rem!important}.mt-sm-n4,.my-sm-n4{margin-top:-1.5rem!important}.mr-sm-n4,.mx-sm-n4{margin-right:-1.5rem!important}.mb-sm-n4,.my-sm-n4{margin-bottom:-1.5rem!important}.ml-sm-n4,.mx-sm-n4{margin-left:-1.5rem!important}.m-sm-n5{margi
n:-3rem!important}.mt-sm-n5,.my-sm-n5{margin-top:-3rem!important}.mr-sm-n5,.mx-sm-n5{margin-right:-3rem!important}.mb-sm-n5,.my-sm-n5{margin-bottom:-3rem!important}.ml-sm-n5,.mx-sm-n5{margin-left:-3rem!important}.m-sm-auto{margin:auto!important}.mt-sm-auto,.my-sm-auto{margin-top:auto!important}.mr-sm-auto,.mx-sm-auto{margin-right:auto!important}.mb-sm-auto,.my-sm-auto{margin-bottom:auto!important}.ml-sm-auto,.mx-sm-auto{margin-left:auto!important}}@media (min-width:768px){.m-md-0{margin:0!important}.mt-md-0,.my-md-0{margin-top:0!important}.mr-md-0,.mx-md-0{margin-right:0!important}.mb-md-0,.my-md-0{margin-bottom:0!important}.ml-md-0,.mx-md-0{margin-left:0!important}.m-md-1{margin:.25rem!important}.mt-md-1,.my-md-1{margin-top:.25rem!important}.mr-md-1,.mx-md-1{margin-right:.25rem!important}.mb-md-1,.my-md-1{margin-bottom:.25rem!important}.ml-md-1,.mx-md-1{margin-left:.25rem!important}.m-md-2{margin:.5rem!important}.mt-md-2,.my-md-2{margin-top:.5rem!important}.mr-md-2,.mx-md-2{margin-right:.5rem!important}.mb-md-2,.my-md-2{margin-bottom:.5rem!important}.ml-md-2,.mx-md-2{margin-left:.5rem!important}.m-md-3{margin:1rem!important}.mt-md-3,.my-md-3{margin-top:1rem!important}.mr-md-3,.mx-md-3{margin-right:1rem!important}.mb-md-3,.my-md-3{margin-bottom:1rem!important}.ml-md-3,.mx-md-3{margin-left:1rem!important}.m-md-4{margin:1.5rem!important}.mt-md-4,.my-md-4{margin-top:1.5rem!important}.mr-md-4,.mx-md-4{margin-right:1.5rem!important}.mb-md-4,.my-md-4{margin-bottom:1.5rem!important}.ml-md-4,.mx-md-4{margin-left:1.5rem!important}.m-md-5{margin:3rem!important}.mt-md-5,.my-md-5{margin-top:3rem!important}.mr-md-5,.mx-md-5{margin-right:3rem!important}.mb-md-5,.my-md-5{margin-bottom:3rem!important}.ml-md-5,.mx-md-5{margin-left:3rem!important}.p-md-0{padding:0!important}.pt-md-0,.py-md-0{padding-top:0!important}.pr-md-0,.px-md-0{padding-right:0!important}.pb-md-0,.py-md-0{padding-bottom:0!important}.pl-md-0,.px-md-0{padding-left:0!important}.p-md-1{padding:.25rem!important}.pt-md-1,.py-md-1{padding-top:.25rem!important}.pr-md-1,.px-md-1{padding-right:.25rem!important}.pb-md-1,.py-md-1{padding-bottom:.25rem!important}.pl-md-1,.px-md-1{padding-left:.25rem!important}.p-md-2{padding:.5rem!important}.pt-md-2,.py-md-2{padding-top:.5rem!important}.pr-md-2,.px-md-2{padding-right:.5rem!important}.pb-md-2,.py-md-2{padding-bottom:.5rem!important}.pl-md-2,.px-md-2{padding-left:.5rem!important}.p-md-3{padding:1rem!important}.pt-md-3,.py-md-3{padding-top:1rem!important}.pr-md-3,.px-md-3{padding-right:1rem!important}.pb-md-3,.py-md-3{padding-bottom:1rem!important}.pl-md-3,.px-md-3{padding-left:1rem!important}.p-md-4{padding:1.5rem!important}.pt-md-4,.py-md-4{padding-top:1.5rem!important}.pr-md-4,.px-md-4{padding-right:1.5rem!important}.pb-md-4,.py-md-4{padding-bottom:1.5rem!important}.pl-md-4,.px-md-4{padding-left:1.5rem!important}.p-md-5{padding:3rem!important}.pt-md-5,.py-md-5{padding-top:3rem!important}.pr-md-5,.px-md-5{padding-right:3rem!important}.pb-md-5,.py-md-5{padding-bottom:3rem!important}.pl-md-5,.px-md-5{padding-left:3rem!important}.m-md-n1{margin:-.25rem!important}.mt-md-n1,.my-md-n1{margin-top:-.25rem!important}.mr-md-n1,.mx-md-n1{margin-right:-.25rem!important}.mb-md-n1,.my-md-n1{margin-bottom:-.25rem!important}.ml-md-n1,.mx-md-n1{margin-left:-.25rem!important}.m-md-n2{margin:-.5rem!important}.mt-md-n2,.my-md-n2{margin-top:-.5rem!important}.mr-md-n2,.mx-md-n2{margin-right:-.5rem!important}.mb-md-n2,.my-md-n2{margin-bottom:-.5rem!important}.ml-md-n2,.mx-md-n2{margin-left:-.5rem!important}.m-md-n3{margin:-
1rem!important}.mt-md-n3,.my-md-n3{margin-top:-1rem!important}.mr-md-n3,.mx-md-n3{margin-right:-1rem!important}.mb-md-n3,.my-md-n3{margin-bottom:-1rem!important}.ml-md-n3,.mx-md-n3{margin-left:-1rem!important}.m-md-n4{margin:-1.5rem!important}.mt-md-n4,.my-md-n4{margin-top:-1.5rem!important}.mr-md-n4,.mx-md-n4{margin-right:-1.5rem!important}.mb-md-n4,.my-md-n4{margin-bottom:-1.5rem!important}.ml-md-n4,.mx-md-n4{margin-left:-1.5rem!important}.m-md-n5{margin:-3rem!important}.mt-md-n5,.my-md-n5{margin-top:-3rem!important}.mr-md-n5,.mx-md-n5{margin-right:-3rem!important}.mb-md-n5,.my-md-n5{margin-bottom:-3rem!important}.ml-md-n5,.mx-md-n5{margin-left:-3rem!important}.m-md-auto{margin:auto!important}.mt-md-auto,.my-md-auto{margin-top:auto!important}.mr-md-auto,.mx-md-auto{margin-right:auto!important}.mb-md-auto,.my-md-auto{margin-bottom:auto!important}.ml-md-auto,.mx-md-auto{margin-left:auto!important}}@media (min-width:992px){.m-lg-0{margin:0!important}.mt-lg-0,.my-lg-0{margin-top:0!important}.mr-lg-0,.mx-lg-0{margin-right:0!important}.mb-lg-0,.my-lg-0{margin-bottom:0!important}.ml-lg-0,.mx-lg-0{margin-left:0!important}.m-lg-1{margin:.25rem!important}.mt-lg-1,.my-lg-1{margin-top:.25rem!important}.mr-lg-1,.mx-lg-1{margin-right:.25rem!important}.mb-lg-1,.my-lg-1{margin-bottom:.25rem!important}.ml-lg-1,.mx-lg-1{margin-left:.25rem!important}.m-lg-2{margin:.5rem!important}.mt-lg-2,.my-lg-2{margin-top:.5rem!important}.mr-lg-2,.mx-lg-2{margin-right:.5rem!important}.mb-lg-2,.my-lg-2{margin-bottom:.5rem!important}.ml-lg-2,.mx-lg-2{margin-left:.5rem!important}.m-lg-3{margin:1rem!important}.mt-lg-3,.my-lg-3{margin-top:1rem!important}.mr-lg-3,.mx-lg-3{margin-right:1rem!important}.mb-lg-3,.my-lg-3{margin-bottom:1rem!important}.ml-lg-3,.mx-lg-3{margin-left:1rem!important}.m-lg-4{margin:1.5rem!important}.mt-lg-4,.my-lg-4{margin-top:1.5rem!important}.mr-lg-4,.mx-lg-4{margin-right:1.5rem!important}.mb-lg-4,.my-lg-4{margin-bottom:1.5rem!important}.ml-lg-4,.mx-lg-4{margin-left:1.5rem!important}.m-lg-5{margin:3rem!important}.mt-lg-5,.my-lg-5{margin-top:3rem!important}.mr-lg-5,.mx-lg-5{margin-right:3rem!important}.mb-lg-5,.my-lg-5{margin-bottom:3rem!important}.ml-lg-5,.mx-lg-5{margin-left:3rem!important}.p-lg-0{padding:0!important}.pt-lg-0,.py-lg-0{padding-top:0!important}.pr-lg-0,.px-lg-0{padding-right:0!important}.pb-lg-0,.py-lg-0{padding-bottom:0!important}.pl-lg-0,.px-lg-0{padding-left:0!important}.p-lg-1{padding:.25rem!important}.pt-lg-1,.py-lg-1{padding-top:.25rem!important}.pr-lg-1,.px-lg-1{padding-right:.25rem!important}.pb-lg-1,.py-lg-1{padding-bottom:.25rem!important}.pl-lg-1,.px-lg-1{padding-left:.25rem!important}.p-lg-2{padding:.5rem!important}.pt-lg-2,.py-lg-2{padding-top:.5rem!important}.pr-lg-2,.px-lg-2{padding-right:.5rem!important}.pb-lg-2,.py-lg-2{padding-bottom:.5rem!important}.pl-lg-2,.px-lg-2{padding-left:.5rem!important}.p-lg-3{padding:1rem!important}.pt-lg-3,.py-lg-3{padding-top:1rem!important}.pr-lg-3,.px-lg-3{padding-right:1rem!important}.pb-lg-3,.py-lg-3{padding-bottom:1rem!important}.pl-lg-3,.px-lg-3{padding-left:1rem!important}.p-lg-4{padding:1.5rem!important}.pt-lg-4,.py-lg-4{padding-top:1.5rem!important}.pr-lg-4,.px-lg-4{padding-right:1.5rem!important}.pb-lg-4,.py-lg-4{padding-bottom:1.5rem!important}.pl-lg-4,.px-lg-4{padding-left:1.5rem!important}.p-lg-5{padding:3rem!important}.pt-lg-5,.py-lg-5{padding-top:3rem!important}.pr-lg-5,.px-lg-5{padding-right:3rem!important}.pb-lg-5,.py-lg-5{padding-bottom:3rem!important}.pl-lg-5,.px-lg-5{padding-left:3rem!important}.m-lg-n1{margin:-.25rem!i
mportant}.mt-lg-n1,.my-lg-n1{margin-top:-.25rem!important}.mr-lg-n1,.mx-lg-n1{margin-right:-.25rem!important}.mb-lg-n1,.my-lg-n1{margin-bottom:-.25rem!important}.ml-lg-n1,.mx-lg-n1{margin-left:-.25rem!important}.m-lg-n2{margin:-.5rem!important}.mt-lg-n2,.my-lg-n2{margin-top:-.5rem!important}.mr-lg-n2,.mx-lg-n2{margin-right:-.5rem!important}.mb-lg-n2,.my-lg-n2{margin-bottom:-.5rem!important}.ml-lg-n2,.mx-lg-n2{margin-left:-.5rem!important}.m-lg-n3{margin:-1rem!important}.mt-lg-n3,.my-lg-n3{margin-top:-1rem!important}.mr-lg-n3,.mx-lg-n3{margin-right:-1rem!important}.mb-lg-n3,.my-lg-n3{margin-bottom:-1rem!important}.ml-lg-n3,.mx-lg-n3{margin-left:-1rem!important}.m-lg-n4{margin:-1.5rem!important}.mt-lg-n4,.my-lg-n4{margin-top:-1.5rem!important}.mr-lg-n4,.mx-lg-n4{margin-right:-1.5rem!important}.mb-lg-n4,.my-lg-n4{margin-bottom:-1.5rem!important}.ml-lg-n4,.mx-lg-n4{margin-left:-1.5rem!important}.m-lg-n5{margin:-3rem!important}.mt-lg-n5,.my-lg-n5{margin-top:-3rem!important}.mr-lg-n5,.mx-lg-n5{margin-right:-3rem!important}.mb-lg-n5,.my-lg-n5{margin-bottom:-3rem!important}.ml-lg-n5,.mx-lg-n5{margin-left:-3rem!important}.m-lg-auto{margin:auto!important}.mt-lg-auto,.my-lg-auto{margin-top:auto!important}.mr-lg-auto,.mx-lg-auto{margin-right:auto!important}.mb-lg-auto,.my-lg-auto{margin-bottom:auto!important}.ml-lg-auto,.mx-lg-auto{margin-left:auto!important}}@media (min-width:1200px){.m-xl-0{margin:0!important}.mt-xl-0,.my-xl-0{margin-top:0!important}.mr-xl-0,.mx-xl-0{margin-right:0!important}.mb-xl-0,.my-xl-0{margin-bottom:0!important}.ml-xl-0,.mx-xl-0{margin-left:0!important}.m-xl-1{margin:.25rem!important}.mt-xl-1,.my-xl-1{margin-top:.25rem!important}.mr-xl-1,.mx-xl-1{margin-right:.25rem!important}.mb-xl-1,.my-xl-1{margin-bottom:.25rem!important}.ml-xl-1,.mx-xl-1{margin-left:.25rem!important}.m-xl-2{margin:.5rem!important}.mt-xl-2,.my-xl-2{margin-top:.5rem!important}.mr-xl-2,.mx-xl-2{margin-right:.5rem!important}.mb-xl-2,.my-xl-2{margin-bottom:.5rem!important}.ml-xl-2,.mx-xl-2{margin-left:.5rem!important}.m-xl-3{margin:1rem!important}.mt-xl-3,.my-xl-3{margin-top:1rem!important}.mr-xl-3,.mx-xl-3{margin-right:1rem!important}.mb-xl-3,.my-xl-3{margin-bottom:1rem!important}.ml-xl-3,.mx-xl-3{margin-left:1rem!important}.m-xl-4{margin:1.5rem!important}.mt-xl-4,.my-xl-4{margin-top:1.5rem!important}.mr-xl-4,.mx-xl-4{margin-right:1.5rem!important}.mb-xl-4,.my-xl-4{margin-bottom:1.5rem!important}.ml-xl-4,.mx-xl-4{margin-left:1.5rem!important}.m-xl-5{margin:3rem!important}.mt-xl-5,.my-xl-5{margin-top:3rem!important}.mr-xl-5,.mx-xl-5{margin-right:3rem!important}.mb-xl-5,.my-xl-5{margin-bottom:3rem!important}.ml-xl-5,.mx-xl-5{margin-left:3rem!important}.p-xl-0{padding:0!important}.pt-xl-0,.py-xl-0{padding-top:0!important}.pr-xl-0,.px-xl-0{padding-right:0!important}.pb-xl-0,.py-xl-0{padding-bottom:0!important}.pl-xl-0,.px-xl-0{padding-left:0!important}.p-xl-1{padding:.25rem!important}.pt-xl-1,.py-xl-1{padding-top:.25rem!important}.pr-xl-1,.px-xl-1{padding-right:.25rem!important}.pb-xl-1,.py-xl-1{padding-bottom:.25rem!important}.pl-xl-1,.px-xl-1{padding-left:.25rem!important}.p-xl-2{padding:.5rem!important}.pt-xl-2,.py-xl-2{padding-top:.5rem!important}.pr-xl-2,.px-xl-2{padding-right:.5rem!important}.pb-xl-2,.py-xl-2{padding-bottom:.5rem!important}.pl-xl-2,.px-xl-2{padding-left:.5rem!important}.p-xl-3{padding:1rem!important}.pt-xl-3,.py-xl-3{padding-top:1rem!important}.pr-xl-3,.px-xl-3{padding-right:1rem!important}.pb-xl-3,.py-xl-3{padding-bottom:1rem!important}.pl-xl-3,.px-xl-3{padding-left:1rem!important}.p-xl-4{p
adding:1.5rem!important}.pt-xl-4,.py-xl-4{padding-top:1.5rem!important}.pr-xl-4,.px-xl-4{padding-right:1.5rem!important}.pb-xl-4,.py-xl-4{padding-bottom:1.5rem!important}.pl-xl-4,.px-xl-4{padding-left:1.5rem!important}.p-xl-5{padding:3rem!important}.pt-xl-5,.py-xl-5{padding-top:3rem!important}.pr-xl-5,.px-xl-5{padding-right:3rem!important}.pb-xl-5,.py-xl-5{padding-bottom:3rem!important}.pl-xl-5,.px-xl-5{padding-left:3rem!important}.m-xl-n1{margin:-.25rem!important}.mt-xl-n1,.my-xl-n1{margin-top:-.25rem!important}.mr-xl-n1,.mx-xl-n1{margin-right:-.25rem!important}.mb-xl-n1,.my-xl-n1{margin-bottom:-.25rem!important}.ml-xl-n1,.mx-xl-n1{margin-left:-.25rem!important}.m-xl-n2{margin:-.5rem!important}.mt-xl-n2,.my-xl-n2{margin-top:-.5rem!important}.mr-xl-n2,.mx-xl-n2{margin-right:-.5rem!important}.mb-xl-n2,.my-xl-n2{margin-bottom:-.5rem!important}.ml-xl-n2,.mx-xl-n2{margin-left:-.5rem!important}.m-xl-n3{margin:-1rem!important}.mt-xl-n3,.my-xl-n3{margin-top:-1rem!important}.mr-xl-n3,.mx-xl-n3{margin-right:-1rem!important}.mb-xl-n3,.my-xl-n3{margin-bottom:-1rem!important}.ml-xl-n3,.mx-xl-n3{margin-left:-1rem!important}.m-xl-n4{margin:-1.5rem!important}.mt-xl-n4,.my-xl-n4{margin-top:-1.5rem!important}.mr-xl-n4,.mx-xl-n4{margin-right:-1.5rem!important}.mb-xl-n4,.my-xl-n4{margin-bottom:-1.5rem!important}.ml-xl-n4,.mx-xl-n4{margin-left:-1.5rem!important}.m-xl-n5{margin:-3rem!important}.mt-xl-n5,.my-xl-n5{margin-top:-3rem!important}.mr-xl-n5,.mx-xl-n5{margin-right:-3rem!important}.mb-xl-n5,.my-xl-n5{margin-bottom:-3rem!important}.ml-xl-n5,.mx-xl-n5{margin-left:-3rem!important}.m-xl-auto{margin:auto!important}.mt-xl-auto,.my-xl-auto{margin-top:auto!important}.mr-xl-auto,.mx-xl-auto{margin-right:auto!important}.mb-xl-auto,.my-xl-auto{margin-bottom:auto!important}.ml-xl-auto,.mx-xl-auto{margin-left:auto!important}}.stretched-link:after{z-index:1;pointer-events:auto;content:"";background-color:#0000;position:absolute;inset:0}.text-monospace{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace!important}.text-justify{text-align:justify!important}.text-wrap{white-space:normal!important}.text-nowrap{white-space:nowrap!important}.text-truncate{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.text-left{text-align:left!important}.text-right{text-align:right!important}.text-center{text-align:center!important}@media (min-width:576px){.text-sm-left{text-align:left!important}.text-sm-right{text-align:right!important}.text-sm-center{text-align:center!important}}@media (min-width:768px){.text-md-left{text-align:left!important}.text-md-right{text-align:right!important}.text-md-center{text-align:center!important}}@media (min-width:992px){.text-lg-left{text-align:left!important}.text-lg-right{text-align:right!important}.text-lg-center{text-align:center!important}}@media 
(min-width:1200px){.text-xl-left{text-align:left!important}.text-xl-right{text-align:right!important}.text-xl-center{text-align:center!important}}.text-lowercase{text-transform:lowercase!important}.text-uppercase{text-transform:uppercase!important}.text-capitalize{text-transform:capitalize!important}.font-weight-light{font-weight:300!important}.font-weight-lighter{font-weight:lighter!important}.font-weight-normal{font-weight:400!important}.font-weight-bold{font-weight:700!important}.font-weight-bolder{font-weight:bolder!important}.font-italic{font-style:italic!important}.text-white{color:#fff!important}.text-primary{color:#007bff!important}a.text-primary:focus,a.text-primary:hover{color:#0056b3!important}.text-secondary{color:#6c757d!important}a.text-secondary:focus,a.text-secondary:hover{color:#494f54!important}.text-success{color:#28a745!important}a.text-success:focus,a.text-success:hover{color:#19692c!important}.text-info{color:#17a2b8!important}a.text-info:focus,a.text-info:hover{color:#0f6674!important}.text-warning{color:#ffc107!important}a.text-warning:focus,a.text-warning:hover{color:#ba8b00!important}.text-danger{color:#dc3545!important}a.text-danger:focus,a.text-danger:hover{color:#a71d2a!important}.text-light{color:#f8f9fa!important}a.text-light:focus,a.text-light:hover{color:#cbd3da!important}.text-dark{color:#343a40!important}a.text-dark:focus,a.text-dark:hover{color:#121416!important}.text-body{color:#212529!important}.text-muted{color:#6c757d!important}.text-black-50{color:#00000080!important}.text-white-50{color:#ffffff80!important}.text-hide{font:0/0 a;color:#0000;text-shadow:none;background-color:#0000;border:0}.text-decoration-none{text-decoration:none!important}.text-break{word-break:break-word!important;word-wrap:break-word!important}.text-reset{color:inherit!important}.visible{visibility:visible!important}.invisible{visibility:hidden!important}@media print{*,:after,:before{text-shadow:none!important;box-shadow:none!important}a:not(.btn){text-decoration:underline}abbr[title]:after{content:" (" attr(title)")"}pre{white-space:pre-wrap!important}blockquote,pre{page-break-inside:avoid;border:1px solid #adb5bd}img,tr{page-break-inside:avoid}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}@page{size:a3}body,.container{min-width:992px!important}.navbar{display:none}.badge{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #dee2e6!important}.table-dark{color:inherit}.table-dark tbody+tbody,.table-dark td,.table-dark th,.table-dark thead th{border-color:#dee2e6}.table .thead-dark th{color:inherit;border-color:#dee2e6}}@-webkit-keyframes bs-notify-fadeOut{0%{opacity:.9}to{opacity:0}}@-o-keyframes bs-notify-fadeOut{0%{opacity:.9}to{opacity:0}}@keyframes 
bs-notify-fadeOut{0%{opacity:.9}to{opacity:0}}.bootstrap-select>select.bs-select-hidden,select.bs-select-hidden,select.selectpicker{display:none!important}.bootstrap-select{width:220px\0;vertical-align:middle}.bootstrap-select>.dropdown-toggle{width:100%;text-align:right;white-space:nowrap;justify-content:space-between;align-items:center;display:-webkit-inline-box;display:-webkit-inline-flex;display:-ms-inline-flexbox;display:inline-flex;position:relative}.bootstrap-select>.dropdown-toggle:after{margin-top:-1px}.bootstrap-select>.dropdown-toggle.bs-placeholder,.bootstrap-select>.dropdown-toggle.bs-placeholder:active,.bootstrap-select>.dropdown-toggle.bs-placeholder:focus,.bootstrap-select>.dropdown-toggle.bs-placeholder:hover{color:#999}.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-danger,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-danger:active,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-danger:focus,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-danger:hover,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-dark,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-dark:active,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-dark:focus,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-dark:hover,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-info,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-info:active,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-info:focus,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-info:hover,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-primary,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-primary:active,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-primary:focus,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-primary:hover,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-secondary,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-secondary:active,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-secondary:focus,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-secondary:hover,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-success,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-success:active,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-success:focus,.bootstrap-select>.dropdown-toggle.bs-placeholder.btn-success:hover{color:#ffffff80}.bootstrap-select>select{border:none;bottom:0;left:50%;width:.5px!important;height:100%!important;opacity:0!important;z-index:0!important;padding:0!important;display:block!important;position:absolute!important}.bootstrap-select>select.mobile-device{top:0;left:0;width:100%!important;z-index:2!important;display:block!important}.bootstrap-select.is-invalid .dropdown-toggle,.error .bootstrap-select .dropdown-toggle,.has-error .bootstrap-select .dropdown-toggle,.was-validated .bootstrap-select select:invalid+.dropdown-toggle{border-color:#b94a48}.bootstrap-select.is-valid .dropdown-toggle,.was-validated .bootstrap-select select:valid+.dropdown-toggle{border-color:#28a745}.bootstrap-select.fit-width{width:auto!important}.bootstrap-select:not([class*=col-]):not([class*=form-control]):not(.input-group-btn){width:220px}.bootstrap-select .dropdown-toggle:focus,.bootstrap-select>select.mobile-device:focus+.dropdown-toggle{outline-offset:-2px;outline:thin dotted #333!important;outline:5px auto 
-webkit-focus-ring-color!important}.bootstrap-select.form-control{height:auto;border:none;margin-bottom:0;padding:0}:not(.input-group)>.bootstrap-select.form-control:not([class*=col-]){width:100%}.bootstrap-select.form-control.input-group-btn{float:none;z-index:auto}.form-inline .bootstrap-select,.form-inline .bootstrap-select.form-control:not([class*=col-]){width:auto}.bootstrap-select:not(.input-group-btn),.bootstrap-select[class*=col-]{float:none;margin-left:0;display:inline-block}.bootstrap-select.dropdown-menu-right,.bootstrap-select[class*=col-].dropdown-menu-right,.row .bootstrap-select[class*=col-].dropdown-menu-right{float:right}.form-group .bootstrap-select,.form-horizontal .bootstrap-select,.form-inline .bootstrap-select{margin-bottom:0}.form-group-lg .bootstrap-select.form-control,.form-group-sm .bootstrap-select.form-control{padding:0}.form-group-lg .bootstrap-select.form-control .dropdown-toggle,.form-group-sm .bootstrap-select.form-control .dropdown-toggle{height:100%;font-size:inherit;line-height:inherit;border-radius:inherit}.bootstrap-select.form-control-lg .dropdown-toggle,.bootstrap-select.form-control-sm .dropdown-toggle{font-size:inherit;line-height:inherit;border-radius:inherit}.bootstrap-select.form-control-sm .dropdown-toggle{padding:.25rem .5rem}.bootstrap-select.form-control-lg .dropdown-toggle{padding:.5rem 1rem}.form-inline .bootstrap-select .form-control{width:100%}.bootstrap-select.disabled,.bootstrap-select>.disabled{cursor:not-allowed}.bootstrap-select.disabled:focus,.bootstrap-select>.disabled:focus{outline:0!important}.bootstrap-select.bs-container{position:absolute;top:0;left:0;height:0!important;padding:0!important}.bootstrap-select.bs-container .dropdown-menu{z-index:1060}.bootstrap-select .dropdown-toggle .filter-option{float:left;height:100%;width:100%;text-align:left;-webkit-box-flex:0;-webkit-flex:0 auto;-ms-flex:0 auto;flex:0 auto;position:static;top:0;left:0;overflow:hidden}.bs3.bootstrap-select .dropdown-toggle .filter-option{padding-right:inherit}.input-group .bs3-has-addon.bootstrap-select .dropdown-toggle .filter-option{padding-top:inherit;padding-bottom:inherit;padding-left:inherit;float:none;position:absolute}.input-group .bs3-has-addon.bootstrap-select .dropdown-toggle .filter-option .filter-option-inner{padding-right:inherit}.bootstrap-select .dropdown-toggle .filter-option-inner-inner{overflow:hidden}.bootstrap-select .dropdown-toggle .filter-expand{float:left;overflow:hidden;width:0!important;opacity:0!important}.bootstrap-select .dropdown-toggle .caret{vertical-align:middle;margin-top:-2px;position:absolute;top:50%;right:12px}.input-group .bootstrap-select.form-control .dropdown-toggle{border-radius:inherit}.bootstrap-select[class*=col-] .dropdown-toggle{width:100%}.bootstrap-select .dropdown-menu{min-width:100%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.bootstrap-select .dropdown-menu>.inner:focus{outline:0!important}.bootstrap-select .dropdown-menu.inner{float:none;-webkit-box-shadow:none;box-shadow:none;border:0;border-radius:0;margin:0;padding:0;position:static}.bootstrap-select .dropdown-menu li{position:relative}.bootstrap-select .dropdown-menu li.active small{color:#ffffff80!important}.bootstrap-select .dropdown-menu li.disabled a{cursor:not-allowed}.bootstrap-select .dropdown-menu li a{cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.bootstrap-select .dropdown-menu li a.opt{padding-left:2.25em;position:relative}.bootstrap-select 
.dropdown-menu li a span.check-mark{display:none}.bootstrap-select .dropdown-menu li a span.text{display:inline-block}.bootstrap-select .dropdown-menu li small{padding-left:.5em}.bootstrap-select .dropdown-menu .notify{width:96%;min-height:26px;pointer-events:none;opacity:.9;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;background:#f5f5f5;border:1px solid #e3e3e3;margin:0 2%;padding:3px 5px;position:absolute;bottom:5px;-webkit-box-shadow:inset 0 1px 1px #0000000d;box-shadow:inset 0 1px 1px #0000000d}.bootstrap-select .dropdown-menu .notify.fadeOut{-webkit-animation:bs-notify-fadeOut .3s linear .75s forwards;-o-animation:bs-notify-fadeOut .3s linear .75s forwards;animation:bs-notify-fadeOut .3s linear .75s forwards}.bootstrap-select .no-results{white-space:nowrap;background:#f5f5f5;margin:0 5px;padding:3px}.bootstrap-select.fit-width .dropdown-toggle .filter-option{padding:0;display:inline;position:static}.bootstrap-select.fit-width .dropdown-toggle .filter-option-inner,.bootstrap-select.fit-width .dropdown-toggle .filter-option-inner-inner{display:inline}.bootstrap-select.fit-width .dropdown-toggle .bs-caret:before{content:" "}.bootstrap-select.fit-width .dropdown-toggle .caret{margin-top:-1px;position:static;top:auto}.bootstrap-select.show-tick .dropdown-menu .selected span.check-mark{display:inline-block;position:absolute;top:5px;right:15px}.bootstrap-select.show-tick .dropdown-menu li a span.text{margin-right:34px}.bootstrap-select .bs-ok-default:after{content:"";width:.5em;height:1em;-webkit-transform-style:preserve-3d;transform-style:preserve-3d;border-style:solid;border-width:0 .26em .26em 0;display:block;-webkit-transform:rotate(45deg);-ms-transform:rotate(45deg);-o-transform:rotate(45deg);transform:rotate(45deg)}.bootstrap-select.show-menu-arrow.open>.dropdown-toggle,.bootstrap-select.show-menu-arrow.show>.dropdown-toggle{z-index:1061}.bootstrap-select.show-menu-arrow .dropdown-toggle .filter-option:before{content:"";border-bottom:7px solid #ccc3;border-left:7px solid #0000;border-right:7px solid #0000;display:none;position:absolute;bottom:-4px;left:9px}.bootstrap-select.show-menu-arrow .dropdown-toggle .filter-option:after{content:"";border-bottom:6px solid #fff;border-left:6px solid #0000;border-right:6px solid #0000;display:none;position:absolute;bottom:-4px;left:10px}.bootstrap-select.show-menu-arrow.dropup .dropdown-toggle .filter-option:before{border-top:7px solid #ccc3;border-bottom:0;top:-4px;bottom:auto}.bootstrap-select.show-menu-arrow.dropup .dropdown-toggle .filter-option:after{border-top:6px solid #fff;border-bottom:0;top:-4px;bottom:auto}.bootstrap-select.show-menu-arrow.pull-right .dropdown-toggle .filter-option:before{left:auto;right:12px}.bootstrap-select.show-menu-arrow.pull-right .dropdown-toggle .filter-option:after{left:auto;right:13px}.bootstrap-select.show-menu-arrow.open>.dropdown-toggle .filter-option:after,.bootstrap-select.show-menu-arrow.open>.dropdown-toggle .filter-option:before,.bootstrap-select.show-menu-arrow.show>.dropdown-toggle .filter-option:after,.bootstrap-select.show-menu-arrow.show>.dropdown-toggle .filter-option:before{display:block}.bs-actionsbox,.bs-donebutton,.bs-searchbox{padding:4px 8px}.bs-actionsbox{width:100%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.bs-actionsbox .btn-group button{width:50%}.bs-donebutton{float:left;width:100%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.bs-donebutton .btn-group 
button{width:100%}.bs-searchbox+.bs-actionsbox{padding:0 8px 4px}.bs-searchbox .form-control{width:100%;float:none;margin-bottom:0}@font-face{font-family:FontAwesome;src:url(fontawesome-webfont.3981e506.eot);src:url(fontawesome-webfont.3981e506.eot#iefix&v=4.7.0)format("embedded-opentype"),url(fontawesome-webfont.58488e7e.woff2)format("woff2"),url(fontawesome-webfont.ed962b83.woff)format("woff"),url(fontawesome-webfont.0caf0c90.ttf)format("truetype"),url(fontawesome-webfont.a9323ae9.svg#fontawesomeregular)format("svg");font-weight:400;font-style:normal}.fa{font:14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;display:inline-block}.fa-lg{vertical-align:-15%;font-size:1.33333em;line-height:.75em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{margin-left:2.14286em;padding-left:0;list-style-type:none}.fa-ul>li{position:relative}.fa-li{width:2.14286em;text-align:center;position:absolute;top:.142857em;left:-2.14286em}.fa-li.fa-lg{left:-1.85714em}.fa-border{border:.08em solid #eee;border-radius:.1em;padding:.2em .25em .15em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left{margin-right:.3em}.fa.fa-pull-right{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8,end) infinite;animation:fa-spin 1s steps(8,end) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root 
.fa-flip-vertical{filter:none}.fa-stack{width:2em;height:2em;vertical-align:middle;line-height:2em;display:inline-block;position:relative}.fa-stack-1x,.fa-stack-2x{width:100%;text-align:center;position:absolute;left:0}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-remove:before,.fa-close:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-gear:before,.fa-cog:before{content:""}.fa-trash-o:before{content:""}.fa-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-rotate-right:before,.fa-repeat:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before{content:""}.fa-check-circle:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:"
"}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-warning:before,.fa-exclamation-triangle:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-gears:before,.fa-cogs:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before{content:""}.fa-arrow-circle-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-save:before,.fa-floppy-o:before{content:""}.fa-square:before{content:""}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-unsorted:before,.fa-sort:before{content:""}.fa-sort-down:before,.fa-sort-desc:before{content:""}.fa
-sort-up:before,.fa-sort-asc:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-legal:before,.fa-gavel:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-flash:before,.fa-bolt:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-paste:before,.fa-clipboard:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-unlink:before,.fa-chain-broken:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:"
"}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:""}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:""}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:""}.fa-euro:before,.fa-eur:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-rupee:before,.fa-inr:before{content:""}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:""}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:""}.fa-won:before,.fa-krw:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-turkish-lira:before,.fa-try:before{content:""}.fa-plus-square-o:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-institution:before,.fa-bank:before,.fa-university:before{content:""}.fa-mortar-board:before,.fa-graduation-cap:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:b
efore{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:""}.fa-file-zip-o:before,.fa-file-archive-o:before{content:""}.fa-file-sound-o:before,.fa-file-audio-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-resistance:before,.fa-rebel:before{content:""}.fa-ge:before,.fa-empire:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-send:before,.fa-paper-plane:before{content:""}.fa-send-o:before,.fa-paper-plane-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:
""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-hotel:before,.fa-bed:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-yc:before,.fa-y-combinator:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery:before,.fa-battery-full:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-tv:before,.fa-television:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before
{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-asl-interpreting:before,.fa-american-sign-language-interpreting:before{content:""}.fa-deafness:before,.fa-hard-of-hearing:before,.fa-deaf:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-signing:before,.fa-sign-language:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-vcard:before,.fa-address-card:before{content:""}.fa-vcard-o:before,.fa-address-card-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer:before,.fa-thermometer-full:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bathtub:before,.fa-s15:before,.fa-bath:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.f
a-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{width:1px;height:1px;clip:rect(0,0,0,0);border:0;margin:-1px;padding:0;position:absolute;overflow:hidden}.sr-only-focusable:active,.sr-only-focusable:focus{width:auto;height:auto;clip:auto;margin:0;position:static;overflow:visible}.mfp-bg{width:100%;height:100%;z-index:1042;opacity:.8;background:#0b0b0b;position:fixed;top:0;left:0;overflow:hidden}.mfp-wrap{width:100%;height:100%;z-index:1043;-webkit-backface-visibility:hidden;position:fixed;top:0;left:0;outline:none!important}.mfp-container{text-align:center;width:100%;height:100%;box-sizing:border-box;padding:0 8px;position:absolute;top:0;left:0}.mfp-container:before{content:"";height:100%;vertical-align:middle;display:inline-block}.mfp-align-top .mfp-container:before{display:none}.mfp-content{vertical-align:middle;text-align:left;z-index:1045;margin:0 auto;display:inline-block;position:relative}.mfp-inline-holder .mfp-content,.mfp-ajax-holder .mfp-content{width:100%;cursor:auto}.mfp-ajax-cur{cursor:progress}.mfp-zoom-out-cur,.mfp-zoom-out-cur .mfp-image-holder .mfp-close{cursor:-moz-zoom-out;cursor:-webkit-zoom-out;cursor:zoom-out}.mfp-zoom{cursor:pointer;cursor:-webkit-zoom-in;cursor:-moz-zoom-in;cursor:zoom-in}.mfp-auto-cursor .mfp-content{cursor:auto}.mfp-close,.mfp-arrow,.mfp-preloader,.mfp-counter{-webkit-user-select:none;-moz-user-select:none;user-select:none}.mfp-loading.mfp-figure{display:none}.mfp-hide{display:none!important}.mfp-preloader{color:#ccc;width:auto;text-align:center;z-index:1044;margin-top:-.8em;position:absolute;top:50%;left:8px;right:8px}.mfp-preloader a{color:#ccc}.mfp-preloader a:hover{color:#fff}.mfp-s-ready .mfp-preloader,.mfp-s-error .mfp-content{display:none}button.mfp-close,button.mfp-arrow{cursor:pointer;-webkit-appearance:none;z-index:1046;box-shadow:none;touch-action:manipulation;background:0 0;border:0;outline:none;padding:0;display:block;overflow:visible}button::-moz-focus-inner{border:0;padding:0}.mfp-close{width:44px;height:44px;text-align:center;opacity:.65;color:#fff;padding:0 0 18px 10px;font-family:Arial,Baskerville,monospace;font-size:28px;font-style:normal;line-height:44px;text-decoration:none;position:absolute;top:0;right:0}.mfp-close:hover,.mfp-close:focus{opacity:1}.mfp-close:active{top:1px}.mfp-close-btn-in .mfp-close{color:#333}.mfp-image-holder .mfp-close,.mfp-iframe-holder .mfp-close{color:#fff;text-align:right;width:100%;padding-right:6px;right:-6px}.mfp-counter{color:#ccc;white-space:nowrap;font-size:12px;line-height:18px;position:absolute;top:0;right:0}.mfp-arrow{opacity:.65;width:90px;height:110px;-webkit-tap-highlight-color:transparent;margin:-55px 0 0;padding:0;position:absolute;top:50%}.mfp-arrow:active{margin-top:-54px}.mfp-arrow:hover,.mfp-arrow:focus{opacity:1}.mfp-arrow:before,.mfp-arrow:after{content:"";width:0;height:0;border:inset #0000;margin-top:35px;margin-left:35px;display:block;position:absolute;top:0;left:0}.mfp-arrow:after{border-top-width:13px;border-bottom-width:13px;top:8px}.mfp-arrow:before{opacity:.7;border-top-width:21px;border-bottom-width:21px}.mfp-arrow-left{left:0}.mfp-arrow-left:after{border-right:17px solid #fff;margin-left:31px}.mfp-arrow-left:before{border-right:27px solid #3f3f3f;margin-left:25px}.mfp-arrow-right{right:0}.mfp-arrow-right:after{border-left:17px solid #fff;margin-left:39px}.mfp-arrow-right:before{border-left:27px solid 
#3f3f3f}.mfp-iframe-holder{padding-top:40px;padding-bottom:40px}.mfp-iframe-holder .mfp-content{width:100%;max-width:900px;line-height:0}.mfp-iframe-holder .mfp-close{top:-40px}.mfp-iframe-scaler{width:100%;height:0;padding-top:56.25%;overflow:hidden}.mfp-iframe-scaler iframe{width:100%;height:100%;background:#000;display:block;position:absolute;top:0;left:0;box-shadow:0 0 8px #0009}img.mfp-img{width:auto;max-width:100%;height:auto;box-sizing:border-box;margin:0 auto;padding:40px 0;line-height:0;display:block}.mfp-figure{line-height:0}.mfp-figure:after{content:"";width:auto;height:auto;z-index:-1;background:#444;display:block;position:absolute;inset:40px 0;box-shadow:0 0 8px #0009}.mfp-figure small{color:#bdbdbd;font-size:12px;line-height:14px;display:block}.mfp-figure figure{margin:0}.mfp-bottom-bar{width:100%;cursor:auto;margin-top:-36px;position:absolute;top:100%;left:0}.mfp-title{text-align:left;color:#f3f3f3;word-wrap:break-word;padding-right:36px;line-height:18px}.mfp-image-holder .mfp-content{max-width:100%}.mfp-gallery .mfp-image-holder .mfp-figure{cursor:pointer}@media screen and (max-width:800px) and (orientation:landscape),screen and (max-height:300px){.mfp-img-mobile .mfp-image-holder{padding-left:0;padding-right:0}.mfp-img-mobile img.mfp-img{padding:0}.mfp-img-mobile .mfp-figure:after{top:0;bottom:0}.mfp-img-mobile .mfp-figure small{margin-left:5px;display:inline}.mfp-img-mobile .mfp-bottom-bar{box-sizing:border-box;background:#0009;margin:0;padding:3px 5px;position:fixed;top:auto;bottom:0}.mfp-img-mobile .mfp-bottom-bar:empty{padding:0}.mfp-img-mobile .mfp-counter{top:3px;right:5px}.mfp-img-mobile .mfp-close{width:35px;height:35px;text-align:center;background:#0009;padding:0;line-height:35px;position:fixed;top:0;right:0}}@media (max-width:900px){.mfp-arrow{-webkit-transform:scale(.75);transform:scale(.75)}.mfp-arrow-left{-webkit-transform-origin:0;transform-origin:0}.mfp-arrow-right{-webkit-transform-origin:100%;transform-origin:100%}.mfp-container{padding-left:6px;padding-right:6px}}.owl-carousel,.owl-carousel .owl-item{-webkit-tap-highlight-color:transparent;position:relative}.owl-carousel{width:100%;z-index:1;display:none}.owl-carousel .owl-stage{-ms-touch-action:pan-Y;touch-action:manipulation;-moz-backface-visibility:hidden;position:relative}.owl-carousel .owl-stage:after{content:".";clear:both;visibility:hidden;height:0;line-height:0;display:block}.owl-carousel .owl-stage-outer{position:relative;overflow:hidden;-webkit-transform:translate(0,0)}.owl-carousel .owl-item,.owl-carousel .owl-wrapper{-webkit-backface-visibility:hidden;-moz-backface-visibility:hidden;-ms-backface-visibility:hidden;-webkit-transform:translate(0,0);-moz-transform:translate(0,0);-ms-transform:translate(0,0)}.owl-carousel .owl-item{min-height:1px;float:left;-webkit-backface-visibility:hidden;-webkit-touch-callout:none}.owl-carousel .owl-item img{width:100%;display:block}.owl-carousel .owl-dots.disabled,.owl-carousel .owl-nav.disabled{display:none}.no-js .owl-carousel,.owl-carousel.owl-loaded{display:block}.owl-carousel .owl-dot,.owl-carousel .owl-nav .owl-next,.owl-carousel .owl-nav .owl-prev{cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-khtml-user-select:none}.owl-carousel .owl-nav button.owl-next,.owl-carousel .owl-nav button.owl-prev,.owl-carousel button.owl-dot{color:inherit;font:inherit;background:0 
0;border:none;padding:0!important}.owl-carousel.owl-loading{opacity:0;display:block}.owl-carousel.owl-hidden{opacity:0}.owl-carousel.owl-refresh .owl-item{visibility:hidden}.owl-carousel.owl-drag .owl-item{-ms-touch-action:pan-y;touch-action:pan-y;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.owl-carousel.owl-grab{cursor:move;cursor:grab}.owl-carousel.owl-rtl{direction:rtl}.owl-carousel.owl-rtl .owl-item{float:right}.owl-carousel .animated{animation-duration:1s;animation-fill-mode:both}.owl-carousel .owl-animated-in{z-index:0}.owl-carousel .owl-animated-out{z-index:1}.owl-carousel .fadeOut{animation-name:fadeOut}@keyframes fadeOut{0%{opacity:1}to{opacity:0}}.owl-height{transition:height .5s ease-in-out}.owl-carousel .owl-item .owl-lazy{opacity:0;transition:opacity .4s}.owl-carousel .owl-item .owl-lazy:not([src]),.owl-carousel .owl-item .owl-lazy[src^=""]{max-height:0}.owl-carousel .owl-item img.owl-lazy{transform-style:preserve-3d}.owl-carousel .owl-video-wrapper{height:100%;background:#000;position:relative}.owl-carousel .owl-video-play-icon{height:80px;width:80px;cursor:pointer;z-index:1;-webkit-backface-visibility:hidden;background:url(owl.video.play.e7a23fb2.png) no-repeat;margin-top:-40px;margin-left:-40px;transition:transform .1s;position:absolute;top:50%;left:50%}.owl-carousel .owl-video-play-icon:hover{-ms-transform:scale(1.3);transform:scale(1.3)}.owl-carousel .owl-video-playing .owl-video-play-icon,.owl-carousel .owl-video-playing .owl-video-tn{display:none}.owl-carousel .owl-video-tn{opacity:0;height:100%;background-position:50%;background-repeat:no-repeat;background-size:contain;transition:opacity .4s}.owl-carousel .owl-video-frame{z-index:1;height:100%;width:100%;position:relative}.owl-theme .owl-dots,.owl-theme .owl-nav{text-align:center;-webkit-tap-highlight-color:transparent}.owl-theme .owl-nav{margin-top:10px}.owl-theme .owl-nav [class*=owl-]{color:#fff;cursor:pointer;background:#d6d6d6;border-radius:3px;margin:5px;padding:4px 7px;font-size:14px;display:inline-block}.owl-theme .owl-nav [class*=owl-]:hover{color:#fff;background:#869791;text-decoration:none}.owl-theme .owl-nav .disabled{opacity:.5;cursor:default}.owl-theme .owl-nav.disabled+.owl-dots{margin-top:10px}.owl-theme .owl-dots .owl-dot{zoom:1;display:inline-block}.owl-theme .owl-dots .owl-dot span{width:10px;height:10px;-webkit-backface-visibility:visible;background:#d6d6d6;border-radius:30px;margin:5px 7px;transition:opacity .2s;display:block}.owl-theme .owl-dots .owl-dot.active span,.owl-theme .owl-dots .owl-dot:hover span{background:#869791}.toast-title{font-weight:700}.toast-message{-ms-word-wrap:break-word;word-wrap:break-word}.toast-message a,.toast-message label{color:#fff}.toast-message a:hover{color:#ccc;text-decoration:none}.toast-close-button{float:right;color:#fff;-webkit-text-shadow:0 1px 0 #fff;text-shadow:0 1px #fff;opacity:.8;-ms-filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=80);filter:alpha(opacity=80);font-size:20px;font-weight:700;line-height:1;position:relative;top:-.3em;right:-.3em}.toast-close-button:focus,.toast-close-button:hover{color:#000;cursor:pointer;opacity:.4;-ms-filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=40);filter:alpha(opacity=40);text-decoration:none}.rtl .toast-close-button{float:left;left:-.3em;right:.3em}button.toast-close-button{cursor:pointer;-webkit-appearance:none;background:0 
0;border:0;padding:0}.toast-top-center{width:100%;top:0;right:0}.toast-bottom-center{width:100%;bottom:0;right:0}.toast-top-full-width{width:100%;top:0;right:0}.toast-bottom-full-width{width:100%;bottom:0;right:0}.toast-top-left{top:12px;left:12px}.toast-top-right{top:12px;right:12px}.toast-bottom-right{bottom:12px;right:12px}.toast-bottom-left{bottom:12px;left:12px}#toast-container{z-index:999999;pointer-events:none;position:fixed}#toast-container *{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}#toast-container>div{pointer-events:auto;width:300px;color:#fff;opacity:.8;-ms-filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=80);filter:alpha(opacity=80);background-position:15px;background-repeat:no-repeat;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px;margin:0 0 6px;padding:15px 15px 15px 50px;position:relative;overflow:hidden;-webkit-box-shadow:0 0 12px #999;-moz-box-shadow:0 0 12px #999;box-shadow:0 0 12px #999}#toast-container>div.rtl{direction:rtl;background-position:right 15px center;padding:15px 50px 15px 15px}#toast-container>div:hover{opacity:1;-ms-filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=100);filter:alpha(opacity=100);cursor:pointer;-webkit-box-shadow:0 0 12px #000;-moz-box-shadow:0 0 12px #000;box-shadow:0 0 12px #000}#toast-container>.toast-info{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAGwSURBVEhLtZa9SgNBEMc9sUxxRcoUKSzSWIhXpFMhhYWFhaBg4yPYiWCXZxBLERsLRS3EQkEfwCKdjWJAwSKCgoKCcudv4O5YLrt7EzgXhiU3/4+b2ckmwVjJSpKkQ6wAi4gwhT+z3wRBcEz0yjSseUTrcRyfsHsXmD0AmbHOC9Ii8VImnuXBPglHpQ5wwSVM7sNnTG7Za4JwDdCjxyAiH3nyA2mtaTJufiDZ5dCaqlItILh1NHatfN5skvjx9Z38m69CgzuXmZgVrPIGE763Jx9qKsRozWYw6xOHdER+nn2KkO+Bb+UV5CBN6WC6QtBgbRVozrahAbmm6HtUsgtPC19tFdxXZYBOfkbmFJ1VaHA1VAHjd0pp70oTZzvR+EVrx2Ygfdsq6eu55BHYR8hlcki+n+kERUFG8BrA0BwjeAv2M8WLQBtcy+SD6fNsmnB3AlBLrgTtVW1c2QN4bVWLATaIS60J2Du5y1TiJgjSBvFVZgTmwCU+dAZFoPxGEEs8nyHC9Bwe2GvEJv2WXZb0vjdyFT4Cxk3e/kIqlOGoVLwwPevpYHT+00T+hWwXDf4AJAOUqWcDhbwAAAAASUVORK5CYII=)!important}#toast-container>.toast-error{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAHOSURBVEhLrZa/SgNBEMZzh0WKCClSCKaIYOED+AAKeQQLG8HWztLCImBrYadgIdY+gIKNYkBFSwu7CAoqCgkkoGBI/E28PdbLZmeDLgzZzcx83/zZ2SSXC1j9fr+I1Hq93g2yxH4iwM1vkoBWAdxCmpzTxfkN2RcyZNaHFIkSo10+8kgxkXIURV5HGxTmFuc75B2RfQkpxHG8aAgaAFa0tAHqYFfQ7Iwe2yhODk8+J4C7yAoRTWI3w/4klGRgR4lO7Rpn9+gvMyWp+uxFh8+H+ARlgN1nJuJuQAYvNkEnwGFck18Er4q3egEc/oO+mhLdKgRyhdNFiacC0rlOCbhNVz4H9FnAYgDBvU3QIioZlJFLJtsoHYRDfiZoUyIxqCtRpVlANq0EU4dApjrtgezPFad5S19Wgjkc0hNVnuF4HjVA6C7QrSIbylB+oZe3aHgBsqlNqKYH48jXyJKMuAbiyVJ8KzaB3eRc0pg9VwQ4niFryI68qiOi3AbjwdsfnAtk0bCjTLJKr6mrD9g8iq/S/B81hguOMlQTnVyG40wAcjnmgsCNESDrjme7wfftP4P7SP4N3CJZdvzoNyGq2c/HWOXJGsvVg+RA/k2MC/wN6I2YA2Pt8GkAAAAASUVORK5CYII=)!important}#toast-container>.toast-success{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAADsSURBVEhLY2AYBfQMgf///3P8+/evAIgvA/FsIF+BavYDDWMBGroaSMMBiE8VC7AZDrIFaMFnii3AZTjUgsUUWUDA8OdAH6iQbQEhw4HyGsPEcKBXBIC4ARhex4G4BsjmweU1soIFaGg/WtoFZRIZdEvIMhxkCCjXIVsATV6gFGACs4Rsw0EGgIIH3QJYJgHSARQZDrWAB+jawzgs+Q2UO49D7jnRSRGoEFRILcdmEMWGI0cm0JJ2QpYA1RDvcmzJEWhABhD/pqrL0S0CWuABKgnRki9lLseS7g2AlqwHWQSKH4oKLrILpRGhEQCw2LiRUIa4lwAAAABJRU5ErkJggg==)!important}#toast-container>.toast-warning{background
-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAGYSURBVEhL5ZSvTsNQFMbXZGICMYGYmJhAQIJAICYQPAACiSDB8AiICQQJT4CqQEwgJvYASAQCiZiYmJhAIBATCARJy+9rTsldd8sKu1M0+dLb057v6/lbq/2rK0mS/TRNj9cWNAKPYIJII7gIxCcQ51cvqID+GIEX8ASG4B1bK5gIZFeQfoJdEXOfgX4QAQg7kH2A65yQ87lyxb27sggkAzAuFhbbg1K2kgCkB1bVwyIR9m2L7PRPIhDUIXgGtyKw575yz3lTNs6X4JXnjV+LKM/m3MydnTbtOKIjtz6VhCBq4vSm3ncdrD2lk0VgUXSVKjVDJXJzijW1RQdsU7F77He8u68koNZTz8Oz5yGa6J3H3lZ0xYgXBK2QymlWWA+RWnYhskLBv2vmE+hBMCtbA7KX5drWyRT/2JsqZ2IvfB9Y4bWDNMFbJRFmC9E74SoS0CqulwjkC0+5bpcV1CZ8NMej4pjy0U+doDQsGyo1hzVJttIjhQ7GnBtRFN1UarUlH8F3xict+HY07rEzoUGPlWcjRFRr4/gChZgc3ZL2d8oAAAAASUVORK5CYII=)!important}#toast-container.toast-bottom-center>div,#toast-container.toast-top-center>div{width:300px;margin-left:auto;margin-right:auto}#toast-container.toast-bottom-full-width>div,#toast-container.toast-top-full-width>div{width:96%;margin-left:auto;margin-right:auto}.toast{background-color:#030303}.toast-success{background-color:#51a351}.toast-error{background-color:#bd362f}.toast-info{background-color:#2f96b4}.toast-warning{background-color:#f89406}.toast-progress{height:4px;opacity:.4;-ms-filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=40);filter:alpha(opacity=40);background-color:#000;position:absolute;bottom:0;left:0}@media (max-width:240px){#toast-container>div{width:11em;padding:8px 8px 8px 50px}#toast-container>div.rtl{padding:8px 50px 8px 8px}#toast-container .toast-close-button{top:-.2em;right:-.2em}#toast-container .rtl .toast-close-button{left:-.2em;right:.2em}}@media (min-width:241px) and (max-width:480px){#toast-container>div{width:18em;padding:8px 8px 8px 50px}#toast-container>div.rtl{padding:8px 50px 8px 8px}#toast-container .toast-close-button{top:-.2em;right:-.2em}#toast-container .rtl .toast-close-button{left:-.2em;right:.2em}}@media (min-width:481px) and (max-width:768px){#toast-container>div{width:25em;padding:15px 15px 15px 50px}#toast-container>div.rtl{padding:15px 50px 15px 
15px}}:root{--animate-duration:1s;--animate-delay:1s;--animate-repeat:1}.animate__animated{-webkit-animation-duration:1s;animation-duration:1s;-webkit-animation-duration:var(--animate-duration);animation-duration:var(--animate-duration);-webkit-animation-fill-mode:both;animation-fill-mode:both}.animate__animated.animate__infinite{-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite}.animate__animated.animate__repeat-1{-webkit-animation-iteration-count:1;animation-iteration-count:1;-webkit-animation-iteration-count:var(--animate-repeat);animation-iteration-count:var(--animate-repeat)}.animate__animated.animate__repeat-2{-webkit-animation-iteration-count:2;animation-iteration-count:2;-webkit-animation-iteration-count:calc(var(--animate-repeat)*2);animation-iteration-count:calc(var(--animate-repeat)*2)}.animate__animated.animate__repeat-3{-webkit-animation-iteration-count:3;animation-iteration-count:3;-webkit-animation-iteration-count:calc(var(--animate-repeat)*3);animation-iteration-count:calc(var(--animate-repeat)*3)}.animate__animated.animate__delay-1s{-webkit-animation-delay:1s;animation-delay:1s;-webkit-animation-delay:var(--animate-delay);animation-delay:var(--animate-delay)}.animate__animated.animate__delay-2s{-webkit-animation-delay:2s;animation-delay:2s;-webkit-animation-delay:calc(var(--animate-delay)*2);animation-delay:calc(var(--animate-delay)*2)}.animate__animated.animate__delay-3s{-webkit-animation-delay:3s;animation-delay:3s;-webkit-animation-delay:calc(var(--animate-delay)*3);animation-delay:calc(var(--animate-delay)*3)}.animate__animated.animate__delay-4s{-webkit-animation-delay:4s;animation-delay:4s;-webkit-animation-delay:calc(var(--animate-delay)*4);animation-delay:calc(var(--animate-delay)*4)}.animate__animated.animate__delay-5s{-webkit-animation-delay:5s;animation-delay:5s;-webkit-animation-delay:calc(var(--animate-delay)*5);animation-delay:calc(var(--animate-delay)*5)}.animate__animated.animate__faster{-webkit-animation-duration:.5s;animation-duration:.5s;-webkit-animation-duration:calc(var(--animate-duration)/2);animation-duration:calc(var(--animate-duration)/2)}.animate__animated.animate__fast{-webkit-animation-duration:.8s;animation-duration:.8s;-webkit-animation-duration:calc(var(--animate-duration)*.8);animation-duration:calc(var(--animate-duration)*.8)}.animate__animated.animate__slow{-webkit-animation-duration:2s;animation-duration:2s;-webkit-animation-duration:calc(var(--animate-duration)*2);animation-duration:calc(var(--animate-duration)*2)}.animate__animated.animate__slower{-webkit-animation-duration:3s;animation-duration:3s;-webkit-animation-duration:calc(var(--animate-duration)*3);animation-duration:calc(var(--animate-duration)*3)}@media (prefers-reduced-motion:reduce),print{.animate__animated{-webkit-transition-duration:1ms!important;transition-duration:1ms!important;-webkit-animation-duration:1ms!important;animation-duration:1ms!important;-webkit-animation-iteration-count:1!important;animation-iteration-count:1!important}.animate__animated[class*=Out]{opacity:0}}@-webkit-keyframes 
bounce{0%,20%,53%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1);-webkit-transform:translateZ(0);transform:translateZ(0)}40%,43%{-webkit-animation-timing-function:cubic-bezier(.755,.05,.855,.06);animation-timing-function:cubic-bezier(.755,.05,.855,.06);-webkit-transform:matrix(1,0,0,1.1,0,-30);transform:matrix(1,0,0,1.1,0,-30)}70%{-webkit-animation-timing-function:cubic-bezier(.755,.05,.855,.06);animation-timing-function:cubic-bezier(.755,.05,.855,.06);-webkit-transform:matrix(1,0,0,1.05,0,-15);transform:matrix(1,0,0,1.05,0,-15)}80%{-webkit-transition-timing-function:cubic-bezier(.215,.61,.355,1);transition-timing-function:cubic-bezier(.215,.61,.355,1);-webkit-transform:scaleY(.95);transform:scaleY(.95)}90%{-webkit-transform:matrix(1,0,0,1.02,0,-4);transform:matrix(1,0,0,1.02,0,-4)}}@keyframes bounce{0%,20%,53%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1);-webkit-transform:translateZ(0);transform:translateZ(0)}40%,43%{-webkit-animation-timing-function:cubic-bezier(.755,.05,.855,.06);animation-timing-function:cubic-bezier(.755,.05,.855,.06);-webkit-transform:matrix(1,0,0,1.1,0,-30);transform:matrix(1,0,0,1.1,0,-30)}70%{-webkit-animation-timing-function:cubic-bezier(.755,.05,.855,.06);animation-timing-function:cubic-bezier(.755,.05,.855,.06);-webkit-transform:matrix(1,0,0,1.05,0,-15);transform:matrix(1,0,0,1.05,0,-15)}80%{-webkit-transition-timing-function:cubic-bezier(.215,.61,.355,1);transition-timing-function:cubic-bezier(.215,.61,.355,1);-webkit-transform:scaleY(.95);transform:scaleY(.95)}90%{-webkit-transform:matrix(1,0,0,1.02,0,-4);transform:matrix(1,0,0,1.02,0,-4)}}.animate__bounce{-webkit-transform-origin:bottom;transform-origin:bottom;-webkit-animation-name:bounce;animation-name:bounce}@-webkit-keyframes flash{0%,50%,to{opacity:1}25%,75%{opacity:0}}@keyframes flash{0%,50%,to{opacity:1}25%,75%{opacity:0}}.animate__flash{-webkit-animation-name:flash;animation-name:flash}@-webkit-keyframes pulse{0%{-webkit-transform:scaleX(1);transform:scaleX(1)}50%{-webkit-transform:scale3d(1.05,1.05,1.05);transform:scale3d(1.05,1.05,1.05)}to{-webkit-transform:scaleX(1);transform:scaleX(1)}}@keyframes pulse{0%{-webkit-transform:scaleX(1);transform:scaleX(1)}50%{-webkit-transform:scale3d(1.05,1.05,1.05);transform:scale3d(1.05,1.05,1.05)}to{-webkit-transform:scaleX(1);transform:scaleX(1)}}.animate__pulse{-webkit-animation-name:pulse;animation-name:pulse;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}@-webkit-keyframes rubberBand{0%{-webkit-transform:scaleX(1);transform:scaleX(1)}30%{-webkit-transform:scale(1.25,.75);transform:scale(1.25,.75)}40%{-webkit-transform:scale(.75,1.25);transform:scale(.75,1.25)}50%{-webkit-transform:scale(1.15,.85);transform:scale(1.15,.85)}65%{-webkit-transform:scale(.95,1.05);transform:scale(.95,1.05)}75%{-webkit-transform:scale(1.05,.95);transform:scale(1.05,.95)}to{-webkit-transform:scaleX(1);transform:scaleX(1)}}@keyframes 
rubberBand{0%{-webkit-transform:scaleX(1);transform:scaleX(1)}30%{-webkit-transform:scale(1.25,.75);transform:scale(1.25,.75)}40%{-webkit-transform:scale(.75,1.25);transform:scale(.75,1.25)}50%{-webkit-transform:scale(1.15,.85);transform:scale(1.15,.85)}65%{-webkit-transform:scale(.95,1.05);transform:scale(.95,1.05)}75%{-webkit-transform:scale(1.05,.95);transform:scale(1.05,.95)}to{-webkit-transform:scaleX(1);transform:scaleX(1)}}.animate__rubberBand{-webkit-animation-name:rubberBand;animation-name:rubberBand}@-webkit-keyframes shakeX{0%,to{-webkit-transform:translateZ(0);transform:translateZ(0)}10%,30%,50%,70%,90%{-webkit-transform:translate(-10px);transform:translate(-10px)}20%,40%,60%,80%{-webkit-transform:translate(10px);transform:translate(10px)}}@keyframes shakeX{0%,to{-webkit-transform:translateZ(0);transform:translateZ(0)}10%,30%,50%,70%,90%{-webkit-transform:translate(-10px);transform:translate(-10px)}20%,40%,60%,80%{-webkit-transform:translate(10px);transform:translate(10px)}}.animate__shakeX{-webkit-animation-name:shakeX;animation-name:shakeX}@-webkit-keyframes shakeY{0%,to{-webkit-transform:translateZ(0);transform:translateZ(0)}10%,30%,50%,70%,90%{-webkit-transform:translateY(-10px);transform:translateY(-10px)}20%,40%,60%,80%{-webkit-transform:translateY(10px);transform:translateY(10px)}}@keyframes shakeY{0%,to{-webkit-transform:translateZ(0);transform:translateZ(0)}10%,30%,50%,70%,90%{-webkit-transform:translateY(-10px);transform:translateY(-10px)}20%,40%,60%,80%{-webkit-transform:translateY(10px);transform:translateY(10px)}}.animate__shakeY{-webkit-animation-name:shakeY;animation-name:shakeY}@-webkit-keyframes headShake{0%{-webkit-transform:translate(0);transform:translate(0)}6.5%{-webkit-transform:translate(-6px)rotateY(-9deg);transform:translate(-6px)rotateY(-9deg)}18.5%{-webkit-transform:translate(5px)rotateY(7deg);transform:translate(5px)rotateY(7deg)}31.5%{-webkit-transform:translate(-3px)rotateY(-5deg);transform:translate(-3px)rotateY(-5deg)}43.5%{-webkit-transform:translate(2px)rotateY(3deg);transform:translate(2px)rotateY(3deg)}50%{-webkit-transform:translate(0);transform:translate(0)}}@keyframes headShake{0%{-webkit-transform:translate(0);transform:translate(0)}6.5%{-webkit-transform:translate(-6px)rotateY(-9deg);transform:translate(-6px)rotateY(-9deg)}18.5%{-webkit-transform:translate(5px)rotateY(7deg);transform:translate(5px)rotateY(7deg)}31.5%{-webkit-transform:translate(-3px)rotateY(-5deg);transform:translate(-3px)rotateY(-5deg)}43.5%{-webkit-transform:translate(2px)rotateY(3deg);transform:translate(2px)rotateY(3deg)}50%{-webkit-transform:translate(0);transform:translate(0)}}.animate__headShake{-webkit-animation-name:headShake;animation-name:headShake;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}@-webkit-keyframes swing{20%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}40%{-webkit-transform:rotate(-10deg);transform:rotate(-10deg)}60%{-webkit-transform:rotate(5deg);transform:rotate(5deg)}80%{-webkit-transform:rotate(-5deg);transform:rotate(-5deg)}to{-webkit-transform:rotate(0deg);transform:rotate(0deg)}}@keyframes 
swing{20%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}40%{-webkit-transform:rotate(-10deg);transform:rotate(-10deg)}60%{-webkit-transform:rotate(5deg);transform:rotate(5deg)}80%{-webkit-transform:rotate(-5deg);transform:rotate(-5deg)}to{-webkit-transform:rotate(0deg);transform:rotate(0deg)}}.animate__swing{-webkit-transform-origin:top;transform-origin:top;-webkit-animation-name:swing;animation-name:swing}@-webkit-keyframes tada{0%{-webkit-transform:scaleX(1);transform:scaleX(1)}10%,20%{-webkit-transform:scale3d(.9,.9,.9)rotate(-3deg);transform:scale3d(.9,.9,.9)rotate(-3deg)}30%,50%,70%,90%{-webkit-transform:scale3d(1.1,1.1,1.1)rotate(3deg);transform:scale3d(1.1,1.1,1.1)rotate(3deg)}40%,60%,80%{-webkit-transform:scale3d(1.1,1.1,1.1)rotate(-3deg);transform:scale3d(1.1,1.1,1.1)rotate(-3deg)}to{-webkit-transform:scaleX(1);transform:scaleX(1)}}@keyframes tada{0%{-webkit-transform:scaleX(1);transform:scaleX(1)}10%,20%{-webkit-transform:scale3d(.9,.9,.9)rotate(-3deg);transform:scale3d(.9,.9,.9)rotate(-3deg)}30%,50%,70%,90%{-webkit-transform:scale3d(1.1,1.1,1.1)rotate(3deg);transform:scale3d(1.1,1.1,1.1)rotate(3deg)}40%,60%,80%{-webkit-transform:scale3d(1.1,1.1,1.1)rotate(-3deg);transform:scale3d(1.1,1.1,1.1)rotate(-3deg)}to{-webkit-transform:scaleX(1);transform:scaleX(1)}}.animate__tada{-webkit-animation-name:tada;animation-name:tada}@-webkit-keyframes wobble{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}15%{-webkit-transform:translate(-25%)rotate(-5deg);transform:translate(-25%)rotate(-5deg)}30%{-webkit-transform:translate(20%)rotate(3deg);transform:translate(20%)rotate(3deg)}45%{-webkit-transform:translate(-15%)rotate(-3deg);transform:translate(-15%)rotate(-3deg)}60%{-webkit-transform:translate(10%)rotate(2deg);transform:translate(10%)rotate(2deg)}75%{-webkit-transform:translate(-5%)rotate(-1deg);transform:translate(-5%)rotate(-1deg)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes wobble{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}15%{-webkit-transform:translate(-25%)rotate(-5deg);transform:translate(-25%)rotate(-5deg)}30%{-webkit-transform:translate(20%)rotate(3deg);transform:translate(20%)rotate(3deg)}45%{-webkit-transform:translate(-15%)rotate(-3deg);transform:translate(-15%)rotate(-3deg)}60%{-webkit-transform:translate(10%)rotate(2deg);transform:translate(10%)rotate(2deg)}75%{-webkit-transform:translate(-5%)rotate(-1deg);transform:translate(-5%)rotate(-1deg)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__wobble{-webkit-animation-name:wobble;animation-name:wobble}@-webkit-keyframes jello{0%,11.1%,to{-webkit-transform:translateZ(0);transform:translateZ(0)}22.2%{-webkit-transform:skew(-12.5deg)skewY(-12.5deg);transform:skew(-12.5deg)skewY(-12.5deg)}33.3%{-webkit-transform:skew(6.25deg)skewY(6.25deg);transform:skew(6.25deg)skewY(6.25deg)}44.4%{-webkit-transform:skew(-3.125deg)skewY(-3.125deg);transform:skew(-3.125deg)skewY(-3.125deg)}55.5%{-webkit-transform:skew(1.5625deg)skewY(1.5625deg);transform:skew(1.5625deg)skewY(1.5625deg)}66.6%{-webkit-transform:skew(-.78125deg)skewY(-.78125deg);transform:skew(-.78125deg)skewY(-.78125deg)}77.7%{-webkit-transform:skew(.390625deg)skewY(.390625deg);transform:skew(.390625deg)skewY(.390625deg)}88.8%{-webkit-transform:skew(-.195313deg)skewY(-.195313deg);transform:skew(-.195313deg)skewY(-.195313deg)}}@keyframes 
jello{0%,11.1%,to{-webkit-transform:translateZ(0);transform:translateZ(0)}22.2%{-webkit-transform:skew(-12.5deg)skewY(-12.5deg);transform:skew(-12.5deg)skewY(-12.5deg)}33.3%{-webkit-transform:skew(6.25deg)skewY(6.25deg);transform:skew(6.25deg)skewY(6.25deg)}44.4%{-webkit-transform:skew(-3.125deg)skewY(-3.125deg);transform:skew(-3.125deg)skewY(-3.125deg)}55.5%{-webkit-transform:skew(1.5625deg)skewY(1.5625deg);transform:skew(1.5625deg)skewY(1.5625deg)}66.6%{-webkit-transform:skew(-.78125deg)skewY(-.78125deg);transform:skew(-.78125deg)skewY(-.78125deg)}77.7%{-webkit-transform:skew(.390625deg)skewY(.390625deg);transform:skew(.390625deg)skewY(.390625deg)}88.8%{-webkit-transform:skew(-.195313deg)skewY(-.195313deg);transform:skew(-.195313deg)skewY(-.195313deg)}}.animate__jello{-webkit-transform-origin:50%;transform-origin:50%;-webkit-animation-name:jello;animation-name:jello}@-webkit-keyframes heartBeat{0%{-webkit-transform:scale(1);transform:scale(1)}14%{-webkit-transform:scale(1.3);transform:scale(1.3)}28%{-webkit-transform:scale(1);transform:scale(1)}42%{-webkit-transform:scale(1.3);transform:scale(1.3)}70%{-webkit-transform:scale(1);transform:scale(1)}}@keyframes heartBeat{0%{-webkit-transform:scale(1);transform:scale(1)}14%{-webkit-transform:scale(1.3);transform:scale(1.3)}28%{-webkit-transform:scale(1);transform:scale(1)}42%{-webkit-transform:scale(1.3);transform:scale(1.3)}70%{-webkit-transform:scale(1);transform:scale(1)}}.animate__heartBeat{-webkit-animation-name:heartBeat;animation-name:heartBeat;-webkit-animation-duration:1.3s;animation-duration:1.3s;-webkit-animation-duration:calc(var(--animate-duration)*1.3);animation-duration:calc(var(--animate-duration)*1.3);-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}@-webkit-keyframes backInDown{0%{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,0,-1200);transform:matrix(.7,0,0,.7,0,-1200)}80%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:1;-webkit-transform:scale(1);transform:scale(1)}}@keyframes backInDown{0%{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,0,-1200);transform:matrix(.7,0,0,.7,0,-1200)}80%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:1;-webkit-transform:scale(1);transform:scale(1)}}.animate__backInDown{-webkit-animation-name:backInDown;animation-name:backInDown}@-webkit-keyframes backInLeft{0%{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,-2000,0);transform:matrix(.7,0,0,.7,-2000,0)}80%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:1;-webkit-transform:scale(1);transform:scale(1)}}@keyframes backInLeft{0%{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,-2000,0);transform:matrix(.7,0,0,.7,-2000,0)}80%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:1;-webkit-transform:scale(1);transform:scale(1)}}.animate__backInLeft{-webkit-animation-name:backInLeft;animation-name:backInLeft}@-webkit-keyframes backInRight{0%{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,2000,0);transform:matrix(.7,0,0,.7,2000,0)}80%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:1;-webkit-transform:scale(1);transform:scale(1)}}@keyframes backInRight{0%{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,2000,0);transform:matrix(.7,0,0,.7,2000,0)}80%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:1;-webkit-transform:scale(1);transform:scale(1)}}.animate__backInRight{-webkit-animation-name:backInRight;animation-name:backInRight}@-webkit-keyframes 
backInUp{0%{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,0,1200);transform:matrix(.7,0,0,.7,0,1200)}80%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:1;-webkit-transform:scale(1);transform:scale(1)}}@keyframes backInUp{0%{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,0,1200);transform:matrix(.7,0,0,.7,0,1200)}80%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:1;-webkit-transform:scale(1);transform:scale(1)}}.animate__backInUp{-webkit-animation-name:backInUp;animation-name:backInUp}@-webkit-keyframes backOutDown{0%{opacity:1;-webkit-transform:scale(1);transform:scale(1)}20%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,0,700);transform:matrix(.7,0,0,.7,0,700)}}@keyframes backOutDown{0%{opacity:1;-webkit-transform:scale(1);transform:scale(1)}20%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,0,700);transform:matrix(.7,0,0,.7,0,700)}}.animate__backOutDown{-webkit-animation-name:backOutDown;animation-name:backOutDown}@-webkit-keyframes backOutLeft{0%{opacity:1;-webkit-transform:scale(1);transform:scale(1)}20%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,-2000,0);transform:matrix(.7,0,0,.7,-2000,0)}}@keyframes backOutLeft{0%{opacity:1;-webkit-transform:scale(1);transform:scale(1)}20%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,-2000,0);transform:matrix(.7,0,0,.7,-2000,0)}}.animate__backOutLeft{-webkit-animation-name:backOutLeft;animation-name:backOutLeft}@-webkit-keyframes backOutRight{0%{opacity:1;-webkit-transform:scale(1);transform:scale(1)}20%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,2000,0);transform:matrix(.7,0,0,.7,2000,0)}}@keyframes backOutRight{0%{opacity:1;-webkit-transform:scale(1);transform:scale(1)}20%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,2000,0);transform:matrix(.7,0,0,.7,2000,0)}}.animate__backOutRight{-webkit-animation-name:backOutRight;animation-name:backOutRight}@-webkit-keyframes backOutUp{0%{opacity:1;-webkit-transform:scale(1);transform:scale(1)}20%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,0,-700);transform:matrix(.7,0,0,.7,0,-700)}}@keyframes backOutUp{0%{opacity:1;-webkit-transform:scale(1);transform:scale(1)}20%{opacity:.7;-webkit-transform:scale(.7);transform:scale(.7)}to{opacity:.7;-webkit-transform:matrix(.7,0,0,.7,0,-700);transform:matrix(.7,0,0,.7,0,-700)}}.animate__backOutUp{-webkit-animation-name:backOutUp;animation-name:backOutUp}@-webkit-keyframes bounceIn{0%,20%,40%,60%,80%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}20%{-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}40%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}60%{opacity:1;-webkit-transform:scale3d(1.03,1.03,1.03);transform:scale3d(1.03,1.03,1.03)}80%{-webkit-transform:scale3d(.97,.97,.97);transform:scale3d(.97,.97,.97)}to{opacity:1;-webkit-transform:scaleX(1);transform:scaleX(1)}}@keyframes 
bounceIn{0%,20%,40%,60%,80%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}20%{-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}40%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}60%{opacity:1;-webkit-transform:scale3d(1.03,1.03,1.03);transform:scale3d(1.03,1.03,1.03)}80%{-webkit-transform:scale3d(.97,.97,.97);transform:scale3d(.97,.97,.97)}to{opacity:1;-webkit-transform:scaleX(1);transform:scaleX(1)}}.animate__bounceIn{-webkit-animation-duration:.75s;animation-duration:.75s;-webkit-animation-duration:calc(var(--animate-duration)*.75);animation-duration:calc(var(--animate-duration)*.75);-webkit-animation-name:bounceIn;animation-name:bounceIn}@-webkit-keyframes bounceInDown{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:matrix(1,0,0,3,0,-3000);transform:matrix(1,0,0,3,0,-3000)}60%{opacity:1;-webkit-transform:matrix(1,0,0,.9,0,25);transform:matrix(1,0,0,.9,0,25)}75%{-webkit-transform:matrix(1,0,0,.95,0,-10);transform:matrix(1,0,0,.95,0,-10)}90%{-webkit-transform:matrix(1,0,0,.985,0,5);transform:matrix(1,0,0,.985,0,5)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes bounceInDown{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:matrix(1,0,0,3,0,-3000);transform:matrix(1,0,0,3,0,-3000)}60%{opacity:1;-webkit-transform:matrix(1,0,0,.9,0,25);transform:matrix(1,0,0,.9,0,25)}75%{-webkit-transform:matrix(1,0,0,.95,0,-10);transform:matrix(1,0,0,.95,0,-10)}90%{-webkit-transform:matrix(1,0,0,.985,0,5);transform:matrix(1,0,0,.985,0,5)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__bounceInDown{-webkit-animation-name:bounceInDown;animation-name:bounceInDown}@-webkit-keyframes bounceInLeft{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:matrix(3,0,0,1,-3000,0);transform:matrix(3,0,0,1,-3000,0)}60%{opacity:1;-webkit-transform:translate(25px);transform:translate(25px)}75%{-webkit-transform:matrix(.98,0,0,1,-10,0);transform:matrix(.98,0,0,1,-10,0)}90%{-webkit-transform:matrix(.995,0,0,1,5,0);transform:matrix(.995,0,0,1,5,0)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes bounceInLeft{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:matrix(3,0,0,1,-3000,0);transform:matrix(3,0,0,1,-3000,0)}60%{opacity:1;-webkit-transform:translate(25px);transform:translate(25px)}75%{-webkit-transform:matrix(.98,0,0,1,-10,0);transform:matrix(.98,0,0,1,-10,0)}90%{-webkit-transform:matrix(.995,0,0,1,5,0);transform:matrix(.995,0,0,1,5,0)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__bounceInLeft{-webkit-animation-name:bounceInLeft;animation-name:bounceInLeft}@-webkit-keyframes 
bounceInRight{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:matrix(3,0,0,1,3000,0);transform:matrix(3,0,0,1,3000,0)}60%{opacity:1;-webkit-transform:translate(-25px);transform:translate(-25px)}75%{-webkit-transform:matrix(.98,0,0,1,10,0);transform:matrix(.98,0,0,1,10,0)}90%{-webkit-transform:matrix(.995,0,0,1,-5,0);transform:matrix(.995,0,0,1,-5,0)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes bounceInRight{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:matrix(3,0,0,1,3000,0);transform:matrix(3,0,0,1,3000,0)}60%{opacity:1;-webkit-transform:translate(-25px);transform:translate(-25px)}75%{-webkit-transform:matrix(.98,0,0,1,10,0);transform:matrix(.98,0,0,1,10,0)}90%{-webkit-transform:matrix(.995,0,0,1,-5,0);transform:matrix(.995,0,0,1,-5,0)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__bounceInRight{-webkit-animation-name:bounceInRight;animation-name:bounceInRight}@-webkit-keyframes bounceInUp{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:matrix(1,0,0,5,0,3000);transform:matrix(1,0,0,5,0,3000)}60%{opacity:1;-webkit-transform:matrix(1,0,0,.9,0,-20);transform:matrix(1,0,0,.9,0,-20)}75%{-webkit-transform:matrix(1,0,0,.95,0,10);transform:matrix(1,0,0,.95,0,10)}90%{-webkit-transform:matrix(1,0,0,.985,0,-5);transform:matrix(1,0,0,.985,0,-5)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes bounceInUp{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:matrix(1,0,0,5,0,3000);transform:matrix(1,0,0,5,0,3000)}60%{opacity:1;-webkit-transform:matrix(1,0,0,.9,0,-20);transform:matrix(1,0,0,.9,0,-20)}75%{-webkit-transform:matrix(1,0,0,.95,0,10);transform:matrix(1,0,0,.95,0,10)}90%{-webkit-transform:matrix(1,0,0,.985,0,-5);transform:matrix(1,0,0,.985,0,-5)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__bounceInUp{-webkit-animation-name:bounceInUp;animation-name:bounceInUp}@-webkit-keyframes bounceOut{20%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}50%,55%{opacity:1;-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}to{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}}@keyframes bounceOut{20%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}50%,55%{opacity:1;-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}to{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}}.animate__bounceOut{-webkit-animation-duration:.75s;animation-duration:.75s;-webkit-animation-duration:calc(var(--animate-duration)*.75);animation-duration:calc(var(--animate-duration)*.75);-webkit-animation-name:bounceOut;animation-name:bounceOut}@-webkit-keyframes bounceOutDown{20%{-webkit-transform:matrix(1,0,0,.985,0,10);transform:matrix(1,0,0,.985,0,10)}40%,45%{opacity:1;-webkit-transform:matrix(1,0,0,.9,0,-20);transform:matrix(1,0,0,.9,0,-20)}to{opacity:0;-webkit-transform:matrix(1,0,0,3,0,2000);transform:matrix(1,0,0,3,0,2000)}}@keyframes 
bounceOutDown{20%{-webkit-transform:matrix(1,0,0,.985,0,10);transform:matrix(1,0,0,.985,0,10)}40%,45%{opacity:1;-webkit-transform:matrix(1,0,0,.9,0,-20);transform:matrix(1,0,0,.9,0,-20)}to{opacity:0;-webkit-transform:matrix(1,0,0,3,0,2000);transform:matrix(1,0,0,3,0,2000)}}.animate__bounceOutDown{-webkit-animation-name:bounceOutDown;animation-name:bounceOutDown}@-webkit-keyframes bounceOutLeft{20%{opacity:1;-webkit-transform:matrix(.9,0,0,1,20,0);transform:matrix(.9,0,0,1,20,0)}to{opacity:0;-webkit-transform:matrix(2,0,0,1,-2000,0);transform:matrix(2,0,0,1,-2000,0)}}@keyframes bounceOutLeft{20%{opacity:1;-webkit-transform:matrix(.9,0,0,1,20,0);transform:matrix(.9,0,0,1,20,0)}to{opacity:0;-webkit-transform:matrix(2,0,0,1,-2000,0);transform:matrix(2,0,0,1,-2000,0)}}.animate__bounceOutLeft{-webkit-animation-name:bounceOutLeft;animation-name:bounceOutLeft}@-webkit-keyframes bounceOutRight{20%{opacity:1;-webkit-transform:matrix(.9,0,0,1,-20,0);transform:matrix(.9,0,0,1,-20,0)}to{opacity:0;-webkit-transform:matrix(2,0,0,1,2000,0);transform:matrix(2,0,0,1,2000,0)}}@keyframes bounceOutRight{20%{opacity:1;-webkit-transform:matrix(.9,0,0,1,-20,0);transform:matrix(.9,0,0,1,-20,0)}to{opacity:0;-webkit-transform:matrix(2,0,0,1,2000,0);transform:matrix(2,0,0,1,2000,0)}}.animate__bounceOutRight{-webkit-animation-name:bounceOutRight;animation-name:bounceOutRight}@-webkit-keyframes bounceOutUp{20%{-webkit-transform:matrix(1,0,0,.985,0,-10);transform:matrix(1,0,0,.985,0,-10)}40%,45%{opacity:1;-webkit-transform:matrix(1,0,0,.9,0,20);transform:matrix(1,0,0,.9,0,20)}to{opacity:0;-webkit-transform:matrix(1,0,0,3,0,-2000);transform:matrix(1,0,0,3,0,-2000)}}@keyframes bounceOutUp{20%{-webkit-transform:matrix(1,0,0,.985,0,-10);transform:matrix(1,0,0,.985,0,-10)}40%,45%{opacity:1;-webkit-transform:matrix(1,0,0,.9,0,20);transform:matrix(1,0,0,.9,0,20)}to{opacity:0;-webkit-transform:matrix(1,0,0,3,0,-2000);transform:matrix(1,0,0,3,0,-2000)}}.animate__bounceOutUp{-webkit-animation-name:bounceOutUp;animation-name:bounceOutUp}@-webkit-keyframes fadeIn{0%{opacity:0}to{opacity:1}}@keyframes fadeIn{0%{opacity:0}to{opacity:1}}.animate__fadeIn{-webkit-animation-name:fadeIn;animation-name:fadeIn}@-webkit-keyframes fadeInDown{0%{opacity:0;-webkit-transform:translateY(-100%);transform:translateY(-100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes fadeInDown{0%{opacity:0;-webkit-transform:translateY(-100%);transform:translateY(-100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__fadeInDown{-webkit-animation-name:fadeInDown;animation-name:fadeInDown}@-webkit-keyframes fadeInDownBig{0%{opacity:0;-webkit-transform:translateY(-2000px);transform:translateY(-2000px)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes fadeInDownBig{0%{opacity:0;-webkit-transform:translateY(-2000px);transform:translateY(-2000px)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__fadeInDownBig{-webkit-animation-name:fadeInDownBig;animation-name:fadeInDownBig}@-webkit-keyframes fadeInLeft{0%{opacity:0;-webkit-transform:translate(-100%);transform:translate(-100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes fadeInLeft{0%{opacity:0;-webkit-transform:translate(-100%);transform:translate(-100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__fadeInLeft{-webkit-animation-name:fadeInLeft;animation-name:fadeInLeft}@-webkit-keyframes 
fadeInLeftBig{0%{opacity:0;-webkit-transform:translate(-2000px);transform:translate(-2000px)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes fadeInLeftBig{0%{opacity:0;-webkit-transform:translate(-2000px);transform:translate(-2000px)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__fadeInLeftBig{-webkit-animation-name:fadeInLeftBig;animation-name:fadeInLeftBig}@-webkit-keyframes fadeInRight{0%{opacity:0;-webkit-transform:translate(100%);transform:translate(100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes fadeInRight{0%{opacity:0;-webkit-transform:translate(100%);transform:translate(100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__fadeInRight{-webkit-animation-name:fadeInRight;animation-name:fadeInRight}@-webkit-keyframes fadeInRightBig{0%{opacity:0;-webkit-transform:translate(2000px);transform:translate(2000px)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes fadeInRightBig{0%{opacity:0;-webkit-transform:translate(2000px);transform:translate(2000px)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__fadeInRightBig{-webkit-animation-name:fadeInRightBig;animation-name:fadeInRightBig}@-webkit-keyframes fadeInUp{0%{opacity:0;-webkit-transform:translateY(100%);transform:translateY(100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes fadeInUp{0%{opacity:0;-webkit-transform:translateY(100%);transform:translateY(100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__fadeInUp{-webkit-animation-name:fadeInUp;animation-name:fadeInUp}@-webkit-keyframes fadeInUpBig{0%{opacity:0;-webkit-transform:translateY(2000px);transform:translateY(2000px)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes fadeInUpBig{0%{opacity:0;-webkit-transform:translateY(2000px);transform:translateY(2000px)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__fadeInUpBig{-webkit-animation-name:fadeInUpBig;animation-name:fadeInUpBig}@-webkit-keyframes fadeInTopLeft{0%{opacity:0;-webkit-transform:translate(-100%,-100%);transform:translate(-100%,-100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes fadeInTopLeft{0%{opacity:0;-webkit-transform:translate(-100%,-100%);transform:translate(-100%,-100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__fadeInTopLeft{-webkit-animation-name:fadeInTopLeft;animation-name:fadeInTopLeft}@-webkit-keyframes fadeInTopRight{0%{opacity:0;-webkit-transform:translate(100%,-100%);transform:translate(100%,-100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes fadeInTopRight{0%{opacity:0;-webkit-transform:translate(100%,-100%);transform:translate(100%,-100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__fadeInTopRight{-webkit-animation-name:fadeInTopRight;animation-name:fadeInTopRight}@-webkit-keyframes fadeInBottomLeft{0%{opacity:0;-webkit-transform:translate(-100%,100%);transform:translate(-100%,100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes 
fadeInBottomLeft{0%{opacity:0;-webkit-transform:translate(-100%,100%);transform:translate(-100%,100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__fadeInBottomLeft{-webkit-animation-name:fadeInBottomLeft;animation-name:fadeInBottomLeft}@-webkit-keyframes fadeInBottomRight{0%{opacity:0;-webkit-transform:translate(100%,100%);transform:translate(100%,100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes fadeInBottomRight{0%{opacity:0;-webkit-transform:translate(100%,100%);transform:translate(100%,100%)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__fadeInBottomRight{-webkit-animation-name:fadeInBottomRight;animation-name:fadeInBottomRight}@-webkit-keyframes fadeOut{0%{opacity:1}to{opacity:0}}@keyframes fadeOut{0%{opacity:1}to{opacity:0}}.animate__fadeOut{-webkit-animation-name:fadeOut;animation-name:fadeOut}@-webkit-keyframes fadeOutDown{0%{opacity:1}to{opacity:0;-webkit-transform:translateY(100%);transform:translateY(100%)}}@keyframes fadeOutDown{0%{opacity:1}to{opacity:0;-webkit-transform:translateY(100%);transform:translateY(100%)}}.animate__fadeOutDown{-webkit-animation-name:fadeOutDown;animation-name:fadeOutDown}@-webkit-keyframes fadeOutDownBig{0%{opacity:1}to{opacity:0;-webkit-transform:translateY(2000px);transform:translateY(2000px)}}@keyframes fadeOutDownBig{0%{opacity:1}to{opacity:0;-webkit-transform:translateY(2000px);transform:translateY(2000px)}}.animate__fadeOutDownBig{-webkit-animation-name:fadeOutDownBig;animation-name:fadeOutDownBig}@-webkit-keyframes fadeOutLeft{0%{opacity:1}to{opacity:0;-webkit-transform:translate(-100%);transform:translate(-100%)}}@keyframes fadeOutLeft{0%{opacity:1}to{opacity:0;-webkit-transform:translate(-100%);transform:translate(-100%)}}.animate__fadeOutLeft{-webkit-animation-name:fadeOutLeft;animation-name:fadeOutLeft}@-webkit-keyframes fadeOutLeftBig{0%{opacity:1}to{opacity:0;-webkit-transform:translate(-2000px);transform:translate(-2000px)}}@keyframes fadeOutLeftBig{0%{opacity:1}to{opacity:0;-webkit-transform:translate(-2000px);transform:translate(-2000px)}}.animate__fadeOutLeftBig{-webkit-animation-name:fadeOutLeftBig;animation-name:fadeOutLeftBig}@-webkit-keyframes fadeOutRight{0%{opacity:1}to{opacity:0;-webkit-transform:translate(100%);transform:translate(100%)}}@keyframes fadeOutRight{0%{opacity:1}to{opacity:0;-webkit-transform:translate(100%);transform:translate(100%)}}.animate__fadeOutRight{-webkit-animation-name:fadeOutRight;animation-name:fadeOutRight}@-webkit-keyframes fadeOutRightBig{0%{opacity:1}to{opacity:0;-webkit-transform:translate(2000px);transform:translate(2000px)}}@keyframes fadeOutRightBig{0%{opacity:1}to{opacity:0;-webkit-transform:translate(2000px);transform:translate(2000px)}}.animate__fadeOutRightBig{-webkit-animation-name:fadeOutRightBig;animation-name:fadeOutRightBig}@-webkit-keyframes fadeOutUp{0%{opacity:1}to{opacity:0;-webkit-transform:translateY(-100%);transform:translateY(-100%)}}@keyframes fadeOutUp{0%{opacity:1}to{opacity:0;-webkit-transform:translateY(-100%);transform:translateY(-100%)}}.animate__fadeOutUp{-webkit-animation-name:fadeOutUp;animation-name:fadeOutUp}@-webkit-keyframes fadeOutUpBig{0%{opacity:1}to{opacity:0;-webkit-transform:translateY(-2000px);transform:translateY(-2000px)}}@keyframes fadeOutUpBig{0%{opacity:1}to{opacity:0;-webkit-transform:translateY(-2000px);transform:translateY(-2000px)}}.animate__fadeOutUpBig{-webkit-animation-name:fadeOutUpBig;animation-name:fadeOutUpBig}@-webkit-keyframes 
fadeOutTopLeft{0%{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}to{opacity:0;-webkit-transform:translate(-100%,-100%);transform:translate(-100%,-100%)}}@keyframes fadeOutTopLeft{0%{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}to{opacity:0;-webkit-transform:translate(-100%,-100%);transform:translate(-100%,-100%)}}.animate__fadeOutTopLeft{-webkit-animation-name:fadeOutTopLeft;animation-name:fadeOutTopLeft}@-webkit-keyframes fadeOutTopRight{0%{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}to{opacity:0;-webkit-transform:translate(100%,-100%);transform:translate(100%,-100%)}}@keyframes fadeOutTopRight{0%{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}to{opacity:0;-webkit-transform:translate(100%,-100%);transform:translate(100%,-100%)}}.animate__fadeOutTopRight{-webkit-animation-name:fadeOutTopRight;animation-name:fadeOutTopRight}@-webkit-keyframes fadeOutBottomRight{0%{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}to{opacity:0;-webkit-transform:translate(100%,100%);transform:translate(100%,100%)}}@keyframes fadeOutBottomRight{0%{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}to{opacity:0;-webkit-transform:translate(100%,100%);transform:translate(100%,100%)}}.animate__fadeOutBottomRight{-webkit-animation-name:fadeOutBottomRight;animation-name:fadeOutBottomRight}@-webkit-keyframes fadeOutBottomLeft{0%{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}to{opacity:0;-webkit-transform:translate(-100%,100%);transform:translate(-100%,100%)}}@keyframes fadeOutBottomLeft{0%{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}to{opacity:0;-webkit-transform:translate(-100%,100%);transform:translate(-100%,100%)}}.animate__fadeOutBottomLeft{-webkit-animation-name:fadeOutBottomLeft;animation-name:fadeOutBottomLeft}@-webkit-keyframes flip{0%{-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out;-webkit-transform:perspective(400px)scaleX(1)translateZ(0)rotateY(-1turn);transform:perspective(400px)scaleX(1)translateZ(0)rotateY(-1turn)}40%{-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out;-webkit-transform:perspective(400px)scaleX(1)translateZ(150px)rotateY(-190deg);transform:perspective(400px)scaleX(1)translateZ(150px)rotateY(-190deg)}50%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:perspective(400px)scaleX(1)translateZ(150px)rotateY(-170deg);transform:perspective(400px)scaleX(1)translateZ(150px)rotateY(-170deg)}80%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:matrix3d(.95,0,0,0,0,.95,0,0,0,0,.95,-.002375,0,0,0,1);transform:matrix3d(.95,0,0,0,0,.95,0,0,0,0,.95,-.002375,0,0,0,1)}to{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:matrix3d(1,0,0,0,0,1,0,0,0,0,1,-.0025,0,0,0,1);transform:matrix3d(1,0,0,0,0,1,0,0,0,0,1,-.0025,0,0,0,1)}}@keyframes 
flip{0%{-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out;-webkit-transform:perspective(400px)scaleX(1)translateZ(0)rotateY(-1turn);transform:perspective(400px)scaleX(1)translateZ(0)rotateY(-1turn)}40%{-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out;-webkit-transform:perspective(400px)scaleX(1)translateZ(150px)rotateY(-190deg);transform:perspective(400px)scaleX(1)translateZ(150px)rotateY(-190deg)}50%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:perspective(400px)scaleX(1)translateZ(150px)rotateY(-170deg);transform:perspective(400px)scaleX(1)translateZ(150px)rotateY(-170deg)}80%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:matrix3d(.95,0,0,0,0,.95,0,0,0,0,.95,-.002375,0,0,0,1);transform:matrix3d(.95,0,0,0,0,.95,0,0,0,0,.95,-.002375,0,0,0,1)}to{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:matrix3d(1,0,0,0,0,1,0,0,0,0,1,-.0025,0,0,0,1);transform:matrix3d(1,0,0,0,0,1,0,0,0,0,1,-.0025,0,0,0,1)}}.animate__animated.animate__flip{-webkit-backface-visibility:visible;backface-visibility:visible;-webkit-animation-name:flip;animation-name:flip}@-webkit-keyframes flipInX{0%{opacity:0;-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:perspective(400px)rotateX(90deg);transform:perspective(400px)rotateX(90deg)}40%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:perspective(400px)rotateX(-20deg);transform:perspective(400px)rotateX(-20deg)}60%{opacity:1;-webkit-transform:perspective(400px)rotateX(10deg);transform:perspective(400px)rotateX(10deg)}80%{-webkit-transform:perspective(400px)rotateX(-5deg);transform:perspective(400px)rotateX(-5deg)}to{-webkit-transform:perspective(400px);transform:perspective(400px)}}@keyframes flipInX{0%{opacity:0;-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:perspective(400px)rotateX(90deg);transform:perspective(400px)rotateX(90deg)}40%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:perspective(400px)rotateX(-20deg);transform:perspective(400px)rotateX(-20deg)}60%{opacity:1;-webkit-transform:perspective(400px)rotateX(10deg);transform:perspective(400px)rotateX(10deg)}80%{-webkit-transform:perspective(400px)rotateX(-5deg);transform:perspective(400px)rotateX(-5deg)}to{-webkit-transform:perspective(400px);transform:perspective(400px)}}.animate__flipInX{-webkit-animation-name:flipInX;animation-name:flipInX;-webkit-backface-visibility:visible!important;backface-visibility:visible!important}@-webkit-keyframes flipInY{0%{opacity:0;-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:perspective(400px)rotateY(90deg);transform:perspective(400px)rotateY(90deg)}40%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:perspective(400px)rotateY(-20deg);transform:perspective(400px)rotateY(-20deg)}60%{opacity:1;-webkit-transform:perspective(400px)rotateY(10deg);transform:perspective(400px)rotateY(10deg)}80%{-webkit-transform:perspective(400px)rotateY(-5deg);transform:perspective(400px)rotateY(-5deg)}to{-webkit-transform:perspective(400px);transform:perspective(400px)}}@keyframes 
flipInY{0%{opacity:0;-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:perspective(400px)rotateY(90deg);transform:perspective(400px)rotateY(90deg)}40%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in;-webkit-transform:perspective(400px)rotateY(-20deg);transform:perspective(400px)rotateY(-20deg)}60%{opacity:1;-webkit-transform:perspective(400px)rotateY(10deg);transform:perspective(400px)rotateY(10deg)}80%{-webkit-transform:perspective(400px)rotateY(-5deg);transform:perspective(400px)rotateY(-5deg)}to{-webkit-transform:perspective(400px);transform:perspective(400px)}}.animate__flipInY{-webkit-animation-name:flipInY;animation-name:flipInY;-webkit-backface-visibility:visible!important;backface-visibility:visible!important}@-webkit-keyframes flipOutX{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{opacity:1;-webkit-transform:perspective(400px)rotateX(-20deg);transform:perspective(400px)rotateX(-20deg)}to{opacity:0;-webkit-transform:perspective(400px)rotateX(90deg);transform:perspective(400px)rotateX(90deg)}}@keyframes flipOutX{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{opacity:1;-webkit-transform:perspective(400px)rotateX(-20deg);transform:perspective(400px)rotateX(-20deg)}to{opacity:0;-webkit-transform:perspective(400px)rotateX(90deg);transform:perspective(400px)rotateX(90deg)}}.animate__flipOutX{-webkit-animation-duration:.75s;animation-duration:.75s;-webkit-animation-duration:calc(var(--animate-duration)*.75);animation-duration:calc(var(--animate-duration)*.75);-webkit-animation-name:flipOutX;animation-name:flipOutX;-webkit-backface-visibility:visible!important;backface-visibility:visible!important}@-webkit-keyframes flipOutY{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{opacity:1;-webkit-transform:perspective(400px)rotateY(-15deg);transform:perspective(400px)rotateY(-15deg)}to{opacity:0;-webkit-transform:perspective(400px)rotateY(90deg);transform:perspective(400px)rotateY(90deg)}}@keyframes flipOutY{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{opacity:1;-webkit-transform:perspective(400px)rotateY(-15deg);transform:perspective(400px)rotateY(-15deg)}to{opacity:0;-webkit-transform:perspective(400px)rotateY(90deg);transform:perspective(400px)rotateY(90deg)}}.animate__flipOutY{-webkit-animation-duration:.75s;animation-duration:.75s;-webkit-animation-duration:calc(var(--animate-duration)*.75);animation-duration:calc(var(--animate-duration)*.75);-webkit-animation-name:flipOutY;animation-name:flipOutY;-webkit-backface-visibility:visible!important;backface-visibility:visible!important}@-webkit-keyframes lightSpeedInRight{0%{opacity:0;-webkit-transform:translate(100%)skew(-30deg);transform:translate(100%)skew(-30deg)}60%{opacity:1;-webkit-transform:skew(20deg);transform:skew(20deg)}80%{-webkit-transform:skew(-5deg);transform:skew(-5deg)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes lightSpeedInRight{0%{opacity:0;-webkit-transform:translate(100%)skew(-30deg);transform:translate(100%)skew(-30deg)}60%{opacity:1;-webkit-transform:skew(20deg);transform:skew(20deg)}80%{-webkit-transform:skew(-5deg);transform:skew(-5deg)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__lightSpeedInRight{-webkit-animation-name:lightSpeedInRight;animation-name:lightSpeedInRight;-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}@-webkit-keyframes 
lightSpeedInLeft{0%{opacity:0;-webkit-transform:translate(-100%)skew(30deg);transform:translate(-100%)skew(30deg)}60%{opacity:1;-webkit-transform:skew(-20deg);transform:skew(-20deg)}80%{-webkit-transform:skew(5deg);transform:skew(5deg)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes lightSpeedInLeft{0%{opacity:0;-webkit-transform:translate(-100%)skew(30deg);transform:translate(-100%)skew(30deg)}60%{opacity:1;-webkit-transform:skew(-20deg);transform:skew(-20deg)}80%{-webkit-transform:skew(5deg);transform:skew(5deg)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__lightSpeedInLeft{-webkit-animation-name:lightSpeedInLeft;animation-name:lightSpeedInLeft;-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}@-webkit-keyframes lightSpeedOutRight{0%{opacity:1}to{opacity:0;-webkit-transform:translate(100%)skew(30deg);transform:translate(100%)skew(30deg)}}@keyframes lightSpeedOutRight{0%{opacity:1}to{opacity:0;-webkit-transform:translate(100%)skew(30deg);transform:translate(100%)skew(30deg)}}.animate__lightSpeedOutRight{-webkit-animation-name:lightSpeedOutRight;animation-name:lightSpeedOutRight;-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}@-webkit-keyframes lightSpeedOutLeft{0%{opacity:1}to{opacity:0;-webkit-transform:translate(-100%)skew(-30deg);transform:translate(-100%)skew(-30deg)}}@keyframes lightSpeedOutLeft{0%{opacity:1}to{opacity:0;-webkit-transform:translate(-100%)skew(-30deg);transform:translate(-100%)skew(-30deg)}}.animate__lightSpeedOutLeft{-webkit-animation-name:lightSpeedOutLeft;animation-name:lightSpeedOutLeft;-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}@-webkit-keyframes rotateIn{0%{opacity:0;-webkit-transform:rotate(-200deg);transform:rotate(-200deg)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes rotateIn{0%{opacity:0;-webkit-transform:rotate(-200deg);transform:rotate(-200deg)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__rotateIn{-webkit-transform-origin:50%;transform-origin:50%;-webkit-animation-name:rotateIn;animation-name:rotateIn}@-webkit-keyframes rotateInDownLeft{0%{opacity:0;-webkit-transform:rotate(-45deg);transform:rotate(-45deg)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes rotateInDownLeft{0%{opacity:0;-webkit-transform:rotate(-45deg);transform:rotate(-45deg)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__rotateInDownLeft{-webkit-transform-origin:0 100%;transform-origin:0 100%;-webkit-animation-name:rotateInDownLeft;animation-name:rotateInDownLeft}@-webkit-keyframes rotateInDownRight{0%{opacity:0;-webkit-transform:rotate(45deg);transform:rotate(45deg)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes rotateInDownRight{0%{opacity:0;-webkit-transform:rotate(45deg);transform:rotate(45deg)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__rotateInDownRight{-webkit-transform-origin:100% 100%;transform-origin:100% 100%;-webkit-animation-name:rotateInDownRight;animation-name:rotateInDownRight}@-webkit-keyframes rotateInUpLeft{0%{opacity:0;-webkit-transform:rotate(45deg);transform:rotate(45deg)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes 
rotateInUpLeft{0%{opacity:0;-webkit-transform:rotate(45deg);transform:rotate(45deg)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__rotateInUpLeft{-webkit-transform-origin:0 100%;transform-origin:0 100%;-webkit-animation-name:rotateInUpLeft;animation-name:rotateInUpLeft}@-webkit-keyframes rotateInUpRight{0%{opacity:0;-webkit-transform:rotate(-90deg);transform:rotate(-90deg)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes rotateInUpRight{0%{opacity:0;-webkit-transform:rotate(-90deg);transform:rotate(-90deg)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__rotateInUpRight{-webkit-transform-origin:100% 100%;transform-origin:100% 100%;-webkit-animation-name:rotateInUpRight;animation-name:rotateInUpRight}@-webkit-keyframes rotateOut{0%{opacity:1}to{opacity:0;-webkit-transform:rotate(200deg);transform:rotate(200deg)}}@keyframes rotateOut{0%{opacity:1}to{opacity:0;-webkit-transform:rotate(200deg);transform:rotate(200deg)}}.animate__rotateOut{-webkit-transform-origin:50%;transform-origin:50%;-webkit-animation-name:rotateOut;animation-name:rotateOut}@-webkit-keyframes rotateOutDownLeft{0%{opacity:1}to{opacity:0;-webkit-transform:rotate(45deg);transform:rotate(45deg)}}@keyframes rotateOutDownLeft{0%{opacity:1}to{opacity:0;-webkit-transform:rotate(45deg);transform:rotate(45deg)}}.animate__rotateOutDownLeft{-webkit-transform-origin:0 100%;transform-origin:0 100%;-webkit-animation-name:rotateOutDownLeft;animation-name:rotateOutDownLeft}@-webkit-keyframes rotateOutDownRight{0%{opacity:1}to{opacity:0;-webkit-transform:rotate(-45deg);transform:rotate(-45deg)}}@keyframes rotateOutDownRight{0%{opacity:1}to{opacity:0;-webkit-transform:rotate(-45deg);transform:rotate(-45deg)}}.animate__rotateOutDownRight{-webkit-transform-origin:100% 100%;transform-origin:100% 100%;-webkit-animation-name:rotateOutDownRight;animation-name:rotateOutDownRight}@-webkit-keyframes rotateOutUpLeft{0%{opacity:1}to{opacity:0;-webkit-transform:rotate(-45deg);transform:rotate(-45deg)}}@keyframes rotateOutUpLeft{0%{opacity:1}to{opacity:0;-webkit-transform:rotate(-45deg);transform:rotate(-45deg)}}.animate__rotateOutUpLeft{-webkit-transform-origin:0 100%;transform-origin:0 100%;-webkit-animation-name:rotateOutUpLeft;animation-name:rotateOutUpLeft}@-webkit-keyframes rotateOutUpRight{0%{opacity:1}to{opacity:0;-webkit-transform:rotate(90deg);transform:rotate(90deg)}}@keyframes rotateOutUpRight{0%{opacity:1}to{opacity:0;-webkit-transform:rotate(90deg);transform:rotate(90deg)}}.animate__rotateOutUpRight{-webkit-transform-origin:100% 100%;transform-origin:100% 100%;-webkit-animation-name:rotateOutUpRight;animation-name:rotateOutUpRight}@-webkit-keyframes hinge{0%{-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}20%,60%{-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out;-webkit-transform:rotate(80deg);transform:rotate(80deg)}40%,80%{opacity:1;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out;-webkit-transform:rotate(60deg);transform:rotate(60deg)}to{opacity:0;-webkit-transform:translateY(700px);transform:translateY(700px)}}@keyframes 
hinge{0%{-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}20%,60%{-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out;-webkit-transform:rotate(80deg);transform:rotate(80deg)}40%,80%{opacity:1;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out;-webkit-transform:rotate(60deg);transform:rotate(60deg)}to{opacity:0;-webkit-transform:translateY(700px);transform:translateY(700px)}}.animate__hinge{-webkit-animation-duration:2s;animation-duration:2s;-webkit-animation-duration:calc(var(--animate-duration)*2);animation-duration:calc(var(--animate-duration)*2);-webkit-transform-origin:0 0;transform-origin:0 0;-webkit-animation-name:hinge;animation-name:hinge}@-webkit-keyframes jackInTheBox{0%{opacity:0;-webkit-transform-origin:bottom;transform-origin:bottom;-webkit-transform:scale(.1)rotate(30deg);transform:scale(.1)rotate(30deg)}50%{-webkit-transform:rotate(-10deg);transform:rotate(-10deg)}70%{-webkit-transform:rotate(3deg);transform:rotate(3deg)}to{opacity:1;-webkit-transform:scale(1);transform:scale(1)}}@keyframes jackInTheBox{0%{opacity:0;-webkit-transform-origin:bottom;transform-origin:bottom;-webkit-transform:scale(.1)rotate(30deg);transform:scale(.1)rotate(30deg)}50%{-webkit-transform:rotate(-10deg);transform:rotate(-10deg)}70%{-webkit-transform:rotate(3deg);transform:rotate(3deg)}to{opacity:1;-webkit-transform:scale(1);transform:scale(1)}}.animate__jackInTheBox{-webkit-animation-name:jackInTheBox;animation-name:jackInTheBox}@-webkit-keyframes rollIn{0%{opacity:0;-webkit-transform:translate(-100%)rotate(-120deg);transform:translate(-100%)rotate(-120deg)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes rollIn{0%{opacity:0;-webkit-transform:translate(-100%)rotate(-120deg);transform:translate(-100%)rotate(-120deg)}to{opacity:1;-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__rollIn{-webkit-animation-name:rollIn;animation-name:rollIn}@-webkit-keyframes rollOut{0%{opacity:1}to{opacity:0;-webkit-transform:translate(100%)rotate(120deg);transform:translate(100%)rotate(120deg)}}@keyframes rollOut{0%{opacity:1}to{opacity:0;-webkit-transform:translate(100%)rotate(120deg);transform:translate(100%)rotate(120deg)}}.animate__rollOut{-webkit-animation-name:rollOut;animation-name:rollOut}@-webkit-keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}@keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}.animate__zoomIn{-webkit-animation-name:zoomIn;animation-name:zoomIn}@-webkit-keyframes zoomInDown{0%{opacity:0;-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19);-webkit-transform:translateY(-100px)scale3d(.1,.1,.1);transform:translateY(-100px)scale3d(.1,.1,.1)}60%{opacity:1;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1);-webkit-transform:scale3d(.475,.475,.475)translateY(60px);transform:scale3d(.475,.475,.475)translateY(60px)}}@keyframes 
zoomInDown{0%{opacity:0;-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19);-webkit-transform:translateY(-100px)scale3d(.1,.1,.1);transform:translateY(-100px)scale3d(.1,.1,.1)}60%{opacity:1;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1);-webkit-transform:scale3d(.475,.475,.475)translateY(60px);transform:scale3d(.475,.475,.475)translateY(60px)}}.animate__zoomInDown{-webkit-animation-name:zoomInDown;animation-name:zoomInDown}@-webkit-keyframes zoomInLeft{0%{opacity:0;-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19);-webkit-transform:translate(-100px)scale3d(.1,.1,.1);transform:translate(-100px)scale3d(.1,.1,.1)}60%{opacity:1;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1);-webkit-transform:scale3d(.475,.475,.475)translate(10px);transform:scale3d(.475,.475,.475)translate(10px)}}@keyframes zoomInLeft{0%{opacity:0;-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19);-webkit-transform:translate(-100px)scale3d(.1,.1,.1);transform:translate(-100px)scale3d(.1,.1,.1)}60%{opacity:1;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1);-webkit-transform:scale3d(.475,.475,.475)translate(10px);transform:scale3d(.475,.475,.475)translate(10px)}}.animate__zoomInLeft{-webkit-animation-name:zoomInLeft;animation-name:zoomInLeft}@-webkit-keyframes zoomInRight{0%{opacity:0;-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19);-webkit-transform:translate(100px)scale3d(.1,.1,.1);transform:translate(100px)scale3d(.1,.1,.1)}60%{opacity:1;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1);-webkit-transform:scale3d(.475,.475,.475)translate(-10px);transform:scale3d(.475,.475,.475)translate(-10px)}}@keyframes zoomInRight{0%{opacity:0;-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19);-webkit-transform:translate(100px)scale3d(.1,.1,.1);transform:translate(100px)scale3d(.1,.1,.1)}60%{opacity:1;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1);-webkit-transform:scale3d(.475,.475,.475)translate(-10px);transform:scale3d(.475,.475,.475)translate(-10px)}}.animate__zoomInRight{-webkit-animation-name:zoomInRight;animation-name:zoomInRight}@-webkit-keyframes zoomInUp{0%{opacity:0;-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19);-webkit-transform:translateY(100px)scale3d(.1,.1,.1);transform:translateY(100px)scale3d(.1,.1,.1)}60%{opacity:1;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1);-webkit-transform:scale3d(.475,.475,.475)translateY(-60px);transform:scale3d(.475,.475,.475)translateY(-60px)}}@keyframes 
zoomInUp{0%{opacity:0;-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19);-webkit-transform:translateY(100px)scale3d(.1,.1,.1);transform:translateY(100px)scale3d(.1,.1,.1)}60%{opacity:1;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1);-webkit-transform:scale3d(.475,.475,.475)translateY(-60px);transform:scale3d(.475,.475,.475)translateY(-60px)}}.animate__zoomInUp{-webkit-animation-name:zoomInUp;animation-name:zoomInUp}@-webkit-keyframes zoomOut{0%{opacity:1}50%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}to{opacity:0}}@keyframes zoomOut{0%{opacity:1}50%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}to{opacity:0}}.animate__zoomOut{-webkit-animation-name:zoomOut;animation-name:zoomOut}@-webkit-keyframes zoomOutDown{40%{opacity:1;-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19);-webkit-transform:scale3d(.475,.475,.475)translateY(-60px);transform:scale3d(.475,.475,.475)translateY(-60px)}to{opacity:0;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1);-webkit-transform:translateY(200px)scale3d(.1,.1,.1);transform:translateY(200px)scale3d(.1,.1,.1)}}@keyframes zoomOutDown{40%{opacity:1;-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19);-webkit-transform:scale3d(.475,.475,.475)translateY(-60px);transform:scale3d(.475,.475,.475)translateY(-60px)}to{opacity:0;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1);-webkit-transform:translateY(200px)scale3d(.1,.1,.1);transform:translateY(200px)scale3d(.1,.1,.1)}}.animate__zoomOutDown{-webkit-transform-origin:bottom;transform-origin:bottom;-webkit-animation-name:zoomOutDown;animation-name:zoomOutDown}@-webkit-keyframes zoomOutLeft{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475)translate(42px);transform:scale3d(.475,.475,.475)translate(42px)}to{opacity:0;-webkit-transform:matrix(.1,0,0,.1,-200,0);transform:matrix(.1,0,0,.1,-200,0)}}@keyframes zoomOutLeft{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475)translate(42px);transform:scale3d(.475,.475,.475)translate(42px)}to{opacity:0;-webkit-transform:matrix(.1,0,0,.1,-200,0);transform:matrix(.1,0,0,.1,-200,0)}}.animate__zoomOutLeft{-webkit-transform-origin:0;transform-origin:0;-webkit-animation-name:zoomOutLeft;animation-name:zoomOutLeft}@-webkit-keyframes zoomOutRight{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475)translate(-42px);transform:scale3d(.475,.475,.475)translate(-42px)}to{opacity:0;-webkit-transform:matrix(.1,0,0,.1,200,0);transform:matrix(.1,0,0,.1,200,0)}}@keyframes zoomOutRight{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475)translate(-42px);transform:scale3d(.475,.475,.475)translate(-42px)}to{opacity:0;-webkit-transform:matrix(.1,0,0,.1,200,0);transform:matrix(.1,0,0,.1,200,0)}}.animate__zoomOutRight{-webkit-transform-origin:100%;transform-origin:100%;-webkit-animation-name:zoomOutRight;animation-name:zoomOutRight}@-webkit-keyframes 
zoomOutUp{40%{opacity:1;-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19);-webkit-transform:scale3d(.475,.475,.475)translateY(60px);transform:scale3d(.475,.475,.475)translateY(60px)}to{opacity:0;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1);-webkit-transform:translateY(-200px)scale3d(.1,.1,.1);transform:translateY(-200px)scale3d(.1,.1,.1)}}@keyframes zoomOutUp{40%{opacity:1;-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19);-webkit-transform:scale3d(.475,.475,.475)translateY(60px);transform:scale3d(.475,.475,.475)translateY(60px)}to{opacity:0;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1);-webkit-transform:translateY(-200px)scale3d(.1,.1,.1);transform:translateY(-200px)scale3d(.1,.1,.1)}}.animate__zoomOutUp{-webkit-transform-origin:bottom;transform-origin:bottom;-webkit-animation-name:zoomOutUp;animation-name:zoomOutUp}@-webkit-keyframes slideInDown{0%{visibility:visible;-webkit-transform:translateY(-100%);transform:translateY(-100%)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes slideInDown{0%{visibility:visible;-webkit-transform:translateY(-100%);transform:translateY(-100%)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__slideInDown{-webkit-animation-name:slideInDown;animation-name:slideInDown}@-webkit-keyframes slideInLeft{0%{visibility:visible;-webkit-transform:translate(-100%);transform:translate(-100%)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes slideInLeft{0%{visibility:visible;-webkit-transform:translate(-100%);transform:translate(-100%)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__slideInLeft{-webkit-animation-name:slideInLeft;animation-name:slideInLeft}@-webkit-keyframes slideInRight{0%{visibility:visible;-webkit-transform:translate(100%);transform:translate(100%)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes slideInRight{0%{visibility:visible;-webkit-transform:translate(100%);transform:translate(100%)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__slideInRight{-webkit-animation-name:slideInRight;animation-name:slideInRight}@-webkit-keyframes slideInUp{0%{visibility:visible;-webkit-transform:translateY(100%);transform:translateY(100%)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes slideInUp{0%{visibility:visible;-webkit-transform:translateY(100%);transform:translateY(100%)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.animate__slideInUp{-webkit-animation-name:slideInUp;animation-name:slideInUp}@-webkit-keyframes slideOutDown{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translateY(100%);transform:translateY(100%)}}@keyframes slideOutDown{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translateY(100%);transform:translateY(100%)}}.animate__slideOutDown{-webkit-animation-name:slideOutDown;animation-name:slideOutDown}@-webkit-keyframes slideOutLeft{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translate(-100%);transform:translate(-100%)}}@keyframes 
slideOutLeft{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translate(-100%);transform:translate(-100%)}}.animate__slideOutLeft{-webkit-animation-name:slideOutLeft;animation-name:slideOutLeft}@-webkit-keyframes slideOutRight{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translate(100%);transform:translate(100%)}}@keyframes slideOutRight{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translate(100%);transform:translate(100%)}}.animate__slideOutRight{-webkit-animation-name:slideOutRight;animation-name:slideOutRight}@-webkit-keyframes slideOutUp{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translateY(-100%);transform:translateY(-100%)}}@keyframes slideOutUp{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translateY(-100%);transform:translateY(-100%)}}.animate__slideOutUp{-webkit-animation-name:slideOutUp;animation-name:slideOutUp}code[class*=language-],pre[class*=language-]{color:#000;text-shadow:0 1px #fff;text-align:left;white-space:pre;word-spacing:normal;word-break:normal;word-wrap:normal;-moz-tab-size:4;-o-tab-size:4;tab-size:4;-webkit-hyphens:none;-moz-hyphens:none;-ms-hyphens:none;hyphens:none;background:0 0;font-family:Consolas,Monaco,Andale Mono,Ubuntu Mono,monospace;font-size:1em;line-height:1.5}pre[class*=language-]::-moz-selection,pre[class*=language-] ::-moz-selection,code[class*=language-]::-moz-selection,code[class*=language-] ::-moz-selection{text-shadow:none;background:#b3d4fc}pre[class*=language-]::selection,pre[class*=language-] ::selection,code[class*=language-]::selection,code[class*=language-] ::selection{text-shadow:none;background:#b3d4fc}@media print{code[class*=language-],pre[class*=language-]{text-shadow:none}}pre[class*=language-]{margin:.5em 0;padding:1em;overflow:auto}:not(pre)>code[class*=language-],pre[class*=language-]{background:#f5f2f0}:not(pre)>code[class*=language-]{white-space:normal;border-radius:.3em;padding:.1em}.token.comment,.token.prolog,.token.doctype,.token.cdata{color:#708090}.token.punctuation{color:#999}.token.namespace{opacity:.7}.token.property,.token.tag,.token.boolean,.token.number,.token.constant,.token.symbol,.token.deleted{color:#905}.token.selector,.token.attr-name,.token.string,.token.char,.token.builtin,.token.inserted{color:#690}.token.operator,.token.entity,.token.url,.language-css .token.string,.style .token.string{color:#9a6e3a;background:#ffffff80}.token.atrule,.token.attr-value,.token.keyword{color:#07a}.token.function,.token.class-name{color:#dd4a68}.token.regex,.token.important,.token.variable{color:#e90}.token.important,.token.bold{font-weight:700}.token.italic{font-style:italic}.token.entity{cursor:help}pre[class*=language-].line-numbers{counter-reset:linenumber;padding-left:3.8em;position:relative}pre[class*=language-].line-numbers>code{white-space:inherit;position:relative}.line-numbers .line-numbers-rows{pointer-events:none;width:3em;letter-spacing:-1px;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;border-right:1px solid #999;font-size:100%;position:absolute;top:0;left:-3.8em}.line-numbers-rows>span{counter-increment:linenumber;display:block}.line-numbers-rows>span:before{content:counter(linenumber);color:#999;text-align:right;padding-right:.8em;display:block}#vl-menu{z-index:1200;position:relative}.vlmenu,.vlmenu 
ul{margin:0;padding:0;list-style:none}.vlmenu>li{float:left;padding:0 20px}.vlmenu li li{position:relative}.vlmenu li>a{display:block}.vlmenu a i{padding-right:10px;font-size:10px}.vlmenu .search a i,.vlmenu a i.arrow{padding-right:0}.vlmenu>li>a>i.arrow{margin-top:3px;margin-left:10px}.vlmenu a{color:#222;text-decoration:none}.vlmenu>li>a{padding:20px 0}.vlmenu>li{padding:0 20px}.vlmenu>li>a:hover,.vlmenu>li.active>a{color:#000}.vlmenu ul{width:190px;z-index:1300;display:none;position:absolute}.vlmenu>li.menu-right>ul ul{left:-190px}.vlmenu ul a{color:#222;padding:10px 20px}.vlmenu li:hover>ul{display:block}.vlmenu ul ul{top:0;left:190px}.container-fluid .vlmenu>li.menu-right>ul,.container-fluid .vlmenu>li.menu-right>div{right:0}.vlmenu li a i.arrow{float:right;margin-top:5px}.vlmenu li.search a i.arrow{display:none}.vlmenu ul a:hover,.mega-menu ol li a:hover{background:#ffffff1a}.vlmenu li>div{padding:10px}.mega-menu ol{padding-left:0;list-style:none}.mega-menu ol li a{color:#222;padding:10px 20px}.mega-menu h1,.mega-menu h2,.mega-menu h3,.mega-menu h4,.mega-menu h5,.mega-menu h6,.mega-menu p{padding-left:20px;font-weight:400}.vlmenu .full-nav,.vlmenu .half-nav,.vlmenu .quarter-nav{z-index:1300;display:none;position:absolute}.vlmenu>li:hover>div{display:block}.vlmenu .full-nav{width:100%;left:0;right:0}.vlmenu .half-nav{width:50%;left:auto;right:auto}.vlmenu .quarter-nav{width:25%;left:auto;right:auto}.vlmenu li.menu-right .half-nav{width:50%;left:auto;right:0}.vlmenu li.menu-right .quarter-nav{width:25%;left:auto;right:0}.vlmenu li>div input{box-shadow:none;border:none;border-radius:0}.vlmenu.light-sub-menu input{border:1px solid #ddd}.vlmenu li>div input:focus,.vlmenu.light-sub-menu input:focus{box-shadow:none}.light-sub-menu .mega-menu li a span,.dark-sub-menu .mega-menu li a span{color:#bbb;display:block}.light-sub-menu .mega-menu li a:hover span,.dark-sub-menu .mega-menu li a:hover span{color:#fff}.light-sub-menu .mega-menu li a span,.light-sub-menu .mega-menu li a:hover span,.dark-sub-menu .mega-menu li a span{-webkit-transition:all .3s;-moz-transition:all .3s;transition:all .3s}.vlmenu .col1,.vlmenu .col2,.vlmenu .col3,.vlmenu .col4,.vlmenu .col5,.vlmenu .col6{float:left}.mega-menu .col1{width:100%}.mega-menu .col2{width:50%}.mega-menu .col3{width:33.33%}.mega-menu .col4{width:25%}.mega-menu .col5{width:20%}.mega-menu .col6{width:16.66%}.grid .col1,.grid .col2,.grid .col3,.grid .col4,.grid .col5,.grid .col6{margin-right:1%;padding:5px;display:inline-block}.grid.gray .col1,.grid.gray .col2,.grid.gray .col3,.grid.gray .col4,.grid.gray .col5,.grid.gray .col6{background:#ddd}.grid .mega-menu .col1{width:99%}.grid .mega-menu .col2{width:49%}.grid .mega-menu .col3{width:32.33%}.grid .mega-menu .col4{width:24%}.grid .mega-menu .col5{width:19%}.grid .mega-menu .col6{width:15.66%}.menu-row{width:100%;margin-bottom:10px;margin-left:.5%;display:inline-block}.menu-row:last-child{margin-bottom:0}.dark-sub-menu ul,.dark-sub-menu li>div{background:#222}.dark-sub-menu ul a,.dark-sub-menu li>div a,.dark-sub-menu li>div h1,.dark-sub-menu li>div h2,.dark-sub-menu li>div h3,.dark-sub-menu li>div h4,.dark-sub-menu li>div h5,.dark-sub-menu li>div h6,.dark-sub-menu li>div p{color:#fff}.light-sub-menu ul,.light-sub-menu li>div{background:#fff;box-shadow:0 0 1px #d1d1d1}.light-sub-menu ul a,.light-sub-menu li>div a,.light-sub-menu li>div h1,.light-sub-menu li>div h2,.light-sub-menu li>div h3,.light-sub-menu li>div h4,.light-sub-menu li>div h5,.light-sub-menu li>div h6,.light-sub-menu li>div 
p{color:#222}.light-sub-menu ul a:hover,.light-sub-menu ol li a:hover{color:#fff}.light-sub-menu ul a:hover,.light-sub-menu ol li a:hover{color:#fff;background:#222}.container header{width:100%;display:inline-block}.container header .vl-logo{margin-left:20px}.container header .nav-btn{margin-right:20px}.container header.float-menu{width:100%;margin-top:50px;display:inline-block}.container header.float-menu .vl-logo{margin-left:20px}.container header.float-menu .nav-btn{margin-right:20px}.center-menu{text-align:center}.center-menu .center-logo,.center-menu .vlmenu{display:inline-block}.center-menu .vlmenu ul,.center-menu .vlmenu li>div{text-align:left!important}.no-bg{background:0 0}.dark-menu{background:#000}.dark-menu a{color:#fff}.menu-bg{background-position:100% 100%!important;background-repeat:no-repeat!important;background-size:auto!important}.grid .mega-menu .col1 img,.grid .mega-menu .col2 img,.grid .mega-menu .col3 img,.grid .mega-menu .col4 img,.grid .mega-menu .col5 img,.grid .mega-menu .col6 img{width:100%;height:auto;margin-bottom:10px}.contact-form{padding:0 20px}.contact-form input,.contact-form textarea{width:100%;height:35px;color:#222;background:#fff;border:1px solid #ddd;margin-bottom:10px;padding-left:10px;padding-right:10px;display:inline-block}input[type=checkbox]{width:auto;height:auto;float:left;margin-right:10px}.contact-form input:focus,.contact-form textarea:focus{outline:0}.contact-form textarea{height:150px}.vl-btn{border:none;padding:10px 20px}@media (max-width:1480px){#blog-menu>ul{right:-100px}}@media (max-width:1024px){.menu,.menu ul,div.mega-menu{display:inline-block;background:#333!important}.mega-menu p{padding-left:40px!important}.vl-logo{margin-top:10px}.vlmenu,.vlmenu ul,div.mega-menu{display:inline-block;background:#333!important}.vlmenu a{color:#fff!important}.vlmenu li{z-index:1300;background:#333;position:relative}.vlmenu>li{width:100%;border-bottom:1px solid #ffffff0d;padding:0;display:block}.vlmenu>li>a,.vlmenu>li>a>i.arrow{line-height:50px}.vlmenu>li>a{padding:0 20px}.vlmenu>li a:hover{background:#111}.vlmenu>li>a>i.arrow{margin-right:0}.vlmenu ul{width:100%;border:none;display:none;position:relative;overflow:hidden}.hidden-sub{display:none!important}.visible-sub{visibility:visible;opacity:1;display:block!important}.vlmenu ul a,.vlmenu li>div,.mega-menu ol li a{padding:10px 20px}.mega-menu ol li a,.vlmenu ul a{border-top:1px solid #ffffff0d}.vlmenu li>div{padding:10px 0}.vlmenu ul ul,.vlmenu>li.menu-right>ul ul{left:0}.vlmenu li li a{padding-left:40px}.vlmenu li li li a{padding-left:60px}.vlmenu li li li li a{padding-left:80px}.vlmenu li li li li li a{padding-left:100px}.vlmenu li li li li li li a{padding-left:120px}.vlmenu .full-nav,.vlmenu .half-nav,.vlmenu .quarter-nav{display:block;position:relative}.vlmenu .col1,.vlmenu .col2,.vlmenu .col3,.vlmenu .col4,.vlmenu .col5,.vlmenu .col6,.mega-menu ol,.grid .col1,.grid .col2,.grid .col3,.grid .col4,.grid .col5,.grid .col6{width:100%!important}.grid .col1,.grid .col2,.grid .col3,.grid .col4,.grid .col5,.grid .col6{margin-bottom:10px;display:inline-block}.vlmenu li>div{border:none}.vlmenu h1,.vlmenu h2,.vlmenu h3,.vlmenu h4,.vlmenu h5,.vlmenu h6{color:#fff;margin:10px 20px}.vlmenu p{color:#fff;padding:10px 20px}.vlmenu ul a:hover,.mega-menu ol li a:hover{color:#fff;background:#111}.mega-menu ol li a{margin-bottom:0}.wrapper,.vlmenu .half-nav,.vlmenu .quarter-nav,.vlmenu li.menu-right .half-nav,.vlmenu li.menu-right .quarter-nav{width:100%}.vlmenu{width:100%;display:none}.light-sub-menu ul 
a,.light-sub-menu li>div a,.light-sub-menu li>div h1,.light-sub-menu li>div h2,.light-sub-menu li>div h3,.light-sub-menu li>div h4,.light-sub-menu li>div h5,.light-sub-menu li>div h6,.light-sub-menu li>div p,.light-sub-menu li>div span{color:#fff}.light-sub-menu ul,.light-sub-menu li>div{box-shadow:none}.vlmenu ul,.vlmenu li>div{top:auto}.vlmenu>li>a{height:53px;border-radius:0;border:none!important}.vlmenu>li,.menu-row{margin:0}.center-menu .vlmenu ul,.center-menu .vlmenu li>div{top:0}.center-menu{text-align:left}.center-menu .vlmenu{display:none}.nav-btn{float:right;cursor:pointer;margin-top:10px;margin-bottom:10px;margin-right:0;display:block}.nav-btn .bars{vertical-align:bottom;height:2px;width:30px;background-color:#333;display:inline-block;position:relative;top:-5px}.nav-btn .bars:before,.nav-btn .bars:after{content:"";width:30px;height:2px;background-color:#333;display:inline-block;position:absolute;top:-8px}.nav-btn .bars:after{top:8px}#blog-menu>ul{right:0}}.introjs-overlay{box-sizing:content-box;z-index:999999;opacity:0;background-color:#000;background:-moz-radial-gradient( center,ellipse farthest-corner,#0006 0,#000000e6 100% );background:-webkit-gradient( radial,center center,0px,center center,100%,color-stop(0%,#0006),color-stop(100%,#000000e6));background:-webkit-radial-gradient( center,ellipse farthest-corner,#0006 0,#000000e6 100% );background:-o-radial-gradient( center,ellipse farthest-corner,#0006 0,#000000e6 100% );background:-ms-radial-gradient( center,ellipse farthest-corner,#0006 0,#000000e6 100% );background:radial-gradient( center,ellipse farthest-corner,#0006 0,#000000e6 100% );filter:"progid:DXImageTransform.Microsoft.gradient(startColorstr='#66000000',endColorstr='#e6000000',GradientType=1)";-ms-filter:"progid:DXImageTransform.Microsoft.Alpha(Opacity=50)";filter:alpha(opacity=50);-o-transition:all .3s ease-out;-webkit-transition:all .3s ease-out;-moz-transition:all .3s ease-out;-ms-transition:all .3s ease-out;transition:all .3s ease-out;position:absolute}.introjs-fixParent{z-index:auto!important;opacity:1!important;-webkit-transform:none!important;-moz-transform:none!important;-ms-transform:none!important;-o-transform:none!important;transform:none!important}.introjs-showElement,tr.introjs-showElement>td,tr.introjs-showElement>th{z-index:9999999!important}.introjs-disableInteraction{opacity:0;filter:alpha(opacity=0);background-color:#fff;position:absolute;z-index:99999999!important}.introjs-relativePosition,tr.introjs-showElement>td,tr.introjs-showElement>th{position:relative}.introjs-helperLayer{box-sizing:content-box;z-index:9999998;-o-transition:all .3s ease-out;background-color:#ffffffe6;border:1px solid #00000080;border-radius:4px;-webkit-transition:all .3s ease-out;-moz-transition:all .3s ease-out;-ms-transition:all .3s ease-out;transition:all .3s ease-out;position:absolute;box-shadow:0 2px 15px #0006}.introjs-tooltipReferenceLayer{box-sizing:content-box;visibility:hidden;z-index:100000000;-o-transition:all .3s ease-out;background-color:#0000;-webkit-transition:all .3s ease-out;-moz-transition:all .3s ease-out;-ms-transition:all .3s ease-out;transition:all .3s ease-out;position:absolute}.introjs-helperLayer *,.introjs-helperLayer :before,.introjs-helperLayer :after{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-ms-box-sizing:content-box;-o-box-sizing:content-box}.introjs-helperNumberLayer{box-sizing:content-box;visibility:visible;color:#fff;text-align:center;text-shadow:1px 1px 1px 
#0000004d;background:-moz-linear-gradient(#cf0404 0%,#ff3019 100%);background:-ms-linear-gradient(top,#ff3019 0%,#cf0404 100%);width:20px;height:20px;filter:"progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3019', endColorstr='#cf0404', GradientType=0)";filter:"progid:DXImageTransform.Microsoft.Shadow(direction=135, strength=2, color=ff0000)";background:-moz-linear-gradient(#cf0404 0%,#ff3019 100%);background:-webkit-linear-gradient(#cf0404 0%,#ff3019 100%);background:-webkit-gradient(linear,0 0,0 100%,from(#ff3019),to(#cf0404));background:-moz-linear-gradient(#cf0404 0%,#ff3019 100%);background:-o-linear-gradient(#cf0404 0%,#ff3019 100%);background:linear-gradient(#ff3019 0%,#cf0404 100%);border:3px solid #fff;border-radius:50%;padding:2px;font-family:Arial,verdana,tahoma;font-size:13px;font-weight:700;line-height:20px;position:absolute;top:-16px;left:-16px;box-shadow:0 2px 5px #0006;z-index:2147483647!important}.introjs-arrow{content:"";border:5px solid #0000;position:absolute}.introjs-arrow.top{border-bottom-color:#fff;top:-10px}.introjs-arrow.top-right{border-bottom-color:#fff;top:-10px;right:10px}.introjs-arrow.top-middle{border-bottom-color:#fff;margin-left:-5px;top:-10px;left:50%}.introjs-arrow.right{border-left-color:#fff;top:10px;right:-10px}.introjs-arrow.right-bottom{border-left-color:#fff;bottom:10px;right:-10px}.introjs-arrow.bottom{border-top-color:#fff;bottom:-10px}.introjs-arrow.bottom-right{border-top-color:#fff;bottom:-10px;right:10px}.introjs-arrow.bottom-middle{border-top-color:#fff;margin-left:-5px;bottom:-10px;left:50%}.introjs-arrow.left{border-right-color:#fff;top:10px;left:-10px}.introjs-arrow.left-bottom{border-right-color:#fff;bottom:10px;left:-10px}.introjs-tooltip{box-sizing:content-box;visibility:visible;min-width:200px;max-width:300px;-o-transition:opacity .1s ease-out;background-color:#fff;border-radius:3px;padding:10px;-webkit-transition:opacity .1s ease-out;-moz-transition:opacity .1s ease-out;-ms-transition:opacity .1s ease-out;transition:opacity .1s ease-out;position:absolute;box-shadow:0 1px 10px #0006}.introjs-tooltipbuttons{text-align:right;white-space:nowrap}.introjs-button{box-sizing:content-box;text-shadow:1px 1px #fff;color:#333;white-space:nowrap;cursor:pointer;background-color:#ececec;background-image:linear-gradient(#f4f4f4,#ececec);-webkit-background-clip:padding;-moz-background-clip:padding;-o-background-clip:padding-box;zoom:1;background-image:-webkit-gradient(linear,0 0,0 100%,from(#f4f4f4),to(#ececec));background-image:-moz-linear-gradient(#f4f4f4,#ececec);background-image:-o-linear-gradient(#f4f4f4,#ececec);border:1px solid #d4d4d4;-webkit-border-radius:.2em;-moz-border-radius:.2em;border-radius:.2em;outline:none;margin:10px 0 0;padding:.3em .8em;font:11px sans-serif;text-decoration:none;display:inline;position:relative;overflow:visible}.introjs-button:hover{border-color:#bcbcbc;text-decoration:none;box-shadow:0 1px 1px #e3e3e3}.introjs-button:focus,.introjs-button:active{background-image:-webkit-gradient(linear,0 0,0 100%,from(#ececec),to(#f4f4f4));background-image:-moz-linear-gradient(#ececec,#f4f4f4);background-image:-o-linear-gradient(#ececec,#f4f4f4);background-image:linear-gradient(#ececec,#f4f4f4)}.introjs-button::-moz-focus-inner{border:0;padding:0}.introjs-skipbutton{box-sizing:content-box;color:#7a7a7a;margin-right:5px}.introjs-prevbutton{border-right:none;-webkit-border-radius:.2em 0 0 .2em;-moz-border-radius:.2em 0 0 .2em;border-radius:.2em 0 0 .2em}.introjs-prevbutton.introjs-fullbutton{border:1px solid 
#d4d4d4;-webkit-border-radius:.2em;-moz-border-radius:.2em;border-radius:.2em}.introjs-nextbutton{-webkit-border-radius:0 .2em .2em 0;-moz-border-radius:0 .2em .2em 0;border-radius:0 .2em .2em 0}.introjs-nextbutton.introjs-fullbutton{-webkit-border-radius:.2em;-moz-border-radius:.2em;border-radius:.2em}.introjs-disabled,.introjs-disabled:hover,.introjs-disabled:focus{color:#9a9a9a;box-shadow:none;cursor:default;background-color:#f4f4f4;background-image:none;border-color:#d4d4d4;text-decoration:none}.introjs-hidden{display:none}.introjs-bullets{text-align:center}.introjs-bullets ul{box-sizing:content-box;clear:both;margin:15px auto 0;padding:0;display:inline-block}.introjs-bullets ul li{box-sizing:content-box;float:left;margin:0 2px;list-style:none}.introjs-bullets ul li a{box-sizing:content-box;width:6px;height:6px;cursor:pointer;background:#ccc;-webkit-border-radius:10px;-moz-border-radius:10px;border-radius:10px;text-decoration:none;display:block}.introjs-bullets ul li a:hover{background:#999}.introjs-bullets ul li a.active{background:#999}.introjs-progress{box-sizing:content-box;height:10px;background-color:#ecf0f1;border-radius:4px;margin:10px 0 5px;overflow:hidden}.introjs-progressbar{box-sizing:content-box;float:left;width:0%;height:100%;text-align:center;background-color:#08c;font-size:10px;line-height:10px}.introjsFloatingElement{height:0;width:0;position:absolute;top:50%;left:50%}.introjs-fixedTooltip{position:fixed}.introjs-hint{box-sizing:content-box;width:20px;height:15px;cursor:pointer;background:0 0;position:absolute}.introjs-hint:focus{border:0;outline:0}.introjs-hidehint{display:none}.introjs-fixedhint{position:fixed}.introjs-hint:hover>.introjs-hint-pulse{border:5px solid #3c3c3c91}.introjs-hint-pulse{box-sizing:content-box;width:10px;height:10px;z-index:10;-o-transition:all .2s ease-out;background-color:#8888883d;border:5px solid #3c3c3c45;-webkit-border-radius:30px;-moz-border-radius:30px;border-radius:30px;-webkit-transition:all .2s ease-out;-moz-transition:all .2s ease-out;-ms-transition:all .2s ease-out;transition:all .2s ease-out;position:absolute}.introjs-hint-no-anim .introjs-hint-dot{-webkit-animation:none;-moz-animation:none;animation:none}.introjs-hint-dot{box-sizing:content-box;height:50px;width:50px;z-index:1;opacity:0;background:0 0;border:10px solid #9292925c;-webkit-border-radius:60px;-moz-border-radius:60px;border-radius:60px;-webkit-animation:introjspulse 3s ease-out infinite;-moz-animation:introjspulse 3s ease-out infinite;animation:introjspulse 3s ease-out infinite;position:absolute;top:-25px;left:-25px}@-webkit-keyframes introjspulse{0%{opacity:0;-webkit-transform:scale(0)}25%{opacity:.1;-webkit-transform:scale(0)}50%{opacity:.3;-webkit-transform:scale(.1)}75%{opacity:.5;-webkit-transform:scale(.5)}to{opacity:0;-webkit-transform:scale(1)}}@-moz-keyframes introjspulse{0%{opacity:0;-moz-transform:scale(0)}25%{opacity:.1;-moz-transform:scale(0)}50%{opacity:.3;-moz-transform:scale(.1)}75%{opacity:.5;-moz-transform:scale(.5)}to{opacity:0;-moz-transform:scale(1)}}@keyframes 
introjspulse{0%{opacity:0;transform:scale(0)}25%{opacity:.1;transform:scale(0)}50%{opacity:.3;transform:scale(.1)}75%{opacity:.5;transform:scale(.5)}to{opacity:0;transform:scale(1)}}@font-face{font-family:iconfont;src:url(iconfont.960c72b2.eot#iefix)format("embedded-opentype"),url(iconfont.a07d77e0.woff2)format("woff2"),url(iconfont.b2a06094.woff)format("woff")}.icon{line-height:1}.icon:before{vertical-align:top;font-style:normal;font-family:iconfont!important;font-weight:400!important}.icon-close-fullscreen:before{content:""}.icon-open-fullscreen:before{content:""}.icon-output:before{content:""}.icon-coverage:before{content:""}.icon-more:before{content:""}.icon-copy:before{content:""}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;-ms-text-size-adjust:100%;font-family:Nunito,sans-serif;font-size:16px;font-weight:400;line-height:1.6;overflow-x:hidden}html{scroll-behavior:smooth}a{color:#09c;word-break:break-word;text-decoration:none;transition:all .3s}a:hover{color:#069;text-decoration:none}button,a,.btn{outline:0;transition:all .3s}button:focus,a:focus,.btn:focus{outline:none;text-decoration:none;box-shadow:none!important}img{max-width:100%;height:auto}h1,.h1,h2,.h2,h3,.h3,h4,.h4,h5,.h5,h6,.h6{font-family:Nunito,sans-serif;font-weight:700;transition:all .3s}h1 a,.h1 a,h2 a,.h2 a,h3 a,.h3 a,h4 a,.h4 a,h5 a,.h5 a,h6 a,.h6 a{color:#242424}h1 a:hover,.h1 a:hover,h2 a:hover,.h2 a:hover,h3 a:hover,.h3 a:hover,h4 a:hover,.h4 a:hover,h5 a:hover,.h5 a:hover,h6 a:hover,.h6 a:hover{color:#09c;text-decoration:none}@media (min-width:576px){h1,.h1,h2,.h2,h3,.h3,h4,.h4,h5,.h5,h6,.h6{margin-top:-64px;padding-top:64px}}h1,.h1{font-size:32px}h2,.h2{font-size:28px}h3,.h3{font-size:24px}h4,.h4{font-size:20px}h5,.h5{font-size:18px}h6,.h6{font-size:16px}.section-gap{padding-top:3rem;padding-bottom:3rem;position:relative}@media (max-width:991px){h1,.h1{font-size:32px}h2,.h2{font-size:28px}h3,.h3{font-size:24px}h4,.h4{font-size:20px}h5,.h5{font-size:18px}h6,.h6{font-size:16px}.app-header+.section-gap{padding-top:5rem}}@media (max-width:767px){h1,.h1{font-size:28px}h2,.h2{font-size:26px}h3,.h3{font-size:24px}h4,.h4{font-size:20px}h5,.h5{font-size:18px}h6,.h6{font-size:16px}}@media (max-width:667px){h1,.h1{font-size:26px}h2,.h2{font-size:24px}h3,.h3{font-size:22px}h4,.h4{font-size:20px}h5,.h5{font-size:18px}h6,.h6{font-size:16px}}@media (min-width:769px){.section-gap{padding-top:7.5rem;padding-bottom:7.5rem}}.vlmenu>li>a:hover,.vlmenu>li.active>a{color:#09c}.hamburger{cursor:pointer;font:inherit;color:inherit;text-transform:none;background-color:#0000;border:0;margin:10px 0 0;padding:0;transition-property:opacity,filter;transition-duration:.15s;transition-timing-function:linear;display:inline-block;overflow:visible}.hamburger:hover{opacity:.7}.hamburger.is-active:hover{opacity:.7}.hamburger.is-active .hamburger-inner,.hamburger.is-active .hamburger-inner:before,.hamburger.is-active .hamburger-inner:after{background-color:#000}@media screen and (prefers-color-scheme:dark){.hamburger.is-active .hamburger-inner,.hamburger.is-active .hamburger-inner:before,.hamburger.is-active .hamburger-inner:after{background-color:#fff}}.hamburger-box{width:20px;height:17px;display:inline-block;position:relative}.hamburger-inner{margin-top:-2px;display:block;top:50%}.hamburger-inner,.hamburger-inner:before,.hamburger-inner:after{width:20px;height:2px;background-color:#000;border-radius:4px;transition-property:transform;transition-duration:.15s;transition-timing-function:ease;position:absolute}@media 
screen and (prefers-color-scheme:dark){.hamburger-inner,.hamburger-inner:before,.hamburger-inner:after{background-color:#fff}}.hamburger-inner:before,.hamburger-inner:after{content:"";display:block}.hamburger-inner:before{top:-10px}.hamburger-inner:after{bottom:-10px}.hamburger--slider .hamburger-inner{top:2px}.hamburger--slider .hamburger-inner:before{width:14px;transition-property:transform,opacity;transition-duration:.15s;transition-timing-function:ease;top:7px}.hamburger--slider .hamburger-inner:after{top:14px}.hamburger--slider.is-active .hamburger-inner{transform:translateY(7px)rotate(45deg)}.hamburger--slider.is-active .hamburger-inner:before{opacity:0;transform:rotate(-45deg)translate(-5.71429px,-6px)}.hamburger--slider.is-active .hamburger-inner:after{transform:translateY(-14px)rotate(-90deg)}.vl-accordion{font-family:Nunito,sans-serif!important}@media (max-width:1024px){.vlmenu{margin-top:10px}.app-header{width:100%;z-index:10;background:#fff;padding:.3em 0;position:absolute;top:auto}.app-header .logo-dark{height:50px;display:block}.app-header .logo-light{display:none}}@media screen and (max-width:1024px) and (prefers-color-scheme:dark){.app-header{background:#000}}@media (max-width:1024px){.app-header.transparent-header .logo-light{display:none}.header-links.navbar{position:absolute;right:15px}.vlmenu .vl-accordion .fa-angle-down{display:none}.vlmenu li li a{font-family:Nunito,sans-serif}.vlmenu>li>a{cursor:pointer}.mega-menu h3{font-size:14px;font-weight:700}.vlmenu .vl-accordion .fa-angle-down,.nav-extra-link{display:none}}@media (max-width:767px){.header-links.navbar{right:25px}}@media (min-width:1025px){.app-header{width:100%;z-index:10;background:#fff;transition:all .1s;position:absolute;top:auto}.app-header .navbar-brand{margin-top:0}.app-header .logo-light{display:none}.app-header.transparent-header .logo-light{display:block}.app-header.sticky-nav{z-index:1030;background:#fff;padding:0;animation:smoothScroll .3s forwards;position:fixed;top:0;bottom:auto;box-shadow:0 1px 10px #97a4af26}@keyframes smoothScroll{0%{transform:translateY(-100px)}to{transform:translateY(0)}}.transparent-header{background:0 0;padding-top:2rem;top:auto}.transparent-header .vlmenu>li>a{color:#fff}.transparent-header .vlmenu>li>a:hover{opacity:.7}.transparent-header .logo-dark{height:70px;display:none}.transparent-header.sticky-nav{background:#fff;top:0;box-shadow:0 1px 10px #97a4af26}.transparent-header.sticky-nav .vlmenu>li>a{color:#242424}.transparent-header.sticky-nav .vlmenu>li>a:hover{color:#09c}.transparent-header.sticky-nav .logo-light{display:none}.transparent-header.sticky-nav .logo-dark{height:50px;display:block}}@media screen and (min-width:1025px) and (prefers-color-scheme:dark){.transparent-header.sticky-nav{background:#000;box-shadow:0 2px 4px -1px #0003,0 4px 5px #00000024,0 1px 10px #0000001f}.transparent-header.sticky-nav .vlmenu>li>a{color:#fff}}@media (min-width:1025px){.transparent-header-dark-nav .vlmenu>li>a{color:#242424}.transparent-header-dark-nav .vlmenu>li>a:hover{color:#09c}}@media screen and (min-width:1025px) and (prefers-color-scheme:dark){.transparent-header-dark-nav .vlmenu>li>a{color:#fff}}@media (min-width:1025px){.transparent-header-dark-nav .logo-dark{display:block}.transparent-header-dark-nav .logo-light{display:none!important}.vlmenu>li>a>i.arrow{display:none;position:relative;top:2px}.light-sub-menu ul,.light-sub-menu li>div{box-shadow:0 1px 15px 1px #45414e1a}}@media screen and (min-width:1025px) and (prefers-color-scheme:dark){.light-sub-menu 
ul,.light-sub-menu li>div{color:#fff;background-color:#141414}}@media (min-width:1025px){.vlmenu ul{width:250px;padding-top:15px;padding-bottom:15px}.vlmenu ul a{padding:12px 30px;font-size:14px}.vlmenu ul ul{left:251px}.vlmenu li>div{padding:30px}.vlmenu li>div p{font-size:12px}.vlmenu>li.menu-right>ul ul{left:-251px}.vlmenu h3{margin-bottom:20px;font-size:14px;font-weight:700}.vlmenu .mega-menu ol li a{border-radius:6px;padding:12px 20px;font-size:14px}.vlmenu>li{padding:0 10px}}@media screen and (min-width:1025px) and (prefers-color-scheme:dark){.light-sub-menu ul a,.light-sub-menu ol li a{color:#fff}}@media (min-width:1025px){.light-sub-menu ul a:hover,.light-sub-menu ol li a:hover{color:#fff;background:#09c}}@media screen and (min-width:1025px) and (prefers-color-scheme:dark){.light-sub-menu ul a:hover,.light-sub-menu ol li a:hover{color:#fff}}@media (min-width:1025px){.vlmenu>li>a{cursor:pointer;padding:19.5px 0}.nav-btn{display:none}.vlmenu .half-nav,.vlmenu .full-nav,.vlmenu ul{border-radius:6px}.nav-extra-link .mt-3{margin-top:.85rem!important}}.overlay-nav{z-index:6;position:relative;top:1.7rem}.toggle-wrap{height:16px;width:20px;cursor:pointer;z-index:100;-webkit-transition:opacity .25s;transition:opacity .25s;position:absolute;top:3%;right:0}.toggle-wrap.active .top{background:#fff;-webkit-transform:translateY(7px)rotate(45deg);transform:translateY(7px)rotate(45deg)}.toggle-wrap.active .middle{opacity:0;background:#fff}.toggle-wrap.active .bottom{background:#fff;-webkit-transform:translateY(-7px)translate(0)rotate(-45deg);transform:translateY(-7px)translate(0)rotate(-45deg)}.toggle-wrap span{height:2px;width:100%;cursor:pointer;background:#242424;border:none;-webkit-transition:all .35s;transition:all .35s;position:absolute;top:0;left:0}.toggle-wrap span:nth-of-type(2){width:60%;top:7px}.toggle-wrap span:nth-of-type(3){top:14px}.overlay{width:100%;height:0;opacity:0;visibility:hidden;background:#242424;-webkit-transition:opacity .35s,visibility .35s,height .35s;transition:opacity .35s,visibility .35s,height .35s;position:fixed;top:0;left:0;overflow:hidden}.overlay.open{opacity:1;visibility:visible;height:100%}.overlay.open li{-webkit-animation:fadeInDown .5s .35s forwards;animation:fadeInDown .5s .35s forwards}.overlay.open li:nth-of-type(2){-webkit-animation-delay:.4s;animation-delay:.4s}.overlay.open li:nth-of-type(3){-webkit-animation-delay:.45s;animation-delay:.45s}.overlay.open li:nth-of-type(4){-webkit-animation-delay:.5s;animation-delay:.5s}.overlay.open li:nth-of-type(5){-webkit-animation-delay:.55s;animation-delay:.55s}.overlay.open li:nth-of-type(6){-webkit-animation-delay:.6s;animation-delay:.6s}.overlay.open li:nth-of-type(7){-webkit-animation-delay:.65s;animation-delay:.65s}.overlay.open li:nth-of-type(8){-webkit-animation-delay:.7s;animation-delay:.7s}.overlay nav{height:60%;text-align:center;font-size:1.5rem;position:relative;top:45%;-webkit-transform:translateY(-50%);transform:translateY(-50%)}.overlay ul{height:100%;margin:0 auto;padding:0;list-style:none;display:inline-block;position:relative}.overlay ul li{height:10%;min-height:50px;opacity:0;display:block;position:relative}.overlay ul li a{color:#fff;padding:0 0 10px;text-decoration:none;display:block;position:relative;overflow:hidden}.overlay ul li a:hover{color:#09c}.overlay ul li a:hover:after,.overlay ul li a:focus:after,.overlay ul li a:active:after{width:100%}.overlay ul li a:after{content:"";width:0%;height:1px;background:#09c;-webkit-transition:all .35s;transition:all 
.35s;position:absolute;bottom:5px;left:50%;-webkit-transform:translate(-50%);transform:translate(-50%)}.overlay-nav-social-link a{color:#8c8c8c;opacity:0;margin:0 .8rem;font-size:18px}.overlay-nav-social-link a:hover{color:#09c}.overlay-nav-social-link.open a{-webkit-animation:fadeInDown .6s .8s forwards;animation:fadeInDown .6s .8s forwards}@-webkit-keyframes fadeInDown{0%{opacity:0;-webkit-transform:translateY(-100%);transform:translateY(-100%)}to{opacity:1;-webkit-transform:translate(0,0);transform:translate(0,0)}}@keyframes fadeInDown{0%{opacity:0;-webkit-transform:translateY(-100%);transform:translateY(-100%)}to{opacity:1;-webkit-transform:translate(0,0);transform:translate(0,0)}}.fadeInDown{-webkit-animation-name:fadeInDown;animation-name:fadeInDown}.app-footer{border-top:1px solid #e8e8e8;padding:3rem 0;font-family:Arial,Helvetica,sans-serif}.app-footer .footer-link{margin-bottom:.5rem;padding:0}.app-footer .footer-link li{margin-bottom:.5rem;margin-right:15px;display:inline-block}.app-footer .footer-link li a{text-transform:uppercase;color:#8c8c8c;font-size:12px}.app-footer .footer-link li a:hover{color:#242424;text-decoration:none}.app-footer .footer-link li:last-child{margin-right:0}.app-footer .social-links li a{font-size:16px}.app-footer .copyright{color:#8c8c8c;margin:0;font-size:14px}@media screen and (prefers-color-scheme:dark){.app-footer{border-color:#414141}}.app-footer.bg-dark{color:#fff;padding:4rem 0}.app-footer.bg-dark .border-right{border-right:1px solid #616161!important}.app-footer.bg-dark .border-left{border-left:1px solid #616161!important}.app-footer.bg-dark .border-top{border-top:1px solid #616161!important}.app-footer.bg-dark .border-bottom{border-bottom:1px solid #616161!important}.app-secondary-footer{background:#171717;padding:2rem 0}.two-col-link li{width:50%;float:left;box-sizing:border-box;margin-right:0!important}.instagram-feed a{width:75px;height:75px;margin:0 .3rem .2rem 0;display:inline-block;overflow:hidden}.instagram-feed a img{width:100%;height:100%}.social-media-list{padding:0;list-style:none}.social-media-list a{text-transform:capitalize;color:#242424;margin-bottom:.5rem;font-size:18px;font-weight:700;display:inline-block}.social-media-list a:hover{color:#09c}.go-up-link i{font-weight:700}.go-up-link:hover{text-decoration:none}@media (max-width:767px){.app-footer .border-right,.app-footer.bg-dark .border-right{border-right:none!important}.app-footer,.app-footer.bg-dark{padding:2rem 0}.social-media-list a{font-size:14px}.footer-logo{max-height:32px}}img.footer-icon{width:28px;height:28px;margin:0}@media screen and (prefers-color-scheme:dark){code[class*=language-],pre[class*=language-]{color:#c5c8c6;text-shadow:0 1px #0000004d;direction:ltr;text-align:left;white-space:pre;word-spacing:normal;word-break:normal;-moz-tab-size:4;-o-tab-size:4;tab-size:4;-webkit-hyphens:none;-moz-hyphens:none;-ms-hyphens:none;hyphens:none;font-family:Inconsolata,Monaco,Consolas,Courier New,Courier,monospace;line-height:1.5}pre[class*=language-]{border-radius:.3em;margin:.5em 
0;padding:1em;overflow:auto}:not(pre)>code[class*=language-],pre[class*=language-]{background:#1d1f21}:not(pre)>code[class*=language-]{border-radius:.3em;padding:.1em}.token.comment,.token.prolog,.token.doctype,.token.cdata{color:#7c7c7c}.token.punctuation{color:#c5c8c6}.namespace{opacity:.7}.token.property,.token.keyword,.token.tag{color:#96cbfe}.token.class-name{color:#ffffb6;text-decoration:underline}.token.boolean,.token.constant{color:#9c9}.token.symbol,.token.deleted{color:#f92672}.token.number{color:#ff73fd}.token.selector,.token.attr-name,.token.string,.token.char,.token.builtin,.token.inserted{color:#a8ff60}.token.variable{color:#c6c5fe}.token.operator{color:#ededed;background-color:inherit}.token.entity{color:#ffffb6;cursor:help}.token.url{color:#96cbfe}.language-css .token.string,.style .token.string{color:#87c38a}.token.atrule,.token.attr-value{color:#f9ee98}.token.function{color:#dad085}.token.regex{color:#e9c062}.token.important{color:#fd971f}.token.important,.token.bold{font-weight:700}.token.italic{font-style:italic}}.hero-img{background-position:50%;background-repeat:no-repeat;background-size:cover}.hero-img,.bg-overlay,.bg-overlay:before,.bg-theme-overlay,.bg-theme-overlay:before,.bg-navy-overlay,.bg-navy-overlay:before,.bg-gradient-overlay,.bg-gradient-overlay:before{position:absolute;inset:0}.bg-overlay:before{content:"";background-color:#242424}.bg-theme-overlay:before{content:"";background:#09c}.bg-navy-overlay:before{content:"";background:#1d2b40}.bg-gradient-overlay:before{content:"";background-image:linear-gradient(90deg,#09c 0%,#7431ff 100%)}[data-overlay="0"]:before{opacity:0}[data-overlay="1"]:before{opacity:.1}[data-overlay="2"]:before{opacity:.2}[data-overlay="3"]:before{opacity:.3}[data-overlay="4"]:before{opacity:.4}[data-overlay="5"]:before{opacity:.5}[data-overlay="6"]:before{opacity:.6}[data-overlay="7"]:before{opacity:.7}[data-overlay="8"]:before{opacity:.8}[data-overlay="9"]:before{opacity:.9}@media (max-width:768px){.section-top{padding-top:5rem}.hero-avatar-bottom{position:relative;bottom:-3rem}.scroll-down{bottom:10px;left:0;right:0}.scroll-down span{color:#09c;display:block}.scroll-down-circle{display:none}}@media (min-width:769px){.section-full>.section-full-exception,.section-full>.container:not(#homepage-container)>.row{min-height:calc(100vh - 20.8125rem)}.section-full .container>.row:before{min-height:inherit;content:""}.section-top{padding-top:13.3125rem}.scroll-down{bottom:50px;left:0;right:0}.scroll-down span{color:#09c;display:block}.scroll-down-circle{width:80px;height:80px;background:#fff;border-radius:50%;line-height:80px;position:absolute;bottom:-100px;left:50%;transform:translate(-50%);box-shadow:0 .2rem .8rem #24242426}}.btn{text-transform:uppercase;white-space:nowrap;padding:.75rem 2rem;font-size:11px;font-weight:700}.btn.btn-sm{padding:.485rem 1.2rem;font-size:10px;font-weight:700;line-height:1.6}.btn.btn-lg{padding:.965rem 2rem}.btn-pill{border-radius:5rem}.btn-theme{background:#09c;border:2px solid #09c;color:#fff!important}.btn-theme:hover{color:#fff;background:#069;border:2px solid #069}.btn-rv-blue{background:#09c;border:2px solid #09c;color:#fff!important}.btn-rv-blue:hover{color:#fff;background:#069;border:2px solid #069}.btn-outline{cursor:pointer;color:#242424;background:#fff;border:2px solid #e8e8e8;margin-left:2em}.btn-outline:hover{background:#242424;border-color:#242424;color:#fff!important}@media screen and 
(prefers-color-scheme:dark){.btn-outline{color:#fff;background:#000;border-color:#e8e8e8}.btn-outline:hover{border-color:#777}}.btn-outline-dark{color:#242424;background:0 0;border:2px solid #242424}.btn-outline-dark:hover{color:#fff;background:#242424;border-color:#242424}.btn-outline-light{color:#fff;background:0 0;border:2px solid #fff}.btn-outline-light:hover{color:#242424;background:#fff;border-color:#fff}.btn-solid-light{color:#242424;background:#fff;border:2px solid #fff}.btn-solid-light:hover{color:#fff;background:#09c;border-color:#09c}.btn-solid-dark{color:#fff;background:#242424;border:2px solid #242424}.btn-solid-dark:hover{color:#fff;background:#09c;border-color:#09c}.video-btn .video-play-icon,.video-btn span{display:inline-block}.video-btn .video-play-icon{width:46px;height:46px;text-align:center;color:#242424;cursor:pointer;background:#fff;border-radius:50%;line-height:46px;box-shadow:0 3px 6px #0000000d}.video-btn .video-play-icon i{font-size:12px;position:relative;top:0;left:2px}.video-btn .video-play-icon:hover i{color:#09c}.video-btn span{text-transform:uppercase;font-size:11px;font-weight:700;position:relative;left:10px}.video-play-btn-align-center{position:absolute;top:50%;left:50%;transform:translate(-50%,-50%)}.btn-read-more{display:inline-block;position:relative}.btn-read-more:hover{text-decoration:none}.btn-read-more:hover:after{right:-30px}@media (max-width:767px){.btn.btn-sm{padding:.4rem 1rem}.btn{padding:.5rem 1.3rem}.btn.btn-lg{padding:.7rem 1.4rem}}.accordion .card{border-color:#e8e8e8}.accordion .card p{color:#8c8c8c}.accordion .card-header{border-color:#e8e8e8;padding:0}.accordion .card-header h6{margin-bottom:0;font-size:16px;font-weight:400}.accordion .card-header a{cursor:pointer;color:#09c;background:#fff;border-radius:4px 4px 0 0;padding:1.2rem 1.5rem;display:block;position:relative}.accordion .card-header a:hover{color:#09c}.accordion .card-header .collapsed{color:#242424}.accordion .card-header .collapsed:after{content:""}.accordion .accordion-list{list-style:none}.accordion .accordion-list li{margin-bottom:.5rem}.accordion .accordion-list li a{color:#8c8c8c}.accordion .accordion-list li a i{font-size:14px}.accordion .accordion-list li a:hover{color:#09c;text-decoration:none}.accordion.accordion-style-1 .card-header a{color:#09c;background:#fff}.accordion.accordion-style-1 .card-header a:hover{color:#09c}.accordion.accordion-style-1 .card-header a:hover:after{color:#fff;background:#09c;border:1px solid #09c;transition:all .3s}.accordion.accordion-style-1 .card-header .collapsed{color:#242424}.accordion.accordion-style-1 .card-header .collapsed:after{content:"";color:#8c8c8c;background:#fff;border:1px solid #ccc}.accordion.accordion-style-2 .card{border-radius:4px;margin-bottom:10px}.accordion.accordion-style-2 .card .card-header{background:0 0;border:none;margin-bottom:0}.accordion.accordion-style-2 .card .card-header a:after{content:"";font-family:fontawesome;font-size:14px;position:absolute;top:35%;right:20px}.accordion.accordion-style-2 .card .card-header a.collapsed{border-radius:4px;position:relative}.accordion.accordion-style-2 .card .card-header a.collapsed:after{content:""}.accordion.accordion-style-2 .card:not(:first-of-type):not(:last-of-type){border-radius:4px}.accordion.accordion-style-2 .card:first-of-type,.accordion.accordion-style-2 .card:not(:first-of-type):not(:last-of-type){border-bottom:1px solid #e8e8e8}.accordion.accordion-style-3 .card,.accordion.accordion-style-3 .card-header,.accordion.accordion-style-3 .card-header 
a{background:0 0;border:none}.accordion.accordion-style-3 .card-header a{color:#09c;padding:1.2rem 0 1.2rem 4rem}.accordion.accordion-style-3 .card-header a:before{width:40px;height:40px;text-align:center;color:#fff;content:"";background:#09c;border-radius:50%;font-family:fontawesome;font-size:14px;line-height:40px;position:absolute;top:8px;left:5px}.accordion.accordion-style-3 .card-header a:after{content:""}.accordion.accordion-style-3 .card-header .collapsed{color:#242424}.accordion.accordion-style-3 .card-header .collapsed:before{content:"";width:40px;height:40px;text-align:center;color:#242424;background:#fff;border:1px solid #f5f5f5;border-radius:50%;line-height:38px;box-shadow:0 3px 6px #0000000d}.accordion.accordion-style-3 .card-body{padding:1.25rem 0 1.25rem 4rem}.accordion.accordion-style-4 .card{border-radius:4px;margin-bottom:10px}.accordion.accordion-style-4 .card .card-header{background:0 0;border:none;margin-bottom:0;position:relative}.accordion.accordion-style-4 .card .card-header a{color:#242424;padding:2.5rem 1.5rem;font-weight:700}.accordion.accordion-style-4 .card .card-header a.collapsed{border-radius:4px;position:relative}.accordion.accordion-style-4 .card .card-header a.collapsed:after{content:""}.accordion.accordion-style-4 .card .card-header i{color:#09c;position:absolute;top:28px}.accordion.accordion-style-4 .card .card-header span{padding-left:3rem}.accordion.accordion-style-4 .card:not(:first-of-type):not(:last-of-type){border-radius:4px}.accordion.accordion-style-4 .card:first-of-type,.accordion.accordion-style-4 .card:not(:first-of-type):not(:last-of-type){border-bottom:1px solid #e8e8e8}.blurb i{margin-bottom:2rem;font-size:3rem;display:inline-block}.blurb p{color:#8c8c8c;padding-right:2rem}.blurb.text-right p{padding-left:2rem;padding-right:0}.blurb-border{background:#fff;border:1px solid #e8e8e8;border-radius:6px;padding:1.8rem}.blurb-border p{margin-bottom:.5rem;padding:0}@media (max-width:767px){.blurb i{font-size:2.5rem}.blurb-border{margin-bottom:1.5rem}}.list-group-right-arrow a{position:relative}.list-group-right-arrow a:after{content:"";width:25px;height:25px;text-align:center;border:1px solid #e8e8e8;border-radius:50%;padding-left:2px;font-family:fontawesome;font-size:14px;line-height:23px;position:absolute;top:30%;right:20px}.list-group-right-arrow a:hover:after{color:#fff;background:#09c;border-color:#09c;transition:all .3s}.list-group-right-arrow .list-group-item.active{color:#09c;background:#fff;border-color:#e8e8e8}.list-group-right-arrow .list-group-item.active:after{color:#fff;background:#09c;border-color:#09c}.list-group-right-arrow-on-hover a:after{opacity:0;width:30px;height:30px;border:3px solid #fff;line-height:25px;top:33%;right:13px}.list-group-right-arrow-on-hover a:hover:after{opacity:1;width:30px;height:30px;border:3px solid #fff;line-height:25px;right:-13px}.list-group-right-arrow-on-hover .list-group-item.active:after{opacity:1;width:30px;height:30px;border:3px solid #fff;line-height:25px;right:-13px}.list-group .list-group-item h6{color:#242424}.list-group .list-group-item:hover h6{color:#09c}.list-group-gap .list-group-item{border-radius:6px;margin-bottom:10px;padding:1.8rem 1.25rem}@media (prefers-color-scheme:dark){.list-group-item{color:#fff;background-color:#1b1b1b;border-color:#414141}}.custom-list{padding:0;list-style:none}.custom-list li{padding:.8rem 0}.custom-list li a{color:#242424}.custom-list li a:hover{color:#09c;text-decoration:none}.custom-list-border li{border-bottom:1px solid #e8e8e8}.custom-list-border 
li:first-child{border-top:1px solid #e8e8e8}@media (prefers-color-scheme:dark){.list-group-item{color:#fff;background-color:#1b1b1b;border-color:#414141}}.clients-thumb{width:100%;max-width:10rem;height:auto;opacity:.5;margin-left:auto;margin-right:auto}.clients-thumb:hover{opacity:1}.dot-style-1.owl-theme .owl-dots .owl-dot span{width:6px;height:6px;background:#242424;transition:all .3s}.dot-style-1.owl-theme .owl-dots .owl-dot.active span{transform:scale(1.5)}.dot-style-2.owl-theme .owl-dots .owl-dot span{width:10px;height:10px;background:#ccc;transition:all .3s}.dot-style-2.owl-theme .owl-dots .owl-dot.active span{width:20px;height:10px;background:#09c;margin:5px 3px}.owl-carousel.text-center .owl-stage{display:inline-block}.carousel-highlighter.owl-carousel .owl-item{opacity:.2}.carousel-highlighter.owl-carousel .owl-item .card-img-overlay{opacity:0}.carousel-highlighter.owl-carousel .owl-item.center,.carousel-highlighter.owl-carousel .owl-item.center .card-img-overlay{opacity:1}.carousel-zoom-img .owl-stage-outer{overflow:visible}.carousel-zoom-img .owl-item .item h6{opacity:0}.carousel-zoom-img .owl-item.center .item{z-index:100;position:relative;transform:scale(1.2)}.carousel-zoom-img .owl-item.center .item h6{opacity:1}.carousel-zoom-img .owl-dots{margin-top:4rem}.owl-theme .owl-nav,.owl-theme .owl-nav.disabled+.owl-dots{margin-top:30px}.nav-circle.owl-theme .owl-nav [class*=owl-],.nav-circle-light.owl-theme .owl-nav [class*=owl-],.nav-circle-solid-light.owl-theme .owl-nav [class*=owl-]{width:50px;height:50px;text-align:center;color:#8c8c8c;border:1px solid #ccc;border-radius:50%;margin-bottom:2rem;font-size:12px;line-height:50px;position:relative}.nav-circle.owl-theme .owl-nav [class*=owl-]:hover,.nav-circle-light.owl-theme .owl-nav [class*=owl-]:hover,.nav-circle-solid-light.owl-theme .owl-nav [class*=owl-]:hover{color:#fff;background:#242424;border-color:#242424}.nav-circle-light.owl-theme .owl-nav [class*=owl-]:hover{color:#242424;background:#fff;border-color:#fff}.nav-circle-solid-light.owl-theme .owl-nav [class*=owl-]{color:#242424;background:#fff;border-color:#fff}.nav-circle-solid-light.owl-theme .owl-nav [class*=owl-]:hover{color:#fff;background:#09c;border-color:#09c}.nav-round.owl-theme .owl-nav [class*=owl-]{width:40px;height:90px;text-align:center;color:#fff;background:#0000004d;border-radius:6px;margin-bottom:2rem;font-size:12px;line-height:90px;position:relative}.nav-round.owl-theme .owl-nav [class*=owl-]:hover{background:#242424}.nav-round.owl-carousel .owl-nav .owl-prev,.nav-round.owl-carousel .owl-nav .owl-next{margin-top:-90px;position:absolute;top:50%}.nav-round.owl-carousel .owl-nav .owl-prev{border-radius:0 6px 6px 0;left:-5px}.nav-round.owl-carousel .owl-nav .owl-next{border-radius:6px 0 0 6px;right:-5px}.custom-testimonial.owl-theme .owl-nav{margin-top:0;bottom:2%}.custom-testimonial.owl-theme .owl-nav [class*=owl-]{width:35px;height:35px;text-align:center;color:#8c8c8c;border:1px solid #ccc;border-radius:50%;font-size:12px;line-height:35px;position:relative}.custom-testimonial.owl-theme .owl-nav [class*=owl-]:hover{color:#fff;background:#242424;border-color:#242424}.owl-carousel .owl-item img{width:auto}@media (min-width:1025px){.nav-circle.owl-carousel .owl-nav .owl-prev,.nav-circle.owl-carousel .owl-nav .owl-next,.nav-circle-light.owl-carousel .owl-nav .owl-prev,.nav-circle-light.owl-carousel .owl-nav .owl-next,.nav-circle-solid-light.owl-carousel .owl-nav .owl-prev,.nav-circle-solid-light.owl-carousel .owl-nav .owl-next,.nav-inside.owl-carousel .owl-nav 
.owl-prev,.nav-inside.owl-carousel .owl-nav .owl-next{margin-top:-50px;position:absolute;top:50%}.nav-circle.owl-carousel .owl-nav .owl-prev,.nav-circle-light.owl-carousel .owl-nav .owl-prev,.nav-circle-solid-light.owl-carousel .owl-nav .owl-prev{left:-8%}.nav-circle.owl-carousel .owl-nav .owl-next,.nav-circle-light.owl-carousel .owl-nav .owl-next,.nav-circle-solid-light.owl-carousel .owl-nav .owl-next{right:-8%}.nav-inside.owl-carousel .owl-nav .owl-prev,.nav-inside.owl-carousel .owl-nav .owl-prev{left:3%}.nav-inside.owl-carousel .owl-nav .owl-next,.nav-inside.owl-carousel .owl-nav .owl-next{right:3%}.custom-testimonial.owl-theme .owl-nav{z-index:100;margin-top:0;position:absolute;bottom:8%;right:11%}}.t-star-icon{font-size:8px;position:relative;bottom:2px}.justify-content-between .card-arrow-icon{width:25px;height:25px;text-align:center;border:1px solid #ddd;border-radius:50%;line-height:25px;transition:all .3s;position:relative}.justify-content-between .card-arrow-icon:after{content:"";font-family:fontawesome;font-size:14px;position:absolute;top:-1px;left:9px}.justify-content-between>a{text-decoration:none}.justify-content-between>a:hover+.card-arrow-icon{color:#fff;background:#09c;border-color:#09c}.card-img-overlay{top:auto;left:auto;right:auto}.box-hover,img.box-hover{transition:top .2s,box-shadow .2s;position:relative;top:0;box-shadow:0 0 #0000}.box-hover:hover,img.box-hover:hover{top:-.5rem;box-shadow:0 .2rem .8rem #24242426}@media (min-width:768px){.card.flex-md-row .card-img{object-fit:cover;border-radius:.375rem 0 0 .375rem}.card.flex-md-row .card-img-right{object-fit:cover;border-radius:0 .375rem .375rem 0}}@media (max-width:991.98px){.card.flex-column .card-img{border-radius:.375rem .375rem 0 0}.card.flex-md-row .card-img-right{border-radius:0 0 .375rem .375rem}}.form-control,.custom-select,.custom-file,.custom-file-input,.custom-file-label{height:calc(2.25rem + 12px);border:1px solid #e8e8e8}.form-control:focus,.custom-select:focus,.custom-file:focus,.custom-file-input:focus,.custom-file-label:focus{box-shadow:none}@media screen and (prefers-color-scheme:dark){.form-control,.form-control:focus{color:#fff;background-color:#1b1b1b;border-color:#414141}}.form-inline .form-control{height:calc(2.25rem + 8px)}.custom-file{border:none}.custom-file-label{padding:.65rem .75rem}.custom-file-label:after{height:2.85rem;padding:inherit;background:#fafafa;border-radius:0 .15rem .15rem 0}.icon-field,.icon-field-right{position:relative}.icon-field i,.icon-field-right i{user-select:none;cursor:default;color:#8c8c8c;position:absolute}.icon-field i{top:15px;left:15px}.icon-field input{padding-left:45px}.icon-field-right i{top:15px;right:15px}.icon-field-right input{padding-right:45px}.custom-control-label:before,.custom-control-label:after{width:1.3rem;height:1.3rem;top:.1rem;left:-1.5rem}.custom-control-label:before{border:1px solid #e8e8e8}.custom-control-label{padding-left:10px}.custom-switch .custom-control-label:before{width:40px;pointer-events:all;height:24px;background:#e8e8e8;border-radius:5rem;left:-2.25rem}.custom-switch .custom-control-label:after{width:calc(1.4rem - 2px);height:calc(1.4rem - 2px);background-color:#fff;border-radius:5rem;top:calc(.25rem - 1px);left:calc(3px - 2.25rem)}.custom-switch .custom-control-input:checked~.custom-control-label:after{transform:translate(.95rem)}.custom-switch 
.custom-control-label{padding-left:20px}.custom-file-input:focus~.custom-file-label,.custom-control-input:focus~.custom-control-label:before{box-shadow:none}.custom-control-input:checked~.custom-control-label:before,.custom-control-input:focus:not(:checked)~.custom-control-label:before{border:1px solid #e8e8e8}.custom-select{background:#fff url(select-arrow.b8e89c0c.svg) right .75rem center/8px 10px no-repeat}.login-content{color:#fff;text-align:center;width:100%;padding:2rem;position:absolute;top:50%;transform:translateY(-50%)}.login-circle-logo{width:80px;height:80px;background:#fff;border-radius:50%;line-height:80px;display:inline-block;box-shadow:0 .2rem .8rem #24242426}.c-form-content,.coming-soon-social{color:#fff;text-align:center;width:100%;padding:3rem;position:absolute;bottom:3%}.coming-soon-social{left:0}@media (max-width:991px){.coming-soon-social{margin-top:3rem;padding:0;position:relative}}@media (max-width:767px){.form-inline .form-control{height:2.25rem}}.steps-solid,.steps-dashed,.steps-dashed-light{width:150px;height:150px;text-align:center;background:#fff;border:2px solid #e8e8e8;border-radius:50%;line-height:150px;display:inline-block;position:relative}.steps-solid:after,.steps-dashed:after,.steps-dashed-light:after{content:"";width:100px;border-bottom:1px dashed #ccc;position:absolute;top:50%;right:-110%}.steps-solid i,.steps-dashed i,.steps-dashed-light i{line-height:150px}.steps-solid .step-number,.steps-dashed .step-number,.steps-dashed-light .step-number{color:#fff;background:#09c;border:3px solid #fff;border-radius:30px;padding:2px 10px;font-size:12px;font-weight:700;line-height:normal;position:absolute;top:10px;right:5px}.steps-solid.step-last:after,.steps-dashed.step-last:after,.steps-dashed-light.step-last:after{border-bottom:none}.steps-dashed{background:0 0;border:2px dashed #ccc}.steps-dashed-light{background:0 0;border:2px dashed #fff}.steps-dashed-light i{color:#fff}.steps-dashed-light:after{border-bottom:2px dashed #fff}.steps-dashed-light+.steps-info{color:#fff}.steps-info{padding:0 2rem}@media (max-width:1024px){.steps-solid:after,.steps-dashed:after,.steps-dashed-light:after{width:100%;top:50%;right:-110%}}@media (max-width:991px){.steps-solid:after,.steps-dashed:after,.steps-dashed-light:after{width:55%;right:-60%}}@media (max-width:767px){.steps-solid,.steps-dashed,.steps-dashed-light{width:100px;height:100px;line-height:100px}.steps-solid i,.steps-dashed i,.steps-dashed-light i{line-height:100px}.steps-solid:after,.steps-dashed:after,.steps-dashed-light:after{border-bottom:none}.steps-solid .step-number,.steps-dashed .step-number,.steps-dashed-light .step-number{right:-15px}.steps-info{margin-bottom:2rem}}.block-image{width:100%;height:100%;z-index:0;top:0;left:0}img.block-image{object-fit:cover}.block-image:not([class*=absolute]){position:absolute}.img-caption{width:70%;background:#ffffffe6;border-radius:0 .375rem .375rem 0;padding:1.5rem;position:absolute;bottom:10%;left:0}@media (min-width:1025px){.parallax-img{background-attachment:fixed}}@media (max-width:767px){.img-caption{padding:1rem}}.custom-progress{height:2px;box-shadow:none;background:#e8e8e8;border-radius:0;margin-top:4rem;position:relative}.custom-progress .progress-bar .skills-info{color:#242424;text-align:left;font-size:14px;position:relative;top:-20px}.custom-progress .progress-bar span{float:right;color:#242424;line-height:normal;position:relative;bottom:0}.custom-progress:first-child{margin-top:1.5rem}.custom-progress.progress{overflow:visible}.custom-progress.dark-progress 
.progress-bar,.custom-progress-inside.dark-progress .progress-bar{background:#242424}.progress-light-txt .progress-bar .skills-info,.progress-light-txt .progress-bar span{color:#fff}.custom-progress-inside{height:30px;background:#e8e8e8;margin-top:2rem}.custom-progress-inside .progress-bar .skills-info{color:#fff;text-align:left;padding:0 1rem;font-size:14px;position:relative;top:0}.custom-progress-inside .progress-bar span{float:right;color:#fff;line-height:normal;position:relative;bottom:0}.custom-progress-inside:first-child{margin-top:0}.nav.nav-group{display:inline-block}.nav.nav-group>li{text-align:center;float:left;margin:0 auto}.nav.nav-group>li .nav-link{color:#242424;padding:1rem 2rem;display:block}@media screen and (prefers-color-scheme:dark){.nav.nav-group>li .nav-link{color:#fff}}.nav.nav-group li a{background:#f9f9f9;border:1px solid #e8e8e8;margin-right:-1px}@media screen and (prefers-color-scheme:dark){.nav.nav-group li a{color:#fff;background-color:#000;border:1px solid #2f2f2f}}.nav.nav-group li a.active{color:#09c;background:#fff}@media screen and (prefers-color-scheme:dark){.nav.nav-group li a.active{background-color:#000}}.nav.nav-group li:first-child a{border-radius:30px 0 0 30px}.nav.nav-group li:last-child a{border-radius:0 30px 30px 0}.nav-line{border-bottom:1px solid #e8e8e8}.nav-line>li .nav-link{color:#242424;border-bottom:1px solid #0000;padding:1.5rem 3rem;display:block;position:relative;bottom:-1px}.nav-line>li a.active{color:#09c;border-bottom:1px solid #242424}.nav-vertical{border-right:1px solid #e8e8e8}.nav-vertical .nav-link{color:#242424;border-right:1px solid #0000;padding:1rem;display:block;position:relative;right:-1px}.nav-vertical i{position:relative;top:5px}.nav-vertical a.active{color:#09c;border-right:1px solid #242424}@media (max-width:1024px){.nav.nav-group>li .nav-link,.nav.nav-line>li .nav-link{padding:.6rem 1.5rem}}@media (max-width:767px){.nav-vertical{margin-bottom:2rem}}@media screen and (prefers-color-scheme:dark){.nav-tabs{border-color:#777}.nav-tabs .nav-link{color:#fff!important}.nav-tabs .nav-link.active{background-color:inherit;border-color:#777}}.vl-custom-table{border-spacing:0 1em;border-collapse:separate}.vl-custom-table h6{color:#616161;margin-top:0;padding-top:0;font-size:16px}.vl-custom-table thead th{vertical-align:middle;border-bottom:none;padding:.5rem 1.75rem}.vl-custom-table th,.vl-custom-table td{vertical-align:middle;border-top:none;font-family:Nunito,sans-serif}.vl-custom-table td:first-child{border-left:1px solid #e8e8e8;border-top-left-radius:10px;border-bottom-left-radius:10px}.vl-custom-table td:last-child{border-right:1px solid #e8e8e8;border-top-right-radius:10px;border-bottom-right-radius:10px}.vl-custom-table tbody td{background:#fff;border-top:1px solid #e8e8e8;border-bottom:1px solid #e8e8e8;padding:1.75rem}@media screen and (prefers-color-scheme:dark){.vl-custom-table h6{color:#777}.vl-custom-table td:first-child,.vl-custom-table td:last-child{border-color:#414141}.vl-custom-table tbody .report-row-header>td{background-color:#000;border-color:#414141}.vl-custom-table tbody .report-output-row>td{color:#fff;background-color:#000}.vl-custom-table tbody .report-output-row>td .report-view-box{color:#fff}.vl-custom-table tbody td{background-color:inherit}}.table-striped td,.table-striped th{vertical-align:middle}.table-striped tbody tr:nth-of-type(2n+1){background-color:#00000006}.table thead th{border-bottom-width:1px}.table .btn{white-space:nowrap}@media screen and (prefers-color-scheme:dark){.table thead,.table 
td{color:#fff}}.team-card{cursor:pointer;position:relative;overflow:hidden;box-shadow:0 8px 16px #0000001a}.team-card:hover{transition:all .3s;box-shadow:0 8px 16px #0003}@media screen and (prefers-color-scheme:dark){.team-card{color:#fff;background-color:#000}}.team-info .top-section{flex-direction:row;display:flex}.team-info .top-section .profile-img{width:200px;height:200px;background-color:#efefef;background-size:cover;margin-bottom:24px;margin-right:24px}.team-info .top-section .team-title p.team-position{margin-bottom:.5rem;font-size:14px}.team-info .top-section .team-social-links a{display:inline-block}@media (max-width:568px){.team-info .top-section{flex-direction:column;margin-bottom:16px}.team-info .top-section .profile-img{margin:0 auto 24px}}.team-info .modal-header{border-bottom:none;padding-bottom:0}@media screen and (prefers-color-scheme:dark){.team-info{color:#fff;background-color:#000}}.social-links a{color:#8c8c8c;margin-right:15px;display:inline-block}.social-links a:hover{color:#09c}@-webkit-keyframes blink{0%{opacity:1}50%{opacity:0}to{opacity:1}}@keyframes blink{0%{opacity:1}50%{opacity:0}to{opacity:1}}.typist-blink:after{height:26px;content:" ";border-right:2px solid;margin-left:3px;margin-right:7px;animation:blink 1s step-start infinite;display:inline-block;position:relative;top:3px}@media (max-width:667px){.typist-blink:after{height:22px;top:2px}}.typist-blink>.selectedText{display:none}.typist-mark>.selectedText{color:#fff;background-color:#242424;font-style:normal}@media screen and (max-width:1080px) and (min-width:768px){.firefly-typist-header{min-height:180px}}@media screen and (max-width:767px) and (min-width:668px){.firefly-typist-header{min-height:102px}}@media screen and (max-width:667px){.firefly-typist-header{min-height:124px}}.count-block{width:150px;height:150px;text-align:center;margin:0 20px;display:inline-block}.count-block h2{margin:2.5rem 0 0;font-weight:700}.count-block span{font-size:16px}.count-solid-light .count-block{background:#fff;border:1px solid #e8e8e8;margin-bottom:20px}.count-solid-dark .count-block{background:#242424;margin-bottom:20px}.count-solid-dark .count-block h2,.count-solid-dark .count-block span{color:#fff}.circle .count-block{border-radius:50%}.round .count-block{border-radius:6px}@media screen and (max-width:767px){.count-block{width:100px;height:100px}.count-block h2{margin:1.5rem 0 0}}.portfolio-filter{margin-bottom:50px;padding:0 1rem;list-style:none}.portfolio-filter li{margin:0 20px;font-family:Lora,serif;display:inline-block;position:relative}.portfolio-filter li a{color:#8c8c8c;text-transform:capitalize;font-size:16px;text-decoration:none}.portfolio-filter li a:hover{color:#242424}.portfolio-filter li:after{content:"/";color:#8c8c8c;position:absolute;top:0;right:-30px}.portfolio-filter li:last-child:after{content:" "}.portfolio-filter li.active a{color:#242424}.portfolio-grid .portfolio-item{float:left}.portfolio-grid .portfolio-item a{text-decoration:none}.portfolio-grid .portfolio-item a:focus{outline:none}.portfolio-grid .portfolio-item .portfolio-image{display:block;position:relative}.portfolio-grid .portfolio-item .portfolio-image img{width:100%;height:auto;display:block}.portfolio-grid .portfolio-item .portfolio-image .portfolio-hover-title{opacity:0;background-color:#242424e6;justify-content:center;align-items:center;margin:0;padding:30px;transition:all .3s;position:absolute;inset:0}.portfolio-grid .portfolio-item .portfolio-image .portfolio-hover-title 
.portfolio-content{width:100%;text-align:left;transition:all .3s;position:absolute;bottom:2rem;left:2rem}.portfolio-grid .portfolio-item .portfolio-image .portfolio-hover-title .portfolio-content h6{color:#fff;margin-bottom:.2rem;font-size:16px}.portfolio-grid .portfolio-item .portfolio-image .portfolio-hover-title .portfolio-content .portfolio-category span{color:#ccc;margin-right:5px;font-family:Lora,serif;font-size:14px}.portfolio-grid .portfolio-item:hover .portfolio-hover-title{opacity:1}.portfolio-grid .portfolio-title{margin:5% 0}.portfolio-grid .portfolio-title .portfolio-content h6{color:#242424;margin-bottom:.2rem}.portfolio-grid .portfolio-title .portfolio-content span{color:#8c8c8c;font-family:Lora,serif}.portfolio-grid.grid-2 .portfolio-item{width:50%}@media (max-width:768px){.portfolio-grid.grid-2 .portfolio-item{width:50%}}@media (max-width:568px){.portfolio-grid.grid-2 .portfolio-item{width:100%}}.portfolio-grid.grid-3 .portfolio-item{width:33.33%}@media (max-width:768px){.portfolio-grid.grid-3 .portfolio-item{width:50%}}@media (max-width:568px){.portfolio-grid.grid-3 .portfolio-item{width:100%}}.portfolio-grid.grid-4 .portfolio-item{width:25%}@media (max-width:768px){.portfolio-grid.grid-4 .portfolio-item{width:50%}}@media (max-width:568px){.portfolio-grid.grid-4 .portfolio-item{width:100%}}.portfolio-grid.grid-4 .portfolio-item .portfolio-content h6{font-size:16px}.portfolio-grid.grid-4 .portfolio-item .portfolio-content span{font-size:14px}.portfolio-grid.grid-5 .portfolio-item{width:20%}@media (max-width:768px){.portfolio-grid.grid-5 .portfolio-item{width:50%}}@media (max-width:568px){.portfolio-grid.grid-5 .portfolio-item{width:100%}}.portfolio-grid.grid-5 .portfolio-item .portfolio-content h6,.portfolio-grid.grid-5 .portfolio-item .portfolio-content span{font-size:14px}.portfolio-grid.grid-2.gutter{margin-right:-2%}.portfolio-grid.grid-2.gutter .portfolio-item{width:48%;margin-bottom:2%;margin-right:2%}@media (max-width:768px){.portfolio-grid.grid-2.gutter .portfolio-item{width:48%}}@media (max-width:568px){.portfolio-grid.grid-2.gutter .portfolio-item{width:98%}}.portfolio-grid.grid-3.gutter{margin-right:-2%}.portfolio-grid.grid-3.gutter .portfolio-item{width:31.33%;margin-bottom:2%;margin-right:2%}@media (max-width:768px){.portfolio-grid.grid-3.gutter .portfolio-item{width:48%}}@media (max-width:568px){.portfolio-grid.grid-3.gutter .portfolio-item{width:98%}}.portfolio-grid.grid-4.gutter{margin-right:-2%}.portfolio-grid.grid-4.gutter .portfolio-item{width:23%;margin-bottom:2%;margin-right:2%}@media (max-width:768px){.portfolio-grid.grid-4.gutter .portfolio-item{width:48%}}@media (max-width:568px){.portfolio-grid.grid-4.gutter .portfolio-item{width:98%}}.portfolio-grid.grid-5.gutter{margin-right:-2%}.portfolio-grid.grid-5.gutter .portfolio-item{width:18%;margin-bottom:2%;margin-right:2%}@media (max-width:768px){.portfolio-grid.grid-5.gutter .portfolio-item{width:48%}}@media (max-width:568px){.portfolio-grid.grid-5.gutter .portfolio-item{width:98%}}.portfolio-masonry .portfolio-title{padding-left:3%}.portfolio-masonry.gutter .portfolio-title{padding-left:0}.portfolio-grid.gutter .portfolio-item .portfolio-image .portfolio-hover-title,.portfolio-masonry.gutter .portfolio-item .portfolio-image .portfolio-hover-title,.portfolio-grid.gutter img,.portfolio-masonry.gutter img{border-radius:6px}.isotope-item{z-index:2}.isotope-hidden.isotope-item{pointer-events:none;z-index:1}.isotope,.isotope 
.isotope-item{transition-duration:.8s}.isotope{transition-property:height,width}.isotope .isotope-item{transition-property:transform,opacity}.isotope.no-transition,.isotope.no-transition .isotope-item,.isotope .isotope-item.no-transition{transition-duration:0s}.isotope.infinite-scrolling{transition:none}.twitter-feed-style{background:#fff;border:1px solid #e8e8e8;position:relative}.twitter-feed-style:after{content:"";color:#e8e8e8;font-family:FontAwesome;font-size:24px;position:absolute;top:1rem;right:1.5rem}.twitter-feed-style ul{margin-bottom:0;padding:0;list-style:none}.twitter-feed-style ul li{padding:2rem;position:relative;overflow:hidden}.twitter-feed-style .user{width:18%;float:left;margin-bottom:.928571em}.twitter-feed-style .user img{max-width:80%;height:auto;border-radius:50%}.twitter-feed-style .user [data-scribe=component\:author]{margin-left:1.3rem;position:absolute;top:10%;left:18%}.twitter-feed-style .user [data-scribe=component\:author] a{text-decoration:none}.twitter-feed-style .user [data-scribe=component\:author] span{display:block}.twitter-feed-style .user [data-scribe=component\:author] [data-scribe=element\:name]{color:#242424;font-weight:700}.twitter-feed-style .user [data-scribe=component\:author] [data-scribe=element\:screen_name]{color:#8c8c8c;font-size:14px}.twitter-feed-style .tweet{color:#8c8c8c;word-break:break-all;margin-top:4rem;margin-bottom:.464286em}.twitter-feed-style .tweet a{margin:0 3px}.twitter-feed-style .tweet,.twitter-feed-style .timePosted{width:82%;float:right}.twitter-feed-style .timePosted{margin-bottom:0;font-size:14px}.twitter-feed-style .timePosted a{color:#8c8c8c}.twitter-feed-style .interact{display:none}.twitter-feed-alt .user [data-scribe=component\:author]{display:none}.twitter-feed-alt .tweet{margin-top:0}.twitter-feed-alt:after{content:""}.floating-search-wrap{position:fixed;top:40%;left:3%}.floating-search-wrap a{color:#242424;margin-bottom:1.5rem;text-decoration:none;display:block}.floating-search-wrap a:hover{color:#09c}.floating-social-link{position:fixed;top:70%;right:0%;transform:translate(30%)rotate(-90deg)}.floating-social-link a{color:#242424;margin-left:.5rem;text-decoration:none}.floating-social-link a:hover{color:#09c}.floating-social-link span{color:#ccc}@media (max-width:1249px){.floating-search-wrap,.floating-social-link{display:none}}.blog-post{border-bottom:1px solid #ccc;margin-bottom:3.5rem;padding-bottom:3rem}.meta a,.meta.text-white a{color:#616161}.meta .meta-separator,.meta.text-white .meta-separator{width:30px;border-top:1px solid #616161;margin:0 15px;display:inline-block;position:relative;bottom:3px}.meta.text-white a{color:#fff}.meta.text-white .meta-separator{border-top:1px solid #fff}.blog-widget .instagram-feed a{width:95px;height:95px}.blog-single p{margin-bottom:3rem}.single-post .comments-area,.single-post .comments{margin-bottom:5rem;display:block}.single-post .comments-area .comments-title,.single-post .comments .comments-title{margin-bottom:5rem;font-size:28px}.single-post .comments-area ol,.single-post .comments-area ul,.single-post .comments ol,.single-post .comments ul,.single-post .comments-area ol ol,.single-post .comments-area ol ul,.single-post .comments-area ul ol,.single-post .comments-area ul ul,.single-post .comments ol ol,.single-post .comments ol ul,.single-post .comments ul ol,.single-post .comments ul ul{list-style:none}.single-post .comment-list,.single-post .comments>ul{margin:0;padding:0;list-style:none}.single-post .comment-list li,.single-post .comments>ul 
li{margin-bottom:1rem}.single-post .comment-list li .comment-body,.single-post .comments>ul li .comment-body{border-bottom:1px solid #e8e8e8;margin-bottom:3rem;padding-bottom:1.5rem;position:relative}.single-post .comment-list .comment-meta,.single-post .comments>ul .comment-meta{margin-bottom:1rem}.single-post .comment-list .comment-meta .comment-author img,.single-post .comments>ul .comment-meta .comment-author img{width:100px;height:100px;float:left;border-radius:6px;margin-right:2rem}.single-post .comment-list .comment-meta .comment-author a,.single-post .comments>ul .comment-meta .comment-author a{font-size:16px;font-weight:700}.single-post .comment-list .comment-meta .comment-author .says,.single-post .comments>ul .comment-meta .comment-author .says{display:none}.single-post .comment-list .comment-meta .comment-metadata a,.single-post .comments>ul .comment-meta .comment-metadata a{color:#8c8c8c;font-size:14px}.single-post .comment-list .comment-content,.single-post .comments>ul .comment-content{overflow:hidden}.single-post .comment-list .reply,.single-post .comments>ul .reply{position:absolute;top:0;right:0}.single-post .comment-list .reply a,.single-post .comments>ul .reply a{color:#09c}.single-post .comment-list .reply a:hover,.single-post .comments>ul .reply a:hover{color:#242424}@media (max-width:991px){.single-post .comments-area .comments-title,.comment-reply-title{font-size:1.5rem}.single-post .comments-area .comments-title{margin-bottom:3rem}}@media (max-width:767px){.single-post .comments-area .comments-title,.comment-reply-title{font-size:1.2rem}.single-post .comments-area .comment-list .comment-meta .comment-author img{width:50px;height:auto}.blog-post{margin-bottom:1.5rem;padding-bottom:1.5rem}}.form-qty{height:calc(2.25rem + 8px)}.bd-toc{order:2;padding-top:1.5rem;padding-bottom:1.5rem;font-size:.875rem}@supports (position: sticky){.bd-toc{height:calc(100vh - 4rem);position:sticky;top:4rem;overflow-y:auto}}.section-nav{border-left:1px solid #eee;padding-left:0}.section-nav ul{padding-left:1rem}.toc-entry{display:block}.toc-entry a{color:#77757a;padding:.125rem 1.5rem;display:block}.toc-entry a:hover{color:#007bff;text-decoration:none}.bd-sidebar{order:0}@media (min-width:768px){.bd-sidebar{border-right:1px solid #0000001a}@supports (position: sticky){.bd-sidebar{z-index:1000;height:calc(100vh - 4rem);position:sticky;top:4rem}}}@media (min-width:1200px){.bd-sidebar{flex:0 320px}}@media (prefers-color-scheme:dark){.bd-sidebar{border-right-color:#414141}}.bd-links{margin-left:-15px;margin-right:-15px;padding-top:1rem;padding-bottom:1rem}@media (min-width:768px){@supports (position: sticky){.bd-links{max-height:calc(100vh - 9rem);overflow-y:auto}}.bd-links{display:block!important}}.bd-search{border-bottom:1px solid #0000000d;margin-left:-15px;margin-right:-15px;padding:1rem 15px;position:relative}.bd-search .form-control:focus{border-color:#fc0;box-shadow:0 0 0 3px #ffcc0040}.bd-search-docs-toggle{color:#212529}@media screen and (prefers-color-scheme:dark){.bd-search-docs-toggle{color:#ccc}.bd-search-docs-toggle:hover{color:#e8e8e8}}.bd-sidenav{display:none}.bd-toc-link{color:#000000a6;padding:.25rem 1.5rem;font-weight:600;display:block}.bd-toc-link:hover{color:#000000d9;text-decoration:none}.bd-toc-link code{color:#e83e8c}.bd-toc-link.selected{color:#09c;font-weight:800}.bd-toc-link.selected:hover{color:#069}@media screen and 
(prefers-color-scheme:dark){.bd-toc-link{color:#fff}.bd-toc-link:hover{color:#ccc}}.bd-toc-item.active{margin-bottom:1rem}.bd-toc-item.active:not(:first-child){margin-top:1rem}.bd-toc-item.active>.bd-toc-link{color:#000000d9}.bd-toc-item.active>.bd-toc-link:hover{background-color:#0000}.bd-toc-item.active>.bd-sidenav{display:block}.bd-sidebar .nav>li>a{color:#000000a6;padding:.25rem 1.5rem;font-size:90%;display:block}.bd-sidebar .nav>li>a:hover{color:#000000d9;background-color:#0000;text-decoration:none}.bd-sidebar .nav>.active>a,.bd-sidebar .nav>.active:hover>a{color:#000000d9;background-color:#0000;font-weight:600}.navbar{flex-wrap:wrap;justify-content:space-between;align-items:center;padding:.5rem 1rem;display:flex;position:relative}.navbar>.container,.navbar>.container-fluid{flex-wrap:wrap;justify-content:space-between;align-items:center;display:flex}.navbar-brand{font-size:1.25rem;line-height:inherit;white-space:nowrap;margin-right:1rem;padding-top:.3125rem;padding-bottom:.3125rem;display:inline-block}.navbar-brand:hover,.navbar-brand:focus{text-decoration:none}.navbar-nav{flex-direction:column;margin-bottom:0;padding-left:0;list-style:none;display:flex}.navbar-nav .nav-link{padding-left:0;padding-right:0}.navbar-nav .dropdown-menu{float:none;position:static}.navbar-text{padding-top:.5rem;padding-bottom:.5rem;display:inline-block}.navbar-collapse{flex-grow:1;flex-basis:100%;align-items:center}.navbar-toggler{background-color:#0000;border:1px solid #0000;border-radius:.25rem;padding:.25rem .75rem;font-size:1.25rem;line-height:1}.navbar-toggler:hover,.navbar-toggler:focus{text-decoration:none}.navbar-toggler:not(:disabled):not(.disabled){cursor:pointer}.navbar-toggler-icon{width:1.5em;height:1.5em;vertical-align:middle;content:"";background:50%/100% 100% no-repeat;display:inline-block}@media (max-width:575.98px){.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid{padding-left:0;padding-right:0}}@media (min-width:576px){.navbar-expand-sm{flex-flow:row;justify-content:flex-start}.navbar-expand-sm .navbar-nav{flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .dropdown-menu-right{left:auto;right:0}.navbar-expand-sm .navbar-nav .nav-link{padding-left:.5rem;padding-right:.5rem}.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid{flex-wrap:nowrap}.navbar-expand-sm .navbar-collapse{flex-basis:auto;display:flex!important}.navbar-expand-sm .navbar-toggler{display:none}.navbar-expand-sm .dropup .dropdown-menu{top:auto;bottom:100%}}@media (max-width:767.98px){.navbar-expand-md>.container,.navbar-expand-md>.container-fluid{padding-left:0;padding-right:0}}@media (min-width:768px){.navbar-expand-md{flex-flow:row;justify-content:flex-start}.navbar-expand-md .navbar-nav{flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .dropdown-menu-right{left:auto;right:0}.navbar-expand-md .navbar-nav .nav-link{padding-left:.5rem;padding-right:.5rem}.navbar-expand-md>.container,.navbar-expand-md>.container-fluid{flex-wrap:nowrap}.navbar-expand-md .navbar-collapse{flex-basis:auto;display:flex!important}.navbar-expand-md .navbar-toggler{display:none}.navbar-expand-md .dropup .dropdown-menu{top:auto;bottom:100%}}@media (max-width:991.98px){.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid{padding-left:0;padding-right:0}}@media (min-width:992px){.navbar-expand-lg{flex-flow:row;justify-content:flex-start}.navbar-expand-lg 
.navbar-nav{flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .dropdown-menu-right{left:auto;right:0}.navbar-expand-lg .navbar-nav .nav-link{padding-left:.5rem;padding-right:.5rem}.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid{flex-wrap:nowrap}.navbar-expand-lg .navbar-collapse{flex-basis:auto;display:flex!important}.navbar-expand-lg .navbar-toggler{display:none}.navbar-expand-lg .dropup .dropdown-menu{top:auto;bottom:100%}}@media (max-width:1199.98px){.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid{padding-left:0;padding-right:0}}@media (min-width:1200px){.navbar-expand-xl{flex-flow:row;justify-content:flex-start}.navbar-expand-xl .navbar-nav{flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .dropdown-menu-right{left:auto;right:0}.navbar-expand-xl .navbar-nav .nav-link{padding-left:.5rem;padding-right:.5rem}.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid{flex-wrap:nowrap}.navbar-expand-xl .navbar-collapse{flex-basis:auto;display:flex!important}.navbar-expand-xl .navbar-toggler{display:none}.navbar-expand-xl .dropup .dropdown-menu{top:auto;bottom:100%}}.navbar-expand{flex-flow:row;justify-content:flex-start}.navbar-expand>.container,.navbar-expand>.container-fluid{padding-left:0;padding-right:0}.navbar-expand .navbar-nav{flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .dropdown-menu-right{left:auto;right:0}.navbar-expand .navbar-nav .nav-link{padding-left:.5rem;padding-right:.5rem}.navbar-expand>.container,.navbar-expand>.container-fluid{flex-wrap:nowrap}.navbar-expand .navbar-collapse{flex-basis:auto;display:flex!important}.navbar-expand .navbar-toggler{display:none}.navbar-expand .dropup .dropdown-menu{top:auto;bottom:100%}.navbar-light .navbar-brand{color:#000000e6}.navbar-light .navbar-brand:hover,.navbar-light .navbar-brand:focus{color:#000000e6}.navbar-light .navbar-nav .nav-link{color:#00000080}.navbar-light .navbar-nav .nav-link:hover,.navbar-light .navbar-nav .nav-link:focus{color:#000000b3}.navbar-light .navbar-nav .nav-link.disabled{color:#0000004d}.navbar-light .navbar-nav .show>.nav-link,.navbar-light .navbar-nav .active>.nav-link,.navbar-light .navbar-nav .nav-link.show,.navbar-light .navbar-nav .nav-link.active{color:#000000e6}.navbar-light .navbar-toggler{color:#00000080;border-color:#0000001a}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml,")}.navbar-light .navbar-text{color:#00000080}.navbar-light .navbar-text a{color:#000000e6}.navbar-light .navbar-text a:hover,.navbar-light .navbar-text a:focus{color:#000000e6}.navbar-dark .navbar-brand{color:#fff}.navbar-dark .navbar-brand:hover,.navbar-dark .navbar-brand:focus{color:#fff}.navbar-dark .navbar-nav .nav-link{color:#ffffff80}.navbar-dark .navbar-nav .nav-link:hover,.navbar-dark .navbar-nav .nav-link:focus{color:#ffffffbf}.navbar-dark .navbar-nav .nav-link.disabled{color:#ffffff40}.navbar-dark .navbar-nav .show>.nav-link,.navbar-dark .navbar-nav .active>.nav-link,.navbar-dark .navbar-nav .nav-link.show,.navbar-dark .navbar-nav .nav-link.active{color:#fff}.navbar-dark .navbar-toggler{color:#ffffff80;border-color:#ffffff1a}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml,")}.navbar-dark .navbar-text{color:#ffffff80}.navbar-dark .navbar-text a{color:#fff}.navbar-dark .navbar-text a:hover,.navbar-dark .navbar-text 
a:focus{color:#fff}.bd-navbar{min-height:4rem;background-color:#fff;box-shadow:0 .5rem 1rem #0000000d,inset 0 -1px #0000001a}@media (max-width:991.98px){.bd-navbar{padding-left:.5rem;padding-right:.5rem}.bd-navbar .navbar-nav-scroll{max-width:100%;height:2.5rem;margin-top:.25rem;font-size:.875rem;overflow:hidden}.bd-navbar .navbar-nav-scroll .navbar-nav{white-space:nowrap;-webkit-overflow-scrolling:touch;padding-bottom:2rem;overflow-x:auto}}@media (min-width:768px){@supports (position: sticky){.bd-navbar{z-index:1071;position:sticky;top:0}}}.bd-navbar .navbar-nav .nav-link{color:#09c;padding-left:.5rem;padding-right:.5rem}.bd-navbar .navbar-nav .nav-link.active,.bd-navbar .navbar-nav .nav-link:hover{color:#069;background-color:#0000}.bd-navbar .navbar-nav .nav-link.active{font-weight:500}.bd-navbar .navbar-nav-svg{width:1rem;height:1rem;vertical-align:text-top;display:inline-block}.bd-navbar .dropdown-menu{font-size:.875rem}.bd-navbar .dropdown-item.active{color:#212529;background-color:#0000;background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%23292b2c' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3E%3C/svg%3E");background-position:.4rem .6rem;background-repeat:no-repeat;background-size:.75rem .75rem;font-weight:500}@media screen and (prefers-color-scheme:dark){.bd-navbar{color:#fff;background-color:#0f0f0f;box-shadow:0 2px 4px -1px #0003,0 4px 5px #00000024,0 1px 10px #0000001f}}.bg-primary{background:#09c}.bg-success{background:#96be5d}.bg-warning{background:#fab63f}.bg-info{background:#18b9d4}.bg-danger{background:#ef5a5a}.text-primary{color:#09c}.text-purple{color:#7431ff}.text-muted{color:#8c8c8c!important}.rounded{border-radius:6px!important}.card{border-color:#e8e8e8;border-radius:6px}.card-img-top{border-top-left-radius:6px;border-top-right-radius:6px}.card-footer{background:0 0;border-top:1px solid #0000000f;padding:1.1rem 1.25rem}.bg-dark .card-footer{border-top:1px solid #ffffff0f}.alert-secondary{color:#242424;background-color:#f4f5f7;border-color:#e8e8e8}.alert-warning{color:#fab63f;background-color:#fef8ec;border-color:#fab63f}.alert-success{color:#7dad14;background-color:#f3f8ed;border-color:#7dad14}.alert-danger{color:#ef5a5a;background-color:#fdeced;border-color:#ef5a5a}.alert-primary{color:#09c;background-color:#eaf4ff;border-color:#09c}.alert-solid-secondary{color:#242424;background-color:#e8e8e8;border-color:#e8e8e8}.alert-solid-warning{color:#fff;background-color:#fab63f;border-color:#fab63f}.alert-solid-success{color:#fff;background-color:#7dad14;border-color:#7dad14}.alert-solid-danger{color:#fff;background-color:#ef5a5a;border-color:#ef5a5a}.alert-solid-primary{color:#fff;background-color:#09c;border-color:#09c}.list-group-item{color:#8c8c8c;border-color:#e8e8e8;padding:1.3rem 1.25rem}.badge{position:relative;bottom:1px}.custom-modal .modal-body{padding:2.5rem}.custom-modal .modal-content{border:none}.custom-modal .modal-header{padding:1rem}.custom-modal .modal-header .close{padding:.5rem 1rem}.modal-body{padding:1.8rem}.modal-header,.modal-footer{padding:1rem 1.8rem}@media screen and (prefers-color-scheme:dark){.modal-header{border-bottom-color:#414141}.modal-footer{border-top-color:#414141}}.modal-image{border-top-left-radius:.25rem;border-top-right-radius:.25rem;position:relative}.modal-image 
.close{width:50px;height:50px;opacity:1;text-shadow:none;background:#fff;border-radius:50%;line-height:50px;position:absolute;top:1.875rem;right:1.875rem}.modal-image .close:hover{color:#fff;opacity:1;background:#09c}@media (min-width:576px){.modal-dialog{max-width:600px}}@media (min-width:992px){.modal-lg,.modal-xl{max-width:800px}}.ls-2{letter-spacing:1rem}.ls-3{letter-spacing:2rem}.lh-35{line-height:35px}.lh-45{line-height:45px}.h60{height:60%}.h70{height:70%}.curve-bottom-1,.curve-bottom-right{clip-path:polygon(0 0,100% 0,100% 85%,70% 100%,0 85%)}.curve-top-bottom{clip-path:polygon(100% 6%,100% 100%,68% 94%,0% 100%,0 10%,28% 0)}.curve-bottom-center{clip-path:polygon(0 0,100% 0,100% 70%,50% 100%,0 70%)}.font-lora{font-family:Lora,serif}.font-weight-300{font-weight:300}.font-weight-400{font-weight:400}.font-weight-600{font-weight:600}.font-weight-700{font-weight:700}.font-weight-800{font-weight:800}.font-size-12{font-size:12px}.font-size-14{font-size:14px}.font-size-16{font-size:16px}.font-size-20{font-size:20px!important}.font-size-60{font-size:60px}.font-size-72{font-size:72px}.i-size-60{font-size:60px}.section-gray,.bg-gray{background-color:#e8e8e8}@media screen and (prefers-color-scheme:dark){.section-gray,.bg-gray{background-color:#2f2f2f}.text-dark{color:#e8e8e8!important}}.bg-white{background:#fff}.bg-dark{background:#242424!important}.bg-dark hr{border-top:1px solid #ffffff80}@media screen and (prefers-color-scheme:dark){hr{border-top-color:#414141}}.bg-green-light{background:#d7f5e3}.bg-dark-alt{background:#303030}.bg-extra{background:#e5e8ec}.component-section{padding:4rem 0;position:relative;overflow:hidden}.show-markup-section{position:relative}.demo .btn{margin:0 .4rem 1rem}.title-separator{margin-bottom:4rem;position:relative}.title-separator:after{content:"";width:60px;height:2px;background:#fff;position:absolute;bottom:-2rem;left:0}.gradient-primary{background-image:linear-gradient(90deg,#09c 0%,#7431ff 100%)}.cta-img{background-position:50%;background-repeat:no-repeat;background-size:cover;position:relative;overflow:hidden}.border-light{border-color:#ffffff20!important}.custom-dropdown{border:none;box-shadow:0 1px 10px #97a4af26}.custom-dropdown .dropdown-item{font-size:12px}.custom-dropdown-sm{min-width:6rem}.opacity-30{opacity:.3}.opacity-40{opacity:.4}.opacity-50{opacity:.5}.opacity-60{opacity:.6}.opacity-70{opacity:.7}.opacity-80{opacity:.8}.opacity-90{opacity:.9}.opacity-100{opacity:1}.avatar-sm{width:35px;height:35px}.avatar,.avatar-md{width:70px;height:70px}.avatar-lg{width:140px;height:140px}.icon-sm{font-size:28px}.icon-md{font-size:42px}.icon-lg{font-size:56px}.ft-tag{color:#fff;background:#09c;border-radius:30px;padding:3px 12px;font-size:11px;font-weight:700}.ft-inside-tl{position:absolute;top:1rem;left:1rem}.ft-inside-tr{position:absolute;top:1rem;right:1rem}.ft-inside-bl{position:absolute;bottom:1rem;left:1rem}.ft-inside-br{position:absolute;bottom:1rem;right:1rem}.ft-overflow-bl{border:2px solid #fff;position:absolute;bottom:-.8rem;left:1rem}.ft-overflow-br{border:2px solid #fff;position:absolute;bottom:-.8rem;right:1rem}.ft-overflow-bc{border:2px solid #fff;position:absolute;bottom:-.8rem;left:50%;transform:translate(-50%)}.ft-overflow-tl{border:2px solid #fff;position:absolute;top:2rem;left:-2rem}.ft-overflow-tr{border:2px solid #fff;position:absolute;top:2rem;right:-2rem}.ft-overflow-tc{border:2px solid #fff;position:absolute;top:-1rem;left:50%;transform:translate(-50%)}@media (max-width:991px){.component-section{padding:3rem 0}}@media 
(max-width:767px){.i-size-60{font-size:3rem}.lh-35,.lh-45{line-height:normal}.ls-2{letter-spacing:.5rem}.font-size-60{font-size:28px}.font-size-72{font-size:32px}.icon-md{font-size:2rem}.icon-lg{font-size:2.5rem}.border-sm-right-0{border-right:none!important}.curve-top-bottom,.curve-bottom-1,.curve-bottom-right{clip-path:polygon(0 0,100% 0,100% 100%,0 100%)}.btn-sm-block{width:100%;padding:.75rem 2rem;display:block}}@media (min-width:991px){.neg-mt-100{margin-top:-100px}.neg-mt-300{margin-top:-300px}.clab-shape{padding-bottom:13.5rem;overflow:hidden}.clab-shape:after{content:"ă";color:#1420f11a;font-size:1200px;font-weight:800;position:absolute;top:-35%;left:50%;transform:translate(-50%)}.clab-shape .container{z-index:1;position:relative}}.markup-example{position:relative}.markup-example .btn{z-index:2;padding:.385rem 1rem;line-height:normal;position:absolute;top:.5rem;right:.5rem}.markup-example pre[class*=language-]{background:#e8e8e8;border-radius:6px;padding:3rem 1rem 1rem}.component-section>button,.show-markup-section>button{z-index:1000;opacity:0;transition:opacity .2s linear;position:absolute;top:50%;right:.75rem;transform:translateY(-50%)}.component-section:hover>button,.show-markup-section:hover>button{opacity:1}.clip-txt{text-align:center;color:#0000;background-position:50%;background-size:cover;background-clip:text;-webkit-background-clip:text;font-size:15rem;font-weight:700;line-height:normal}@media (max-width:565px){.clip-txt{font-size:6rem}}@media (min-width:769px){.clip-txt{font-size:25rem}}.landing-slider .owl-carousel .owl-item img{width:100%}@media screen and (max-width:991px){.wow{visibility:visible!important;animation-name:none!important}}.buy-link{color:#fff;z-index:1200;text-transform:uppercase;text-align:center;background:#82b440;border-radius:0 5px 5px 0;padding:.6rem;font-size:12px;font-weight:700;transition:all .3s;position:fixed;top:30%;left:-5px}.buy-link:hover{color:#fff;text-decoration:none;left:0}html body{background-color:#fff}@media screen and (prefers-color-scheme:dark){html body{color:#fff;background-color:#000}}body.modal-open{overflow:hidden}.report-view-box{height:80vh;white-space:pre-wrap;cursor:default;overflow:hidden}.report-view-box>div{height:100%;max-width:1035px;margin:auto;overflow:auto}.report-view-box .split-view-container{display:flex;overflow:hidden}.report-view-box .split-view-container .solidity-coverage-container{width:100%;min-width:0;flex-direction:column;flex-grow:0;display:flex}.report-view-box .split-view-container .solidity-coverage-container .solidity-coverage{height:100%;overflow:auto}.report-view-box .split-view-container .solidity-coverage-container .message-box{height:25%}@media screen and (prefers-color-scheme:dark){.report-view-box .split-view-container .solidity-coverage-container .message-box table.table-light{color:#fff;background-color:#000}}.report-view-box .split-view-container .bytecode-coverage{max-width:25%;overflow:auto}.report-view-box .split-view-container .contract-explorer{width:25%;background-color:#fcfcfc;padding:8px 0;font-size:.9rem;overflow:auto}.report-view-box .split-view-container .contract-explorer>ul>li{cursor:pointer;padding:4px 8px;transition:background .3s}.report-view-box .split-view-container .contract-explorer>ul>li:hover{background:#ddd}.report-view-box .split-view-container .contract-explorer>ul>li.active{background:#8ed3f1}.report-view-box .split-view-container .contract-explorer>ul>li:first-child{font-size:1rem}.report-view-box .split-view-container 
.contract-explorer>ul>li:not(:first-child){border-left:1px solid #ccc;margin-left:.75rem}@media screen and (prefers-color-scheme:dark){.report-view-box .split-view-container .contract-explorer{color:#fff;background-color:#000}.report-view-box .split-view-container .contract-explorer>ul>li.active{background:#09c}}.report-view-box .split-view-container tr{border-bottom:2px solid #0000}.report-view-box .split-view-container tr:hover{cursor:pointer}.report-view-box .split-view-container tr.covered:hover{border-bottom:2px solid #91daa6}.report-view-box .split-view-container tr.weak-covered:hover{border-bottom:2px solid #d2da91}.report-view-box .split-view-container tr.not-covered:hover{border-bottom:2px solid #da919a}.report-view-box .split-view-container tr.active{filter:brightness(85%)}.report-view-box .split-view-container tr.covered.active{border-bottom:2px solid #91daa6}.report-view-box .split-view-container tr.weak-covered.active{border-bottom:2px solid #d2da91}.report-view-box .split-view-container tr.not-covered.active{border-bottom:2px solid #da919a}.report-view-box .split-view-container .has-highlight tr.text-muted.not-covered,.report-view-box .split-view-container .has-highlight tr.text-muted.covered,.report-view-box .split-view-container .has-highlight tr.text-muted.weak-covered,.report-view-box .split-view-container .has-highlight tr.text-muted tr,.report-view-box .split-view-container .has-highlight tr.text-muted td{background-color:inherit!important}.report-view-box .split-view-container .has-highlight tr.message-muted{display:none}.benefits{padding-bottom:48px}.has-highlighted>table tr:not(.highlighted) *{color:#ccc!important;background-color:inherit!important}pre[class*=language-]{background-color:inherit;padding:0}.coverage{height:100%;font-size:14px;overflow:auto}.coverage:not(.solidity-coverage){font-size:13px}.coverage tr td,.coverage tr th{white-space:nowrap;border:0;padding:0 0 0 1em;border-radius:0!important}.coverage tr td pre,.coverage tr th pre{background:inherit;margin:0;padding:0}.coverage tr th{border-bottom:1px solid #dfdfdf;font-size:16px}.coverage tr td.index,.coverage tr th.index{text-align:right;width:3em;border-right:1px solid #ccc;padding-right:1ch}.coverage tr td.index{background-color:#fafbfc}.coverage tr td.opcode{font-weight:600}.coverage tr td.hit-count{text-align:center}.coverage tr.covered,.coverage tr.covered td{background-color:#e6ffed}.coverage tr.covered td.index{background-color:#dbffe3}.coverage tr.not-covered,.coverage tr.not-covered td{background-color:#ffe6e8}.coverage tr.not-covered td.index{background-color:#ffdbdf}.coverage tr.weak-covered,.coverage tr.weak-covered td{background-color:#fcffe6}.coverage tr.weak-covered td.index{background-color:#fbffdb}@media screen and (prefers-color-scheme:dark){.coverage{color:#ccc}.coverage th{color:#fff}.coverage tr.not-covered td.index,.coverage tr.covered td.index{color:#ccc}.coverage tr th.index{border-color:#414141}.coverage tr td.index{background-color:#000;border-color:#414141}.coverage tr td.opcode{color:#ccc}.coverage tr.covered,.coverage tr.covered td{background-color:#295234}.coverage tr.covered td.index{background-color:#214129}.coverage tr.not-covered,.coverage tr.not-covered td{background-color:#692217}.coverage tr.not-covered td.index{background-color:#541b12}.coverage tr.weak-covered,.coverage tr.weak-covered td{background-color:#68360b}.coverage tr.weak-covered td.index{background-color:#512a09}}.coverage table{width:100%}.mouse-pointer{cursor:pointer}.wizard-grid 
div{padding:10px}.wizard-grid .btn-outline,.wizard-box .btn-outline{margin-left:0}a.wizard-link:visited{color:purple}.code{background-color:#fafbfc;padding:10px}@media screen and (prefers-color-scheme:dark){.code{color:#fff;background-color:#2f2f2f}}.code-block-selectors{color:#000;width:fit-content;background-color:#e5e5e5;border-radius:4px;margin-bottom:8px;padding:2px 12px;position:absolute;top:0;left:0}@media screen and (prefers-color-scheme:dark){.code-block-selectors{color:#fff;background-color:#616161}}.nav-item{cursor:pointer}.ns-form{grid-column-gap:1rem;grid-template-columns:75% 25%;display:grid}.btn-disabled{background:#fff;border:2px solid #e8e8e8;margin-left:2em;color:gray!important}#firefly-reports,#firefly-report{margin-bottom:100px}#firefly-reports thead tr th,#firefly-report thead tr th{text-align:center;padding:.5rem .75rem}#firefly-reports tbody .report-row-header td,#firefly-report tbody .report-row-header td{text-align:center}#firefly-reports tbody .report-row-header td .report-id,#firefly-report tbody .report-row-header td .report-id{color:#09c;text-decoration:underline}#firefly-reports tbody .report-row-header td .report-id:hover,#firefly-report tbody .report-row-header td .report-id:hover{color:#007aa3}#firefly-reports .center-image,#firefly-report .center-image{padding-bottom:0;padding-left:21px;padding-right:21px}.report-buttons>ul{align-items:center;margin:0;display:flex}.report-buttons>ul>li p{margin:0}.report-buttons>ul>li:not(:last-child){margin-right:1.5rem}.report-buttons>ul .dropdown{display:inline-block}.report-buttons>ul .dropdown.show{outline:2px solid #09c}.report-buttons>ul .dropdown img{height:2rem}.report-buttons>ul .dropdown img:hover{outline:1px solid #09c}.report-buttons>ul .dropdown-toggle:after{content:none}@media screen and (prefers-color-scheme:dark){.report-buttons>ul{color:#fff}.report-buttons>ul .dropdown-menu,.report-buttons>ul .dropdown-item{color:#fff;background-color:#000}.report-buttons>ul .dropdown-item:hover{color:#c2c2c2}}@media (prefers-color-scheme:dark){.dropdown-menu,.dropdown-item{color:#fff;background-color:#1b1b1b}.dropdown-item:hover{color:#fff;background-color:#2f2f2f}}.report-coverage-row td{position:relative}.report-coverage-row .report-container .fullscreen-btn{cursor:pointer;color:#09c;font-size:2rem;position:absolute;top:.5rem;right:2rem}.report-coverage-row .report-container.fullscreen{width:100%;height:100%;z-index:10;background-color:#000000e6;padding:2rem;transition:all .15s linear;position:fixed;top:0;left:0}.report-coverage-row .report-container.fullscreen .report-wrapper{height:100%;background-color:#fff;border-radius:.3rem;padding:2rem;animation-name:zoom;animation-duration:.6s;position:relative;overflow:auto}.report-coverage-row .report-container.fullscreen .report-wrapper .fullscreen-btn{position:fixed;top:2rem;right:2.8rem}.report-coverage-row .report-container.fullscreen .report-wrapper .report-view-box{height:calc(100vh - 256px)}@media screen and (prefers-color-scheme:dark){.report-coverage-row .report-container.fullscreen .report-wrapper{background-color:#000}.report-coverage-row .report-container.fullscreen .report-wrapper th{color:#fff}}.report-coverage-row .report-container.fullscreen .coverage,.report-coverage-row .report-container.fullscreen .split-view-container{max-width:initial}.introjs-hint.introjs-fixedhint{z-index:11}i.fa{margin:0 1ch}.expandable-title{padding:.3rem 0;transition:all 
.3s}.expandable-title:hover{cursor:pointer;color:#09c}.expandable-title:not(.contract-title){font-weight:400}.contract-title .contract-coverage{float:right}.contract-hint-trigger{text-align:center;display:block}.contract-hint-trigger:hover{cursor:pointer;color:#fc0}.contract-hint-trigger .contract-hint-trigger-hidden{display:none}.source-name{border-bottom:1px solid #ccc;font-size:12px}@media screen and (prefers-color-scheme:dark){.source-name{color:#fff;border-color:#414141}}.indent-2{padding-left:2ch}.btn-download-report{background-image:url(Firefly-Download-Icon.8094a4f0.png)}.btn-download-report:hover{background-image:url(Firefly-Download-Icon-Inverted.32636383.png)}.btn-copy-report-id{cursor:pointer;background-size:cover;display:inline-block;font-size:1rem!important}@media (min-width:1025px){#firefly-header:not(.sticky-nav)>.container>.row>.col-12{min-height:150px}.vlmenu>li:not(:last-child){margin-right:20px}.vlmenu>li>a{padding-bottom:0}.vlmenu li:hover{border-bottom:2px solid #fc0}.vlmenu li.active{border-bottom:2px solid #fc0}.vlmenu li>ul{z-index:1}#dashboard-submenu{position:relative}#dashboard-submenu>ul{z-index:0;box-shadow:none;width:260px;justify-content:space-between;display:flex;position:absolute;top:50px;right:0}#dashboard-submenu>ul>li{display:inline-block}#dashboard-submenu>ul>li>a{padding:0;display:inline-block}#dashboard-submenu>ul>li>a:hover{color:#000;background:0 0}}@media screen and (min-width:1025px) and (prefers-color-scheme:dark){#dashboard-submenu>ul>li>a:hover{color:#fff}}.breadcrumb{background-color:inherit}.breadcrumb-item+.breadcrumb-item:before{content:">"!important}div.table-responsive{padding:0 8px}#youtube-video{height:303.75px;width:540px;max-width:100%;background-color:#d7d7d7;justify-content:center;align-items:center;display:flex}video{width:100%!important;height:auto!important}.gif:hover{opacity:.7}.custom-control-input:checked~.custom-control-label:before{background-color:#09c}.divider-bar-closed{color:#fff;background:#09c}.divider-bar-closed:hover{background:#008ab8}.divider-bar-opened{background:#fc0}.divider-bar-opened:hover{background:#e6b800}.divider-bar{cursor:pointer;flex-shrink:0;justify-content:center;align-items:center;transition:background .2s ease-in-out;display:flex}.divider-bar span{letter-spacing:1px}@media screen and (prefers-color-scheme:dark){.divider-bar{color:#fff;background:#343434}.divider-bar:hover{background-color:#393939}}.divider-bar-vertical{width:1.3rem;flex-direction:column}.divider-bar-vertical span{writing-mode:vertical-lr;text-orientation:sideways}.divider-bar-vertical i{margin:1ch 0}.divider-bar-horizontal{height:1.3rem;flex-direction:row}#contact-links li:hover{cursor:pointer;border:none!important}#contact-links img{height:1.5rem}#firefly-profile .btn{margin:4px auto;padding:4px 1rem}.icon{color:#09c;font-size:2rem;transition:color .2s}.icon:hover{color:#069}.icon.active{color:#fc0}.icon.active:hover{color:#ccb400}@media screen and (prefers-color-scheme:dark){.introjs-tooltip{background-color:#000}.introjs-helperLayer{background-color:#414141e6}}.modal{width:100%;height:100%;background-color:#000000e6;display:none;position:fixed;top:0;left:0;overflow:auto}#email-modal{z-index:2000}.modal-content{width:80%;max-width:1024px;margin:auto;display:block}@media screen and (prefers-color-scheme:dark){.modal-content{color:#fff;background-color:#141414}}video.modal-content{margin-top:100px}#modal-caption{width:80%;max-width:700px;text-align:center;color:#ccc;margin:auto;padding:10px 
0;display:block}.modal-content,#modal-caption{animation-name:zoom;animation-duration:.6s}@keyframes zoom{0%{transform:scale(0)}to{transform:scale(1)}}.close{color:#09c;z-index:99;font-size:50px;font-weight:700;transition:all .3s;position:absolute;top:15px;right:35px}.close:hover,.close:focus{color:#fc0;cursor:pointer;text-decoration:none}@media only screen and (max-width:1024px){.modal-content{width:100%}}#firefly-feedback{z-index:999;position:fixed;bottom:30px;right:30px}#firefly-feedback>picture>img{width:100px}#firefly-feedback img{cursor:pointer;opacity:.7;transition:opacity .3s}#firefly-feedback img:hover{opacity:1}#firefly-feedback .feedback-form{background:#fff;border-radius:4px;flex-direction:column;justify-content:space-evenly;padding:24px;display:flex;box-shadow:1px 2px 10px #0003}#firefly-feedback .feedback-form .fa-times{cursor:pointer;opacity:.5;transition:opacity .3s;position:absolute;top:25px;right:25px}#firefly-feedback .feedback-form .fa-times:hover{opacity:1}#firefly-feedback .feedback-form .feedback-rating{justify-content:space-evenly;display:flex}#firefly-feedback .feedback-form .feedback-rating>picture>img{height:120px;margin:4px 18px}#firefly-feedback .feedback-form .feedback-message{float:left;width:100%;height:150px;margin-top:10px;margin-bottom:10px;overflow:scroll}@media only screen and (max-width:767px){#firefly-feedback .feedback-form{position:fixed;bottom:8px;left:8px;right:8px}#firefly-feedback .feedback-form .feedback-rating>picture>img{height:100px}}@media screen and (prefers-color-scheme:dark){#firefly-feedback .feedback-form{color:#fff;background-color:#000}}#contact-links img{margin-right:1rem}#bytecodes,#solidity-files,#contract-links{margin-bottom:24px}.drop-area{text-align:center;height:64px;cursor:pointer;background-color:#f7f8fa;border-style:dashed;border-color:#616161;margin-top:0;margin-bottom:0}.drop-area:hover{background-color:#edf1f4}.drop-area p{margin:0;position:relative;top:50%;transform:translateY(-50%)}.drop-area.uploader{margin-bottom:0}@media screen and (prefers-color-scheme:dark){.drop-area{color:#242424;border-color:#777}}.ratings,.ratings p{display:inline-block}.ratings #positive{text-align:left}.ratings #indifferent{text-align:center}.ratings #negative{text-align:right}.feedback-table{height:70%}.color-erc20-green{color:#007f3a!important}.background-color-erc20-green{background-color:#007f3a!important}#erc20-dashboard .bytecode-row-header h6 a,#erc20-bytecode .bytecode-row-header h6 a,#erc20-dev-dashboard .bytecode-row-header h6 a{color:#09c;text-decoration:underline}#erc20-dashboard .bytecode-row-header h6 a:hover,#erc20-bytecode .bytecode-row-header h6 a:hover,#erc20-dev-dashboard .bytecode-row-header h6 a:hover{color:#007aa3}#erc20-dashboard .markdown-preview,#erc20-bytecode .markdown-preview,#erc20-dev-dashboard .markdown-preview{border:1px solid #e8e8e8;border-radius:4px;margin-bottom:32px;padding:8px}#erc20-dashboard .markdown-preview pre[class*=language-],#erc20-bytecode .markdown-preview pre[class*=language-],#erc20-dev-dashboard .markdown-preview pre[class*=language-]{word-break:normal;white-space:pre-wrap;background-color:#f7f8fa;padding:16px;font-size:.85rem;overflow:auto}@media screen and (prefers-color-scheme:dark){#erc20-dashboard .markdown-preview,#erc20-bytecode .markdown-preview,#erc20-dev-dashboard .markdown-preview{color:#e8e8e8;border:1px solid #414141}#erc20-dashboard .markdown-preview pre[class*=language-],#erc20-bytecode .markdown-preview pre[class*=language-],#erc20-dev-dashboard .markdown-preview 
pre[class*=language-]{background-color:#1d1f21}#erc20-dashboard,#erc20-bytecode,#erc20-dev-dashboard,#erc20-dashboard .table,#erc20-bytecode .table,#erc20-dev-dashboard .table{color:#fff}}.logo-link{color:#242424}@media screen and (prefers-color-scheme:dark){.logo-link{color:#fff}}pre[class*=language-],pre{word-break:normal;white-space:pre-wrap;background-color:#f7f8fa;padding:16px;font-family:Inconsolata,Monaco,Consolas,Courier New,Courier,monospace;font-size:.85rem;overflow:auto}code[class*=language-]{font-family:Inconsolata,Monaco,Consolas,Courier New,Courier,monospace}pre[class*=language-]{border-radius:.3em;margin:.5em 0;padding:1em;overflow:auto}@media screen and (prefers-color-scheme:dark){pre[class*=language-],pre{color:#c5c8c6;background-color:#1d1f21}}.h1 a,.h2 a,.h3 a,.h4 a,.h5 a,.h6 a,h1 a,h2 a,h3 a,h4 a,h5 a,h6 a{color:#09c}.h1 a:hover,.h2 a:hover,.h3 a:hover,.h4 a:hover,.h5 a:hover,.h6 a:hover,h1 a:hover,h2 a:hover,h3 a:hover,h4 a:hover,h5 a:hover,h6 a:hover{color:#069}.rv-btn{color:#242424;background-color:#fff;border-color:#242424}.rv-btn:hover{background-color:#f2f2f2;border-color:#171717}@media screen and (prefers-color-scheme:dark){.rv-btn{color:#fff;background-color:#242424;border-color:#fff}.rv-btn:hover{color:#fff;background-color:#212121;border-color:#f2f2f2}}.rv-btn-yellow{color:#242424;background-color:#fc0;border-color:#fc0}.rv-btn-yellow:hover{background-color:#e6b800;border-color:#e6b800}@media screen and (prefers-color-scheme:dark){.rv-btn-yellow{background-color:#ccb400;border-color:#ccb400}.rv-btn-yellow:hover{background-color:#b39e00;border-color:#b39e00}}.rv-btn-blue{background-color:#09c;border-color:#09c;color:#fff!important}.rv-btn-blue:hover{background-color:#0086b3;border-color:#0086b3;color:#fff!important}@media screen and (prefers-color-scheme:dark){.rv-btn-blue{background-color:#069;border-color:#069}.rv-btn-blue:hover{background-color:#005580;border-color:#005580}}.rv-btn-red{background-color:#f04124;border-color:#f04124;color:#fff!important}.rv-btn-red:hover{background-color:#ea2f10;border-color:#ea2f10;color:#fff!important}@media screen and (prefers-color-scheme:dark){.rv-btn-red{background-color:#cf2a0e;border-color:#cf2a0e}.rv-btn-red:hover{background-color:#b7250c;border-color:#b7250c}}.rv-btn-green{background-color:#007f3a;border-color:#007f3a;color:#fff!important}.rv-btn-green:hover{background-color:#00662e;border-color:#00662e;color:#fff!important}@media screen and (prefers-color-scheme:dark){.rv-btn-green{background-color:#007f3a;border-color:#007f3a}.rv-btn-green:hover{background-color:#00662e;border-color:#00662e}}.card{position:relative;overflow:hidden;box-shadow:0 8px 16px #0000001a}.card:hover{transition:all .3s;box-shadow:0 8px 16px #0003}@media screen and (prefers-color-scheme:dark){.card{color:#fff;background-color:#141414;border-color:#414141}}blockquote{font-size:inherit;color:#5c5c5c;background-color:#f0f0f0;border-left:4px solid #d6d6d6;margin:16px 0;padding:0 15px}@media screen and (prefers-color-scheme:dark){blockquote{font-size:inherit;color:#fdfdfd;background-color:#323232;border-left:4px solid #474747;margin:16px 0;padding:0 15px}}.page-toc{background-color:#fff;border-left:1px solid #0000001a;border-right:none;overflow:auto}@media (max-width:767.98px){.page-toc{z-index:2000;height:100%;background-color:#fff;border:none;display:none;position:fixed;top:0;overflow:auto}}@media screen and (prefers-color-scheme:dark){.page-toc{background:#000;border-left-color:#414141}}.page-toc .bd-toc-link-wrapper 
.bd-toc-link{white-space:nowrap;padding:.25rem 0;display:inline}.page-toc .bd-toc-link-wrapper.highlighted .bd-toc-link{color:#09c;font-weight:800}.page-toc-toggle-btn{width:48px;height:48px;z-index:3000;border-radius:100%;padding:0;font-size:150%;display:none;position:fixed;bottom:12px;right:12px}@media (max-width:767.98px){.page-toc-toggle-btn{justify-content:center;align-items:center;display:flex}} \ No newline at end of file diff --git a/assets/css/owl.video.play.e7a23fb2.png b/assets/css/owl.video.play.e7a23fb2.png new file mode 100644 index 00000000000..aa387f962d1 Binary files /dev/null and b/assets/css/owl.video.play.e7a23fb2.png differ diff --git a/assets/css/select-arrow.b8e89c0c.svg b/assets/css/select-arrow.b8e89c0c.svg new file mode 100644 index 00000000000..4c24a2a3427 --- /dev/null +++ b/assets/css/select-arrow.b8e89c0c.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/img/404.png b/assets/img/404.png new file mode 100644 index 00000000000..7e858c28a12 Binary files /dev/null and b/assets/img/404.png differ diff --git a/assets/img/500.png b/assets/img/500.png new file mode 100644 index 00000000000..953bc1b5b15 Binary files /dev/null and b/assets/img/500.png differ diff --git a/assets/img/android-chrome-192x192.png b/assets/img/android-chrome-192x192.png new file mode 100644 index 00000000000..358dc4b20ad Binary files /dev/null and b/assets/img/android-chrome-192x192.png differ diff --git a/assets/img/android-chrome-512x512.png b/assets/img/android-chrome-512x512.png new file mode 100644 index 00000000000..ff0646b0124 Binary files /dev/null and b/assets/img/android-chrome-512x512.png differ diff --git a/assets/img/apple-touch-icon.png b/assets/img/apple-touch-icon.png new file mode 100644 index 00000000000..5a7099aa748 Binary files /dev/null and b/assets/img/apple-touch-icon.png differ diff --git a/assets/img/favicon-16x16.png b/assets/img/favicon-16x16.png new file mode 100644 index 00000000000..732ccd78f8f Binary files /dev/null and b/assets/img/favicon-16x16.png differ diff --git a/assets/img/favicon-32x32.png b/assets/img/favicon-32x32.png new file mode 100644 index 00000000000..1b098ea27df Binary files /dev/null and b/assets/img/favicon-32x32.png differ diff --git a/assets/img/favicon.ico b/assets/img/favicon.ico new file mode 100644 index 00000000000..f4db8d1f94b Binary files /dev/null and b/assets/img/favicon.ico differ diff --git a/assets/img/hero/kerc20-hero-image.png b/assets/img/hero/kerc20-hero-image.png new file mode 100644 index 00000000000..0908db01fcb Binary files /dev/null and b/assets/img/hero/kerc20-hero-image.png differ diff --git a/assets/img/hero/test-coverage-hero-image.png b/assets/img/hero/test-coverage-hero-image.png new file mode 100644 index 00000000000..b8a7bfe6739 Binary files /dev/null and b/assets/img/hero/test-coverage-hero-image.png differ diff --git a/assets/img/hero/test-runner-hero-image.png b/assets/img/hero/test-runner-hero-image.png new file mode 100644 index 00000000000..0908db01fcb Binary files /dev/null and b/assets/img/hero/test-runner-hero-image.png differ diff --git a/assets/img/k-logo-dark.png b/assets/img/k-logo-dark.png new file mode 100644 index 00000000000..3982958bae0 Binary files /dev/null and b/assets/img/k-logo-dark.png differ diff --git a/assets/img/k-logo.png b/assets/img/k-logo.png new file mode 100644 index 00000000000..c23f9ca3527 Binary files /dev/null and b/assets/img/k-logo.png differ diff --git a/assets/img/lang.jpg b/assets/img/lang.jpg new file mode 100644 index 00000000000..cb5659ef1ae Binary 
files /dev/null and b/assets/img/lang.jpg differ diff --git a/assets/img/new.gif b/assets/img/new.gif new file mode 100644 index 00000000000..7945d05f44c Binary files /dev/null and b/assets/img/new.gif differ diff --git a/assets/img/rv-logo-dark.png b/assets/img/rv-logo-dark.png new file mode 100644 index 00000000000..50f488b7b3b Binary files /dev/null and b/assets/img/rv-logo-dark.png differ diff --git a/assets/img/rv-logo.png b/assets/img/rv-logo.png new file mode 100644 index 00000000000..33bdafe3927 Binary files /dev/null and b/assets/img/rv-logo.png differ diff --git a/assets/img/try-it-online.jpg b/assets/img/try-it-online.jpg new file mode 100644 index 00000000000..1a2ff260b17 Binary files /dev/null and b/assets/img/try-it-online.jpg differ diff --git a/assets/js/index.js b/assets/js/index.js new file mode 100644 index 00000000000..670ddccf7bb --- /dev/null +++ b/assets/js/index.js @@ -0,0 +1,212 @@ +var e="undefined"!=typeof globalThis?globalThis:"undefined"!=typeof self?self:"undefined"!=typeof window?window:"undefined"!=typeof global?global:{};var t={},n={},i=e.parcelRequire2b1d;null==i&&((i=function(e){if(e in t)return t[e].exports;if(e in n){var i=n[e];delete n[e];var o={id:e,exports:{}};return t[e]=o,i.call(o.exports,o,o.exports),o.exports}var s=new Error("Cannot find module '"+e+"'");throw s.code="MODULE_NOT_FOUND",s}).register=function(e,t){n[e]=t},e.parcelRequire2b1d=i),i.register("lJ4Q2",(function(e,t){ +/*! + * jQuery JavaScript Library v3.6.0 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright OpenJS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2021-03-02T17:08Z + */ +!function(t,n){"use strict";"object"==typeof e.exports?e.exports=t.document?n(t,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return n(e)}:n(t)}("undefined"!=typeof window?window:e.exports,(function(e,t){"use strict";var n=[],i=Object.getPrototypeOf,o=n.slice,s=n.flat?function(e){return n.flat.call(e)}:function(e){return n.concat.apply([],e)},r=n.push,a=n.indexOf,l={},c=l.toString,u=l.hasOwnProperty,d=u.toString,h=d.call(Object),f={},p=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType&&"function"!=typeof e.item},m=function(e){return null!=e&&e===e.window},g=e.document,v={type:!0,src:!0,nonce:!0,noModule:!0};function y(e,t,n){var i,o,s=(n=n||g).createElement("script");if(s.text=e,t)for(i in v)(o=t[i]||t.getAttribute&&t.getAttribute(i))&&s.setAttribute(i,o);n.head.appendChild(s).parentNode.removeChild(s)}function b(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?l[c.call(e)]||"object":typeof e}var _="3.6.0",w=function(e,t){return new w.fn.init(e,t)};function x(e){var t=!!e&&"length"in e&&e.length,n=b(e);return!p(e)&&!m(e)&&("array"===n||0===t||"number"==typeof t&&t>0&&t-1 in e)}w.fn=w.prototype={jquery:_,constructor:w,length:0,toArray:function(){return o.call(this)},get:function(e){return null==e?o.call(this):e<0?this[e+this.length]:this[e]},pushStack:function(e){var t=w.merge(this.constructor(),e);return t.prevObject=this,t},each:function(e){return w.each(this,e)},map:function(e){return this.pushStack(w.map(this,(function(t,n){return e.call(t,n,t)})))},slice:function(){return this.pushStack(o.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},even:function(){return this.pushStack(w.grep(this,(function(e,t){return(t+1)%2})))},odd:function(){return 
this.pushStack(w.grep(this,(function(e,t){return t%2})))},eq:function(e){var t=this.length,n=+e+(e<0?t:0);return this.pushStack(n>=0&&n+~]|[\\x20\\t\\r\\n\\f])[\\x20\\t\\r\\n\\f]*"),U=new RegExp(H+"|>"),V=new RegExp(R),Q=new RegExp("^"+j+"$"),G={ID:new RegExp("^#("+j+")"),CLASS:new RegExp("^\\.("+j+")"),TAG:new RegExp("^("+j+"|[*])"),ATTR:new RegExp("^"+M),PSEUDO:new RegExp("^"+R),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\([\\x20\\t\\r\\n\\f]*(even|odd|(([+-]|)(\\d*)n|)[\\x20\\t\\r\\n\\f]*(?:([+-]|)[\\x20\\t\\r\\n\\f]*(\\d+)|))[\\x20\\t\\r\\n\\f]*\\)|)","i"),bool:new RegExp("^(?:"+P+")$","i"),needsContext:new RegExp("^[\\x20\\t\\r\\n\\f]*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\([\\x20\\t\\r\\n\\f]*((?:-\\d)?\\d*)[\\x20\\t\\r\\n\\f]*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,X=/^(?:input|select|textarea|button)$/i,K=/^h\d$/i,Z=/^[^{]+\{\s*\[native \w/,J=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}[\\x20\\t\\r\\n\\f]?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},ie=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,oe=function(e,t){return t?"\0"===e?"�":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},se=function(){h()},re=_e((function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()}),{dir:"parentNode",next:"legend"});try{L.apply(O=$.call(w.childNodes),w.childNodes),O[w.childNodes.length].nodeType}catch(e){L={apply:O.length?function(e,t){N.apply(e,$.call(t))}:function(e,t){for(var n=e.length,i=0;e[n++]=t[i++];);e.length=n-1}}}function ae(e,t,i,o){var s,a,c,u,d,p,v,y=t&&t.ownerDocument,w=t?t.nodeType:9;if(i=i||[],"string"!=typeof e||!e||1!==w&&9!==w&&11!==w)return i;if(!o&&(h(t),t=t||f,m)){if(11!==w&&(d=J.exec(e)))if(s=d[1]){if(9===w){if(!(c=t.getElementById(s)))return i;if(c.id===s)return i.push(c),i}else if(y&&(c=y.getElementById(s))&&b(t,c)&&c.id===s)return i.push(c),i}else{if(d[2])return L.apply(i,t.getElementsByTagName(e)),i;if((s=d[3])&&n.getElementsByClassName&&t.getElementsByClassName)return L.apply(i,t.getElementsByClassName(s)),i}if(n.qsa&&!k[e+" "]&&(!g||!g.test(e))&&(1!==w||"object"!==t.nodeName.toLowerCase())){if(v=e,y=t,1===w&&(U.test(e)||q.test(e))){for((y=ee.test(e)&&ve(t.parentNode)||t)===t&&n.scope||((u=t.getAttribute("id"))?u=u.replace(ie,oe):t.setAttribute("id",u=_)),a=(p=r(e)).length;a--;)p[a]=(u?"#"+u:":scope")+" "+be(p[a]);v=p.join(",")}try{return L.apply(i,y.querySelectorAll(v)),i}catch(t){k(e,!0)}finally{u===_&&t.removeAttribute("id")}}}return l(e.replace(B,"$1"),t,i,o)}function le(){var e=[];return function t(n,o){return e.push(n+" ")>i.cacheLength&&delete t[e.shift()],t[n+" "]=o}}function ce(e){return e[_]=!0,e}function ue(e){var t=f.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function de(e,t){for(var n=e.split("|"),o=n.length;o--;)i.attrHandle[n[o]]=t}function he(e,t){var n=t&&e,i=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(i)return i;if(n)for(;n=n.nextSibling;)if(n===t)return-1;return e?1:-1}function fe(e){return function(t){return"input"===t.nodeName.toLowerCase()&&t.type===e}}function pe(e){return function(t){var n=t.nodeName.toLowerCase();return("input"===n||"button"===n)&&t.type===e}}function me(e){return function(t){return"form"in t?t.parentNode&&!1===t.disabled?"label"in t?"label"in 
t.parentNode?t.parentNode.disabled===e:t.disabled===e:t.isDisabled===e||t.isDisabled!==!e&&re(t)===e:t.disabled===e:"label"in t&&t.disabled===e}}function ge(e){return ce((function(t){return t=+t,ce((function(n,i){for(var o,s=e([],n.length,t),r=s.length;r--;)n[o=s[r]]&&(n[o]=!(i[o]=n[o]))}))}))}function ve(e){return e&&void 0!==e.getElementsByTagName&&e}for(t in n=ae.support={},s=ae.isXML=function(e){var t=e&&e.namespaceURI,n=e&&(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},h=ae.setDocument=function(e){var t,o,r=e?e.ownerDocument||e:w;return r!=f&&9===r.nodeType&&r.documentElement?(p=(f=r).documentElement,m=!s(f),w!=f&&(o=f.defaultView)&&o.top!==o&&(o.addEventListener?o.addEventListener("unload",se,!1):o.attachEvent&&o.attachEvent("onunload",se)),n.scope=ue((function(e){return p.appendChild(e).appendChild(f.createElement("div")),void 0!==e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length})),n.attributes=ue((function(e){return e.className="i",!e.getAttribute("className")})),n.getElementsByTagName=ue((function(e){return e.appendChild(f.createComment("")),!e.getElementsByTagName("*").length})),n.getElementsByClassName=Z.test(f.getElementsByClassName),n.getById=ue((function(e){return p.appendChild(e).id=_,!f.getElementsByName||!f.getElementsByName(_).length})),n.getById?(i.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},i.find.ID=function(e,t){if(void 0!==t.getElementById&&m){var n=t.getElementById(e);return n?[n]:[]}}):(i.filter.ID=function(e){var t=e.replace(te,ne);return function(e){var n=void 0!==e.getAttributeNode&&e.getAttributeNode("id");return n&&n.value===t}},i.find.ID=function(e,t){if(void 0!==t.getElementById&&m){var n,i,o,s=t.getElementById(e);if(s){if((n=s.getAttributeNode("id"))&&n.value===e)return[s];for(o=t.getElementsByName(e),i=0;s=o[i++];)if((n=s.getAttributeNode("id"))&&n.value===e)return[s]}return[]}}),i.find.TAG=n.getElementsByTagName?function(e,t){return void 0!==t.getElementsByTagName?t.getElementsByTagName(e):n.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,i=[],o=0,s=t.getElementsByTagName(e);if("*"===e){for(;n=s[o++];)1===n.nodeType&&i.push(n);return i}return s},i.find.CLASS=n.getElementsByClassName&&function(e,t){if(void 0!==t.getElementsByClassName&&m)return t.getElementsByClassName(e)},v=[],g=[],(n.qsa=Z.test(f.querySelectorAll))&&(ue((function(e){var t;p.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&g.push("[*^$]=[\\x20\\t\\r\\n\\f]*(?:''|\"\")"),e.querySelectorAll("[selected]").length||g.push("\\[[\\x20\\t\\r\\n\\f]*(?:value|"+P+")"),e.querySelectorAll("[id~="+_+"-]").length||g.push("~="),(t=f.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||g.push("\\[[\\x20\\t\\r\\n\\f]*name[\\x20\\t\\r\\n\\f]*=[\\x20\\t\\r\\n\\f]*(?:''|\"\")"),e.querySelectorAll(":checked").length||g.push(":checked"),e.querySelectorAll("a#"+_+"+*").length||g.push(".#.+[+~]"),e.querySelectorAll("\\\f"),g.push("[\\r\\n\\f]")})),ue((function(e){e.innerHTML="";var 
t=f.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&g.push("name[\\x20\\t\\r\\n\\f]*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&g.push(":enabled",":disabled"),p.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&g.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),g.push(",.*:")}))),(n.matchesSelector=Z.test(y=p.matches||p.webkitMatchesSelector||p.mozMatchesSelector||p.oMatchesSelector||p.msMatchesSelector))&&ue((function(e){n.disconnectedMatch=y.call(e,"*"),y.call(e,"[s!='']:x"),v.push("!=",R)})),g=g.length&&new RegExp(g.join("|")),v=v.length&&new RegExp(v.join("|")),t=Z.test(p.compareDocumentPosition),b=t||Z.test(p.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,i=t&&t.parentNode;return e===i||!(!i||1!==i.nodeType||!(n.contains?n.contains(i):e.compareDocumentPosition&&16&e.compareDocumentPosition(i)))}:function(e,t){if(t)for(;t=t.parentNode;)if(t===e)return!0;return!1},I=t?function(e,t){if(e===t)return d=!0,0;var i=!e.compareDocumentPosition-!t.compareDocumentPosition;return i||(1&(i=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!n.sortDetached&&t.compareDocumentPosition(e)===i?e==f||e.ownerDocument==w&&b(w,e)?-1:t==f||t.ownerDocument==w&&b(w,t)?1:u?z(u,e)-z(u,t):0:4&i?-1:1)}:function(e,t){if(e===t)return d=!0,0;var n,i=0,o=e.parentNode,s=t.parentNode,r=[e],a=[t];if(!o||!s)return e==f?-1:t==f?1:o?-1:s?1:u?z(u,e)-z(u,t):0;if(o===s)return he(e,t);for(n=e;n=n.parentNode;)r.unshift(n);for(n=t;n=n.parentNode;)a.unshift(n);for(;r[i]===a[i];)i++;return i?he(r[i],a[i]):r[i]==w?-1:a[i]==w?1:0},f):f},ae.matches=function(e,t){return ae(e,null,null,t)},ae.matchesSelector=function(e,t){if(h(e),n.matchesSelector&&m&&!k[t+" "]&&(!v||!v.test(t))&&(!g||!g.test(t)))try{var i=y.call(e,t);if(i||n.disconnectedMatch||e.document&&11!==e.document.nodeType)return i}catch(e){k(t,!0)}return ae(t,f,null,[e]).length>0},ae.contains=function(e,t){return(e.ownerDocument||e)!=f&&h(e),b(e,t)},ae.attr=function(e,t){(e.ownerDocument||e)!=f&&h(e);var o=i.attrHandle[t.toLowerCase()],s=o&&A.call(i.attrHandle,t.toLowerCase())?o(e,t,!m):void 0;return void 0!==s?s:n.attributes||!m?e.getAttribute(t):(s=e.getAttributeNode(t))&&s.specified?s.value:null},ae.escape=function(e){return(e+"").replace(ie,oe)},ae.error=function(e){throw new Error("Syntax error, unrecognized expression: "+e)},ae.uniqueSort=function(e){var t,i=[],o=0,s=0;if(d=!n.detectDuplicates,u=!n.sortStable&&e.slice(0),e.sort(I),d){for(;t=e[s++];)t===e[s]&&(o=i.push(s));for(;o--;)e.splice(i[o],1)}return u=null,e},o=ae.getText=function(e){var t,n="",i=0,s=e.nodeType;if(s){if(1===s||9===s||11===s){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=o(e)}else if(3===s||4===s)return e.nodeValue}else for(;t=e[i++];)n+=o(t);return n},i=ae.selectors={cacheLength:50,createPseudo:ce,match:G,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||ae.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&ae.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return 
G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&V.test(n)&&(t=r(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=C[e+" "];return t||(t=new RegExp("(^|[\\x20\\t\\r\\n\\f])"+e+"("+H+"|$)"),C(e,(function(e){return t.test("string"==typeof e.className&&e.className||void 0!==e.getAttribute&&e.getAttribute("class")||"")})))},ATTR:function(e,t,n){return function(i){var o=ae.attr(i,e);return null==o?"!="===t:!t||(o+="","="===t?o===n:"!="===t?o!==n:"^="===t?n&&0===o.indexOf(n):"*="===t?n&&o.indexOf(n)>-1:"$="===t?n&&o.slice(-n.length)===n:"~="===t?(" "+o.replace(F," ")+" ").indexOf(n)>-1:"|="===t&&(o===n||o.slice(0,n.length+1)===n+"-"))}},CHILD:function(e,t,n,i,o){var s="nth"!==e.slice(0,3),r="last"!==e.slice(-4),a="of-type"===t;return 1===i&&0===o?function(e){return!!e.parentNode}:function(t,n,l){var c,u,d,h,f,p,m=s!==r?"nextSibling":"previousSibling",g=t.parentNode,v=a&&t.nodeName.toLowerCase(),y=!l&&!a,b=!1;if(g){if(s){for(;m;){for(h=t;h=h[m];)if(a?h.nodeName.toLowerCase()===v:1===h.nodeType)return!1;p=m="only"===e&&!p&&"nextSibling"}return!0}if(p=[r?g.firstChild:g.lastChild],r&&y){for(b=(f=(c=(u=(d=(h=g)[_]||(h[_]={}))[h.uniqueID]||(d[h.uniqueID]={}))[e]||[])[0]===x&&c[1])&&c[2],h=f&&g.childNodes[f];h=++f&&h&&h[m]||(b=f=0)||p.pop();)if(1===h.nodeType&&++b&&h===t){u[e]=[x,f,b];break}}else if(y&&(b=f=(c=(u=(d=(h=t)[_]||(h[_]={}))[h.uniqueID]||(d[h.uniqueID]={}))[e]||[])[0]===x&&c[1]),!1===b)for(;(h=++f&&h&&h[m]||(b=f=0)||p.pop())&&((a?h.nodeName.toLowerCase()!==v:1!==h.nodeType)||!++b||(y&&((u=(d=h[_]||(h[_]={}))[h.uniqueID]||(d[h.uniqueID]={}))[e]=[x,b]),h!==t)););return(b-=o)===i||b%i==0&&b/i>=0}}},PSEUDO:function(e,t){var n,o=i.pseudos[e]||i.setFilters[e.toLowerCase()]||ae.error("unsupported pseudo: "+e);return o[_]?o(t):o.length>1?(n=[e,e,"",t],i.setFilters.hasOwnProperty(e.toLowerCase())?ce((function(e,n){for(var i,s=o(e,t),r=s.length;r--;)e[i=z(e,s[r])]=!(n[i]=s[r])})):function(e){return o(e,0,n)}):o}},pseudos:{not:ce((function(e){var t=[],n=[],i=a(e.replace(B,"$1"));return i[_]?ce((function(e,t,n,o){for(var s,r=i(e,null,o,[]),a=e.length;a--;)(s=r[a])&&(e[a]=!(t[a]=s))})):function(e,o,s){return t[0]=e,i(t,null,s,n),t[0]=null,!n.pop()}})),has:ce((function(e){return function(t){return ae(e,t).length>0}})),contains:ce((function(e){return e=e.replace(te,ne),function(t){return(t.textContent||o(t)).indexOf(e)>-1}})),lang:ce((function(e){return Q.test(e||"")||ae.error("unsupported lang: "+e),e=e.replace(te,ne).toLowerCase(),function(t){var n;do{if(n=m?t.lang:t.getAttribute("xml:lang")||t.getAttribute("lang"))return(n=n.toLowerCase())===e||0===n.indexOf(e+"-")}while((t=t.parentNode)&&1===t.nodeType);return!1}})),target:function(t){var n=e.location&&e.location.hash;return n&&n.slice(1)===t.id},root:function(e){return e===p},focus:function(e){return e===f.activeElement&&(!f.hasFocus||f.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:me(!1),disabled:me(!0),checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,!0===e.selected},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeType<6)return!1;return!0},parent:function(e){return!i.pseudos.empty(e)},header:function(e){return K.test(e.nodeName)},input:function(e){return 
X.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||"text"===t.toLowerCase())},first:ge((function(){return[0]})),last:ge((function(e,t){return[t-1]})),eq:ge((function(e,t,n){return[n<0?n+t:n]})),even:ge((function(e,t){for(var n=0;nt?t:n;--i>=0;)e.push(i);return e})),gt:ge((function(e,t,n){for(var i=n<0?n+t:n;++i1?function(t,n,i){for(var o=e.length;o--;)if(!e[o](t,n,i))return!1;return!0}:e[0]}function xe(e,t,n,i,o){for(var s,r=[],a=0,l=e.length,c=null!=t;a-1&&(s[c]=!(r[c]=d))}}else v=xe(v===r?v.splice(p,v.length):v),o?o(null,r,v,l):L.apply(r,v)}))}function Ce(e){for(var t,n,o,s=e.length,r=i.relative[e[0].type],a=r||i.relative[" "],l=r?1:0,u=_e((function(e){return e===t}),a,!0),d=_e((function(e){return z(t,e)>-1}),a,!0),h=[function(e,n,i){var o=!r&&(i||n!==c)||((t=n).nodeType?u(e,n,i):d(e,n,i));return t=null,o}];l1&&we(h),l>1&&be(e.slice(0,l-1).concat({value:" "===e[l-2].type?"*":""})).replace(B,"$1"),n,l0,o=e.length>0,s=function(s,r,a,l,u){var d,p,g,v=0,y="0",b=s&&[],_=[],w=c,E=s||o&&i.find.TAG("*",u),C=x+=null==w?1:Math.random()||.1,S=E.length;for(u&&(c=r==f||r||u);y!==S&&null!=(d=E[y]);y++){if(o&&d){for(p=0,r||d.ownerDocument==f||(h(d),a=!m);g=e[p++];)if(g(d,r||f,a)){l.push(d);break}u&&(x=C)}n&&((d=!g&&d)&&v--,s&&b.push(d))}if(v+=y,n&&y!==v){for(p=0;g=t[p++];)g(b,_,r,a);if(s){if(v>0)for(;y--;)b[y]||_[y]||(_[y]=D.call(l));_=xe(_)}L.apply(l,_),u&&!s&&_.length>0&&v+t.length>1&&ae.uniqueSort(l)}return u&&(x=C,c=w),b};return n?ce(s):s}(s,o)),a.selector=e}return a},l=ae.select=function(e,t,n,o){var s,l,c,u,d,h="function"==typeof e&&e,f=!o&&r(e=h.selector||e);if(n=n||[],1===f.length){if((l=f[0]=f[0].slice(0)).length>2&&"ID"===(c=l[0]).type&&9===t.nodeType&&m&&i.relative[l[1].type]){if(!(t=(i.find.ID(c.matches[0].replace(te,ne),t)||[])[0]))return n;h&&(t=t.parentNode),e=e.slice(l.shift().value.length)}for(s=G.needsContext.test(e)?0:l.length;s--&&(c=l[s],!i.relative[u=c.type]);)if((d=i.find[u])&&(o=d(c.matches[0].replace(te,ne),ee.test(l[0].type)&&ve(t.parentNode)||t))){if(l.splice(s,1),!(e=o.length&&be(l)))return L.apply(n,o),n;break}}return(h||a(e,f))(o,t,!m,n,!t||ee.test(e)&&ve(t.parentNode)||t),n},n.sortStable=_.split("").sort(I).join("")===_,n.detectDuplicates=!!d,h(),n.sortDetached=ue((function(e){return 1&e.compareDocumentPosition(f.createElement("fieldset"))})),ue((function(e){return e.innerHTML="","#"===e.firstChild.getAttribute("href")}))||de("type|href|height|width",(function(e,t,n){if(!n)return e.getAttribute(t,"type"===t.toLowerCase()?1:2)})),n.attributes&&ue((function(e){return e.innerHTML="",e.firstChild.setAttribute("value",""),""===e.firstChild.getAttribute("value")}))||de("value",(function(e,t,n){if(!n&&"input"===e.nodeName.toLowerCase())return e.defaultValue})),ue((function(e){return null==e.getAttribute("disabled")}))||de(P,(function(e,t,n){var i;if(!n)return!0===e[t]?t.toLowerCase():(i=e.getAttributeNode(t))&&i.specified?i.value:null})),ae}(e);w.find=E,w.expr=E.selectors,w.expr[":"]=w.expr.pseudos,w.uniqueSort=w.unique=E.uniqueSort,w.text=E.getText,w.isXMLDoc=E.isXML,w.contains=E.contains,w.escapeSelector=E.escape;var C=function(e,t,n){for(var i=[],o=void 0!==n;(e=e[t])&&9!==e.nodeType;)if(1===e.nodeType){if(o&&w(e).is(n))break;i.push(e)}return i},S=function(e,t){for(var n=[];e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n},T=w.expr.match.needsContext;function k(e,t){return 
e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()}var I=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function A(e,t,n){return p(t)?w.grep(e,(function(e,i){return!!t.call(e,i,e)!==n})):t.nodeType?w.grep(e,(function(e){return e===t!==n})):"string"!=typeof t?w.grep(e,(function(e){return a.call(t,e)>-1!==n})):w.filter(t,e,n)}w.filter=function(e,t,n){var i=t[0];return n&&(e=":not("+e+")"),1===t.length&&1===i.nodeType?w.find.matchesSelector(i,e)?[i]:[]:w.find.matches(e,w.grep(t,(function(e){return 1===e.nodeType})))},w.fn.extend({find:function(e){var t,n,i=this.length,o=this;if("string"!=typeof e)return this.pushStack(w(e).filter((function(){for(t=0;t1?w.uniqueSort(n):n},filter:function(e){return this.pushStack(A(this,e||[],!1))},not:function(e){return this.pushStack(A(this,e||[],!0))},is:function(e){return!!A(this,"string"==typeof e&&T.test(e)?w(e):e||[],!1).length}});var O,D=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/;(w.fn.init=function(e,t,n){var i,o;if(!e)return this;if(n=n||O,"string"==typeof e){if(!(i="<"===e[0]&&">"===e[e.length-1]&&e.length>=3?[null,e,null]:D.exec(e))||!i[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(i[1]){if(t=t instanceof w?t[0]:t,w.merge(this,w.parseHTML(i[1],t&&t.nodeType?t.ownerDocument||t:g,!0)),I.test(i[1])&&w.isPlainObject(t))for(i in t)p(this[i])?this[i](t[i]):this.attr(i,t[i]);return this}return(o=g.getElementById(i[2]))&&(this[0]=o,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):p(e)?void 0!==n.ready?n.ready(e):e(w):w.makeArray(e,this)}).prototype=w.fn,O=w(g);var N=/^(?:parents|prev(?:Until|All))/,L={children:!0,contents:!0,next:!0,prev:!0};function $(e,t){for(;(e=e[t])&&1!==e.nodeType;);return e}w.fn.extend({has:function(e){var t=w(e,this),n=t.length;return this.filter((function(){for(var e=0;e-1:1===n.nodeType&&w.find.matchesSelector(n,e))){s.push(n);break}return this.pushStack(s.length>1?w.uniqueSort(s):s)},index:function(e){return e?"string"==typeof e?a.call(w(e),this[0]):a.call(this,e.jquery?e[0]:e):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){return this.pushStack(w.uniqueSort(w.merge(this.get(),w(e,t))))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}}),w.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return C(e,"parentNode")},parentsUntil:function(e,t,n){return C(e,"parentNode",n)},next:function(e){return $(e,"nextSibling")},prev:function(e){return $(e,"previousSibling")},nextAll:function(e){return C(e,"nextSibling")},prevAll:function(e){return C(e,"previousSibling")},nextUntil:function(e,t,n){return C(e,"nextSibling",n)},prevUntil:function(e,t,n){return C(e,"previousSibling",n)},siblings:function(e){return S((e.parentNode||{}).firstChild,e)},children:function(e){return S(e.firstChild)},contents:function(e){return null!=e.contentDocument&&i(e.contentDocument)?e.contentDocument:(k(e,"template")&&(e=e.content||e),w.merge([],e.childNodes))}},(function(e,t){w.fn[e]=function(n,i){var o=w.map(this,t,n);return"Until"!==e.slice(-5)&&(i=n),i&&"string"==typeof i&&(o=w.filter(i,o)),this.length>1&&(L[e]||w.uniqueSort(o),N.test(e)&&o.reverse()),this.pushStack(o)}}));var z=/[^\x20\t\r\n\f]+/g;function P(e){return e}function H(e){throw e}function j(e,t,n,i){var o;try{e&&p(o=e.promise)?o.call(e).done(t).fail(n):e&&p(o=e.then)?o.call(e,t,n):t.apply(void 0,[e].slice(i))}catch(e){n.apply(void 0,[e])}}w.Callbacks=function(e){e="string"==typeof e?function(e){var t={};return 
w.each(e.match(z)||[],(function(e,n){t[n]=!0})),t}(e):w.extend({},e);var t,n,i,o,s=[],r=[],a=-1,l=function(){for(o=o||e.once,i=t=!0;r.length;a=-1)for(n=r.shift();++a-1;)s.splice(n,1),n<=a&&a--})),this},has:function(e){return e?w.inArray(e,s)>-1:s.length>0},empty:function(){return s&&(s=[]),this},disable:function(){return o=r=[],s=n="",this},disabled:function(){return!s},lock:function(){return o=r=[],n||t||(s=n=""),this},locked:function(){return!!o},fireWith:function(e,n){return o||(n=[e,(n=n||[]).slice?n.slice():n],r.push(n),t||l()),this},fire:function(){return c.fireWith(this,arguments),this},fired:function(){return!!i}};return c},w.extend({Deferred:function(t){var n=[["notify","progress",w.Callbacks("memory"),w.Callbacks("memory"),2],["resolve","done",w.Callbacks("once memory"),w.Callbacks("once memory"),0,"resolved"],["reject","fail",w.Callbacks("once memory"),w.Callbacks("once memory"),1,"rejected"]],i="pending",o={state:function(){return i},always:function(){return s.done(arguments).fail(arguments),this},catch:function(e){return o.then(null,e)},pipe:function(){var e=arguments;return w.Deferred((function(t){w.each(n,(function(n,i){var o=p(e[i[4]])&&e[i[4]];s[i[1]]((function(){var e=o&&o.apply(this,arguments);e&&p(e.promise)?e.promise().progress(t.notify).done(t.resolve).fail(t.reject):t[i[0]+"With"](this,o?[e]:arguments)}))})),e=null})).promise()},then:function(t,i,o){var s=0;function r(t,n,i,o){return function(){var a=this,l=arguments,c=function(){var e,c;if(!(t=s&&(i!==H&&(a=void 0,l=[e]),n.rejectWith(a,l))}};t?u():(w.Deferred.getStackHook&&(u.stackTrace=w.Deferred.getStackHook()),e.setTimeout(u))}}return w.Deferred((function(e){n[0][3].add(r(0,e,p(o)?o:P,e.notifyWith)),n[1][3].add(r(0,e,p(t)?t:P)),n[2][3].add(r(0,e,p(i)?i:H))})).promise()},promise:function(e){return null!=e?w.extend(e,o):o}},s={};return w.each(n,(function(e,t){var r=t[2],a=t[5];o[t[1]]=r.add,a&&r.add((function(){i=a}),n[3-e][2].disable,n[3-e][3].disable,n[0][2].lock,n[0][3].lock),r.add(t[3].fire),s[t[0]]=function(){return s[t[0]+"With"](this===s?void 0:this,arguments),this},s[t[0]+"With"]=r.fireWith})),o.promise(s),t&&t.call(s,s),s},when:function(e){var t=arguments.length,n=t,i=Array(n),s=o.call(arguments),r=w.Deferred(),a=function(e){return function(n){i[e]=this,s[e]=arguments.length>1?o.call(arguments):n,--t||r.resolveWith(i,s)}};if(t<=1&&(j(e,r.done(a(n)).resolve,r.reject,!t),"pending"===r.state()||p(s[n]&&s[n].then)))return r.then();for(;n--;)j(s[n],a(n),r.reject);return r.promise()}});var M=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;w.Deferred.exceptionHook=function(t,n){e.console&&e.console.warn&&t&&M.test(t.name)&&e.console.warn("jQuery.Deferred exception: "+t.message,t.stack,n)},w.readyException=function(t){e.setTimeout((function(){throw t}))};var R=w.Deferred();function F(){g.removeEventListener("DOMContentLoaded",F),e.removeEventListener("load",F),w.ready()}w.fn.ready=function(e){return R.then(e).catch((function(e){w.readyException(e)})),this},w.extend({isReady:!1,readyWait:1,ready:function(e){(!0===e?--w.readyWait:w.isReady)||(w.isReady=!0,!0!==e&&--w.readyWait>0||R.resolveWith(g,[w]))}}),w.ready.then=R.then,"complete"===g.readyState||"loading"!==g.readyState&&!g.documentElement.doScroll?e.setTimeout(w.ready):(g.addEventListener("DOMContentLoaded",F),e.addEventListener("load",F));var B=function(e,t,n,i,o,s,r){var a=0,l=e.length,c=null==n;if("object"===b(n))for(a in o=!0,n)B(e,t,a,n[a],!0,s,r);else if(void 0!==i&&(o=!0,p(i)||(r=!0),c&&(r?(t.call(e,i),t=null):(c=t,t=function(e,t,n){return 
c.call(w(e),n)})),t))for(;a1,null,!0)},removeData:function(e){return this.each((function(){X.remove(this,e)}))}}),w.extend({queue:function(e,t,n){var i;if(e)return t=(t||"fx")+"queue",i=Y.get(e,t),n&&(!i||Array.isArray(n)?i=Y.access(e,t,w.makeArray(n)):i.push(n)),i||[]},dequeue:function(e,t){t=t||"fx";var n=w.queue(e,t),i=n.length,o=n.shift(),s=w._queueHooks(e,t);"inprogress"===o&&(o=n.shift(),i--),o&&("fx"===t&&n.unshift("inprogress"),delete s.stop,o.call(e,(function(){w.dequeue(e,t)}),s)),!i&&s&&s.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return Y.get(e,n)||Y.access(e,n,{empty:w.Callbacks("once memory").add((function(){Y.remove(e,[t+"queue",n])}))})}}),w.fn.extend({queue:function(e,t){var n=2;return"string"!=typeof e&&(t=e,e="fx",n--),arguments.length\x20\t\r\n\f]*)/i,me=/^$|^module$|\/(?:java|ecma)script/i;de=g.createDocumentFragment().appendChild(g.createElement("div")),(he=g.createElement("input")).setAttribute("type","radio"),he.setAttribute("checked","checked"),he.setAttribute("name","t"),de.appendChild(he),f.checkClone=de.cloneNode(!0).cloneNode(!0).lastChild.checked,de.innerHTML="",f.noCloneChecked=!!de.cloneNode(!0).lastChild.defaultValue,de.innerHTML="",f.option=!!de.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n=void 0!==e.getElementsByTagName?e.getElementsByTagName(t||"*"):void 0!==e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&k(e,t)?w.merge([e],n):n}function ye(e,t){for(var n=0,i=e.length;n",""]);var be=/<|&#?\w+;/;function _e(e,t,n,i,o){for(var s,r,a,l,c,u,d=t.createDocumentFragment(),h=[],f=0,p=e.length;f-1)o&&o.push(s);else if(c=oe(s),r=ve(d.appendChild(s),"script"),c&&ye(r),n)for(u=0;s=r[u++];)me.test(s.type||"")&&n.push(s);return d}var we=/^([^.]*)(?:\.(.+)|)/;function xe(){return!0}function Ee(){return!1}function Ce(e,t){return e===function(){try{return g.activeElement}catch(e){}}()==("focus"===t)}function Se(e,t,n,i,o,s){var r,a;if("object"==typeof t){for(a in"string"!=typeof n&&(i=i||n,n=void 0),t)Se(e,a,n,i,t[a],s);return e}if(null==i&&null==o?(o=n,i=n=void 0):null==o&&("string"==typeof n?(o=i,i=void 0):(o=i,i=n,n=void 0)),!1===o)o=Ee;else if(!o)return e;return 1===s&&(r=o,o=function(e){return w().off(e),r.apply(this,arguments)},o.guid=r.guid||(r.guid=w.guid++)),e.each((function(){w.event.add(this,t,o,i,n)}))}function Te(e,t,n){n?(Y.set(e,t,!1),w.event.add(e,t,{namespace:!1,handler:function(e){var i,s,r=Y.get(this,t);if(1&e.isTrigger&&this[t]){if(r.length)(w.event.special[t]||{}).delegateType&&e.stopPropagation();else if(r=o.call(arguments),Y.set(this,t,r),i=n(this,t),this[t](),r!==(s=Y.get(this,t))||i?Y.set(this,t,!1):s={},r!==s)return e.stopImmediatePropagation(),e.preventDefault(),s&&s.value}else r.length&&(Y.set(this,t,{value:w.event.trigger(w.extend(r[0],w.Event.prototype),r.slice(1),this)}),e.stopImmediatePropagation())}})):void 0===Y.get(e,t)&&w.event.add(e,t,xe)}w.event={global:{},add:function(e,t,n,i,o){var s,r,a,l,c,u,d,h,f,p,m,g=Y.get(e);if(Q(e))for(n.handler&&(n=(s=n).handler,o=s.selector),o&&w.find.matchesSelector(ie,o),n.guid||(n.guid=w.guid++),(l=g.events)||(l=g.events=Object.create(null)),(r=g.handle)||(r=g.handle=function(t){return void 0!==w&&w.event.triggered!==t.type?w.event.dispatch.apply(e,arguments):void 0}),c=(t=(t||"").match(z)||[""]).length;c--;)f=m=(a=we.exec(t[c])||[])[1],p=(a[2]||"").split(".").sort(),f&&(d=w.event.special[f]||{},f=(o?d.delegateType:d.bindType)||f,d=w.event.special[f]||{},u=w.extend({type:f,origType:m,data:i,handler:n,guid:n.guid,selector:o,needsContext:o&&w.expr.match.needsContext.test(o),namespace:p.join(".")},s),(h=l[f])||((h=l[f]=[]).delegateCount=0,d.setup&&!1!==d.setup.call(e,i,p,r)||e.addEventListener&&e.addEventListener(f,r)),d.add&&(d.add.call(e,u),u.handler.guid||(u.handler.guid=n.guid)),o?h.splice(h.delegateCount++,0,u):h.push(u),w.event.global[f]=!0)},remove:function(e,t,n,i,o){var s,r,a,l,c,u,d,h,f,p,m,g=Y.hasData(e)&&Y.get(e);if(g&&(l=g.events)){for(c=(t=(t||"").match(z)||[""]).length;c--;)if(f=m=(a=we.exec(t[c])||[])[1],p=(a[2]||"").split(".").sort(),f){for(d=w.event.special[f]||{},h=l[f=(i?d.delegateType:d.bindType)||f]||[],a=a[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),r=s=h.length;s--;)u=h[s],!o&&m!==u.origType||n&&n.guid!==u.guid||a&&!a.test(u.namespace)||i&&i!==u.selector&&("**"!==i||!u.selector)||(h.splice(s,1),u.selector&&h.delegateCount--,d.remove&&d.remove.call(e,u));r&&!h.length&&(d.teardown&&!1!==d.teardown.call(e,p,g.handle)||w.removeEvent(e,f,g.handle),delete l[f])}else for(f in l)w.event.remove(e,f+t[c],n,i,!0);w.isEmptyObject(l)&&Y.remove(e,"handle events")}},dispatch:function(e){var t,n,i,o,s,r,a=new 
Array(arguments.length),l=w.event.fix(e),c=(Y.get(this,"events")||Object.create(null))[l.type]||[],u=w.event.special[l.type]||{};for(a[0]=l,t=1;t=1))for(;c!==this;c=c.parentNode||this)if(1===c.nodeType&&("click"!==e.type||!0!==c.disabled)){for(s=[],r={},n=0;n-1:w.find(o,this,null,[c]).length),r[o]&&s.push(i);s.length&&a.push({elem:c,handlers:s})}return c=this,l\s*$/g;function Oe(e,t){return k(e,"table")&&k(11!==t.nodeType?t:t.firstChild,"tr")&&w(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Ne(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,i,o,s,r,a;if(1===t.nodeType){if(Y.hasData(e)&&(a=Y.get(e).events))for(o in Y.remove(t,"handle events"),a)for(n=0,i=a[o].length;n1&&"string"==typeof g&&!f.checkClone&&Ie.test(g))return e.each((function(o){var s=e.eq(o);v&&(t[0]=g.call(this,o,s.html())),ze(s,t,n,i)}));if(h&&(r=(o=_e(t,e[0].ownerDocument,!1,e,i)).firstChild,1===o.childNodes.length&&(o=r),r||i)){for(l=(a=w.map(ve(o,"script"),De)).length;d0&&ye(r,!l&&ve(e,"script")),a},cleanData:function(e){for(var t,n,i,o=w.event.special,s=0;void 0!==(n=e[s]);s++)if(Q(n)){if(t=n[Y.expando]){if(t.events)for(i in t.events)o[i]?w.event.remove(n,i):w.removeEvent(n,i,t.handle);n[Y.expando]=void 0}n[X.expando]&&(n[X.expando]=void 0)}}}),w.fn.extend({detach:function(e){return Pe(this,e,!0)},remove:function(e){return Pe(this,e)},text:function(e){return B(this,(function(e){return void 0===e?w.text(this):this.empty().each((function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=e)}))}),null,e,arguments.length)},append:function(){return ze(this,arguments,(function(e){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||Oe(this,e).appendChild(e)}))},prepend:function(){return ze(this,arguments,(function(e){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var t=Oe(this,e);t.insertBefore(e,t.firstChild)}}))},before:function(){return ze(this,arguments,(function(e){this.parentNode&&this.parentNode.insertBefore(e,this)}))},after:function(){return ze(this,arguments,(function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)}))},empty:function(){for(var e,t=0;null!=(e=this[t]);t++)1===e.nodeType&&(w.cleanData(ve(e,!1)),e.textContent="");return this},clone:function(e,t){return e=null!=e&&e,t=null==t?e:t,this.map((function(){return w.clone(this,e,t)}))},html:function(e){return B(this,(function(e){var t=this[0]||{},n=0,i=this.length;if(void 0===e&&1===t.nodeType)return t.innerHTML;if("string"==typeof e&&!ke.test(e)&&!ge[(pe.exec(e)||["",""])[1].toLowerCase()]){e=w.htmlPrefilter(e);try{for(;n=0&&(l+=Math.max(0,Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-s-l-a-.5))||0),l}function Je(e,t,n){var i=je(e),o=(!f.boxSizingReliable()||n)&&"border-box"===w.css(e,"boxSizing",!1,i),s=o,r=Fe(e,t,i),a="offset"+t[0].toUpperCase()+t.slice(1);if(He.test(r)){if(!n)return r;r="auto"}return(!f.boxSizingReliable()&&o||!f.reliableTrDimensions()&&k(e,"tr")||"auto"===r||!parseFloat(r)&&"inline"===w.css(e,"display",!1,i))&&e.getClientRects().length&&(o="border-box"===w.css(e,"boxSizing",!1,i),(s=a in e)&&(r=e[a])),(r=parseFloat(r)||0)+Ze(e,t,n||(o?"border":"content"),s,i,r)+"px"}function et(e,t,n,i,o){return new et.prototype.init(e,t,n,i,o)}w.extend({cssHooks:{opacity:{get:function(e,t){if(t){var 
n=Fe(e,"opacity");return""===n?"1":n}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,gridArea:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnStart:!0,gridRow:!0,gridRowEnd:!0,gridRowStart:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{},style:function(e,t,n,i){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var o,s,r,a=V(t),l=Ge.test(t),c=e.style;if(l||(t=Ve(a)),r=w.cssHooks[t]||w.cssHooks[a],void 0===n)return r&&"get"in r&&void 0!==(o=r.get(e,!1,i))?o:c[t];"string"===(s=typeof n)&&(o=te.exec(n))&&o[1]&&(n=ae(e,t,o),s="number"),null!=n&&n==n&&("number"!==s||l||(n+=o&&o[3]||(w.cssNumber[a]?"":"px")),f.clearCloneStyle||""!==n||0!==t.indexOf("background")||(c[t]="inherit"),r&&"set"in r&&void 0===(n=r.set(e,n,i))||(l?c.setProperty(t,n):c[t]=n))}},css:function(e,t,n,i){var o,s,r,a=V(t);return Ge.test(t)||(t=Ve(a)),(r=w.cssHooks[t]||w.cssHooks[a])&&"get"in r&&(o=r.get(e,!0,n)),void 0===o&&(o=Fe(e,t,i)),"normal"===o&&t in Xe&&(o=Xe[t]),""===n||n?(s=parseFloat(o),!0===n||isFinite(s)?s||0:o):o}}),w.each(["height","width"],(function(e,t){w.cssHooks[t]={get:function(e,n,i){if(n)return!Qe.test(w.css(e,"display"))||e.getClientRects().length&&e.getBoundingClientRect().width?Je(e,t,i):Me(e,Ye,(function(){return Je(e,t,i)}))},set:function(e,n,i){var o,s=je(e),r=!f.scrollboxSize()&&"absolute"===s.position,a=(r||i)&&"border-box"===w.css(e,"boxSizing",!1,s),l=i?Ze(e,t,i,a,s):0;return a&&r&&(l-=Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-parseFloat(s[t])-Ze(e,t,"border",!1,s)-.5)),l&&(o=te.exec(n))&&"px"!==(o[3]||"px")&&(e.style[t]=n,n=w.css(e,t)),Ke(0,n,l)}}})),w.cssHooks.marginLeft=Be(f.reliableMarginLeft,(function(e,t){if(t)return(parseFloat(Fe(e,"marginLeft"))||e.getBoundingClientRect().left-Me(e,{marginLeft:0},(function(){return e.getBoundingClientRect().left})))+"px"})),w.each({margin:"",padding:"",border:"Width"},(function(e,t){w.cssHooks[e+t]={expand:function(n){for(var i=0,o={},s="string"==typeof n?n.split(" "):[n];i<4;i++)o[e+ne[i]+t]=s[i]||s[i-2]||s[0];return o}},"margin"!==e&&(w.cssHooks[e+t].set=Ke)})),w.fn.extend({css:function(e,t){return B(this,(function(e,t,n){var i,o,s={},r=0;if(Array.isArray(t)){for(i=je(e),o=t.length;r1)}}),w.Tween=et,et.prototype={constructor:et,init:function(e,t,n,i,o,s){this.elem=e,this.prop=n,this.easing=o||w.easing._default,this.options=t,this.start=this.now=this.cur(),this.end=i,this.unit=s||(w.cssNumber[n]?"":"px")},cur:function(){var e=et.propHooks[this.prop];return e&&e.get?e.get(this):et.propHooks._default.get(this)},run:function(e){var t,n=et.propHooks[this.prop];return this.options.duration?this.pos=t=w.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):this.pos=t=e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):et.propHooks._default.set(this),this}},et.prototype.init.prototype=et.prototype,et.propHooks={_default:{get:function(e){var t;return 1!==e.elem.nodeType||null!=e.elem[e.prop]&&null==e.elem.style[e.prop]?e.elem[e.prop]:(t=w.css(e.elem,e.prop,""))&&"auto"!==t?t:0},set:function(e){w.fx.step[e.prop]?w.fx.step[e.prop](e):1!==e.elem.nodeType||!w.cssHooks[e.prop]&&null==e.elem.style[Ve(e.prop)]?e.elem[e.prop]=e.now:w.style(e.elem,e.prop,e.now+e.unit)}}},et.propHooks.scrollTop=et.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},w.easing={linear:function(e){return 
e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},_default:"swing"},w.fx=et.prototype.init,w.fx.step={};var tt,nt,it=/^(?:toggle|show|hide)$/,ot=/queueHooks$/;function st(){nt&&(!1===g.hidden&&e.requestAnimationFrame?e.requestAnimationFrame(st):e.setTimeout(st,w.fx.interval),w.fx.tick())}function rt(){return e.setTimeout((function(){tt=void 0})),tt=Date.now()}function at(e,t){var n,i=0,o={height:e};for(t=t?1:0;i<4;i+=2-t)o["margin"+(n=ne[i])]=o["padding"+n]=e;return t&&(o.opacity=o.width=e),o}function lt(e,t,n){for(var i,o=(ct.tweeners[t]||[]).concat(ct.tweeners["*"]),s=0,r=o.length;s1)},removeAttr:function(e){return this.each((function(){w.removeAttr(this,e)}))}}),w.extend({attr:function(e,t,n){var i,o,s=e.nodeType;if(3!==s&&8!==s&&2!==s)return void 0===e.getAttribute?w.prop(e,t,n):(1===s&&w.isXMLDoc(e)||(o=w.attrHooks[t.toLowerCase()]||(w.expr.match.bool.test(t)?ut:void 0)),void 0!==n?null===n?void w.removeAttr(e,t):o&&"set"in o&&void 0!==(i=o.set(e,n,t))?i:(e.setAttribute(t,n+""),n):o&&"get"in o&&null!==(i=o.get(e,t))?i:null==(i=w.find.attr(e,t))?void 0:i)},attrHooks:{type:{set:function(e,t){if(!f.radioValue&&"radio"===t&&k(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},removeAttr:function(e,t){var n,i=0,o=t&&t.match(z);if(o&&1===e.nodeType)for(;n=o[i++];)e.removeAttribute(n)}}),ut={set:function(e,t,n){return!1===t?w.removeAttr(e,n):e.setAttribute(n,n),n}},w.each(w.expr.match.bool.source.match(/\w+/g),(function(e,t){var n=dt[t]||w.find.attr;dt[t]=function(e,t,i){var o,s,r=t.toLowerCase();return i||(s=dt[r],dt[r]=o,o=null!=n(e,t,i)?r:null,dt[r]=s),o}}));var ht=/^(?:input|select|textarea|button)$/i,ft=/^(?:a|area)$/i;function pt(e){return(e.match(z)||[]).join(" ")}function mt(e){return e.getAttribute&&e.getAttribute("class")||""}function gt(e){return Array.isArray(e)?e:"string"==typeof e&&e.match(z)||[]}w.fn.extend({prop:function(e,t){return B(this,w.prop,e,t,arguments.length>1)},removeProp:function(e){return this.each((function(){delete this[w.propFix[e]||e]}))}}),w.extend({prop:function(e,t,n){var i,o,s=e.nodeType;if(3!==s&&8!==s&&2!==s)return 1===s&&w.isXMLDoc(e)||(t=w.propFix[t]||t,o=w.propHooks[t]),void 0!==n?o&&"set"in o&&void 0!==(i=o.set(e,n,t))?i:e[t]=n:o&&"get"in o&&null!==(i=o.get(e,t))?i:e[t]},propHooks:{tabIndex:{get:function(e){var t=w.find.attr(e,"tabindex");return t?parseInt(t,10):ht.test(e.nodeName)||ft.test(e.nodeName)&&e.href?0:-1}}},propFix:{for:"htmlFor",class:"className"}}),f.optSelected||(w.propHooks.selected={get:function(e){var t=e.parentNode;return t&&t.parentNode&&t.parentNode.selectedIndex,null},set:function(e){var t=e.parentNode;t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex)}}),w.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],(function(){w.propFix[this.toLowerCase()]=this})),w.fn.extend({addClass:function(e){var t,n,i,o,s,r,a,l=0;if(p(e))return this.each((function(t){w(this).addClass(e.call(this,t,mt(this)))}));if((t=gt(e)).length)for(;n=this[l++];)if(o=mt(n),i=1===n.nodeType&&" "+pt(o)+" "){for(r=0;s=t[r++];)i.indexOf(" "+s+" ")<0&&(i+=s+" ");o!==(a=pt(i))&&n.setAttribute("class",a)}return this},removeClass:function(e){var t,n,i,o,s,r,a,l=0;if(p(e))return this.each((function(t){w(this).removeClass(e.call(this,t,mt(this)))}));if(!arguments.length)return this.attr("class","");if((t=gt(e)).length)for(;n=this[l++];)if(o=mt(n),i=1===n.nodeType&&" "+pt(o)+" "){for(r=0;s=t[r++];)for(;i.indexOf(" "+s+" ")>-1;)i=i.replace(" "+s+" "," 
");o!==(a=pt(i))&&n.setAttribute("class",a)}return this},toggleClass:function(e,t){var n=typeof e,i="string"===n||Array.isArray(e);return"boolean"==typeof t&&i?t?this.addClass(e):this.removeClass(e):p(e)?this.each((function(n){w(this).toggleClass(e.call(this,n,mt(this),t),t)})):this.each((function(){var t,o,s,r;if(i)for(o=0,s=w(this),r=gt(e);t=r[o++];)s.hasClass(t)?s.removeClass(t):s.addClass(t);else void 0!==e&&"boolean"!==n||((t=mt(this))&&Y.set(this,"__className__",t),this.setAttribute&&this.setAttribute("class",t||!1===e?"":Y.get(this,"__className__")||""))}))},hasClass:function(e){var t,n,i=0;for(t=" "+e+" ";n=this[i++];)if(1===n.nodeType&&(" "+pt(mt(n))+" ").indexOf(t)>-1)return!0;return!1}});var vt=/\r/g;w.fn.extend({val:function(e){var t,n,i,o=this[0];return arguments.length?(i=p(e),this.each((function(n){var o;1===this.nodeType&&(null==(o=i?e.call(this,n,w(this).val()):e)?o="":"number"==typeof o?o+="":Array.isArray(o)&&(o=w.map(o,(function(e){return null==e?"":e+""}))),(t=w.valHooks[this.type]||w.valHooks[this.nodeName.toLowerCase()])&&"set"in t&&void 0!==t.set(this,o,"value")||(this.value=o))}))):o?(t=w.valHooks[o.type]||w.valHooks[o.nodeName.toLowerCase()])&&"get"in t&&void 0!==(n=t.get(o,"value"))?n:"string"==typeof(n=o.value)?n.replace(vt,""):null==n?"":n:void 0}}),w.extend({valHooks:{option:{get:function(e){var t=w.find.attr(e,"value");return null!=t?t:pt(w.text(e))}},select:{get:function(e){var t,n,i,o=e.options,s=e.selectedIndex,r="select-one"===e.type,a=r?null:[],l=r?s+1:o.length;for(i=s<0?l:r?s:0;i-1)&&(n=!0);return n||(e.selectedIndex=-1),s}}}}),w.each(["radio","checkbox"],(function(){w.valHooks[this]={set:function(e,t){if(Array.isArray(t))return e.checked=w.inArray(w(e).val(),t)>-1}},f.checkOn||(w.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})})),f.focusin="onfocusin"in e;var yt=/^(?:focusinfocus|focusoutblur)$/,bt=function(e){e.stopPropagation()};w.extend(w.event,{trigger:function(t,n,i,o){var s,r,a,l,c,d,h,f,v=[i||g],y=u.call(t,"type")?t.type:t,b=u.call(t,"namespace")?t.namespace.split("."):[];if(r=f=a=i=i||g,3!==i.nodeType&&8!==i.nodeType&&!yt.test(y+w.event.triggered)&&(y.indexOf(".")>-1&&(b=y.split("."),y=b.shift(),b.sort()),c=y.indexOf(":")<0&&"on"+y,(t=t[w.expando]?t:new w.Event(y,"object"==typeof t&&t)).isTrigger=o?2:3,t.namespace=b.join("."),t.rnamespace=t.namespace?new RegExp("(^|\\.)"+b.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,t.result=void 0,t.target||(t.target=i),n=null==n?[t]:w.makeArray(n,[t]),h=w.event.special[y]||{},o||!h.trigger||!1!==h.trigger.apply(i,n))){if(!o&&!h.noBubble&&!m(i)){for(l=h.delegateType||y,yt.test(l+y)||(r=r.parentNode);r;r=r.parentNode)v.push(r),a=r;a===(i.ownerDocument||g)&&v.push(a.defaultView||a.parentWindow||e)}for(s=0;(r=v[s++])&&!t.isPropagationStopped();)f=r,t.type=s>1?l:h.bindType||y,(d=(Y.get(r,"events")||Object.create(null))[t.type]&&Y.get(r,"handle"))&&d.apply(r,n),(d=c&&r[c])&&d.apply&&Q(r)&&(t.result=d.apply(r,n),!1===t.result&&t.preventDefault());return t.type=y,o||t.isDefaultPrevented()||h._default&&!1!==h._default.apply(v.pop(),n)||!Q(i)||c&&p(i[y])&&!m(i)&&((a=i[c])&&(i[c]=null),w.event.triggered=y,t.isPropagationStopped()&&f.addEventListener(y,bt),i[y](),t.isPropagationStopped()&&f.removeEventListener(y,bt),w.event.triggered=void 0,a&&(i[c]=a)),t.result}},simulate:function(e,t,n){var i=w.extend(new w.Event,n,{type:e,isSimulated:!0});w.event.trigger(i,null,t)}}),w.fn.extend({trigger:function(e,t){return 
this.each((function(){w.event.trigger(e,t,this)}))},triggerHandler:function(e,t){var n=this[0];if(n)return w.event.trigger(e,t,n,!0)}}),f.focusin||w.each({focus:"focusin",blur:"focusout"},(function(e,t){var n=function(e){w.event.simulate(t,e.target,w.event.fix(e))};w.event.special[t]={setup:function(){var i=this.ownerDocument||this.document||this,o=Y.access(i,t);o||i.addEventListener(e,n,!0),Y.access(i,t,(o||0)+1)},teardown:function(){var i=this.ownerDocument||this.document||this,o=Y.access(i,t)-1;o?Y.access(i,t,o):(i.removeEventListener(e,n,!0),Y.remove(i,t))}}}));var _t=e.location,wt={guid:Date.now()},xt=/\?/;w.parseXML=function(t){var n,i;if(!t||"string"!=typeof t)return null;try{n=(new e.DOMParser).parseFromString(t,"text/xml")}catch(e){}return i=n&&n.getElementsByTagName("parsererror")[0],n&&!i||w.error("Invalid XML: "+(i?w.map(i.childNodes,(function(e){return e.textContent})).join("\n"):t)),n};var Et=/\[\]$/,Ct=/\r?\n/g,St=/^(?:submit|button|image|reset|file)$/i,Tt=/^(?:input|select|textarea|keygen)/i;function kt(e,t,n,i){var o;if(Array.isArray(t))w.each(t,(function(t,o){n||Et.test(e)?i(e,o):kt(e+"["+("object"==typeof o&&null!=o?t:"")+"]",o,n,i)}));else if(n||"object"!==b(t))i(e,t);else for(o in t)kt(e+"["+o+"]",t[o],n,i)}w.param=function(e,t){var n,i=[],o=function(e,t){var n=p(t)?t():t;i[i.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?"":n)};if(null==e)return"";if(Array.isArray(e)||e.jquery&&!w.isPlainObject(e))w.each(e,(function(){o(this.name,this.value)}));else for(n in e)kt(n,e[n],t,o);return i.join("&")},w.fn.extend({serialize:function(){return w.param(this.serializeArray())},serializeArray:function(){return this.map((function(){var e=w.prop(this,"elements");return e?w.makeArray(e):this})).filter((function(){var e=this.type;return this.name&&!w(this).is(":disabled")&&Tt.test(this.nodeName)&&!St.test(e)&&(this.checked||!fe.test(e))})).map((function(e,t){var n=w(this).val();return null==n?null:Array.isArray(n)?w.map(n,(function(e){return{name:t.name,value:e.replace(Ct,"\r\n")}})):{name:t.name,value:n.replace(Ct,"\r\n")}})).get()}});var It=/%20/g,At=/#.*$/,Ot=/([?&])_=[^&]*/,Dt=/^(.*?):[ \t]*([^\r\n]*)$/gm,Nt=/^(?:GET|HEAD)$/,Lt=/^\/\//,$t={},zt={},Pt="*/".concat("*"),Ht=g.createElement("a");function jt(e){return function(t,n){"string"!=typeof t&&(n=t,t="*");var i,o=0,s=t.toLowerCase().match(z)||[];if(p(n))for(;i=s[o++];)"+"===i[0]?(i=i.slice(1)||"*",(e[i]=e[i]||[]).unshift(n)):(e[i]=e[i]||[]).push(n)}}function Mt(e,t,n,i){var o={},s=e===zt;function r(a){var l;return o[a]=!0,w.each(e[a]||[],(function(e,a){var c=a(t,n,i);return"string"!=typeof c||s||o[c]?s?!(l=c):void 0:(t.dataTypes.unshift(c),r(c),!1)})),l}return r(t.dataTypes[0])||!o["*"]&&r("*")}function Rt(e,t){var n,i,o=w.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((o[n]?e:i||(i={}))[n]=t[n]);return i&&w.extend(!0,e,i),e}Ht.href=_t.href,w.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:_t.href,type:"GET",isLocal:/^(?:about|app|app-storage|.+-extension|file|res|widget):$/.test(_t.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Pt,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":w.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return 
t?Rt(Rt(e,w.ajaxSettings),t):Rt(w.ajaxSettings,e)},ajaxPrefilter:jt($t),ajaxTransport:jt(zt),ajax:function(t,n){"object"==typeof t&&(n=t,t=void 0),n=n||{};var i,o,s,r,a,l,c,u,d,h,f=w.ajaxSetup({},n),p=f.context||f,m=f.context&&(p.nodeType||p.jquery)?w(p):w.event,v=w.Deferred(),y=w.Callbacks("once memory"),b=f.statusCode||{},_={},x={},E="canceled",C={readyState:0,getResponseHeader:function(e){var t;if(c){if(!r)for(r={};t=Dt.exec(s);)r[t[1].toLowerCase()+" "]=(r[t[1].toLowerCase()+" "]||[]).concat(t[2]);t=r[e.toLowerCase()+" "]}return null==t?null:t.join(", ")},getAllResponseHeaders:function(){return c?s:null},setRequestHeader:function(e,t){return null==c&&(e=x[e.toLowerCase()]=x[e.toLowerCase()]||e,_[e]=t),this},overrideMimeType:function(e){return null==c&&(f.mimeType=e),this},statusCode:function(e){var t;if(e)if(c)C.always(e[C.status]);else for(t in e)b[t]=[b[t],e[t]];return this},abort:function(e){var t=e||E;return i&&i.abort(t),S(0,t),this}};if(v.promise(C),f.url=((t||f.url||_t.href)+"").replace(Lt,_t.protocol+"//"),f.type=n.method||n.type||f.method||f.type,f.dataTypes=(f.dataType||"*").toLowerCase().match(z)||[""],null==f.crossDomain){l=g.createElement("a");try{l.href=f.url,l.href=l.href,f.crossDomain=Ht.protocol+"//"+Ht.host!=l.protocol+"//"+l.host}catch(e){f.crossDomain=!0}}if(f.data&&f.processData&&"string"!=typeof f.data&&(f.data=w.param(f.data,f.traditional)),Mt($t,f,n,C),c)return C;for(d in(u=w.event&&f.global)&&0==w.active++&&w.event.trigger("ajaxStart"),f.type=f.type.toUpperCase(),f.hasContent=!Nt.test(f.type),o=f.url.replace(At,""),f.hasContent?f.data&&f.processData&&0===(f.contentType||"").indexOf("application/x-www-form-urlencoded")&&(f.data=f.data.replace(It,"+")):(h=f.url.slice(o.length),f.data&&(f.processData||"string"==typeof f.data)&&(o+=(xt.test(o)?"&":"?")+f.data,delete f.data),!1===f.cache&&(o=o.replace(Ot,"$1"),h=(xt.test(o)?"&":"?")+"_="+wt.guid+++h),f.url=o+h),f.ifModified&&(w.lastModified[o]&&C.setRequestHeader("If-Modified-Since",w.lastModified[o]),w.etag[o]&&C.setRequestHeader("If-None-Match",w.etag[o])),(f.data&&f.hasContent&&!1!==f.contentType||n.contentType)&&C.setRequestHeader("Content-Type",f.contentType),C.setRequestHeader("Accept",f.dataTypes[0]&&f.accepts[f.dataTypes[0]]?f.accepts[f.dataTypes[0]]+("*"!==f.dataTypes[0]?", "+Pt+"; q=0.01":""):f.accepts["*"]),f.headers)C.setRequestHeader(d,f.headers[d]);if(f.beforeSend&&(!1===f.beforeSend.call(p,C,f)||c))return C.abort();if(E="abort",y.add(f.complete),C.done(f.success),C.fail(f.error),i=Mt(zt,f,n,C)){if(C.readyState=1,u&&m.trigger("ajaxSend",[C,f]),c)return C;f.async&&f.timeout>0&&(a=e.setTimeout((function(){C.abort("timeout")}),f.timeout));try{c=!1,i.send(_,S)}catch(e){if(c)throw e;S(-1,e)}}else S(-1,"No Transport");function S(t,n,r,l){var d,h,g,_,x,E=n;c||(c=!0,a&&e.clearTimeout(a),i=void 0,s=l||"",C.readyState=t>0?4:0,d=t>=200&&t<300||304===t,r&&(_=function(e,t,n){for(var i,o,s,r,a=e.contents,l=e.dataTypes;"*"===l[0];)l.shift(),void 0===i&&(i=e.mimeType||t.getResponseHeader("Content-Type"));if(i)for(o in a)if(a[o]&&a[o].test(i)){l.unshift(o);break}if(l[0]in n)s=l[0];else{for(o in n){if(!l[0]||e.converters[o+" "+l[0]]){s=o;break}r||(r=o)}s=s||r}if(s)return s!==l[0]&&l.unshift(s),n[s]}(f,C,r)),!d&&w.inArray("script",f.dataTypes)>-1&&w.inArray("json",f.dataTypes)<0&&(f.converters["text script"]=function(){}),_=function(e,t,n,i){var o,s,r,a,l,c={},u=e.dataTypes.slice();if(u[1])for(r in 
e.converters)c[r.toLowerCase()]=e.converters[r];for(s=u.shift();s;)if(e.responseFields[s]&&(n[e.responseFields[s]]=t),!l&&i&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),l=s,s=u.shift())if("*"===s)s=l;else if("*"!==l&&l!==s){if(!(r=c[l+" "+s]||c["* "+s]))for(o in c)if((a=o.split(" "))[1]===s&&(r=c[l+" "+a[0]]||c["* "+a[0]])){!0===r?r=c[o]:!0!==c[o]&&(s=a[0],u.unshift(a[1]));break}if(!0!==r)if(r&&e.throws)t=r(t);else try{t=r(t)}catch(e){return{state:"parsererror",error:r?e:"No conversion from "+l+" to "+s}}}return{state:"success",data:t}}(f,_,C,d),d?(f.ifModified&&((x=C.getResponseHeader("Last-Modified"))&&(w.lastModified[o]=x),(x=C.getResponseHeader("etag"))&&(w.etag[o]=x)),204===t||"HEAD"===f.type?E="nocontent":304===t?E="notmodified":(E=_.state,h=_.data,d=!(g=_.error))):(g=E,!t&&E||(E="error",t<0&&(t=0))),C.status=t,C.statusText=(n||E)+"",d?v.resolveWith(p,[h,E,C]):v.rejectWith(p,[C,E,g]),C.statusCode(b),b=void 0,u&&m.trigger(d?"ajaxSuccess":"ajaxError",[C,f,d?h:g]),y.fireWith(p,[C,E]),u&&(m.trigger("ajaxComplete",[C,f]),--w.active||w.event.trigger("ajaxStop")))}return C},getJSON:function(e,t,n){return w.get(e,t,n,"json")},getScript:function(e,t){return w.get(e,void 0,t,"script")}}),w.each(["get","post"],(function(e,t){w[t]=function(e,n,i,o){return p(n)&&(o=o||i,i=n,n=void 0),w.ajax(w.extend({url:e,type:t,dataType:o,data:n,success:i},w.isPlainObject(e)&&e))}})),w.ajaxPrefilter((function(e){var t;for(t in e.headers)"content-type"===t.toLowerCase()&&(e.contentType=e.headers[t]||"")})),w._evalUrl=function(e,t,n){return w.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,converters:{"text script":function(){}},dataFilter:function(e){w.globalEval(e,t,n)}})},w.fn.extend({wrapAll:function(e){var t;return this[0]&&(p(e)&&(e=e.call(this[0])),t=w(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map((function(){for(var e=this;e.firstElementChild;)e=e.firstElementChild;return e})).append(this)),this},wrapInner:function(e){return p(e)?this.each((function(t){w(this).wrapInner(e.call(this,t))})):this.each((function(){var t=w(this),n=t.contents();n.length?n.wrapAll(e):t.append(e)}))},wrap:function(e){var t=p(e);return this.each((function(n){w(this).wrapAll(t?e.call(this,n):e)}))},unwrap:function(e){return this.parent(e).not("body").each((function(){w(this).replaceWith(this.childNodes)})),this}}),w.expr.pseudos.hidden=function(e){return!w.expr.pseudos.visible(e)},w.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},w.ajaxSettings.xhr=function(){try{return new e.XMLHttpRequest}catch(e){}};var Ft={0:200,1223:204},Bt=w.ajaxSettings.xhr();f.cors=!!Bt&&"withCredentials"in Bt,f.ajax=Bt=!!Bt,w.ajaxTransport((function(t){var n,i;if(f.cors||Bt&&!t.crossDomain)return{send:function(o,s){var r,a=t.xhr();if(a.open(t.type,t.url,t.async,t.username,t.password),t.xhrFields)for(r in t.xhrFields)a[r]=t.xhrFields[r];for(r in t.mimeType&&a.overrideMimeType&&a.overrideMimeType(t.mimeType),t.crossDomain||o["X-Requested-With"]||(o["X-Requested-With"]="XMLHttpRequest"),o)a.setRequestHeader(r,o[r]);n=function(e){return function(){n&&(n=i=a.onload=a.onerror=a.onabort=a.ontimeout=a.onreadystatechange=null,"abort"===e?a.abort():"error"===e?"number"!=typeof a.status?s(0,"error"):s(a.status,a.statusText):s(Ft[a.status]||a.status,a.statusText,"text"!==(a.responseType||"text")||"string"!=typeof a.responseText?{binary:a.response}:{text:a.responseText},a.getAllResponseHeaders()))}},a.onload=n(),i=a.onerror=a.ontimeout=n("error"),void 
0!==a.onabort?a.onabort=i:a.onreadystatechange=function(){4===a.readyState&&e.setTimeout((function(){n&&i()}))},n=n("abort");try{a.send(t.hasContent&&t.data||null)}catch(e){if(n)throw e}},abort:function(){n&&n()}}})),w.ajaxPrefilter((function(e){e.crossDomain&&(e.contents.script=!1)})),w.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return w.globalEval(e),e}}}),w.ajaxPrefilter("script",(function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")})),w.ajaxTransport("script",(function(e){var t,n;if(e.crossDomain||e.scriptAttrs)return{send:function(i,o){t=w(" + + + + + + + diff --git a/docs/ktools/index.html b/docs/ktools/index.html new file mode 100644 index 00000000000..1a23db79525 --- /dev/null +++ b/docs/ktools/index.html @@ -0,0 +1,734 @@ + + + + + + + + + + + + + + +K Tools | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +

K Tools

+

Here we document how to use some of the most commonly used K tools.

+

Minimizing Output

+

When one is working with kore-repl, or with the prover in general, and inspecting specific configurations using config, the configurations can be huge.

+

One tool that helps print configurations compactly is the pyk print utility:

+
sh
pyk print
+

We are going to use the --minimize option (which is actually applied automatically when printing with pyk). This will filter out many uninteresting cells from the current configuration and make the result more compact.
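For instance, a minimal invocation might look like the following sketch (the definition directory ./test-kompiled and the term file term.json are hypothetical placeholders); no extra flag is needed, since minimization is applied by default:

sh
# Print a K term stored in term.json against a kompiled definition;
# uninteresting cells are filtered out automatically.
pyk print ./test-kompiled term.json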

+

Then, when invoking the prover, you can minimize your output by piping it into the pyk print ... facility with arguments for controlling the output:

+
sh
kprove --output json --definition DEFN ... \
  | jq .term \
  | pyk print DEFN /dev/stdin --omit-labels ... --keep-labels ...
+

You can also use this more easily from within kore-repl by creating a helper script. In your current directory, save a new script pykprint.sh:

+
sh
#!/bin/bash

kast --input kore --output json --definition $1 /dev/stdin \
  | jq .term \
  | pyk print $1 /dev/stdin --omit-labels $2
+

Now call config | bash pykprint.sh DEFN in Kore REPL to make the output smaller.

+

The options available for controlling the output are as follows:

+
    +
  • --no-minimize: do not remove uninteresting cells.
  • +
  • --omit-cells: remove the selected cells from the output.
  • +
  • --keep-cells: keep only the selected cells in the output.
  • +
+

Note: Make sure that there is no whitespace around the commas in the omit list, otherwise you will get an error (, is the list separator, so this requirement is strict).
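For example, assuming two hypothetical cell labels, only the first of these invocations is accepted:

sh
# Correct: no whitespace around the comma in the label list
pyk print DEFN /dev/stdin --omit-labels '<generatedCounter>,<messages>'

# Incorrect: the space after the comma causes an error
pyk print DEFN /dev/stdin --omit-labels '<generatedCounter>, <messages>'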

+

Debugging

+

The LLVM Backend has support for integration with GDB. You can run the debugger +on a particular program by passing the --debugger flag to krun, or by +invoking the llvm backend interpreter directly. Below we provide a simple +tutorial to explain some of the basic commands supported by the LLVM backend.

+

LLDB Support

+

GDB is not well-supported on macOS, particularly on newer OS versions and Apple +Silicon ARM hardware. Consequently, if the --debugger option is passed to krun +on macOS, LLDB[^1] is launched instead of GDB. However, the K-specific debugger +scripts that GDB uses have not been ported to LLDB yet, and so the instructions +in the rest of this section will not work.

+

The K Definition

+

Here is a sample K definition we will use to demonstrate debugging +capabilities:

+
k
module TEST
  imports INT

  configuration <k> foo(5) </k>
  rule [test]: I:Int => I +Int 1 requires I <Int 10

  syntax Int ::= foo(Int) [function]
  rule foo(I) => 0 -Int I

endmodule
+

You should compile this definition with --backend llvm --enable-llvm-debug to use the debugger most effectively.
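Concretely, assuming the module above is saved in a file named test.k, the compilation command is a sketch along these lines:

sh
# Compile with the LLVM backend and generate debug information
kompile test.k --backend llvm --enable-llvm-debug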

+

Stepping

+

Important: When you first run krun with the --debugger option, GDB / LLDB will instruct you on how to modify ~/.gdbinit or ~/.lldbinit to enable printing the abstract syntax of K terms in the debugger. If you do not perform this step, you can still use all the other features, but K terms will be printed as their raw addresses in memory.

+

GDB will need the kompiled interpreter in its safe path in order to access the pretty-printing Python script within it. A good way to do this is to pick a minimal top-level path that covers all of your kompiled semantics (i.e., set auto-load safe-path ~/k-semantics). LLDB has slightly different security policies that do not require fully arbitrary code execution.
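As a sketch, the corresponding line can be appended to ~/.gdbinit as follows (the ~/k-semantics path is only an example; use a directory that covers your own kompiled definitions):

sh
# Allow GDB to auto-load the pretty-printing script from kompiled directories
echo 'set auto-load safe-path ~/k-semantics' >> ~/.gdbinit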

+

This section uses GDB syntax to demonstrate the debugging features. On macOS, please refer to the GDB to LLDB command map.

+

You can break before every step of execution is taken by setting a breakpoint +on the k_step function.

+
(gdb) break definition.kore:k_step
+Breakpoint 1 at 0x25e340
+(gdb) run
+Breakpoint 1, 0x000000000025e340 in step (subject=`<generatedTop>{}`(`<k>{}`(`kseq{}`(`inj{Int{}, KItem{}}`(#token("0", "Int")),dotk{}(.KList))),`<generatedCounter>{}`(#token("0", "Int"))))
+(gdb) continue
+Continuing.
+
+Breakpoint 1, 0x000000000025e340 in step (subject=`<generatedTop>{}`(`<k>{}`(`kseq{}`(`inj{Int{}, KItem{}}`(#token("1", "Int")),dotk{}(.KList))),`<generatedCounter>{}`(#token("0", "Int"))))
+(gdb) continue 2
+Will ignore next crossing of breakpoint 1.  Continuing.
+
+Breakpoint 1, 0x000000000025e340 in step (subject=`<generatedTop>{}`(`<k>{}`(`kseq{}`(`inj{Int{}, KItem{}}`(#token("3", "Int")),dotk{}(.KList))),`<generatedCounter>{}`(#token("0", "Int"))))
+(gdb)
+
+

Breaking on a specific rule

+

You can break when a rule is applied by giving the rule a rule label. If the +module name is TEST and the rule label is test, you can break when the rule +applies by setting a breakpoint on the TEST.test.rhs function:

+
(gdb) break TEST.test.rhs
+Breakpoint 1 at 0x25e250: file /home/dwightguth/test/./test.k, line 4.
+(gdb) run
+Breakpoint 1, TEST.test.rhs (VarDotVar0=`<generatedCounter>{}`(#token("0", "Int")), VarDotVar1=dotk{}(.KList), VarI=#token("0", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb)
+
+

Note that the substitution associated with that rule is visible in the +description of the frame.

+

You can also break when a side condition is applied using the TEST.test.sc +function:

+
(gdb) break TEST.test.sc
+Breakpoint 1 at 0x25e230: file /home/dwightguth/test/./test.k, line 4.
+(gdb) run
+Breakpoint 1, TEST.test.sc (VarI=#token("0", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb)
+
+

Note that every variable used in the side condition can have its value +inspected when stopped at this breakpoint, but other variables are not visible.

+

You can also break on a rule by its location:

+
(gdb) break test.k:4
+Breakpoint 1 at 0x25e230: test.k:4. (2 locations)
+(gdb) run
+Breakpoint 1, TEST.test.sc (VarI=#token("0", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb) continue
+Continuing.
+
+Breakpoint 1, TEST.test.rhs (VarDotVar0=`<generatedCounter>{}`(#token("0", "Int")), VarDotVar1=dotk{}(.KList), VarI=#token("0", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb) continue
+Continuing.
+
+Breakpoint 1, TEST.test.sc (VarI=#token("1", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb)
+
+

Note that this sets a breakpoint at two locations: one on the side condition +and one on the right hand side. If the rule had no side condition, the first +would not be set. You can also view the locations of the breakpoints and +disable them individually:

+
(gdb) info breakpoint
+Num     Type           Disp Enb Address            What
+1       breakpoint     keep y   <MULTIPLE>
+        breakpoint already hit 3 times
+1.1                         y     0x000000000025e230 in TEST.test.sc at /home/dwightguth/test/./test.k:4
+1.2                         y     0x000000000025e250 in TEST.test.rhs at /home/dwightguth/test/./test.k:4
+(gdb) disable 1.1
+(gdb) continue
+Continuing.
+
+Breakpoint 1, TEST.test.rhs (VarDotVar0=`<generatedCounter>{}`(#token("0", "Int")), VarDotVar1=dotk{}(.KList), VarI=#token("1", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb) continue
+Continuing.
+
+Breakpoint 1, TEST.test.rhs (VarDotVar0=`<generatedCounter>{}`(#token("0", "Int")), VarDotVar1=dotk{}(.KList), VarI=#token("2", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb)
+
+

Now only the breakpoint when the rule applies is enabled.

+

Breaking on a function

+

You can also break when a particular function in your semantics is invoked:

+
(gdb) info functions foo
+All functions matching regular expression "foo":
+
+File /home/dwightguth/test/./test.k:
+struct __mpz_struct *Lblfoo'LParUndsRParUnds'TEST'UndsUnds'Int(struct __mpz_struct *);
+(gdb) break Lblfoo'LParUndsRParUnds'TEST'UndsUnds'Int
+Breakpoint 1 at 0x25e640: file /home/dwightguth/test/./test.k, line 6.
+(gdb) run
+Breakpoint 1, Lblfoo'LParUndsRParUnds'TEST'UndsUnds'Int (_1=#token("1", "Int")) at /home/dwightguth/test/./test.k:6
+6         syntax Int ::= foo(Int) [function]
+(gdb)
+
+

In this case, the variables have numbers instead of names because the names of +arguments in functions in K come from rules, and we are stopped before any +specific rule has applied. For example, _1 is the first argument to the +function.

+

You can also set a breakpoint in this location by setting it on the line +associated with its production:

+
(gdb) break test.k:6
+Breakpoint 1 at 0x25e640: file /home/dwightguth/test/./test.k, line 6.
+(gdb) run
+Breakpoint 1, Lblfoo'LParUndsRParUnds'TEST'UndsUnds'Int (_1=#token("1", "Int")) at /home/dwightguth/test/./test.k:6
+6         syntax Int ::= foo(Int) [function]
+
+

These two syntaxes are equivalent; use whichever is easier for you.

+

You can also view the stack of function applications:

+
(gdb) bt
+#0  Lblfoo'LParUndsRParUnds'TEST'UndsUnds'Int (_1=#token("1", "Int")) at /home/dwightguth/test/./test.k:6
+#1  0x000000000025e5f8 in apply_rule_111 (VarDotVar0=`<generatedCounter>{}`(#token("0", "Int")), VarDotVar1=dotk{}(.KList)) at /home/dwightguth/test/./test.k:9
+#2  0x0000000000268a52 in take_steps ()
+#3  0x000000000026b7b4 in main ()
+(gdb)
+
+

Here we see that foo was invoked while applying the rule on line 9 of test.k, +and we also can see the substitution of that rule. If foo was evaluated while +evaluating another function, we would also be able to see the arguments of that +function as well, unless the function was tail recursive, in which case no +stack frame would exist once the tail call was performed.

+

Breaking on a set of rules or functions

+

Using rbreak <regex> you can set breakpoints on multiple functions.

+
    +
  • +

    rbreak Lbl - sets a breakpoint on all non-hooked functions

    +
  • +
  • +

    rbreak Lbl.*TEST - sets a breakpoint on all functions from module TEST

    +
  • +
  • +

    rbreak hook_INT - sets a breakpoint on all hooks from module INT

    +
  • +
+

Other debugger issues

+
    +
  • <optimized out>: try kompiling without -O1, -O2, or -O3.
  • +
  • (gdb) break definition.kore:break -> No source file named definition.kore: pass --enable-llvm-debug to kompile in order to generate debug info symbols.
  • +
+

Profiling your K semantics

+

The first thing to be aware of is that, in order to get meaningful data, you need to build the semantics and all of its dependencies with optimizations enabled but without the frame-pointer elimination optimization. For example, for EVM, this means rebuilding GMP, MPFR, JEMalloc, Crypto++, SECP256K1, etc. with the following exports.

+
sh
export CFLAGS="-DNDEBUG -O2 -fno-omit-frame-pointer"
export CXXFLAGS="-DNDEBUG -O2 -fno-omit-frame-pointer"
+

You can skip this step, but if you do, any samples within these +libraries will not have correct stack trace information, which means +you will likely not get a meaningful set of data that will tell you +where the majority of time is really being spent. Don't worry about +rebuilding literally every single dependency though. Just focus on the +ones that you expect to take a non-negligible amount of runtime. You +will be able to tell if you haven't done enough later, and you can go +back and rebuild more. Once this is done, you then build K with +optimizations and debug info enabled, like so:

+
sh
mvn package -Dproject.build.type="FastBuild"
+

Next, you build the semantics with optimizations and debug info enabled (i.e., kompile -ccopt -O2 --iterated -ccopt -fno-omit-frame-pointer).
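Spelled out as a full command, this step might look like the following sketch (test.k is a placeholder for your main definition file, and the LLVM backend is assumed):

sh
# Build the semantics with optimizations but keep frame pointers for perf
kompile test.k --backend llvm -ccopt -O2 --iterated -ccopt -fno-omit-frame-pointer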

+

Once all this is done, you should be ready to profile your +application. Essentially, you should run whatever test suite you +usually run, but with perf record -g -- prefixed to the front. For +example, for KEVM it's the following command. (For best data, don't +run this step in parallel.)

+
sh
perf record -g -- make test-conformance
+

Finally, you want to filter out just the samples that landed within +the llvm backend and view the report. For this, you need to know the +name of the binary that was generated by your build system. Normally +it is interpreter, but e.g. if you are building the web3 client for +kevm, it would be kevm-client. You will want to run the following +command.

+
sh
perf report -g -c $binary_name
+

If all goes well, you should see a breakdown of where CPU time has +been spent executing the application. You will know that sufficient +time was spent rebuilding dependencies with the correct flags when the +total time reported by the main method is close to 100%. If it's not +close to 100%, this is probably because a decent amount of self time +was reported in stack traces that were not built with frame pointers +enabled, meaning that perf was unable to walk the stack. You will have +to go back, rebuild the appropriate libraries, and then record your +trace again.

+

Your ultimate goal is to identify the hotspots that take the most time and make them execute faster. Entries like step and step_1234 refer to the cost of matching. An entry like side_condition_1234 is a side condition, and apply_rule_1234 constructs the right-hand side of a rule. You can convert from a rule ordinal to a location using the llvm-kompile-compute-loc script in the bin folder of the llvm backend repo. For example,

+
sh
llvm-kompile-compute-loc 5868 evm-semantics/.build/defn/llvm/driver-kompiled
+

spits out the following text.

+
Line: 18529
+/home/dwightguth/evm-semantics/./.build/defn/llvm/driver.k:493:10
+
+

This is the line of definition.kore on which the axiom appears, as well as the original location of the rule in the K semantics. You can use this information to figure out which rules and functions are taking the most time and optimize them to be more efficient.

+

Running tests - kserver

+

The kserver is a front-end tool based on Nailgun that helps reduce the startup time of the JVM. Running kserver in a terminal window makes it wait for all kompile/kprove calls and forces them to run in the same process, sharing the same threads. This also reduces thread contention significantly: kompile uses all available threads for rule parsing. Another benefit is that caches are saved, so each time you call kprove/kast you can access them directly without extra disk usage. Running the regression-new integration tests on a powerful machine (32 threads) takes 8 minutes; with the kserver active it takes 2 minutes. You can start the kserver in two ways.

+
    +
  • blocking: call kserver in the command line. Close it after you are done testing. Useful for quick testing.
  • +
  • non-blocking: call spawn-kserver <log.file> and close it with stop-kserver - this is used for automation on CI
  • +
+

Because we reuse caches, you should stop and restart the server between runs. The Nailgun implementation hasn't been updated in the last 3-5 years, and it's not compatible with Java 18 onwards.
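As a sketch, a non-blocking session (e.g. on CI) might look like this; the log file name and the kompile/kprove inputs are placeholders:

sh
spawn-kserver kserver.log   # start the server in the background
kompile test.k              # subsequent K tool calls reuse the warm JVM
kprove test-spec.k
stop-kserver                # shut the server down when finished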

+
+
diff --git a/docs/user_manual/index.html b/docs/user_manual/index.html
new file mode 100644
index 00000000000..37b887a353a
--- /dev/null
+++ b/docs/user_manual/index.html
@@ -0,0 +1,4087 @@

K User Manual | Runtime Verification Inc

K User Manual

+

NOTE: The K User Manual is still under construction; some features of K +may have partial or missing documentation.

+

Introduction

+

Why K?

+

The K Framework is a programming language and system design toolkit made for practitioners and researchers alike.

+

K For Practitioners: K is a framework for deriving programming language tools from their semantic specifications.

+

Typically, programming language tool development follows the same basic pattern. After a new programming language is designed, separate teams will develop separate language tools (e.g. a compiler, interpreter, parser, symbolic execution engine, etc.). Code reuse is uncommon. The end result is that for each new language, the same basic tools and patterns are re-implemented again and again.

+

K approaches the problem differently: it generates each of these tools from a single language specification. Programming language design and tool implementation thereby become separate concerns. The end result is that the exercise of designing new languages and their associated tooling is reduced to developing a single language specification from which we derive our tooling for free.

+

K For Researchers: K is a configuration- and rewrite-based executable semantic framework.

+

In more detail, K specifications are:

+
    +
  1. Executable: compile into runnable and testable programs;
  2. +
  3. Semantic: correspond to a logical theory with a sound and relatively +complete proof system;
  4. +
  5. Configuration-based: organize system states into compositional, +hierarchical, labelled units called cells;
  6. +
  7. Rewrite-based: define system transitions using rewrite rules.
  8. +
+

K specifications are compiled into particular matching logic theories, giving +them a simple and expressive semantics. K semantic rules are implicitly defined +over the entire configuration structure, but omit unused cells, enabling a +highly modular definitional style. Furthermore, K has been used to develop +programming languages, type systems, and formal analysis tools.

+

Manual Objectives

+

As mentioned in the Why K? section above, the K Framework is designed as a +collection of language-generic command-line interface (CLI) tools which revolve +around K specifications. These tools cover a broad range of uses, but they +typically fall into one of the following categories:

+
    +
  1. Transforming K Specs (e.g. compilation)
  2. +
  3. Running K Specs (e.g. concrete and symbolic execution)
  4. +
  5. Analyzing K Specs (e.g. theorem proving)
  6. +
+

The main user-facing K tools include:

+
    +
  • kompile - the K compiler driver
  • +
  • kparse - the standalone K parser and abstract syntax tree (AST) transformation tool
  • +
  • krun - the K interpreter and symbolic execution engine driver
  • +
  • kprove - the K theorem prover
  • +
+

This user manual is designed to be a tool reference. In particular, it is not designed to be a tutorial on how to write K specifications or to teach the logical foundations of K. New K users should consult our dedicated K tutorial, or the more language-design oriented PL tutorial. Researchers seeking to learn more about the logic underlying K are encouraged to peruse the growing literature on K and matching logic. We will consider the manual complete when it provides a complete description of all user-facing K tools and features.

+

Introduction to K

+

Since K specifications are the primary input into the entire system, let us +take a moment to describe them. At the highest level, K specifications describe +a programming language or system using three different pieces:

+
    +
  1. the system primitives, the base datatypes used during system operation, +e.g., numbers, lists, maps, etc;
  2. +
  3. the system state, a tuple or record over system primitives which gives a +complete snapshot of the system at any given moment;
  4. +
  5. the system behavior, a set of rules which defines possible system +evolutions.
  6. +
+

K specifications are then defined by a collection of sentences which +correspond to the three concepts above:

+
    +
  1. syntax declarations encode the system primitives;
  2. +
  3. configuration declarations encode the system state;
  4. +
  5. context and rule declarations encode the system behavior.
  6. +
+

K sentences are then organized into one or more modules, which are stored in one or more files. In this scheme, files may require other files and modules may import other modules, giving rise to a hierarchy of files and modules. We give an intuitive sketch of the two levels of grouping in the diagram below:

+
   example.k file
+  +=======================+
+  | requires ".." --------|--> File_1
+  | ...                   |
+  | requires ".." --------|--> File_N
+  |                       |
+  |  +-----------------+  |
+  |  | module ..       |  |
+  |  |   imports .. ---|--|--> Module_1
+  |  |   ...           |  |
+  |  |   imports .. ---|--|--> Module_M
+  |  |                 |  |
+  |  |   sentence_1    |  |
+  |  |   ...           |  |
+  |  |   sentence_K    |  |
+  |  | endmodule       |  |
+  |  +-----------------+  |
+  |                       |
+  +=======================+
+
+

where:

+
    +
  • files and modules are denoted by double-bordered and single-bordered boxes respectively;
  • +
  • file or module identifiers are denoted by double dots (..);
  • +
  • potential repetitions are denoted by triple dots (...).
  • +
+

In the end, we require that the file and module hierarchies both form a directed acyclic graph (DAG). That is, no file may recursively require itself, and likewise, no module may recursively import itself.

+

We now zoom in further to discuss the various kinds of sentences contained in K +specifications:

+
    +
  1. +

    sentences that define our system's primitives, including:

    +
      +
    • sort declarations: define new categories of primitive datatypes
    • +
    • Backus-Naur Form (BNF) grammar declarations: define the +operators that inhabit our primitive datatypes
    • +
    • lexical syntax declarations: define lexemes/tokens for the +lexer/tokenizer
    • +
    • syntax associativity declarations: specify the +associativity/grouping of our declared operators
    • +
    • syntax priority declarations: specify the priority of potentially ambiguous operators
    • +
    +
  2. +
  3. +

    sentences that define our system's state, including:

    +
      +
    • configuration declarations: define labelled, hierarchical records using a nested XML-like syntax
    • +
    +
  4. +
  5. +

    sentences that define our system's behavior, including:

    +
      +
    • context declarations: describe how primitives and configurations +can simplify
    • +
    • context alias declarations: define templates that can generate new +contexts
    • +
    • rule declarations: define how the system transitions from one state +to the next
    • +
    +
  6. +
+

K Process Overview

+

We now examine how the K tools are generally used. The main input to all of the K tools is a K specification. For efficiency reasons, this specification is first compiled into an intermediate representation called Kore. Once we have obtained this intermediate representation, we can use it to do:

+
    +
  1. parsing/pretty-printing, i.e., converting a K term, whose syntax is defined by a K specification, into an alternate representation
  2. +
  3. concrete and abstract execution of a K specification
  4. +
  5. theorem proving, i.e., verifying whether a set of claims about a K +specification hold
  6. +
+

We represent the overall process using the graphic below:

 K Compilation Process
+============================================================+
|                     +---------+                            |
|  K Specification ---| kompile |--> Kore Specification --+  |
|                     +---------+                         |  |
+=========================================================|==+
                                                          |
 K Execution Process                                      |
+=========================================================|==+
|                                                         |  |
|             +-------------------------------------------+  |
|             |                                              |
|             |       +---------+                            |
|  K Term ----+-------| kparse  |--> K Term                  |
|             |       +---------+                            |
|             |                                              |
|             |       +---------+                            |
|  K Term ----+-------|  krun   |--> K Term                  |
|             |       +---------+                            |
|             |                                              |
|             |       +---------+                            |
|  K Claims --+-------| kprove  |--> K Claims                |
|                     +---------+                            |
|                                                            |
+============================================================+

where:

  • process outlines are denoted by boxes with double-lined borders
  • executables are denoted by boxes with single-lined borders
  • inputs and outputs are denoted by words attached to lines
  • K terms typically correspond to programs defined in a particular
    language's syntax (which are either parsed using kparse or executed using
    krun)
  • K claims are a notation for describing how certain K programs should
    execute (which are checked by our theorem prover kprove)

K Compilation Process: +Let us start with a description of the compilation process. According to the +above diagram, the compiler driver is called kompile. For our purposes, it is +enough to view the K compilation process as a black box that transforms a K +specification into a lower-level Kore specification that encodes the same +information, but that is easier to work with programmatically.

+

K Execution Process: +We now turn our attention to the K execution process. Abstractly, we can divide +the K execution process into the following stages:

+
  1. the kore specification is loaded (which defines a lexer, parser, and
     unparser among other things)
  2. the input string is lexed into a token stream
  3. the token stream is parsed into K terms/claims
  4. the K terms/claims are transformed according to the K tool being used
     (e.g. kparse, krun, or kprove)
  5. the K terms/claims are unparsed into a string form and printed

Note that all of the above steps performed in the K execution process are fully
prescribed by the input K specification. Of course, there are entire languages
devoted to encoding these various stages individually, e.g., flex for lexers,
bison for parsers, etc. What K offers is a consistent language to package the
above concepts in a way that we believe is convenient and practical for a wide
range of uses.

+

Module Declaration

+

K modules are declared at the top level of a K file. They begin with the +module keyword and are followed by a module ID and an optional set of +attributes. They continue with zero or more imports and zero or more sentences +until the endmodule keyword is reached.

+

A module ID consists of an optional # at the beginning, followed by one or +more components separated by hyphens. Each component can contain letters, +numbers, or underscores.

+

After the module ID, attributes can be specified in square brackets. See below +for an (incomplete) list of allowed module attributes.

+

Following the attributes, a module can contain zero or more imports. An +import consists of the import or imports keywords followed by a module ID. +An import tells the compiler that this module should contain all the sentences +(recursively) contained by the module being imported.

+

Imports can be public or private. By default, they are public, which +means that all the imported syntax can be used by any module that imports the +module doing the import. However, you can explicitly override the visibility +of the import with the public or private keyword immediately prior to the +module name. A module imported privately does not export its syntax to modules +that import the module doing the import.

+

Following imports, a module can contain zero or more sentences. A sentence can +be a syntax declaration, a rule, a configuration declaration, a context, a +claim, or a context alias. Details on each of these can be found in subsequent +sections.
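
As a minimal sketch of these pieces together (the module and sort names here
are hypothetical), a module with attributes, imports, and a sentence might look
like:

k
module EXAMPLE-MODULE [private]
  imports INT
  imports private SET

  // public overrides the module-level private default for this sentence
  syntax Example ::= "example" [public]
endmodule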

+

private attribute

+

If the module is given the private attribute, all of its imports and syntax +are private by default. Individual pieces of syntax can be made public with +the public attribute, and individual imports can be made public with the +public keyword. See relevant sections on syntax and modules for more details +on what it means for syntax and imports to be public or private.

+

symbolic and concrete attribute

+

These attributes may be placed on modules to indicate that they should only +be used by the Haskell and LLVM backends respectively. If the definition is +compiled on the opposite backend, they are implicitly removed from the +definition prior to parsing anywhere they are imported. This can be useful when +used in limited capacity in order to provide alternate semantics for certain +features on different backends. It should be used sparingly as it makes it more +difficult to trust the correctness of your semantics, even in the presence of +testing.
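
For instance (module names hypothetical), a definition might keep its
backend-specific sentences in a pair of modules, one per backend:

k
module MY-LEMMAS-SYMBOLIC [symbolic]
  // sentences here are only kept when compiling for the Haskell backend
endmodule

module MY-LEMMAS-CONCRETE [concrete]
  // sentences here are only kept when compiling for the LLVM backend
endmodule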

+

Syntax Declaration

+

Named Non-Terminals

+

We have added a syntax to Productions which allows non-terminals to be given a +name in productions. This significantly improves the ability to document K, by +providing a way to explicitly explain what a field in a production corresponds +to instead of having to infer it from a comment or from the rule body.

+

The syntax is:

+
k
name: Sort

This syntax can be used anywhere in a K definition that expects a non-terminal.
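
For example, a hypothetical conditional statement could name its non-terminals
so that each field documents itself:

k
syntax Stmt ::= "if" "(" condition: Exp ")" thenBranch: Stmt "else" elseBranch: Stmt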

+

symbol(_) attribute

+

By default, when compiling a definition, K generates a unique "mangled" label +identifier for each syntactic production. These identifiers can be used to +reference productions externally, for example when constructing terms by hand +or programmatically via Pyk.

+

The symbol(_) attribute can be applied to a production to control the precise +identifier for a production that appears in a compiled definition. For example:

+
k
module SYMBOLS
  syntax Foo ::= foo() [symbol(foo)]
               | bar()
endmodule

Here, the compiled definition will contain the following symbol declarations:

+
  symbol Lblfoo{}() ...
  symbol Lblbar'LParRParUnds'SYMBOLS'Unds'Foo{}() ...

The compiler enforces uniqueness[1] of symbol names specified in +this way; it would be an error to apply symbol(foo) to another production in +the module above. Additionally, symbol(_) with an argument may not co-occur +with the klabel(_) attribute (see below).

+

overload attribute

+

K supports subsort overloading[2] on symbols, whereby a +constructor can have a more specific sort for certain arguments. For example, +consider the following productions derived from a C-like language semantics:

+
k
syntax Exp ::= LVal
             | Exp "." Id
syntax LVal ::= LVal "." Id

Here, it is useful for the result of the dot operator to be an LVal if the +left-hand side is itself an LVal. However, there is an issue with the code +as written: if L() is a term of sort LVal, then the program L() . x has a +parsing ambiguity between the two productions for the dot operator. To resolve +this, we can mark the productions as overloads:

+
k
syntax Exp ::= LVal
             | Exp "." Id [overload(_._)]
syntax LVal ::= LVal "." Id [overload(_._)]

Now, the parser will select the most specific overloaded production when it
resolves ambiguities in L() . x (that is, L() . x parses to a term of sort
LVal).

+

Formally, the compiler organises productions into a partial order that defines +the overload relation as follows. We say that P is a more specific overload +of Q if:

+
  • P and Q have the same overload(_) attribute. Note that the argument
    supplied has no semantic meaning other than as a key grouping productions
    together.
  • Let S_P be the sort of P, and S_p1 etc. be the sorts of its arguments
    (c.f. for Q). The tuple (S_P, S_p1, ..., S_pN) must be elementwise
    strictly less than (S_Q, S_q1, ..., S_qN) according to the definition's
    subsorting relationship. That is, a term from production P is a
    restriction of one from production Q; when its arguments are more precise,
    we can give the result a more precise sort.

klabel(_) and symbol attributes

+

Note: the klabel(_), symbol approach described in this section is a legacy +feature that will be removed in the future. New code should use the symbol(_) +and overload(_) attributes to opt into explicit naming and overloading +respectively.

+

References here to "overloading" are explained in the section above; the use +of the klabel(_) attribute without symbol is equivalent to the new +overload(_) syntax.

+

By default K generates for each syntax definition a long and obfuscated klabel +string, which serves as a unique internal identifier and also is used in kast +format of that syntax. If we need to reference a certain syntax production +externally, we have to manually define the klabels using the klabel attribute. +One example of where you would want to do this is to be able to refer to a given +symbol via the syntax priority attribute, or to enable overloading of a +given symbol.

+

If you only provide the klabel attribute, you can use the provided klabel to +refer to that symbol anywhere in the frontend K code. However, the internal +identifier seen by the backend for that symbol will still be the long obfuscated +generated string. Sometimes you want control over the internal identifier used as +well, in which case you use the symbol attribute. This tells the frontend to +use whatever the declared klabel is directly as the internal identifier.

+

For example:

+
k
module MYMODULE
  syntax FooBarBaz ::= #Foo( Int, Int ) [klabel(#Foo), symbol] // symbol1
                     | #Bar( Int, Int ) [klabel(#Bar)]         // symbol2
                     | #Baz( Int, Int )                        // symbol3
endmodule

Here, we have that:

+
  • In frontend K, you can refer to "symbol1" as #Foo (from klabel(#Foo)),
    and the backend will see 'Hash'Foo as the symbol name.
  • In frontend K, you can refer to "symbol2" as #Bar (from klabel(#Bar)),
    and the backend will see
    'Hash'Bar'LParUndsCommUndsRParUnds'MYMODULE'Unds'FooBarBaz'Unds'Int'Unds'Int
    as the symbol name.
  • In frontend K, you can refer to "symbol3" as
    #Baz(_,_)_MYMODULE_FooBarBaz_Int_Int (from the auto-generated klabel), and
    the backend will see
    'Hash'Baz'LParUndsCommUndsRParUnds'MYMODULE'Unds'FooBarBaz'Unds'Int'Unds'Int
    as the symbol name.

The symbol provided must be unique to this definition. This is enforced by K.
In general, it's recommended to use the symbol attribute whenever you use
klabel unless you explicitly have a reason not to (e.g. you want to overload
symbols, or you're using a deprecated backend). It can be very helpful to use
the symbol attribute for debugging, as many debugging messages are printed in
Kast format, which will be more readable with the symbol names you explicitly
declare. In addition, if you are programmatically manipulating definitions via
the JSON Kast format, building terms using the user-provided pretty
symbol/klabel(...) is easier and less error-prone should the auto-generation
process for klabels change.

+

Syntactic Lists

+

When using K's support for syntactic lists, a production like:

+
k
syntax Ints ::= List{Int, ","} [symbol(ints)]

will desugar into two productions:

+
k
syntax Ints ::= Int "," Ints [symbol(ints)]
syntax Ints ::= ".Ints" [symbol(List{"ints"})]

Note that the symbol for the terminator of the list has been generated +automatically from the label on the original production. It is possible to +control what the terminator's label is using the terminator-symbol(_) +attribute. For example:

+
k
syntax Ints ::= List{Int, ","} [symbol(ints), terminator-symbol(.ints)]

will desugar into two productions:

+
k
syntax Ints ::= Int "," Ints [symbol(ints)]
syntax Ints ::= ".Ints" [symbol(.ints)]

It is an error to apply terminator-symbol(_) to a non-production sentence, or +to a production that does not declare a syntactic list.

+

Parametric productions and bracket attributes

+

Some syntax productions, like the rewrite operator, the bracket operator, and +the #if #then #else #fi operator, cannot have their precise type system +expressed using only concrete sorts.

+

Prior versions of K solved this issue by using the K sort in this case, but +this introduces inexactness in which poorly typed terms can be created even +without having a cast operator present in the syntax, which is a design +consideration we would prefer to avoid.

+

It also introduces cases where terms cannot be placed in positions where they +ought to be well sorted unless their return sort is made to be KBott, which in +turn vastly complicates the grammar and makes parsing much slower.

+

In order to introduce this, we provide a new syntax for parametric productions +in K. This allows you to express syntax that has a sort signature based on +parametric polymorphism. We do this by means of an optional curly-brace- +enclosed list of parameters prior to the return sort of a production.

+

Some examples:

+
k
syntax {Sort} Sort ::= "(" Sort ")" [bracket]
syntax {Sort} KItem ::= Sort
syntax {Sort} Sort ::= KBott
syntax {Sort} Sort ::= Sort "=>" Sort
syntax {Sort} Sort ::= "#if" Bool "#then" Sort "#else" Sort "#fi"
syntax {Sort1, Sort2} Sort1 ::= "#fun" "(" Sort2 "=>" Sort1 ")" "(" Sort2 ")"

Here we have:

+
  1. Brackets, which can enclose any sort but should be of the same sort that
     was enclosed.
  2. Every sort is a KItem.
  3. A KBott term can appear inside any sort.
  4. Rewrites, which can rewrite a value of any sort to a value of the same
     sort. Note that this allows the lhs or rhs to be a subsort of the other.
  5. If-then-else, which can return any sort but which must contain that sort
     on both the true and false branches.
  6. Lambda applications, in which the argument and parameter must be the same
     sort, and the return value of the application must be the same sort as
     the return value of the function.

Note the last case, in which two different parameters are specified separated +by a comma. This indicates that we have multiple independent parameters which +must be the same each place they occur, but not the same as the other +parameters.

+

In practice, because every sort is a subsort of K, the Sort2 +parameter in #6 above does nothing during parsing. It cannot +actually reject any parse, because it can always infer that the sort of the +argument and parameter are K, and it has no effect on the resulting sort of +the term. However, it will nevertheless affect the kore generated from the term +by introducing an additional parameter to the symbol generated for the term.

+

function and total attributes

+

Many times it becomes easier to write a semantics if you have "helper" +functions written which can be used in the RHS of rules. The function +attribute tells K that a given symbol should be simplified immediately when it +appears anywhere in the configuration. Semantically, it means that evaluation +of that symbol will result in at most one return value (that is, the symbol is +a partial function).

+

The total attribute indicates that a symbol cannot be equal to matching logic +bottom; in other words, it has at least one value for every possible set of +arguments. It can be added to a production with the function attribute to +indicate to the symbolic reasoning engine that a given symbol is a +total function, that is it has exactly one return value for every possible +input. Other uses of the total attribute (i.e., on multi-valued symbols to +indicate they always have at least one value) are not yet implemented.

+

For example, here we define the _+Word_ total function and the _/Word_ +partial function, which can be used to do addition/division modulo +2 ^Int 256. These functions can be used anywhere in the semantics where +integers should not grow larger than 2 ^Int 256. Notice how _/Word_ is +not defined when the denominator is 0.

+
k
syntax Int ::= Int "+Word" Int [function, total]
             | Int "/Word" Int [function]

rule I1 +Word I2 => (I1 +Int I2) modInt (2 ^Int 256)
rule I1 /Word I2 => (I1 /Int I2) modInt (2 ^Int 256) requires I2 =/=Int 0

freshGenerator attribute

+

In K, you can access "fresh" values in a given domain using the syntax +!VARNAME:VarSort (with the !-prefixed variable name). This is supported for +builtin sorts Int and Id already. For example, you can generate fresh +memory locations for declared identifiers as such:

+
k
rule <k> new var x ; => . ... </k>
     <env> ENV => ENV [ x <- !I:Int ] </env>
     <mem> MEM => MEM [ !I <- 0 ] </mem>

Each time a !-prefixed variable is encountered, a new integer will be used, +so each variable declared with new var _ ; will get a unique position in the +<mem>.

+

Sometimes you want to have generation of fresh constants in a user-defined +sort. For this, K will still generate a fresh Int, but can use a converter +function you supply to turn it into the correct sort. For example, here we can +generate fresh Foos using the freshFoo(_) function annotated with +freshGenerator.

+
k
syntax Foo ::= "a" | "b" | "c" | d ( Int )

syntax Foo ::= freshFoo ( Int ) [freshGenerator, function, total]

rule freshFoo(0) => a
rule freshFoo(1) => b
rule freshFoo(2) => c
rule freshFoo(I) => d(I) [owise]

rule <k> new var x ; => . ... </k>
     <env> ENV => ENV [ x <- !I:Int ] </env>
     <mem> MEM => MEM [ !I <- !F:Foo ] </mem>

Now each newly allocated memory slot will have a fresh Foo placed in it.

+

token attribute

+

The token attribute signals to the Kore generator that the associated sort +will be inhabited by domain values. Sorts inhabited by domain values must not +have any constructors declared.

+
k
syntax Bytes [hook(BYTES.Bytes), token]

Converting between [token] sorts

+

You can convert between tokens of one sort via Strings by defining functions +implemented by builtin hooks. +The hook STRING.token2string allows conversion of any token to a string:

+
k
syntax String ::= FooToString(Foo) [function, total, hook(STRING.token2string)]

Similarly, the hook STRING.string2Token allows the inverse:

+
k
syntax Bar ::= StringToBar(String) [function, total, hook(STRING.string2token)]

WARNING: This sort of conversion does NOT do any parsing or validation. Thus,
we can create arbitrary tokens of any sort:

+
StringToBar("The sun rises in the west.")
+
+

Composing these two functions lets us convert from Foo to Bar:

+
k
syntax Bar ::= FooToBar(Foo) [function]
rule FooToBar(F) => StringToBar(FooToString(F))

Parsing comments, and the #Layout sort

+

Productions for the #Layout sort are used to describe tokens that are
considered "whitespace". The scanner removes tokens matching these productions
so they are not even seen by the parser. Below, we use it to define lines
beginning with ; (semicolon) as comments.

+
k
syntax #Layout ::= r"(;[^\\n\\r]*)"    // Semi-colon comments
                 | r"([\\ \\n\\r\\t])" // Whitespace

prec attribute

+

Consider the following naive attempt at creating a language whose syntax allows
two types of variables: names that contain underbars, and names that contain
sharps/hashes/pound-signs:

+
k
syntax NameWithUnderbar ::= r"[a-zA-Z][A-Za-z0-9_]*" [token]
syntax NameWithSharp ::= r"[a-zA-Z][A-Za-z0-9_#]*" [token]
syntax Pgm ::= underbar(NameWithUnderbar)
             | sharp(NameWithSharp)

Although it seems that K has enough information to parse the programs
underbar(foo) and sharp(foo), the lexer does not take into account whether a
token is being parsed for the sharp or for the underbar production. It chooses
an arbitrary sort for the token foo (perhaps NameWithUnderbar). Thus, during
parsing it is unable to construct a valid term for one of those programs
(sharp(foo)) and produces the error message:
Inner Parser: Parse error: unexpected token 'foo'.

+

Since calculating inclusions and intersections between regular expressions is +tricky, we must provide this information to K. We do this via the prec(N) +attribute. The lexer will always prefer longer tokens to shorter tokens. +However, when it has to choose between two different tokens of equal length, +token productions with higher precedence are tried first. Note that the default +precedence value is zero when the prec attribute is not specified.

+

For example, the BUILTIN-ID-TOKENS module defines #UpperId and #LowerId with +the prec(2) attribute.

+
k
syntax #LowerId ::= r"[a-z][a-zA-Z0-9]*" [prec(2), token]
syntax #UpperId ::= r"[A-Z][a-zA-Z0-9]*" [prec(2), token]

Furthermore, we also need to make sorts with more specific tokens subsorts of ones with more +general tokens. We add the token attribute to this production so that all +tokens of a particular sort are marked with the sort they are parsed as and not a +subsort thereof. e.g. we get underbar(#token("foo", "NameWithUnderbar")) +instead of underbar(#token("foo", "#LowerId"))

+
k
imports BUILTIN-ID-TOKENS
syntax NameWithUnderbar ::= r"[a-zA-Z][A-Za-z0-9_]*" [prec(1), token]
                          | #UpperId [token]
                          | #LowerId [token]
syntax NameWithSharp ::= r"[a-zA-Z][A-Za-z0-9_#]*" [prec(1), token]
                       | #UpperId [token]
                       | #LowerId [token]
syntax Pgm ::= underbar(NameWithUnderbar)
             | sharp(NameWithSharp)

unused attribute

+

K will warn you if you declare a symbol that is not used in any of the rules of +your definition. Sometimes this is intentional, however; in this case, you can +suppress the warning by adding the unused attribute to the production or +cell.

+
k
syntax Foo ::= foo() [unused]

configuration <foo unused=""> .K </foo>

deprecated attribute

+

Symbols can be marked as deprecated by adding the deprecated attribute to +their declaration. If that symbol subsequently appears in the definition (in a +rule, context, context alias or configuration), the compiler will issue a +warning.

+
k
syntax Foo ::= foo() [deprecated]
rule foo() => . // warning on this line

Symbol priority and associativity

+

Unlike most other parser generators, K combines the task of parsing with AST +generation. A production declared with the syntax keyword in K is both a +piece of syntax used when parsing, and a symbol that is used when rewriting. +As a result, it is generally convenient to describe expression grammars using +priority and associativity declarations rather than explicitly transforming +your grammar into a series of nonterminals, one for each level of operator +precedence. Thus, for example, a simple grammar for addition and multiplication +will look like this:

+
k
syntax Exp ::= Exp "*" Exp
             | Exp "+" Exp

However, this grammar is ambiguous. The term x+y*z might refer to x+(y*z) +or to (x+y)*z. In order to differentiate this, we introduce a partial +ordering between productions known as priority. A symbol "has tighter priority" +than another symbol if the first symbol can appear under the second, but the +second cannot appear under the first without a bracket. For example, in +traditional arithmetic, multiplication has tighter priority than addition, +which means that x+y*z cannot parse as (x+y)*z because the addition +operator would appear directly beneath the multiplication, which is forbidden +by the priority filter.

+

Priority is applied individually to each possible ambiguous parse of a term. It +then either accepts or rejects that parse. If there is only a single remaining +parse (after all the other disambiguation steps have happened), this is the +parse that is chosen. If all the parses were rejected, it is a parse error. If +multiple parses remain, they might be resolved by further disambiguation such +as via the prefer and avoid attributes, but if multiple parses remain after +disambiguation finishes, this is an ambiguous parse error, indicating there is +not a unique parse for that term. In the vast majority of cases, this is +an error and indicates that you ought to either change your grammar or add +brackets to the term in question.

+

Priority is specified in K grammars by means of one of two different +mechanisms. The first, and simplest, simply replaces the | operator in a +sequence of K productions with the > operator. This operator indicates that +everything prior to the > operator (including transitively) binds tighter +than what comes after. For example, a more complete grammar for simple +arithmetic might be:

+
k
syntax Exp ::= Exp "*" Exp
             | Exp "/" Exp
             > Exp "+" Exp
             | Exp "-" Exp

This indicates that multiplication and division bind tighter than addition and
subtraction, but that there is no relationship in priority between
multiplication and division.

+

As you may have noticed, this grammar is also ambiguous. x*y/z might refer to
x*(y/z) or to (x*y)/z. Indeed, if we removed division and subtraction
entirely, the grammar would still be ambiguous: x*y*z might parse as x*(y*z),
or as (x*y)*z. To resolve this, we introduce another feature: associativity.
Roughly, associativity tells us how symbols are allowed to nest within other
symbols with the same priority. If a set of symbols is left associative, then
symbols in that set cannot appear as the rightmost child of other symbols in
that set. If a set of symbols is right associative, then symbols in that set
cannot appear as the leftmost child of other symbols in that set. Finally, if
a set of symbols is non-associative, then symbols in that set cannot appear as
the rightmost or leftmost child of other symbols in that set. For example, in
the above grammar, if addition and subtraction are left associative, then
x+y+z will parse as (x+y)+z and x+y-z will parse as (x+y)-z (because the other
parse will have been rejected).

+

You might notice that this seems to apply only to binary infix operators. In +fact, the real behavior is slightly more complicated. Priority and +associativity (for technical reasons that go beyond the scope of this document) +really only apply when the rightmost or leftmost item in a production is a +nonterminal. If the rightmost nonterminal is followed by a terminal (or +respectively the leftmost preceded), priority and associativity do not apply. +Thus we can generalize these concepts to arbitrary context-free grammars.

+

Note that in some cases, this is not the behavior you want. You may actually +want to reject parses even though the leftmost and rightmost item in a +production are terminals. You can accomplish this by means of the +applyPriority attribute. When placed on a production, it tells the parser +which nonterminals of a production the priority filter ought to reject children +under, overriding the default behavior. For example, I might have a production +like syntax Exp ::= foo(Exp, Exp) [applyPriority(1)]. This tells the parser +to reject terms with looser priority binding under the first Exp, but not +the second. By default, with this production, neither position would apply +to the priority filter, because the first and last items of the production +are both terminals.

+

Associativity is specified in K grammars by means of one of two different +mechanisms. The first, and simplest, adds the associativity of a priority block +of symbols prior to that block. For example, we can remove the remaining +ambiguities in the above grammar like so:

+
k
syntax Exp ::= left:
               Exp "*" Exp
             | Exp "/" Exp
             > right:
               Exp "+" Exp
             | Exp "-" Exp

This indicates that multiplication and division are left-associative, i.e.,
after symbols with higher priority are parsed as innermost, symbols are nested
with the rightmost on top. Addition and subtraction are right-associative,
which is the opposite and indicates that symbols are nested with the leftmost
on top. Note that this is similar to but different from evaluation order,
which also concerns itself with the ordering of symbols and is described in
the next section.

+

You may note we have not yet introduced the second syntax for priority +and associativity. In some cases, syntax for a grammar might be spread across +multiple modules, sometimes for very good reasons with respect to code +modularity. As a result, it becomes infeasible to declare priority and +associativity inline within a set of productions, because the productions +are not contiguous within a single file.

+

For this purpose, we introduce the equivalent syntax priority, +syntax left, syntax right, and syntax non-assoc declarations. For +example, the above grammar can be written equivalently as:

+
k
syntax Exp ::= Exp "*" Exp [group(mult)]
             | Exp "/" Exp [group(div)]
             | Exp "+" Exp [group(add)]
             | Exp "-" Exp [group(sub)]

syntax priority mult div > add sub
syntax left mult div
syntax right add sub

Here, the group(_) attribute is used to create user-defined groups of +sentences. A particular group name collectively refers to the whole set of +sentences within that group. The sets are flattened together, so we could +equivalently have written:

+
k
syntax Exp ::= Exp "*" Exp [group(mult)]
             | Exp "/" Exp [group(mult)]
             | Exp "+" Exp [group(add)]
             | Exp "-" Exp [group(add)]

syntax priority mult > add
syntax left mult
syntax right add

Note that syntax [left|right|non-assoc] should not be used to group together +productions with different priorities. For example, this code would be invalid:

+
k
syntax priority mult > add
syntax left mult add

Note that there is one other way to describe associativity, but it is +prone to a very common mistake. You can apply the attribute left, right, +or non-assoc directly to a production to indicate that it is, by itself, +left-, right-, or non-associative.

+

However, this often does not mean what users think it means. In particular:

+
k
syntax Exp ::= Exp "+" Exp [left]
             | Exp "-" Exp [left]

is not equivalent to:

+
k
syntax Exp ::= left:
               Exp "+" Exp
             | Exp "-" Exp

Under the first, each production is associative with itself, but not each +other. Thus, x+y+z will parse unambiguously as (x+y)+z, but x+y-z will +be ambiguous. However, in the second, x+y-z will parse unambiguously as +(x+y)-z.

+

Think carefully about how you want your grammar to parse. In general, if you're +not sure, it's probably best to group associativity together into the same +blocks you use for priority, rather than using left, right, or non-assoc +attributes on the productions.

+

Lexical identifiers

+

Sometimes it is convenient to be able to give a certain regular expression a +name and then refer to it in one or more regular expression terminals. This +can be done with a syntax lexical sentence in K:

+
k
syntax lexical Alphanum = r"[0-9a-zA-Z]"

This defines a lexical identifier Alphanum which can be expanded in any +regular expression terminal to the above regular expression. For example, I +might choose to then implement the syntax of identifiers as follows:

+
k
syntax Id ::= r"[a-zA-Z]{Alphanum}*" [token]

Here {Alphanum} expands to the above regular expression, making the sentence +equivalent to the following:

+
k
syntax Id ::= r"[a-zA-Z]([0-9a-zA-Z])*" [token]

This feature can be used to more modularly construct the lexical syntax of your +language. Note that K does not currently check that lexical identifiers used +in regular expressions have been defined; this will generate an error when +creating the scanner, however, and the user ought to be able to debug what +happened.

+

assoc, comm, idem, and unit attributes

+

These attributes are used to indicate whether a collection or a production +is associative, commutative, idempotent, and/or has a unit. +In general, you should not need to apply these attributes to productions +yourself, however, they do have certain special meaning to K. K will generate +axioms related to each of these concepts into your definition for you +automatically. It will also automatically sort associative-commutative +collections, and flatten the indentation of associative collections, when +unparsing.

+

public and private attribute

+

K allows users to declare certain pieces of syntax as either public or private. +All syntax is public by default. Public syntax can be used from any module that +imports that piece of syntax. A piece of syntax can be declared private with +the private attribute. This means that that syntax can only be used in the +module in which it is declared; it is not visible from modules that import +that module.

+

You can also change the default visibility of a module with the private +attribute, when it is placed directly on a module. A module with the private +attribute has all syntax private by default; this can be overridden on +specific sentences with the public attribute.

+

Note that the private module attribute also changes the default visibility of
imports; please refer to the appropriate section elsewhere in the manual for
more details.

+

Here is an example usage:

+
k
module WIDGET-SYNTAX

  syntax Widget ::= foo()
  syntax WidgetHelper ::= bar() [private] // this production is not visible
                                          // outside this module
endmodule

module WIDGET [private]
  imports WIDGET-SYNTAX

  syntax Widget ::= fooImpl() // this production is not visible outside this
                              // module

  // this production is visible outside this module
  syntax KItem ::= adjustWidget(Widget) [function, public]
endmodule

Configuration Declaration

+

exit attribute

+

A single configuration cell containing an integer may have the "exit" +attribute. This integer will then be used as the return value on the console +when executing the program.

+

For example:

+
k
configuration <k> $PGM:Pgm </k>
              <status-code exit=""> 1 </status-code>

declares that the cell status-code should be used as the exit-code for +invocations of krun. Additionally, we state that the default exit-code is 1 +(an error state). One use of this is for writing testing harnesses which assume +that the test fails until proven otherwise and only set the <status-code> cell +to 0 if the test succeeds.

+

Collection Cells: multiplicity and type attributes

+

Sometimes a semantics needs to allow multiple copies of the same cell, for +example if you are making a concurrent multi-threading programming language. +For this purpose, K supports the multiplicity and type attributes on cells +declared in the configuration.

+

multiplicity can take on values * and ?. Declaring multiplicity="*" +indicates that the cell may appear any number of times in a runtime +configuration. Setting multiplicity="?" indicates that the cell may only +appear exactly 0 or 1 times in a runtime configuration. If there are no +configuration variables present in the cell collection, the initial +configuration will start with exactly 0 instances of the cell collection. If +there are configuration variables present in the cell collection, the initial +configuration will start with exactly 1 instance of the cell collection.

+

type can take on values Set, List, and Map. For example, here we declare
several collection cells:

+
k
configuration <k> $PGM:Pgm </k>
              <sets> <set multiplicity="?" type="Set"> 0:Int </set> </sets>
              <lists> <list multiplicity="*" type="List"> 0:Int </list> </lists>
              <maps>
                <map multiplicity="*" type="Map">
                  <map-key> 0:Int </map-key>
                  <map-value-1> "":String </map-value-1>
                  <map-value-2> 0:Int </map-value-2>
                </map>
              </maps>

Declaring type="Set" indicates that duplicate occurrences of the cell should +be de-duplicated, and accesses to instances of the cell will be nondeterministic +choices (constrained by any other parts of the match and side-conditions). +Similarly, declaring type="List" means that new instances of the cell can be +added at the front or back, and elements can be accessed from the front or back, +and the order of the cells will be maintained. The following are examples of +introduction and elimination rules for these collections:

+
k
rule <k> introduce-set(I:Int) => . ... </k>
     <sets> .Bag => <set> I </set> </sets>

rule <k> eliminate-set => I ... </k>
     <sets> <set> I </set> => .Bag </sets>

rule <k> introduce-list-start(I:Int) => . ... </k>
     <lists> (.Bag => <list> I </list>) ... </lists>

rule <k> introduce-list-end(I:Int) => . ... </k>
     <lists> ... (.Bag => <list> I </list>) </lists>

rule <k> eliminate-list-start => I ... </k>
     <lists> (<list> I </list> => .Bag) ... </lists>

rule <k> eliminate-list-end => I ... </k>
     <lists> ... (<list> I </list> => .Bag) </lists>

Notice that for multiplicity="?", we only admit a single <set> instance at a
time. For the type="List" cell, we can add/eliminate cells from the front or
back of the <lists> cell. Also note that we use .Bag to indicate the empty
cell collection in all cases.

+

Declaring type="Map" indicates that the first sub-cell will be used as a
cell-key. This means that matching on those cells will be done as a map-lookup
operation if the cell-key is mentioned in the rule (for performance). If the
cell-key is not mentioned, matching falls back to a normal nondeterministic
choice constrained by other parts of the match and any side-conditions. Note
that there is no special meaning to the name of the cells (in this case <map>,
<map-key>, <map-value-1>, and <map-value-2>). Additionally, any number of
sub-cells are allowed, and the entire instance of the cell collection is
considered part of the cell-value, including the cell-key (<map-key> in this
case) and the surrounding collection cell (<map> in this case).

+

For example, the following rules introduce, set, retrieve from, and eliminate +type="Map" cells:

+
k
rule <k> introduce-map(I:Int) => . ... </k>
     <maps> ... (.Bag => <map> <map-key> I </map-key> ... </map>) ... </maps>

rule <k> set-map-value-1(I:Int, S:String) => . ... </k>
     <map> <map-key> I </map-key> <map-value-1> _ => S </map-value-1> ... </map>

rule <k> set-map-value-2(I:Int, V:Int) => . ... </k>
     <map> <map-key> I </map-key> <map-value-2> _ => V </map-value-2> ... </map>

rule <k> retrieve-map-value-1(I:Int) => S ... </k>
     <map> <map-key> I </map-key> <map-value-1> S </map-value-1> ... </map>

rule <k> retrieve-map-value-2(I:Int) => V ... </k>
     <map> <map-key> I </map-key> <map-value-2> V </map-value-2> ... </map>

rule <k> eliminate-map(I:Int) => . ... </k>
     <maps> ... (<map> <map-key> I </map-key> ... </map> => .Bag) ... </maps>

Note how each rule makes sure that the <map-key> cell is mentioned, and we
continue to use .Bag to indicate the empty collection. Also note that when
introducing new map elements, you may omit any of the sub-cells which are not
the cell-key. If you do omit sub-cells, you must use structural framing ... to
indicate the missing cells; they will receive the default value given in the
configuration ... declaration.

+

Rule Declaration

+

Rule Structure

+

Each K rule follows the same basic structure (given as an example here):

+
k
rule LHS => RHS requires REQ ensures ENS [ATTRS]

The portion between rule and requires is referred to as the rule body, +and may contain one or more rewrites (though not nested). Here, the rule body is +LHS => RHS, where LHS and RHS are used as placeholders for the pre- and +post- states. Note that we lose no generality referring to the LHS or the +RHS, even in the presence of multiple rewrites, as the rewrites are pulled to +the top-level anyway.

+

Next is the requires clause, represented here as REQ. The requires clause is +an additional predicate (function-like term of sort Bool), which is to be +evaluated before applying the rule. If the requires clause does not evaluate to +true, then the rule does not apply.

+

Finally is the ensures clause, represented here as ENS. The ensures clause +is to be interpreted as a post-condition, and will be automatically added to the +path condition if the rule applies. It may cause the entire term to become +undefined, but the backend will not stop itself from applying the rule in this +case. Note that concrete backends (eg. the LLVM backend) are free to ignore the +ensures clause.

+

Overall, the transition represented by such a rule is from a state +LHS #And REQ ending in a state RHS #And ENS. When backends apply this rule +as a transition/rewrite, they should:

+
  • Check if pattern LHS matches (or unifies) with the current term, giving
    substitution alpha.
  • Check if the instantiation alpha(REQ) is valid (or satisfiable).
  • Build the new term alpha(RHS #And ENS), and check if it's satisfiable.
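
As a purely illustrative sketch of this structure (the cell and symbol names
here are hypothetical), a rule using both clauses might look like:

k
rule <k> withdraw(Amount) => . ... </k>
     <balance> Bal => Bal -Int Amount </balance>
  requires Amount <=Int Bal        // the rule only applies when funds suffice
  ensures Bal -Int Amount >=Int 0  // post-condition added to the path condition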

Pattern Matching operator

+

Sometimes when you want to express a side condition, you want to say that a +rule matches if a particular term matches a particular pattern, or if it +instead does /not/ match a particular pattern.

+

The syntax in K for this is :=K and :/=K. It has similar meaning to ==K and
=/=K, except that where ==K and =/=K express equality, :=K and :/=K express
model membership. That is to say, whether or not the rhs is a member of the
set of terms expressed by the lhs pattern. Because the lhs of these operators
is a pattern, the user can use variables in the lhs of the operator. However,
due to current limitations, these variables are NOT bound in the rest of the
term. The user is thus encouraged to use anonymous variables only, although
this is not required.
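
For example (a hypothetical sketch), a side condition can test whether a term
has a particular shape, using anonymous variables in the pattern:

k
syntax KItem ::= error(String)
syntax Bool ::= isError(KItem) [function]
rule isError(R) => true  requires R :=K error(_)
rule isError(R) => false requires R :/=K error(_)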

+

This is compiled by the K frontend down to an efficient pattern matching on a +fresh function symbol.

+

Anonymous function applications

+

There are a number of cases in K where you would prefer to be able to take some +term on the RHS, bind it to a variable, and refer to it in multiple different +places in a rule.

+

You might also prefer to take a variable for which you know some of its +structure, and modify some of its internal structure without requiring you to +match on every single field contained inside that structure.

+

In order to do this, we introduce syntax to K that allows you to construct +anonymous functions in the RHS of a rule and apply them to a term.

+

The syntax for this is:

+
#fun(RuleBody)(Argument)
+
+

Note the limitations currently imposed by the implementation. These functions +are not first-order: you cannot bind them to a variable and inject them like +you can with a regular klabel for a function. You also cannot express multiple +rules or multiple parameters, or side conditions. All of these are extensions +we would like to support in the future, however.

+

In the following, we use three examples to illustrate the behavior of #fun. +We point out that the support for #fun is provided by the frontend, not the +backends.

+

The three examples are real examples borrowed or modified from existing language +semantics.

+

Example 1 (A Simple Self-Explained Example).

+
#fun(V:Val => isFoo(V) andBool isBar(V))(someFunctionReturningVal())
+
+

Example 2 (Nested #fun).

+
   #fun(C
=> #fun(R
=> #fun(E
=> foo1(E, R, C)
   )(foo2(C))
   )(foo3(0))
   )(foo4(1))

This example is from the beacon semantics:
https://github.com/runtimeverification/beacon-chain-spec/blob/master/beacon-chain.k
at line 302, with some modification for simplicity. Note how variables C, R, E
are bound in the nested #fun.

+

Example 3 (Matching a structure).

+
rule foo(K, RECORD) =>
  #fun(record(... field: _ => K))(RECORD)
+

Unlike previous examples, the LHS of #fun in this example is no longer a +variable, but a structure. It has the same spirit as the first two examples, +but we match the RECORD with a structure record( DotVar, field: X), instead +of a standalone variable. We also use K's local rewrite syntax (i.e., the +rewriting symbol => does not occur at the top-level) to prevent writing +duplicate expressions on the LHS and RHS of the rewriting.

+

Macros and Aliases

+

A production can be tagged with the macro, alias, macro-rec, or alias-rec
attributes. In all cases, this signifies that it is a macro production. Macro
rules are rules where the top symbol of the left-hand side is a macro label.
Macro rules are applied statically during compilation on all terms that they
match, and statically before program execution on the initial configuration.
Currently, macro rules are required to not have side conditions, although they
can contain sort checks.

+

alias rules are also applied statically in reverse prior to unparsing on the +final configuration. Note that a macro rule can have unbound variables in the +right hand side. When such a macro exists, it should be used only on the left +hand side of rules, unless the user is performing symbolic execution and expects +to introduce symbolic terms into the subject being rewritten.

+

However, when used on the left hand side of a rule, it functions similarly to a +pattern alias, and allows the user to concisely express a reusable pattern that +they wish to match on in multiple places.

+

For example, consider the following semantics:

+
k
syntax KItem ::= "foo" [alias] | "foobar"
syntax KItem ::= bar(KItem) [macro] | baz(Int, KItem)
rule foo => foobar
rule bar(I) => baz(?_, I)
rule bar(I) => I

This will rewrite baz(0, foo) to foo. First baz(0, foo) will be rewritten +statically to baz(0, foobar). Then the non-macro rule will apply (because +the rule will have been rewritten to rule baz(_, I) => I). Then foobar will +be rewritten statically after rewriting finishes to foo via the reverse form +of the alias.

+

Note that macros do not apply recursively within their own expansion. This is +done so as to ensure that macro expansion will always terminate. If the user +genuinely desires a recursive macro, the macro-rec and alias-rec attributes +can be used to provide this behavior.

+

For example, consider the following semantics:

+
k
syntax Exp ::= "int" Exp ";" | "int" Exps ";" [macro] | Exp Exp | Id
syntax Exps ::= List{Exp,","}

rule int X:Id, X':Id, Xs:Exps ; => int X ; int X', Xs ;

This will expand int x, y, z; to int x; int y, z; because the macro does +not apply the second time after applying the substitution of the first +application. However, if the macro attribute were changed to the macro-rec +attribute, it would instead expand (as the user likely intended) to +int x; int y; int z;.

+

The alias-rec attribute behaves with respect to the alias attribute the +same way the macro-rec attribute behaves with respect to macro.

+

anywhere rules

+

Some rules are not functional, but you want them to apply anywhere in the +configuration (similar to functional rules). You can use the anywhere +attribute on a rule to instruct the backends to make sure they apply anywhere +they match in the entire configuration.

+

For example, if you want to make sure that some associative operator is always +right-associated anywhere in the configuration, you can do:

+
k
syntax Stmt ::= Stmt ";" Stmt

rule (S1 ; S2) ; S3 => S1 ; (S2 ; S3) [anywhere]

Then after every step, all occurrences of _;_ will be re-associated. Note that +this allows the symbol _;_ to still be a constructor, even though it is +simplified similarly to a function.

+

trusted claims

+

You may add the trusted attribute to a given claim for the K prover to +automatically add it to the list of proven circularities, instead of trying to +discharge it separately.
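
For example (a hypothetical sketch of a claim), a lemma that has already been
proven elsewhere can be reused without re-checking it:

k
claim <k> runLoop(N:Int) => done ... </k>
  requires N >=Int 0
  [trusted]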

+

Projection and Predicate functions

+

K automatically generates certain predicate and projection functions from the +syntax you declare. For example, if you write:

+
k
syntax Foo ::= foo(bar: Bar)

It will automatically generate the following K code:

+
k
syntax Bool ::= isFoo(K) [function]
syntax Foo ::= "{" K "}" ":>Foo" [function]
syntax Bar ::= bar(Foo) [function]

rule isFoo(F:Foo) => true
rule isFoo(_) => false [owise]

rule { F:Foo }:>Foo => F
rule bar(foo(B:Bar)) => B

The first two types of functions are generated automatically for every sort in +your K definition, and the third type of function is generated automatically +for each named nonterminal in your definition. Essentially, isFoo for some +sort Foo will tell you whether a particular term of sort K is a Foo, +{F}:>Foo will cast F to sort Foo if F is of sort Foo and will be +undefined (i.e., theoretically defined as #Bottom, the bottom symbol in +matching logic) otherwise. Finally, bar will project out the child of a foo +named bar in its production declaration.

+

Note that if another term of equal or smaller sort to Foo exists and has a +child named bar of equal or smaller sort to Bar, this will generate an +ambiguity during parsing, so care should be taken to ensure that named +nonterminals are sufficiently unique from one another to prevent such +ambiguities. Of course, the compiler will generate a warning in this case.

+

simplification attribute

+

The simplification attribute identifies rules outside the main semantics that +are used to simplify function patterns.

+

Conditions: A simplification rule is applied by matching the function +arguments, instead of unification as when applying function definition +rules. This allows function symbols to appear nested as arguments to other +functions on the left-hand side of a simplification rule, which is forbidden in +function definition rules. For example, this rule would not be accepted as a +function definition rule:

+
k
rule (X +Int Y) +Int Z => X +Int (Y +Int Z) [simplification]

A simplification rule is only applied when the current side condition implies +the requires clause of the rule, like function definition rules.

+

Order: The simplification attribute accepts an optional integer argument +which is the rule's simplification priority; if the optional argument is not +specified, it is equivalent to a simplification priority of 50. Backends +should attempt simplification rules in order of their simplification +priority, but are not required to do so; in fact, the backend is free to apply +simplification rules at any time. Because of this, users must ensure that +simplification rules are sound regardless of their order of application. This +differs from the priority attribute in that rules with the priority +attribute must be applied in their priority order by the backend. It is an +error to have the priority attribute on a simplification rule.
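
For instance, a lemma can be given a priority other than the default 50 (the
rule itself is just an illustrative sketch):

k
rule X +Int 0 => X [simplification(45)]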

+

For example, for the following definition:

+
k
syntax WordStack ::= Int ":" WordStack | ".WordStack"
syntax Int ::= sizeWordStack ( WordStack ) [function]
             | sizeWordStackAux ( WordStack , Int ) [function]
// --------------------------------------------------------------
rule sizeWordStack(WS) => sizeWordStackAux(WS, 0)

rule sizeWordStackAux(.WordStack, N) => N
rule sizeWordStackAux(W : WS , N) => sizeWordStackAux(WS, N +Int 1)

We might add the following simplification lemma:

+
k
rule sizeWordStackAux(WS, N) => N +Int sizeWordStackAux(WS, 0)
  requires N =/=Int 0
  [simplification]

Then this simplification rule will only apply if the Haskell backend can prove +that notBool N =/=Int 0 is unsatisfiable. This avoids an infinite cycle of +applying this simplification lemma.

+

NOTE: The frontend and Haskell backend do not check that supplied
simplification rules are sound; this is the developer's responsibility. In
particular, rules with the simplification attribute must preserve definedness;
that is, if the left-hand side refers to any partial function then:

+
  • the right-hand side must be #Bottom when the left-hand side is #Bottom, or
  • the rule must have an ensures clause that is false when the left-hand side
    is #Bottom, or
  • the rule must have a requires clause that is false when the left-hand side
    is #Bottom.

These conditions are in order of decreasing preference: the best option is to +preserve #Bottom on the right-hand side, the next best option is to have an +ensures clause, and the least-preferred option is to have a requires clause. +The most preferred option is to write total functions and avoid the entire issue.
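
For example, here is a sketch of a lemma whose left-hand side mentions the
partial function modInt; its requires clause is false exactly when the
left-hand side is #Bottom (i.e., when M is 0), satisfying the last condition
above:

k
rule (X modInt M) modInt M => X modInt M
  requires M =/=Int 0
  [simplification]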

+

NOTE: The Haskell backend does not attempt to prove claims whose right-hand
side is #Bottom. The reason for this is that the general case is undecidable,
and the backend might enter an infinite loop. Therefore, the backend emits a
warning if it encounters such a claim.

+

concrete and symbolic attributes (Haskell backend)

+

Users can control the application of simplification rules using the concrete +and the symbolic attributes by specifying the type of patterns the rule's +arguments are to match.

+

A concrete pattern is a pattern which does not contain variables or unevaluated +functions, otherwise the pattern is symbolic.

+

The semantics of the two attributes is defined as follows:

+
  • If a simplification rule is marked concrete, then all arguments must be
    concrete for the rule to match.
  • If a simplification rule is marked symbolic, then all arguments must be
    symbolic for the rule to match.
  • The syntax concrete(<variables>) (resp. symbolic(<variables>)), where
    <variables> is a list of variable names separated by commas, can be used
    to specify the exact arguments the user expects to match concrete (resp.
    symbolic) patterns.

For example, the following will only match when all arguments +are concrete:

+
k
rule X +Int (Y +Int Z) => (X +Int Y) +Int Z [simplification, concrete]

Conversely, the following will only match when all arguments +are symbolic:

+
k
rule X +Int (Y +Int Z) => (X +Int Y) +Int Z [simplification, symbolic]

In practice, the following rules will re-associate and commute terms to combine +concrete arguments:

+
k
rule (A +Int Y) +Int Z => A +Int (Y +Int Z)
  [concrete(Y, Z), symbolic(A), simplification]

rule X +Int (B +Int Z) => B +Int (X +Int Z)
  [concrete(X, Z), symbolic(B), simplification]

The unboundVariables attribute

+

Normally, K rules are not allowed to contain regular (i.e., not fresh, not +existential) variables in the RHS / requires / ensures clauses which are not +bound in the LHS.

+

However, in certain cases this behavior might be desired, like, for example, +when specifying a macro rule which is to be used in the LHS of other rules. +To allow for such cases, but still be useful and perform the unboundness checks +in regular cases, the unboundVariables attributes allows the user to specify +a comma-separated list of names of variables which can be unbound in the rule.

+

For example, in the macro declaration

+
k
rule cppEnumType => bar(_, scopedEnum() #Or unscopedEnum() ) [unboundVariables(_)]

the declaration unboundVariables(_) allows the rule to pass the unbound +variable checks, and this in turn allows for cppEnumType to be used in +the LHS of a rule to mean the pattern above:

+
k
rule inverseConvertType(cppEnumType, foo((cppEnumType #as T::CPPType => underlyingType(T))))

The memo attribute

+

The memo attribute is a hint from the user to the backend to memoize a +function. Not all backends support memoization, but when the attribute is used +and the definition is compiled for a memo-supporting backend, then calls to +the function may be cached. At the time of writing, only the Haskell +backend supports memoization.

+

Limitations of memoization with the Haskell backend

+

The Haskell backend will only cache a function call if all arguments are concrete.

+

It is recommended not to memoize recursive functions, as each recursive call +will be stored in the cache, but only the first iteration will be retrieved from +the cache; that is, the cache will be filled with many unreachable +entries. Instead, we recommend to perform a worker-wrapper transformation on +recursive functions, and apply the memo attribute to the wrapper.
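
For instance, here is a sketch of that pattern (function names hypothetical):
the recursion happens in an un-memoized worker, and only the wrapper that
callers use is memoized:

k
syntax Int ::= fib(Int)       [function, memo]
             | fibWorker(Int) [function]

rule fib(N) => fibWorker(N)

rule fibWorker(0) => 0
rule fibWorker(1) => 1
rule fibWorker(N) => fibWorker(N -Int 1) +Int fibWorker(N -Int 2) requires N >Int 1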

+

Warning: A function declared with the memo attribute must not use +uninterpreted functions in the side-condition of any rule. Memoizing such an +impure function is unsound. To see why, consider the following rules:

+
k
syntax Bool ::= impure( Int ) [function] + +syntax Int ::= unsound( Int ) [function, memo] +rule unsound(X:Int) => X +Int 1 requires impure(X) +rule unsound(X:Int) => X requires notBool impure(X) +
+

Because the function impure is not given rules to cover all inputs, unsound +can be memoized incoherently. For example,

+
{unsound(0) #And {impure(0) #Equals true}} #Equals 1
+
+

but

+
{unsound(0) #And {impure(0) #Equals false}} #Equals 0
+
+

The memoized value of unsound(0) would be incoherently determined by which +pattern the backend encounters first.

+

Variable Sort Inference

+

In K, it is not required that users declare the sorts of variables in rules or +in the initial configuration. If the user does not explicitly declare the sort +of a variable somewhere via a cast (see below), the sort of the variable is +inferred from context based on the sort signature of every place the variable +appears in the rule.

+

As an example, consider the rule for addition in IMP:

+
k
syntax Exp ::= Exp "+" Exp | Int + + rule I1 + I2 => I1 +Int I2 +
+

Here +Int is defined in the INT module with the following signature:

+
k
syntax Int ::= Int "+Int" Int [function] +
+

In the rule above, the sort of both I1 and I2 is inferred as Int. This is because a variable must have the same sort every place it appears within the same rule. While a variable appearing only on the left-hand side of the rule could have sort Exp instead, the same variable appears as a child of +Int, which constrains the sorts of I1 and I2 more tightly. Since the sort must be a subsort of Int or equal to Int, and Int has no subsorts, we infer Int as the sorts of I1 and I2. This means that the above rule will not match until I1 and I2 become integers (i.e., have already been evaluated).

+

More complex examples are possible, however:

+
k
syntax Exp ::= Exp "+" Int | Int + rule _ + _ => 0 +
+

Here we have two anonymous variables. They do not refer to the same variable +as one another, so they can have different sorts. The right side is constrained +by + to be of sort Int, but the left side could be either Exp or Int. +When this occurs, we have multiple solutions to the sorts of the variables in +the rule. K will only choose solutions which are maximal, however. To be +precise, if two different solutions exist, but the sorts of one solution are +all greater than or equal to the sorts of the other solution, K will discard +the smaller solution. Thus, in the case above, the variable on the left side +of the + is inferred of sort Exp, because the solution (Exp, Int) is +strictly greater than the solution (Int, Int).

+

It is possible, however, for terms to have multiple maximal solutions:

+
k
syntax Exp ::= Exp "+" Int | Int "+" Exp | Int + rule I1 + I2 => 0 +
+

In this example, there is an ambiguous parse. This could parse as either +the first + or the second. In the first case, the maximal solution chosen is +(Exp, Int). In the second, it is (Int, Exp). Neither of these solutions is +greater than the other, so both are allowed by K. As a result, this program +will emit an error because the parse is ambiguous. To pick one solution over +the other, a cast or a prefer or avoid attribute can be used.

+

Casting

+

There are three main types of casts in K: the semantic cast, the strict cast, +and the projection cast.

+

Semantic casts

+

For every sort S declared in your grammar, K will define the following +production for you for use in rules:

+
k
syntax S ::= S ":S" +
+

The meaning of this cast is that the sort of the term inside the cast must be less than or equal to S. This can be used to resolve ambiguities, but its principal purpose is to guide execution by telling K what sort variables must match in order for the rule to apply. When compiled, it will generate a pattern that matches on an injection into S.
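 
For example, in the following sketch (the done constructor is hypothetical), the semantic cast ensures the rule only applies once the term at the front of the K cell has evaluated to an Int:

k
syntax KItem ::= done(Int)
rule <k> I:Int => done(I) ... </k>

Without the cast, I would be inferred at a more general sort and the rule could apply to unevaluated terms as well.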

+

Strict casts

+

K also introduces the strict cast:

+
k
syntax S ::= S "::S" +
+

The meaning at runtime is exactly the same as the semantic cast; however, it restricts the sort of the term inside the cast to exactly S. That is to say, if you use it on something of a strictly smaller sort, it will generate a type error. This is useful in certain circumstances to help disambiguate terms, when a semantic cast would not have resolved the ambiguity. As such, it is primarily used to solve ambiguities rather than to guide execution.
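 
For example, returning to the ambiguous grammar from the section on variable sort inference, a strict cast can select one parse (an illustrative sketch):

k
syntax Exp ::= Exp "+" Int | Int "+" Exp | Int
rule I1 + I2::Exp => 0

Because I2 must have sort exactly Exp, only the Int "+" Exp production is possible, so the ambiguity disappears; a semantic cast I2:Exp would not have helped here, since a term of sort Int also satisfies it.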

+

Projection casts

+

K also introduces the projection cast:

+
k
syntax {S2} S ::= "{" S2 "}" ":>S" +
+

The meaning of this cast at runtime is that if the term inside is of sort S, it has its injection stripped away and the value inside is returned as a term of static sort S. However, if the term is of a different sort, it is an error and execution will get stuck. Thus the primary usefulness of this cast is to cast the return value of a function with a greater sort down to a strictly smaller sort that you expect the return value of the function to have. For example:

+
k
syntax Exp ::= foo(Exp) [function] | bar(Int) | Int + rule foo(I:Int) => I + rule bar(I) => bar({foo(I +Int 1)}:>Int) +
+

Here we know that foo(I +Int 1) will return an Int, but the return sort of +foo is Exp. So we project the result into the Int sort so that it can +be placed as the child of a bar.

+

owise and priority attributes.

+

Sometimes it is simply not convenient to explicitly describe every single negative case under which a rule should not apply. Instead, we simply wish to say that a rule should only apply after some other set of rules has been tried. K introduces two different attributes that can be added to rules which will automatically generate the necessary matching conditions in a manner that is performant for concrete execution (indeed, during concrete execution it generally outperforms code where the conditions are written explicitly).

+

The first is the owise attribute. Very roughly, rules without an attribute +indicating their priority apply first, followed by rules with the owise +attribute only if all the other rules have been tried and failed. For example, +consider the following function:

+
k
syntax Int ::= foo(Int) [function] +rule foo(0) => 0 +rule foo(_) => 1 [owise] +
+

Here foo(0) is defined explicitly as 0. Any other integer yields the +integer 1. In particular, the second rule above will only be tried after the +first rule has been shown not to apply.

+

This is because the first rule has a lower number assigned for its priority +than the second rule. In practice, each rule in your semantics is implicitly +or explicitly assigned a numerical priority. Rules are tried in increasing +order of priority, starting at zero and trying each increasing numerical value +successively.

+

You can specify the priority of a rule with the priority attribute. For +example, I could equivalently write the second rule above as:

+
k
rule foo(_) => 1 [priority(200)] +
+

The number 200 is not chosen at random. In fact, when you use the owise +attribute, what you are doing is implicitly setting the priority of the rule +to 200. This has a couple of implications:

+
    +
  1. Multiple rules with the owise attribute all have the same priority and thus can apply in any order.
  2. Rules with priority higher than 200 apply after all rules with the owise attribute have been tried.
+

There is one more rule by which priorities are assigned: a rule with no +attributes indicating its priority is assigned the priority 50. Thus, +with each priority explicitly declared, the above example looks like:

+
k
syntax Int ::= foo(Int) [function] +rule foo(0) => 0 [priority(50)] +rule foo(_) => 1 [owise] +
+

One final note: the llvm backend reserves priorities between 50 and 150 inclusive for certain specific purposes. Because of this, explicit priorities given within this region may not behave precisely as described above. The reservation exists primarily so that, where necessary, guidance can be provided to the pattern-matching algorithm when it would otherwise make bad choices about which rules to try first. You generally should not give any rule a priority within this region unless you know exactly what the implications are with respect to how the llvm backend orders matches.

+

Evaluation Strategy

+

strict and seqstrict attributes

+

The strictness attributes allow defining evaluation strategies without having +to explicitly make rules which implement them. This is done by injecting +heating and cooling rules for the subterms. For this to work, you need to +define what a result is for K, by extending the KResult sort.

+

For example:

+
k
syntax AExp ::= Int + | AExp "+" AExp [strict, klabel(addExp)] +
+

This generates two heating rules (where the hole syntaxes "[]" "+" AExp and AExp "+" "[]" are automatically added to create an evaluation context):

+
k
rule [addExp1-heat]: <k> HOLE:AExp + AE2:AExp => HOLE ~> [] + AE2 ... </k> [heat] +rule [addExp2-heat]: <k> AE1:AExp + HOLE:AExp => HOLE ~> AE1 + [] ... </k> [heat] +
+

And two corresponding cooling rules:

+
k
rule [addExp1-cool]: <k> HOLE:AExp ~> [] + AE2 => HOLE + AE2 ... </k> [cool] +rule [addExp2-cool]: <k> HOLE:AExp ~> AE1 + [] => AE1 + HOLE ... </k> [cool] +
+

Note that the rules are given labels based on the klabel of the production, which +nonterminal is the hole, and whether it's the heating or the cooling rule.

+

You will note that these rules can apply one after another infinitely. In +practice, the KResult sort is used to break this cycle by ensuring that only +terms that are not part of the KResult sort will be heated. The heat and +cool attributes are used to tell the compiler that these are heating and +cooling rules and should be handled in the manner just described. Nothing stops +the user from writing such heating and cooling rules directly if they wish, +although we describe other more convenient syntax for most of the advanced +cases below.

+

One other thing to note is that in the above sentences, HOLE is just a +variable, but it has special meaning in the context of sentences with the +heat or cool attribute. In heating or cooling rules, the variable named +HOLE is considered to be the term being heated or cooled and the compiler +will generate isKResult(HOLE) and notBool isKResult(HOLE) side conditions +appropriately to ensure that the backend does not loop infinitely. The module +BOOL will also be automatically and privately included for semantic +purposes. The syntax for parsing programs will not be affected.

+

In order for this functionality to work, you need to define the KResult sort. +For instance, we tell K that a term is fully evaluated once it becomes an Int +here:

+
k
syntax KResult ::= Int +
+

Note that you can also say that a given expression is strict only in specific argument positions. Here we use this to define "short-circuiting" boolean operators.

+
k
syntax KResult ::= Bool + +syntax BExp ::= Bool + | BExp "||" BExp [strict(1)] + | BExp "&&" BExp [strict(1)] + +rule <k> true || _ => true ... </k> +rule <k> false || REST => REST ... </k> + +rule <k> true && REST => REST ... </k> +rule <k> false && _ => false ... </k> +
+

If you want to force a specific evaluation order of the arguments, you can use +the variant seqstrict to do so. For example, this would make the boolean +operators short-circuit in their second argument first:

+
k
syntax KResult ::= Bool + +syntax BExp ::= Bool + | BExp "||" BExp [seqstrict(2,1)] + | BExp "&&" BExp [seqstrict(2,1)] + +rule <k> _ || true => true ... </k> +rule <k> REST || false => REST ... </k> + +rule <k> REST && true => REST ... </k> +rule <k> _ && false => false ... </k> +
+

This will generate rules like this in the case of _||_ (note that BE1 will +not be heated unless isKResult(BE2) is true, meaning that BE2 must be +evaluated first):

+
k
rule <k> BE1:BExp || HOLE:BExp => HOLE ~> BE1 || [] ... </k> [heat] +rule <k> HOLE:BExp || BE2:BExp => HOLE ~> [] || BE2 ... </k> requires isKResult(BE2) [heat] + +rule <k> HOLE:BExp ~> [] || BE2 => HOLE || BE2 ... </k> [cool] +rule <k> HOLE:BExp ~> BE1 || [] => BE1 || HOLE ... </k> [cool] +
+

Context Declaration

+

Sometimes more advanced evaluation strategies are needed. By default, the +strict and seqstrict attributes are limited in that they cannot describe +the context in which heating or cooling should occur. When this type of +control over the evaluation strategy is required, context sentences can be +used to simplify the process of declaring heating and cooling when it would be +unnecessarily verbose to write heating and cooling rules directly.

+

For example, if the user wants to heat a term if it exists under a foo +constructor if the term to be heated is of sort bar, one might write the +following context (with the optional label):

+
k
context [foo]: foo(HOLE:Bar) +
+

Once again, note that HOLE is just a variable, but one that has special +meaning to the compiler indicating the position in the context that should +be heated or cooled.

+

This will automatically generate the following sentences:

+
k
rule [foo-heat]: <k> foo(HOLE:Bar) => HOLE ~> foo([]) ... </k> [heat] +rule [foo-cool]: <k> HOLE:Bar ~> foo([]) => foo(HOLE) ... </k> [cool] +
+

The user may also write the K cell explicitly in the context declaration +if they want to match on another cell as well, for example:

+
k
context <k> foo(HOLE:Bar) ... </k> <state> .Map </state> +
+

This context will now only heat or cool if the state cell is empty.

+

Side conditions in context declarations

+

The user is allowed to write a side condition in a context declaration, like +so:

+
k
context foo(HOLE:Bar) requires baz(HOLE) +
+

This side condition will be appended verbatim to the heating rule that is generated; however, it will not affect the cooling rule that is generated:

+
k
rule <k> foo(HOLE:Bar) => HOLE ~> foo([]) ... </k> requires baz(HOLE) [heat] +rule <k> HOLE:Bar ~> foo([]) => foo(HOLE) ... </k> [cool] +
+

Rewrites in context declarations

+

The user can also include exactly one rewrite operation in a context declaration if that rule rewrites the variable HOLE on the left-hand side to a term containing HOLE on the right-hand side. For example:

+
k
context foo(HOLE:Bar => bar(HOLE)) +
+

In this case, the code generated will be as follows:

+
k
rule <k> foo(HOLE:Bar) => bar(HOLE) ~> foo([]) ... </k> [heat] +rule <k> bar(HOLE:Bar) ~> foo([]) => foo(HOLE) ... </k> [cool] +
+

This can be useful if the user wishes to evaluate a term using a different +set of rules than normal.

+

result attribute

+

Sometimes it is necessary to be able to evaluate a term to a different sort +than KResult. This is done by means of adding the result attribute to +a strict production, a context, or an explicit heating or cooling rule:

+
k
syntax BExp ::= Bool + | BExp "||" BExp [seqstrict(2,1), result(Bool)] +
+

In this case, the sort check used by seqstrict and by the heat and cool +attributes will be isBool instead of isKResult. This particular example +does not really require use of the result attribute, but if the user wishes +to evaluate a term of sort KResult further, the result attribute would be +required.

+

hybrid attribute

+

In certain situations, it is desirable to treat a particular production which +has the strict attribute as a result if the term has had its arguments fully +evaluated. This can be accomplished by means of the hybrid attribute:

+
k
syntax KResult ::= Bool + +syntax BExp ::= Bool + | BExp "||" BExp [strict(1), hybrid] +
+

This attribute is equivalent in this case to the following additional axiom +being added to the definition of isKResult:

+
k
rule isKResult(BE1:BExp || BE2:BExp) => true requires isKResult(BE1) +
+

Sometimes you wish to declare a production hybrid with respect to a predicate +other than isKResult. You can do this by specifying a sort as the body of the +hybrid attribute, e.g.:

+
k
syntax BExp ::= BExp "||" BExp [strict(1), hybrid(Foo)] +
+

generates the rule:

+
k
rule isFoo(BE1:BExp || BE2:BExp) => true requires isFoo(BE1) +
+

Properly speaking, hybrid takes an optional comma-separated list of sort +names. If the list is empty, the attribute is equivalent to hybrid(KResult). +Otherwise, it generates hybrid predicates for exactly the sorts named.
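 
For example, a production declared hybrid over two sorts (an illustrative sketch, assuming sorts Foo and Bar exist) generates one such predicate rule per listed sort:

k
syntax BExp ::= BExp "||" BExp [strict(1), hybrid(Foo, Bar)]

// Conceptually, this generates:
//   rule isFoo(BE1:BExp || BE2:BExp) => true requires isFoo(BE1)
//   rule isBar(BE1:BExp || BE2:BExp) => true requires isBar(BE1)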

+

Context aliases

+

Sometimes it is necessary to define a fairly complicated evaluation strategy +for a lot of different operators. In this case, the user could simply write +a number of complex context declarations, however, this quickly becomes +tedious. For this purpose, K has a concept called a context alias. A context +alias is a bit like a template for describing contexts. The template can then +be instantiated against particular productions using the strict and +seqstrict attributes.

+

Here is a (simplified) example taken from the K semantics of C++:

+
k
context alias [c]: <k> HERE:K ... </k> <evaluate> false </evaluate> +context alias [c]: <k> HERE:K ... </k> <evaluate> true </evaluate> [result(ExecResult)] + +syntax Expr ::= Expr "=" Init [strict(c; 1)] +
+

This defines the evaluation strategy during the translation phase of a C++ +program for the assignment operator. It is equivalent to writing the following +context declarations:

+
k
context <k> HOLE:Expr = I:Init ... </k> <evaluate> false </evaluate> +context <k> HOLE:Expr = I:Init ... </k> <evaluate> true </evaluate> [result(ExecResult)] +
+

This says that if the evaluate cell contains false, the term is evaluated as normal to a KResult, but if the evaluate cell contains true, it is instead evaluated to the ExecResult sort.

+

Essentially, we have given a name to this evaluation strategy in the form of +the rule label on the context alias sentences (in this case, c). We can +then say that we want to use this evaluation strategy to evaluate particular +arguments of particular productions by referring to it by name in a strict +attribute. For example, strict(c) will instantiate these contexts once for +each argument of the production, whereas strict(c; 1) will instantiate it +only for the first argument. The special variable HERE is used to tell the +compiler where you want to place the production that is to be heated or cooled.

+

You can also specify multiple context aliases for different parts of a production, +for example:

+
k
syntax Exp ::= foo(Exp, Exp) [strict(left; 1; right; 2)] +
+

This says that we can evaluate the left and right arguments in either order, but to evaluate +the left using the left context alias and the right using the right context alias.

+

We can also say seqstrict(left; 1; right; 2), in which case we additionally must evaluate +the left argument before the right argument. Note, all strict positions are considered collectively +when determining the evaluation order of seqstrict or the hybrid predicates.

+

A strict attribute with no rule label associated with it is equivalent to +a strict attribute given with the following context alias:

+
k
context alias [default]: <k> HERE:K ... </k> +
+

One syntactic convenience that is provided is that if you wish to declare the following context:

+
k
context foo(HOLE => bar(HOLE)) +
+

you can simply write the following:

+
k
syntax Foo ::= foo(Bar) [strict(alias)] + +context alias [alias]: HERE [context(bar)] +
+

Pattern Matching

+

As Patterns

+

New syntax has been added to K for matching a pattern and binding the resulting +match in its entirety to a variable.

+

The syntax is:

+
Pattern #as V::Var
+
+

In this case, Pattern, including any variables, is matched and the resulting +variables are added to the substitution if matching succeeds. Furthermore, the +term matched by Pattern is added to the substitution as V.
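 
For example, in the following sketch (with hypothetical constructors), the whole bar(...) subterm is bound to B and reused on the right-hand side without being rebuilt:

k
syntax Bar ::= bar(Int)
syntax Foo ::= foo(Bar) | baz(Bar, Int)

rule foo(bar(X) #as B) => baz(B, X)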

+

This code can also be used outside of any rewrite, in which case matching +occurs as if it appeared on the left hand side, and the right hand side becomes +a variable corresponding to the alias.

+

It is an error to use an as pattern on the right hand side of a rule.

+

Record-like KApply Patterns

+

We have added a syntax for matching on KApply terms which mimics the record +syntax in functional languages. This allows us to more easily express patterns +involving a KApply term in which we don't care about some or most of the +children, without introducing a dependency into the code on the number of +arguments which could be changed by a future refactoring.

+

The syntax is:

+
record(... field1: Pattern1, field2: Pattern2)
+
+

Note that this only applies to productions that are prefix productions. +A prefix production is considered by the implementation to be any production +whose production items match the following regular expression:

+
(Terminal(_)*) Terminal("(")
+(NonTerminal (Terminal(",") NonTerminal)* )?
+Terminal(")")
+
+

In other words, any sequence of terminals followed by an open parenthesis, an +optional comma separated list of non-terminals, and a close parenthesis.

+

If a prefix production has no named nonterminals, a record(...) syntax is +allowed, but in order to reference specific fields, it is necessary to give one +or more of the non-terminals in the production names.
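 
For example, given a prefix production with named nonterminals (the names below are purely illustrative), a rule can match only the field it cares about:

k
syntax Pgm ::= pgm(name: String, value: Int, flag: Bool)
syntax Int ::= getValue(Pgm) [function]

rule getValue(pgm(... value: V)) => V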

+

Note: because the implementation currently creates one production per possible +set of fields to match on, and because all possible permutations of all +possible subsets of a list of n elements is a number that scales factorially +and reaches over 100 thousand productions at n=8, we currently do not allow +fields to be matched in any order like a true record, but only in the same +order as appears in the production itself.

+

Given that this only reduces the number of productions to the size of the power +set, this will still explode the parsing time if we create large productions of +10 or more fields that all have names. This is something that should probably +be improved, however, productions with that large of an arity are rare, and +thus it has not been viewed as a priority.

+

Or Patterns

+

Sometimes you wish to express that a rule should match if any one of multiple patterns matches the same subterm. This can be expressed in K by using the #Or ML connective on the left-hand side of a rule.

+

For example:

+
k
rule foo #Or bar #Or baz => qux +
+

Here any of foo, bar, or baz will match this rule. Note that the behavior is +ill-defined if it is not the case that all the clauses of the or have the same +bound variables.

+

Matching global context in function rules

+

On occasion it is highly desirable to be able to look up information from the +global configuration and match against it when evaluating a function. For this +purpose, we introduce a new syntax for function rules.

+

This syntax allows the user to match on function context from within a +function rule:

+
k
syntax Int ::= foo(Int) [function] + +rule [[ foo(0) => I ]] + <bar> I </bar> + +rule something => foo(0) +
+

This is completely desugared by the K frontend and does not require any special +support in the backend. It is an error to have a rewrite inside function +context, as we do not currently support propagating such changes back into the +global configuration. It is also an error if the context is not at the top +level of a rule body.

+

Desugared code:

+
k
syntax Int ::= foo(Int, GeneratedTopCell) [function]

rule foo(0, <generatedTop>
              <bar> I </bar>
              ...
            </generatedTop> #as Configuration) => I
rule <generatedTop>
       <k> something ... </k>
       ...
     </generatedTop> #as Configuration
  => <generatedTop>
       <k> foo(0, Configuration) ... </k>
       ...
     </generatedTop>
+

Collection patterns

+

It is allowed to write patterns on the left hand side of rules which refer to +complex terms of sort Map, List, and Set, despite these patterns ostensibly +breaking the rule that terms which are functions should not appear on the left +hand side of rules. Such terms are destructured into pattern matching +operations.

+

The following forms are allowed:

+
// 0 or more elements followed by 0 or 1 variables of sort List followed by
+// 0 or more elements
+ListItem(E1) ListItem(E2) L:List ListItem(E3) ListItem(E4)
+
+// the empty list
+.List
+
+// 1 or more list update operations applied to a variable
+L:List [ K1 <- E1 ] [ K2 <- E2 ]
+
+// 0 or more elements in any order plus 0 or 1 variables of sort Set
+// in any order
+SetItem(K1) SetItem(K2) S::Set SetItem(K3) SetItem(K4)
+
+// the empty set
+.Set
+
+// 0 or more elements in any order plus by 0 or 1 variables of sort Map
+// in any order
+K1 |-> E1 K2 |-> E2 M::Map K3 |-> E3 K4 |-> E4
+
+// the empty map
+.Map
+
+

Here K1, K2, K3, K4 etc can be any pattern except a pattern containing both +function symbols and unbound variables. An unbound variable is a variable whose +binding cannot be determined by means of decomposing non-set-or-map patterns or +map elements whose keys contain no unbound variables.

+

This is determined recursively, ie, the term K1 |-> E2 E2 |-> E3 E3 |-> E4 is +considered to contain no unbound variables.

+

Note that in the pattern K1 |-> E2 K3 |-> E4 E4 |-> E5, K1 and K3 are unbound, but E4 is bound because it is bound by deconstructing the element whose key is K3, even though K3 is itself unbound.

+

In the above examples, E1, E2, E3, and E4 can be any pattern that is normally +allowed on the lhs of a rule.

+

When a map, set, or list key contains function symbols, we know that the +variables in that key are bound (because of the above restriction), so it is +possible to evaluate the function to a concrete term prior to performing the +lookup.

+

Indeed, this is the precise semantics which occurs; the function is evaluated +and the result is looked up in the collection.

+

For example:

+
k
syntax Int ::= f(Int) [function] +rule f(I:Int) => I +Int 1 +rule <k> I:Int => . ... </k> <state> ... SetItem(f(I)) ... </state> +
+

This will rewrite I to . if and only if the state cell contains +I +Int 1.

+

Note that in the case of Set and Map, one guarantee is that K1, K2, K3, and K4 +represent /distinct/ elements. Pattern matching fails if the correct number of +distinct elements cannot be found.

+

Matching on cell fragments

+

K allows matching fragments of the configuration, using them to construct terms, and passing them as function parameters.

+
k
configuration <t> + <k> #init ~> #collectOdd ~> $PGM </k> + <fs> + <f multiplicity="*" type="Set"> 1 </f> + </fs> + </t> +
+

The #collectOdd construct grabs the entire content of the <fs> cell. We may also match on only a portion of its content. Note that the fragment must be wrapped in an <fs> cell at the call site.

+
k
syntax KItem ::= "#collectOdd" +rule <k> #collectOdd => collectOdd(<fs> Fs </fs>) ... </k> + <fs> Fs </fs> +
+

The collectOdd function then collects the items it needs:

+
k
syntax Set ::= collectOdd(FsCell) [function] +rule collectOdd(<fs> <f> I </f> REST </fs>) => SetItem(I) collectOdd(<fs> REST </fs>) requires I %Int 2 ==Int 1 +rule collectOdd(<fs> <f> I </f> REST </fs>) => collectOdd(<fs> REST </fs>) requires I %Int 2 ==Int 0 +rule collectOdd(<fs> .Bag </fs>) => .Set +
+

all-path and one-path attributes to distinguish reachability claims

+

As the Haskell backend can handle both one-path and all-path reachability +claims, but both these are encoded as rewrite rules in K, these attributes can +be used to clarify what kind of claim a rule is.

+

In addition to being able to annotate a rule with one of them (if a rule is annotated with both at the same time, only one of them will be chosen), one can also annotate whole modules to give a default claim type for all rules in that module.
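 
As a sketch (assuming the attribute is accepted at module level, as described above), a specification module could be annotated so that every claim in it defaults to one-path:

k
module A-SPEC [one-path]
  rule [s1]: foo => ?X:Bool
endmodule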

+

Additionally, the Haskell backend introduces an extra command line option +for the K frontend, --default-claim-type, with possible values +all-path and one-path to allow choosing a default type for all +claims.

+

Set Variables

+

Motivation

+

Set variables were introduced as part of Matching Mu Logic, the mathematical +foundations for K. In Matching Mu Logic, terms evaluate to sets of values. +This is useful for both capturing partiality (as in 3/0) and capturing +non-determinism (as in 3 #Or 5). Consequently, symbol interpretation is +extended to have a collective interpretation over sets of input values.

+

Usually, K rules are given using regular variables, which expect that the term +they match is both defined and has a unique interpretation.

+

However, it is sometimes useful to have simplification rules which work over +any kind of pattern, be it undefined or non-deterministic. This behavior can be +achieved by using set variables to stand for any kind of pattern.

+

Syntax

+

Any variable prefixed by @ will be considered a set variable.

+

Example

+

Below is a simplification rule which motivated this extension:

+
  rule #Ceil(@I1:Int /Int @I2:Int) =>
+    {(@I2 =/=Int 0) #Equals true} #And #Ceil(@I1) #And #Ceil(@I2)
+    [anywhere]
+
+

This rule basically says that @I1:Int /Int @I2:Int is defined if @I1 and @I2 are defined and @I2 is not 0. Using set variables here is important, as it allows the simplification rule to apply to any symbolic patterns, without caring whether they are defined or not.

+

This allows simplifying the expression #Ceil((A:Int /Int B:Int) / C:Int) to:

+
{(C =/=Int 0) #Equals true} #And #Ceil(C) #And ({(B =/=Int 0) #Equals true}
#And #Ceil(B) #And #Ceil(A))
+
+

See kframework/kore#729 for +more details.

+

SMT Translation

+

K makes queries to an SMT solver (Z3) to discharge proof obligations when doing +symbolic execution. You can control how these queries are made using the +attributes smtlib, smt-hook, and smt-lemma on declared productions. +These attributes guide the prover when it tries to apply rules to discharge a +proof obligation.

+
    +
  • smt-hook(...) allows you to specify a term in SMTLIB2 format which should +be used to encode that production, and assumes that all symbols appearing in +the term are already declared by the SMT solver.
  • +
  • smtlib(...) allows you to declare a new SMT symbol to be used when that +production is sent to Z3, and gives it uninterpreted function semantics.
  • +
  • smt-lemma can be applied to a rule to encode it as a conditional equality when sending queries to Z3. A rule rule LHS => RHS requires REQ will be encoded as the conditional equality (=> REQ (= LHS RHS)). Every symbol present in the rule must have an smt-hook(...) or smtlib(...) attribute.
  • +
+
k
syntax Int ::= "~Int" Int [function, klabel(~Int_), symbol, + smtlib(notInt)] + | Int "^%Int" Int Int [function, klabel(_^%Int__), symbol, + smt-hook((mod (^ #1 #2) #3))] +
+

In the example above, we declare two productions ~Int_ and _^%Int__, and +tell the SMT solver to:

+
    +
  • use uninterpreted function semantics for ~Int_ via SMTLIB2 symbol +notInt, and
  • +
  • use the SMTLIB2 term (mod (^ #1 #2) #3) (where #N marks the Nth +production non-terminal argument positions) for _^%Int__, where mod and +^ already are declared by the SMT solver.
  • +
+
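 
For example, the following rule (a sketch; it assumes -Int already carries an SMT hook, as it does in the standard integer module) could be tagged with smt-lemma so the solver learns this fact about ~Int_:

k
rule ~Int X => 0 -Int X -Int 1 [smt-lemma]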

Caution

+

Set variables are currently only supported by the Haskell backend. +The use of rules with set variables should be sound for all other backends +which just execute by rewriting, however it might not be safe for backends +which want to guarantee coverage.

+

Variables occurring only in the RHS of a rule

+

This section presents possible scenarios requiring variables to only appear in +the RHS of a rule.

+

Summary

+

Except for ? variables and ! (fresh) variables, which are +required to only appear in the RHS of a rule, all other variables must +also appear in the LHS of a rule. This restriction also applies to anonymous +variables; in particular, for claims, ?_ (not _) should be used in the RHS +to indicate that something changes but we don't care to what value.

+

To support specifying random-like behavior, the above restriction can be relaxed +by annotating a rule with the unboundVariables attribute whenever the rule +intentionally contains regular variables only occurring in the RHS.

+

Introduction

+

K uses question mark variables of the form ?X to refer to +existential variables, and uses ensures to specify logical constraints on +those variables. +These variables are only allowed to appear in the RHS of a K rule.

+

If the rules represent rewrite (semantic) steps or verification claims, +then the ? variables are existentially quantified at the top of the RHS; +otherwise, if they represent equations, the ? variables are quantified at the +top of the entire rule.

+

Note that when both ?-variables and regular variables are present, +regular variables are (implicitly) universally quantified on top of the rule +(already containing the existential quantifications). +This essentially makes all ? variables depend on all regular variables.

+

All examples below are intended more for program verification / +symbolic execution, and thus concrete implementations might choose to ignore +them altogether or to provide ad-hoc implementations for them.

+

Example: Verification claims

+

Consider the following definition of a (transition) system:

+
k
module A + rule foo => true + rule bar => true + rule bar => false +endmodule +
+

Consider also, the following specification of claims about the definition above:

+
k
module A-SPEC + rule [s1]: foo => ?X:Bool + rule [s2]: foo => X:Bool [unboundVariables(X)] + rule [s3]: bar => ?X:Bool + rule [s4]: bar => X:Bool [unboundVariables(X)] +endmodule +
+
One-path interpretation
+
    +
  • (s1) says that there exists a path from foo to some boolean, which is +satisfied easily using the foo => true rule
  • +
  • (s3) says the same thing about bar and can be satisfied by either of +bar => true and bar => false rules
  • +
  • (s2) and (s4) can be better understood by replacing them with instances for each element of type Bool, which can be interpreted as saying that both true and false are reachable from foo for (s2), or from bar for (s4), respectively.
      +
    • (s2) cannot be verified as we cannot find a path from foo to false.
    • +
    • (s4) can be verified by using bar => true to show true is reachable and +bar => false to achieve the same thing for false
    • +
    +
  • +
+
All-path interpretation
+
    +
  • +

    (s1) says that all paths from foo will reach some boolean, which is +satisfied by the foo => true rule and the lack of other rules for foo

    +
  • +
  • +

    (s3) says the same thing about bar and can be satisfied by checking that +both bar => true and bar => false end in a boolean, and there are no +other rules for bar

    +
  • +
  • +

    (s2) and (s4) can be better understood by replacing them with instances for each element of type Bool, which can be interpreted as saying that both true and false are reachable on all paths originating in foo for (s2), or bar for (s4), respectively. This is a very strong claim, requiring that all paths originating in foo (bar) pass through both true and false, so neither (s2) nor (s4) can be verified.

    +

    Interestingly enough, adding a rule like false => true would make both +(s2) and (s4) hold.

    +
  • +
+

Example: Random Number Construct rand()

+

The random number construct rand() is a language construct which could be +easily conceived to be part of the syntax of a programming language:

+
k
Exp ::= "rand" "(" ")" +
+

The intended semantics of rand() is that it can rewrite to any integer in a single step. This could be expressed with the following infinitely many rules.

+
k
rule rand() => 0 +rule rand() => 1 +rule rand() => 2 + ... ... +rule rand() => (-1) +rule rand() => (-2) + ... ... +
+

Since we need an instance of the rule for every integer, one could summarize +the above infinitely many rules with the rule

+
rule rand() => I:Int [unboundVariables(I)]
+
+

Note that I occurs only in the RHS of the rule above, and thus the rule needs the unboundVariables(I) attribute to signal that this is intentional.

+

One can define variants of rand() by further constraining the output variable +as a precondition to the rule.

+
Rand-like examples
+
    +
  1. +

    randBounded(M,N) can rewrite to any integer between M and N

    +
    k
    syntax Exp ::= randBounded(Int, Int) +rule randBounded(M, N) => I + requires M <=Int I andBool I <=Int N + [unboundVariables(I)] +
    +
  2. +
  3. +

    randInList(Is) takes a list Is of items +and can rewrite in one step to any item in Is.

    +
    k
    syntax Exp ::= randInList (List) +rule randInList(Is) => I + requires I inList Is + [unboundVariables(I)] +
    +
  4. +
  5. +

    randNotInList(Is) takes a list Is of items +and can rewrite in one step to any item not in Is.

    +
    k
    syntax Exp ::= randNotInList (List) +rule randNotInList(Is) => I + requires notBool(I inList Is) + [unboundVariables(I)] +
    +
  6. +
  7. +

    randPrime(), can rewrite to any prime number.

    +
    k
    syntax Exp ::= randPrime () +rule randPrime() => X:Int + requires isPrime(X) + [unboundVariables(X)] +
    +

    where isPrime(_) is a predicate that can be defined in the usual way.

    +
  8. +
+

Note 1: none of the above are function symbols; they are language constructs.

+

Note 2: Currently the frontend does not allow rules with universally quantified +variables in the RHS which are not bound in the LHS.

+

Note 3: Allowing these rules in a concrete execution engine would require an algorithm for generating concrete instances of such variables satisfying the given constraints; thus the unboundVariables attribute serves two purposes:

+
    +
  • to allow such rules to pass the variable checks, and
  • +
  • to signal (concrete execution) backends that a specialized algorithm would be needed to instantiate these variables.
  • +
+

Example: Fresh Integer Construct fresh(Is)

+

The fresh integer construct fresh(Is) is a language construct.

+
Exp ::= ... | "fresh" "(" List{Int} ")"
+
+

The intended semantics of fresh(Is) is that it can always rewrite to an integer that is not in Is.

+

Note that fresh(Is) and randNotInList(Is) are different; the former does not need to be able to rewrite to every integer not in Is, while the latter does.

+

For example, it is correct to implement fresh(Is) so that it always returns the smallest positive integer that is not in Is, but the same implementation for randNotInList(Is) might be considered inadequate. In other words, there exist multiple correct implementations of fresh(Is), some of which may be deterministic, but there exists only one correct behavior for randNotInList(Is). Finally, note that randNotInList(Is) is a correct implementation of fresh(Is); hence, concrete execution engines can choose to handle such rules accordingly.

+

We use the following K syntax to define fresh(Is)

+
k
syntax Exp ::= fresh (List{Int}) +rule fresh(Is:List{Int}) => ?I:Int + ensures notBool (?I inList{Int} Is) +
+

A variant of this would be a choiceInList(Is) language construct which would +choose some number from a list:

+
k
syntax Exp ::= choiceInList (List{Int}) +rule choiceInList(Is:List{Int}) => ?I:Int + ensures ?I inList{Int} Is +
+

Note: This definition is different from one using a ! variable to indicate +freshness because using ! is just syntactic sugar for generating globally +unique instances and relies on a special configuration cell, and cannot be +constrained, while the fresh described here is local and can be constrained. +While the first is more appropriate for concrete execution, this might be +better for symbolic execution / program verification.

+

Example: Arbitrary Number (Unspecific Function) arb()

+

The function arb() is not a PL construct, but a mathematical function. +Therefore, its definition should not be interpreted as an execution step, but +rather as an equality.

+

The intended semantics of arb() is that it is an unspecified nullary function. The exact return value of arb() is unspecified in the semantics and is left up to the implementation. However, being a mathematical function, arb() must return the same value in any one implementation.

+

We do not need special frontend syntax to define arb(). +We only need to define it in the usual way as a function +(instead of a language construct), and provide no axioms for it. +The total attribute ensures that the function is total, i.e., +that it evaluates to precisely one value for each input.
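 
A minimal sketch of such a declaration (using the arb name from this section):

k
syntax Int ::= arb() [function, total]
// No rules are given: the value is unspecified, but fixed within any one
// implementation.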

+
Variants
+

There are many variants of arb(). For example, arbInList(Is) is +an unspecified function whose return value must be an element from Is.

+

Note that arbInList(Is) is different from choiceInList(Is), because choiceInList(Is) transitions to an integer in Is (possibly a different one each time it is used), while arbInList(Is) is equal to a (fixed) integer in Is.

+

W.r.t. the arb variants, we can use ? variables and the function +annotation to signal that we're defining a function and the value of the +function is fixed, but non-determinate.

+
k
syntax Int ::= arbInList(List{Int}) [function] +rule arbInList(Is:List{Int}) => ?I:Int + ensures ?I inList{Int} Is +
+

If elimination of existentials in equational rules is needed, one possible +approach would be through Skolemization, +i.e., replacing the ? variable with a new uninterpreted function depending +on the regular variables present in the function.

+

Example: Interval (Non-function Symbols) interval()

+

The symbol interval(M,N) is not a PL construct, nor a function in the +first-order sense, but a proper matching-logic symbol, whose interpretation is +in the powerset of its domain. +Its axioms will not use rewrites but equalities.

+

The intended semantics of interval(M,N) is that it equals the set of +integers that are larger than or equal to M and smaller than or equal to N.

+

Since expressing the axiom for interval requires an existential quantification on the right-hand side, making it a non-total symbol defined through an equation, using ? variables might be confusing, since their usage would differ from that presented in the previous sections.

+

Hence, the proposal to support this would be to write this as a proper ML rule. +A possible syntax for this purpose would be:

+
eq  interval(M,N)
+    ==
+    #Exists X:Int .
+        (X:Int #And { X >=Int M #Equals true } #And { X <=Int N #Equals true })
+
+

Additionally, the symbol declaration would require a special attribute to +signal the fact that it is not a constructor but a defined symbol.

+

Since this feature is not clearly needed by K users at the moment, it is only presented here as an example; its implementation will be postponed until its usefulness becomes apparent.

+

Parser Generation

+

In addition to on-the-fly parser generation using kast, K is capable of +ahead-of-time parser generation of LR(1) or GLR parsers using Flex and Bison. +This can be done one of two different ways.

+
    +
  1. You can explicitly request that a particular parser be generated by invoking kast --gen-parser <outputFile> or kast --gen-glr-parser <outputFile> respectively. kast will then create a parser based on the same command line flags that govern on-the-fly parsing, like -s to specify the starting sort, and -m to specify the module to parse under. By default, this generates a parser for the sort of the $PGM configuration variable in the main syntax module of the definition.
  2. You can request that a specific set of parsers be generated for all the configuration variables of your definition by passing the --gen-bison-parser or --gen-glr-bison-parser flags to kompile. kompile will decide the sorts to use as start symbols based on the sorts in the configuration declaration for the configuration variables. The parser for the $PGM configuration variable will be generated based on the main syntax module of the definition. The user must explicitly annotate the configuration declaration with the other modules to use to parse the other configuration variables as attributes. For example, if you have the following cell in the configuration declaration: <cell> foo($FOO:Foo, $BAR:Bar) </cell>, you might annotate it with the attribute pair parser="FOO, TEST; BAR, TEST2" to indicate that configuration variable $FOO should be parsed in the TEST module, and configuration variable $BAR should be parsed in the TEST2 module. If the user forgets to annotate the declaration with the parser attribute, only the $PGM parser will be generated.
+

Bison-generated parsers are extremely fast compared to kast, but they have +some important limitations:

+
    +
  • Bison parsers will always output Kore. You can then pass the resulting AST +directly to llvm-krun or kore-exec and bypass the krun frontend, making +them very fast, but lower-level.
  • +
  • Bison parsers do not yet support macros. This may change in a future release. +Note that you can use anywhere rules instead of macros in most cases to get +around this limitation, although they will not benefit from unparsing via the +alias attribute.
  • +
  • Obligation falls on the user to ensure that the grammar they write is LR(1) +if they choose to use LR(1) parsing. If this does not happen, the parser +generated will have shift/reduce or reduce/reduce conflicts and the parser +may behave differently than kast would (kast is a GLL parser, ie, it +is based on LL parsers and parses all unambiguous context-free grammars). +K provides an attribute, not-lr1, which can be applied to modules known to +not be LR(1), and will trigger a warning if the user attempts to generate an +LR(1) parser which recursively imports that module.
  • +
  • If you are using LR(1) based parsing, the prefer and avoid attributes are +ignored. It is only possible to implement these attributes by means of +generalized LL or LR parsing and a postprocessing on the AST to remove the +undesirable ambiguity.
  • +
  • Obligation falls on the user to ensure that the grammar they write has as +few conflicts as possible if they are using GLR parsing. Bison's GLR support +is quite primitive, and in the worst case it can use exponential space and +time to parse a program, which generally leads the generated parser to report +"memory exhausted", indicating that the parse could not be completed within +the stack space allocated by Bison. It's best to ensure that the grammar is +as close to LR(1) as possible and only utilizes conflicts where absolutely +necessary. One tool that can be used to facilitate this is to pass +--bison-lists to kompile. This will disable support for the List{Sort} +syntax production, and it will make NeList{Sort} left associative, but the +resulting productions generated for NeList{Sort} will be LR(1) and use bounded +stack space.
  • +
  • If the grammar you are parsing is context-sensitive (for example, because +it requires a symbol table to parse), one thing you can do to make this +language parse in K is to implement the language as an ambiguous grammar. +Bison's GLR parser will generate an amb production that is parametric in +the sort of the ambiguity. You can then import the K-AMBIGUITIES module +and use rewriting to resolve the ambiguities using whatever preprocessing +mechanisms you prefer.
  • +
+

Location Information

+

K is able to insert file, line, and column metadata into the parse tree on a +per-sort basis when parsing using a bison-generated parser. To enable this, +mark the sort with the locations attribute.

+
k
syntax Exp [locations] + syntax Exp ::= Exp "/" Exp | Int +
+

K implicitly wraps productions of these sorts in a #location term (see the +K-LOCATIONS module in kast.md). The metadata can thus be accessed with +ordinary rewrite rules:

+
k
rule #location(_ / 0, File, StartLine, _StartColumn, _EndLine, _EndColumn) =>
  "Error: Division by zero at " +String File +String ":" +String Int2String(StartLine)
+

Sometimes it is desirable to allow code to be written in a file which +overwrites the current location information provided by the parser. This can be +done via a combination of the #LineMarker sort and the --bison-file flag to +the parser generator. If you declare a production of sort #LineMarker which +contains a regular expression terminal, this will be treated as a +line marker by the bison parser. The user will then be expected to provide +an implementation of the parser for the line marker in C. The function expected +by the parser has the signature void line_marker(char *, yyscan_t), where +yyscan_t is a +reentrant flex scanner. +The string value of the line marker token as specified by your regular +expression can be found in the first parameter of the function, and you can +set the line number used by the scanner using yyset_lineno(int, yyscan_t). If +you declare the variable extern char *filename, you can also set the current +file name by writing a malloc'd, zero-terminated string to that variable.

+

Unparsing

+

A number of factors go into how terms are unparsed in K. Here we describe some +of the features the user can use to control how unparsing happens.

+

Brackets

+

One of the phases that the unparser goes through is to insert productions +tagged with the bracket attribute where it believes this is necessary +in order to create a correct string that will be parsed back into the original +AST. The most common case of this is in expression grammars. For example, +consider the following grammar:

+
k
syntax Exp ::= Int + | Exp "*" Exp + > Exp "+" Exp +
+

Here we have declared that expressions can contain integer addition and multiplication, and that multiplication binds tighter than addition. As a result, when writing a program, if we want to write an expression that first applies addition, then multiplication, we must use brackets: (1 + 2) * 3. Similarly, if we have such an AST, we must insert brackets into the AST in order to faithfully unparse the term in a manner that will be parsed back into the same AST, because if we do not, we end up unparsing the term as 1 + 2 * 3, which will be parsed back as 1 + (2 * 3) because of the priority declaration in the grammar.

+

You can control how the unparser will insert such brackets by adding a +production with the bracket attribute and the correct sort. For example, if, +instead of parentheses, you want to use curly braces, you could write:

+
k
syntax Exp ::= "{" Exp "}" [bracket] +
+

This would signal to the unparser how brackets should look for terms of sort +Exp, and it will use this syntax when unparsing terms of sort Exp.

+

Commutative collections

+

One thing that K will do (unless you pass the --no-sort-collections flag to krun) is to sort associative, commutative collections (such as Set and Map) alphanumerically. For example, if a collection's keys are of sort Id and are a, b, c, and d, then unparsing will always print the key a first, then b, then c, then d, because this is the alphabetical order of these keys when unparsed.

+

Furthermore, K will sort numeric keys numerically. For example, if I have a +collection whose keys are 1, 2, 5, 10, 30, it will first display 1, then 2, +then 5, then 10, then 30, because it will sort these keys numerically. Note +that this is different than an alphabetic sort, which would sort them as +1, 10, 2, 30, 5. We believe the former is more intuitive to users.

+

Substitution filtering

+

K will remove substitution terms corresponding to anonymous variables when using the --pattern flag if those anonymous variables provide no information about the named variables in your search pattern. You can disable this behavior by passing --no-substitution-filtering to krun. When this flag is not passed, and you are using the Haskell backend, any equality in a substitution (i.e., an #Equals under an #And under an #Or) will be hidden from the user if the left-hand side is a variable that was anonymous in the --pattern passed by the user, unless that variable appears elsewhere in the substitution. If you want to see that variable in the substitution, you can either disable this filtering or give that variable a name in the original search pattern.

+

Variable alpha renaming

+

K will automatically rename variables that appear in the output configuration. +Similar to commutative collections, this is done to normalize the resulting +configuration so that equivalent configurations will be printed identically +regardless of how they happen to be reached. This pass can be disabled by +passing --no-alpha-renaming to krun.

+

Macro expansion

+

K will apply macros in reverse on the output configuration if the macro was +created with the alias or alias-rec attribute. See the section on macro +expansion for more details.

+

Formatting

+

format attribute

+

K allows you to control how terms are unparsed using the format attribute. +By default, a domain value is unparsed by printing its string value verbatim, +and an application pattern is unparsed by printing its terminals and children +in the sequence implied by its concrete syntax, separated by spaces. However, +K gives you complete control over how you want to unparse the symbol.

+

A format attribute is a string containing zero or more escape sequences that +tell K how to unparse the symbol. Escape sequences begin with a '%' and are +followed by either an integer, or a single non-digit character. Below is a +list of escape sequences recognized by the formatter:

| Escape Sequence | Meaning |
| --- | --- |
| n | Insert '\n' followed by the current indentation level |
| i | Increase the current indentation level by 1 |
| d | Decrease the current indentation level by 1 |
| c | Move to the next color in the list of colors for this production |
| r | Reset color to the default foreground color for the terminal (See below for more information on how colors work) |
| an integer | Print a terminal or nonterminal from the production (See below for more information) |
| any other char | Print that character verbatim |

Using the integer escape sequence

+

In the integer escape sequence %a, the integer a is treated as a 1-based +index into the terminals and nonterminals of the production.

+
    +
  • +

    If the offset refers to a terminal, move to the next color in the list of +colors for this production, print the value of that terminal, then reset the +color to the default foreground color for the terminal.

    +
  • +
  • +

    If the offset refers to a regular expression terminal, it is an error.

    +
  • +
  • +

    If the offset refers to a nonterminal, print the unparsed representation of +the corresponding child of the current term.

    +
  • +
+
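 
For example, the following production (an illustrative sketch, assuming a Stmts sort) prints a block with its body indented on its own line:

k
syntax Stmt ::= "{" Stmts "}" [format(%1%i%n%2%d%n%3)]

Here %1 and %3 print the braces, %2 prints the body, %i and %d adjust the indentation level, and %n inserts a newline at the current indentation.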

color and colors attributes

+

K allows you to take advantage of ANSI terminal codes for foreground color +in order to colorize output pretty-printed by the unparser. This is controlled +via the color and colors attributes of productions. These attributes +combine with the format attribute to control how a term is colorized.

+

The first thing to understand about how colorization works is that the color +and colors attributes are used to construct a list of colors associated +with each production, and the format attribute then uses that list to choose +the color for each part of the production. For more information on how the +format attribute chooses a color from the list, see above, but essentially, +each terminal or %c in the format attribute advances the pointer in the list +by one element, and terminals and %r reset the current color to the default +foreground color of the terminal afterwards.

+

There are two ways you can construct a list of colors associated with a +production:

+
    +
  • +

    The color attribute creates the entire list all with the same color, as +specified by the value of the attribute. When combined with the default format +attribute, this will color all the terminals in that production that color, but +more advanced techniques can be used as well.

    +
  • +
  • +

    The colors attribute creates the list from a manual, comma-separated list +of colors. The attribute is invalid if the length of the list is not equal to +the number of terminals in the production plus the number of %c substrings in +the format attribute.

    +
  • +
+
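 
For example, the following sketch colors the three terminals of a hypothetical conditional expression; the list has exactly three entries because the production has three terminals and the default format contains no %c:

k
syntax Exp ::= "#if" Exp "#then" Exp "#else" Exp [colors(yellow, green, green)]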

Attributes Reference

+

Attribute Syntax Overview

+

In K, many different syntactic categories accept an optional trailing list of +keywords known as attributes. Attribute lists have two different syntaxes, +depending on where they occur. Each attribute also has a type which describes +where it may occur.

+

The first syntax is a square-bracketed ([]) list of words. This syntax is available for the following attribute types:

+
    +
  1. module attributes - may appear immediately after the module keyword
  2. sort attributes - may appear immediately after a sort declaration
  3. production attributes - may appear immediately after a BNF production alternative
  4. rule attributes - may appear immediately after a rule
  5. context attributes - may appear immediately after a context or context alias
  6. context alias attributes - may appear immediately after a context alias
  7. claim attributes - may appear immediately after a claim
+

The second syntax is the XML attribute syntax, i.e., a space-delimited list of key-and-quoted-value pairs appearing inside the start tag of an XML element: <element key1="value" key2="value2" ... > </element>. This syntax is available for the following attribute types:

+
  1. cell attributes - may appear inside of the cell start tag in configuration declarations
+
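To make the two syntaxes concrete, here is a hedged sketch; the rule, sorts, and cell names below are invented for illustration only:

// Square-bracketed attribute list, attached after a sentence (here, a rule):
rule isDone(_:State) => false [owise]

// XML-style attributes, attached inside a cell start tag of a
// configuration declaration:
configuration <threads>
                <thread multiplicity="*" type="Set">
                  <k> $PGM:K </k>
                </thread>
              </threads>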

Unrecognized attributes are reported as an error. When we talk about +the type of an attribute, we mean a syntactic category to which an attribute +can be attached where the attribute has some semantic effect.

+

Attribute Index

+

We now provide an index of available attributes organized alphabetically with a +brief description of each. Note that the same attribute may appear in the index +multiple times to indicate its effect in different contexts or with/without +arguments. A legend describing how to interpret the index follows.

| Name | Type | Backend | Reference |
| ---- | ---- | ------- | --------- |
| alias-rec | prod | all | Macros and Aliases |
| alias | prod | all | Macros and Aliases |
| all-path | claim | haskell | all-path and one-path attributes to distinguish reachability claims |
| anywhere | rule | all | anywhere rules |
| applyPriority(_) | prod | all | Symbol priority and associativity |
| avoid | prod | all | Symbol priority and associativity |
| binder | prod | all | No reference yet. |
| bracket | prod | all | Parametric productions and bracket attributes |
| color(_) | prod | all | color and colors attributes |
| colors(_) | prod | all | color and colors attributes |
| concrete | mod | llvm | symbolic and concrete attribute |
| concrete(_) | rule | haskell | concrete and symbolic attributes (Haskell backend) |
| concrete | rule | haskell | concrete and symbolic attributes (Haskell backend) |
| context(_) | alias | all | Context aliases |
| deprecated | prod | all | deprecated attribute |
| exit = "" | cell | all | exit attribute |
| format | prod | all | format attribute |
| freshGenerator | prod | all | freshGenerator attribute |
| function | prod | all | function and total attributes |
| group(_) | all | all | Symbol priority and associativity |
| hook(_) | prod | all | No reference yet |
| hybrid(_) | prod | all | hybrid attribute |
| hybrid | prod | all | hybrid attribute |
| klabel(_) | prod | all | klabel(_) and symbol attributes |
| left | prod | all | Symbol priority and associativity |
| locations | sort | all | Location Information |
| macro-rec | prod | all | Macros and Aliases |
| macro | prod | all | Macros and Aliases |
| memo | rule | haskell | The memo attribute |
| multiplicity = "_" | cell | all | Collection Cells: multiplicity and type attributes |
| non-assoc | prod | all | Symbol priority and associativity |
| one-path | claim | haskell | all-path and one-path attributes to distinguish reachability claims |
| overload(_) | prod | all | overload(_) attribute |
| owise | rule | all | owise and priority attributes |
| prec(_) | token | all | prec attribute |
| prefer | prod | all | Symbol priority and associativity |
| priority(_) | rule | all | owise and priority attributes |
| private | mod | all | private attribute |
| private | prod | all | public and private attribute |
| public | mod | all | No reference yet. |
| public | prod | all | public and private attribute |
| result(_) | ctxt | all | result attribute |
| result(_) | rule | all | result attribute |
| right | prod | all | Symbol priority and associativity |
| seqstrict(_) | prod | all | strict and seqstrict attributes |
| seqstrict | prod | all | strict and seqstrict attributes |
| simplification | rule | haskell | simplification attribute (Haskell backend) |
| simplification(_) | rule | haskell | simplification attribute (Haskell backend) |
| smt-hook(_) | prod | haskell | SMT Translation |
| smtlib(_) | prod | haskell | SMT Translation |
| smt-lemma | rule | haskell | SMT Translation |
| strict | prod | all | strict and seqstrict attributes |
| strict(_) | prod | all | strict and seqstrict attributes |
| symbolic | mod | haskell | symbolic and concrete attribute |
| symbolic | rule | haskell | concrete and symbolic attributes (Haskell backend) |
| symbolic(_) | rule | haskell | concrete and symbolic attributes (Haskell backend) |
| symbol | prod | all | klabel(_) and symbol attributes |
| terminator-symbol(_) | prod | all | klabel(_) and symbol attributes |
| token | prod | all | token attribute |
| token | sort | all | token attribute |
| total | prod | all | function and total attributes |
| trusted | claim | haskell | trusted attribute |
| type = "_" | cell | all | Collection Cells: multiplicity and type attributes |
| unboundVariables(_) | rule | all | The unboundVariables attribute |
| unused | prod | all | unused attribute |
| concrete | mod | all | Specify that this module should only be included in concrete backends (LLVM backend). |
| symbolic | mod | all | Specify that this module should only be included in symbolic backends (Haskell backend). |
| stream = "_" | cell | all | Specify that this cell should be hooked up to a stream, either stdin, stdout, or stderr. |
+

Internal Attribute Index

+

Some attributes should not generally appear in user code, except in some +unusual or complex examples. Such attributes are typically generated by the +compiler and used internally. We list these attributes below as a reference for +interested readers:

| Name | Type | Backend | Reference |
| ---- | ---- | ------- | --------- |
| assoc | prod | all | assoc, comm, idem and unit attributes |
| comm | prod | all | assoc, comm, idem and unit attributes |
| digest | mod | all | Contains the hash of the textual contents of the module. |
| idem | prod | all | assoc, comm, idem and unit attributes |
| unit | prod | all | assoc, comm, idem and unit attributes |
| userList | prod | all | Identifies the desugared form of Lst ::= List{Elm,"delim"} |
| predicate | prod | all | Specifies the sort of a predicate label |
| element | prod | all | Specifies the label of the elements in a list |
| bracketLabel | prod | all | Keep track of the label of a bracket production since it can't have a klabel |
| injective | prod | all | Label a given production as injective (unique output for each input) |
| internal | prod | all | Production is reserved for internal use by the compiler |
| cool | rule | all | strict and seqstrict attributes |
| heat | rule | all | strict and seqstrict attributes |
+

Index Legend

+
  • Name - the attribute's name (optionally followed by an underscore _ to indicate the attribute takes arguments)

  • Type - the syntactic categories where this attribute is not ignored; the possible values are the types mentioned above or shorthands:

    1. all - short for any type except cell
    2. mod - short for module
    3. sort
    4. prod - short for production
    5. rule
    6. ctxt - short for context or context alias
    7. claim
    8. cell

  • Backend - the backends that do not ignore this attribute; possible values:

    1. all - all backends
    2. llvm - the LLVM backend
    3. haskell - the Haskell backend

  • Effect - the attribute's effect (when it applies)
+

Pending Documentation

+

Backend features not yet given documentation:

+
  • Parser of KORE terms and definitions
  • Term representation of K terms
  • Hooked sorts and symbols
  • Substituting a substitution into the RHS of a rule
    • domain values
    • functions
    • variables
    • symbols
    • polymorphism
    • hooks
    • injection compaction
    • overload compaction
  • Pattern Matching / Unification of subject and LHS of rule
    • domain values
    • symbols
    • side conditions
    • and/or patterns
    • list patterns
    • nonlinear variables
    • map/set patterns
      • deterministic
      • nondeterministic
    • modulo injections
    • modulo overloads
  • Stepping
    • initialization
    • termination
  • Print kore terms
  • Equality/comparison of terms
  • Owise rules
  • Strategy #STUCK axiom
  • User substitution
    • binders
    • kvar
+

To get a complete list of hooks supported by K, you can run:

+
grep -P -R "(?<=[^-])hook\([^)]*\)" k-distribution/include/kframework/builtin/ \
     --include "*.k" -ho | \
sed 's/hook(//' | sed 's/)//' | sort | uniq | grep -v org.kframework
+
+

All of these hooks will also eventually need documentation.

+
+
+
    +
  1. Except for a very limited number of special cases from the K standard library. ↩︎

  2. The Maude documentation has an example in a context that's somewhat similar to K; discussion of ad-hoc overloading is not relevant. ↩︎
+
+
+
diff --git a/editor_support/index.html b/editor_support/index.html new file mode 100644 index 00000000000..68d9d0298cd --- /dev/null +++ b/editor_support/index.html @@ -0,0 +1,463 @@ +Editor Support | Runtime Verification Inc
+
+
+ + +
+ +
+

Editor Support

+

This page lists (alphabetically) known Editor/IDE plugins for K.

+

Please feel free to contribute to any of the projects below (via pull requests) or to suggest new entries.

+

Atom

+

K/Maude syntax highlighter for Atom based on the BBEdit grammar described below

+
    +
  • Source: https://github.com/traiansf/language-k
  • +
+

BBEdit/TextWrangler

+

K/Maude syntax highlighter for TextWrangler/BBEdit

+
    +
  • Source: https://github.com/kframework/k-editor-support/tree/master/BBEdit
  • +
+

Emacs

+

Emacs mode for K

+
    +
  • Source: https://github.com/kframework/k-editor-support/tree/master/emacs
  • +
+

IntelliJ Idea

+
    +
  • Comprehensive plugin for IntelliJ Idea
  • +
  • Binary: https://github.com/kframework/k-editor-support/blob/master/k-idea-plugin.jar
  • +
  • Source: https://github.com/kframework/k-editor-support/tree/master/k-idea-plugin-src
  • +
+

Notepad++

+

K syntax highlighter for Notepad++

+
    +
  • Source: https://github.com/kframework/k-editor-support/tree/master/notepad%2B%2B
  • +
+

Pygments

+

Support for Pygments (https://pygments.org/)

+
    +
  • Source: https://github.com/kframework/k-editor-support/tree/master/pygments
  • +
  • Note: the lexer is far from being complete.
  • +
+

Vim

+

K/Maude syntax highlighter for vim

+
    +
  • Source: https://github.com/kframework/k-editor-support/tree/master/vim
  • +
+

Visual Studio Code

+

K extension for Visual Studio Code

+
    +
  • Extension page on Visual Studio Marketplace: https://marketplace.visualstudio.com/items?itemName=clv.kframework
  • +
  • Source code: https://github.com/LucianCumpata/K-VSCode
  • +
+
+
+ + + +
+ +
+
diff --git a/events/k-a-rewriting-based-language-definitional-framework/index.html b/events/k-a-rewriting-based-language-definitional-framework/index.html new file mode 100644 index 00000000000..271853714e1 --- /dev/null +++ b/events/k-a-rewriting-based-language-definitional-framework/index.html @@ -0,0 +1,409 @@ +K: A Rewriting-Based Language Definitional Framework | Runtime Verification Inc
+
+
+ + +
+ +
+

K: A Rewriting-Based Language Definitional Framework

+

Tutorial at the 33rd ACM SIGPLAN International Conference on Programming Language Design and Implementation (PLDI)

+
    +
  • June 16, 2012 – Beijing (China)
  • +
  • Presenter: Grigore Rosu (the main designer of K)
  • +
  • Duration: Half a day
  • +
  • Expected participants: ~20
  • +
+

Description

+

K is an executable semantic framework in which programming languages, calculi, as well as type systems or formal analysis tools can be defined. K is a suitable framework for defining truly concurrent languages or calculi, even in the presence of sharing. Since computations can be handled like any other terms in a rewriting environment, that is, they can be matched, moved from one place to another in the original term, modified, or even deleted, K is also suitable for defining control-intensive language features such as abrupt termination, exceptions, or call/cc. K has been used to define real world languages like C.

+

This tutorial will provide participants with a basic knowledge of the framework, as well as hands-on experience with using K to define a real programming language. Definitional techniques available in K, as well as comparisons of such techniques with other formalisms will be described. Time will be spent showing how one can automatically generate an interpreter, debugger, state space search, and a model checker from a single semantic definition. After attending the tutorial, participants will be able to use K to define their own languages or calculi and then derive similar tools from their semantics for free.

+ +
    +
  • http://k-framework.org: The main page for the K framework (see the Quick Overview section for a movie, demo and slides).
  • +
  • http://k-framework.googlecode.com: The Googlecode page for the K tool.
  • +
+

Tutorial format

+

Material and instructions will be provided to participants to load software and examples on their laptops. The presenter will give background material and an introduction to K, then the majority of the time will be spent working through examples in the K tool. The examples will be used to demonstrate both features of K, as well as design decisions that must take place when defining a language. Participants will be encouraged to examine and understand the example languages, then guided through making their own changes/improvements to those languages.

+

Expected audience

+

The audience should be interested in practical aspects of programming language semantics. This includes interest in semantics as objects to be created/studied, as well as interest in using such semantics for different program analyses. They need no previous knowledge, although a basic understanding of other definitional styles (such as SOS or evaluation contexts) may be helpful.

+
+
+ + + +
+ +
+
diff --git a/exports/K.epub b/exports/K.epub new file mode 100644 index 00000000000..a435c551308 Binary files /dev/null and b/exports/K.epub differ diff --git a/exports/K.html b/exports/K.html new file mode 100644 index 00000000000..90c897274c8 --- /dev/null +++ b/exports/K.html @@ -0,0 +1,23714 @@ +K
+

K

+ +

Table of Contents

+ + +
+

K is a rewrite-based
+executable semantic framework in which programming languages, type
+systems and formal analysis tools can be defined using configurations
+and rules. Configurations organize the state in units called cells,
+which are labeled and can be nested. K rewrite rules make it explicit
+which parts of the term are read-only, write-only, read-write, or
+unused. This makes K suitable for defining truly concurrent languages
+even in the presence of sharing. Computations are represented as
+syntactic extensions of the original language abstract syntax, using a
+nested list structure which sequentializes computational tasks, such
+as program fragments. Computations are like any other terms in a
+rewriting environment: they can be matched, moved from one place to
+another, modified, or deleted. This makes K suitable for defining
+control-intensive features such as abrupt termination, exceptions, or
+call/cc.

+

K Tool Download

+ +
    +
  • Install from the latest K GitHub Release.
  • Install pyk, K's scripting interface for Python. Check the API documentation for a complete reference of supported features.
  • Try our Editor Support page for links to K syntax highlighting definitions for various popular editors/IDEs. Please feel free to contribute.
  • Build or browse the code on GitHub, where you can also report bugs.
+

Learn K

+ + +

Support

+ + +

Resources

+ + +

K Tutorial

+ +

The purpose of this series of lessons is to teach developers how to program in
+K. While the primary use of K is in the specification of operational semantics
+of programming languages, this tutorial is agnostic on how the knowledge of K
+is used. For a more detailed tutorial explaining the basic principles of
+programming language design, refer to the
+K PL Tutorial. Note that that tutorial is somewhat
+out of date presently.

+

This K tutorial is a work in progress. Many lessons are currently simply
+placeholders for future content.

+

To start the K tutorial, begin with
+Section 1: Basic Programming in K.

+

Section 1: Basic K Concepts

+ +

The goal of this first section of the K tutorial is to teach the basic
+principles of K to someone with no prior experience with K as a programming
+language. However, this is not written with the intended audience of someone
+who is a complete beginner to programming. We are assuming that the reader
+has a firm grounding in computer science broadly, as well as that they have
+experience writing code in functional programming languages before.

+

By the end of this section, the reader ought to be able to write specifications
+of simple languages in K, use these specifications to generate a fast
+interpreter for their programming language, as well as write basic deductive
+program verification proofs over programs in their language. This should give
+them the theoretical grounding they need to begin expanding their knowledge
+of K in Section 2: Intermediate K Concepts.

+

To begin this section, refer to
+Lesson 1.1: Setting up a K Environment.

+

Lesson 1.1: Setting up a K Environment

+ +

The first step to learning K is to install K on your system, and configure your
+editor for K development.

+

Installing K

+ +

You have two options for how to install K, depending on how you intend to
+interact with the K codebase. If you are solely a user of K, and have no
+interest in developing or making changes to K, you most likely will want to
+install one of our binary releases of K. However, if you are going to be a K
+developer, or simply want to build K from source, you should follow the
+instructions for a source build of K.

+

Installing K from a binary release

+ +

K is developed as a rolling release, with each change to K that passes our
+CI infrastructure being deployed on GitHub for download. The latest release of
+K can be downloaded here.
+This page also contains information on how to install K. It is recommended
+that you fully uninstall the old version of K prior to installing the new one,
+as K does not maintain entries in package manager databases, with the exception
+of Homebrew on MacOS.

+

Installing K from source

+ +

You can clone K from GitHub with the following Git command:

+
git clone https://github.com/runtimeverification/k --recursive
+

Instructions on how to build K from source can be found
+here.

+

Configuring your editor

+ +

K maintains a set of scripts for a variety of text editors, including vim and
+emacs, in various states of maintenance. You can download these scripts with
+the following Git command:

+
git clone https://github.com/kframework/k-editor-support
+

Because K allows users to define their own grammars for parsing K itself,
+not all features of K can be effectively highlighted. However, at the cost of
+occasionally highlighting things incorrectly, you can get some pretty good
+results in many cases. With that being said, some of the editor scripts in the
+above repository are pretty out of date. If you manage to improve them, we
+welcome pull requests into the repository.

+

Troubleshooting

+ +

If you have problems installing K, we encourage you to reach out to us. If you
+follow the above install instructions and run into a problem, you can
+Create a bug report on GitHub

+

Next lesson

+ +

Once you have set up K on your system to your satisfaction, you can continue to
+Lesson 1.2: Basics of Functional K.

+

Lesson 1.2: Basics of Functional K

+ +

The purpose of this lesson is to explain the basics of productions and
+rules in K. These are two types of K sentences. A K file consists of
+one or more requires or modules in K. Each module consists of one or
+more imports or sentences. For more information on requires, modules, and
+sentences, refer to Lesson 1.5. However, for the time
+being, just think of a module as a container for sentences, and don't worry
+about requires or imports just yet.

+

Our first K program

+ +

To start with, input the following program into your editor as file
+lesson-02-a.k:

+
module LESSON-02-A
+
+  syntax Color ::= Yellow() | Blue()
+  syntax Fruit ::= Banana() | Blueberry()
+  syntax Color ::= colorOf(Fruit) [function]
+
+  rule colorOf(Banana()) => Yellow()
+  rule colorOf(Blueberry()) => Blue()
+
+endmodule
+

Save this file and then run:

+
kompile lesson-02-a.k
+

kompile is K's compiler. By default, it takes a program or specification
+written in K and compiles it into an interpreter for that input. Right now we
+are compiling a single file. A set of K files that are compiled together are
+called a K definition. We will cover multiple file K definitions later on.
+kompile will output a directory containing everything needed to execute
+programs and perform proofs using that definition. In this case, kompile will
+(by default) create the directory lesson-02-a-kompiled under the current
+directory.

+

Now, save the following input file in your editor as banana.color in the same
+directory as lesson-02-a.k:

+
colorOf(Banana())
+

We can now evaluate this K term by running (from the same directory):

+
krun banana.color
+

krun will use the interpreter generated by the first call to kompile to
+execute this program.

+

You will get the following output:

+
<k>
+  Yellow ( ) ~> .
+</k>
+

For now, don't worry about the <k>, </k>, or ~> . portions of this
+output file.

+

You can also execute small programs directly by specifying them on the command
+line instead of putting them in a file. For example, the same program above
+could also have been executed by running the following command:

+
krun -cPGM='colorOf(Banana())'
+

Now, let's look at what this definition and program did.

+

Productions, Constructors, and Functions

+ +

The first thing to realize is that this K definition contains 5 productions.
+Productions are introduced with the syntax keyword, followed by a sort,
+followed by the operator ::= followed by the definition of one or more
+productions themselves, separated by the | operator. There are different
+types of productions, but for now we only care about constructors and
+functions. Each declaration separated by the | operator is individually
+a single production, and the | symbol simply groups together productions that
+have the same sort. For example, we could equally have written an identical K
+definition lesson-02-b.k like so:

+
module LESSON-02-B
+
+  syntax Color ::= Yellow()
+  syntax Color ::= Blue()
+  syntax Fruit ::= Banana()
+  syntax Fruit ::= Blueberry()
+  syntax Color ::= colorOf(Fruit) [function]
+
+  rule colorOf(Banana()) => Yellow()
+  rule colorOf(Blueberry()) => Blue()
+
+endmodule
+

You can try compiling and running lesson-02-b.k to see that it produces the same output as lesson-02-a.k:

+
kompile lesson-02-b.k
+krun -cPGM='colorOf(Banana())' --definition 'lesson-02-b-kompiled'
+

where the --definition attribute points to the directory containing a compiled version of LESSON-02-B.
+Even the following definition is equivalent:

+
module LESSON-02-C
+
+  syntax Color ::= Yellow()
+                 | Blue()
+                 | colorOf(Fruit) [function]
+  syntax Fruit ::= Banana()
+                 | Blueberry()
+
+  rule colorOf(Banana()) => Yellow()
+  rule colorOf(Blueberry()) => Blue()
+
+endmodule
+

Each of these types of productions named above has the same underlying syntax,
+but context and attributes are used to distinguish between the different
+types. Tokens, brackets, lists, macros, aliases, and anywhere productions will
+be covered in a later lesson, but this lesson does introduce us to constructors
+and functions. Yellow(), Blue(), Banana(), and Blueberry() are
+constructors. You can think of a constructor like a constructor for an
+algebraic data type, if you're familiar with a functional language. The data
+type itself is the sort that appears on the left of the ::= operator. Sorts
+in K consist of uppercase identifiers.

+

Constructors can have arguments, but these ones do not. We will cover the
+syntax of productions in detail in the next lesson, but for now, you can write
+a production with no arguments as an uppercase or lowercase identifier followed
+by the () operator.

+

A function is distinguished from a constructor by the presence of the
+function attribute. Attributes appear in a comma separated list between
+square brackets after any sentence, including both productions and rules.
+Various attributes with built-in meanings exist in K and will be discussed
+throughout the tutorial.

+

Exercise

+ +

Use krun to compute the return value of the colorOf function on a
+Blueberry().

+

Rules, Matching, and Variables

+ +

Functions in K are given definitions using rules. A rule begins with the rule
+keyword and contains at least one rewrite operator. The rewrite operator
+is represented by the syntax =>. The rewrite operator is one of the built-in
+productions in K, and we will discuss in more detail how it can be used in
+future lessons, but for now, you can think of a rule as consisting of a
+left-hand side and a right-hand side, separated by the rewrite
+operator. On the left-hand side is the name of the function and zero or more
+patterns corresponding to the parameters of the function. On the right-hand
+side is another pattern. The meaning of the rule is relatively simple, having
+defined these components. If the function is called with arguments that
+match the patterns on the left-hand side, then the return value of the
+function is the pattern on the right-hand side.

+

For example, in the above example, if the argument of the colorOf function
+is Banana(), then the return value of the function is Yellow().

+

So far we have introduced that a constructor is a type of pattern in K. We
+will introduce more complex patterns in later lessons, but there is one other
+type of basic pattern: the variable. A variable, syntactically, consists
+of an uppercase identifier. However, unlike a constructor, a variable will
+match any pattern with one exception: Two variables with the same name
+must match the same pattern.

+

Here is a more complex example (lesson-02-d.k):

+
module LESSON-02-D
+
+  syntax Container ::= Jar(Fruit)
+  syntax Fruit ::= Apple() | Pear()
+
+  syntax Fruit ::= contentsOfJar(Container) [function]
+
+  rule contentsOfJar(Jar(F)) => F
+
+endmodule
+

Here we see that Jar is a constructor with a single argument. You can write a
+production with multiple arguments by putting the sorts of the arguments in a
+comma-separated list inside the parentheses.
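For instance, a hypothetical two-argument constructor (not part of
lesson-02-d.k) could be declared as:

  syntax Container ::= Basket(Fruit, Fruit)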

+

In this example, F is a variable. It will match either Apple() or Pear().
+The return value of the function is created by substituting the matched
+values of all of the variables into the variables on the right-hand side of
+the rule.

+

To demonstrate, compile this definition and execute the following program with
+krun:

+
contentsOfJar(Jar(Apple()))
+

You will see when you run it that the program returns Apple(), because that
+is the pattern that was matched by F.

+

Exercises

+ +
  1. Extend the definition in lesson-02-a.k with the addition of blackberries
    +and kiwis. For simplicity, blackberries are black and kiwis are green. Then
    +compile your definition and test that your additional fruits are correctly
    +handled by the colorOf function.
  2. Create a new definition which defines an outfit as a multi-argument
    +constructor consisting of a hat, shirt, pants, and shoes. Define a new sort,
    +Boolean, with two constructors, true and false. Each of hat, shirt, pants,
    +and shoes will have a single argument (a color), either black or
    +white. Then define an outfitMatching function that will return true if all
    +the pieces of the outfit are the same color. You do not need to define the
    +case that returns false. Write some tests that your function behaves the way
    +you expect.
+

Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.3: BNF Syntax and Parser Generation.

+

Lesson 1.3: BNF Syntax and Parser Generation

+ +

The purpose of this lesson is to explain the full syntax and semantics of
+productions in K as well as how productions and other syntactic
+sentences can be used to define grammars for use parsing both rules as well
+as programs.

+

K's approach to parsing

+ +

K's grammar is divided into two components: the outer syntax of K and the
+inner syntax of K. Outer syntax refers to the parsing of requires,
+modules, imports, and sentences in a K definition. Inner syntax
+refers to the parsing of rules and programs. Unlike the outer syntax of
+K, which is predetermined, much of the inner syntax of K is defined by you, the
+developer. When rules or programs are parsed, they are parsed within the
+context of a module. Rules are parsed in the context of the module in which
+they exist, whereas programs are parsed in the context of the
+main syntax module of a K definition. The productions and other syntactic
+sentences in a module are used to construct the grammar of the module, which
+is then used to perform parsing.

+

Basic BNF productions

+ +

To illustrate how this works, we will consider a simple K definition which
+defines a relatively basic calculator capable of evaluating Boolean expressions
+containing and, or, not, and xor.

+

Input the following program into your editor as file lesson-03-a.k:

+
module LESSON-03-A
+
+  syntax Boolean ::= "true" | "false"
+                   | "!" Boolean [function]
+                   | Boolean "&&" Boolean [function]
+                   | Boolean "^" Boolean [function]
+                   | Boolean "||" Boolean [function]
+
+endmodule
+

You will notice that the productions in this file look a little different than
+the ones from the previous lesson. In point of fact, K has two different
+mechanisms for defining productions. We have previously been focused
+exclusively on the first mechanism, where the ::= symbol is followed by an
+alphanumeric identifier followed by a comma-separated list of sorts in
+parentheses. However, this is merely a special case of a more generic mechanism
+for defining the syntax of productions using a variant of
+BNF Form.

+

For example, in the previous lesson, we had the following set of productions:

+
module LESSON-03-B
+  syntax Color ::= Yellow() | Blue()
+  syntax Fruit ::= Banana() | Blueberry()
+  syntax Color ::= colorOf(Fruit) [function]
+endmodule
+

It turns out that this is equivalent to the following definition which defines
+the same grammar, but using BNF notation:

+
module LESSON-03-C
+  syntax Color ::= "Yellow" "(" ")" | "Blue" "(" ")"
+  syntax Fruit ::= "Banana" "(" ")" | "Blueberrry" "(" ")"
+  syntax Color ::= "colorOf" "(" Fruit ")" [function]
+endmodule
+

In this example, the sorts of the argument to the function are unchanged, but
+everything else has been wrapped in double quotation marks. This is because
+in BNF notation, we distinguish between two types of production items:
+terminals and non-terminals. A terminal represents simply a literal
+string of characters that is verbatim part of the syntax of that production.
+A non-terminal, conversely, represents a sort name, where the syntax of that
+production accepts any valid term of that sort at that position.

+

This is why, when we wrote the program colorOf(Banana()), krun was able to
+execute that program: because it represented a term of sort Color that was
+parsed and interpreted by K's interpreter. In other words, krun parses and
+interprets terms according to the grammar defined by the developer. It is
+automatically converted into an AST of that term, and then the colorOf
+function is evaluated using the function rules provided in the definition.

+

You can ask yourself: How does K match the strings between the double quotes?
+The answer is that K uses Flex to generate a scanner for the grammar. Flex looks
+for the longest possible match of a regular expression in the input. If there
+are ambiguities between 2 or more regular expressions, it will pick the one with
+the highest prec attribute. You can learn more about how Flex matching works
+here.

+

Bringing us back to the file lesson-03-a.k, we can see that this grammar
+has given a simple BNF grammar for expressions over Booleans. We have defined
+constructors corresponding to the Boolean values true and false, and functions
+corresponding to the Boolean operators for and, or, not, and xor. We have also
+given a syntax for each of these functions based on their syntax in the C
+programming language. As such, we can now write programs in the simple language
+we have defined.

+

Input the following program into your editor as and.bool in the same
+directory:

+
true && false
+

We cannot interpret this program yet, because we have not given rules defining
+the meaning of the && function yet, but we can parse it. To do this, you can
+run (from the same directory):

+
kast --output kore and.bool
+

kast is K's just-in-time parser. It will generate a grammar from your K
+definition on the fly and use it to parse the program passed on the command
+line. The --output flag controls how the resulting AST is represented; don't
+worry about the possible values yet, just use kore.

+

You ought to get the following AST printed on standard output, minus the
+formatting:

+
inj{SortBoolean{}, SortKItem{}}(
+  Lbl'UndsAnd-And-UndsUnds'LESSON-03-A'Unds'Boolean'Unds'Boolean'Unds'Boolean{}(
+    Lbltrue'Unds'LESSON-03-A'Unds'Boolean{}(),
+    Lblfalse'Unds'LESSON-03-A'Unds'Boolean{}()
+  )
+)
+

Don't worry about what exactly this means yet, just understand that it
+represents the AST of the program that you just parsed. You ought to be able
+to recognize the basic shape of it by seeing the words true, false, and
+And in there. This is Kore, the intermediate representation of K, and we
+will cover it in detail later.

+

Note that you can also tell kast to print the AST in other formats. For a
+more direct representation of the original K, while still maintaining the
+structure of an AST, you can say kast --output kast and.bool. This will
+yield the following output:

+
`_&&__LESSON-03-A_Boolean_Boolean_Boolean`(
+  `true_LESSON-03-A_Boolean`(.KList),
+  `false_LESSON-03-A_Boolean`(.KList)
+)
+

Note how the first output is largely a name-mangled version of the second
+output. The one difference is the presence of the inj symbol in the KORE
+output. We will talk more about this in later lessons.

+

Exercise

+ +

Parse the expression false || true with --output kast. See if you can
+predict approximately what the corresponding output would be with
+--output kore, then run the command yourself and compare it to your
+prediction.

+

Ambiguities

+ +

Now let's try a slightly more advanced example. Input the following program
+into your editor as and-or.bool:

+
true && false || false
+

When you try and parse this program, you ought to see the following error:

+
[Error] Inner Parser: Parsing ambiguity.
+1: syntax Boolean ::= Boolean "||" Boolean [function]
+
+`_||__LESSON-03-A_Boolean_Boolean_Boolean`(`_&&__LESSON-03-A_Boolean_Boolean_Boolean`(`true_LESSON-03-A_Boolean`(.KList),`false_LESSON-03-A_Boolean`(.KList)),`false_LESSON-03-A_Boolean`(.KList))
+2: syntax Boolean ::= Boolean "&&" Boolean [function]
+
+`_&&__LESSON-03-A_Boolean_Boolean_Boolean`(`true_LESSON-03-A_Boolean`(.KList),`_||__LESSON-03-A_Boolean_Boolean_Boolean`(`false_LESSON-03-A_Boolean`(.KList),`false_LESSON-03-A_Boolean`(.KList)))
+        Source(./and-or.bool)
+        Location(1,1,1,23)
+

This error is saying that kast was unable to parse this program because it is
+ambiguous. K's just-in-time parser is a GLL parser, which means it can handle
+the full generality of context-free grammars, including those grammars which
+are ambiguous. An ambiguous grammar is one where the same string can be parsed
+as multiple distinct ASTs. In this example, it can't decide whether it should
+be parsed as (true && false) || false or as true && (false || false). As a
+result, it reports the error to the user.

+

Brackets

+ +

Currently there is no way of resolving this ambiguity, making it impossible
+to write complex expressions in this language. This is obviously a problem.
+The standard solution in most programming languages to this problem is to
+use parentheses to indicate the appropriate grouping. K generalizes this notion
+into a type of production called a bracket. A bracket production in K
+is any production with the bracket attribute. It is required that such a
+production only have a single non-terminal, and the sort of the production
+must equal the sort of that non-terminal. However, K does not otherwise
+impose restrictions on the grammar the user provides for a bracket. With that
+being said, the most common type of bracket is one in which a non-terminal
+is surrounded by terminals representing some type of bracket such as
+(), [], {}, <>, etc. For example, we can define the most common
+type of bracket, the type used by the vast majority of programming languages,
+quite simply.

+

Consider the following modified definition, which we will save to
+lesson-03-d.k:

+
module LESSON-03-D
+
+  syntax Boolean ::= "true" | "false"
+                   | "(" Boolean ")" [bracket]
+                   | "!" Boolean [function]
+                   | Boolean "&&" Boolean [function]
+                   | Boolean "^" Boolean [function]
+                   | Boolean "||" Boolean [function]
+
+endmodule
+

In this definition, if the user does not explicitly define parentheses, the
+grammar remains ambiguous and K's just-in-time parser will report an error.
+However, you are now able to parse more complex programs by means of explicitly
+grouping subterms with the bracket we have just defined.

+

Consider and-or-left.bool:

+
(true && false) || false
+

Now consider and-or-right.bool:

+
true && (false || false)
+

If you parse these programs with kast, you will once again get a single
+unique AST with no error. If you look, you might notice that the bracket itself
+does not appear in the AST. In fact, this is a property unique to brackets:
+productions with the bracket attribute are not represented in the parsed AST
+of a term, and the child of the bracket is folded immediately into the parent
+term. This is the reason for the requirement that a bracket production have
+a single non-terminal of the same sort as the production itself.

+

Exercise

+ +

Write out what you expect the AST to be arising from parsing these two programs
+above with --output kast, then parse them yourself and compare them to the
+AST you expected. Confirm for yourself that the bracket production does not
+appear in the AST.

+

Tokens

+ +

So far we have seen how we can define the grammar of a language. However,
+the grammar is not the only relevant part of parsing a language. Also relevant
+is the lexical syntax of the language. Thus far, we have implicitly been using
+K's automatic lexer generation to generate a token in the scanner for each
+terminal in our grammar. However, sometimes we wish to define more complex
+lexical syntax. For example, consider the case of integers in C: an integer
+consists of a decimal, octal, or hexadecimal number followed by an optional
+suffix indicating the type of the literal.

+

In theory it would be possible to define this syntax via a grammar, but not
+only would it be cumbersome and tedious, you would also then have to deal with
+an AST generated for the literal which is not convenient to work with.

+

Instead of doing this, K allows you to define token productions, where
+a production consists of a regular expression followed by the token
+attribute, and the resulting AST consists of a typed string containing the
+value recognized by the regular expression.

+

For example, the builtin integers in K are defined using the following
+production:

+
syntax Int ::= r"[\\+\\-]?[0-9]+" [token]
+

Here we can see that we have defined that an integer is an optional sign
+followed by a nonzero sequence of digits. The r preceding the terminal
+indicates that what appears inside the double quotes is a regular expression,
+and the token attribute indicates that terms which parse as this production
+should be converted into a token by the parser.

+

It is also possible to define tokens that do not use regular expressions. This
+can be useful when you wish to declare particular identifiers for use in your
+semantics later. For example:

+
syntax Id ::= "main" [token]
+

Here, we declare that main is a token of sort Id. Instead of being parsed
+as a symbol, it gets parsed as a token, generating a typed string in the AST.
+This is useful in a semantics of C because the parser generally does not treat
+the main function in C specially; only the semantics treats it specially.

+

Of course, languages can have more complex lexical syntax. For example, if we
+wish to define the syntax of integers in C, we could use the following
+production:

+
syntax IntConstant ::= r"(([1-9][0-9]*)|(0[0-7]*)|(0[xX][0-9a-fA-F]+))(([uU][lL]?)|([uU]((ll)|(LL)))|([lL][uU]?)|(((ll)|(LL))[uU]?))?" [token]
+

As you may have noted above, long and complex regular expressions
+can be hard to read. They also suffer from the problem that unlike a grammar,
+they are not particularly modular.

+

We can get around this restriction by declaring explicit regular expressions,
+giving them a name, and then referring to them in productions.

+

Consider the following (equivalent) way to define the lexical syntax of
+integers in C:

+
syntax IntConstant ::= r"({DecConstant}|{OctConstant}|{HexConstant})({IntSuffix}?)" [token]
+syntax lexical DecConstant = r"{NonzeroDigit}({Digit}*)"
+syntax lexical OctConstant = r"0({OctDigit}*)"
+syntax lexical HexConstant = r"{HexPrefix}({HexDigit}+)"
+syntax lexical HexPrefix = r"0x|0X"
+syntax lexical NonzeroDigit = r"[1-9]"
+syntax lexical Digit = r"[0-9]"
+syntax lexical OctDigit = r"[0-7]"
+syntax lexical HexDigit = r"[0-9a-fA-F]"
+syntax lexical IntSuffix = r"{UnsignedSuffix}({LongSuffix}?)|{UnsignedSuffix}{LongLongSuffix}|{LongSuffix}({UnsignedSuffix}?)|{LongLongSuffix}({UnsignedSuffix}?)"
+syntax lexical UnsignedSuffix = r"[uU]"
+syntax lexical LongSuffix = r"[lL]"
+syntax lexical LongLongSuffix = r"ll|LL"
+

As you can see, this is rather more verbose, but it has the benefit of both
+being much easier to read and understand, and also increased modularity.
+Note that we refer to a named regular expression by putting the name in curly
+brackets. Note also that only the first sentence actually declares a new piece
+of syntax in the language. When the user writes syntax lexical, they are only
+declaring a regular expression. To declare an actual piece of syntax in the
+grammar, you still must actually declare an explicit token production.

+

One final note: K uses Flex to implement
+its lexical analysis. As a result, you can refer to the
+Flex Manual
+for a detailed description of the regular expression syntax supported. Note
+that for performance reasons, Flex's regular expressions are actually a regular
+language, and thus lack some of the syntactic convenience of modern
+"regular expression" libraries. If you need features that are not part of the
+syntax of Flex regular expressions, you are encouraged to express them via
+a grammar instead.

+

Ahead-of-time parser generation

+ +

So far we have been entirely focused on K's support for just-in-time parsing,
+where the parser is generated on the fly prior to being used. This benefits
+from being faster to generate the parser, but it suffers in performance if you
+have to repeatedly parse strings with the same parser. For this reason, it is
+generally encouraged that when parsing programs, you use K's ahead-of-time
+parser generation. K makes use of
+GNU Bison to generate parsers.

+

By default, you can enable ahead-of-time parsing via the --gen-bison-parser
+flag to kompile. This will make use of Bison's LR(1) parser generator. As
+such, if your grammar is not LR(1), it may not parse exactly the same as if
+you were to use the just-in-time parser, because Bison will automatically pick
+one of the possible branches whenever it encounters a shift-reduce or
+reduce-reduce conflict. In this case, you can either modify your grammar to be
+LR(1), or you can enable use of Bison's GLR support by instead passing
+--gen-glr-bison-parser to kompile. Note that if your grammar is ambiguous,
+the ahead-of-time parser will not provide you with particularly readable error
+messages at this time.
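For example (an illustrative invocation, reusing the lesson-03-d.k
definition from earlier in this lesson), ahead-of-time parser generation
is enabled by adding the flag when compiling:

kompile lesson-03-d.k --gen-bison-parser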

+

If you have a K definition named foo.k, and it generates a directory when
+you run kompile called foo-kompiled, you can invoke the ahead-of-time
+parser you generated by running foo-kompiled/parser_PGM <file> on a file.

+

Exercises

+ +
  1. Compile lesson-03-d.k with ahead-of-time parsing enabled. Then compare
    +how long it takes to run kast --output kore and-or-left.bool with how long it
    +takes to run lesson-03-d-kompiled/parser_PGM and-or-left.bool. Confirm for
    +yourself that both produce the same result, but that the latter is faster.

  2. Define a simple grammar consisting of integers, brackets, addition,
    +subtraction, multiplication, division, and unary negation. Integers should be
    +in decimal form and lexically without a sign, whereas negative numbers can be
    +represented via unary negation. Ensure that you are able to parse some basic
    +arithmetic expressions using a generated ahead-of-time parser. Do not worry
    +about disambiguating the grammar or about writing rules to implement the
    +operations in this definition.

  3. Write a program where the meaning of the arithmetic expression based on
    +the grammar you defined above is ambiguous, and then write programs that
    +express each individual intended meaning using brackets.
+

Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.4: Disambiguating Parses.

+

Lesson 1.4: Disambiguating Parses

+ +

The purpose of this lesson is to teach how to use K's builtin features for
+disambiguation to transform an ambiguous grammar into an unambiguous one that
+expresses the intended ASTs.

+

Priority blocks

+ +

In practice, very few formal languages outside the domain of natural language
+processing are ambiguous. The main reason for this is that parsing unambiguous
+languages is asymptotically faster than parsing ambiguous languages.
+Programming language designers instead usually use the notions of operator
+precedence and associativity to make expression grammars unambiguous. These
+mechanisms work by instructing the parser to reject certain ASTs in favor of
+others in case of ambiguities; it is often possible to remove all ambiguities
+in a grammar with these techniques.

+

While it is sometimes possible to explicitly rewrite the grammar to remove
+these parses, because K's grammar specification and AST generation are
+inextricably linked, this is generally discouraged. Instead, we use the
+approach of explicitly expressing the relative precedence of different
+operators in different situations in order to resolve the ambiguity.

+

For example, in C, && binds tighter in precedence than ||, meaning that
+the expression true && false || false has only one valid AST:
+(true && false) || false.

+

Consider, then, the third iteration on the grammar of this definition
+(lesson-04-a.k):

+
module LESSON-04-A
+
+  syntax Boolean ::= "true" | "false"
+                   | "(" Boolean ")" [bracket]
+                   > "!" Boolean [function]
+                   > Boolean "&&" Boolean [function]
+                   > Boolean "^" Boolean [function]
+                   > Boolean "||" Boolean [function]
+
+endmodule
+

In this example, some of the | symbols separating productions in a single
+block have been replaced with >. This serves to describe the
+priority groups associated with this block of productions.
+The first priority group consists of the atoms of the
+language: true, false, and the bracket operator. In general, a priority
+group starts either at the ::= or > operator and extends until either the
+next > operator or the end of the production block. Thus, we can see that the
+second, third, fourth, and fifth priority groups in this grammar all consist
+of a single production.

+

The meaning of these priority groups becomes apparent when parsing programs:
+A symbol with a lesser priority (i.e., one that binds looser) cannot
+appear as the direct child of a symbol with a greater priority (i.e.,
+one that binds tighter). In this case, the > operator can be seen as a
+greater-than operator describing a transitive partial ordering on the
+productions in the production block, expressing their relative priority.

+

To see this more concretely, let's look again at the program
+true && false || false. As noted before, previously this program was
+ambiguous because the parser could either choose that && was the child of ||
+or vice versa. However, because a symbol with lesser priority (i.e., ||)
+cannot appear as the direct child of a symbol with greater priority
+(i.e., &&), the parser will reject the parse where || is under the
+&& operator. As a result, we are left with the unambiguous parse
+(true && false) || false. Similarly, true || false && false parses
+unambiguously as true || (false && false). Conversely, if the user explicitly
+wants the other parse, they can express this using brackets by explicitly
+writing true && (false || false). This still parses successfully because the
+|| operator is no longer the direct child of the && operator, but is
+instead the direct child of the () operator, and the && operator is an
+indirect parent, which is not subject to the priority restriction.

+

Astute readers, however, will already have noticed what seems to be a
+contradiction: we have defined () as also having greater priority than ||.
+One would think that this should mean that || cannot appear as a direct
+child of (). This is a problem because priority groups are applied to every
+possible parse separately. That is to say, even if the term is unambiguous
+prior to this disambiguation rule, we still reject that parse if it violates
+the rule of priority.

+

In fact, however, we do not reject this program as a parse error. Why is that?
+Well, the rule for priority is slightly more complex than previously described.
+In actual fact, it applies only conditionally. Specifically, it applies in
+cases where the child is either the first or last production item in the
+parent's production. For example, in the production Bool "&&" Bool, the
+first Bool non-terminal is not preceded by any terminals, and the last Bool
+non-terminal is not followed by any terminals. As a result of this, we apply
+the priority rule to both children of &&. However, in the () operator,
+the sole non-terminal is both preceded by and followed by terminals. As a
+result, the priority rule is not applied when () is the parent. Because of
+this, the program we mentioned above successfully parses.

+

Exercise

+ +

Parse the program true && false || false using kast, and confirm that the AST
+places || as the top level symbol. Then modify the definition so that you
+will get the alternative parse.

+

Associativity

+ +

Even having broken the expression grammar into priority blocks, the resulting
+grammar is still ambiguous. We can see this if we try to parse the following
+program (assoc.bool):

+
true && false && false
+

Priority blocks will not help us here: the problem comes between two parses
+where both possible parses have a direct parent and child which is within a
+single priority block (in this case, && is in the same block as itself).

+

This is where the notion of associativity comes into play. Associativity
+applies the following additional rules to parses:

+
    +
  • a left-associative symbol cannot appear as a direct rightmost child of a
    +symbol with equal priority;
  • a right-associative symbol cannot appear as a direct leftmost child of a
    +symbol with equal priority; and
  • a non-associative symbol cannot appear as a direct leftmost or rightmost
    +child of a symbol with equal priority.
+

In C, binary operators are all left-associative, meaning that the expression
+true && false && false parses unambiguously as (true && false) && false,
+because && cannot appear as the rightmost child of itself.

+

Consider, then, the fourth iteration on the grammar of this definition
+(lesson-04-b.k):

+
module LESSON-04-B
+
+  syntax Boolean ::= "true" | "false"
+                   | "(" Boolean ")" [bracket]
+                   > "!" Boolean [function]
+                   > left: Boolean "&&" Boolean [function]
+                   > left: Boolean "^" Boolean [function]
+                   > left: Boolean "||" Boolean [function]
+
+endmodule
+

Here each priority group, immediately after the ::= or > operator, can
+be followed by a symbol representing the associativity of that priority group:
+either left: for left associativity, right: for right associativity, or
+non-assoc: for non-associativity. In this example, each priority group we
+apply associativity to has only a single production, but we could equally well
+write a priority block with multiple productions and an associativity.

+

For example, consider the following, different grammar (lesson-04-c.k):

+
module LESSON-04-C
+
+  syntax Boolean ::= "true" | "false"
+                   | "(" Boolean ")" [bracket]
+                   > "!" Boolean [function]
+                   > left:
+                     Boolean "&&" Boolean [function]
+                   | Boolean "^" Boolean [function]
+                   | Boolean "||" Boolean [function]
+
+endmodule
+

In this example, unlike the one above, &&, ^, and || have the same
+priority. However, viewed as a group, the entire group is left associative.
+This means that none of &&, ^, and || can appear as the right child of
+any of &&, ^, or ||. As a result of this, this grammar is also not
+ambiguous. However, it expresses a different grammar, and you are encouraged
+to think about what the differences are in practice.

+

Exercise

+ +

Parse the program true && false && false yourself, and confirm that the AST
+places the rightmost && at the top of the expression. Then modify the
+definition to generate the alternative parse.

+

Explicit priority and associativity declarations

+ +

Previously we have only considered the case where all of the productions
+which you wish to express a priority or associativity relation over are
+co-located in the same block of productions. However, in practice this is not
+always feasible or desirable, especially as a definition grows in size across
+multiple modules.

+

As a result of this, K provides a second way of declaring priority and
+associativity relations.

+

Consider the following grammar, which we will name lesson-04-d.k and which
+will express the exact same grammar as lesson-04-b.k

+
module LESSON-04-D
+
+  syntax Boolean ::= "true" [group(literal)] | "false" [group(literal)]
+                   | "(" Boolean ")" [group(atom), bracket]
+                   | "!" Boolean [group(not), function]
+                   | Boolean "&&" Boolean [group(and), function]
+                   | Boolean "^" Boolean [group(xor), function]
+                   | Boolean "||" Boolean [group(or), function]
+
+  syntax priority literal atom > not > and > xor > or
+  syntax left and
+  syntax left xor
+  syntax left or
+endmodule
+

This introduces a couple of new features of K. First, the group(_) attribute
+is used to conceptually group together sets of sentences under a common
+user-defined name. For example, literal in the syntax priority sentence is
+used to refer to all the productions marked with the group(literal) attribute,
+i.e., true and false. A production can belong to multiple groups using
+syntax such as group(myGrp1,myGrp2).
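For instance, here is a rough sketch (using illustrative group names that are
not part of the lesson files) of a production that belongs to two groups at
once, so that either group name can be used in later priority or associativity
sentences:

  syntax Boolean ::= Boolean "&&" Boolean [group(and,binary), function]

  // `and` and `binary` both refer to this production, so the following
  // sentence has the same effect that `syntax left and` would have
  syntax left binary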

+

Once we understand this, it becomes relatively straightforward to understand
+the meaning of this grammar. Each syntax priority sentence defines a
+priority relation where > separates different priority groups. Each priority
+group is defined by a list of one or more group names, and consists of all
+productions which are members of at least one of those named groups.

+

In the same way, a syntax left, syntax right, or syntax non-assoc sentence
declares the productions in the listed groups to be left-, right-, or
non-associative. The associativity applies across all of the productions in
all of the listed groups taken together, which means that:

+
syntax left a b
+

is different from:

+
syntax left a
+syntax left b
+

As a consequence of this, syntax [left|right|non-assoc] should not be used to
group together groups of productions with different priorities.
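To illustrate the first form, here is a rough sketch (a fragment only, with
illustrative group names) of how the single left: block of lesson-04-c.k could
be expressed with explicit groups and one associativity sentence covering all
three operators, which share a priority:

  syntax Boolean ::= Boolean "&&" Boolean [group(and), function]
                   | Boolean "^" Boolean  [group(xor), function]
                   | Boolean "||" Boolean [group(or), function]

  // all three groups have equal priority here, so declaring them together
  // makes the three operators mutually left-associative
  syntax left and xor or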

+

Prefer/avoid

+ +

Sometimes priority and associativity prove insufficient to disambiguate a
+grammar. In particular, sometimes it is desirable to be able to choose between
+two ambiguous parses directly while still not rejecting any parses if the term
+parsed is unambiguous. A good example of this is the famous "dangling else"
+problem in imperative C-like languages.

+

Consider the following definition (lesson-04-e.k):

+
module LESSON-04-E
+
+  syntax Exp ::= "true" | "false"
+  syntax Stmt ::= "if" "(" Exp ")" Stmt
+                | "if" "(" Exp ")" Stmt "else" Stmt
+                | "{" "}"
+endmodule
+

We can write the following program (dangling-else.if):

+
if (true) if (false) {} else {}
+

This is ambiguous because it is unclear whether the else clause belongs to
the outer if or the inner if. At first we might try to resolve this with
priorities, saying that the if without an else cannot appear as a child of
the if with an else. However, because the non-terminal in question in the
parent symbol (the Stmt preceding else) is both preceded and followed by a
terminal, the priority filter does not apply, so this will not work.

+

Instead, we can resolve the ambiguity directly by telling the parser to
+"prefer" or "avoid" certain productions when ambiguities arise. For example,
+when we parse this program, we see the following ambiguity as an error message:

+
[Error] Inner Parser: Parsing ambiguity.
+1: syntax Stmt ::= "if" "(" Exp ")" Stmt
+
+`if(_)__LESSON-04-E_Stmt_Exp_Stmt`(`true_LESSON-04-E_Exp`(.KList),`if(_)_else__LESSON-04-E_Stmt_Exp_Stmt_Stmt`(`false_LESSON-04-E_Exp`(.KList),`;_LESSON-04-E_Stmt`(.KList),`;_LESSON-04-E_Stmt`(.KList)))
+2: syntax Stmt ::= "if" "(" Exp ")" Stmt "else" Stmt
+
+`if(_)_else__LESSON-04-E_Stmt_Exp_Stmt_Stmt`(`true_LESSON-04-E_Exp`(.KList),`if(_)__LESSON-04-E_Stmt_Exp_Stmt`(`false_LESSON-04-E_Exp`(.KList),`;_LESSON-04-E_Stmt`(.KList)),`;_LESSON-04-E_Stmt`(.KList))
+        Source(./dangling-else.if)
+        Location(1,1,1,30)
+

Roughly, we see that the ambiguity is between an if with an else or an if
+without an else. Since we want to pick the first parse, we can tell K to
+"avoid" the second parse with the avoid attribute. Consider the following
+modified definition (lesson-04-f.k):

+
module LESSON-04-F
+
+  syntax Exp ::= "true" | "false"
+  syntax Stmt ::= "if" "(" Exp ")" Stmt
+                | "if" "(" Exp ")" Stmt "else" Stmt [avoid]
+                | "{" "}"
+endmodule
+

Here we have added the avoid attribute to the else production. As a result,
+when an ambiguity occurs and one or more of the possible parses has that symbol
+at the top of the ambiguous part of the parse, we remove those parses from
+consideration and consider only those remaining. The prefer attribute behaves
+similarly, but instead removes all parses which do not have that attribute.
+In both cases, no action is taken if the parse is not ambiguous.

+

Exercises

  1. Parse the program if (true) if (false) {} else {} using lesson-04-f.k
     and confirm that the else clause is part of the innermost if statement.
     Then modify the definition so that you will get the alternative parse.

  2. Modify your solution from Lesson 1.3, Exercise 2 so that unary negation
     binds tighter than multiplication and division, which in turn bind
     tighter than addition and subtraction, and so that each binary operator
     is left-associative. Write these priority and associativity declarations
     explicitly, and then try to write them inline.

  3. Write a simple grammar containing at least one ambiguity that cannot be
     resolved via priority or associativity, and then use the prefer attribute
     to resolve that ambiguity.

  4. Explain why the following grammar is not labeled ambiguous by the K
     parser when parsing abb, then make the parser realize the ambiguity.

module EXERCISE4
+
+syntax Expr ::= "a" Expr "b"
+              | "abb"
+              | "b"
+
+endmodule
+

Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.5: Modules, Imports, and Requires.

+

Lesson 1.5: Modules, Imports, and Requires

+ +

The purpose of this lesson is to explain how K definitions can be broken into
+separate modules and files and how these distinct components combine into a
+complete K definition.

+

K's outer syntax

+ +

Recall from Lesson 1.3 that K's grammar is broken
+into two components: the outer syntax of K and the inner syntax of K.
+Outer syntax, as previously mentioned, consists of requires, modules,
+imports, and sentences. A K semantics is expressed by the set of
sentences contained in the definition. The scope of what is considered
contained in that definition is determined both by the main semantics module
of a K definition and by the requires and imports present in the file that
contains that module.

+

Basic module syntax

+ +

The basic unit of grouping sentences in K is the module. A module consists
+of a module name, an optional list of attributes, a list of
+imports, and a list of sentences.

+

A module name consists of one or more groups of letters, numbers, or
underscores, separated by hyphens. Here are some valid module names: FOO,
FOO-BAR, foo0, foo0_bar-Baz9. Here are some invalid module names: -,
-FOO, BAR-, FOO--BAR. Stylistically, module names are usually all
uppercase with hyphens separating words, but this is not strictly enforced.

+

Some example modules include an empty module:

+
module LESSON-05-A
+
+endmodule
+

A module with some attributes:

+
module LESSON-05-B [group(attr1,attr2), private]
+
+endmodule
+

A module with some sentences:

+
module LESSON-05-C
+  syntax Boolean ::= "true" | "false"
+  syntax Boolean ::= "not" Boolean [function]
+  rule not true => false
+  rule not false => true
+endmodule
+

Imports

+ +

Thus far we have only discussed definitions containing a single module.
Definitions can also contain multiple modules, in which case one module may
import others.

+

An import in K appears at the top of a module, prior to any sentences. It can
+be specified with the imports keyword, followed by a module name.

+

For example, here is a simple definition with two modules (lesson-05-d.k):

+
module LESSON-05-D-1
+  syntax Boolean ::= "true" | "false"
+  syntax Boolean ::= "not" Boolean [function]
+endmodule
+
+module LESSON-05-D
+  imports LESSON-05-D-1
+
+  rule not true => false
+  rule not false => true
+endmodule
+

This K definition is equivalent to the definition expressed by the single
module LESSON-05-C. Essentially, by importing a module, we include all of the
sentences of the imported module in the module that does the importing.
There are a few minor differences between importing a module and simply
including its sentences in another module directly, but we will cover these
differences later. In short, you can think of modules as a way of
conceptually grouping sentences in a larger K definition.

+

Exercise

+ +

Modify lesson-05-d.k to include four modules: one containing the syntax, two
modules each containing one rule and importing the first module, and a final
module LESSON-05-D containing no sentences that imports the second and third
modules. Check to make sure the definition still compiles and that you can
still evaluate the not function.

+

Parsing in the presence of multiple modules

+ +

As you may have noticed, each module in a definition can express a distinct set
+of syntax. When parsing the sentences in a module, we use the syntax
+of that module, enriched with the basic syntax of K, in order to parse
rules in that module. For example, the following definition produces a parser
error (lesson-05-e.k):

+
module LESSON-05-E-1
+  rule not true => false
+  rule not false => true
+endmodule
+
+module LESSON-05-E-2
+  syntax Boolean ::= "true" | "false"
+  syntax Boolean ::= "not" Boolean [function]
+endmodule
+

This is because the syntax referenced in module LESSON-05-E-1, namely, not,
+true, and false, is not imported by that module. You can solve this problem
+by simply importing the modules containing the syntax you want to use in your
+sentences.

+

Main syntax and semantics modules

+ +

When we are compiling a K definition, we need to know where to start. We
designate two specific entry-point modules: the main syntax module
and the main semantics module. The main syntax module, together with all the
modules it imports recursively, is used to create the parser for the programs
that you execute with krun. The main semantics module, together with all the
modules it imports recursively, is used to determine the rules that can be
applied at runtime in order to execute a program. For instance, in the example
above, if the main semantics module is LESSON-05-D-1, then not is an
uninterpreted function (i.e., it has no rules associated with it), and the
rules in module LESSON-05-D are not included.

+

While you can specify the entry point modules explicitly by passing the
+--main-module and --syntax-module flags to kompile, by default, if you
+type kompile foo.k, then the main semantics module will be FOO and the
+main syntax module will be FOO-SYNTAX.
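For example, a hypothetical invocation that sets both entry points explicitly
for the definition above (here simply pointing both flags at the same module)
might look like this:

kompile lesson-05-d.k --main-module LESSON-05-D --syntax-module LESSON-05-D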

+

Splitting a definition into multiple files

+ +

So far we have discussed ways to break definitions into separate conceptual
components (modules). K also provides a mechanism for combining multiple files
into a single K definition, namely, the requires directive.

+

In K, the requires keyword has two meanings. The first, the requires
+statement, appears at the top of a K file, prior to any module declarations. It
+consists of the keyword requires followed by a double-quoted string. The
+second meaning of the requires keyword will be covered in a later lesson,
+but it is distinguished because the second case occurs only inside modules.

+

The string passed to the requires statement contains a filename. When you run
+kompile on a file, it will look at all of the requires statements in that
+file, look up those files on disk, parse them, and then recursively process all
+the requires statements in those files. It then combines all the modules in all
+of those files together, and uses them collectively as the set of modules to
+which imports statements can refer.

+

Putting it all together

+ +

Putting it all together, here is one possible way in which we could break the
+definition lesson-02-c.k from Lesson 1.2 into
+multiple files and modules:

+

colors.k:

+
module COLORS
+  syntax Color ::= Yellow()
+                 | Blue()
+endmodule
+

fruits.k:

+
module FRUITS
+  syntax Fruit ::= Banana()
+                 | Blueberry()
+endmodule
+

colorOf.k:

+
requires "fruits.k"
+requires "colors.k"
+
+module COLOROF-SYNTAX
+  imports COLORS
+  imports FRUITS
+
+  syntax Color ::= colorOf(Fruit) [function]
+endmodule
+
+module COLOROF
+  imports COLOROF-SYNTAX
+
+  rule colorOf(Banana()) => Yellow()
+  rule colorOf(Blueberry()) => Blue()
+endmodule
+

You would then compile this definition with kompile colorOf.k and use it the
+same way as the original, single-module definition.

+

Exercise

+ +

Modify the name of the COLOROF module, and then recompile the definition.
+Try to understand why you now get a compiler error. Then, resolve this compiler
+error by passing the --main-module and --syntax-module flags to kompile.

+

Include path

+ +

A note is in order about how paths are resolved in requires statements.

+

The path you specify is allowed to be either absolute or relative. If the
path is absolute, that exact file is imported. If the path is relative, a
matching file is searched for within all of the include directories specified
to the compiler. By default, the include directories are the current working
directory, followed by the include/kframework/builtin directory within your
installation of K. You can also pass one or more directories to kompile via
the -I command-line flag, in which case these directories are prepended to the
list and therefore searched first.
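For example, continuing the colorOf.k definition above: if colors.k and
fruits.k were moved into a hypothetical deps subdirectory, the definition
could still be compiled with a command along the lines of
kompile colorOf.k -I deps.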

+

Exercises

  1. Take the solution to Lesson 1.4, Exercise 2 which included the explicit
     priority and associativity declarations, and modify the definition so
     that the syntax of integers and brackets is in one module, the syntax of
     addition, subtraction, and unary negation is in another module, and the
     syntax of multiplication and division is in a third module. Make sure you
     can still parse the same set of expressions as before. Place priority
     declarations in the main module.

  2. Modify lesson-02-d.k from Lesson 1.2 so that the rules and syntax are in
     separate modules in separate files.

  3. Place the file containing the syntax from Exercise 2 in another
     directory, then recompile the definition. Observe why a compilation error
     occurs. Then fix the compiler error by passing -I to kompile.


Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.6: Integers and Booleans.

+

Lesson 1.6: Integers and Booleans

+ +

The purpose of this lesson is to explain the two most basic types of builtin
+sorts in K, the Int sort and the Bool sort, representing
+arbitrary-precision integers and Boolean algebra.

+

Builtin sorts in K

+ +

K provides definitions of some useful sorts in
+domains.md, found in the
+include/kframework/builtin directory of the K installation. This file is
+defined via a
+Literate programming
style that we will discuss in a future lesson. We will not cover all of the
sorts found there immediately; however, this lesson discusses some of the
details surrounding integers and Booleans, as well as explaining how to look
up more detailed information about builtin functions in K's documentation.

+

Booleans in K

+ +

The most basic builtin sort K provides is the Bool sort, representing
+Boolean values (i.e., true and false). You have already seen how we were
+able to create this type ourselves using K's parsing and disambiguation
+features. However, in the vast majority of cases, we prefer instead to import
+the version of Boolean algebra defined by K itself. Most simply, you can do
+this by importing the module BOOL in your definition. For example
+(lesson-06-a.k):

+
module LESSON-06-A
+  imports BOOL
+
+  syntax Fruit ::= Blueberry() | Banana()
+  syntax Bool ::= isBlue(Fruit) [function]
+
+  rule isBlue(Blueberry()) => true
+  rule isBlue(Banana()) => false
+endmodule
+

Here we have defined a simple predicate, i.e., a function returning a
Boolean value. We are now able to perform the usual Boolean operations of
and, or, and not over these values. For example (lesson-06-b.k):

+
module LESSON-06-B
+  imports BOOL
+
+  syntax Fruit ::= Blueberry() | Banana()
+  syntax Bool ::= isBlue(Fruit) [function]
+
+  rule isBlue(Blueberry()) => true
+  rule isBlue(Banana()) => false
+
+  syntax Bool ::= isYellow(Fruit) [function]
+                | isBlueOrYellow(Fruit) [function]
+
+  rule isYellow(Banana()) => true
+  rule isYellow(Blueberry()) => false
+
+  rule isBlueOrYellow(F) => isBlue(F) orBool isYellow(F)
+endmodule
+

In the above example, Boolean inclusive or is performed via the orBool
+function, which is defined in the BOOL module. As a matter of convention,
+many functions over builtin sorts in K are suffixed with the name of the
+primary sort over which those functions are defined. This happens so that the
+syntax of K does not (generally) conflict with the syntax of any other
+programming language, which would make it harder to define that programming
+language in K.
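For reference, here are a few of the Bool operations that domains.md provides
(this list is not exhaustive; see the BOOL module for the rest), written as
example terms together with the value each evaluates to:

  notBool true             // => false
  true andBool false       // => false
  true orBool false        // => true
  true xorBool true        // => false
  true impliesBool false   // => false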

+

Exercise

+ +

Write a function isBlueAndNotYellow which computes the appropriate Boolean
+expression. If you are unsure what the appropriate syntax is to use, you
+can refer to the BOOL module in
+domains.md. Add a term of
+sort Fruit for which isBlue and isYellow both return true, and test that
+the isBlueAndNotYellow function behaves as expected on all three Fruits.

+

Syntax Modules

+ +

For most sorts in domains.md, K defines more than one module that can be
imported by users. For example, for the Bool sort, K defines the BOOL
module already discussed above, but also provides the
BOOL-SYNTAX module. This module, unlike the BOOL module, only declares the
+values true and false, but not any of the functions that operate over the
+Bool sort. The rationale is that you may want to import this module into the
+main syntax module of your definition in some cases, whereas you generally do
+not want to do this with the version of the module that includes all the
+functions over the Bool sort. For example, if you were defining the semantics
+of C++, you might import BOOL-SYNTAX into the syntax module of your
+definition, because true and false are part of the grammar of C++, but
+you would only import the BOOL module into the main semantics module, because
+C++ defines its own syntax for and, or, and not that is different from the
+syntax defined in the BOOL module.

+

Here, for example, is how we might redefine our Boolean expression calculator
+to use the Bool sort while maintaining an idiomatic structure of modules
+and imports, for the first time including the rules to calculate the values of
+expressions themselves (lesson-06-c.k):

+
module LESSON-06-C-SYNTAX
+  imports BOOL-SYNTAX
+
+  syntax Bool ::= "(" Bool ")" [bracket]
+                > "!" Bool [function]
+                > left:
+                  Bool "&&" Bool [function]
+                | Bool "^" Bool [function]
+                | Bool "||" Bool [function]
+endmodule
+
+module LESSON-06-C
+  imports LESSON-06-C-SYNTAX
+  imports BOOL
+
+  rule ! B => notBool B
+  rule A && B => A andBool B
+  rule A ^ B => A xorBool B
+  rule A || B => A orBool B
+endmodule
+

Note the encapsulation of syntax: the LESSON-06-C-SYNTAX module contains
+exactly the syntax of our Boolean expressions, and no more, whereas any other
+syntax needed to implement those functions is in the LESSON-06-C module
+instead.

+

Exercise

+ +

Add an "implies" function to the above Boolean expression calculator, using the
+-> symbol to represent implication. You can look up K's builtin "implies"
+function in the BOOL module in domains.md.

+

Integers in K

+ +

Unlike most programming languages, where the most basic integer type is a
fixed-precision integer type, the most commonly used integer sort in K is
the Int sort, which represents the mathematical integers, i.e.,
arbitrary-precision integers.

+

K provides three main modules for import when using the Int sort. The first,
+containing all the syntax of integers as well as all of the functions over
+integers, is the INT module. The second, which provides just the syntax
+of integer literals themselves, is the INT-SYNTAX module. However, unlike
+most builtin sorts in K, K also provides a third module for the Int sort:
+the UNSIGNED-INT-SYNTAX module. This module provides only the syntax of
+non-negative integers, i.e., natural numbers. The reasons for this involve
+lexical ambiguity. Generally speaking, in most programming languages, -1 is
not itself a literal, but rather the literal 1 with the unary negation
operator applied to it. K thus provides this module to make it easier to
specify the syntax of such languages.
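As a rough sketch of how this might be used (the module and production names
here are illustrative and not part of the lesson files; this is meant to show
the idea rather than serve as a vetted definition), a language can import only
the unsigned literals into its syntax module and define its own unary minus:

module LESSON-06-NEGATION-SYNTAX
  imports UNSIGNED-INT-SYNTAX

  syntax Exp ::= Int
               | "-" Exp [function]
endmodule

module LESSON-06-NEGATION
  imports LESSON-06-NEGATION-SYNTAX
  imports INT

  // with this rule, -1 in a program is unary negation applied to the literal 1
  rule - I => 0 -Int I
endmodule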

+

For detailed information about the functions available over the Int sort,
+refer to domains.md. Note again how we append Int to the end of most of the
+integer operations to ensure they do not collide with the syntax of other
+programming languages.

+

Exercises

  1. Extend your solution from Lesson 1.4, Exercise 2 to implement the rules
     that define the behavior of addition, subtraction, multiplication, and
     division. Do not worry about the case when the user tries to divide by
     zero at this time. Use /Int to implement division. Test your new
     calculator implementation by executing the arithmetic expressions you
     wrote as part of Lesson 1.3, Exercise 2. Check to make sure each computes
     the value you expected.

  2. Combine the Boolean expression calculator from this lesson with your
     solution to Exercise 1, and then extend the combined calculator with the
     <, <=, >, >=, ==, and != expressions. Write some Boolean expressions
     that combine integer and Boolean operations, and test to ensure that
     these expressions return the expected truth value.

  3. Compute the following expressions using your solution from Exercise 2:
     7 / 3, 7 / -3, -7 / 3, -7 / -3. Then replace the /Int function in
     your definition with divInt instead, and observe how the value of the
     above expressions changes. Why does this occur?


Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.7: Side Conditions and Rule Priority.

+

Lesson 1.7: Side Conditions and Rule Priority

+ +

The purpose of this lesson is to explain how to write conditional rules in K,
+and to explain how to control the order in which rules are tried.

+

Side Conditions

+ +

So far, all of the rules we have discussed have been unconditional rules.
+If the left-hand side of the rule matches the arguments to the function, the
+rule applies. However, there is another type of rule, a conditional rule.
+A conditional rule consists of a rule body containing the patterns to
+match, and a side condition representing a Boolean expression that must
+evaluate to true in order for the rule to apply.

+

Side conditions in K are introduced via the requires keyword immediately
+following the rule body. For example, here is a rule with a side condition
+(lesson-07-a.k):

+
module LESSON-07-A
+  imports BOOL
+  imports INT
+
+  syntax Grade ::= "letter-A"
+                 | "letter-B"
+                 | "letter-C"
+                 | "letter-D"
+                 | "letter-F"
+                 | gradeFromPercentile(Int) [function]
+
+  rule gradeFromPercentile(I) => letter-A requires I >=Int 90
+endmodule
+

In this case, the gradeFromPercentile function takes a single integer
argument. The function evaluates to letter-A if the argument passed is
greater than or equal to 90. Note that the side condition is allowed to refer to variables
+that appear on the left-hand side of the rule. In the same manner as variables
+appearing on the right-hand side, variables that appear in the side condition
+evaluate to the value that was matched on the left-hand side. Then the
+functions in the side condition are evaluated, which returns a term of sort
+Bool. If the term is equal to true, then the rule applies. Bear in mind
+that the side condition is only evaluated at all if the patterns on the
+left-hand side of the rule match the term being evaluated.
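To make this concrete, here is how the rule above behaves on two sample calls
(a walk-through, not output copied from krun): for gradeFromPercentile(95),
the left-hand side matches with I bound to 95, the side condition 95 >=Int 90
evaluates to true, and the call evaluates to letter-A. For
gradeFromPercentile(85), the left-hand side still matches, but the side
condition 85 >=Int 90 evaluates to false, so this rule does not apply.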

+

Exercise

+ +

Write a rule that evaluates gradeFromPercentile to letter-B if the argument
+to the function is in the range [80,90). Test that the function correctly
+evaluates various numbers between 80 and 100.

+

owise Rules

+ +

So far, all the rules we have introduced have had the same priority. What
+this means is that K does not necessarily enforce an order in which the rules
+are tried. We have only discussed functions so far in K, so it is not
+immediately clear why this choice was made, given that a function is not
+considered well-defined if multiple rules for evaluating it are capable of
+evaluating the same arguments to different results. However, in future lessons
+we will discuss other types of rules in K, some of which can be
non-deterministic. What this means is that if more than one rule is capable
of matching, then K will explore each possible rule in parallel, and consider
each of their respective results when executing your program. Don't worry too
much about this right now, but just understand that because of this potential
for nondeterminism later on, we don't enforce a total ordering on the order in
which rules are attempted.

+

However, sometimes this is not practical; it can be very convenient to express
that a particular rule applies if no other rules for that function are
applicable. This can be expressed by adding the owise attribute to a rule.
What this means, in practice, is that this rule has lower priority than other
rules, and will only be tried after all the other, higher-priority rules have
been tried and have failed.

+

For example, in the above exercise, we had to add a side condition containing
+two Boolean comparisons to the rule we wrote to handle letter-B grades.
+However, in practice this meant that we compare the percentile to 90 twice. We
+can more efficiently and more idiomatically write the letter-B case for the
+gradeFromPercentile rule using the owise attribute (lesson-07-b.k):

+
module LESSON-07-B
+  imports BOOL
+  imports INT
+
+  syntax Grade ::= "letter-A"
+                 | "letter-B"
+                 | "letter-C"
+                 | "letter-D"
+                 | "letter-F"
+                 | gradeFromPercentile(Int) [function]
+
+  rule gradeFromPercentile(I) => letter-A requires I >=Int 90
+  rule gradeFromPercentile(I) => letter-B requires I >=Int 80 [owise]
+endmodule
+

This rule is saying, "if all the other rules do not apply, then the grade is a
+B if the percentile is greater than or equal to 80." Note here that we use both
+a side condition and an owise attribute on the same rule. This is not
+required (as we will see later), but it is allowed. What this means is that the
+side condition is only tried if the other rules did not apply and the
+left-hand side of the rule matched. You can even use more complex matching on
+the left-hand side than simply a variable. More generally, you can also have
+multiple higher-priority rules, or multiple owise rules. What this means in
+practice is that all of the non-owise rules are tried first, in any order,
+followed by all the owise rules, in any order.

+

Exercise

+ +

The grades D and F correspond to the percentile ranges [60, 70) and [0, 60)
+respectively. Write another implementation of gradeFromPercentile which
+handles only these cases, and uses the owise attribute to avoid redundant
+Boolean comparisons. Test that various percentiles in the range [0, 70) are
+evaluated correctly.

+

Rule Priority

+ +

As it happens, the owise attribute is a specific case of a more general
+concept we call rule priority. In essence, each rule is assigned an integer
+priority. Rules are tried in increasing order of priority, starting with a
+rule with priority zero, and trying each increasing numerical value
+successively.

+

By default, a rule is assigned a priority of 50. If the rule has the owise
+attribute, it is instead given the priority 200. You can see why this will
+cause owise rules to be tried after regular rules.

+

However, it is also possible to directly assign a numerical priority to a rule
+via the priority attribute. For example, here is an alternative way
+we could express the same two rules in the gradeFromPercentile function
+(lesson-07-c.k):

+
module LESSON-07-C
+  imports BOOL
+  imports INT
+
+  syntax Grade ::= "letter-A"
+                 | "letter-B"
+                 | "letter-C"
+                 | "letter-D"
+                 | "letter-F"
+                 | gradeFromPercentile(Int) [function]
+
+  rule gradeFromPercentile(I) => letter-A requires I >=Int 90 [priority(50)]
+  rule gradeFromPercentile(I) => letter-B requires I >=Int 80 [priority(200)]
+endmodule
+

We can, of course, assign a priority equal to any non-negative integer. For
+example, here is a more complex example that handles the remaining grades
+(lesson-07-d.k):

+
module LESSON-07-D
+  imports BOOL
+  imports INT
+
+  syntax Grade ::= "letter-A"
+                 | "letter-B"
+                 | "letter-C"
+                 | "letter-D"
+                 | "letter-F"
+                 | gradeFromPercentile(Int) [function]
+
+  rule gradeFromPercentile(I) => letter-A requires I >=Int 90 [priority(50)]
+  rule gradeFromPercentile(I) => letter-B requires I >=Int 80 [priority(51)]
+  rule gradeFromPercentile(I) => letter-C requires I >=Int 70 [priority(52)]
+  rule gradeFromPercentile(I) => letter-D requires I >=Int 60 [priority(53)]
+  rule gradeFromPercentile(_) => letter-F                     [priority(54)]
+endmodule
+

Note that we have introduced a new piece of syntax here: _. This is actually
+just a variable. However, as a special case, when a variable is named _, it
+does not bind a value that can be used on the right-hand side of the rule, or
+in a side condition. Effectively, _ is a placeholder variable that means "I
+don't care about this term."

+

In this example, we have explicitly expressed the order in which the rules of
+this function are tried. Since rules are tried in increasing numerical
+priority, we first try the rule with priority 50, then 51, then 52, 53, and
+finally 54.

+

As a final note, remember that if you assign a rule a priority higher than 200,
+it will be tried after a rule with the owise attribute, and if you assign
+a rule a priority less than 50, it will be tried before a rule with no
+explicit priority.
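For example, here is a hypothetical extra rule (not part of lesson-07-d.k)
that would be tried before all of the rules above, because its priority is
below the default of 50:

  // a guard for invalid input, tried before the default-priority rules
  rule gradeFromPercentile(I) => letter-F requires I <Int 0 [priority(10)]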

+

Exercises

  1. Write a function isEven that returns whether an integer is an even
     number. Use two rules and one side condition. The right-hand side of the
     rules should be Boolean literals. Refer back to domains.md for the
     relevant integer operations.

  2. Modify the calculator application from Lesson 1.6, Exercise 2, so that
     division by zero will no longer make krun crash with a "Division by zero"
     exception. Instead, the / function should not match any of its rules if
     the denominator is zero.

  3. Write your own implementation of ==, <, <=, >, >= for integers and modify
     your solution from Exercise 2 to use it. You can use any arithmetic
     operations in the INT module, but do not use any built-in Boolean
     functions for comparing integers.

     Hint: Use pattern matching and recursive definitions with rule
     priorities.


Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.8: Literate Programming with Markdown.

+

Lesson 1.8: Literate Programming with Markdown

+ +

The purpose of this lesson is to teach a paradigm for performing literate
+programming in K, and explain how this can be used to create K definitions
+that are also documentation.

+

Markdown and K

+ +

The K tutorial so far has been written in
+Markdown. Markdown,
+for those not already familiar, is a lightweight plain-text format for styling
+text. From this point onward, we assume you are familiar with Markdown and how
+to write Markdown code. You can refer to the above link for a tutorial if you
+are not already familiar.

+

What you may not necessarily realize, however, is that the K tutorial is also
+a sequence of K definitions written in the manner of
+Literate Programming.
+For detailed information about Literate Programming, you can read the linked
+Wikipedia article, but the short summary is that literate programming is a way
+of intertwining documentation and code together in a manner that allows
+executable code to also be, simultaneously, a documented description of that
+code.

+

K is provided with built-in support for literate programming using Markdown.
+By default, if you pass a file with the .md file extension to kompile, it
+will look for any code blocks containing k code in that file, extract out
+that K code into pure K, and then compile it as if it were a .k file.

+

A K code block begins with a line of text containing the keyword ```k,
+and ends when it encounters another ``` keyword.

+

For example, if you view the markdown source of this document, this is a K
+code block:

+
module LESSON-08
+  imports INT
+

Only the code inside K code blocks will actually be sent to the compiler. The
+rest, while it may appear in the document when rendered by a markdown viewer,
+is essentially a form of code comment.

+

When you have multiple K code blocks in a document, K will concatenate them
into a single file before passing it off to the outer parser.

+

For example, the following code block contains sentences that are part of the
+LESSON-08 module that we declared the beginning of above:

+
  syntax Int ::= Int "+" Int [function]
+  rule I1 + I2 => I1 +Int I2
+

Exercise

+ +

Compile this file with kompile README.md --main-module LESSON-08. Confirm
+that you can use the resulting compiled definition to evaluate the +
+function.

+

Markdown Selectors

+ +

On occasion, you may want to generate multiple K definitions from a single
+Markdown file. You may also wish to include a block of syntax-highlighted K
+code that nonetheless does not appear as part of your K definition. It is
+possible to accomplish this by means of the built-in support for syntax
+highlighting in Markdown. Markdown allows a code block that was begun with
+``` to be immediately followed by a string which is used to signify what
+programming language the following code is written in. However, this feature
+actually allows arbitrary text to appear describing that code block. Markdown
+parsers are able to parse this text and render the code block differently
+depending on what text appears after the backticks.

+

In K, you can use this functionality to specify one or more
+Markdown selectors which are used to describe the code block. A Markdown
+selector consists of a sequence of characters containing letters, numbers, and
+underscores. A code block can be designated with a single selector by appending
+the selector immediately following the backticks that open the code block.

+

For example, here is a code block with the foo selector:

+
foo bar
+

Note that this is not K code. By convention, K code should have the k
+selector on it. You can express multiple selectors on a code block by putting
+them between curly braces and prepending each with the . character. For
+example, here is a code block with the foo and k selectors:

+
  syntax Int ::= foo(Int) [function]
+  rule foo(0) => 0
+

Because this code block contains the k Markdown selector, by default it is
+included as part of the K definition being compiled.
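In the rendered page the opening line of the code fence is not visible, but in
the Markdown source a block carrying both selectors is opened with a line
along the lines of ```{.foo .k}, following the curly-brace syntax described
above.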

+

Exercise

+ +

Confirm this fact by using krun to evaluate foo(0).

+

Markdown Selector Expressions

+ +

By default, as previously stated, K includes in the definition any code block
+with the k selector. However, this is merely a specific instance of a general
+principle, namely, that K allows you to control which selectors get included
+in your K definition. This is done by means of the --md-selector flag to
+kompile. This flag accepts a Markdown selector expression, which you
+can essentially think of as a kind of Boolean algebra over Markdown selectors.
+Each selector becomes an atom, and you can combine these atoms via the &,
+|, !, and () operators.

+

Here is a grammar, written in K, of the language of Markdown selector
+expressions:

+
  syntax Selector ::= r"[0-9a-zA-Z_]+" [token]
+  syntax SelectorExp ::= Selector
+                       | "(" SelectorExp ")" [bracket]
+                       > right:
+                         "!" SelectorExp
+                       > right:
+                         SelectorExp "&" SelectorExp
+                       > right:
+                         SelectorExp "|" SelectorExp
+

Here is a selector expression that selects all the K code blocks in this
+definition except the one immediately above:

+
k & (! selector)
+

Addendum

+ +

This code block exists in order to make the above lesson a syntactically valid
+K definition. Consider why it is necessary.

+
endmodule
+

Exercises

  1. Compile this lesson with the selector expression k & (! foo) and confirm
     that you get a parser error if you try to evaluate the foo function with
     the resulting definition.

  2. Compile Lesson 1.3 as a K definition. Identify why it fails to compile.
     Then pass an appropriate --md-selector to the compiler in order to make
     it compile.

  3. Modify your calculator application from Lesson 1.7, Exercise 2, to be
     written in a literate style. Consider what text might be appropriate to
     turn the resulting markdown file into documentation for your calculator.


Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.9: Unparsing and the format and color attributes.

+

Lesson 1.9: Unparsing and the format and color attributes

+ +

The purpose of this lesson is to teach the user about how terms are
+pretty-printed in K, and how the user can make adjustments to the default
+settings for how to print specific terms.

+

Parsing, Execution, and Unparsing

+ +

When you use krun to interpret a program, the tool passes through three major
+phases. In the first, parsing, the program itself is parsed using either kast
+or an ahead-of-time parser generated via Bison, and the resulting AST becomes
+the input to the interpreter. In the second phase, execution, K evaluates
+functions and (as we will discuss in depth later) performs rewrite steps to
+iteratively transform the program state. The third and final phase is called
+unparsing, because it consists of taking the final state of the application
+after the program has been interpreted, and converting it from an AST back into
+text that (in theory, anyway) could be parsed back into the same AST that was
+the output of the execution phase.

+

In practice, unparsing is not always a precise inverse of parsing. It turns
out (although we are not going to cover exactly why here) that constructing a
sound algorithm which takes a grammar and an AST and emits text that parses
via that grammar back to the original AST is an NP-hard problem. As a result,
in the interest of avoiding exponential-time algorithms when users rarely care
about unparsing being completely sound, we take certain shortcuts, providing a
linear-time algorithm that approximates a sound solution while sacrificing the
guarantee that the result can be parsed back into the exact original term in
all cases.

+

This is a lot of theoretical explanation, but at root, the unparsing process
+is fairly simple: it takes a K term that is the output of execution and pretty
+prints it according to the syntax defined by the user in their K definition.
+This is useful because the original AST is not terribly user-readable, and it
+is difficult to visualize the entire term or decipher information about the
+final state of the program at a quick glance. Of course, in rare cases, the
+pretty-printed configuration loses information of relevance, which is why K
+allows you to obtain the original AST on request.

+

As an example of all of this, consider the following K definition
+(lesson-09-a.k):

+
module LESSON-09-A
+  imports BOOL
+
+  syntax Exp ::= "(" Exp ")" [bracket]
+               | Bool
+               > "!" Exp
+               > left:
+                 Exp "&&" Exp
+               | Exp "^" Exp
+               | Exp "||" Exp
+
+  syntax Exp ::= id(Exp) [function]
+  rule id(E) => E
+endmodule
+

This is similar to the grammar we defined in LESSON-06-C, with the difference
+that the Boolean expressions are now constructors of sort Exp and we define a
+trivial function over expressions that returns its argument unchanged.

+

We can now parse a simple program in this definition and use it to unparse some
+Boolean expressions. For example (exp.bool):

+
id(true&&false&&!true^(false||true))
+

This program is not particularly legible at first glance, because all
extraneous whitespace has been removed. However, if we run krun exp.bool, we
see that the unparser pretty-prints this expression rather nicely:

+
<k>
+  true && false && ! true ^ ( false || true ) ~> .
+</k>
+

Notably, not only does K insert whitespace where appropriate, it is also smart
+enough to insert parentheses where necessary in order to ensure the correct
parse. For example, without those parentheses, the expression above would
parse the same as the following one:

+
(((true && false) && ! true) ^ false) || true
+

Indeed, you can confirm this by passing that exact expression to the id
+function and evaluating it, then looking at the result of the unparser:

+
<k>
+  true && false && ! true ^ false || true ~> .
+</k>
+

Here, because the meaning of the AST is the same both with and without
+parentheses, K does not insert any parentheses when unparsing.

+

Exercise

+ +

Modify the grammar of LESSON-09-A above so that the binary operators are
+right associative. Try unparsing exp.bool again, and note how the result is
+different. Explain the reason for the difference.

+

Custom unparsing of terms

+ +

You may have noticed that right now, the unparsing of terms is not terribly
+imaginative. All it is doing is taking each child of the term, inserting it
+into the non-terminal positions of the production, then printing the production
+with a space between each terminal or non-terminal. It is easy to see why this
+might not be desirable in some cases. Consider the following K definition
+(lesson-09-b.k):

+
module LESSON-09-B
+  imports BOOL
+
+  syntax Stmt ::= "{" Stmt "}" | "{" "}"
+                > right:
+                  Stmt Stmt
+                | "if" "(" Bool ")" Stmt
+                | "if" "(" Bool ")" Stmt "else" Stmt [avoid]
+endmodule
+

This is a statement grammar, simplified to the point of meaninglessness, but
+still useful as an object lesson in unparsing. Consider the following program
+in this grammar (if.stmt):

+
if (true) {
+  if (true) {}
+  if (false) {}
+  if (true) {
+    if (false) {} else {}
+  } else {
+    if (false) {}
+  }
+}
+

This is how that term would be unparsed if it appeared in the output of krun:

+
if ( true ) { if ( true ) { } if ( false ) { } if ( true ) { if ( false ) { } else { } } else { if ( false ) { } } }
+

This is clearly much less legible than we started with! What are we to do?
+Well, K provides an attribute, format, that can be applied to any production,
+which controls how that production gets unparsed. You've seen how it gets
+unparsed by default, but via this attribute, the developer has complete control
+over how the term is printed. Of course, the user can trivially create ways to
+print terms that would not parse back into the same term. Sometimes this is
+even desirable. But in most cases, what you are interested in is controlling
+the line breaking, indentation, and spacing of the production.

+

Here is an example of how you might choose to apply the format attribute
+to improve how the above term is unparsed (lesson-09-c.k):

+
module LESSON-09-C
+  imports BOOL
+
+  syntax Stmt ::= "{" Stmt "}" [format(%1%i%n%2%d%n%3)] | "{" "}" [format(%1%2)]
+                > right:
+                  Stmt Stmt [format(%1%n%2)]
+                | "if" "(" Bool ")" Stmt [format(%1 %2%3%4 %5)]
+                | "if" "(" Bool ")" Stmt "else" Stmt [avoid, format(%1 %2%3%4 %5 %6 %7)]
+endmodule
+

If we compile this new definition and unparse the same term, this is the
+result we get:

+
if (true) {
+  if (true) {}
+  if (false) {}
+  if (true) {
+    if (false) {} else {}
+  } else {
+    if (false) {}
+  }
+}
+

This is the exact same text we started with! By adding the format attributes,
+we were able to indent the body of code blocks, adjust the spacing of if
+statements, and put each statement on a new line.

+

How exactly was this achieved? Well, each time the unparser reaches a term,
+it looks at the format attribute of that term. That format attribute is a
+mix of characters and format codes. Format codes begin with the %
+character. Each character in the format attribute other than a format code is
+appended verbatim to the output, and each format code is handled according to
+its meaning, transformed (possibly recursively) into a string of text, and
+spliced into the output at the position the format code appears in the format
+string.

+

Provided for reference is a table with a complete list of all valid format
+codes, followed by their meaning:

Format Code | Meaning
----------- | -------
n           | Insert '\n' followed by the current indentation level
i           | Increase the current indentation level by 1
d           | Decrease the current indentation level by 1
c           | Move to the next color in the list of colors for this production (see next section)
r           | Reset color to the default foreground color for the terminal (see next section)
an integer  | Print a terminal or non-terminal from the production. The integer is treated as a 1-based index into the terminals and non-terminals of the production. If the offset refers to a terminal, move to the next color in the list of colors for this production, print the value of that terminal, then reset the color to the default foreground color for the terminal. If the offset refers to a regular expression terminal, it is an error. If the offset refers to a non-terminal, unparse the corresponding child of the current term (starting with the current indentation level) and print the resulting text, then set the current color and indentation level to the color and indentation level following unparsing that term.
other char  | Print that character verbatim
+
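As a worked example (reading off the attributes in lesson-09-c.k above),
consider how the format string %1%i%n%2%d%n%3 on the production "{" Stmt "}"
unparses the term { {} }: %1 prints the terminal {, %i increases the
indentation level, %n prints a newline followed by the new indentation, %2
unparses the inner statement {} at that indentation, %d decreases the
indentation level, %n prints a newline at the restored indentation, and %3
prints the terminal }, producing:

{
  {}
}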

Exercise

+ +

Change the format attributes for LESSON-09-C so that if.stmt will unparse
+as follows:

+
if (true)
+{
+  if (true)
+  {
+  }
+  if (false)
+  {
+  }
+  if (true)
+  {
+    if (false)
+    {
+    }
+    else
+    {
+    }
+  }
+  else
+  {
+    if (false)
+    {
+    }
+  }
+}
+

Output coloring

+ +

When the output of unparsing is displayed on a terminal supporting colors, K
+is capable of coloring the output, similar to what is possible with a syntax
+highlighter. This is achieved via the color and colors attributes.

+

Essentially, both the color and colors attributes are used to construct a
+list of colors associated with each production, and then the format attribute
+is used to control how those colors are used to unparse the term. At its most
+basic level, you can set the color attribute to color all the terminals in
+the production a certain color, or you can use the colors attribute to
+specify a comma-separated list of colors for each terminal in the production.
+At a more advanced level, the %c and %r format codes control how the
+formatter interacts with the list of colors specified by the colors
+attribute. You can essentially think of the color attribute as a way of
+specifying that you want all the colors in the list to be the same color.
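For instance, here is a hypothetical production (not part of the lesson files)
that uses the colors attribute to give each terminal its own color, assuming,
as described above, that the list provides one color per terminal in the order
the terminals appear:

  syntax Stmt ::= "if" "(" Bool ")" Stmt "else" Stmt [colors(yellow,white,white,yellow)]

With the default format, the keywords if and else would then be printed in
yellow and the parentheses in white.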

+

Note that the %c and %r format codes are relatively primitive in nature.
+The color and colors attributes merely maintain a list of colors, whereas
+the %c and %r format codes merely control how to advance through that list
+and how individual text is colored.

+

It is an error if the colors attribute does not provide all the colors needed
+by the terminals and escape codes in the production. %r does not change the
+position in the list of colors at all, so the next %c will advance to the
+following color.

+

As a complete example, here is a variant of LESSON-09-A which colors the
+various boolean operators:

+
module LESSON-09-D
+  imports BOOL
+
+  syntax Exp ::= "(" Exp ")" [bracket]
+               | Bool
+               > "!" Exp [color(yellow)]
+               > left:
+                 Exp "&&" Exp [color(red)]
+               | Exp "^" Exp [color(blue)]
+               | Exp "||" Exp [color(green)]
+
+  syntax Exp ::= id(Exp) [function]
+  rule id(E) => E
+endmodule
+

For a complete list of allowed colors, see
+here.

+

Exercises

  1. Use the color attribute on LESSON-09-C to color the keywords true and
     false one color, the keywords if and else another color, and the
     operators (, ), {, and } a third color.

  2. Use the format, color, and colors attributes to tell the unparser to
     style the expression grammar from Lesson 1.8, Exercise 3 according to
     your own personal preferences for syntax highlighting and code
     formatting. You can view the result of the unparser on a function term
     without evaluating that function by means of the command
     kparse <file> | kore-print -.


Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.10: Strings.

+

Lesson 1.10: Strings

+ +

The purpose of this lesson is to explain how to use the String sort in K to
+represent sequences of characters, and explain where to find additional
+information about builtin functions over strings.

+

The String Sort

+ +

In addition to the Int and Bool sorts covered in
+Lesson 1.6, K provides, among others, the
+String sort to represent sequences of characters. You can import this
+functionality via the STRING-SYNTAX module, which contains the syntax of
+string literals in K, and the STRING module, which contains all the functions
+that operate over the String type.

+

Strings in K are double-quoted. The following list of escape sequences is
+supported:

Escape Sequence | Meaning
--------------- | -------
\"              | The literal character "
\\              | The literal character \
\n              | The newline character (ASCII code 0x0a)
\r              | The carriage return character (ASCII code 0x0d)
\t              | The tab character (ASCII code 0x09)
\f              | The form feed character (ASCII code 0x0c)
\x00            | \x followed by 2 hexadecimal digits indicates a code point between 0x00 and 0xFF
\u0000          | \u followed by 4 hexadecimal digits indicates a code point between 0x0000 and 0xFFFF
\U00000000      | \U followed by 8 hexadecimal digits indicates a code point between 0x000000 and 0x10FFFF
+

Please note that, at the time of writing, K's Unicode support is incomplete,
so you may run into errors using code points greater than 0xFF.

+

As an example, you can construct a string literal containing the following
+block of text:

+
This is an example block of text.
+Here is a quotation: "Hello world."
+	This line is indented.
+ÁÉÍÓÚ
+

Like so:

+
"This is an example block of text.\nHere is a quotation: \"Hello world.\"\n\tThis line is indented.\n\xc1\xc9\xcd\xd3\xda\n"
+

Basic String Functions

+ +

The full list of functions provided for the String sort can be found in
+domains.md, but here we
+describe a few of the more basic ones.

+

String concatenation

+ +

The concatenation operator for strings is +String. For example, consider
+the following K rule that constructs a string from component parts
+(lesson-10.k):

+
module LESSON-10
+  imports STRING
+
+  syntax String ::= msg(String) [function]
+  rule msg(S) => "The string you provided: " +String S +String "\nHave a nice day!"
+endmodule
+

Note that this operator is O(N), so repeated concatenations are inefficient.
+For information about efficient string concatenation, refer to
+Lesson 2.14.

+

String length

+ +

The function to return the length of a string is lengthString. For example,
+lengthString("foo") will return 3, and lengthString("") will return 0.
+The return value is the length of the string in code points.

+

Substring computation

+ +

The function to compute the substring of a string is substrString. It takes a
string and two integer indices, counting from 0, and returns the substring of
that string within the range [start..end). It is only defined if
end >= start, start >= 0, and end <= the length of the string. Here, for
example, we return the first 5 characters of a string:

+
substrString(S, 0, 5)
+

Here we return all but the first 3 characters:

+
substrString(S, 3, lengthString(S))
+

Exercises

  1. Write a function that takes a paragraph of text (i.e., a sequence of
     sentences, each ending in a period), and constructs a new (nonsense)
     sentence composed of the first word of each sentence, followed by a
     period. Do not worry about capitalization or periods within a sentence
     which do not end the sentence (e.g., "Dr."). You can assume that all
     whitespace within the paragraph consists of spaces. For more information
     about the functions over strings required to implement such a function,
     refer to domains.md.


Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.11: Casting Terms.

+

Lesson 1.11: Casting Terms

+ +

The purpose of this lesson is to explain how to use cast expressions in
+order to disambiguate terms using sort information. We also explain how the
+variable sort inference algorithm works in K, and how to change the default
+behavior by casting variables to a particular sort.

+

Casting in K

+ +

Sometimes the grammar you write for your rules in K can be a little bit
+ambiguous on purpose. While grammars for programming languages may be
+unambiguous when considered in their entirety, K allows you to write rules
+involving arbitrary fragments of that grammar, and those fragments can
+sometimes be ambiguous by themselves, or similar enough to other fragments
+of the grammar to trigger ambiguity. As a result, in addition to the tools
+covered in Lesson 1.4, K provides one
+additional powerful tool for disambiguation: cast expressions.

+

K provides three main types of casts: the semantic cast, the strict cast, and
+the projection cast. We will cover each of them, and their similarities and
+differences, in turn.

+

Semantic casts

+ +

The most basic, and most common, type of cast in K is called the
+semantic cast. For every sort S declared in a module, K provides the
+following (implicit) production for use in sentences:

+
  syntax S ::= S ":S"
+

Note that S simply represents the name of the sort. For example, if we
+defined a sort Exp, the actual production for that sort would be:

+
  syntax Exp ::= Exp ":Exp"
+

At runtime, this expression will not actually exist; it is merely an annotation
+to the compiler describing the sort of the term inside the cast. It is telling
+the compiler that the term inside the cast must be of sort Exp. For example,
+if we had the following grammar:

+
module LESSON-11-A
+  imports INT
+
+  syntax Exp ::= Int | Exp "+" Exp
+  syntax Stmt ::= "if" "(" Exp ")" Stmt | "{" "}"
+endmodule
+

Then we would be able to write 1:Exp, or (1 + 2):Exp, but not {}:Exp.

+

You can also restrict the sort that a variable in a rule will match by casting
+it. For example, consider the following additional module:

+
module LESSON-11-B
+  imports LESSON-11-A
+  imports BOOL
+
+  syntax Term ::= Exp | Stmt
+  syntax Bool ::= isExpression(Term) [function]
+
+  rule isExpression(_E:Exp) => true
+  rule isExpression(_) => false [owise]
+endmodule
+

Here we have defined a very simple function that decides whether a term is
+an expression or a statement. It does this by casting the variable inside the
+isExpression rule to sort Exp. As a result, that variable will only match terms
+of sort Exp. Thus, isExpression(1) will return true, as will isExpression(1 + 2), but
+isExpression({}) will return false.

+

Exercise

+ +

Verify this fact for yourself by running isExpression on the above examples. Then
+write an isStatement function, and test that it works as expected.

+

Strict casts

+ +

On occasion, a semantic cast is not strict enough. It might be that you want
+to, for disambiguation purposes, say exactly what sort a term is. For
+example, consider the following definition:

+
module LESSON-11-C
+  imports INT
+
+  syntax Exp ::= Int
+               | "add[" Exp "," Exp "]"   [group(exp)]
+  syntax Exp2 ::= Exp
+               | "add[" Exp2 "," Exp2 "]" [group(exp2)]
+endmodule
+

This grammar is a little ambiguous and contrived, but it serves to demonstrate
+how a semantic cast might be insufficient to disambiguate a term. If we were
+to write the term add[ I1:Int , I2:Int ]:Exp2, the term would be ambiguous,
+because the cast is not sufficiently strict to determine whether you mean
+to derive the "add" production defined in group exp or the one in group exp2.

+

In this situation, there is a solution: the strict cast. For every sort
+S in your grammar, K also defines the following production:

+
  syntax S ::= S "::S"
+

This may at first glance seem the same as the previous cast. And indeed,
+from the perspective of the grammar and from the perspective of rewriting,
+they are in fact identical. However, the second variant has a unique meaning
+in the type system of K: namely, the term inside the cast cannot be a
+subsort, i.e., a term of another sort S2 such that the production
+syntax S ::= S2 exists.

+

As a result, if we were to write in the above grammar the term
+add[ I1:Int , I2:Int ]::Exp2, then we would know that the second derivation above
+should be chosen, whereas if we want the first derivation, we could write
+add[ I1:Int , I2:Int ]::Exp.

+

Care must be taken when using a strict cast with brackets. For example, consider a
+similar grammar but using an infix "+":

+
module LESSON-11-D
+  imports INT
+
+  syntax Exp ::= Int
+               | Exp "+" Exp   [group(exp)]
+  syntax Exp2 ::= Exp
+               | Exp2 "+" Exp2 [group(exp2)]
+               | "(" Exp2 ")"  [bracket]
+endmodule
+

The term I1:Int + I2:Int is ambiguous and could refer to either the production
+in group exp or the one in group exp2. To differentiate, you might try to write
+(I1:Int + I2:Int)::Exp2 similarly to the previous example.

+

Unfortunately though, this is still ambiguous. Here, the strict cast ::Exp2 applies
+directly to the brackets themselves rather than the underlying term within those brackets.
+As a result, it enforces that (I1:Int + I2:Int) cannot be a strict subsort of Exp2, but
+it has no effect on the sort of the subterm I1:Int + I2:Int.

+

For cases like this, K provides an alternative syntax for strict casts:

+
  syntax S ::= "{" S "}::S"
+

The ambiguity can then be resolved with {I1:Int + I2:Int}::Exp or {I1:Int + I2:Int}::Exp2.

+

Projection casts

+ +

Thus far we have focused entirely on casts which exist solely to inform the
+compiler about the sort of terms. However, sometimes when dealing with grammars
+containing subsorts, it can be desirable to reason with the subsort production
+itself, which injects one sort into another. Remember from above that such
+a production looks like syntax S ::= S2. This type of production, called a
+subsort production, can be thought of as a type of inheritance involving
+constructors. If we have the above production in our grammar, we say that S2
+is a subsort of S, or that any S2 is also an S. K implicitly maintains a
+symbol at runtime which keeps track of where such subsortings occur; this
+symbol is called an injection.

+

Sometimes, when one sort is a subsort of another, it can be the case that
+a function returns one sort, but you actually want to cast the result of
+calling that function to another sort which is a subsort of the first sort.
+This is similar to what happens with inheritance in an object-oriented
+language, where you might cast a superclass to a subclass if you know for
+sure the object at runtime is in fact an instance of that class.

+

K provides something similar for subsorts: the projection cast.

+

For each pair of sorts S and S2, K provides the following production:

+
  syntax S ::= "{" S2 "}" ":>S"
+

What this means is that you take any term of sort S2 and cast it to sort
+S. If the term of sort S2 consists of an injection containing a term of sort
+S, then this will return that term. Otherwise, an error occurs and rewriting
+fails, returning the projection function which failed to apply. The sort is
+not actually checked at compilation time; rather, it is a runtime check
+inserted into the code that runs when the rule applies.

+

For example, here is a module that makes use of projection casts:

+
module LESSON-11-E
+  imports INT
+  imports BOOL
+
+  syntax Exp ::= Int | Bool | Exp "+" Exp | Exp "&&" Exp
+
+  syntax Exp ::= eval(Exp) [function]
+  rule eval(I:Int) => I
+  rule eval(B:Bool) => B
+  rule eval(E1 + E2) => {eval(E1)}:>Int +Int {eval(E2)}:>Int
+  rule eval(E1 && E2) => {eval(E1)}:>Bool andBool {eval(E2)}:>Bool
+endmodule
+

Here we have defined constructors for a simple expression language over
+Booleans and integers, as well as a function eval that evaluates these
+expressions to a value. Because that value could be an integer or a Boolean,
+we need the casts in the last two rules in order to meet the type signature of
++Int and andBool. Of course, the user can write ill-formed expressions like
+1 && true or false + true, but these will cause errors at runtime, because
+the projection cast will fail.
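
As a rough sketch of how these rules fire (assuming the module above is
compiled as-is), evaluating a well-formed expression projects each
intermediate result to the sort expected by +Int or andBool:

  eval(1 + 2)
  => {eval(1)}:>Int +Int {eval(2)}:>Int
  => 1 +Int 2
  => 3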

+

Exercises

+ +
  1. Extend the eval function in LESSON-11-E to include Strings and add a .
     operator which concatenates them.

  2. Modify your solution from Lesson 1.9, Exercise 2 by using an Exp sort to
     express the integer and Boolean expressions that it supports, in the same
     style as LESSON-11-E. Then write an eval function that evaluates all terms
     of sort Exp to either a Bool or an Int.
+

Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.12: Syntactic Lists.

+

Lesson 1.12: Syntactic Lists

+ +

The purpose of this lesson is to explain how K provides support for syntactic
+repetition through the use of the List{} and NeList{} constructs,
+generally called syntactic lists.

+

The List{} construct

+ +

Sometimes, when defining a grammar in K, it is useful to define a syntactic
+construct consisting of an arbitrary-length sequence of items. For example,
+you might wish to define a function call construct, and need to express a way
+of passing arguments to the function. You can in theory simply define these
+productions using ordinary constructors, but it can be tricky to get the syntax
+exactly right in K without a lot of tedious glue code.

+

For this reason, K provides a way of specifying that a non-terminal represents
+a syntactic list (lesson-12-a.k):

+
module LESSON-12-A-SYNTAX
+  imports INT-SYNTAX
+
+  syntax Ints ::= List{Int,","}
+endmodule
+
+module LESSON-12-A
+  imports LESSON-12-A-SYNTAX
+endmodule
+

Note that instead of a sequence of terminals and non-terminals, the right hand
+side of the Ints production contains the symbol List followed by two items
+in curly braces. The first item is the non-terminal which is the element type
+of the list, and the second item is a terminal representing the separator of
+the list. As a special case, lists which are separated only by whitespace can
+be specified with a separator of "".

+

This List{} construct is roughly equivalent to the following definition
+(lesson-12-b.k):

+
module LESSON-12-B-SYNTAX
+  imports INT-SYNTAX
+
+  syntax Ints ::= Int "," Ints | ".Ints"
+endmodule
+
+module LESSON-12-B
+  imports LESSON-12-B-SYNTAX
+endmodule
+

As you can see, the List{} construct represents a cons-list with an element
+at the head and another list at the tail. The empty list is represented by
+a . followed by the sort of the list.
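
For example, under this desugaring, the three-element list of integers that we
would normally write as 1, 2, 3 is, in its fully explicit form, the following
term:

  1, 2, 3, .Ints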

+

However, the List{} construct provides several key syntactic conveniences
+over the above definition. First of all, when writing a list in a rule,
+explicitly writing the terminator is not always required. For example, consider
+the following additional module (lesson-12-c.k):

+
module LESSON-12-C
+  imports LESSON-12-A
+  imports INT
+
+  syntax Int ::= sum(Ints) [function]
+  rule sum(I:Int) => I
+  rule sum(I1:Int, I2:Int, Is:Ints) => sum(I1 +Int I2, Is)
+endmodule
+

Here we see a function that sums together a non-empty list of integers. Note in
+particular the first rule. We do not explicitly mention .Ints, but in fact,
+the rule in question is equivalent to the following rule:

+
  rule sum(I:Int, .Ints) => I
+

The reason for this is that K will automatically insert a list terminator
+anywhere a syntactic list is expected, but an element of that list appears
+instead. This works even with lists of more than one element:

+
  rule sum(I1:Int, I2:Int) => I1 +Int I2
+

This rule is redundant, but here we explicitly match a list of exactly two
+elements, because the .Ints is implicitly added after I2.

+

Parsing Syntactic Lists in Programs

+ +

An additional syntactic convenience takes place when you want to express a
+syntactic list in the input to krun. In this case, K will automatically
+transform the grammar in LESSON-12-B-SYNTAX into the following
+(lesson-12-d.k):

+
module LESSON-12-D
+  imports INT-SYNTAX
+
+  syntax Ints ::= #NonEmptyInts | #IntsTerminator
+  syntax #NonEmptyInts ::= Int "," #NonEmptyInts
+                         | Int #IntsTerminator
+  syntax #IntsTerminator ::= ""
+endmodule
+

This allows you to express the usual comma-separated list of arguments where
+an empty list is represented by the empty string, and you don't have to
+explicitly terminate the list. Because of this, we can write the syntax
+of function calls in C very easily (lesson-12-e.k):

+
module LESSON-12-E
+  syntax Id ::= r"[a-zA-Z_][a-zA-Z0-9_]*" [token]
+  syntax Exp ::= Id | Exp "(" Exps ")"
+  syntax Exps ::= List{Exp,","}
+endmodule
+

Exercise

+ +

Write a function concat which takes a list of String and concatenates them
+all together. Do not worry if the function is O(n^2).
+Test your implementation using the syntactic sugar for lists added by the parser.

+

Then write some function call expressions using identifiers in C and verify with
+kast that the above grammar captures the intended syntax. Make sure to test
+with function calls with zero, one, and two or more arguments.

+

The NeList{} construct

+ +

One limitation of the List{} construct is that it is always possible to
+write a list of zero elements where a List{} is expected. While this is
+desirable in a number of cases, it is sometimes not what the grammar expects.

+

For example, in C, it is not allowable for an enum definition to have zero
+members. In other words, if we were to write the grammar for enumerations like
+so (lesson-12-f.k):

+
module LESSON-12-F
+  syntax Id ::= r"[a-zA-Z_][a-zA-Z0-9_]*" [token]
+  syntax Exp ::= Id
+
+  syntax EnumSpecifier ::= "enum" Id "{" Ids "}"
+  syntax Ids ::= List{Id,","}
+endmodule
+

Then we would be syntactically allowed to write enum X {}, which instead
+ought to be a syntax error.

+

For this reason, we introduce the additional NeList{} construct. The syntax
+is identical to List{}, except with NeList instead of List before the
+curly braces. When parsing rules, it behaves identically to the List{}
+construct. However, when parsing inputs to krun, the above grammar, if we
+replaced syntax Ids ::= List{Id,","} with syntax Ids ::= NeList{Id,","},
+would become equivalent to the following (lesson-12-g.k):

+
module LESSON-12-G
+  syntax Id ::= r"[a-zA-Z_][a-zA-Z0-9_]*" [token]
+  syntax Exp ::= Id
+
+  syntax EnumSpecifier ::= "enum" Id "{" Ids "}"
+  syntax Ids ::= Id | Id "," Ids
+endmodule
+

In other words, only non-empty lists of Id would be allowed.

+

Exercises

+ +
  1. Modify the sum function in LESSON-12-C so that the Ints sort is an
     NeList{}. Verify that calling sum() with no arguments is now a syntax
     error.

  2. Write a modified sum function with the List construct that can also sum
     up an empty list of arguments. In such a case, the sum ought to be 0.
+

Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.13: Basics of K Rewriting.

+

Lesson 1.13: Basics of K Rewriting

+ +

The purpose of this lesson is to explain how rewrite rules that are not the
+definition of a function behave, and how, using these rules, you can construct
+a semantics of programs in a programming language in K.

+

Recap: Function rules in K

+ +

Recall from Lesson 1.2 that we have, thus far,
+introduced two types of productions in K: constructors and functions.
+A function is identified by the function attribute placed on the
+production. As you may recall, when we write a rule with a function on the
+left-hand side of the => operator, we are defining the meaning of that
+function for inputs which match the patterns on the left-hand side of the rule.
+If the arguments to the function match the patterns, then the function is
+evaluated to the value constructed by substituting the bindings for the
+variables into the right-hand side of the rule.

+

Top-level rules

+ +

However, function rules are not the only type of rule permissible in K, nor
+even the most frequently used. K also has a concept of a
+top-level rewrite rule. The simplest way to ensure that a rule is treated
+as a top-level rule is for the left-hand side of the rule to mention one or
+more cells. We will cover how cells work and are declared in more detail
+in a later lesson, but for now, what you should know is that when we ran krun
+in our very first example in Lesson 1.2 and got the following output:

+
<k>
+  Yellow ( ) ~> .
+</k>
+

<k> is a cell, known by convention as the K cell. This cell is available
+by default in any definition without needing to be explicitly declared.

+

The K cell contains a single term of sort K. K is a predefined sort in K
+with two constructors, which can be roughly represented by the following
+grammar:

+
  syntax K ::= KItem "~>" K
+             | "."
+

As a syntactic convenience, K allows you to treat ~> like it is an
+associative list (i.e., as if it were defined as syntax K ::= K "~>" K).
+When a definition is compiled, it will automatically transform the rules you
+write so that they treat the K sort as a cons-list. Another syntactic
+convenience is that, for disambiguation purposes, you can write .K anywhere
+you would otherwise write . and the meaning is identical.

+

Now, you may notice that the above grammar mentions the sort KItem. This is
+another built-in sort in K. For every sort S declared in a definition (with
+the exception of K and KItem), K will implicitly insert the following
+production:

+
  syntax KItem ::= S
+

In other words, every sort is a subsort of the sort KItem, and thus a term
+of any sort can be injected as an element of a term of sort K, also called
+a K sequence.
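
For example, in a definition that imports INT and BOOL, the following is a
valid term of sort K: a sequence containing an Int, then a Bool, then the
empty continuation (shown here purely for illustration):

  1 ~> true ~> .K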

+

By default, when you krun a program, the AST of the program is inserted as
+the sole element of a K sequence into the <k> cell. This explains why we
+saw the output we did in Lesson 1.2.

+

With these preliminaries in mind, we can now explain how top-level rewrite
+rules work in K. Put simply, any rule where there is a cell (such as the K
+cell) at the top on the left-hand side will be a top-level rewrite rule. Once
+the initial program has been inserted into the K cell, the resulting term,
+called the configuration, will be matched against all the top-level
+rewrite rules in the definition. If only one rule matches, the substitution
+generated by the matching will be applied to the right-hand side of the rule
+and the resulting term is rewritten to be the new configuration. Rewriting
+proceeds by iteratively applying rules, also called taking steps, until
+no top-level rewrite rule can be applied. At this point the configuration
+becomes the final configuration and is output by krun.

+

If more than one top-level rule applies, by default, K will pick just one
+of those rules, apply it, and continue rewriting. However, it is
+non-deterministic which rule applies. In theory, it could be any of them.
+By passing the --search flag to krun, you are able to tell krun to
+explore all possible non-deterministic choices, and generate a complete list of
+all possible final configurations reachable by each non-deterministic choice that
+can be made. Note that the --search flag to krun only works if you pass
+--enable-search to kompile first.
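
As a rough sketch of this workflow (the file names here are hypothetical):

  kompile lesson.k --enable-search
  krun program.txt --search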

+

Unlike top-level rewrite rules, function rules are not associated with any
+particular set of cells in the configuration (although they can contain cells
+in their function arguments and return value). While top-level rewrite rules
+apply to the entire term being rewritten, function rules apply anywhere a
+function application for that function appears, and are immediately rewritten
+to their return value in that position.

+

Another key distinction between top-level rules and function rules is that
+function symbols, i.e., productions with the function attribute, are
+mathematical functions rather than constructors. While a constructor is
+logically distinct from any other constructor of the same sort, and can be
+matched against unconditionally, a function does not necessarily have the
+same restriction unless it happens to be an injective function. Thus, two
+function symbols with different arguments may still ultimately produce the
+same value and therefore compare equal to one another. Due to this, concrete
+execution (i.e., all K definitions introduced thus far; see Lesson 1.21)
+introduces the restriction that you cannot match on a function symbol on the
+left-hand side of a rule, except as the top symbol on the left-hand side of
+a function rule. This restriction will be later lifted when we introduce the
+Haskell Backend which performs symbolic execution.

+

Exercise

+ +

Pass a program containing no functions to krun. You can use a term of sort
+Exp from LESSON-11-E. Observe the output and try to understand why you get
+the output you do. Then write two rules that rewrite that program to another.
+Run krun --search on that program and observe both results. Then add a third
+rule that rewrites one of those results again. Test that that rule applies as
+well.

+

Using top-level rules to evaluate expressions

+ +

Thus far, we have focused primarily on defining functions over constructors
+in K. However, now that we have a basic understanding of top-level rules,
+it is possible to introduce a rewrite system to our definitions. A rewrite
+system is a collection of top-level rewrite rules which performs an organized
+transformation of a particular program into a result which expresses the
+meaning of that program. For example, we might rewrite an expression in a
+programming language into a value representing the result of evaluating that
+expression.

+

Recall in Lesson 1.11, we wrote a simple grammar of Boolean and integer
+expressions that looked roughly like this (lesson-13-a.k):

+
module LESSON-13-A
+  imports INT
+
+  syntax Exp ::= Int
+               | Bool
+               | Exp "+" Exp
+               | Exp "&&" Exp
+endmodule
+

In that lesson, we defined a function eval which evaluated such expressions
+to either an integer or Boolean.

+

However, it is more idiomatic to evaluate such expressions using top-level
+rewrite rules. Here is how one might do so in K (lesson-13-b.k):

+
module LESSON-13-B-SYNTAX
+  imports UNSIGNED-INT-SYNTAX
+  imports BOOL-SYNTAX
+
+  syntax Val ::= Int | Bool
+  syntax Exp ::= Val
+               > left: Exp "+" Exp
+               > left: Exp "&&" Exp
+endmodule
+
+module LESSON-13-B
+  imports LESSON-13-B-SYNTAX
+  imports INT
+  imports BOOL
+
+  rule <k> I1:Int + I2:Int ~> K:K </k> => <k> I1 +Int I2 ~> K </k>
+  rule <k> B1:Bool && B2:Bool ~> K:K </k> => <k> B1 andBool B2 ~> K </k>
+
+  syntax KItem ::= freezer1(Val) | freezer2(Exp)
+                 | freezer3(Val) | freezer4(Exp)
+
+  rule <k> E1:Val + E2:Exp ~> K:K </k> => <k> E2 ~> freezer1(E1) ~> K </k> [priority(51)]
+  rule <k> E1:Exp + E2:Exp ~> K:K </k> => <k> E1 ~> freezer2(E2) ~> K </k> [priority(52)]
+  rule <k> E1:Val && E2:Exp ~> K:K </k> => <k> E2 ~> freezer3(E1) ~> K </k> [priority(51)]
+  rule <k> E1:Exp && E2:Exp ~> K:K </k> => <k> E1 ~> freezer4(E2) ~> K </k> [priority(52)]
+
+  rule <k> E2:Val ~> freezer1(E1) ~> K:K </k> => <k> E1 + E2 ~> K </k>
+  rule <k> E1:Val ~> freezer2(E2) ~> K:K </k> => <k> E1 + E2 ~> K </k>
+  rule <k> E2:Val ~> freezer3(E1) ~> K:K </k> => <k> E1 && E2 ~> K </k>
+  rule <k> E1:Val ~> freezer4(E2) ~> K:K </k> => <k> E1 && E2 ~> K </k>
+endmodule
+

This is of course rather cumbersome currently, but we will soon introduce
+syntactic convenience which makes writing definitions of this type considerably
+easier. For now, notice that there are roughly 3 types of rules here: the first
+matches a K cell in which the first element of the K sequence is an Exp whose
+arguments are values, and rewrites the first element of the sequence to the
+result of that expression. The second also matches a K cell with an Exp in
+the first element of its K sequence, but it matches when one or both arguments
+of the Exp are not values, and replaces the first element of the K sequence
+with two new elements: one being an argument to evaluate, and the other being
+a special constructor called a freezer. Finally, the third matches a K
+sequence where a Val is first, and a freezer is second, and replaces them
+with a partially evaluated expression.

+

This general pattern is what is known as heating an expression,
+evaluating its arguments, cooling the arguments into the expression
+again, and evaluating the expression itself. By repeatedly performing
+this sequence of actions, we can evaluate an entire AST containing a complex
+expression down into its resulting value.
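
As a sketch of how this plays out on a concrete program, here is roughly the
sequence of terms that the rules of LESSON-13-B produce in the <k> cell when
evaluating 1 + 2 + 4 (which parses as (1 + 2) + 4, since + is left
associative):

  1 + 2 + 4
  1 + 2 ~> freezer2(4)
  3 ~> freezer2(4)
  3 + 4
  7

The first step heats the unevaluated first argument, the second evaluates it,
the third cools the result back into the surrounding expression, and the final
step evaluates the resulting expression itself.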

+

Exercise

+ +

Write an addition expression with integers. Use krun --depth 1 to see the
+result of rewriting after applying a single top-level rule. Gradually increase
+the value of --depth to see successive states. Observe how this combination
+of rules is eventually able to evaluate the entire expression.

+

Simplifying the evaluator: Local rewrites and cell ellipses

+ +

As you saw above, the definition we wrote is rather cumbersome. Over the
+remainder of Lessons 1.13 and 1.14, we will greatly simplify it. The first step
+in doing so is to teach a bit more about the rewrite operator, =>. Thus far,
+all the rules we have written look like rule LHS => RHS. However, this is not
+the only way the rewrite operator can be used. It is actually possible to place
+a constructor or function at the very top of the rule, and place rewrite
+operators inside that term. While a rewrite operator cannot appear nested
+inside another rewrite operator, by doing this, we can express that some parts
+of what we are matching are not changed by the rewrite operator. For
+example, consider the following rule from above:

+
  rule <k> I1:Int + I2:Int ~> K:K </k> => <k> I1 +Int I2 ~> K </k>
+

We can equivalently write it as follows:

+
  rule <k> (I1:Int + I2:Int => I1 +Int I2) ~> _:K </k>
+

When you put a rewrite inside a term like this, in essence, you are telling
+the rule to only rewrite part of the left-hand side to the right-hand side.
+In practice, this is implemented by lifting the rewrite operator to the top of
+the rule by means of duplicating the surrounding context.

+

There is a way that the above rule can be simplified further, however. K
+provides a special syntax for each cell containing a term of sort K, indicating
+that we want to match only on some prefix of the K sequence. For example, the
+above rule can be simplified further like so:

+
  rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k>
+

Here we have placed the symbol ... immediately prior to the </k> which ends
+the cell. What this tells the compiler is to take the contents of the cell,
+treat it as the prefix of a K sequence, and insert an anonymous variable of
+sort K at the end. Thus we can think of ... as a way of saying we
+don't care about the part of the K sequence after the beginning, leaving
+it unchanged.

+

Putting all this together, we can rewrite LESSON-13-B like so
+(lesson-13-c.k):

+
module LESSON-13-C-SYNTAX
+  imports UNSIGNED-INT-SYNTAX
+  imports BOOL-SYNTAX
+
+  syntax Val ::= Int | Bool
+  syntax Exp ::= Val
+               > left: Exp "+" Exp
+               > left: Exp "&&" Exp
+endmodule
+
+module LESSON-13-C
+  imports LESSON-13-C-SYNTAX
+  imports INT
+  imports BOOL
+
+  rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k>
+  rule <k> B1:Bool && B2:Bool => B1 andBool B2 ...</k>
+
+  syntax KItem ::= freezer1(Val) | freezer2(Exp)
+                 | freezer3(Val) | freezer4(Exp)
+
+  rule <k> E1:Val + E2:Exp => E2 ~> freezer1(E1) ...</k> [priority(51)]
+  rule <k> E1:Exp + E2:Exp => E1 ~> freezer2(E2) ...</k> [priority(52)]
+  rule <k> E1:Val && E2:Exp => E2 ~> freezer3(E1) ...</k> [priority(51)]
+  rule <k> E1:Exp && E2:Exp => E1 ~> freezer4(E2) ...</k> [priority(52)]
+
+  rule <k> E2:Val ~> freezer1(E1) => E1 + E2 ...</k>
+  rule <k> E1:Val ~> freezer2(E2) => E1 + E2 ...</k>
+  rule <k> E2:Val ~> freezer3(E1) => E1 && E2 ...</k>
+  rule <k> E1:Val ~> freezer4(E2) => E1 && E2 ...</k>
+endmodule
+

This is still rather cumbersome, but it is already greatly simplified. In the
+next lesson, we will see how additional features of K can be used to specify
+heating and cooling rules much more compactly.

+

Exercises

+ +
  1. Modify LESSON-13-C to add rules to evaluate integer subtraction.
+

Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.14: Defining Evaluation Order.

+

Lesson 1.14: Defining Evaluation Order

+ +

The purpose of this lesson is to explain how to use the heat and cool
+attributes, context and context alias sentences, and the strict and
+seqstrict attributes to more compactly express heating and cooling in K,
+and to express more advanced evaluation strategies in K.

+

The heat and cool attributes

+ +

Thus far, we have been using rule priority and casts to express when to heat
+an expression and when to cool it. For example, the rules for heating have
+lower priority, so they do not apply if the term could be evaluated instead,
+and the rules for cooling are expressly written only to apply if the argument
+of the expression is a value.

+

However, K has built-in support for deciding when to heat and when to cool.
+This support comes in the form of the rule attributes heat and cool as
+well as the specially named function isKResult.

+

Consider the following definition, which is equivalent to LESSON-13-C
+(lesson-14-a.k):

+
module LESSON-14-A-SYNTAX
+  imports UNSIGNED-INT-SYNTAX
+  imports BOOL-SYNTAX
+
+  syntax Exp ::= Int
+               | Bool
+               > left: Exp "+" Exp
+               > left: Exp "&&" Exp
+endmodule
+
+module LESSON-14-A
+  imports LESSON-14-A-SYNTAX
+  imports INT
+  imports BOOL
+
+  rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k>
+  rule <k> B1:Bool && B2:Bool => B1 andBool B2 ...</k>
+
+  syntax KItem ::= freezer1(Exp) | freezer2(Exp)
+                 | freezer3(Exp) | freezer4(Exp)
+
+  rule <k> E:Exp + HOLE:Exp => HOLE ~> freezer1(E) ...</k>
+    requires isKResult(E) [heat]
+  rule <k> HOLE:Exp + E:Exp => HOLE ~> freezer2(E) ...</k> [heat]
+  rule <k> E:Exp && HOLE:Exp => HOLE ~> freezer3(E) ...</k>
+    requires isKResult(E) [heat]
+  rule <k> HOLE:Exp && E:Exp => HOLE ~> freezer4(E) ...</k> [heat]
+
+  rule <k> HOLE:Exp ~> freezer1(E) => E + HOLE ...</k> [cool]
+  rule <k> HOLE:Exp ~> freezer2(E) => HOLE + E ...</k> [cool]
+  rule <k> HOLE:Exp ~> freezer3(E) => E && HOLE ...</k> [cool]
+  rule <k> HOLE:Exp ~> freezer4(E) => HOLE && E ...</k> [cool]
+
+  syntax Bool ::= isKResult(K) [function, symbol]
+  rule isKResult(_:Int) => true
+  rule isKResult(_:Bool) => true
+  rule isKResult(_) => false [owise]
+endmodule
+

We have introduced three major changes to this definition. First, we have
+removed the Val sort. We replace it instead with a function isKResult.
+The function in question must have the same signature and attributes as seen in
+this example. It ought to return true whenever a term should not be heated
+(because it is a value) and false when it should be heated (because it is not
+a value). We thus also insert isKResult calls in the side condition of two
+of the heating rules, where the Val sort was previously used.

+

Second, we have removed the rule priorities on the heating rules and the use of
+the Val sort on the cooling rules, and replaced them with the heat and
+cool attributes. These attributes instruct the compiler that these rules are
+heating and cooling rules, and thus should implicitly apply only when certain
+terms on the LHS either are or are not a KResult (i.e., isKResult returns
+true versus false).

+

Third, we have renamed some of the variables in the heating and cooling rules
+to the special variable HOLE. Syntactically, HOLE is just a special name
+for a variable, but it is treated specially by the compiler. By naming a
+variable HOLE, we have informed the compiler which term is being heated
+or cooled. The compiler will automatically insert the side condition
+requires isKResult(HOLE) to cooling rules and the side condition
+requires notBool isKResult(HOLE) to heating rules.
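
For example, the second heating rule of LESSON-14-A behaves as if it had been
written with the side condition spelled out explicitly (a sketch of the
implicit expansion):

  rule <k> HOLE:Exp + E:Exp => HOLE ~> freezer2(E) ...</k>
    requires notBool isKResult(HOLE) [heat]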

+

Exercise

+ +

Modify LESSON-14-A to add rules to evaluate integer subtraction.

+

Simplifying further with Contexts

+ +

The above example is still rather cumbersome to write. We must explicitly write
+both the heating and the cooling rule separately, even though they are
+essentially inverses of one another. It would be nice to instead simply
+indicate which terms should be heated and cooled, and what part of them to
+operate on.

+

To do this, K introduces a new type of sentence, the context. Contexts
+begin with the context keyword instead of the rule keyword, and usually
+do not contain a rewrite operator.

+

Consider the following definition which is equivalent to LESSON-14-A
+(lesson-14-b.k):

+
module LESSON-14-B-SYNTAX
+  imports UNSIGNED-INT-SYNTAX
+  imports BOOL-SYNTAX
+
+  syntax Exp ::= Int
+               | Bool
+               > left: Exp "+" Exp
+               > left: Exp "&&" Exp
+endmodule
+
+module LESSON-14-B
+  imports LESSON-14-B-SYNTAX
+  imports INT
+  imports BOOL
+
+  rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k>
+  rule <k> B1:Bool && B2:Bool => B1 andBool B2 ...</k>
+
+  context <k> E:Exp + HOLE:Exp ...</k>
+    requires isKResult(E)
+  context <k> HOLE:Exp + _:Exp ...</k>
+  context <k> E:Exp && HOLE:Exp ...</k>
+    requires isKResult(E)
+  context <k> HOLE:Exp && _:Exp ...</k>
+
+  syntax Bool ::= isKResult(K) [function, symbol]
+  rule isKResult(_:Int) => true
+  rule isKResult(_:Bool) => true
+  rule isKResult(_) => false [owise]
+endmodule
+

In this example, the heat and cool rules have been removed entirely, as
+have been the productions defining the freezers. Don't worry, they still exist
+under the hood; the compiler is just generating them automatically. For each
+context sentence like above, the compiler generates a #freezer production,
+a heat rule, and a cool rule. The generated form is equivalent to the
+rules we wrote manually in LESSON-14-A. However, we are now starting to
+considerably simplify the definition. Instead of 3 sentences, we just have one.
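
For instance, the context <k> HOLE:Exp + _:Exp ...</k> causes the compiler to
generate, roughly, a freezer production together with a heating and a cooling
rule of the same shape as the ones we wrote in LESSON-14-A (the freezer name
below is illustrative; the compiler chooses its own generated name):

  syntax KItem ::= #freezer2(Exp)
  rule <k> HOLE:Exp + E:Exp => HOLE ~> #freezer2(E) ...</k> [heat]
  rule <k> HOLE:Exp ~> #freezer2(E) => HOLE + E ...</k> [cool]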

+

context alias sentences and the strict and seqstrict attributes

+ +

Notice that the contexts we included in LESSON-14-B still seem rather
+similar in form. For each expression we want to evaluate, we are declaring
+one context for each operand of that expression, and they are each rather
+similar to one another. We would like to be able to simplify further by
+simply annotating each expression production with information about how
+it is to be evaluated instead. We can do this with the seqstrict attribute.

+

Consider the following definition, once again equivalent to those above
+(lesson-14-c.k):

+
module LESSON-14-C-SYNTAX
+  imports UNSIGNED-INT-SYNTAX
+  imports BOOL-SYNTAX
+
+  syntax Exp ::= Int
+               | Bool
+               > left: Exp "+" Exp [seqstrict(exp; 1, 2)]
+               > left: Exp "&&" Exp [seqstrict(exp; 1, 2)]
+endmodule
+
+module LESSON-14-C
+  imports LESSON-14-C-SYNTAX
+  imports INT
+  imports BOOL
+
+  rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k>
+  rule <k> B1:Bool && B2:Bool => B1 andBool B2 ...</k>
+
+  context alias [exp]: <k> HERE ...</k>
+
+  syntax Bool ::= isKResult(K) [function, symbol]
+  rule isKResult(_:Int) => true
+  rule isKResult(_:Bool) => true
+  rule isKResult(_) => false [owise]
+endmodule
+

This definition has two important changes from the one above. The first is
+that the individual context sentences have been removed and have been
+replaced with a single context alias sentence. You may notice that this
+sentence begins with an identifier in square brackets followed by a colon. This
+syntax is a way of naming individual sentences in K for reference by the tool
+or by other sentences. The context alias sentence also has a special variable
+HERE.

+

The second is that the productions in LESSON-14-C-SYNTAX have been given a
+seqstrict attribute. The value of this attribute has two parts. The first
+is the name of a context alias sentence. The second is a comma-separated list
+of integers. Each integer represents an index of a non-terminal in the
+production, counting from 1. For each integer present, the compiler implicitly
+generates a new context sentence according to the following rules:

+
  1. The compiler starts by looking for the context alias sentence named. If
     there is more than one, then one context sentence is created per
     context alias sentence with that name.

  2. For each context created, the variable HERE in the context alias is
     substituted with an instance of the production the seqstrict attribute is
     attached to. Each child of that production is a variable. The non-terminal
     indicated by the integer offset of the seqstrict attribute is given the name
     HOLE.

  3. For each integer offset prior in the list to the one currently being
     processed, the predicate isKResult(E) is conjoined and included as a side
     condition, where E is the child of the production term with that offset,
     starting from 1. For example, if the attribute lists 1, 2, then the rule
     generated for the 2 will include isKResult(E1), where E1 is the first
     child of the production.
+

As you can see if you work through the process, the above code will ultimately
+generate the same contexts present in LESSON-14-B.
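
Concretely, for the production Exp "+" Exp [seqstrict(exp; 1, 2)], working
through these steps yields the following two contexts, which are exactly the
ones written by hand for + in LESSON-14-B:

  context <k> HOLE:Exp + _:Exp ...</k>
  context <k> E:Exp + HOLE:Exp ...</k>
    requires isKResult(E)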

+

Finally, note that there are a few minor syntactic conveniences provided by the
+seqstrict attribute. First, in the special case of the context alias sentence
+being <k> HERE ...</k>, you can omit both the context alias sentence
+and the name from the seqstrict attribute.

+

Second, if the numbered list of offsets contains every non-terminal in the
+production, it can be omitted from the attribute value.

+

Thus, we can finally produce the idiomatic K definition for this example
+(lesson-14-d.k):

+
module LESSON-14-D-SYNTAX
+  imports UNSIGNED-INT-SYNTAX
+  imports BOOL-SYNTAX
+
+  syntax Exp ::= Int
+               | Bool
+               > left: Exp "+" Exp [seqstrict]
+               > left: Exp "&&" Exp [seqstrict]
+endmodule
+
+module LESSON-14-D
+  imports LESSON-14-D-SYNTAX
+  imports INT
+  imports BOOL
+
+  rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k>
+  rule <k> B1:Bool && B2:Bool => B1 andBool B2 ...</k>
+
+  syntax Bool ::= isKResult(K) [function, symbol]
+  rule isKResult(_:Int) => true
+  rule isKResult(_:Bool) => true
+  rule isKResult(_) => false [owise]
+endmodule
+

Exercise

+ +

Modify LESSON-14-D to add a production and rule to evaluate integer
+subtraction.

+

Nondeterministic evaluation order with the strict attribute

+ +

Thus far, we have focused entirely on deterministic evaluation order. However,
+not all languages are deterministic in the order they evaluate expressions.
+For example, in C, the expression a() + b() + c() is guaranteed to parse
+to (a() + b()) + c(), but it is not guaranteed that a will be called before
+b before c. In fact, this evaluation order is non-deterministic.

+

We can express non-deterministic evaluation orders with the strict attribute.
+Its behavior is identical to the seqstrict attribute, except that step 3 in
+the above list (with the side condition automatically added) does not take
+place. In other words, if we wrote syntax Exp ::= Exp "+" Exp [strict]
+instead of syntax Exp ::= Exp "+" Exp [seqstrict], it would generate the
+following two contexts instead of the ones found in LESSON-14-B:

+
  context <k> _:Exp + HOLE:Exp ...</k>
+  context <k> HOLE:Exp + _:Exp ...</k>
+

As you can see, these contexts will generate heating rules that can both
+apply to the same term. As a result, the choice of which heating rule
+applies first is non-deterministic, and as we saw in Lesson 1.13, we can
+get all possible behaviors by passing --search to krun.

+

Exercises

+ +
  1. Add integer division to LESSON-14-D. Make division and addition strict
     instead of seqstrict, and write a rule evaluating integer division with a
     side condition that the denominator is non-zero. Run krun --search on the
     program 1 / 0 + 2 / 1 and observe all possible outputs of the program. How
     many are there in total, and why?

  2. Rework your solution from Lesson 1.9, Exercise 2 to evaluate expressions
     from left to right using the seqstrict attribute.
+

Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.15: Configuration Declarations and Cell Nesting.

+

Lesson 1.15: Configuration Declarations and Cell Nesting

+ +

The purpose of this lesson is to explain how to store additional information
+about the state of your interpreter by declaring cells using the
+configuration sentence, as well as how to add additional inputs to your
+definition.

+

Cells and Configuration Declarations

+ +

We have already covered the absolute basics of cells in K by looking at the
+<k> cell. As explained in Lesson 1.13, the
+<k> cell is available without being explicitly declared. It turns out this is
+because, if the user does not explicitly specify a configuration sentence
+anywhere in the main module of their definition, the configuration sentence
+from the DEFAULT-CONFIGURATION module of
+kast.md is imported
+automatically. Here is what that sentence looks like:

+
  configuration <k> $PGM:K </k>
+

This configuration declaration declares a single cell, the <k> cell. It also
+declares that at the start of rewriting, the contents of that cell should be
+initialized with the value of the $PGM configuration variable.
+Configuration variables function as inputs to krun. These terms are supplied
+to krun in the form of ASTs parsed using a particular module. By default, the
+$PGM configuration variable uses the main syntax module of the definition.

+

The cast on the configuration variable also specifies the sort that is used as
+the entry point to the parser, in this case the K sort. It is often
+useful to cast to other sorts there as well for better control over the accepted
+language. The sort used for the $PGM variable is referred to as the start
+symbol. During parsing, the default start symbol K subsumes all user-defined
+sorts except for syntactic lists. These are excluded because they will always
+produce an ambiguity error when parsing a single element.
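
For example, a definition whose programs are expressions of some sort Exp
could restrict the parser's entry point like so (a sketch, assuming such a
sort exists in the definition):

  configuration <k> $PGM:Exp </k>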

+

Note that we did not explicitly specify the $PGM configuration variable when
+we invoked krun on a file. This is because krun handles the $PGM variable
+specially, and allows you to pass the term for that variable via a file passed
+as a positional argument to krun. We did, however, specify the PGM name
+explicitly when we called krun with the -cPGM command line argument in
+Lesson 1.2. This is the other, explicit, way of
+specifying an input to krun.

+

This explains the most basic use of configuration declarations in K. We can,
+however, declare multiple cells and multiple configuration variables. We can
+also specify the initial values of cells statically, rather than dynamically
+via krun.

+

For example, consider the following definition (lesson-15-a.k):

+
module LESSON-15-A-SYNTAX
+  imports INT-SYNTAX
+
+  syntax Ints ::= List{Int,","}
+endmodule
+
+module LESSON-15-A
+  imports LESSON-15-A-SYNTAX
+  imports INT
+
+  configuration <k> $PGM:Ints </k>
+                <sum> 0 </sum>
+
+  rule <k> I:Int, Is:Ints => Is ...</k>
+       <sum> SUM:Int => SUM +Int I </sum>
+endmodule
+

This simple definition takes a list of integers as input and sums them
+together. Here we have declared two cells: <k> and <sum>. Unlike <k>,
+<sum> does not get initialized via a configuration variable, but instead
+is initialized statically with the value 0.
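
As a rough sketch of how this definition is used (the file name is
hypothetical), consider a program file sum.ints containing:

  0, 1, 2, 3

Running krun sum.ints should then produce a final configuration whose <sum>
cell contains 6, with only the empty list remaining in the <k> cell.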

+

Note the rule in the second module: we have explicitly specified multiple
+cells in a single rule. K will expect each of these cells to match in order for
+the rule to apply.

+

Here is a second example (lesson-15-b.k):

+
module LESSON-15-B-SYNTAX
+  imports INT-SYNTAX
+endmodule
+
+module LESSON-15-B
+  imports LESSON-15-B-SYNTAX
+  imports INT
+  imports BOOL
+
+  configuration <k> . </k>
+                <first> $FIRST:Int </first>
+                <second> $SECOND:Int </second>
+
+  rule <k> . => FIRST >Int SECOND </k>
+       <first> FIRST </first>
+       <second> SECOND </second>
+endmodule
+

This definition takes two integers as command-line arguments and populates the
+<k> cell with a Boolean indicating whether the first integer is greater than
+the second. Notice that we have specified no $PGM configuration variable
+here. As a result, we cannot invoke krun via the syntax krun $file.
+Instead, we must explicitly pass values for each configuration variable via the
+-cFIRST and -cSECOND command line flags. For example, if we invoke
+krun -cFIRST=0 -cSECOND=1, we will get the value false in the K cell.

+

You can also specify both a $PGM configuration variable and other
+configuration variables in a single configuration declaration, in which case
+you would be able to initialize $PGM with either a positional argument or the
+-cPGM command line flag, but the other configuration variables would need
+to be explicitly initialized with -c.

+

Exercise

+ +

Modify your solution to Lesson 1.14, Exercise 2 to add a new cell with a
+configuration variable of sort Bool. This variable should determine whether
+the / operator is evaluated using /Int or divInt. Test that by specifying
+different values for this variable, you can change the behavior of rounding on
+division of negative numbers.

+

Cell Nesting

+ +

It is possible to nest cells inside one another. A cell that contains other
+cells must contain only other cells, but in doing this, you are able to
+create a hierarchical structure to the configuration. Consider the following
+definition (lesson-15-c.k), which is equivalent to the one in LESSON-15-B:

+
module LESSON-15-C-SYNTAX
+  imports INT-SYNTAX
+endmodule
+
+module LESSON-15-C
+  imports LESSON-15-C-SYNTAX
+  imports INT
+  imports BOOL
+
+  configuration <T>
+                  <k> . </k>
+                  <state>
+                    <first> $FIRST:Int </first>
+                    <second> $SECOND:Int </second>
+                  </state>
+                </T>
+
+  rule <k> . => FIRST >Int SECOND </k>
+       <first> FIRST </first>
+       <second> SECOND </second>
+endmodule
+

Note that we have added some new cells to the configuration declaration:
+the <T> cell wraps the entire configuration, and the <state> cell is
+introduced around the <first> and <second> cells.

+

However, we have not changed the rule in this definition. This is because of
+a concept in K called configuration abstraction. K allows you to specify
+any number of cells in a rule (except zero) in any order you want, and K will
+compile the rules into a form that matches the structure of the configuration
+specified by the configuration declaration.

+

Here then, is how this rule would look after the configuration abstraction
+has been resolved:

+
  rule <T>
+         <k> . => FIRST >Int SECOND </k>
+         <state>
+           <first> FIRST </first>
+           <second> SECOND </second>
+         </state>
+       </T>
+

In other words, K will complete cells to the top of the configuration by
+inserting parent cells where appropriate based on the declared structure of
+the configuration. This is useful because as a definition evolves, the
+configuration may change, but you don't want to have to modify every single
+rule each time. Thus, K follows the principle that you should only mention the
+cells in a rule that are actually needed in order to accomplish its specific
+goal. By following this best practice, you can significantly increase the
+modularity of the definition and make it easier to maintain and modify.

+

Note that unlike top-level rewrite rules, cells that appear inside function
+rules are not necessarily completed to the top of the configuration. They still
+participate in cell completion in the sense that you can mention cell
+structure loosely inside a function rule and it will be completed into the
+correct cell structure specified by the configuration declaration. However,
+they do not complete all the way to the top, instead completing only up to
+the top-most cell mentioned in the rule.

+

For example, if we write the following function rule in the above definition:

+
  rule doStuff(<first> FIRST </first>) => FIRST
+

The function will only match on the <first> cell, rather than the entire
+configuration. However, if we had mentioned a parent cell in the rule, it still
+would have completed the children of that parent cell as needed to ensure that
+the resulting term is well formed.

+

Exercise

+ +

Modify your definition from the previous exercise in this lesson to wrap the
+two cells you have declared in a top cell <T>. You should not have to change
+any other rules in the definition.

+

Cell Variables

+ +

Sometimes it is desirable to explicitly match a variable against certain
+fragments of the configuration. Because K's configuration is hierarchical,
+we can grab subsets of the configuration as if they were just another term.
+However, configuration abstraction applies here as well.
+In particular, for each cell you specify in a configuration declaration, a
+unique sort is assigned for that cell with a single constructor (the cell
+itself). The sort name is taken by removing all special characters,
+capitalizing the first letter and each letter after a hyphen, and adding the
+word Cell at the end. For example, in the above example, the cell sorts are
+TCell, KCell, StateCell, FirstCell, and SecondCell. If we had declared
+a cell as <first-number>, then the cell sort name would be FirstNumberCell.

+

You can explicitly reference a variable of one of these sorts anywhere you
+might instead write that cell. For example, consider the following rule:

+
  rule <k> true => S </k>
+       (S:StateCell => <state>... .Bag ...</state>)
+

Here we have introduced two new concepts. The first is the variable of sort
+StateCell, which matches the entire <state> part of the configuration. The
+second is that we have introduced the concept of ... once again. When a cell
+contains other cells, it is also possible to specify ... on either the left,
+right or both sides of the cell term. All three of these syntaxes are
+equivalent in this case. When they appear on the left-hand side of a rule, they
+indicate that we don't care what value any cells not explicitly named might
+have. For example, we might write <state>... <first> 0 </first> ...</state> on
+the left-hand side of a rule in order to indicate that we want to match the
+rule when the <first> cell contains a zero, regardless of what the <second>
+cell contains. If we had not included this ellipsis, it would have been a
+syntax error, because K would have expected you to provide a value for each of
+the child cells.

+

However, if, as in the example above, the ... appeared on the right-hand side
+of a rule, this instead indicates that the cells not explicitly mentioned under
+the cell should be initialized with their default value from the configuration
+declaration. In other words, that rule will set the value of <first> and
+<second> to zero.

+

You may note the presence of the phrase .Bag here. You can think of this as
+the empty set of cells. It is used as the child of a cell when you want to
+indicate that no cells should be explicitly named. We will cover other uses
+of this term in later lessons.

+

Exercises

+ +
  1. Modify the definition from the previous exercise in this lesson so that the
     Boolean cell you created is initialized to false. Then add a production
     syntax Stmt ::= Bool ";" Exp, and a rule that uses this Stmt to set the
     value of the Boolean flag. Then add another production
     syntax Stmt ::= "reset" ";" Exp which sets the value of the Boolean flag
     back to its default value via a ... on the right-hand side. You will need
     to add an additional cell around the Boolean cell to make this work.
+

Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.16: Maps, Semantic Lists, and Sets.

+

Lesson 1.16: Maps, Semantic Lists, and Sets

+ +

The purpose of this lesson is to explain how to use the data structure sorts
+provided by K: maps, lists, and sets.

+

Maps

+ +

The most frequently used type of data structure in K is the map. The sort
+provided by K for this purpose is the Map sort, and it is provided in
+domains.md in the MAP
+module. This type is not (currently) polymorphic. All Map terms are maps that
+map terms of sort KItem to other terms of sort KItem. A KItem can be a term
+of any sort except a K sequence. If you need to store such a term in a
+map, you can always use a wrapper such as syntax KItem ::= kseq(K).

+

A Map pattern consists of zero or more map elements (as represented by the
+symbol syntax Map ::= KItem "|->" KItem), mixed in any order, separated by
+whitespace, with zero or one variables of sort Map. The empty map is
+represented by .Map. If all of the bindings for the variables in the keys
+of the map can be deterministically chosen, these patterns can be matched in
+O(1) time. If they cannot, then each map element that cannot be
+deterministically constructed contributes a single dimension of polynomial
+time to the cost of the matching. In other words, a single such element is
+linear, two are quadratic, three are cubic, etc.
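
For instance, all of the following are syntactically valid Map patterns under
this description (the variable names are purely illustrative):

  .Map
  X |-> 0
  Key1 |-> Val1  Key2 |-> Val2  REST:Map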

+

Patterns like the above are the only type of Map pattern that can appear
+on the left-hand-side of a rule. In other words, you are not allowed to write
+a Map pattern on the left-hand-side with more than one variable of sort Map
+in it. You are, however, allowed to write such patterns on the right-hand-side
+of a rule. You can also write a function pattern in the key of a map element
+so long as all the variables in the function pattern can be deterministically
+chosen.

+

Note the meaning of matching on a Map pattern: a map pattern with no
+variables of sort Map will match if the map being matched has exactly as
+many bindings as |-> symbols in the pattern. It will then match if each
+binding in the map pattern matches exactly one distinct binding in the map
+being matched. A map pattern with one Map variable will also match any map
+that contains such a map as a subset. The variable of sort Map will be bound
+to whatever bindings are left over (.Map if there are no bindings left over).

+

Here is an example of a simple definition that implements a very basic
+variable declaration semantics using a Map to store the value of variables
+(lesson-16-a.k):

+
module LESSON-16-A-SYNTAX
+  imports INT-SYNTAX
+  imports ID-SYNTAX
+
+  syntax Exp ::= Id | Int
+  syntax Decl ::= "int" Id "=" Exp ";" [strict(2)]
+  syntax Pgm ::= List{Decl,""}
+endmodule
+
+module LESSON-16-A
+  imports LESSON-16-A-SYNTAX
+  imports BOOL
+
+  configuration <T>
+                  <k> $PGM:Pgm </k>
+                  <state> .Map </state>
+                </T>
+
+  // declaration sequence
+  rule <k> D:Decl P:Pgm => D ~> P ...</k>
+  rule <k> .Pgm => . ...</k>
+
+  // variable declaration
+  rule <k> int X:Id = I:Int ; => . ...</k>
+       <state> STATE => STATE [ X <- I ] </state>
+
+  // variable lookup
+  rule <k> X:Id => I ...</k>
+       <state>... X |-> I ...</state>
+
+  syntax Bool ::= isKResult(K) [symbol, function]
+  rule isKResult(_:Int) => true
+  rule isKResult(_) => false [owise]
+endmodule
+

There are several new features in this definition. First, note we import
+the module ID-SYNTAX. This module is defined in domains.md and provides a
+basic syntax for identifiers. We are using the Id sort provided by this
+module in this definition to implement the names of program variables. This
+syntax is only imported when parsing programs, not when parsing rules. Later in
+this lesson we will see how to reference specific concrete identifiers in a
+rule.

+

Second, we introduce a single new function over the Map sort. This function,
+which is represented by the symbol
+syntax Map ::= Map "[" KItem "<-" KItem "]", represents the map update
+operation. Other functions over the Map sort can be found in domains.md.

+

Finally, we have used the ... syntax on a cell containing a Map. In this
+case, the meaning of <state>... Pattern ...</state>,
+<state>... Pattern </state>, and <state> Pattern ...</state> are the same:
+it is equivalent to writing <state> (Pattern) _:Map </state>.

+

Consider the following program (a.decl):

+
int x = 0;
+int y = 1;
+int a = x;
+

If we run this program with krun, we will get the following result:

+
<T>
+  <k>
+    .
+  </k>
+  <state>
+    a |-> 0
+    x |-> 0
+    y |-> 1
+  </state>
+</T>
+

Note that krun has automatically sorted the collection for you. This doesn't
+happen at runtime, so you still get the performance of a hash map, but it will
+help make the output more readable.

+

Exercise

+ +

Create a sort Stmt that is a subsort of Decl. Create a production of sort
+Stmt for variable assignment in addition to the variable declaration
+production. Feel free to use the syntax syntax Stmt ::= Id "=" Exp ";". Write
+a rule that implements variable assignment using a map update function. Then
+write the same rule using a map pattern. Test your implementations with some
+programs to ensure they behave as expected.

+

Semantic Lists

+ +

In a previous lesson, we explained how to represent lists in the AST of a
+program. However, this is not the only context where lists can be used. We also
+frequently use lists in the configuration of an interpreter in order to
+represent certain types of program state. For this purpose, it is generally
+useful to have an associative-list sort, rather than the cons-list sorts
+provided in Lesson 1.12.

+

The type provided by K for this purpose is the List sort, and it is also
+provided in domains.md, in the LIST module. This type is also not
+(currently) polymorphic. Like Map, all List terms are lists of terms of the
+KItem sort.

+

A List pattern in K consists of zero or more list elements (as represented by
+the ListItem symbol), followed by zero or one variables of sort List,
+followed by zero or more list elements. An empty list is represented by
+.List. These patterns can be matched in O(log(N)) time. This is the only
+type of List pattern that can appear on the left-hand-side of a rule. In
+other words, you are not allowed to write a List pattern on the
+left-hand-side with more than one variable of sort List in it. You are,
+however, allowed to write such patterns on the right-hand-side of a rule.
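
For instance, all of the following are syntactically valid List patterns under
this description (the variable names are purely illustrative):

  .List
  ListItem(1) ListItem(2)
  ListItem(X) REST:List ListItem(Y)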

+

Note the meaning of matching on a List pattern: a list pattern with no
+variables of sort List will match if the list being matched has exactly as
+many elements as ListItem symbols in the pattern. It will then match if each
+element in sequence matches the pattern contained in the ListItem symbol. A
+list pattern with one variable of sort List operates the same way, except
+that it can match any list with at least as many elements as ListItem
+symbols, so long as the prefix and suffix of the list match the patterns inside
+the ListItem symbols. The variable of sort List will be bound to whatever
+elements are left over (.List if there are no elements left over).

+

The ... syntax is allowed on cells containing lists as well. In this case,
+the meaning of <cell>... Pattern </cell> is the same as
+<cell> _:List (Pattern) </cell>, the meaning of <cell> Pattern ...</cell>
+is the same as <cell> (Pattern) _:List</cell>. Because list patterns with
+multiple variables of sort List are not allowed, it is an error to write
+<cell>... Pattern ...</cell>.
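
For instance, according to this equivalence, the <fstack> cell written in the
function-call rule of the definition below as

<fstack> .List => ListItem(stackFrame(K)) ...</fstack>

could equivalently be written

<fstack> (.List => ListItem(stackFrame(K))) _:List </fstack>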

+

Here is an example of a simple definition that implements a very basic
+function-call semantics using a List as a function stack (lesson-16-b.k):

+
module LESSON-16-B-SYNTAX
+  imports INT-SYNTAX
+  imports ID-SYNTAX
+
+  syntax Exp ::= Id "(" ")" | Int
+  syntax Stmt ::= "return" Exp ";" [strict]
+  syntax Decl ::= "fun" Id "(" ")" "{" Stmt "}"
+  syntax Pgm ::= List{Decl,""}
+  syntax Id ::= "main" [token]
+endmodule
+
+module LESSON-16-B
+  imports LESSON-16-B-SYNTAX
+  imports BOOL
+  imports LIST
+
+  configuration <T>
+                  <k> $PGM:Pgm ~> main () </k>
+                  <functions> .Map </functions>
+                  <fstack> .List </fstack>
+                </T>
+
+  // declaration sequence
+  rule <k> D:Decl P:Pgm => D ~> P ...</k>
+  rule <k> .Pgm => . ...</k>
+
+  // function definitions
+  rule <k> fun X:Id () { S } => . ...</k>
+       <functions>... .Map => X |-> S ...</functions>
+
+  // function call
+  syntax KItem ::= stackFrame(K)
+  rule <k> X:Id () ~> K => S </k>
+       <functions>... X |-> S ...</functions>
+       <fstack> .List => ListItem(stackFrame(K)) ...</fstack>
+
+  // return statement
+  rule <k> return I:Int ; ~> _ => I ~> K </k>
+       <fstack> ListItem(stackFrame(K)) => .List ...</fstack>
+
+  syntax Bool ::= isKResult(K) [function, symbol]
+  rule isKResult(_:Int) => true
+  rule isKResult(_) => false [owise]
+endmodule
+

Notice that we have declared the production syntax Id ::= "main" [token].
+Since we use the ID-SYNTAX module, this declaration is necessary in order to
+be able to refer to the main identifier directly in the configuration
+declaration. Our <k> cell now contains a K sequence initially: first we
+process all the declarations in the program, then we call the main function.

+

Consider the following program (foo.func):

+
fun foo() { return 5; }
+fun main() { return foo(); }
+

When we krun this program, we should get the following output:

+
<T>
+  <k>
+    5 ~> .
+  </k>
+  <functions>
+    foo |-> return 5 ;
+    main |-> return foo ( ) ;
+  </functions>
+  <fstack>
+    .List
+  </fstack>
+</T>
+

Note that the value returned by the main function has successfully been placed
on the <k> cell.

+

Exercise

+ +

Add a term of sort Id to the stackFrame operator to keep track of the
+name of the function in that stack frame. Then write a function
+syntax String ::= printStackTrace(List) that takes the contents of the
+<fstack> cell and pretty prints the current stack trace. You can concatenate
+strings with +String in the STRING module in domains.md, and you can
+convert an Id to a String with the Id2String function in the ID module.
+Test this function by creating a new expression that returns the current stack
+trace as a string. Make sure to update isKResult and the Exp sort as
+appropriate to allow strings as values.

+

Sets

+ +

The final primary data structure sort in K is a set, i.e., an idempotent
+unordered collection where elements are deduplicated. The sort provided by K
+for this purpose is the Set sort and it is provided in domains.md in the
+SET module. Like maps and lists, this type is not (currently) polymorphic.
+Like Map and List, all Set terms are sets of terms of the KItem sort.

+

A Set pattern has the exact same restrictions as a Map pattern, except that
its elements are treated like keys, and there are no values. It has the same
performance characteristics as well. However, syntactically it is more similar
to the List sort: an empty Set is represented by .Set, and a set element
is represented by the SetItem symbol.

+

Matching behaves similarly to the Map sort: a set pattern with no variables
of sort Set will match if the set has exactly as many elements as SetItem
symbols, and if each element pattern matches one distinct element in the set.
A set pattern with a variable of sort Set also matches any superset of such a
set. As with maps, the elements left over will be bound to the Set variable
(or .Set if no elements are left over).

+

Like Map, the ... syntax on a set is syntactic sugar for an anonymous
+variable of sort Set.
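
For instance, the membership test used in the definition below,

<declared>... SetItem(X) ...</declared>

is, by this desugaring, equivalent to writing

<declared> SetItem(X) _:Set </declared>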

+

Here is an example of a simple modification to LESSON-16-A which uses a Set
+to ensure that variables are never declared more than once. In practice, you
+would likely just use the in_keys symbol over maps to test for this, but
+it's still useful as an example of sets in practice:

+
module LESSON-16-C-SYNTAX
+  imports LESSON-16-A-SYNTAX
+endmodule
+
+module LESSON-16-C
+  imports LESSON-16-C-SYNTAX
+  imports BOOL
+  imports SET
+
+  configuration <T>
+                  <k> $PGM:Pgm </k>
+                  <state> .Map </state>
+                  <declared> .Set </declared>
+                </T>
+
+  // declaration sequence
+  rule <k> D:Decl P:Pgm => D ~> P ...</k>
+  rule <k> .Pgm => . ...</k>
+
+  // variable declaration
+  rule <k> int X:Id = I:Int ; => . ...</k>
+       <state> STATE => STATE [ X <- I ] </state>
+       <declared> D => D SetItem(X) </declared>
+    requires notBool X in D
+
+  // variable lookup
+  rule <k> X:Id => I ...</k>
+       <state>... X |-> I ...</state>
+       <declared>... SetItem(X) ...</declared>
+
+  syntax Bool ::= isKResult(K) [symbol, function]
+  rule isKResult(_:Int) => true
+  rule isKResult(_) => false [owise]
+endmodule
+

Now if we krun a program containing duplicate declarations, it will get
+stuck on the declaration.

+

Exercises

+ +
  1. Modify your solution to Lesson 1.14, Exercise 2 and introduce the sorts
     Decls, Decl, and Stmt which include variable and function declaration
     (without function parameters), and return and assignment statements, as well
     as call expressions. Use List and Map to implement these operators, making
     sure to consider the interactions between components, such as saving and
     restoring the environment of variables at each call site. Don't worry about
     local function definitions or global variables for now. Make sure to test the
     resulting interpreter.
+

Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.17: Cell Multiplicity and Cell Collections.

+

Lesson 1.17: Cell Multiplicity and Cell Collections

+ +

The purpose of this lesson is to explain how you can create optional cells
+and cells that repeat multiple times in a configuration using a feature called
+cell multiplicity.

+

Cell Multiplicity

+ +

K allows you to specify attributes for cell productions as part of the syntax
+of configuration declarations. Unlike regular productions, which use the []
+syntax for attributes, configuration cells use an XML-like attribute syntax:

+
configuration <k color="red"> $PGM:K </k>
+

This configuration declaration gives the <k> cell the color red during
+unparsing using the color attribute as discussed in
+Lesson 1.9.

+

However, in addition to the usual attributes for productions, there are some
+other attributes that can be applied to cells with special meaning. One such
+attribute is the multiplicity attribute. By default, each cell that is
+declared occurs exactly once in every configuration term. However, using the
+multiplicity attribute, this default behavior can be changed. There are two
+values that this attribute can have: ? and *.

+

Optional cells

+ +

The first cell multiplicity we will discuss is ?. Similar to a regular
+expression language, this attribute tells the compiler that this cell can
+appear 0 or 1 times in the configuration. In other words, it is an
+optional cell. By default, K does not create optional cells in the initial
+configuration, unless that optional cell has a configuration variable inside
+it. However, it is possible to override the default behavior and create that
+cell initially by adding the additional cell attribute initial="".
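
For instance, here is a sketch of how the configuration from the module below
could be modified, using the initial="" attribute, so that the optional cell is
nevertheless created at startup:

configuration <k> $PGM:K </k>
              <optional multiplicity="?" initial=""> 0 </optional>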

+

K uses the .Bag symbol to represent the absence of any cells in a particular
+rule. Consider the following module:

+
module LESSON-17-A
+  imports INT
+
+  configuration <k> $PGM:K </k>
+                <optional multiplicity="?"> 0 </optional>
+
+  syntax KItem ::= "init" | "destroy"
+
+  rule <k> init => . ...</k>
+       (.Bag => <optional> 0 </optional>)
+  rule <k> destroy => . ...</k>
+       (<optional> _ </optional> => .Bag)
+
+endmodule
+

In this definition, when the init symbol is executed, the <optional> cell
+is added to the configuration, and when the destroy symbol is executed, it
+is removed. Any rule that matches on that cell will only match if that cell is
+present in the configuration.
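
For example, if we extended LESSON-17-A with a hypothetical check command (a
sketch only, not part of the module above), the following rule could only apply
while the <optional> cell exists, i.e., after init and before destroy:

syntax KItem ::= "check"
rule <k> check => . ...</k>
     <optional> _ </optional>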

+

Exercise

+ +

Create a simple definition with a Stmts sort that is a List{Stmt,""} and
+a Stmt sort with the constructors
+syntax Stmt ::= "enable" | "increment" | "decrement" | "disable". The
+configuration should have an optional cell that contains an integer that
+is created with the enable command, destroyed with the disable command,
+and its value is incremented or decremented by the increment and decrement
+command.

+

Cell collections

+ +

The second type of cell multiplicity we will discuss is *. Similar to a
+regular expression language, this attribute tells the compiler that this cell
+can appear 0 or more times in the configuration. In other words, it is a
+cell collection. Cells with multiplicity * must be the only child of
+their parent cell. As a convention, the inner cell is usually named with the
+singular form of what it contains, and the outer cell with the plural form, for
+example, "thread" and "threads".

+

All cell collections are required to have the type attribute set to either
+Set or Map. A Set cell collection is represented as a set and behaves
+internally the same as the Set sort, although it actually declares a new
+sort. A Map cell collection is represented as a Map in which the first
+subcell of the cell collection is the key and the remaining cells are the
+value.

+

For example, consider the following module:

+
module LESSON-17-B
+  imports INT
+  imports BOOL
+  imports ID-SYNTAX
+
+  syntax Stmt ::= Id "=" Exp ";" [strict(2)]
+                | "return" Exp ";" [strict]
+  syntax Stmts ::= List{Stmt,""}
+  syntax Exp ::= Id
+               | Int
+               | Exp "+" Exp [seqstrict]
+               | "spawn" "{" Stmts "}"
+               | "join" Exp ";" [strict]
+
+  configuration <threads>
+                  <thread multiplicity="*" type="Map">
+                    <id> 0 </id>
+                    <k> $PGM:K </k>
+                  </thread>
+                </threads>
+                <state> .Map </state>
+                <next-id> 1 </next-id>
+
+  rule <k> X:Id => I:Int ...</k>
+       <state>... X |-> I ...</state>
+  rule <k> X:Id = I:Int ; => . ...</k>
+       <state> STATE => STATE [ X <- I ] </state>
+  rule <k> S:Stmt Ss:Stmts => S ~> Ss ...</k>
+  rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k>
+
+  rule <thread>...
+         <k> spawn { Ss } => NEXTID ...</k>
+       ...</thread>
+       <next-id> NEXTID => NEXTID +Int 1 </next-id>
+       (.Bag =>
+       <thread>
+         <id> NEXTID </id>
+         <k> Ss </k>
+       </thread>)
+
+  rule <thread>...
+         <k> join ID:Int ; => I ...</k>
+       ...</thread>
+       (<thread>
+         <id> ID </id>
+         <k> return I:Int ; ...</k>
+       </thread> => .Bag)
+
+  syntax Bool ::= isKResult(K) [function, symbol]
+  rule isKResult(_:Int) => true
+  rule isKResult(_) => false [owise]
+endmodule
+

This module implements a very basic fork/join semantics. The spawn expression
+spawns a new thread to execute a sequence of statements and returns a thread
+id, and the join statement waits until a thread executes return and then
+returns the return value of the thread.

+

Note something quite novel here: the <k> cell is inside a cell of
+multiplicity *. Since the <k> cell is just a regular cell (mostly), this
+is perfectly allowable. Rules that don't mention a specific thread are
+automatically completed to match any thread.

+

When you execute programs in this language, the cells in the cell collection
+get sorted and printed like any other collection, but they still display like
+cells. Rules in this language also benefit from all the structural power of
+cells, allowing you to omit cells you don't care about or complete the
+configuration automatically. This allows you to have the power of cells while
+still being a collection under the hood.

+

Exercises

+ +
  1. Modify the solution from Lesson 1.16, Exercise 1 so that the cell you use to
     keep track of functions in a Map is now a cell collection. Run some programs
     and compare how they get unparsed before and after this change.
+

Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.18: Term Equality and the Ternary Operator.

+

Lesson 1.18: Term Equality and the Ternary Operator

+ +

The purpose of this lesson is to introduce how to compare equality of terms in
+K, and how to put conditional expressions directly into the right-hand side of
+rules.

+

Term Equality

+ +

One major way you can compare whether two terms are equal in K is to simply
+match both terms with a variable with the same name. This will only succeed
+in matching if the two terms are equal structurally. However, sometimes this
+is impractical, and it is useful to have access to a way to actually compare
+whether two terms in K are equal. The operator for this is found in
+domains.md in the K-EQUAL
+module. The operator is ==K and takes two terms of sort K and returns a
+Bool. It returns true if they are equal. This includes equality over builtin
+types such as Map and Set where equality is not purely structural in
+nature. However, it does not include any notion of semantic equality over
+user-defined syntax. The inverse symbol for inequality is =/=K.
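
As a small example of its use, here is a sketch of a function that tests two
arbitrary terms for equality (the module and function names are hypothetical and
not part of the lessons):

module EQUAL-EXAMPLE
  imports BOOL
  imports K-EQUAL

  syntax Bool ::= sameTerm(K, K) [function]
  rule sameTerm(T1:K, T2:K) => T1 ==K T2
endmodule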

+

Ternary Operator

+ +

One way to introduce conditional logic in K is to have two separate rules,
+each with a side condition (or one rule with a side condition and another with
+the owise attribute). However, sometimes it is useful to explicitly write
+a conditional expression directly in the right-hand side of a rule. For this
+purpose, K defines one more operator in the K-EQUAL module, which corresponds
+to the usual ternary operator found in many languages. Here is an example of its
+usage (lesson-18.k):

+
module LESSON-18
+  imports INT
+  imports BOOL
+  imports K-EQUAL
+
+  syntax Exp ::= Int | Bool | "if" "(" Exp ")" Exp "else" Exp [strict(1)]
+
+  syntax Bool ::= isKResult(K) [function, symbol]
+  rule isKResult(_:Int) => true
+  rule isKResult(_:Bool) => true
+
+  rule if (B:Bool) E1:Exp else E2:Exp => #if B #then E1 #else E2 #fi
+endmodule
+

Note the symbol on the right-hand side of the final rule. This symbol is
+polymorphic: B must be of sort Bool, but E1 and E2 could have been
+any sort so long as both were of the same sort, and the sort of the entire
+expression becomes equal to that sort. K supports polymorphic built-in
+operators, but does not yet allow users to write their own polymorphic
+productions.

+

The behavior of this function is to evaluate the Boolean expression to a
+Boolean, then pick one of the two children and return it based on whether the
+Boolean is true or false. Please note that it is not a good idea to use this
+symbol in cases where one or both of the children is potentially undefined
+(for example, an integer expression that divides by zero). While the default
+implementation is smart enough to only evaluate the branch that happens to be
+picked, this will not be true when we begin to do program verification. If
+you need short circuiting behavior, it is better to use a side condition.
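
For instance, here is a sketch of how the conditional from LESSON-18 could
instead be written with two rules and side conditions, giving the short
circuiting behavior described above (notBool comes from the BOOL module, which
LESSON-18 already imports):

rule if (B:Bool) E1:Exp else _ => E1 requires B
rule if (B:Bool) _ else E2:Exp => E2 requires notBool B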

+

Exercises

+ +
  1. Write a function in K that takes two terms of sort K and returns an
     Int: the Int should be 0 if the terms are equal and 1 if the terms are
     unequal.

  2. Modify your solution to Lesson 1.16, Exercise 1 and introduce an if
     Stmt to the syntax of the language, then implement it using the #if symbol.
     Make sure to write tests for the resulting interpreter.
+

Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.19: Debugging with GDB.

+

Lesson 1.19: Debugging with GDB or LLDB

+ +

The purpose of this lesson is to teach how to debug your K interpreter using
+the K-language support provided in GDB or
+LLDB.

+

Caveats

+ +

This lesson has been written with GDB support on Linux in mind. Unfortunately,
+on macOS, GDB has limited support. To address this, we have introduced early
+experimental support for debugging with LLDB on macOS. In some cases, the
features supported by LLDB differ slightly from those supported by GDB; the
tutorial text will make this clear where necessary. If your macOS installation
has an LLVM version older than 15, you may need to upgrade it for LLDB to work
correctly. If you encounter an issue on either operating system, please open an
issue against the K repository.

+

Getting started

+ +

On Linux, you will need GDB in order to complete this lesson. If you do not
already have GDB installed, install it now; steps to install GDB are outlined
in this GDB Tutorial.

+

On macOS, LLDB should already have been installed with K's build dependencies
+(whether you have built K from source, or installed it using kup or Homebrew).

+

The first thing necessary in order to debug a K interpreter is to build the
+interpreter with full debugging support enabled. This can be done relatively
+simply. First, run kompile with the command line flag --enable-llvm-debug.
+The resulting compiled K definition will be ready to support debugging.

+

Once you have a compiled K definition and a program you wish to debug, you can
+start the debugger by passing the --debugger flag to krun. This will
+automatically load the program you are executing into GDB and drop you into a
+GDB shell ready to start executing the program.

+

As an example, consider the following K definition (lesson-19-a.k):

+
module LESSON-19-A
+  imports INT
+
+  rule I => I +Int 1
+    requires I <Int 100
+endmodule
+

If we compile this definition with kompile lesson-19-a.k --enable-llvm-debug,
+and run the program 0 in the debugger with krun -cPGM=0 --debugger, we will
+see the following output (roughly, and depending on which platform you are
+using):

+

GDB / Linux

+ +
GNU gdb (Ubuntu 9.2-0ubuntu1~20.04) 9.2
+Copyright (C) 2020 Free Software Foundation, Inc.
+License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.
+Type "show copying" and "show warranty" for details.
+This GDB was configured as "x86_64-linux-gnu".
+Type "show configuration" for configuration details.
+For bug reporting instructions, please see:
+<http://www.gnu.org/software/gdb/bugs/>.
+Find the GDB manual and other documentation resources online at:
+    <http://www.gnu.org/software/gdb/documentation/>.
+
+For help, type "help".
+Type "apropos word" to search for commands related to "word"...
+Reading symbols from ./lesson-19-a-kompiled/interpreter...
+warning: File "/home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-a-kompiled/interpreter" auto-loading has been declined by your `auto-load safe-path' set to "$debugdir:$datadir/auto-load".
+To enable execution of this file add
+        add-auto-load-safe-path /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-a-kompiled/interpreter
+line to your configuration file "/home/dwightguth/.gdbinit".
+To completely disable this security protection add
+        set auto-load safe-path /
+line to your configuration file "/home/dwightguth/.gdbinit".
+For more information about this security protection see the
+"Auto-loading safe path" section in the GDB manual.  E.g., run from the shell:
+        info "(gdb)Auto-loading safe path"
+(gdb)
+

To take full advantage of the GDB features of K, you should follow the first
+command listed in this output message and add the corresponding
+add-auto-load-safe-path command to your ~/.gdbinit file as prompted.
+Please note that the path will be different on your machine than the one
+listed above. Adding directories to the "load safe path" effectively tells GDB
+to trust those directories. All content under a given directory will be recursively
+trusted, so if you want to avoid having to add paths to the "load safe path" every
+time you kompile a different K definition, then you can just trust a minimal
directory containing all your kompiled files; however, do not choose a top-level
directory containing arbitrary files, as this amounts to trusting arbitrary files
and is a security risk. More info on the load safe path
+can be found here.

+

LLDB / macOS

+ +
(lldb) target create "./lesson-19-a-kompiled/interpreter"
+warning: 'interpreter' contains a debug script. To run this script in this debug session:
+
+    command script import "/Users/brucecollie/code/scratch/lesson-19-a-kompiled/interpreter.dSYM/Contents/Resources/Python/interpreter.py"
+
+To run all discovered debug scripts in this session:
+
+    settings set target.load-script-from-symbol-file true
+
+Current executable set to '/Users/brucecollie/code/scratch/lesson-19-a-kompiled/interpreter' (x86_64).
+(lldb) settings set -- target.run-args  ".krun-2023-03-20-11-22-46-TcYt9ffhb2/tmp.in.RupiLwHNfn" "-1" ".krun-2023-03-20-11-22-46-TcYt9ffhb2/result.kore"
+(lldb) 
+

LLDB applies slightly different security policies to GDB. To load K's debugging
+scripts for this session only, you can run the command script import line at
+the LLDB prompt. The loaded scripts will not persist across debugging sessions
+if you do this. It is also possible to configure LLDB to automatically load the
+K scripts when an interpreter is started in LLDB; doing so requires a slightly
+less broad permission than GDB.

+

On macOS, the .dSYM directory that contains debugging symbols for an
+executable can also contain Python scripts in Contents/Resources/Python. If
+there is a Python script with a name matching the name of the current executable
(here, interpreter and interpreter.py), it will be automatically loaded if
the target.load-script-from-symbol-file setting is set. You can therefore add
+the settings set command to your ~/.lldbinit without enabling full arbitrary
+code execution, but you should be aware of the paths from which code can be
+executed if you do so.

+

Basic commands

+ +
+

LLDB Note: the k start and k step commands are currently not
+implemented in the K LLDB scripts. To work around this limitation temporarily,
+you can run process launch --stop-at-entry instead of k start. To emulate
k step, first run rbreak k_step once, then continue instead of each k step.
We hope to address these limitations soon.

+
+

The most basic commands you can execute in the K GDB session are to run your
+program or to step through it. The first can be accomplished using GDB's
+built-in run command. This will automatically start the program and begin
+executing it. It will continue until the program aborts or finishes, or the
+debugger is interrupted with Ctrl-C.

+

Sometimes you want finer-grained control over how you proceed through the
+program you are debugging. To step through the rule applications in your
+program, you can use the k start and k step GDB commands.

+

k start is similar to the built-in start command in that it starts the
program and then immediately breaks before doing any work. However, unlike
the built-in start command, which breaks immediately after the main method of
the program begins executing, the k start command will initialize the rewriter,
evaluate the initial configuration, and break immediately prior to applying
any rewrite steps.

+

In the example above, here is what we see when we run the k start command:

+
Temporary breakpoint 1 at 0x239210
+Starting program: /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-a-kompiled/interpreter .krun-2021-08-13-14-10-50-sMwBkbRicw/tmp.in.01aQt85TaA -1 .krun-2021-08-13-14-10-50-sMwBkbRicw/result.kore
+[Thread debugging using libthread_db enabled]
+Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
+
+Temporary breakpoint 1, 0x0000000000239210 in main ()
+0x0000000000231890 in step (subject=<k>
+  0 ~> .
+</k>)
+(gdb)
+

As you can see, we are stopped at the step function in the interpreter.
+This function is responsible for taking top-level rewrite steps. The subject
+parameter to this function is the current K configuration.

+

We can step through K rewrite steps one at a time by running the k step
+command. By default, this takes a single rewrite step (including any function
+rule applications that are part of that step).

+

Here is what we see when we run that command:

+
Continuing.
+
+Temporary breakpoint -22, 0x0000000000231890 in step (subject=<k>
+  1 ~> .
+</k>)
+(gdb)
+

As we can see, we have taken a single rewrite step. We can also pass a number
+to the k step command which indicates the number of rewrite steps to take.

+

Here is what we see if we run k step 10:

+
Continuing.
+
+Temporary breakpoint -23, 0x0000000000231890 in step (subject=<k>
+  11 ~> .
+</k>)
+(gdb)
+

As we can see, ten rewrite steps were taken.

+

Breakpoints

+ +

The next important step in debugging an application in GDB is to be able to
set breakpoints. Generally speaking, there are three types of breakpoints we
are interested in when debugging a K semantics: a breakpoint when a particular
function is called, a breakpoint when a particular rule is applied, and a
breakpoint when a side condition of a rule is evaluated.

+

The easiest way to do the first two things is to set a breakpoint on the
+line of code containing the function or rule.

+

For example, consider the following K definition (lesson-19-b.k):

+
module LESSON-19-B
+  imports BOOL
+
+  syntax Bool ::= isBlue(Fruit) [function]
+  syntax Fruit ::= Blueberry() | Banana()
+  rule isBlue(Blueberry()) => true
+  rule isBlue(Banana()) => false
+
+  rule F:Fruit => isBlue(F)
+endmodule
+

Once this program has been compiled for debugging, we can run the program
+Blueberry(). We can then set a breakpoint that stops when the isBlue
+function is called with the following command in GDB:

+
break lesson-19-b.k:4
+

Similarly, in LLDB, run:

+
breakpoint set --file lesson-19-b.k --line 4
+

Here is what we see if we set this breakpoint and then run the interpreter:

+
(gdb) break lesson-19-b.k:4
+Breakpoint 1 at 0x231040: file /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b.k, line 4.
+(gdb) run
+Starting program: /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b-kompiled/interpreter .krun-2021-08-13-14-20-27-vXOQmV6lwS/tmp.in.fga98yqXlc -1 .krun-2021-08-13-14-20-27-vXOQmV6lwS/result.kore
+[Thread debugging using libthread_db enabled]
+Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
+
+Breakpoint 1, LblisBlue'LParUndsRParUnds'LESSON-19-B'Unds'Bool'Unds'Fruit (_1=Blueberry ( )) at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b.k:4
+4         syntax Bool ::= isBlue(Fruit) [function]
+(gdb)
+
(lldb) breakpoint set --file lesson-19-b.k --line 4
+Breakpoint 1: where = interpreter`LblisBlue'LParUndsRParUnds'LESSON-19-B'Unds'Bool'Unds'Fruit + 20 at lesson-19-b.k:4:19, address = 0x0000000100003ff4
+(lldb) run
+Process 50546 launched: '/Users/brucecollie/code/scratch/lesson-19-b-kompiled/interpreter' (x86_64)
+Process 50546 stopped
+* thread #1, queue = 'com.apple.main-thread', stop reason = breakpoint 1.1
+    frame #0: 0x0000000100003ff4 interpreter`LblisBlue'LParUndsRParUnds'LESSON-19-B'Unds'Bool'Unds'Fruit(_1=Blueberry ( )) at lesson-19-b.k:4:19
+   1   	module LESSON-19-B
+   2   	  imports BOOL
+   3   	
+-> 4   	  syntax Bool ::= isBlue(Fruit) [function]
+   5   	  syntax Fruit ::= Blueberry() | Banana()
+   6   	  rule isBlue(Blueberry()) => true
+   7   	  rule isBlue(Banana()) => false
+(lldb)
+

As we can see, we have stopped at the point where we are evaluating that
+function. The value _1 that is a parameter to that function shows the
+value passed to the function by the caller.

+

We can also break when the isBlue(Blueberry()) => true rule applies by simply
+changing the line number to the line number of that rule:

+
(gdb) break lesson-19-b.k:6
+Breakpoint 1 at 0x2af710: file /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b.k, line 6.
+(gdb) run
+Starting program: /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b-kompiled/interpreter .krun-2021-08-13-14-32-36-7kD0ic7XwD/tmp.in.8JNH5Qtmow -1 .krun-2021-08-13-14-32-36-7kD0ic7XwD/result.kore
+[Thread debugging using libthread_db enabled]
+Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
+
+Breakpoint 1, apply_rule_138 () at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b.k:6
+6         rule isBlue(Blueberry()) => true
+(gdb)
+
(lldb) breakpoint set --file lesson-19-b.k --line 6
+Breakpoint 1: where = interpreter`apply_rule_140 at lesson-19-b.k:6:8, address = 0x0000000100004620
+(lldb) run
+Process 50681 launched: '/Users/brucecollie/code/scratch/lesson-19-b-kompiled/interpreter' (x86_64)
+Process 50681 stopped
+* thread #1, queue = 'com.apple.main-thread', stop reason = breakpoint 1.1
+    frame #0: 0x0000000100004620 interpreter`apply_rule_140 at lesson-19-b.k:6:8
+   3   	
+   4   	  syntax Bool ::= isBlue(Fruit) [function]
+   5   	  syntax Fruit ::= Blueberry() | Banana()
+-> 6   	  rule isBlue(Blueberry()) => true
+   7   	  rule isBlue(Banana()) => false
+   8   	
+   9   	  rule F:Fruit => isBlue(F)
+(lldb) 
+

We can also do the same with a top-level rule:

+
(gdb) break lesson-19-b.k:9
+Breakpoint 1 at 0x2aefa0: lesson-19-b.k:9. (2 locations)
+(gdb) run
+Starting program: /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b-kompiled/interpreter .krun-2021-08-13-14-33-13-9fC8Sz4aO3/tmp.in.jih1vtxSiQ -1 .krun-2021-08-13-14-33-13-9fC8Sz4aO3/result.kore
+[Thread debugging using libthread_db enabled]
+Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
+
+Breakpoint 1, apply_rule_107 (Var'Unds'DotVar0=<generatedCounter>
+  0
+</generatedCounter>, Var'Unds'DotVar1=., VarF=Blueberry ( )) at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b.k:9
+9         rule F:Fruit => isBlue(F)
+(gdb)
+
(lldb) breakpoint set --file lesson-19-b.k --line 9
+Breakpoint 1: 2 locations.
+(lldb) run
+Process 50798 launched: '/Users/brucecollie/code/scratch/lesson-19-b-kompiled/interpreter' (x86_64)
+Process 50798 stopped
+* thread #1, queue = 'com.apple.main-thread', stop reason = breakpoint 1.1
+    frame #0: 0x0000000100003f2e interpreter`apply_rule_109(Var'Unds'DotVar0=<generatedCounter>
+  0
+</generatedCounter>, Var'Unds'DotVar1=., VarF=Blueberry ( )) at lesson-19-b.k:9:8
+   6   	  rule isBlue(Blueberry()) => true
+   7   	  rule isBlue(Banana()) => false
+   8   	
+-> 9   	  rule F:Fruit => isBlue(F)
+   10  	endmodule
+(lldb)  
+

Unlike the function rule above, we see several parameters to this function.
These make up the substitution that was matched for the rule. Variables only
appear in this substitution if they are actually used on the right-hand side
of the rule.

+

Advanced breakpoints

+ +

Sometimes it is inconvenient to set the breakpoint based on a line number.

+

It is also possible to set a breakpoint based on the rule label of a particular
+rule. Consider the following definition (lesson-19-c.k):

+
module LESSON-19-C
+  imports INT
+  imports BOOL
+
+  syntax Bool ::= isEven(Int) [function]
+  rule [isEven]: isEven(I) => true requires I %Int 2 ==Int 0
+  rule [isOdd]: isEven(I) => false requires I %Int 2 =/=Int 0
+
+endmodule
+

We will run the program isEven(4). We can set a breakpoint for when a rule
+applies by means of the MODULE-NAME.label.rhs syntax:

+
(gdb) break LESSON-19-C.isEven.rhs
+Breakpoint 1 at 0x2afda0: file /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c.k, line 6.
+(gdb) run
+Starting program: /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c-kompiled/interpreter .krun-2021-08-13-14-40-29-LNNT8YEZ61/tmp.in.ZG93vWCGGC -1 .krun-2021-08-13-14-40-29-LNNT8YEZ61/result.kore
+[Thread debugging using libthread_db enabled]
+Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
+
+Breakpoint 1, LESSON-19-C.isEven.rhs () at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c.k:6
+6         rule [isEven]: isEven(I) => true requires I %Int 2 ==Int 0
+(gdb)
+
(lldb) breakpoint set --name LESSON-19-C.isEven.rhs
+Breakpoint 1: where = interpreter`LESSON-19-C.isEven.rhs at lesson-19-c.k:6:18, address = 0x00000001000038e0
+(lldb) run
+Process 51205 launched: '/Users/brucecollie/code/scratch/lesson-19-c-kompiled/interpreter' (x86_64)
+Process 51205 stopped
+* thread #1, queue = 'com.apple.main-thread', stop reason = breakpoint 1.1
+    frame #0: 0x00000001000038e0 interpreter`LESSON-19-C.isEven.rhs at lesson-19-c.k:6:18
+   3   	  imports BOOL
+   4   	
+   5   	  syntax Bool ::= isEven(Int) [function]
+-> 6   	  rule [isEven]: isEven(I) => true requires I %Int 2 ==Int 0
+   7   	  rule [isOdd]: isEven(I) => false requires I %Int 2 =/=Int 0
+   8   	
+   9   	endmodule
+(lldb) 
+

We can also set a breakpoint for when a rule's side condition is evaluated
+by means of the MODULE-NAME.label.sc syntax:

+
(gdb) break LESSON-19-C.isEven.sc
+Breakpoint 1 at 0x2afd70: file /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c.k, line 6.
+(gdb) run
+Starting program: /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c-kompiled/interpreter .krun-2021-08-13-14-41-48-1BoGfJRbYc/tmp.in.kg4F8cwfCe -1 .krun-2021-08-13-14-41-48-1BoGfJRbYc/result.kore
+[Thread debugging using libthread_db enabled]
+Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
+
+Breakpoint 1, LESSON-19-C.isEven.sc (VarI=4) at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c.k:6
+6         rule [isEven]: isEven(I) => true requires I %Int 2 ==Int 0
+(gdb) finish
+Run till exit from #0  LESSON-19-C.isEven.sc (VarI=4) at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c.k:6
+0x00000000002b2662 in LblisEven'LParUndsRParUnds'LESSON-19-C'Unds'Bool'Unds'Int (_1=4) at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c.k:5
+5         syntax Bool ::= isEven(Int) [function]
+Value returned is $1 = true
+(gdb)
+
(lldb) breakpoint set --name LESSON-19-C.isEven.sc
+Breakpoint 1: where = interpreter`LESSON-19-C.isEven.sc + 1 at lesson-19-c.k:6:18, address = 0x00000001000038c1
+(lldb) run
+Process 52530 launched: '/Users/brucecollie/code/scratch/lesson-19-c-kompiled/interpreter' (x86_64)
+Process 52530 stopped
+* thread #1, queue = 'com.apple.main-thread', stop reason = breakpoint 1.1
+    frame #0: 0x00000001000038c1 interpreter`LESSON-19-C.isEven.sc(VarI=0x0000000101800088) at lesson-19-c.k:6:18
+   3   	  imports BOOL
+   4   	
+   5   	  syntax Bool ::= isEven(Int) [function]
+-> 6   	  rule [isEven]: isEven(I) => true requires I %Int 2 ==Int 0
+   7   	  rule [isOdd]: isEven(I) => false requires I %Int 2 =/=Int 0
+   8   	
+   9   	endmodule
+(lldb) finish
+Process 52649 stopped
+* thread #1, queue = 'com.apple.main-thread', stop reason = step out
+Return value: (bool) $0 = true
+
+    frame #0: 0x00000001000069e5 interpreter`LblisEven'LParUndsRParUnds'LESSON-19-C'Unds'Bool'Unds'Int(_1=0x0000000101800088) at lesson-19-c.k:5:19
+   2   	  imports INT
+   3   	  imports BOOL
+   4   	
+-> 5   	  syntax Bool ::= isEven(Int) [function]
+   6   	  rule [isEven]: isEven(I) => true requires I %Int 2 ==Int 0
+   7   	  rule [isOdd]: isEven(I) => false requires I %Int 2 =/=Int 0
+   8
+(lldb)
+

Here we have used the built-in command finish to tell us whether the side
+condition returned true or not. Note that once again, we see the substitution
+that was matched from the left-hand side. Like before, a variable will only
+appear here if it is used in the side condition.

+

Debugging rule matching

+ +

Sometimes it is useful to try to determine why a particular rule did or did
+not apply. K provides some basic debugging commands which make it easier
+to determine this.

+

Consider the following K definition (lesson-19-d.k):

+
module LESSON-19-D
+
+  syntax Foo ::= foo(Bar)
+  syntax Bar ::= bar(Baz) | bar2(Baz)
+  syntax Baz ::= baz() | baz2()
+
+  rule [baz]: foo(bar(baz())) => .K
+
+endmodule
+

Suppose we try to run the program foo(bar(baz2())). It is obvious from this
+example why the rule in this definition will not apply. However, in practice,
+such cases are not always obvious. You might look at a rule and not immediately
+spot why it didn't apply on a particular term. For this reason, it can be
+useful to get the debugger to provide a log about how it tried to match that
+term. You can do this with the k match command. If you are stopped after
+having run k start or k step, you can obtain this log for any rule after
+any step by running the command k match MODULE.label subject for a particular
+top-level rule label.

+

For example, with the baz rule above, we get the following output:

+
(gdb) k match LESSON-19-D.baz subject
+Subject:
+baz2 ( )
+does not match pattern:
+baz ( )
+
(lldb) k match LESSON-19-D.baz subject
+Subject:
+baz2 ( )
+does not match pattern:
+baz ( )
+

As we can see, it provided the exact subterm which did not match against the
+rule, as well as the particular subpattern it ought to have matched against.

+

This command does not actually take any rewrite steps. In the event that
+matching actually succeeds, you will still need to run the k step command
+to advance to the next step.

+

Final notes

+ +

In addition to the functionality provided above, you have the full power of
+GDB or LLDB at your disposal when debugging. Some features are not particularly
+well-adapted to K code and may require more advanced knowledge of the
+term representation or implementation to use effectively, but anything that
+can be done in GDB or LLDB can in theory be done using this debugging functionality.
+We suggest you refer to the
+GDB Documentation or
+LLDB Tutorial if you
+want to try to do something and are unsure as to how.

+

Exercises

+ +
  1. Compile your solution to Lesson 1.18, Exercise 2 with debugging support
     enabled and step through several programs you have previously used to test.
     Then set a breakpoint on the isKResult function and observe the state of the
     interpreter when stopped at that breakpoint. Set a breakpoint on the rule for
     addition and run a program that causes it to be stopped at that breakpoint.
     Finally, step through the program until the addition symbol is at the top
     of the K cell, and then use the k match command to report the reason why
     the subtraction rule does not apply. You may need to modify the definition
     to insert some rule labels.
+

Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.20: K Backends and the Haskell Backend.

+

Lesson 1.20: K Backends and the Haskell Backend

+ +

The purpose of this lesson is to teach about the multiple backends of K,
+in particular the Haskell Backend which is the complement of the backend we
+have been using so far.

+

K Backends

+ +

Thus far, we have not discussed the distinction between the K frontend and
+the K backends at all. We have simply assumed that if you run kompile on a
+K definition, there will be a compiler backend that will allow you to execute
+the K definition you have compiled.

+

K actually has multiple different backends. The one we have been using so far
+implicitly, the default backend, is called the LLVM Backend. It is
+designed to support efficient, optimized concrete execution and search. It
does this by compiling your K definition to LLVM bitcode and then using LLVM
to generate machine code for it, which is then linked and executed.
+However, K is a formal methods toolkit at the end of the day, and the primary
+goal many people have when defining a programming language in K is to
+ultimately be able to perform more advanced verification on programs in their
+programming language.

+

It is for this purpose that K also provides the Haskell Backend, so called
+because it is implemented in Haskell. While we will cover the features of the
+Haskell Backend in more detail in the next two lessons, the important thing to
+understand is that it is a separate backend which is optimized for more formal
+reasoning about programming languages. While it is capable of performing
+concrete execution, it does not do so as efficiently as the LLVM Backend.
+In exchange, it provides more advanced features.

+

Choosing a backend

+ +

You can choose which backend to use to compile a K definition by means of the
--backend flag to kompile. If you do not specify this flag, the default is
equivalent to specifying --backend llvm. However, to use the
+Haskell Backend instead, you can simply say kompile --backend haskell on a
+particular K definition.

+

As an example, here is a simple K definition that we have seen before in the
+previous lesson (lesson-20.k):

+
module LESSON-20
+  imports INT
+
+  rule I => I +Int 1
+    requires I <Int 100
+endmodule
+

Previously we compiled this definition using the LLVM Backend, but if we
+instead execute the command kompile lesson-20.k --backend haskell, we
+will get an interpreter for this K definition that is implemented in Haskell
+instead. Unlike the default LLVM Backend, the Haskell Backend is not a
+compiler per se. It does not generate new Haskell code corresponding to your
programming language and then compile and execute it. Instead, it is an
interpreter, written in Haskell, which reads the IR generated by kompile and
is capable of interpreting any K definition directly.

+

Note that on arm64 macOS (Apple Silicon), there is a known issue with the Compact
+library that causes crashes in the Haskell backend. Pass the additional flag
+--no-haskell-binary to kompile to resolve this.
+This flag is also needed when using krun.

+

Exercise

+ +

Try running the program 0 in this K definition on the Haskell Backend and
+compare the final configuration to what you would get compiling the same
+definition with the LLVM Backend.

+

Legacy backends

+ +

As a quick note, K does provide one other backend, which exists primarily as
+legacy code which should be considered deprecated. This is the
+Java Backend. The Java Backend is essentially a precursor to the Haskell
Backend. We will not cover this backend in any detail since it is deprecated,
but we mention it here for completeness.

+

Exercises

+ +
  1. Compile your solution to Lesson 1.18, Exercise 2 with the Haskell Backend
     and execute some programs. Compare the resulting configurations with the
     output of the same program on the LLVM Backend. Note that if you are getting
     different behaviors on the Haskell backend, you might have some luck debugging
     by passing --search to krun when using the LLVM backend.
+

Next lesson

+ +

Once you have completed the above exercises, you can continue to
+Lesson 1.21: Unification and Symbolic Execution.

+

Lesson 1.21: Unification and Symbolic Execution

+ +

The purpose of this lesson is to teach the basic concepts of symbolic execution
+in order to introduce the unique capabilities of the Haskell Backend at a
+conceptual level.

+

Symbolic Execution

+ +

Thus far, all of the programs we have run using K have been concrete
+configurations. What this means is that the configuration we use to initialize
the K rewrite engine is concrete; in other words, it contains no logical
variables. The LLVM Backend is a concrete execution engine, meaning that
+it is only capable of rewriting concrete configurations.

+

By contrast, the Haskell Backend performs symbolic execution, which is
+capable of rewriting any configuration, including those where parts of the
configuration are symbolic, i.e., contain variables or uninterpreted
functions.

+

Unification

+ +

Previously, we have introduced the concept that K rewrite rules operate by
+means of pattern matching: the current configuration being rewritten is pattern
+matched against the left-hand side of the rewrite rule, and the substitution
+is used in order to construct a new term from the right-hand side. In symbolic
+execution, we use
+unification
+instead of pattern matching. To summarize, unification behaves akin to a
+two-way pattern matching where both the configuration and the left-hand side
+of the rule can contain variables, and the algorithm generates a
+most general unifier containing substitutions for the variables in both
+which will make both terms equal.
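
As a small illustration (the constructor foo and the terms here are made up for
this example, not drawn from the lesson), unifying the following two terms
succeeds with a most general unifier that binds variables occurring on both
sides, whereas ordinary pattern matching would fail because the subject itself
contains a variable:

term 1 (subject): foo(X:Int, 1)
term 2 (pattern): foo(2, Y:Int)
most general unifier: X = 2, Y = 1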

+

Feasibility

+ +

Unification by itself cannot completely solve the problem of symbolic
+execution. One task symbolic execution must perform is to identify whether
+a particular symbolic term is feasible, that is to say, that there actually
+exists a concrete instantiation of that term such that all the logical
+constraints on that term can actually be satisfied. The Haskell Backend
+delegates this task to Z3, an
+SMT solver.
+This solver is used to periodically trim configurations that are determined
+to be mathematically infeasible.

+

Symbolic terms

+ +

The final component of symbolic execution consists of the task of introducing
+symbolic terms into the configuration. This can be done one of two different
+ways. First, the term being passed to krun can actually be symbolic. This
+is less frequently used because it requires the user to construct an AST
+that contains variables, something which our current parsing capabilities are
+not well-equipped to do. The second, more common, way of introducing symbolic
terms into a configuration consists of writing rules where there exists an
existentially quantified variable on the right-hand side of the rule that does
not exist on the left-hand side of the rule.

+

In order to prevent users from writing such rules by accident, K requires
+that such variables begin with the ? prefix. For example, here is a rule
+that rewrites a constructor foo to a symbolic integer:

+
rule <k> foo => ?X:Int ...</k>
+

When this rule applies, a fresh variable is introduced to the configuration, which
+then is unified against the rules that might apply in order to symbolically
+execute that configuration.

+

ensures clauses

+ +

We also introduce here a new feature of K rules that applies when a rule
+has this type of variable on the right-hand side: the ensures clause.
+An ensures clause is similar to a requires clause and can appear after
+a rule body, or after a requires clause. The ensures clause is used to
+introduce constraints that might apply to the variable that was introduced by
+that rule. For example, we could write the rule above with the additional
+constraint that the symbolic integer that was introduced must be less than
+five, by means of the following rule:

+
rule <k> foo => ?X:Int ...</k> ensures ?X <Int 5
+

Putting it all together

+ +

Putting all these pieces together, it is possible to use the Haskell Backend
+to perform symbolic reasoning about a particular K module, determining all the
+possible states that can be reached by a symbolic configuration.

+

For example, consider the following K definition (lesson-21.k):

+
module LESSON-21
+    imports INT
+
+    rule <k> 0 => ?X:Int ... </k> ensures ?X =/=Int 0
+    rule <k> X:Int => 5  ... </k> requires X >=Int 10
+endmodule
+

When we symbolically execute the program 0, we get the following output
+from the Haskell Backend:

+
    <k>
+      5 ~> .
+    </k>
+  #And
+    {
+      true
+    #Equals
+      ?X:Int >=Int 10
+    }
+  #And
+    #Not ( {
+      ?X:Int
+    #Equals
+      0
+    } )
+#Or
+    <k>
+      ?X:Int ~> .
+    </k>
+  #And
+    #Not ( {
+      true
+    #Equals
+      ?X:Int >=Int 10
+    } )
+  #And
+    #Not ( {
+      ?X:Int
+    #Equals
+      0
+    } )
+

Note some new symbols introduced by this configuration: #And, #Or, and
+#Equals. While andBool, orBool, and ==K represent functions of sort
+Bool, #And, #Or, and #Equals are matching logic connectives. We
+will discuss matching logic in more detail later in the tutorial, but the basic
+idea is that these symbols represent Boolean operators over the domain of
+configurations and constraints, as opposed to over the Bool sort.

+

Notice that the configuration listed above is a disjunction of conjunctions.
+This is the most common form of output that can be produced by the Haskell
+Backend. In this case, each conjunction consists of a configuration and a set
+of constraints. What this conjunction describes, essentially, is a
+configuration and a set of information that was derived to be true while
+rewriting that configuration.

+

Similar to how we saw --search in a previous lesson, the reason we have
multiple disjuncts is that there are multiple possible output states
for this program, depending on whether or not the second rule applied. In the
+first case, we see that ?X is greater than or equal to 10, so the second rule
+applied, rewriting the symbolic integer to the concrete integer 5. In the
+second case, we see that the second rule did not apply because ?X is less
+than 10. Moreover, because of the ensures clause on the first rule, we know
+that ?X is not zero, therefore the first rule will not apply a second time.
+If we had omitted this constraint, we would have ended up infinitely applying
+the first rule, leading to krun not terminating.

+

In the next lesson, we will cover how symbolic execution forms the backbone
+of deductive program verification in K and how we can use K to prove programs
+correct against a specification.

+

Exercises

+ +
  1. Create another rule in LESSON-21 that rewrites odd integers greater than
     ten to a symbolic even integer less than 10 and greater than 0. This rule will
     now apply nondeterministically along with the existing rules. Predict what the
     resulting output configuration will be from rewriting 0 after adding this
     rule. Then run the program and see whether your prediction is correct.
+

Once you have completed the above exercises, you can continue to
+Lesson 1.22: Basics of Deductive Program Verification using K.

+

Lesson 1.22: Basics of Deductive Program Verification using K

+ +

In this lesson, you will familiarize yourself with the basics of using K for
+deductive program verification.

+

1. Setup: Simple Programming Language with Function Calls

+ +

We base this lesson on a simple programming language with functions,
+assignment, if conditionals, and while loops. Take your time to study its
+formalization below (lesson-22.k):

+
module LESSON-22-SYNTAX
+    imports INT-SYNTAX
+    imports BOOL-SYNTAX
+    imports ID-SYNTAX
+
+    syntax Exp ::= IExp | BExp
+
+    syntax IExp ::= Id | Int
+
+    syntax KResult ::= Int | Bool | Ints
+
+    // Take this sort structure:
+    //
+    //     IExp
+    //    /    \
+    // Int      Id
+    //
+    // Through the List{_, ","} functor.
+    // Must add a `Bot`, for a common subsort for the empty list.
+
+    syntax Bot
+    syntax Bots ::= List{Bot, ","} [klabel(exps)]
+    syntax Ints ::= List{Int, ","} [klabel(exps)]
+                  | Bots
+    syntax Ids  ::= List{Id, ","}  [klabel(exps)]
+                  | Bots
+    syntax Exps ::= List{Exp, ","} [klabel(exps), seqstrict]
+                  | Ids | Ints
+
+    syntax IExp ::= "(" IExp ")" [bracket]
+                  | IExp "+" IExp [seqstrict]
+                  | IExp "-" IExp [seqstrict]
+                  > IExp "*" IExp [seqstrict]
+                  | IExp "/" IExp [seqstrict]
+                  > IExp "^" IExp [seqstrict]
+                  | Id "(" Exps ")" [strict(2)]
+
+    syntax BExp ::= Bool
+
+    syntax BExp ::= "(" BExp ")" [bracket]
+                  | IExp "<=" IExp [seqstrict]
+                  | IExp "<"  IExp [seqstrict]
+                  | IExp ">=" IExp [seqstrict]
+                  | IExp ">"  IExp [seqstrict]
+                  | IExp "==" IExp [seqstrict]
+                  | IExp "!=" IExp [seqstrict]
+
+    syntax BExp ::= BExp "&&" BExp
+                  | BExp "||" BExp
+
+    syntax Stmt ::=
+         Id "=" IExp ";" [strict(2)]                        // Assignment
+       | Stmt Stmt [left]                                   // Sequence
+       | Block                                              // Block
+       | "if" "(" BExp ")" Block "else" Block [strict(1)]   // If conditional
+       | "while" "(" BExp ")" Block                         // While loop
+       | "return" IExp ";"                    [seqstrict]   // Return statement
+       | "def" Id "(" Ids ")" Block                         // Function definition
+
+    syntax Block ::=
+         "{" Stmt "}"    // Block with statement
+       | "{" "}"         // Empty block
+endmodule
+
+module LESSON-22
+    imports INT
+    imports BOOL
+    imports LIST
+    imports MAP
+    imports LESSON-22-SYNTAX
+
+    configuration
+      <k> $PGM:Stmt </k>
+      <store> .Map </store>
+      <funcs> .Map </funcs>
+      <stack> .List </stack>
+
+ // -----------------------------------------------
+    rule <k> I1 + I2 => I1 +Int I2 ... </k>
+    rule <k> I1 - I2 => I1 -Int I2 ... </k>
+    rule <k> I1 * I2 => I1 *Int I2 ... </k>
+    rule <k> I1 / I2 => I1 /Int I2 ... </k>
+    rule <k> I1 ^ I2 => I1 ^Int I2 ... </k>
+
+    rule <k> I:Id => STORE[I] ... </k>
+         <store> STORE </store>
+
+ // ------------------------------------------------
+    rule <k> I1 <= I2 => I1  <=Int I2 ... </k>
+    rule <k> I1  < I2 => I1   <Int I2 ... </k>
+    rule <k> I1 >= I2 => I1  >=Int I2 ... </k>
+    rule <k> I1  > I2 => I1   >Int I2 ... </k>
+    rule <k> I1 == I2 => I1  ==Int I2 ... </k>
+    rule <k> I1 != I2 => I1 =/=Int I2 ... </k>
+
+    rule <k> B1 && B2 => B1 andBool B2 ... </k>
+    rule <k> B1 || B2 => B1  orBool B2 ... </k>
+
+    rule <k> S1:Stmt S2:Stmt => S1 ~> S2 ... </k>
+
+    rule <k> ID = I:Int ; => . ... </k>
+         <store> STORE => STORE [ ID <- I ] </store>
+
+    rule <k> { S } => S ... </k>
+    rule <k> {   } => . ... </k>
+
+    rule <k> if (true)   THEN else _ELSE => THEN ... </k>
+    rule <k> if (false) _THEN else  ELSE => ELSE ... </k>
+
+    rule <k> while ( BE ) BODY => if ( BE ) { BODY while ( BE ) BODY } else { } ... </k>
+
+    rule <k> def FNAME ( ARGS ) BODY => . ... </k>
+         <funcs> FS => FS [ FNAME <- def FNAME ( ARGS ) BODY ] </funcs>
+
+    rule <k> FNAME ( IS:Ints ) ~> CONT => #makeBindings(ARGS, IS) ~> BODY </k>
+         <funcs> ... FNAME |-> def FNAME ( ARGS ) BODY ... </funcs>
+         <store> STORE => .Map </store>
+         <stack> .List => ListItem(state(CONT, STORE)) ... </stack>
+
+    rule <k> return I:Int ; ~> _ => I ~> CONT </k>
+         <stack> ListItem(state(CONT, STORE)) => .List ... </stack>
+         <store> _ => STORE </store>
+
+    rule <k> return I:Int ; ~> . => I </k>
+         <stack> .List </stack>
+
+    syntax KItem ::= #makeBindings(Ids, Ints)
+                   | state(continuation: K, store: Map)
+ // ----------------------------------------------------
+    rule <k> #makeBindings(.Ids, .Ints) => . ... </k>
+    rule <k> #makeBindings((I:Id, IDS => IDS), (IN:Int, INTS => INTS)) ... </k>
+         <store> STORE => STORE [ I <- IN ] </store>
+endmodule
+

Next, compile this example using kompile lesson-22.k --backend haskell. If
+your processor is an Apple Silicon processor, add the --no-haskell-binary
+flag if the compilation fails.

+

2. Setup: Proof Environment

+ +

Next, take the following snippet of K code and save it in lesson-22-spec.k.
+This is a skeleton of the proof environment, and we will complete it as the
+lesson progresses.

+
requires "lesson-22.k"
+requires "domains.md"
+
+module LESSON-22-SPEC-SYNTAX
+    imports LESSON-22-SYNTAX
+
+endmodule
+
+module VERIFICATION
+    imports K-EQUAL
+    imports LESSON-22-SPEC-SYNTAX
+    imports LESSON-22
+    imports MAP-SYMBOLIC
+
+endmodule
+
+module LESSON-22-SPEC
+    imports VERIFICATION
+
+endmodule
+

3. Claims

+ +
    +
  1. The first claim we will ask K to prove is that 3 + 4, in fact, equals 7.
    +Claims are stated using the claim keyword, followed by the claim
    +statement:
  2. +
+
claim <k> 3 + 4 => 7 ... </k>
+

Add this claim to the LESSON-22-SPEC module and run the K prover using the
+command kprove lesson-22-spec.k. You should get back the output #Top,
+which denotes the Matching Logic equivalent of true and means, in this
+context, that all claims have been proven correctly.

+
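
For reference, a minimal sketch of what the spec module could look like after
+adding this first claim (reusing the skeleton given above):

+
module LESSON-22-SPEC
+    imports VERIFICATION
+
+    claim <k> 3 + 4 => 7 ... </k>
+endmodule
+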
    +
  2. The second claim reasons about the if statement that has a concrete condition:
  2. +
+
claim <k> if ( 3 + 4 == 7 ) {
+            $a = 1 ;
+            } else {
+            $a = 2 ;
+            }
+        => . ... </k>
+        <store> STORE => STORE [ $a <- 1 ] </store>
+

stating that the given program terminates (=> .), and when it does, the value
+of the variable $a is set to 1, meaning that the execution will have taken
+the then branch. Add this claim to the LESSON-22-SPEC module, but also add

+
syntax Id ::= "$a" [token]
+

to the LESSON-22-SPEC-SYNTAX module in order to declare $a as a token so
+that it can be used as a program variable. Re-run the K prover, which should
+again return #Top.

+
    +
  3. Our third claim demonstrates how to reason about both branches of an if
    +statement at the same time:
  2. +
+
claim <k> $a = A:Int ; $b = B:Int ;
+          if ($a < $b) {
+            $c = $b ;
+          } else {
+            $c = $a ;
+          }
+        => . ... </k>
+        <store> STORE => STORE [ $a <- A ] [ $b <- B ] [ $c <- ?C:Int ] </store>
+    ensures (?C ==Int A) orBool (?C ==Int B)
+

The program in question first assigns symbolic integers A and B to program
+variables $a and $b, respectively, and then executes the given if
+statement, which has a symbolic condition (A < B), updating the value of the
+program variable $c in both branches. The specification we give states that
+the if statement terminates, with $a and $b updated, respectively, to A
+and B, and $c updated to some symbolic integer value ?C. Via the
+ensures clause, which is used to specify additional constraints that hold
+after execution, we also state that this existentially quantified ?C equals
+either A or B.

+

Add the productions declaring $b and $c as tokens to the
+LESSON-22-SPEC-SYNTAX module, the claim to the LESSON-22-SPEC module, run
+the K prover again, and observe the output, which should not be #Top this
+time. This means that K was not able to prove the claim, and we now need to
+understand why. We do so by examining the output, which should look as follows:

+
    (InfoReachability) while checking the implication:
+    The configuration's term unifies with the destination's term,
+    but the implication check between the conditions has failed.
+
+  #Not (
+    #Exists ?C . {
+        STORE [ $a <- A:Int ] [ $b <- B:Int ] [ $c <- ?C:Int ]
+      #Equals
+        STORE [ $a <- A:Int ] [ $b <- B:Int ] [ $c <- B:Int ]
+    }
+  #And
+    {
+      true
+    #Equals
+      ?C ==Int A orBool ?C ==Int B
+    }
+  )
+#And
+  <generatedTop>
+    <k>
+      _DotVar1
+    </k>
+    <store>
+      STORE [ $a <- A:Int ] [ $b <- B:Int ] [ $c <- B:Int ]
+    </store>
+    <funcs>
+      _Gen3
+    </funcs>
+    <stack>
+      _Gen5
+    </stack>
+  </generatedTop>
+#And
+  {
+    true
+  #Equals
+    A <Int B
+  }
+

This output starts with a message telling us at which point the proof failed,
+followed by the final state, which consists of three parts: some negative
+Matching Logic (ML) constraints, the final configuration (<generatedTop> ... </generatedTop>), and some positive ML constraints. Generally speaking,
+these positive and negative constraints could arise from various sources,
+such as (but not limited to) branches taken by the execution
+(e.g. { true #Equals A <Int B } or #Not ( { true #Equals A <Int B } )),
+or ensures constraints.

+

First, we examine the message:

+
(InfoReachability) while checking the implication:
+The configuration's term unifies with the destination's term,
+but the implication check between the conditions has failed.
+

which tells us that the structure of the final configuration is as expected,
+but that some of the associated constraints cannot be proven. We next look at
+the final configuration, in which the relevant item is the <store> ... </store> cell, because it is the only one that we are reasoning about. By
+inspecting its contents:

+
STORE [ $a <- A:Int ] [ $b <- B:Int ] [ $c <- B:Int ]
+

we see that we should be within the constraints of the ensures, since the
+value of $c in the store equals B in this branch. We next examine the
+negative and positive constraints of the output and, more often than not, the
+goal is to instruct K how to use the information from the final configuration
+and the positive constraints to falsify one of the negative constraints. This
+is done through simplifications.

+

So, the positive constraint that we have is

+
{ true #Equals A <Int B }
+

meaning that A <Int B holds. Given the analysed program, this tells us that
+we are in the then branch of the if. The negative constraint is

+
  #Not (
+    #Exists ?C . {
+        STORE [ $a <- A:Int ] [ $b <- B:Int ] [ $c <- ?C:Int ]
+      #Equals
+        STORE [ $a <- A:Int ] [ $b <- B:Int ] [ $c <- B:Int ]
+    }
+  #And
+    { true #Equals ?C ==Int A orBool ?C ==Int B }
+  )
+

and we observe, from the first equality, that the existential ?C should be
+instantiated with B. This would make both branches of the #And true,
+falsifying the outside #Not. We just need to show K how to conclude that
+?C ==Int B. We do so by introducing the following simplification into the
+VERIFICATION module:

+
rule { M:Map [ K <- V ] #Equals M [ K <- V' ] } => { V #Equals V' } [simplification]
+

which formalizes our internal understanding of ?C ==Int B. The rule states
+that when we update the same key in the same map with two values, and the
+resulting maps are equal, then the two values must be equal as well. The
+[simplification] attribute indicates to K to use this rule to simplify the
+state when trying to prove claims. Like function rules, simplification rules
+do not complete to the top of the configuration, but instead apply anywhere
+their left-hand-side matches. Re-run the K prover, which should now return
+#Top, indicating that K was able to use the simplification and prove the
+required claims.

+
    +
  4. Next, we show how to state and prove properties of while loops. In
    +particular, we consider the following loop
  2. +
+
claim
+    <k>
+        while ( 0 < $n ) {
+            $s = $s + $n;
+            $n = $n - 1;
+            } => . ...
+    </k>
+    <store>
+        $s |-> (S:Int => S +Int ((N +Int 1) *Int N /Int 2))
+        $n |-> (N:Int => 0)
+    </store>
+    requires N >=Int 0
+

which adds the sum of the first $n integers to $s, assuming the value of $n
+is non-negative to begin with. This is reflected in the store by stating that,
+after the execution of the loop, the original value of $s (which is set to
+equal some symbolic integer S) is incremented by ((N +Int 1) *Int N /Int 2), and the value of $n always equals 0. Add $n and $s as tokens in
+the LESSON-22-SPEC-SYNTAX module, the above claim to the LESSON-22-SPEC
+module, and run the K prover, which should return #Top.

+
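
The token declarations follow the same pattern as the one shown earlier for
+$a; a minimal sketch:

+
syntax Id ::= "$n" [token]
+            | "$s" [token]
+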
    +
  5. Finally, our last claim is about a program that uses function calls:
  2. +
+
claim
+    <k>
+        def $sum($n, .Ids) {
+            $s = 0 ;
+            while (0 < $n) {
+                $s = $s + $n;
+                $n = $n - 1;
+            }
+            return $s;
+        }
+
+        $s = $sum(N:Int, .Ints);
+    => . ... </k>
+    <funcs> .Map => ?_ </funcs>
+    <store> $s |-> (_ => ((N +Int 1) *Int N /Int 2)) </store>
+    <stack> .List </stack>
+    requires N >=Int 0
+

Essentially, we have wrapped the while loop from claim 3.4 into a function
+$sum, and then called that function with a symbolic integer N, storing the
+return value in the variable $s. The specification states that this program
+ends up storing the sum of the first N integers in the variable $s. Add $sum
+to the LESSON-22-SPEC-SYNTAX module, the above claim to the
+LESSON-22-SPEC module, and run the K prover, which should again return
+#Top.

+

Exercises

+ +
    +
  1. +

    Change the condition of the if statement in part 3.2 to take the else
    +branch and adjust the claim so that the proof passes.

    +
  2. +
  3. +

    The post-condition of the specification in part 3.3 loses some information.
    +In particular, the value of ?C is in fact the maximum of A and B.
    +Prove the same claim as in 3.3, but with the post-condition ensures (?C ==Int maxInt(A, B)). For this, you will need to extend the VERIFICATION
    +module with two simplifications that capture the meaning of maxInt(A:Int, B:Int). Keep in mind that any rewriting rule can be used as a
    +simplification; in particular, that simplifications can have requires
    +clauses.

    +
  4. +
  5. +

    Following the pattern shown in part 3.4, assuming a non-negative initial
    +value of $b, specify and verify the following while loop:

    +
  6. +
+
while ( 0 < $b ) {
+    $a = $a + $c;
+    $b = $b - 1;
+    $c = $c - 1;
+}
+

Hint: You will not need additional simplifications; once you've got the
+specification right, the proof will go through.

+
    +
  4. Write an arbitrary yet not-too-complex function (or several functions
    +interacting with each other), and try to specify and verify it (them) in K.
  2. +
+

Section 2: Intermediate K Concepts

+ +

The goal of this second section is to supplement a beginning developer's
+knowledge of K after they have gained a basic understanding of the language.
+Each lesson in this section can be completed independently in order to learn
+about a particular facet of the K language. The lessons are written to provide
+a basic understanding of K's less commonly-used features to someone who is
+still learning K. For a more complete reference on these features, the reader
+ought to consult the User Manual.

+

The reader ought to be able to complete lessons in this section as needed in
+order to learn about specific features of interest, but if desired, can also
+complete the entire section in one go. Someone who has completed this entire
+section ought to be able to read and understand most K specifications, as well
+as write their own specifications of some complexity, and use them to perform
+most common K-related tasks. They can then read about specific lessons in
+Section 3: Advanced K Concepts if they want to
+learn more.

+

Table of Contents

+ +
    +
  1. Macros, Aliases, and Anywhere Rules
  2. Fresh Constants
  3. KLabels and Abstract Syntax
  4. Overloaded Symbols
  5. Matching Logic Connectives and #Or Patterns
  6. Function Context
  7. Record Productions and Named Nonterminals
  8. #fun and #let
  9. #as patterns
  10. The Matching Operators, :=K and :/=K
  11. Uncommon Evaluation Order Concepts
  12. IEEE 754 Floating Point and Fixed Width Integers
  13. Alpha-renaming-aware Substitution
  14. File I/O
  15. String Buffers and Byte Sequences
  16. The Intermediate Language of K, KORE
  17. Debugging Proofs using the Haskell Backend REPL
+

Lesson 2.1: Macros, Aliases, and Anywhere Rules

+ +

The purpose of this lesson is to explain the behavior of the macro,
+macro-rec, alias, and alias-rec production attributes, as well as the
+anywhere rule attribute. These attributes control the meaning of how rules
+associated with them are applied.

+

Macros

+ +

Thus far in the K tutorial, we have described three different types of rules:

+
    +
  1. Top-level rewrite rules, which rewrite a configuration composed of cells to
    +another configuration;
  2. Function rules, which define the behavior of a function written over
    +arbitrary input and output types; and
  3. Simplification rules, which describe ways in which the symbolic execution
    +engine ought to simplify terms containing symbolic values.
+

This lesson introduces three more types of rules, the first of which are
+macros. A production is a macro if it has the macro attribute, and all
+rules whose top symbol on the left hand side is a macro are macro rules
+which define the behavior of the macro. Like function rules and simplification
+rules, macro rules do not participate in cell completion. However, unlike
+function rules and simplification rules, macro rules are applied statically
+before rewriting begins, and the macro symbol is expected to no longer appear
+in the initial configuration for rewriting once all macros in that
+configuration are rewritten.

+

The rationale behind macros is that they allow you to define one piece of syntax
+in terms of another piece of syntax without any runtime overhead associated
+with the cost of rewriting one to the other. This process is a common one in
+programming language design and specification and is referred to as
+desugaring; the syntax that is transformed is typically also referred to as
+syntactic sugar for another type of syntax. For example, in a language with
+if statements and curly braces, you could write the following fragment
+(lesson-01.k):

+
module LESSON-01
+  imports BOOL
+
+  syntax Stmt ::= "if" "(" Exp ")" Stmt             [macro]
+                | "if" "(" Exp ")" Stmt "else" Stmt
+                | "{" Stmts "}"
+  syntax Stmts ::= List{Stmt,""}
+  syntax Exp ::= Bool
+
+  rule if ( E ) S => if ( E ) S else { .Stmts }
+endmodule
+

In this example, we see that an if statement without an else clause is
+defined in terms of one with an else clause. As a result, we would only
+need to give a single rule for how to rewrite if statements, rather than
+two separate rules for two types of if statements. This is a common pattern
+for dealing with program syntax that contains an optional component to it.

+

It is worth noting that by default, macros are not applied recursively. To be
+more precise, by default a macro that arises as a result of the expansion of
+the same macro is not rewritten further. This is primarily to simplify the
+macro expansion process and reduce the risk that improperly defined macros will
+lead to non-terminating behavior.

+

It is possible, however, to tell K to expand a macro recursively. To do this,
+simply replace the macro attribute with the macro-rec attribute. Note that
+K does not do any kind of checking to ensure termination here, so it is
+important that rules be defined correctly to always terminate, otherwise the
+macro expansion phase will run forever. Fortunately, in practice it is very
+simple to ensure this property for most of the types of macros that are
+typically used in real-world semantics.

+

Exercise

+ +

Using a Nat sort containing the constructors 0 and S (i.e., a
+Peano-style axiomatization of the
+natural numbers where S(N) = N + 1, S(S(N)) = N + 2, etc), write a macro
+that will compute the sum of two numbers.

+

Aliases

+ +

NOTE: This lesson introduces the concept of "aliases", which are a variant
+of macros. While similar, this is different from the concept of "aliases" in
+matching logic, which is introduced in Lesson 2.16.

+

Macros can be very useful in helping you define a programming language.
+However, they can be disruptive while pretty printing a configuration. For
+example, you might write a set of macros that transforms the code the user
+wrote into equivalent code that is slightly harder to read. This can make it
+more difficult to understand the code when it is pretty printed as part of the
+output of rewriting.

+

K defines a relatively straightforward but novel solution to this problem,
+which is known as a K alias. An alias in K is very similar to a macro,
+with the exception that the rewrite rule will also be applied backwards
+during the pretty-printing process.

+

It is very simple to make a production be an alias instead of a macro: simply
+use the alias or alias-rec attributes instead of the macro or macro-rec
+attributes. For example, if the example involving if statements above was
+declared using an alias instead of a macro, the Stmt term if (E) {} else {}
+would be pretty-printed as if (E) {}. This is because during pretty-printing,
+the term participates in another macro-expansion pass. However, this macro
+expansion step will only apply rules with the alias or alias-rec attribute,
+and, critically, it will reverse the rule by treating the left-hand side as if
+it were the right-hand side, and vice versa.

+
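
Concretely, the only change to the LESSON-01 module above is the attribute on
+the first production; everything else stays the same:

+
syntax Stmt ::= "if" "(" Exp ")" Stmt             [alias]
+              | "if" "(" Exp ")" Stmt "else" Stmt
+              | "{" Stmts "}"
+
+rule if ( E ) S => if ( E ) S else { .Stmts }
+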

This can be very useful to allow you to define one construct in terms of
+another while still being able to pretty-print the result as if it were
+the original term in question. This can be especially useful for applications
+of K where we are taking the output of rewriting and attempting to use it as
+a code fragment that we then execute, such as with test generation.

+

Exercise

+ +

Modify LESSON-01 above to use an alias instead of a macro and experiment
+with how various terms are pretty-printed by invoking krun on them.

+

anywhere rules

+ +

The last type of rule introduced in this lesson is the anywhere rule. An
+anywhere rule is specified by adding the anywhere attribute to a rule. Such a
+rule is similar to a function rule in that it does not participate in cell
+completion, and will apply anywhere that the left-hand-side matches in the
+configuration, but distinct in that the symbol in question can still be matched
+against in the left-hand side of other rules, even during concrete rewriting.
+The reasoning behind this is that instead of the symbol in question being a
+constructor, it is a constructor modulo the axioms defined with the
+anywhere attribute. Essentially, the rules with the anywhere attribute will
+apply as soon as they appear in the right-hand side of a rule being applied,
+but the symbol in question will still be treated as a symbol that can be
+matched on if it is not completely removed by those rules.

+

This can be useful in certain cases to allow you to define transformations over
+particular pieces of syntax while still generally giving those pieces of syntax
+another meaning when the anywhere rule does not apply. For example, the ISO C
+standard defines the semantics of *&x as exactly equal to x, with no
+reading or writing of memory taking place, and the K semantics of C implements
+this functionality using an anywhere rule that is applied at compilation time.

+
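
As an illustration, a hypothetical sketch of such a rule (not the actual rule
+from the K semantics of C) might look like:

+
syntax Exp ::= "*" Exp | "&" Exp | Id
+
+rule * & E:Exp => E [anywhere]
+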

NOTE: the anywhere attribute is only implemented on the LLVM backend
+currently. Attempting to use it in a semantics that is compiled with the
+Haskell backend will result in an error being reported by the compiler. This
+should be remembered when using this attribute, as it may not be suitable for
+a segment of a semantics which is intended to be symbolically executed.

+

Exercises

+ +
    +
  1. Write a version of the calculator from Lesson 1.14 Exercise 1, which uses
    +the same syntax for evaluating expressions, but defines its arithmetic logic
    +using anywhere rules rather than top-level rewrite rules.
  2. +
+

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.2: Fresh Constants

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.3: KLabels and Abstract Syntax

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.4: Overloaded Symbols

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.5: Matching Logic Connectives and #Or Patterns

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.6: Function Context

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.7: Record Productions and Named Nonterminals

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.8: #fun and #let

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.9: #as Patterns

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.10: The Matching Operators, :=K and :/=K

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.11: Uncommon Evaluation Order Concepts

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.12: IEEE 754 Floating Point and Fixed Width Integers

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.13: Alpha-renaming-aware Substitution

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.14: File I/O

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.15: String Buffers and Byte Sequences

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.16: The Intermediate Language of K, KORE

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

Lesson 2.17: Debugging Proofs using the Haskell Backend REPL

+ +

Return to Top

+ +

Click here to return to the Table of Contents for Section 2.

+

K User Manual

+

NOTE: The K User Manual is still under construction; some features of K
+may have partial or missing documentation.

+

Introduction

+

Why K?

+ +

The K Framework is a programming language and system design toolkit made for
+practitioners and researchers alike.

+

K For Practitioners:
+K is a framework for deriving programming languages tools from their semantic
+specifications.

+

Typically, programming language tool development follows a similar pattern.
+After a new programming language is designed, separate teams will develop
+separate language tools (e.g. a compiler, interpreter, parser, symbolic
+execution engine, etc). Code reuse is uncommon. The end result is that for each
+new language, the same basic tools and patterns are re-implemented again and
+again.

+

K approaches the problem differently: it generates each of these tools from a single language specification.
+Programming language design and tool implementation become separate concerns.
+The end result is that the exercise of
+designing new languages and their associated tooling is now reduced to
+developing a single language specification from which we derive our tooling for
+free.

+

K For Researchers:
+K is a configuration- and rewrite-based executable semantic framework.

+

In more detail, K specifications are:

+
    +
  1. Executable: compile into runnable and testable programs;
  2. Semantic: correspond to a logical theory with a sound and relatively
    +complete proof system;
  3. Configuration-based: organize system states into compositional,
    +hierarchical, labelled units called cells;
  4. Rewrite-based: define system transitions using rewrite rules.
+

K specifications are compiled into particular matching logic theories, giving
+them a simple and expressive semantics. K semantic rules are implicitly defined
+over the entire configuration structure, but omit unused cells, enabling a
+highly modular definitional style. Furthermore, K has been used to develop
+programming languages, type systems, and formal analysis tools.

+

Manual Objectives

+ +

As mentioned in the Why K? section above, the K Framework is designed as a
+collection of language-generic command-line interface (CLI) tools which revolve
+around K specifications. These tools cover a broad range of uses, but they
+typically fall into one of the following categories:

+
    +
  1. Transforming K Specs (e.g. compilation)
  2. Running K Specs (e.g. concrete and symbolic execution)
  3. Analyzing K Specs (e.g. theorem proving)
+

The main user-facing K tools include:

+
    +
  • kompile - the K compiler driver
  • +
  • kparse - the standalone K parser and abstract syntax tree (AST)
    +transformation tool
  • +
  • krun - the K interpreter and symbolic execution engine driver
  • +
  • kprove - the K theorem prover
  • +
+

This user manual is designed to be a tool reference.
+In particular, it is not designed to be a tutorial on how to write K
+specifications or to teach the logical foundations of K. New K users should
+consult our dedicated
+K tutorial,
+or the more language-design oriented
+PL tutorial.
+Researchers seeking to learn more about the logic underlying K are encouraged
+to peruse the
+growing literature on K and matching logic.
+We will consider the manual complete when it provides a complete description of
+all user-facing K tools and features.

+

Introduction to K

+

Since K specifications are the primary input into the entire system, let us
+take a moment to describe them. At the highest level, K specifications describe
+a programming language or system using three different pieces:

+
    +
  1. the system primitives, the base datatypes used during system operation,
    +e.g., numbers, lists, maps, etc;
  2. the system state, a tuple or record over system primitives which gives a
    +complete snapshot of the system at any given moment;
  3. the system behavior, a set of rules which defines possible system
    +evolutions.
+

K specifications are then defined by a collection of sentences which
+correspond to the three concepts above:

+
    +
  1. syntax declarations encode the system primitives;
  2. configuration declarations encode the system state;
  3. context and rule declarations encode the system behavior.
+

K sentences are then organized into one or more modules, which are stored in one or
+more files. In this scheme, files may require other files and modules may
+import other modules, giving rise to a hierarchy of files and modules. We
+give an intuitive sketch of the two levels of grouping in the diagram below:

+
   example.k file
+  +=======================+
+  | requires ".." --------|--> File_1
+  | ...                   |
+  | requires ".." --------|--> File_N
+  |                       |
+  |  +-----------------+  |
+  |  | module ..       |  |
+  |  |   imports .. ---|--|--> Module_1
+  |  |   ...           |  |
+  |  |   imports .. ---|--|--> Module_M
+  |  |                 |  |
+  |  |   sentence_1    |  |
+  |  |   ...           |  |
+  |  |   sentence_K    |  |
+  |  | endmodule       |  |
+  |  +-----------------+  |
+  |                       |
+  +=======================+
+

where:

+
    +
  • files and modules are denoted by double-bordered and single-bordered boxes
    +respectively;
  • +
  • file or module identifiers are denoted by double dots (..);
  • +
  • potential repetitions are denoted by triple dots (...).
  • +
+

In the end, we require that the file and module hierarchies both form a
+directed acyclic graph (DAG). That is, no file may recursively require itself,
+and likewise, no module may recursively import itself.

+
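
For instance, a small hypothetical layout with one file requiring another and
+one module importing another might look as follows (the file and module names
+are made up for illustration):

+
// common.k
+module COMMON
+    imports INT
+    syntax Val ::= Int
+endmodule
+
+// example.k
+requires "common.k"
+
+module EXAMPLE
+    imports COMMON
+    configuration <k> $PGM:Val </k>
+endmodule
+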

We now zoom in further to discuss the various kinds of sentences contained in K
+specifications:

+
    +
  1. +

    sentences that define our system's primitives, including:

    +
      +
    • sort declarations: define new categories of primitive datatypes
    • +
    • Backus-Naur Form (BNF) grammar declarations: define the
      +operators that inhabit our primitive datatypes
    • +
    • lexical syntax declarations: define lexemes/tokens for the
      +lexer/tokenizer
    • +
    • syntax associativity declarations: specify the
      +associativity/grouping of our declared operators
    • +
    • syntax priority declarations: specify the priority of
      +potential ambiguous operators
    • +
    +
  2. +
  3. +

    sentences that define our system's state, including:

    +
      +
    • configuration declarations: define labelled, hierarchical records
      +using a nested XML-like syntax
    • +
    +
  4. +
  5. +

    sentences that define our system's behavior, including:

    +
      +
    • context declarations: describe how primitives and configurations
      +can simplify
    • +
    • context alias declarations: define templates that can generate new
      +contexts
    • +
    • rule declarations: define how the system transitions from one state
      +to the next
    • +
    +
  6. +
+

K Process Overview

+ +

We now examine how the K tools are generally used. The main input to all of the
+K tools is a K specification. For efficiency reasons, this specification is
+first compiled into an intermediate representation called Kore. Once we have
+obtained this intermediate representation, we can use it to do:

+
    +
  1. parsing/pretty-printing, i.e., converting a K term, whose syntax is defined
    +by a K specification, into an alternate representation
  2. concrete and abstract execution of a K specification
  3. theorem proving, i.e., verifying whether a set of claims about a K
    +specification hold
+

We represent the overall process using the graphic below:

+
 K Compilation Process
++============================================================+
+|                     +---------+                            |
+|  K Specification ---| kompile |--> Kore Specification --+  |
+|                     +---------+                         |  |
++=========================================================|==+
+                                                          |
+ K Execution Process                                      |
++=========================================================|==+
+|                                                         |  |
+|             +-------------------------------------------+  |
+|             |                                              |
+|             |       +---------+                            |
+|  K Term ----+-------| kparse  |--> K Term                  |
+|             |       +---------+                            |
+|             |                                              |
+|             |       +---------+                            |
+|  K Term ----+-------|  krun   |--> K Term                  |
+|             |       +---------+                            |
+|             |                                              |
+|             |       +---------+                            |
+|  K Claims --+-------| kprove  |--> K Claims                |
+|                     +---------+                            |
+|                                                            |
++============================================================+
+

where:

+
    +
  • process outlines are denoted by boxes with double-lined borders
  • +
  • executables are denoted by boxes with single-lined borders
  • +
  • inputs and outputs are denoted by words attached to lines
  • +
  • K terms typically correspond to programs defined in a particular
    +language's syntax (which are either parsed using kparse or executed using
    +krun)
  • +
  • K claims are a notation for describing how certain K programs should
    +execute (which are checked by our theorem prover kprove)
  • +
+

K Compilation Process:
+Let us start with a description of the compilation process. According to the
+above diagram, the compiler driver is called kompile. For our purposes, it is
+enough to view the K compilation process as a black box that transforms a K
+specification into a lower-level Kore specification that encodes the same
+information, but that is easier to work with programmatically.

+

K Execution Process:
+We now turn our attention to the K execution process. Abstractly, we can divide
+the K execution process into the following stages:

+
    +
  1. the Kore specification is loaded (which defines a lexer, parser, and
    +unparser among other things)
  2. the input string is lexed into a token stream
  3. the token stream is parsed into K terms/claims
  4. the K terms/claims are transformed according to the K tool being used (e.g.
    +kparse, krun, or kprove)
  5. the K terms/claims are unparsed into a string form and printed
+

Note that all of the above steps performed in the K execution process are fully
+prescribed by the input K specification. Of course, there are entire languages
+devoted to encoding these various stages of the process individually, e.g., flex for
+lexers, bison for parsers, etc. What K offers is a consistent language to
+package the above concepts in a way that we believe is convenient and practical
+for a wide range of uses.

+

Module Declaration

+

K modules are declared at the top level of a K file. They begin with the
+module keyword and are followed by a module ID and an optional set of
+attributes. They continue with zero or more imports and zero or more sentences
+until the endmodule keyword is reached.

+

A module ID consists of an optional # at the beginning, followed by one or
+more components separated by hyphens. Each component can contain letters,
+numbers, or underscores.

+

After the module ID, attributes can be specified in square brackets. See below
+for an (incomplete) list of allowed module attributes.

+

Following the attributes, a module can contain zero or more imports. An
+import consists of the import or imports keywords followed by a module ID.
+An import tells the compiler that this module should contain all the sentences
+(recursively) contained by the module being imported.

+

Imports can be public or private. By default, they are public, which
+means that all the imported syntax can be used by any module that imports the
+module doing the import. However, you can explicitly override the visibility
+of the import with the public or private keyword immediately prior to the
+module name. A module imported privately does not export its syntax to modules
+that import the module doing the import.

+
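
For example, a small hypothetical module declaration combining these pieces
+might look like this (the module name is for illustration only):

+
module EXAMPLE-MODULE
+    imports INT
+    imports private BOOL
+
+    // sentences (syntax, rules, etc.) go here
+endmodule
+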

Following imports, a module can contain zero or more sentences. A sentence can
+be a syntax declaration, a rule, a configuration declaration, a context, a
+claim, or a context alias. Details on each of these can be found in subsequent
+sections.

+

private attribute

+ +

If the module is given the private attribute, all of its imports and syntax
+are private by default. Individual pieces of syntax can be made public with
+the public attribute, and individual imports can be made public with the
+public keyword. See relevant sections on syntax and modules for more details
+on what it means for syntax and imports to be public or private.

+

symbolic and concrete attribute

+ +

These attributes may be placed on modules to indicate that they should only
+be used by the Haskell and LLVM backends respectively. If the definition is
+compiled on the opposite backend, they are implicitly removed from the
+definition prior to parsing anywhere they are imported. This can be useful when
+used in limited capacity in order to provide alternate semantics for certain
+features on different backends. It should be used sparingly as it makes it more
+difficult to trust the correctness of your semantics, even in the presence of
+testing.

+
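
For instance, a hypothetical pair of modules providing backend-specific
+behavior might be declared as follows (the module names are illustrative):

+
module MY-SEMANTICS-SYMBOLIC [symbolic]
+    // rules used only when compiling with the Haskell backend
+endmodule
+
+module MY-SEMANTICS-CONCRETE [concrete]
+    // rules used only when compiling with the LLVM backend
+endmodule
+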

Syntax Declaration

+

Named Non-Terminals

+ +

We have added a syntax to Productions which allows non-terminals to be given a
+name in productions. This significantly improves the ability to document K, by
+providing a way to explicitly explain what a field in a production corresponds
+to instead of having to infer it from a comment or from the rule body.

+

The syntax is:

+
name: Sort
+

This syntax can be used anywhere in a K definition that expects a non-terminal.

+
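
For example, a hypothetical production for an assignment statement could name
+both of its fields directly in the production (the names here are
+illustrative):

+
syntax Stmt ::= lhs: Id "=" rhs: Exp ";"
+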

symbol(_) attribute

+ +

By default, when compiling a definition, K generates a unique "mangled" label
+identifier for each syntactic production. These identifiers can be used to
+reference productions externally, for example when constructing terms by hand
+or programmatically via Pyk.

+

The symbol(_) attribute can be applied to a production to control the precise
+identifier for a production that appears in a compiled definition. For example:

+
module SYMBOLS
+    syntax Foo ::= foo() [symbol(foo)]
+                 | bar()
+endmodule
+

Here, the compiled definition will contain the following symbol declarations:

+
  symbol Lblfoo{}() ...
+  symbol Lblbar'LParRParUnds'SYMBOLS'Unds'Foo{}() ...
+

The compiler enforces uniqueness[1] of symbol names specified in
+this way; it would be an error to apply symbol(foo) to another production in
+the module above. Additionally, symbol(_) with an argument may not co-occur
+with the klabel(_) attribute (see below).

+

overload attribute

+ +

K supports subsort overloading[2] on symbols, whereby a
+constructor can have a more specific sort for certain arguments. For example,
+consider the following productions derived from a C-like language semantics:

+
syntax Exp  ::= LVal
+              | Exp  "." Id
+syntax LVal ::= LVal "." Id
+

Here, it is useful for the result of the dot operator to be an LVal if the
+left-hand side is itself an LVal. However, there is an issue with the code
+as written: if L() is a term of sort LVal, then the program L() . x has a
+parsing ambiguity between the two productions for the dot operator. To resolve
+this, we can mark the productions as overloads:

+
syntax Exp  ::= LVal
+              | Exp  "." Id [overload(_._)]
+syntax LVal ::= LVal "." Id [overload(_._)]
+

Now, the parser will select the most specific overloaded production when it
+resolves ambiguities in L() . x (that is, L() . x parses to a term of sort
+LVal).

+

Formally, the compiler organises productions into a partial order that defines
+the overload relation as follows. We say that P is a more specific overload
+of Q if:

+
    +
  • P and Q have the same overload(_) attribute. Note that the argument
    +supplied has no semantic meaning other than as a key grouping productions
    +together.
  • +
  • Let S_P be the sort of P, and S_p1 etc. be the sorts of its arguments
    +(c.f. for Q). The tuple (S_P, S_p1, ..., S_pN) must be elementwise
    +strictly less than (S_Q, S_q1, ..., S_qN) according to the definition's
    +subsorting relationship. That is, a term from production P is a restriction
    +of one from production Q; when its arguments are more precise, we can give
    +the result a more precise sort.
  • +
+

klabel(_) and symbol attributes

+ +

Note: the klabel(_), symbol approach described in this section is a legacy
+feature that will be removed in the future. New code should use the symbol(_)
+and overload(_) attributes to opt into explicit naming and overloading
+respectively.

+

References here to "overloading" are explained in the section above; the use
+of the klabel(_) attribute without symbol is equivalent to the new
+overload(_) syntax.

+

By default K generates for each syntax definition a long and obfuscated klabel
+string, which serves as a unique internal identifier and also is used in kast
+format of that syntax. If we need to reference a certain syntax production
+externally, we have to manually define the klabels using the klabel attribute.
+One example of where you would want to do this is to be able to refer to a given
+symbol via the syntax priority attribute, or to enable overloading of a
+given symbol.

+

If you only provide the klabel attribute, you can use the provided klabel to
+refer to that symbol anywhere in the frontend K code. However, the internal
+identifier seen by the backend for that symbol will still be the long obfuscated
+generated string. Sometimes you want control over the internal identifier used as
+well, in which case you use the symbol attribute. This tells the frontend to
+use whatever the declared klabel is directly as the internal identifier.

+

For example:

+
module MYMODULE
+    syntax FooBarBaz ::= #Foo( Int, Int ) [klabel(#Foo), symbol] // symbol1
+                       | #Bar( Int, Int ) [klabel(#Bar)]         // symbol2
+                       | #Baz( Int, Int )                        // symbol3
+endmodule
+

Here, we have that:

+
    +
  • In frontend K, you can refer to "symbol1" as #Foo (from klabel(#Foo)),
    +and the backend will see 'Hash'Foo as the symbol name.
  • +
  • In frontend K, you can refer to "symbol2" as #Bar (from klabel(#Bar)),
    +and the backend will see
    +'Hash'Bar'LParUndsCommUndsRParUnds'MYMODULE'Unds'FooBarBaz'Unds'Int'Unds'Int
    +as the symbol name.
  • +
  • In frontend K, you can refer to "symbol3" as
    +#Baz(_,_)_MYMODULE_FooBarBaz_Int_Int (from auto-generated klabel), and
    +the backend will see
    +'Hash'Baz'LParUndsCommUndsRParUnds'MYMODULE'Unds'FooBarBaz'Unds'Int'Unds'Int
    +as the symbol name.
  • +
+

The symbol provided must be unique to this definition. This is enforced by
+K. In general, it's recommended to use the symbol attribute whenever you use
+klabel unless you explicitly have a reason not to (e.g. you want to overload
+symbols, or you're using a deprecated backend). It can be very helpful to use
+the symbol attribute for debugging, as many debugging messages are printed in
+Kast format, which will be more readable with the symbol names you explicitly
+declare. In addition, if you are programmatically manipulating definitions via
+the JSON Kast format, building terms using the user-provided pretty
+symbol from klabel(...) is easier and less error-prone, especially if the
+auto-generation process for klabels changes.

+

Syntactic Lists

+ +

When using K's support for syntactic lists, a production like:

+
syntax Ints ::= List{Int, ","} [symbol(ints)]
+

will desugar into two productions:

+
syntax Ints ::= Int "," Ints [symbol(ints)]
+syntax Ints ::= ".Ints"      [symbol(List{"ints"})]
+

Note that the symbol for the terminator of the list has been generated
+automatically from the label on the original production. It is possible to
+control what the terminator's label is using the terminator-symbol(_)
+attribute. For example:

+
syntax Ints ::= List{Int, ","} [symbol(ints), terminator-symbol(.ints)]
+

will desugar into two productions:

+
syntax Ints ::= Int "," Ints [symbol(ints)]
+syntax Ints ::= ".Ints"      [symbol(.ints)]
+

It is an error to apply terminator-symbol(_) to a non-production sentence, or
+to a production that does not declare a syntactic list.

+

Parametric productions and bracket attributes

+ +

Some syntax productions, like the rewrite operator, the bracket operator, and
+the #if #then #else #fi operator, cannot have their precise type system
+expressed using only concrete sorts.

+

Prior versions of K solved this issue by using the K sort in this case, but
+this introduces inexactness in which poorly typed terms can be created even
+without having a cast operator present in the syntax, which is a design
+consideration we would prefer to avoid.

+

It also introduces cases where terms cannot be placed in positions where they
+ought to be well sorted unless their return sort is made to be KBott, which in
+turn vastly complicates the grammar and makes parsing much slower.

+

In order to introduce this, we provide a new syntax for parametric productions
+in K. This allows you to express syntax that has a sort signature based on
+parametric polymorphism. We do this by means of an optional curly-brace-
+enclosed list of parameters prior to the return sort of a production.

+

Some examples:

+
syntax {Sort} Sort ::= "(" Sort ")" [bracket]
+syntax {Sort} KItem ::= Sort
+syntax {Sort} Sort ::= KBott
+syntax {Sort} Sort ::= Sort "=>" Sort
+syntax {Sort} Sort ::= "#if" Bool "#then" Sort "#else" Sort "#fi"
+syntax {Sort1, Sort2} Sort1 ::= "#fun" "(" Sort2 "=>" Sort1 ")" "(" Sort2 ")"
+

Here we have:

+
    +
  1. Brackets, which can enclose any sort but should be of the same sort that was
    +enclosed.
  2. Every sort is a KItem.
  3. A KBott term can appear inside any sort.
  4. Rewrites, which can rewrite a value of any sort to a value of the same sort.
    +Note that this allows the lhs or rhs to be a subsort of the other.
  5. If then else, which can return any sort but which must contain that sort on
    +both the true and false branches.
  6. lambda applications, in which the argument and parameter must be the same
    +sort, and the return value of the application must be the same sort as the
    +return value of the function.
+

Note the last case, in which two different parameters are specified separated
+by a comma. This indicates that we have multiple independent parameters which
+must be the same each place they occur, but not the same as the other
+parameters.

+

In practice, because every sort is a subsort of K, the Sort2
+parameter in #6 above does nothing during parsing. It cannot
+actually reject any parse, because it can always infer that the sort of the
+argument and parameter are K, and it has no effect on the resulting sort of
+the term. However, it will nevertheless affect the kore generated from the term
+by introducing an additional parameter to the symbol generated for the term.

+

function and total attributes

+ +

Many times it becomes easier to write a semantics if you have "helper"
+functions written which can be used in the RHS of rules. The function
+attribute tells K that a given symbol should be simplified immediately when it
+appears anywhere in the configuration. Semantically, it means that evaluation
+of that symbol will result in at most one return value (that is, the symbol is
+a partial function).

+

The total attribute indicates that a symbol cannot be equal to matching logic
+bottom; in other words, it has at least one value for every possible set of
+arguments. It can be added to a production with the function attribute to
+indicate to the symbolic reasoning engine that a given symbol is a
+total function, that is it has exactly one return value for every possible
+input. Other uses of the total attribute (i.e., on multi-valued symbols to
+indicate they always have at least one value) are not yet implemented.

+

For example, here we define the _+Word_ total function and the _/Word_
+partial function, which can be used to do addition/division modulo
+2 ^Int 256. These functions can be used anywhere in the semantics where
+integers should not grow larger than 2 ^Int 256. Notice how _/Word_ is
+not defined when the denominator is 0.

+
syntax Int ::= Int "+Word" Int [function, total]
+             | Int "/Word" Int [function]
+
+rule I1 +Word I2 => (I1 +Int I2) modInt (2 ^Int 256)
+rule I1 /Word I2 => (I1 /Int I2) modInt (2 ^Int 256) requires I2 =/=Int 0
+

freshGenerator attribute

+ +

In K, you can access "fresh" values in a given domain using the syntax
+!VARNAME:VarSort (with the !-prefixed variable name). This is supported for
+builtin sorts Int and Id already. For example, you can generate fresh
+memory locations for declared identifiers as such:

+
rule <k> new var x ; => . ... </k>
+     <env> ENV => ENV [ x <- !I:Int ] </env>
+     <mem> MEM => MEM [ !I <- 0     ] </mem>
+

Each time a !-prefixed variable is encountered, a new integer will be used,
+so each variable declared with new var _ ; will get a unique position in the
+<mem>.

+

Sometimes you want to have generation of fresh constants in a user-defined
+sort. For this, K will still generate a fresh Int, but can use a converter
+function you supply to turn it into the correct sort. For example, here we can
+generate fresh Foos using the freshFoo(_) function annotated with
+freshGenerator.

+
syntax Foo ::= "a" | "b" | "c" | d ( Int )
+
+syntax Foo ::= freshFoo ( Int ) [freshGenerator, function, total]
+
+rule freshFoo(0) => a
+rule freshFoo(1) => b
+rule freshFoo(2) => c
+rule freshFoo(I) => d(I) [owise]
+
+rule <k> new var x ; => . ... </k>
+     <env> ENV => ENV [ x <- !I:Int  ] </env>
+     <mem> MEM => MEM [ !I <- !F:Foo ] </mem>
+

Now each newly allocated memory slot will have a fresh Foo placed in it.

+

token attribute

+ +

The token attribute signals to the Kore generator that the associated sort
+will be inhabited by domain values. Sorts inhabited by domain values must not
+have any constructors declared.

+
syntax Bytes [hook(BYTES.Bytes), token]
+

Converting between [token] sorts

+ +

You can convert between tokens of one sort via Strings by defining functions
+implemented by builtin hooks.
+The hook STRING.token2string allows conversion of any token to a string:

+
syntax String ::= FooToString(Foo)  [function, total, hook(STRING.token2string)]
+

Similarly, the hook STRING.string2Token allows the inverse:

+
syntax Bar ::= StringToBar(String) [function, total, hook(STRING.string2token)]
+

WARNING: This sort of conversion does NOT do any sort of parsing or validation.
+Thus, we can create arbitrary tokens of any sort:

+
StringToBar("The sun rises in the west.")
+

Composing these two functions lets us convert from Foo to Bar:

+
syntax Bar ::= FooToBar(Foo) [function]
+rule FooToBar(F) => StringToBar(FooToString(F))
+

Parsing comments, and the #Layout sort

+ +

Productions for the #Layout sort are used to describe tokens that are
+considered "whitespace". The scanner removes tokens matching these productions
+so they are not even seen by the parser. Below, we use it to define
+lines beginning with ; (semicolon) as comments.

+
syntax #Layout ::= r"(;[^\\n\\r]*)"    // Semi-colon comments
+                 | r"([\\ \\n\\r\\t])" // Whitespace
+

prec attribute

+ +

Consider the following naive attempt at creating a language whose syntax
+allows two types of variables: names that contain underbars, and names that
+contain sharps/hashes/pound-signs:

+
syntax NameWithUnderbar ::= r"[a-zA-Z][A-Za-z0-9_]*"  [token]
+syntax NameWithSharp    ::= r"[a-zA-Z][A-Za-z0-9_#]*" [token]
+syntax Pgm ::= underbar(NameWithUnderbar)
+             | sharp(NameWithSharp)
+

Although it seems that K has enough information to parse the programs
+underbar(foo) and sharp(foo), the lexer does not take into account
+whether a token is being parsed for the sharp or for the underbar
+production. It chooses an arbitrary sort for the token foo (perhaps
+NameWithUnderbar). Thus, during parsing it is unable to construct a valid term
+for one of those programs (sharp(foo)) and produces the error message:
+Inner Parser: Parse error: unexpected token 'foo'.

+

Since calculating inclusions and intersections between regular expressions is
+tricky, we must provide this information to K. We do this via the prec(N)
+attribute. The lexer will always prefer longer tokens to shorter tokens.
+However, when it has to choose between two different tokens of equal length,
+token productions with higher precedence are tried first. Note that the default
+precedence value is zero when the prec attribute is not specified.

+

For example, the BUILTIN-ID-TOKENS module defines #UpperId and #LowerId with
+the prec(2) attribute.

+
  syntax #LowerId ::= r"[a-z][a-zA-Z0-9]*"                    [prec(2), token]
+  syntax #UpperId ::= r"[A-Z][a-zA-Z0-9]*"                    [prec(2), token]
+

Furthermore, we also need to make sorts with more specific tokens subsorts of ones with more
+general tokens. We add the token attribute to this production so that all
+tokens of a particular sort are marked with the sort they are parsed as and not a
+subsort thereof, e.g., we get underbar(#token("foo", "NameWithUnderbar"))
+instead of underbar(#token("foo", "#LowerId")).

+
imports BUILTIN-ID-TOKENS
+syntax NameWithUnderbar ::= r"[a-zA-Z][A-Za-z0-9_]*" [prec(1), token]
+                          | #UpperId                [token]
+                          | #LowerId                [token]
+syntax NameWithSharp ::= r"[a-zA-Z][A-Za-z0-9_#]*" [prec(1), token]
+                       | #UpperId                 [token]
+                       | #LowerId                 [token]
+syntax Pgm ::= underbar(NameWithUnderbar)
+             | sharp(NameWithSharp)
+

unused attribute

+ +

K will warn you if you declare a symbol that is not used in any of the rules of
+your definition. Sometimes this is intentional, however; in this case, you can
+suppress the warning by adding the unused attribute to the production or
+cell.

+
syntax Foo ::= foo() [unused]
+
+configuration <foo unused=""> .K </foo>
+

deprecated attribute

+ +

Symbols can be marked as deprecated by adding the deprecated attribute to
+their declaration. If that symbol subsequently appears in the definition (in a
+rule, context, context alias or configuration), the compiler will issue a
+warning.

+
syntax Foo ::= foo() [deprecated]
+rule foo() => . // warning on this line
+

Symbol priority and associativity

+ +

Unlike most other parser generators, K combines the task of parsing with AST
+generation. A production declared with the syntax keyword in K is both a
+piece of syntax used when parsing, and a symbol that is used when rewriting.
+As a result, it is generally convenient to describe expression grammars using
+priority and associativity declarations rather than explicitly transforming
+your grammar into a series of nonterminals, one for each level of operator
+precedence. Thus, for example, a simple grammar for addition and multiplication
+will look like this:

+
syntax Exp ::= Exp "*" Exp
+             | Exp "+" Exp
+

However, this grammar is ambiguous. The term x+y*z might refer to x+(y*z)
+or to (x+y)*z. In order to differentiate this, we introduce a partial
+ordering between productions known as priority. A symbol "has tighter priority"
+than another symbol if the first symbol can appear under the second, but the
+second cannot appear under the first without a bracket. For example, in
+traditional arithmetic, multiplication has tighter priority than addition,
+which means that x+y*z cannot parse as (x+y)*z because the addition
+operator would appear directly beneath the multiplication, which is forbidden
+by the priority filter.

+

Priority is applied individually to each possible ambiguous parse of a term. It
+then either accepts or rejects that parse. If there is only a single remaining
+parse (after all the other disambiguation steps have happened), this is the
+parse that is chosen. If all the parses were rejected, it is a parse error. If
+multiple parses remain, they might be resolved by further disambiguation such
+as via the prefer and avoid attributes, but if multiple parses remain after
+disambiguation finishes, this is an ambiguous parse error, indicating there is
+not a unique parse for that term. In the vast majority of cases, this is
+an error and indicates that you ought to either change your grammar or add
+brackets to the term in question.

+
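
For reference, brackets are declared using the bracket attribute; a minimal
+sketch for the expression grammar above:

+
syntax Exp ::= "(" Exp ")" [bracket]
+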

Priority is specified in K grammars by means of one of two different
+mechanisms. The first, and simplest, simply replaces the | operator in a
+sequence of K productions with the > operator. This operator indicates that
+everything prior to the > operator (including transitively) binds tighter
+than what comes after. For example, a more complete grammar for simple
+arithmetic might be:

+
syntax Exp ::= Exp "*" Exp
+             | Exp "/" Exp
+             > Exp "+" Exp
+             | Exp "-" Exp
+

This indicates that multiplication and division bind tighter than addition
+and subtraction, but that there is no relationship in priority between
+multiplication and division.

+

As you may have noticed, this grammar is also ambiguous. x*y/z might refer to
+x*(y/z) or to (x*y)/z. Indeed, if we removed division and subtraction
+entirely, the grammar would still be ambiguous: x*y*z might parse as
+x*(y*z), or as (x*y)*z. To resolve this, we introduce another feature:
+associativity. Roughly, associativity tells us how symbols are allowed to nest
+within other symbols with the same priority. If a set of symbols is left
+associative, then symbols in that set cannot appear as the rightmost child
+of other symbols in that set. If a set of symbols is right associative, then
+symbols in that set cannot appear as the leftmost child of other symbols in
+that set. Finally, if a set of symbols is non-associative, then symbols
+in that set cannot appear as the rightmost or leftmost child of other symbols
+in that set. For example, in the above example, if addition and subtraction
+are left associative, then x+y+z will parse as (x+y)+z and x+y-z will
+parse as (x+y)-z (because the other parse will have been rejected).

+

You might notice that this seems to apply only to binary infix operators. In
+fact, the real behavior is slightly more complicated. Priority and
+associativity (for technical reasons that go beyond the scope of this document)
+really only apply when the rightmost or leftmost item in a production is a
+nonterminal. If the rightmost nonterminal is followed by a terminal (or
+respectively the leftmost preceded), priority and associativity do not apply.
+Thus we can generalize these concepts to arbitrary context-free grammars.

+

Note that in some cases, this is not the behavior you want. You may actually
+want to reject parses even though the leftmost and rightmost item in a
+production are terminals. You can accomplish this by means of the
+applyPriority attribute. When placed on a production, it tells the parser
+which nonterminals of a production the priority filter ought to reject children
+under, overriding the default behavior. For example, I might have a production
+like syntax Exp ::= foo(Exp, Exp) [applyPriority(1)]. This tells the parser
+to reject terms with looser priority binding under the first Exp, but not
+the second. By default, with this production, the priority filter would not
+apply to either position, because the first and last items of the production
+are both terminals.

+

Associativity is specified in K grammars by means of one of two different
+mechanisms. The first, and simplest, adds the associativity of a priority block
+of symbols prior to that block. For example, we can remove the remaining
+ambiguities in the above grammar like so:

+
syntax Exp ::= left:
+               Exp "*" Exp
+             | Exp "/" Exp
+             > right:
+               Exp "+" Exp
+             | Exp "-" Exp
+

This indicates that multiplication and division are left-associative, i.e.,
+once symbols with tighter priority have been parsed as innermost, symbols are
+nested with the rightmost on top. Addition and subtraction are right-associative,
+which is the opposite: symbols are nested with the leftmost on top.
+Note that this is similar to, but distinct from, evaluation order, which also
+concerns itself with the ordering of symbols and is described in the next
+section.

+

You may note we have not yet introduced the second syntax for priority
+and associativity. In some cases, syntax for a grammar might be spread across
+multiple modules, sometimes for very good reasons with respect to code
+modularity. As a result, it becomes infeasible to declare priority and
+associativity inline within a set of productions, because the productions
+are not contiguous within a single file.

+

For this purpose, we introduce the equivalent syntax priority,
+syntax left, syntax right, and syntax non-assoc declarations. For
+example, the above grammar can be written equivalently as:

+
syntax Exp ::= Exp "*" Exp [group(mult)]
+             | Exp "/" Exp [group(div)]
+             | Exp "+" Exp [group(add)]
+             | Exp "-" Exp [group(sub)]
+
+syntax priority mult div > add sub
+syntax left mult div
+syntax right add sub
+

Here, the group(_) attribute is used to create user-defined groups of
+sentences. A particular group name collectively refers to the whole set of
+sentences within that group. The sets are flattened together, so we could
+equivalently have written:

+
syntax Exp ::= Exp "*" Exp [group(mult)]
+             | Exp "/" Exp [group(mult)]
+             | Exp "+" Exp [group(add)]
+             | Exp "-" Exp [group(add)]
+
+syntax priority mult > add
+syntax left mult
+syntax right add
+

Note that syntax [left|right|non-assoc] should not be used to group together
+productions with different priorities. For example, this code would be invalid:

+
syntax priority mult > add
+syntax left mult add
+

Note that there is one other way to describe associativity, but it is
+prone to a very common mistake. You can apply the attribute left, right,
+or non-assoc directly to a production to indicate that it is, by itself,
+left-, right-, or non-associative.

+

However, this often does not mean what users think it means. In particular:

+
syntax Exp ::= Exp "+" Exp [left]
+             | Exp "-" Exp [left]
+

is not equivalent to:

+
syntax Exp ::= left:
+               Exp "+" Exp
+             | Exp "-" Exp
+

Under the first, each production is associative with itself, but not each
+other. Thus, x+y+z will parse unambiguously as (x+y)+z, but x+y-z will
+be ambiguous. However, in the second, x+y-z will parse unambiguously as
+(x+y)-z.

+

Think carefully about how you want your grammar to parse. In general, if you're
+not sure, it's probably best to group associativity together into the same
+blocks you use for priority, rather than using left, right, or non-assoc
+attributes on the productions.

+

Lexical identifiers

+ +

Sometimes it is convenient to be able to give a certain regular expression a
+name and then refer to it in one or more regular expression terminals. This
+can be done with a syntax lexical sentence in K:

+
syntax lexical Alphanum = r"[0-9a-zA-Z]"
+

This defines a lexical identifier Alphanum which can be expanded in any
+regular expression terminal to the above regular expression. For example, I
+might choose to then implement the syntax of identifiers as follows:

+
syntax Id ::= r"[a-zA-Z]{Alphanum}*" [token]
+

Here {Alphanum} expands to the above regular expression, making the sentence
+equivalent to the following:

+
syntax Id ::= r"[a-zA-Z]([0-9a-zA-Z])*" [token]
+

This feature can be used to more modularly construct the lexical syntax of your
+language. Note that K does not currently check that lexical identifiers used
+in regular expressions have been defined; an undefined identifier will, however,
+generate an error when the scanner is created, and the user ought to be able to
+debug what happened.

+

assoc, comm, idem, and unit attributes

+ +

These attributes are used to indicate whether a collection or a production
+is associative, commutative, idempotent, and/or has a unit.
+In general, you should not need to apply these attributes to productions
+yourself, however, they do have certain special meaning to K. K will generate
+axioms related to each of these concepts into your definition for you
+automatically. It will also automatically sort associative-commutative
+collections, and flatten the indentation of associative collections, when
+unparsing.
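
+

As an illustrative sketch only (the MultiSet sort and element label below are
+hypothetical; real collections normally come from K's builtin modules), a
+user-defined associative-commutative collection with a unit might be declared as:

+
syntax MultiSet ::= MultiSet MultiSet  [assoc, comm, unit(.MultiSet)]
+                  | ".MultiSet"        [klabel(.MultiSet), symbol]
+                  | element(KItem)
+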

+

public and private attribute

+ +

K allows users to declare certain pieces of syntax as either public or private.
+All syntax is public by default. Public syntax can be used from any module that
+imports that piece of syntax. A piece of syntax can be declared private with
+the private attribute. This means that that syntax can only be used in the
+module in which it is declared; it is not visible from modules that import
+that module.

+

You can also change the default visibility of a module with the private
+attribute, when it is placed directly on a module. A module with the private
+attribute has all syntax private by default; this can be overridden on
+specific sentences with the public attribute.

+

Note that the private module attribute also changes the default visibility
+of imports; please refer to the appropriate section elsewhere in the manual
+for more details.

+

Here is an example usage:

+
module WIDGET-SYNTAX
+
+  syntax Widget ::= foo()
+  syntax WidgetHelper ::= bar() [private] // this production is not visible
+                                          // outside this module
+endmodule
+
+module WIDGET [private]
+  imports WIDGET-SYNTAX
+
+  syntax Widget ::= fooImpl() // this production is not visible outside this
+                              // module
+
+  // this production is visible outside this module
+  syntax KItem ::= adjustWidget(Widget) [function, public]
+endmodule
+

Configuration Declaration

+

exit attribute

+ +

A single configuration cell containing an integer may have the "exit"
+attribute. This integer will then be used as the return value on the console
+when executing the program.

+

For example:

+
configuration <k> $PGM:Pgm </k>
+              <status-code exit=""> 1 </status-code>
+

declares that the cell status-code should be used as the exit-code for
+invocations of krun. Additionally, we state that the default exit-code is 1
+(an error state). One use of this is for writing testing harnesses which assume
+that the test fails until proven otherwise and only set the <status-code> cell
+to 0 if the test succeeds.
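
+

For instance, a test harness might include a rule along these lines (the
+success symbol is hypothetical) so that the exit code becomes 0 only once the
+test has succeeded:

+
syntax KItem ::= "success"
+rule <k> success => . ... </k>
+     <status-code> _ => 0 </status-code>
+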

+

Collection Cells: multiplicity and type attributes

+ +

Sometimes a semantics needs to allow multiple copies of the same cell, for
+example if you are making a concurrent multi-threading programming language.
+For this purpose, K supports the multiplicity and type attributes on cells
+declared in the configuration.

+

multiplicity can take on values * and ?. Declaring multiplicity="*"
+indicates that the cell may appear any number of times in a runtime
+configuration. Setting multiplicity="?" indicates that the cell may appear
+at most once in a runtime configuration. If there are no
+configuration variables present in the cell collection, the initial
+configuration will start with exactly 0 instances of the cell collection. If
+there are configuration variables present in the cell collection, the initial
+configuration will start with exactly 1 instance of the cell collection.

+

type can take on values Set, List, and Map. For example, here we declare
+several collection cells:

+
configuration <k> $PGM:Pgm </k>
+              <sets>  <set  multiplicity="?" type="Set">  0:Int </set>  </sets>
+              <lists> <list multiplicity="*" type="List"> 0:Int </list> </lists>
+              <maps>
+                <map multiplicity="*" type="Map">
+                  <map-key> 0:Int </map-key>
+                  <map-value-1> "":String </map-value-1>
+                  <map-value-2> 0:Int     </map-value-2>
+                </map>
+              </maps>
+

Declaring type="Set" indicates that duplicate occurrences of the cell should
+be de-duplicated, and accesses to instances of the cell will be nondeterministic
+choices (constrained by any other parts of the match and side-conditions).
+Similarly, declaring type="List" means that new instances of the cell can be
+added at the front or back, and elements can be accessed from the front or back,
+and the order of the cells will be maintained. The following are examples of
+introduction and elimination rules for these collections:

+
rule <k> introduce-set(I:Int) => . ... </k>
+     <sets> .Bag => <set> I </set> </sets>
+
+rule <k> eliminate-set => I ... </k>
+     <sets> <set> I </set> => .Bag </sets>
+
+rule <k> introduce-list-start(I:Int) => . ... </k>
+     <lists> (.Bag => <list> I </list>) ... </lists>
+
+rule <k> introduce-list-end(I:Int) => . ... </k>
+     <lists> ... (.Bag => <list> I </list>) </lists>
+
+rule <k> eliminate-list-start => I ... </k>
+     <lists> (<list> I </list> => .Bag) ... </lists>
+
+rule <k> eliminate-list-end => I ... </k>
+     <lists> ... (<list> I </list> => .Bag) </lists>
+

Notice that for multiplicity="?", we only admit a single <set> instance at
+a time. For the type=List cell, we can add/eliminate cells from the front or
+back of the <lists> cell. Also note that we use .Bag to indicate the empty
+cell collection in all cases.

+

Declaring type="Map" indicates that the first sub-cell will be used as a
+cell-key. This means that matching on those cells will be done as a map-lookup
+operation if the cell-key is mentioned in the rule (for performance). If the
+cell-key is not mentioned, it will fall back to a normal nondeterministic choice
+constrained by other parts of the match and any side-conditions. Note that there
+is no special meaning to the name of the cells (in this case <map>,
+<map-key>, <map-value-1>, and <map-value-2>). Additionally, any number of
+sub-cells are allowed, and the entire instance of the cell collection is
+considered part of the cell-value, including the cell-key (<map-key> in this
+case) and the surrounding collection cell (<map> in this case).

+

For example, the following rules introduce, set, retrieve from, and eliminate
+type="Map" cells:

+
rule <k> introduce-map(I:Int) => . ... </k>
+     <maps> ... (.Bag => <map> <map-key> I </map-key> ... </map>) ... </maps>
+
+rule <k> set-map-value-1(I:Int, S:String) => . ... </k>
+     <map> <map-key> I </map-key> <map-value-1> _ => S </map-value-1> ... </map>
+
+rule <k> set-map-value-2(I:Int, V:Int) => . ... </k>
+     <map> <map-key> I </map-key> <map-value-2> _ => V </map-value-2> ... </map>
+
+rule <k> retrieve-map-value-1(I:Int) => S ... </k>
+     <map> <map-key> I </map-key> <map-value-1> S </map-value-1> ... </map>
+
+rule <k> retrieve-map-value-2(I:Int) => V ... </k>
+     <map> <map-key> I </map-key> <map-value-2> V </map-value-2> ... </map>
+
+rule <k> eliminate-map(I:Int) => . ... </k>
+     <maps> ... (<map> <map-key> I </map-key> ... </map> => .Bag) ... </maps>
+

Note how each rule makes sure that <map-key> cell is mentioned, and we
+continue to use .Bag to indicate the empty collection. Also note that
+when introducing new map elements, you may omit any of the sub-cells which are
+not the cell-key. In case you do omit sub-cells, you must use structural
+framing ... to indicate the missing cells; they will receive the default
+value given in the configuration declaration.

+

Rule Declaration

+

Rule Structure

+ +

Each K rule follows the same basic structure (given as an example here):

+
rule LHS => RHS requires REQ ensures ENS [ATTRS]
+

The portion between rule and requires is referred to as the rule body,
+and may contain one or more rewrites (though not nested). Here, the rule body is
+LHS => RHS, where LHS and RHS are used as placeholders for the pre- and
+post- states. Note that we lose no generality referring to the LHS or the
+RHS, even in the presence of multiple rewrites, as the rewrites are pulled to
+the top-level anyway.

+

Next is the requires clause, represented here as REQ. The requires clause is
+an additional predicate (function-like term of sort Bool), which is to be
+evaluated before applying the rule. If the requires clause does not evaluate to
+true, then the rule does not apply.
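
+

As a small sketch (the abs function is made up for illustration), a requires
+clause restricts when each rule may apply:

+
syntax Int ::= abs(Int) [function]
+rule abs(I:Int) => I        requires I >=Int 0
+rule abs(I:Int) => 0 -Int I requires I  <Int 0
+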

+

Finally is the ensures clause, represented here as ENS. The ensures clause
+is to be interpreted as a post-condition, and will be automatically added to the
+path condition if the rule applies. It may cause the entire term to become
+undefined, but the backend will not stop itself from applying the rule in this
+case. Note that concrete backends (e.g., the LLVM backend) are free to ignore the
+ensures clause.

+

Overall, the transition represented by such a rule is from a state
+LHS #And REQ ending in a state RHS #And ENS. When backends apply this rule
+as a transition/rewrite, they should:

+
    +
  • Check if pattern LHS matches (or unifies) with the current term, giving
    +substitution alpha.
  • +
  • Check if the instantiation alpha(REQ) is valid (or satisfiable).
  • +
  • Build the new term alpha(RHS #And ENS), and check if it's satisfiable.
  • +
+

Pattern Matching operator

+ +

Sometimes when you want to express a side condition, you want to say that a
+rule matches if a particular term matches a particular pattern, or if it
+instead does /not/ match a particular pattern.

+

The syntax in K for this is :=K and :/=K. It has similar meaning to ==K and
+=/=K, except that where ==K and =/=K express equality, :=K and :/=K express
+model membership. That is to say, whether or not the rhs is a member of the set
+of terms expressed by the lhs pattern. Because the lhs of these operators is a
+pattern, the user can use variables in the lhs of the operator. However, due to
+current limitations, these variables are NOT bound in the rest of the term.
+The user is thus encouraged to use anonymous variables only, although this is
+not required.
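
+

As a sketch (the bar constructor and isBarShaped function are made up for
+illustration), the pattern goes on the lhs of the operator and the term being
+tested on the rhs:

+
syntax KItem ::= bar(Int)
+syntax Bool ::= isBarShaped(KItem) [function]
+rule isBarShaped(X:KItem) => true  requires bar(_) :=K  X
+rule isBarShaped(X:KItem) => false requires bar(_) :/=K X
+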

+

This is compiled by the K frontend down to an efficient pattern matching on a
+fresh function symbol.

+

Anonymous function applications

+ +

There are a number of cases in K where you would prefer to be able to take some
+term on the RHS, bind it to a variable, and refer to it in multiple different
+places in a rule.

+

You might also prefer to take a variable for which you know some of its
+structure, and modify some of its internal structure without requiring you to
+match on every single field contained inside that structure.

+

In order to do this, we introduce syntax to K that allows you to construct
+anonymous functions in the RHS of a rule and apply them to a term.

+

The syntax for this is:

+
#fun(RuleBody)(Argument)
+

Note the limitations currently imposed by the implementation. These functions
+are not first-class: you cannot bind them to a variable and inject them like
+you can with a regular klabel for a function. You also cannot express multiple
+rules or multiple parameters, or side conditions. All of these are extensions
+we would like to support in the future, however.

+

In the following, we use three examples to illustrate the behavior of #fun.
+We point out that the support for #fun is provided by the frontend, not the
+backends.

+

The three examples are real examples borrowed or modified from existing language
+semantics.

+

Example 1 (A Simple Self-Explained Example).

+
#fun(V:Val => isFoo(V) andBool isBar(V))(someFunctionReturningVal())
+

Example 2 (Nested #fun).

+
   #fun(C
+=> #fun(R
+=> #fun(E
+=> foo1(E, R, C)
+  )(foo2(C))
+  )(foo3(0))
+  )(foo4(1))
+

This example is from the beacon semantics:
+https://github.com/runtimeverification/beacon-chain-spec/blob/master/beacon-chain.k
+at line 302, with some modification for simplicity. Note how
+variables C, R, E are bound in the nested #fun.

+

Example 3 (Matching a structure).

+
rule foo(K, RECORD) =>
+  #fun(record(... field: _ => K))(RECORD)
+

Unlike previous examples, the LHS of #fun in this example is no longer a
+variable, but a structure. It has the same spirit as the first two examples,
+but we match the RECORD with a structure record( DotVar, field: X), instead
+of a standalone variable. We also use K's local rewrite syntax (i.e., the
+rewriting symbol => does not occur at the top-level) to prevent writing
+duplicate expressions on the LHS and RHS of the rewriting.

+

Macros and Aliases

+ +

A production can be tagged with the macro, alias, macro-rec, or alias-rec
+attributes. In all cases, what this signifies is that this is a macro production.
+Macro rules are rules where the top symbol of the left-hand side is a macro
+label. Macro rules are applied statically during compilation on all terms that
+they match, and statically before program execution on the initial configuration.
+Currently, macro rules are required to not have side conditions, although they
+can contain sort checks.

+

alias rules are also applied statically in reverse prior to unparsing on the
+final configuration. Note that a macro rule can have unbound variables in the
+right hand side. When such a macro exists, it should be used only on the left
+hand side of rules, unless the user is performing symbolic execution and expects
+to introduce symbolic terms into the subject being rewritten.

+

However, when used on the left hand side of a rule, it functions similarly to a
+pattern alias, and allows the user to concisely express a reusable pattern that
+they wish to match on in multiple places.

+

For example, consider the following semantics:

+
syntax KItem ::= "foo" [alias] | "foobar"
+syntax KItem ::= bar(KItem) [macro] | baz(Int, KItem)
+rule foo => foobar
+rule bar(I) => baz(?_, I)
+rule bar(I) => I
+

This will rewrite baz(0, foo) to foo. First baz(0, foo) will be rewritten
+statically to baz(0, foobar). Then the non-macro rule will apply (because
+the rule will have been rewritten to rule baz(_, I) => I). Then foobar will
+be rewritten statically after rewriting finishes to foo via the reverse form
+of the alias.

+

Note that macros do not apply recursively within their own expansion. This is
+done so as to ensure that macro expansion will always terminate. If the user
+genuinely desires a recursive macro, the macro-rec and alias-rec attributes
+can be used to provide this behavior.

+

For example, consider the following semantics:

+
syntax Exp ::= "int" Exp ";" | "int" Exps ";" [macro] | Exp Exp | Id
+syntax Exps ::= List{Exp,","}
+
+rule int X:Id, X':Id, Xs:Exps ; => int X ; int X', Xs ;
+

This will expand int x, y, z; to int x; int y, z; because the macro does
+not apply the second time after applying the substitution of the first
+application. However, if the macro attribute were changed to the macro-rec
+attribute, it would instead expand (as the user likely intended) to
+int x; int y; int z;.

+

The alias-rec attribute behaves with respect to the alias attribute the
+same way the macro-rec attribute behaves with respect to macro.

+

anywhere rules

+ +

Some rules are not functional, but you want them to apply anywhere in the
+configuration (similar to functional rules). You can use the anywhere
+attribute on a rule to instruct the backends to make sure they apply anywhere
+they match in the entire configuration.

+

For example, if you want to make sure that some associative operator is always
+right-associated anywhere in the configuration, you can do:

+
syntax Stmt ::= Stmt ";" Stmt
+
+rule (S1 ; S2) ; S3 => S1 ; (S2 ; S3) [anywhere]
+

Then after every step, all occurrences of _;_ will be re-associated. Note that
+this allows the symbol _;_ to still be a constructor, even though it is
+simplified similarly to a function.

+

trusted claims

+ +

You may add the trusted attribute to a given claim for the K prover to
+automatically add it to the list of proven circularities, instead of trying to
+discharge it separately.
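
+

For instance, a previously proven lemma might be assumed within a larger proof
+like so (a sketch; the sumTo symbol is hypothetical):

+
claim <k> sumTo(N:Int) => N *Int (N +Int 1) /Int 2 ... </k>
+  requires N >=Int 0
+  [trusted]
+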

+

Projection and Predicate functions

+ +

K automatically generates certain predicate and projection functions from the
+syntax you declare. For example, if you write:

+
syntax Foo ::= foo(bar: Bar)
+

It will automatically generate the following K code:

+
syntax Bool ::= isFoo(K) [function]
+syntax Foo ::= "{" K "}" ":>Foo" [function]
+syntax Bar ::= bar(Foo) [function]
+
+rule isFoo(F:Foo) => true
+rule isFoo(_) => false [owise]
+
+rule { F:Foo }:>Foo => F
+rule bar(foo(B:Bar)) => B
+

The first two types of functions are generated automatically for every sort in
+your K definition, and the third type of function is generated automatically
+for each named nonterminal in your definition. Essentially, isFoo for some
+sort Foo will tell you whether a particular term of sort K is a Foo,
+{F}:>Foo will cast F to sort Foo if F is of sort Foo and will be
+undefined (i.e., theoretically defined as #Bottom, the bottom symbol in
+matching logic) otherwise. Finally, bar will project out the child of a foo
+named bar in its production declaration.

+

Note that if another term of equal or smaller sort to Foo exists and has a
+child named bar of equal or smaller sort to Bar, this will generate an
+ambiguity during parsing, so care should be taken to ensure that named
+nonterminals are sufficiently unique from one another to prevent such
+ambiguities. Of course, the compiler will generate a warning in this case.

+

simplification attribute

+ +

The simplification attribute identifies rules outside the main semantics that
+are used to simplify function patterns.

+

Conditions: A simplification rule is applied by matching the function
+arguments, instead of unification as when applying function definition
+rules. This allows function symbols to appear nested as arguments to other
+functions on the left-hand side of a simplification rule, which is forbidden in
+function definition rules. For example, this rule would not be accepted as a
+function definition rule:

+
rule (X +Int Y) +Int Z => X +Int (Y +Int Z) [simplification]
+

A simplification rule is only applied when the current side condition implies
+the requires clause of the rule, like function definition rules.

+

Order: The simplification attribute accepts an optional integer argument
+which is the rule's simplification priority; if the optional argument is not
+specified, it is equivalent to a simplification priority of 50. Backends
+should attempt simplification rules in order of their simplification
+priority
, but are not required to do so; in fact, the backend is free to apply
+simplification rules at any time. Because of this, users must ensure that
+simplification rules are sound regardless of their order of application. This
+differs from the priority attribute in that rules with the priority
+attribute must be applied in their priority order by the backend. It is an
+error to have the priority attribute on a simplification rule.
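
+

For instance, one might give cheap arithmetic identities a priority below the
+default of 50 so that backends are encouraged to try them before other
+simplification rules (a sketch):

+
rule X +Int 0 => X [simplification(45)]
+rule 0 +Int X => X [simplification(45)]
+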

+

For example, for the following definition:

+
    syntax WordStack ::= Int ":" WordStack | ".WordStack"
+    syntax Int ::= sizeWordStack    ( WordStack       ) [function]
+                 | sizeWordStackAux ( WordStack , Int ) [function]
+ // --------------------------------------------------------------
+    rule sizeWordStack(WS) => sizeWordStackAux(WS, 0)
+
+    rule sizeWordStackAux(.WordStack, N) => N
+    rule sizeWordStackAux(W : WS    , N) => sizeWordStackAux(WS, N +Int 1)
+

We might add the following simplification lemma:

+
    rule sizeWordStackAux(WS, N) => N +Int sizeWordStackAux(WS, 0)
+      requires N =/=Int 0
+      [simplification]
+

Then this simplification rule will only apply if the Haskell backend can prove
+that notBool N =/=Int 0 is unsatisfiable. This avoids an infinite cycle of
+applying this simplification lemma.

+

NOTE: The frontend and Haskell backend do not check that supplied
+simplification rules are sound; this is the developer's responsibility. In
+particular, rules with the simplification attribute must preserve definedness;
+that is, if the left-hand side refers to any partial function then:

+
    +
  • the right-hand side must be #Bottom when the left-hand side is #Bottom, or
  • +
  • the rule must have an ensures clause that is false when the left-hand
    +side is #Bottom, or
  • +
  • the rule must have a requires clause that is false when the left-hand
    +side is #Bottom.
  • +
+

These conditions are in order of decreasing preference: the best option is to
+preserve #Bottom on the right-hand side, the next best option is to have an
+ensures clause, and the least-preferred option is to have a requires clause.
+The most preferred option is to write total functions and avoid the entire issue.
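
+

As a sketch of the last option, the following simplification over the partial
+function /Int carries a requires clause that is false exactly when its
+left-hand side is undefined:

+
rule (X *Int Y) /Int Y => X requires Y =/=Int 0 [simplification]
+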

+

NOTE: The Haskell backend does not attempt to prove claims whose right-hand
+side is #Bottom. The reason for this is that the general case is undecidable,
+and the backend might enter an infinite loop. Therefore, the backend emits a
+warning if it encounters such a claim.

+

concrete and symbolic attributes (Haskell backend)

+ +

Users can control the application of simplification rules using the concrete
+and the symbolic attributes by specifying the type of patterns the rule's
+arguments are to match.

+

A concrete pattern is a pattern which does not contain variables or unevaluated
+functions; otherwise, the pattern is symbolic.

+

The semantics of the two attributes is defined as follows:

+
    +
  • If a simplification rule is marked concrete, then all arguments must be
    +concrete for the rule to match.
  • +
  • If a simplification rule is marked symbolic, then all arguments must be
    +symbolic for the rule to match.
  • +
  • The following syntax concrete(<variables>) (resp. symbolic(<variables>)),
    +where <variables> is a list of variable names separated by commas, can be used
    +to specify the exact arguments the user expects to match concrete (resp. symbolic)
    +patterns.
  • +
+

For example, the following will only match when all arguments
+are concrete:

+
rule X +Int (Y +Int Z) => (X +Int Y) +Int Z [simplification, concrete]
+

Conversely, the following will only match when all arguments
+are symbolic:

+
rule X +Int (Y +Int Z) => (X +Int Y) +Int Z [simplification, symbolic]
+

In practice, the following rules will re-associate and commute terms to combine
+concrete arguments:

+
rule (A +Int Y) +Int Z => A +Int (Y +Int Z)
+  [concrete(Y, Z), symbolic(A), simplification]
+
+rule X +Int (B +Int Z) => B +Int (X +Int Z)
+  [concrete(X, Z), symbolic(B), simplification]
+

The unboundVariables attribute

+ +

Normally, K rules are not allowed to contain regular (i.e., not fresh, not
+existential) variables in the RHS / requires / ensures clauses which are not
+bound in the LHS.

+

However, in certain cases this behavior might be desired, like, for example,
+when specifying a macro rule which is to be used in the LHS of other rules.
+To allow for such cases, but still be useful and perform the unboundness checks
+in regular cases, the unboundVariables attributes allows the user to specify
+a comma-separated list of names of variables which can be unbound in the rule.

+

For example, in the macro declaration

+
  rule cppEnumType => bar(_, scopedEnum() #Or unscopedEnum() ) [unboundVariables(_)]
+

the declaration unboundVariables(_) allows the rule to pass the unbound
+variable checks, and this in turn allows for cppEnumType to be used in
+the LHS of a rule to mean the pattern above:

+
  rule inverseConvertType(cppEnumType, foo((cppEnumType #as T::CPPType => underlyingType(T))))
+

The memo attribute

+ +

The memo attribute is a hint from the user to the backend to memoize a
+function. Not all backends support memoization, but when the attribute is used
+and the definition is compiled for a memo-supporting backend, then calls to
+the function may be cached. At the time of writing, only the Haskell
+backend supports memoization.

+

Limitations of memoization with the Haskell backend

+ +

The Haskell backend will only cache a function call if all arguments are concrete.

+

It is recommended not to memoize recursive functions, as each recursive call
+will be stored in the cache, but only the first iteration will be retrieved from
+the cache; that is, the cache will be filled with many unreachable
+entries. Instead, we recommend to perform a worker-wrapper transformation on
+recursive functions, and apply the memo attribute to the wrapper.
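
+

A sketch of such a worker-wrapper split (the function names are illustrative):
+only the wrapper carries memo, so only top-level calls are cached, while the
+recursion happens in the un-memoized worker:

+
syntax Int ::= triangle   (Int) [function, memo]
+             | triangleAux(Int) [function]
+
+rule triangle(N:Int) => triangleAux(N)
+
+rule triangleAux(0)     => 0
+rule triangleAux(N:Int) => N +Int triangleAux(N -Int 1) requires N >Int 0
+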

+

Warning: A function declared with the memo attribute must not use
+uninterpreted functions in the side-condition of any rule. Memoizing such an
+impure function is unsound. To see why, consider the following rules:

+
syntax Bool ::= impure( Int ) [function]
+
+syntax Int ::= unsound( Int ) [function, memo]
+rule unsound(X:Int) => X +Int 1 requires impure(X)
+rule unsound(X:Int) => X        requires notBool impure(X)
+

Because the function impure is not given rules to cover all inputs, unsound
+can be memoized incoherently. For example,

+
{unsound(0) #And {impure(0) #Equals true}} #Equals 1
+

but

+
{unsound(0) #And {impure(0) #Equals false}} #Equals 0
+

The memoized value of unsound(0) would be incoherently determined by which
+pattern the backend encounters first.

+

Variable Sort Inference

+ +

In K, it is not required that users declare the sorts of variables in rules or
+in the initial configuration. If the user does not explicitly declare the sort
+of a variable somewhere via a cast (see below), the sort of the variable is
+inferred from context based on the sort signature of every place the variable
+appears in the rule.

+

As an example, consider the rule for addition in IMP:

+
    syntax Exp ::= Exp "+" Exp | Int
+
+    rule I1 + I2 => I1 +Int I2
+

Here +Int is defined in the INT module with the following signature:

+
    syntax Int ::= Int "+Int" Int [function]
+

In the rule above, the sort of both I1 and I2 is inferred as Int. This is because
+a variable must have the same sort every place it appears within the same rule.
+While a variable appearing only on the left-hand-side of the rule could have
+sort Exp instead, the same variable appears as a child of +Int, which
+constrains the sorts of I1 and I2 more tightly. Since the sort must be a
+subsort of Int or equal to Int, and Int has no subsorts, we infer Int
+as the sorts of I1 and I2. This means that the above rule will not match
+until I1 and I2 become integers (i.e., have already been evaluated).

+

More complex examples are possible, however:

+
    syntax Exp ::= Exp "+" Int | Int
+    rule _ + _ => 0
+

Here we have two anonymous variables. They do not refer to the same variable
+as one another, so they can have different sorts. The right side is constrained
+by + to be of sort Int, but the left side could be either Exp or Int.
+When this occurs, we have multiple solutions to the sorts of the variables in
+the rule. K will only choose solutions which are maximal, however. To be
+precise, if two different solutions exist, but the sorts of one solution are
+all greater than or equal to the sorts of the other solution, K will discard
+the smaller solution. Thus, in the case above, the variable on the left side
+of the + is inferred of sort Exp, because the solution (Exp, Int) is
+strictly greater than the solution (Int, Int).

+

It is possible, however, for terms to have multiple maximal solutions:

+
    syntax Exp ::= Exp "+" Int | Int "+" Exp | Int
+    rule I1 + I2 => 0
+

In this example, there is an ambiguous parse. This could parse as either
+the first + or the second. In the first case, the maximal solution chosen is
+(Exp, Int). In the second, it is (Int, Exp). Neither of these solutions is
+greater than the other, so both are allowed by K. As a result, this program
+will emit an error because the parse is ambiguous. To pick one solution over
+the other, a cast or a prefer or avoid attribute can be used.
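
+

For instance, by the maximality criterion described above, casting one of the
+variables narrows one solution so that the other becomes strictly greater and
+is the one chosen (a sketch):

+
syntax Exp ::= Exp "+" Int | Int "+" Exp | Int
+rule I1:Int + I2 => 0  // (Int, Exp) is now the unique maximal solution, so the
+                       // term parses unambiguously with the second production
+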

+

Casting

+ +

There are three main types of casts in K: the semantic cast, the strict cast,
+and the projection cast.

+

Semantic casts

+ +

For every sort S declared in your grammar, K will define the following
+production for you for use in rules:

+
    syntax S ::= S ":S"
+

The meaning of this cast is that the term inside the cast must be less than
+or equal to Sort. This can be used to resolve ambiguities, but its principal
+purpose is to guide execution by telling K what sort variables must match in
+order for the rule to apply. When compiled, it will generate a pattern that
+matches on an injection into Sort.

+

Strict casts

+ +

K also introduces the strict cast:

+
    syntax S ::= S "::S"
+

The meaning at runtime is exactly the same as the semantic cast; however, it
+restricts the sort of the term inside the cast to exactly Sort. That is
+to say, if you use it on something that is a strictly smaller sort, it will
+generate a type error. This is useful in certain circumstances to help
+disambiguate terms, when a semantic cast would not have resolved the ambiguity.
+As such, it is primarily used to solve ambiguities rather than to guide
+execution.
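
+

As a sketch (the productions are hypothetical), a strict cast can commit to a
+particular production when several could parse:

+
syntax Exp  ::= Int
+syntax Stmt ::= print(Exp) | print(Int)
+
+// the strict cast requires E to have sort exactly Exp, so this left-hand side
+// can only have been parsed with the print(Exp) production
+rule <k> print(E::Exp) => . ... </k>
+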

+

Projection casts

+ +

K also introduces the projection cast:

+
    syntax {S2} S ::= "{" S2 "}" ":>S"
+

The meaning of this cast at runtime is that if the term inside is of sort
+Sort, it should have its injection stripped away and the value inside is
+returned as a term of static sort Sort. However, if the term is of a
+different sort, it is an error and execution will get stuck. Thus the primary
+usefulness of this cast is to cast the return value of a function with a
+greater sort down to a strictly smaller sort that you expect the return value
+of the function to have. For example:

+
    syntax Exp ::= foo(Exp) [function] | bar(Int) | Int
+    rule foo(I:Int) => I
+    rule bar(I) => bar({foo(I +Int 1)}:>Int)
+

Here we know that foo(I +Int 1) will return an Int, but the return sort of
+foo is Exp. So we project the result into the Int sort so that it can
+be placed as the child of a bar.

+

owise and priority attributes.

+ +

Sometimes, it is simply not convenient to explicitly describe every
+single negative case under which a rule should not apply. Instead,
+we simply wish to say that a rule should only apply after some other set of
+rules have been tried. K introduces two different attributes that can be
+added to rules which will automatically generate the necessary matching
+conditions in a manner which is performant for concrete execution (indeed,
+during concrete execution it generally outperforms code where the conditions
+are written explicitly).

+

The first is the owise attribute. Very roughly, rules without an attribute
+indicating their priority apply first, followed by rules with the owise
+attribute only if all the other rules have been tried and failed. For example,
+consider the following function:

+
syntax Int ::= foo(Int) [function]
+rule foo(0) => 0
+rule foo(_) => 1 [owise]
+

Here foo(0) is defined explicitly as 0. Any other integer yields the
+integer 1. In particular, the second rule above will only be tried after the
+first rule has been shown not to apply.

+

This is because the first rule has a lower number assigned for its priority
+than the second rule. In practice, each rule in your semantics is implicitly
+or explicitly assigned a numerical priority. Rules are tried in increasing
+order of priority, starting at zero and trying each increasing numerical value
+successively.

+

You can specify the priority of a rule with the priority attribute. For
+example, I could equivalently write the second rule above as:

+
rule foo(_) => 1 [priority(200)]
+

The number 200 is not chosen at random. In fact, when you use the owise
+attribute, what you are doing is implicitly setting the priority of the rule
+to 200. This has a couple of implications:

+
    +
  1. Multiple rules with the owise attribute all have the same priority and thus
    +can apply in any order.
  2. +
  3. Rules with priority higher than 200 apply after all rules with the
    +owise attribute have been tried.
  4. +
+

There is one more rule by which priorities are assigned: a rule with no
+attributes indicating its priority is assigned the priority 50. Thus,
+with each priority explicitly declared, the above example looks like:

+
syntax Int ::= foo(Int) [function]
+rule foo(0) => 0 [priority(50)]
+rule foo(_) => 1 [owise]
+

One final note: the llvm backend reserves priorities between 50 and 150
+inclusive for certain specific purposes. Because of this, explicit
+priorities which are given within this region may not behave precisely as
+described above. This is primarily in order that it be possible where necessary
+to provide guidance to the pattern matching algorithm when it would otherwise
+make bad choices about which rules to try first. You generally should not
+give any rule a priority within this region unless you know exactly what the
+implications are with respect to how the llvm backend orders matches.

+

Evaluation Strategy

+

strict and seqstrict attributes

+ +

The strictness attributes allow defining evaluation strategies without having
+to explicitly make rules which implement them. This is done by injecting
+heating and cooling rules for the subterms. For this to work, you need to
+define what a result is for K, by extending the KResult sort.

+

For example:

+
syntax AExp ::= Int
+              | AExp "+" AExp [strict, klabel(addExp)]
+

This generates two heating rules (where the hole syntaxes "[]" "+" AExp and
+AExp "+" "[]" is automatically added to create an evaluation context):

+
rule [addExp1-heat]: <k> HOLE:AExp +  AE2:AExp => HOLE ~>  [] + AE2 ... </k> [heat]
+rule [addExp2-heat]: <k>  AE1:AExp + HOLE:AExp => HOLE ~> AE1 +  [] ... </k> [heat]
+

And two corresponding cooling rules:

+
rule [addExp1-cool]: <k> HOLE:AExp ~>  [] + AE2 => HOLE +  AE2 ... </k> [cool]
+rule [addExp2-cool]: <k> HOLE:AExp ~> AE1 +  [] =>  AE1 + HOLE ... </k> [cool]
+

Note that the rules are given labels based on the klabel of the production, which
+nonterminal is the hole, and whether it's the heating or the cooling rule.

+

You will note that these rules can apply one after another infinitely. In
+practice, the KResult sort is used to break this cycle by ensuring that only
+terms that are not part of the KResult sort will be heated. The heat and
+cool attributes are used to tell the compiler that these are heating and
+cooling rules and should be handled in the manner just described. Nothing stops
+the user from writing such heating and cooling rules directly if they wish,
+although we describe other more convenient syntax for most of the advanced
+cases below.

+

One other thing to note is that in the above sentences, HOLE is just a
+variable, but it has special meaning in the context of sentences with the
+heat or cool attribute. In heating or cooling rules, the variable named
+HOLE is considered to be the term being heated or cooled and the compiler
+will generate isKResult(HOLE) and notBool isKResult(HOLE) side conditions
+appropriately to ensure that the backend does not loop infinitely. The module
+BOOL will also be automatically and privately included for semantic
+purposes. The syntax for parsing programs will not be affected.

+

In order for this functionality to work, you need to define the KResult sort.
+For instance, we tell K that a term is fully evaluated once it becomes an Int
+here:

+
syntax KResult ::= Int
+

Note that you can also say that a given expression is strict only in
+specific argument positions. Here we use this to define "short-circuiting"
+boolean operators.

+
syntax KResult ::= Bool
+
+syntax BExp ::= Bool
+              | BExp "||" BExp [strict(1)]
+              | BExp "&&" BExp [strict(1)]
+
+rule <k> true  || _    => true ... </k>
+rule <k> false || REST => REST ... </k>
+
+rule <k> true  && REST => REST  ... </k>
+rule <k> false && _    => false ... </k>
+

If you want to force a specific evaluation order of the arguments, you can use
+the variant seqstrict to do so. For example, this would make the boolean
+operators short-circuit in their second argument first:

+
syntax KResult ::= Bool
+
+syntax BExp ::= Bool
+              | BExp "||" BExp [seqstrict(2,1)]
+              | BExp "&&" BExp [seqstrict(2,1)]
+
+rule <k> _    || true  => true ... </k>
+rule <k> REST || false => REST ... </k>
+
+rule <k> REST && true  => REST  ... </k>
+rule <k> _    && false => false ... </k>
+

This will generate rules like this in the case of _||_ (note that BE1 will
+not be heated unless isKResult(BE2) is true, meaning that BE2 must be
+evaluated first):

+
rule <k>  BE1:BExp || HOLE:BExp => HOLE ~> BE1 ||  [] ... </k> [heat]
+rule <k> HOLE:BExp ||  BE2:BExp => HOLE ~>  [] || BE2 ... </k> requires isKResult(BE2) [heat]
+
+rule <k> HOLE:BExp ~>  [] || BE2 => HOLE ||  BE2 ... </k> [cool]
+rule <k> HOLE:BExp ~> BE1 ||  [] =>  BE1 || HOLE ... </k> [cool]
+

Context Declaration

+ +

Sometimes more advanced evaluation strategies are needed. By default, the
+strict and seqstrict attributes are limited in that they cannot describe
+the context in which heating or cooling should occur. When this type of
+control over the evaluation strategy is required, context sentences can be
+used to simplify the process of declaring heating and cooling when it would be
+unnecessarily verbose to write heating and cooling rules directly.

+

For example, if the user wants to heat a term if it exists under a foo
+constructor if the term to be heated is of sort bar, one might write the
+following context (with the optional label):

+
context [foo]: foo(HOLE:Bar)
+

Once again, note that HOLE is just a variable, but one that has special
+meaning to the compiler indicating the position in the context that should
+be heated or cooled.

+

This will automatically generate the following sentences:

+
rule [foo-heat]: <k> foo(HOLE:Bar) => HOLE ~> foo([]) ... </k> [heat]
+rule [foo-cool]: <k> HOLE:Bar ~> foo([]) => foo(HOLE) ... </k> [cool]
+

The user may also write the K cell explicitly in the context declaration
+if they want to match on another cell as well, for example:

+
context <k> foo(HOLE:Bar) ... </k> <state> .Map </state>
+

This context will now only heat or cool if the state cell is empty.

+

Side conditions in context declarations

+ +

The user is allowed to write a side condition in a context declaration, like
+so:

+
context foo(HOLE:Bar) requires baz(HOLE)
+

This side condition will be appended verbatim to the heating rule that is
+generated, however, it will not affect the cooling rule that is generated:

+
rule <k> foo(HOLE:Bar) => HOLE ~> foo([]) ... </k> requires baz(HOLE) [heat]
+rule <k> HOLE:Bar ~> foo([]) => foo(HOLE) ... </k> [cool]
+

Rewrites in context declarations

+ +

The user can also include exactly one rewrite operation in a context
+declaration if that rule rewrites the variable HOLE on the left hand side
+to a term containing HOLE on the right hand side. For example:

+
context foo(HOLE:Bar => bar(HOLE))
+

In this case, the code generated will be as follows:

+
rule <k> foo(HOLE:Bar) => bar(HOLE) ~> foo([]) ... </k> [heat]
+rule <k> bar(HOLE:Bar) ~> foo([]) => foo(HOLE) ... </k> [cool]
+

This can be useful if the user wishes to evaluate a term using a different
+set of rules than normal.

+

result attribute

+ +

Sometimes it is necessary to be able to evaluate a term to a different sort
+than KResult. This is done by means of adding the result attribute to
+a strict production, a context, or an explicit heating or cooling rule:

+
syntax BExp ::= Bool
+              | BExp "||" BExp [seqstrict(2,1), result(Bool)]
+

In this case, the sort check used by seqstrict and by the heat and cool
+attributes will be isBool instead of isKResult. This particular example
+does not really require use of the result attribute, but if the user wishes
+to evaluate a term of sort KResult further, the result attribute would be
+required.

+

hybrid attribute

+ +

In certain situations, it is desirable to treat a particular production which
+has the strict attribute as a result if the term has had its arguments fully
+evaluated. This can be accomplished by means of the hybrid attribute:

+
syntax KResult ::= Bool
+
+syntax BExp ::= Bool
+              | BExp "||" BExp [strict(1), hybrid]
+

This attribute is equivalent in this case to the following additional axiom
+being added to the definition of isKResult:

+
rule isKResult(BE1:BExp || BE2:BExp) => true requires isKResult(BE1)
+

Sometimes you wish to declare a production hybrid with respect to a predicate
+other than isKResult. You can do this by specifying a sort as the body of the
+hybrid attribute, e.g.:

+
syntax BExp ::= BExp "||" BExp [strict(1), hybrid(Foo)]
+

generates the rule:

+
rule isFoo(BE1:BExp || BE2:BExp) => true requires isFoo(BE1)
+

Properly speaking, hybrid takes an optional comma-separated list of sort
+names. If the list is empty, the attribute is equivalent to hybrid(KResult).
+Otherwise, it generates hybrid predicates for exactly the sorts named.

+

Context aliases

+ +

Sometimes it is necessary to define a fairly complicated evaluation strategy
+for a lot of different operators. In this case, the user could simply write
+a number of complex context declarations, however, this quickly becomes
+tedious. For this purpose, K has a concept called a context alias. A context
+alias is a bit like a template for describing contexts. The template can then
+be instantiated against particular productions using the strict and
+seqstrict attributes.

+

Here is a (simplified) example taken from the K semantics of C++:

+
context alias [c]: <k> HERE:K ... </k> <evaluate> false </evaluate>
+context alias [c]: <k> HERE:K ... </k> <evaluate> true </evaluate> [result(ExecResult)]
+
+syntax Expr ::= Expr "=" Init [strict(c; 1)]
+

This defines the evaluation strategy during the translation phase of a C++
+program for the assignment operator. It is equivalent to writing the following
+context declarations:

+
context <k> HOLE:Expr = I:Init ... </k> <evaluate> false </evaluate>
+context <k> HOLE:Expr = I:Init ... </k> <evaluate> true </evaluate> [result(ExecResult)]
+

What this is saying is, if the evaluate cell is false, evaluate the term
+like normal to a KResult. But if the evaluate cell is true, instead
+evaluate it to the ExecResult sort.

+

Essentially, we have given a name to this evaluation strategy in the form of
+the rule label on the context alias sentences (in this case, c). We can
+then say that we want to use this evaluation strategy to evaluate particular
+arguments of particular productions by referring to it by name in a strict
+attribute. For example, strict(c) will instantiate these contexts once for
+each argument of the production, whereas strict(c; 1) will instantiate it
+only for the first argument. The special variable HERE is used to tell the
+compiler where you want to place the production that is to be heated or cooled.

+

You can also specify multiple context aliases for different parts of a production,
+for example:

+
syntax Exp ::= foo(Exp, Exp) [strict(left; 1; right; 2)]
+

This says that we can evaluate the left and right arguments in either order, but to evaluate
+the left using the left context alias and the right using the right context alias.
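
+

The left and right aliases referenced here would themselves be declared as
+context alias sentences; purely as an illustration (the LVal and RVal result
+sorts are hypothetical), they might differ only in what their holes must
+evaluate to:

+
context alias [left]:  <k> HERE:K ... </k> [result(LVal)]
+context alias [right]: <k> HERE:K ... </k> [result(RVal)]
+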

+

We can also say seqstrict(left; 1; right; 2), in which case we additionally must evaluate
+the left argument before the right argument. Note, all strict positions are considered collectively
+when determining the evaluation order of seqstrict or the hybrid predicates.

+

A strict attribute with no rule label associated with it is equivalent to
+a strict attribute given with the following context alias:

+
context alias [default]: <k> HERE:K ... </k>
+

One syntactic convenience that is provided is that if you wish to declare the following context:

+
context foo(HOLE => bar(HOLE))
+

you can simply write the following:

+
syntax Foo ::= foo(Bar) [strict(alias)]
+
+context alias [alias]: HERE [context(bar)]
+

Pattern Matching

+

As Patterns

+ +

New syntax has been added to K for matching a pattern and binding the resulting
+match in its entirety to a variable.

+

The syntax is:

+
Pattern #as V::Var
+

In this case, Pattern, including any variables, is matched and the resulting
+variables are added to the substitution if matching succeeds. Furthermore, the
+term matched by Pattern is added to the substitution as V.
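
+

As a small sketch (the constructors are hypothetical), an as pattern lets the
+right-hand side reuse the entire matched subterm without rebuilding it:

+
syntax KItem ::= pair(Int, Int) | keep(KItem, Int)
+
+rule <k> pair(X, _) #as P => keep(P, X) ... </k>
+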

+

This code can also be used outside of any rewrite, in which case matching
+occurs as if it appeared on the left hand side, and the right hand side becomes
+a variable corresponding to the alias.

+

It is an error to use an as pattern on the right hand side of a rule.

+

Record-like KApply Patterns

+ +

We have added a syntax for matching on KApply terms which mimics the record
+syntax in functional languages. This allows us to more easily express patterns
+involving a KApply term in which we don't care about some or most of the
+children, without introducing a dependency into the code on the number of
+arguments which could be changed by a future refactoring.

+

The syntax is:

+
record(... field1: Pattern1, field2: Pattern2)
+

Note that this only applies to productions that are prefix productions.
+A prefix production is considered by the implementation to be any production
+whose production items match the following regular expression:

+
(Terminal(_)*) Terminal("(")
+(NonTerminal (Terminal(",") NonTerminal)* )?
+Terminal(")")
+

In other words, any sequence of terminals followed by an open parenthesis, an
+optional comma separated list of non-terminals, and a close parenthesis.

+

If a prefix production has no named nonterminals, a record(...) syntax is
+allowed, but in order to reference specific fields, it is necessary to give one
+or more of the non-terminals in the production names.
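
+

As a sketch, given a prefix production with named nonterminals (the names are
+hypothetical), a rule can mention only the field it cares about:

+
syntax Frame ::= frame(id: Int, env: Map, depth: Int)
+syntax Map ::= getEnv(Frame) [function]
+
+rule getEnv(frame(... env: E)) => E
+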

+

Note: because the implementation currently creates one production per possible
+set of fields to match on, and because all possible permutations of all
+possible subsets of a list of n elements is a number that scales factorially
+and reaches over 100 thousand productions at n=8, we currently do not allow
+fields to be matched in any order like a true record, but only in the same
+order as they appear in the production itself.

+

Given that this only reduces the number of productions to the size of the power
+set, this will still explode the parsing time if we create large productions of
+10 or more fields that all have names. This is something that should probably
+be improved, however, productions with that large of an arity are rare, and
+thus it has not been viewed as a priority.

+

Or Patterns

+ +

Sometimes you wish to express that a rule should match if one out of multiple
+patterns should match the same subterm. We can now express this in K by means
+of using the #Or ML connective on the left hand side of a rule.

+

For example:

+
rule foo #Or bar #Or baz => qux
+

Here any of foo, bar, or baz will match this rule. Note that the behavior is
+ill-defined if it is not the case that all the clauses of the or have the same
+bound variables.

+

Matching global context in function rules

+ +

On occasion it is highly desirable to be able to look up information from the
+global configuration and match against it when evaluating a function. For this
+purpose, we introduce a new syntax for function rules.

+

This syntax allows the user to match on function context from within a
+function rule:

+
syntax Int ::= foo(Int) [function]
+
+rule [[ foo(0) => I ]]
+     <bar> I </bar>
+
+rule something => foo(0)
+

This is completely desugared by the K frontend and does not require any special
+support in the backend. It is an error to have a rewrite inside function
+context, as we do not currently support propagating such changes back into the
+global configuration. It is also an error if the context is not at the top
+level of a rule body.

+

Desugared code:

+
syntax Int ::= foo(Int, GeneratedTopCell) [function]
+
+rule foo(0, <generatedTop>
+              <bar> I </bar>
+              ...
+            </generatedTop> #as Configuration) => I
+rule <generatedTop>
+       <k> something ... </k>
+       ...
+     </generatedTop> #as Configuration
+  => <generatedTop>
+       <k> foo(0, Configuration) ... </k>
+       ...
+     </generatedTop>
+

Collection patterns

+ +

It is allowed to write patterns on the left hand side of rules which refer to
+complex terms of sort Map, List, and Set, despite these patterns ostensibly
+breaking the rule that terms which are functions should not appear on the left
+hand side of rules. Such terms are destructured into pattern matching
+operations.

+

The following forms are allowed:

+
// 0 or more elements followed by 0 or 1 variables of sort List followed by
+// 0 or more elements
+ListItem(E1) ListItem(E2) L:List ListItem(E3) ListItem(E4)
+
+// the empty list
+.List
+
+// 1 or more list update operations applied to a variable
+L:List [ K1 <- E1 ] [ K2 <- E2 ]
+
+// 0 or more elements in any order plus 0 or 1 variables of sort Set
+// in any order
+SetItem(K1) SetItem(K2) S::Set SetItem(K3) SetItem(K4)
+
+// the empty set
+.Set
+
+// 0 or more elements in any order plus 0 or 1 variables of sort Map
+// in any order
+K1 |-> E1 K2 |-> E2 M::Map K3 |-> E3 K4 |-> E4
+
+// the empty map
+.Map
+

Here K1, K2, K3, K4 etc can be any pattern except a pattern containing both
+function symbols and unbound variables. An unbound variable is a variable whose
+binding cannot be determined by means of decomposing non-set-or-map patterns or
+map elements whose keys contain no unbound variables.

+

This is determined recursively, i.e., the term K1 |-> E2 E2 |-> E3 E3 |-> E4 is
+considered to contain no unbound variables.

+

Note that in the pattern K1 |-> E2 K3 |-> E4 E4 |-> E5, K1 and K3 are
+unbound, but E4 is bound because it is bound by deconstructing the key K3, even
+though K3 is itself unbound.

+

In the above examples, E1, E2, E3, and E4 can be any pattern that is normally
+allowed on the lhs of a rule.

+

When a map, set, or list key contains function symbols, we know that the
+variables in that key are bound (because of the above restriction), so it is
+possible to evaluate the function to a concrete term prior to performing the
+lookup.

+

Indeed, this is the precise semantics which occurs; the function is evaluated
+and the result is looked up in the collection.

+

For example:

+
syntax Int ::= f(Int) [function]
+rule f(I:Int) => I +Int 1
+rule <k> I:Int => . ... </k> <state> ... SetItem(f(I)) ... </state>
+

This will rewrite I to . if and only if the state cell contains
+I +Int 1.

+

Note that in the case of Set and Map, one guarantee is that K1, K2, K3, and K4
+represent /distinct/ elements. Pattern matching fails if the correct number of
+distinct elements cannot be found.

+

Matching on cell fragments

+ +

K allows matching fragments of the configuration and using them to construct
+terms and use as function parameters.

+
configuration <t>
+                <k> #init ~> #collectOdd ~> $PGM </k>
+                <fs>
+                  <f multiplicity="*" type="Set"> 1 </f>
+                </fs>
+              </t>
+

The #collectOdd construct grabs the entire content of the <fs> cell.
+We may also match on only a portion of its content. Note that the fragment
+must be wrapped in a <fs> cell at the call site.

+
syntax KItem ::= "#collectOdd"
+rule <k> #collectOdd => collectOdd(<fs> Fs </fs>) ... </k>
+     <fs> Fs </fs>
+

The collectOdd function collects the items it needs

+
syntax Set ::= collectOdd(FsCell) [function]
+rule collectOdd(<fs> <f> I </f> REST </fs>) => SetItem(I) collectOdd(<fs> REST </fs>) requires I %Int 2 ==Int 1
+rule collectOdd(<fs> <f> I </f> REST </fs>) =>            collectOdd(<fs> REST </fs>) requires I %Int 2 ==Int 0
+rule collectOdd(<fs> .Bag </fs>) => .Set
+

all-path and one-path attributes to distinguish reachability claims

+ +

As the Haskell backend can handle both one-path and all-path reachability
+claims, but both these are encoded as rewrite rules in K, these attributes can
+be used to clarify what kind of claim a rule is.

+

In addition to being able to annotate a rule with one of these attributes
+(if a rule is annotated with both at the same time, only one of them is used),
+one can also annotate whole modules, giving a default claim type for all rules
+in that module.

+
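As a sketch of how these annotations might look (the module, the claims, and
the start and done constructs below are hypothetical):

module EXAMPLE-SPEC [one-path]
  // inherits the module default: a one-path (reachability) claim
  rule [c1]: <k> start => done ... </k>

  // explicitly marked as all-path, overriding the module default
  rule [c2]: <k> start => done ... </k> [all-path]
endmodule
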

Additionally, the Haskell backend introduces an extra command line option
+for the K frontend, --default-claim-type, with possible values
+all-path and one-path to allow choosing a default type for all
+claims.

+

Set Variables

+ +

Motivation

+ +

Set variables were introduced as part of Matching Mu Logic, the mathematical
+foundations for K. In Matching Mu Logic, terms evaluate to sets of values.
+This is useful for both capturing partiality (as in 3/0) and capturing
+non-determinism (as in 3 #Or 5). Consequently, symbol interpretation is
+extended to have a collective interpretation over sets of input values.

+

Usually, K rules are given using regular variables, which expect that the term
+they match is both defined and has a unique interpretation.

+

However, it is sometimes useful to have simplification rules which work over
+any kind of pattern, be it undefined or non-deterministic. This behavior can be
+achieved by using set variables to stand for any kind of pattern.

+

Syntax

+ +

Any variable prefixed by @ will be considered a set variable.

+

Example

+ +

Below is a simplification rule which motivated this extension:

+
  rule #Ceil(@I1:Int /Int @I2:Int) =>
+    {(@I2 =/=Int 0) #Equals true} #And #Ceil(@I1) #And #Ceil(@I2)
+    [anywhere]
+

This rule basically says that @I1:Int /Int @I2:Int is defined if @I1 and
+@I2 are defined and @I2 is not 0. Using set variables here is important, as
+it allows the simplification rule to apply to any symbolic patterns, regardless
+of whether they are defined or not.

+

This allows simplifying the expression #Ceil((A:Int /Int B:Int) /Int C:Int) to:

+
{(C =/=Int 0) #Equals true} #And #Ceil(C) #And ({(B =/=Int 0) #Equals true}
+#And #Ceil(B) #And #Ceil(A)
+

See kframework/kore#729 for
+more details.

+

SMT Translation

+ +

K makes queries to an SMT solver (Z3) to discharge proof obligations when doing
+symbolic execution. You can control how these queries are made using the
+attributes smtlib, smt-hook, and smt-lemma on declared productions.
+These attributes guide the prover when it tries to apply rules to discharge a
+proof obligation.

+
    +
  • smt-hook(...) allows you to specify a term in SMTLIB2 format which should
    +be used to encode that production, and assumes that all symbols appearing in
    +the term are already declared by the SMT solver.
  • +
  • smtlib(...) allows you to declare a new SMT symbol to be used when that
    +production is sent to Z3, and gives it uninterpreted function semantics.
  • +
  • smt-lemma can be applied to a rule to encode it as a conditional equality
    +when sending queries to Z3. A rule rule LHS => RHS requires REQ will be
+encoded as the conditional equality (=> REQ (= LHS RHS)). Every symbol
    +present in the rule must have an smt-hook(...) or smtlib(...) attribute.
  • +
+
syntax Int ::= "~Int" Int          [function, klabel(~Int_), symbol,
+                                    smtlib(notInt)]
+             | Int "^%Int" Int Int [function, klabel(_^%Int__), symbol,
+                                    smt-hook((mod (^ #1 #2) #3))]
+

In the example above, we declare two productions ~Int_ and _^%Int__, and
+tell the SMT solver to:

+
    +
  • use uninterpreted function semantics for ~Int_ via SMTLIB2 symbol
    +notInt, and
  • +
  • use the SMTLIB2 term (mod (^ #1 #2) #3) (where #N marks the Nth
    +production non-terminal argument positions) for _^%Int__, where mod and
    +^ already are declared by the SMT solver.
  • +
+
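As a sketch of the smt-lemma attribute described above (the lemma itself is
hypothetical; _+Int_ and _<=Int_ already carry smt-hook attributes in the
standard library):

rule X:Int <=Int X +Int 1 => true [smt-lemma]

This rule would be sent to Z3 roughly as the equality (= (<= X (+ X 1)) true).
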

Caution

+ +

Set variables are currently only supported by the Haskell backend.
+The use of rules with set variables should be sound for all other backends
+which just execute by rewriting; however, it might not be safe for backends
+which want to guarantee coverage.

+

Variables occurring only in the RHS of a rule

+ +

This section presents possible scenarios requiring variables to only appear in
+the RHS of a rule.

+

Summary

+ +

Except for ? variables and ! (fresh) variables, which are
+required to only appear in the RHS of a rule, all other variables must
+also appear in the LHS of a rule. This restriction also applies to anonymous
+variables; in particular, for claims, ?_ (not _) should be used in the RHS
+to indicate that something changes but we don't care to what value.
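
A small sketch of this convention in a claim (the constructs and cells are
hypothetical):

// the <log> cell changes to some value we do not care about
rule [spec]: <k> run => done ... </k>
             <log> _ => ?_ </log>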

+

To support specifying random-like behavior, the above restriction can be relaxed
+by annotating a rule with the unboundVariables attribute whenever the rule
+intentionally contains regular variables only occurring in the RHS.

+

Introduction

+ +

K uses question mark variables of the form ?X to refer to
+existential variables, and uses ensures to specify logical constraints on
+those variables.
+These variables are only allowed to appear in the RHS of a K rule.

+

If the rules represent rewrite (semantic) steps or verification claims,
+then the ? variables are existentially quantified at the top of the RHS;
+otherwise, if they represent equations, the ? variables are quantified at the
+top of the entire rule.

+

Note that when both ?-variables and regular variables are present,
+regular variables are (implicitly) universally quantified on top of the rule
+(already containing the existential quantifications).
+This essentially makes all ? variables depend on all regular variables.

+

All examples below are intended more for program verification /
+symbolic execution, and thus concrete implementations might choose to ignore
+them altogether or to provide ad-hoc implementations for them.

+

Example: Verification claims

+ +

Consider the following definition of a (transition) system:

+
module A
+  rule foo => true
+  rule bar => true
+  rule bar => false
+endmodule
+

Consider also, the following specification of claims about the definition above:

+
module A-SPEC
+  rule [s1]: foo => ?X:Bool
+  rule [s2]: foo =>  X:Bool  [unboundVariables(X)]
+  rule [s3]: bar => ?X:Bool
+  rule [s4]: bar =>  X:Bool  [unboundVariables(X)]
+endmodule
+
One-path interpretation
+ +
    +
  • (s1) says that there exists a path from foo to some boolean, which is
    +satisfied easily using the foo => true rule
  • +
  • (s3) says the same thing about bar and can be satisfied by either of
    +bar => true and bar => false rules
  • +
  • (s2) and (s4) can be better understood by replacing them with instances for
+each element of type Bool, which can be interpreted as saying that
    +both true and false are reachable from foo for (s2), or bar for (s4),
    +respectively. +
      +
    • (s2) cannot be verified as we cannot find a path from foo to false.
    • +
    • (s4) can be verified by using bar => true to show true is reachable and
      +bar => false to achieve the same thing for false
    • +
    +
  • +
+
All-path interpretation
+ +
    +
  • +

    (s1) says that all paths from foo will reach some boolean, which is
    +satisfied by the foo => true rule and the lack of other rules for foo

    +
  • +
  • +

    (s3) says the same thing about bar and can be satisfied by checking that
    +both bar => true and bar => false end in a boolean, and there are no
    +other rules for bar

    +
  • +
  • +

    (s2) and (s4) can be better understood by replacing them with instances for
+each element of type Bool, which can be interpreted as saying that
    +both true and false are reachable in all paths originating in
    +foo for (s2), or bar for (s4), respectively.
    +This is a very strong claim, requiring that all paths originating in
    +foo (bar) pass through both true and false,
    +so neither (s2) nor (s4) can be verified.

    +

    Interestingly enough, adding a rule like false => true would make both
    +(s2) and (s4) hold.

    +
  • +
+

Example: Random Number Construct rand()

+ +

The random number construct rand() is a language construct which could be
+easily conceived to be part of the syntax of a programming language:

+
Exp ::= "rand" "(" ")"
+

The intended semantics of rand() is that it can rewrite to any integer in
+a single step. This could be expressed with the following infinitely
+many rules.

+
rule  rand() => 0
+rule  rand() => 1
+rule  rand() => 2
+  ...    ...
+rule rand() => (-1)
+rule rand() => (-2)
+  ...    ...
+

Since we need an instance of the rule for every integer, one could summarize
+the above infinitely many rules with the rule

+
rule rand() => I:Int [unboundVariables(I)]
+

Note that I occurs only in the RHS in the rule above, and thus the rule
+needs the unboundVariables(I) attribute to signal that this is intentional.

+

One can define variants of rand() by further constraining the output variable
+as a precondition to the rule.

+
Rand-like examples
+ +
    +
  1. +

    randBounded(M,N) can rewrite to any integer between M and N

    +
    syntax Exp ::= randBounded(Int, Int)
    +rule randBounded(M, N) => I
    +  requires M <=Int I andBool I <=Int N
    +  [unboundVariables(I)]
    +
  2. +
  3. +

    randInList(Is) takes a list Is of items
    +and can rewrite in one step to any item in Is.

    +
    syntax Exp ::= randInList (List)
    +rule randInList(Is) => I
    +  requires I inList Is
    +  [unboundVariables(I)]
    +
  4. +
  5. +

    randNotInList(Is) takes a list Is of items
    +and can rewrite in one step to any item not in Is.

    +
    syntax Exp ::= randNotInList (List)
    +rule randNotInList(Is) => I
    +  requires notBool(I inList Is)
    +  [unboundVariables(I)]
    +
  6. +
  7. +

    randPrime(), can rewrite to any prime number.

    +
    syntax Exp ::= randPrime ()
    +rule randPrime() => X:Int
    +  requires isPrime(X)
    +  [unboundVariables(X)]
    +

    where isPrime(_) is a predicate that can be defined in the usual way.

    +
  8. +
+

Note 1: all of the above are language constructs, not function symbols.

+

Note 2: Currently the frontend does not allow rules with universally quantified
+variables in the RHS which are not bound in the LHS.

+

Note 3. Allowing these rules in a concrete execution engine would require an
+algorithm for generating concrete instances for such variables, satisfying the
+given constraints; thus the unboundVariables attribute serves two purposes:

+
    +
  • to allow such rules to pass the variable checks, and
  • +
  • to signal (concrete execution) backends that specialized algorithm would be
    +needed to instantiate these variables.
  • +
+

Example: Fresh Integer Construct fresh(Is)

+ +

The fresh integer construct fresh(Is) is a language construct.

+
Exp ::= ... | "fresh" "(" List{Int} ")"
+

The intended semantics of fresh(Is) is that it can always rewrite to an
+integer that is not in Is.

+

Note that fresh(Is) and randNotInList(Is) are different; the former
+does not need to be able to rewrite to every integer not in Is,
+while the latter does.

+

For example, it is correct to implement fresh(Is) so it always returns the
+smallest positive integer that is not in Is, but the same implementation for
+randNotInList(Is) might be considered inadequate.
+In other words, there exist multiple correct implementations of fresh(Is),
+some of which may be deterministic, but there only exists a unique
+implementation of randNotInList(Is).
+Finally, note that randNotInList(Is) is a correct implementation
+for fresh(Is); hence, concrete execution engines can choose to handle
+such rules accordingly.

+

We use the following K syntax to define fresh(Is)

+
syntax Exp ::= fresh (List{Int})
+rule fresh(Is:List{Int}) => ?I:Int
+  ensures notBool (?I inList{Int} Is)
+

A variant of this would be a choiceInList(Is) language construct which would
+choose some number from a list:

+
syntax Exp ::= choiceInList (List{Int})
+rule choiceInList(Is:List{Int}) => ?I:Int
+  ensures ?I inList{Int} Is
+

Note: This definition is different from one using a ! variable to indicate
+freshness because using ! is just syntactic sugar for generating globally
+unique instances and relies on a special configuration cell, and cannot be
+constrained, while the fresh described here is local and can be constrained.
+While the first is more appropriate for concrete execution, this might be
+better for symbolic execution / program verification.

+

Example: Arbitrary Number (Unspecific Function) arb()

+ +

The function arb() is not a PL construct, but a mathematical function.
+Therefore, its definition should not be interpreted as an execution step, but
+rather as an equality.

+

The intended semantics of arb() is that it is an unspecified nullary function.
+The exact return value of arb() is unspecified in the semantics; it is left up
+to the implementation.
+However, being a mathematical function, arb() must return the same value in
+any one implementation.

+

We do not need special frontend syntax to define arb().
+We only need to define it in the usual way as a function
+(instead of a language construct), and provide no axioms for it.
+The total attribute ensures that the function is total, i.e.,
+that it evaluates to precisely one value for each input.
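
A minimal sketch of such a declaration (no defining rules are given, so the
value of arb() is left unspecified):

syntax Int ::= arb() [function, total]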

+
Variants
+ +

There are many variants of arb(). For example, arbInList(Is) is
+an unspecified function whose return value must be an element from Is.

+

Note that arbInList(Is) is different from choiceInList(Is), because
+choiceInList(Is) transitions to an integer in Is (could be a different one
+each time it is used), while arbInList(Is) is equal to a (fixed)
+integer in Is.

+

W.r.t. the arb variants, we can use ? variables and the function
+annotation to signal that we're defining a function and the value of the
+function is fixed, but non-determinate.

+
syntax Int ::= arbInList(List{Int}) [function]
+rule arbInList(Is:List{Int}) => ?I:Int
+  ensures ?I inList{Int} Is
+

If elimination of existentials in equational rules is needed, one possible
+approach would be through Skolemization,
+i.e., replacing the ? variable with a new uninterpreted function depending
+on the regular variables present in the function.

+

Example: Interval (Non-function Symbols) interval()

+ +

The symbol interval(M,N) is not a PL construct, nor a function in the
+first-order sense, but a proper matching-logic symbol, whose interpretation is
+in the powerset of its domain.
+Its axioms will not use rewrites but equalities.

+

The intended semantics of interval(M,N) is that it equals the set of
+integers that are larger than or equal to M and smaller than or equal to N.

+

Since expressing the axiom for interval requires an existential
+quantification on the right-hand-side, thus making it a non-total symbol
+defined through an equation, using ? variables might be confusing since their
+usage would be different from that presented in the previous sections.

+

Hence, the proposal to support this would be to write this as a proper ML rule.
+A possible syntax for this purpose would be:

+
eq  interval(M,N)
+    ==
+    #Exists X:Int .
+        (X:Int #And { X >=Int M #Equals true } #And { X <=Int N #Equals true })
+

Additionally, the symbol declaration would require a special attribute to
+signal the fact that it is not a constructor but a defined symbol.

+

Since this feature is not clearly needed by K users at the moment, it is only
+presented here as an example; its implementation will be postponed for such time
+when its usefulness becomes apparent.

+

Parser Generation

+

In addition to on-the-fly parser generation using kast, K is capable of
+ahead-of-time parser generation of LR(1) or GLR parsers using Flex and Bison.
+This can be done in one of two ways.

+
    +
  1. You can explicitly request for a particular parser to be generated by
    +invoking kast --gen-parser <outputFile> or
    +kast --gen-glr-parser <outputFile> respectively. kast will then create a
    +parser based on the same command line flags that govern on-the-fly parsing,
    +like -s to specify the starting sort, and -m to specify the module to
    +parse under. By default, this generates a parser for the sort of the $PGM
    +configuration variable in the main syntax module of the definition.
  2. +
  3. You can request that a specific set of parsers be generated for all the
    +configuration variables of your definition by passing the
    +--gen-bison-parser or --gen-glr-bison-parser flags to kompile.
    +kompile will decide the sorts to use as start symbols based on the sorts
    +in the configuration declaration for the configuration variables. The $PGM
    +configuration variable will be generated based on the main syntax module
    +of the definition. The user must explicitly annotate the configuration
    +declaration with the other modules to use to parse the other configuration
    +variables as attributes. For example, if I have the following cell in the
    +configuration declaration: <cell> foo($FOO:Foo, $BAR:Bar) </cell>,
+one might annotate it with the attribute pair parser="FOO, TEST; BAR, TEST2"
    +to indicate that configuration variable $FOO should be parsed in the
    +TEST module, and configuration variable $BAR should be parsed in the
+TEST2 module (see the sketch after this list). If the user forgets to annotate the declaration with the
    +parser attribute, only the $PGM parser will be generated.
  4. +
+
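A sketch of the annotated configuration declaration described in the list
above (the cell, the sorts Foo and Bar, and the modules TEST and TEST2 are
taken from that example; the surrounding definition is hypothetical):

configuration <cell parser="FOO, TEST; BAR, TEST2">
                foo($FOO:Foo, $BAR:Bar)
              </cell>
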

Bison-generated parsers are extremely fast compared to kast, but they have
+some important limitations:

+
    +
  • Bison parsers will always output Kore. You can then pass the resulting AST
    +directly to llvm-krun or kore-exec and bypass the krun frontend, making
    +them very fast, but lower-level.
  • +
  • Bison parsers do not yet support macros. This may change in a future release.
    +Note that you can use anywhere rules instead of macros in most cases to get
    +around this limitation, although they will not benefit from unparsing via the
    +alias attribute.
  • +
  • Obligation falls on the user to ensure that the grammar they write is LR(1)
    +if they choose to use LR(1) parsing. If this does not happen, the parser
    +generated will have shift/reduce or reduce/reduce conflicts and the parser
    +may behave differently than kast would (kast is a GLL parser, ie, it
    +is based on LL parsers and parses all unambiguous context-free grammars).
    +K provides an attribute, not-lr1, which can be applied to modules known to
    +not be LR(1), and will trigger a warning if the user attempts to generate an
    +LR(1) parser which recursively imports that module.
  • +
  • If you are using LR(1) based parsing, the prefer and avoid attributes are
    +ignored. It is only possible to implement these attributes by means of
    +generalized LL or LR parsing and a postprocessing on the AST to remove the
    +undesirable ambiguity.
  • +
  • Obligation falls on the user to ensure that the grammar they write has as
    +few conflicts as possible if they are using GLR parsing. Bison's GLR support
    +is quite primitive, and in the worst case it can use exponential space and
    +time to parse a program, which generally leads the generated parser to report
    +"memory exhausted", indicating that the parse could not be completed within
    +the stack space allocated by Bison. It's best to ensure that the grammar is
    +as close to LR(1) as possible and only utilizes conflicts where absolutely
    +necessary. One tool that can be used to facilitate this is to pass
    +--bison-lists to kompile. This will disable support for the List{Sort}
    +syntax production, and it will make NeList{Sort} left associative, but the
    +resulting productions generated for NeList{Sort} will be LR(1) and use bounded
    +stack space.
  • +
  • If the grammar you are parsing is context-sensitive (for example, because
    +it requires a symbol table to parse), one thing you can do to make this
    +language parse in K is to implement the language as an ambiguous grammar.
    +Bison's GLR parser will generate an amb production that is parametric in
    +the sort of the ambiguity. You can then import the K-AMBIGUITIES module
    +and use rewriting to resolve the ambiguities using whatever preprocessing
    +mechanisms you prefer.
  • +
+

Location Information

+

K is able to insert file, line, and column metadata into the parse tree on a
+per-sort basis when parsing using a bison-generated parser. To enable this,
+mark the sort with the locations attribute.

+
  syntax Exp [locations]
+  syntax Exp ::= Exp "/" Exp | Int
+

K implicitly wraps productions of these sorts in a #location term (see the
+K-LOCATIONS module in kast.md). The metadata can thus be accessed with
+ordinary rewrite rules:

+
  rule #location(_ / 0, File, StartLine, _StartColumn, _EndLine, _EndColumn) =>
+  "Error: Division by zero at " +String File +String ":" +String Int2String(StartLine)
+

Sometimes it is desirable to allow code to be written in a file which
+overwrites the current location information provided by the parser. This can be
+done via a combination of the #LineMarker sort and the --bison-file flag to
+the parser generator. If you declare a production of sort #LineMarker which
+contains a regular expression terminal, this will be treated as a
+line marker by the bison parser. The user will then be expected to provide
+an implementation of the parser for the line marker in C. The function expected
+by the parser has the signature void line_marker(char *, yyscan_t), where
+yyscan_t is a
+reentrant flex scanner.
+The string value of the line marker token as specified by your regular
+expression can be found in the first parameter of the function, and you can
+set the line number used by the scanner using yyset_lineno(int, yyscan_t). If
+you declare the variable extern char *filename, you can also set the current
+file name by writing a malloc'd, zero-terminated string to that variable.

+

Unparsing

+

A number of factors go into how terms are unparsed in K. Here we describe some
+of the features the user can use to control how unparsing happens.

+

Brackets

+ +

One of the phases that the unparser goes through is to insert productions
+tagged with the bracket attribute where it believes this is necessary
+in order to create a correct string that will be parsed back into the original
+AST. The most common case of this is in expression grammars. For example,
+consider the following grammar:

+
syntax Exp ::= Int
+             | Exp "*" Exp
+             > Exp "+" Exp
+

Here we have declared that expressions can contain integer addition and
+multiplication, and that multiplication binds tighter than addition. As a
+result, when writing a program, if we want to write an expression that first
+applies addition, then multiplication, we must use brackets: (1 + 2) * 3.
+Similarly, if we have such an AST, we must insert brackets into the AST
+in order to faithfully unparse the term in a manner that will be parsed back
+into the same ast, because if we do not, we end up unparsing the term as
+1 + 2 * 3, which will be parsed back as 1 + (2 * 3) because of the priority
+declaration in the grammar.

+

You can control how the unparser will insert such brackets by adding a
+production with the bracket attribute and the correct sort. For example, if,
+instead of parentheses, you want to use curly braces, you could write:

+
syntax Exp ::= "{" Exp "}" [bracket]
+

This would signal to the unparser how brackets should look for terms of sort
+Exp, and it will use this syntax when unparsing terms of sort Exp.

+

Commutative collections

+ +

One thing that K will do (unless you pass the --no-sort-collections flag to
+krun) is to sort associative, commutative collections (such as Set and Map)
+alphanumerically. For example, if I have a collection whose keys are of sort Id
+and they have the values a, b, c, and d, then unparsing will always print
+first the key a, then b, then c, then d, because this is the alphabetic order
+of these keys when unparsed.

+

Furthermore, K will sort numeric keys numerically. For example, if I have a
+collection whose keys are 1, 2, 5, 10, 30, it will first display 1, then 2,
+then 5, then 10, then 30, because it will sort these keys numerically. Note
+that this is different than an alphabetic sort, which would sort them as
+1, 10, 2, 30, 5. We believe the former is more intuitive to users.

+

Substitution filtering

+ +

K will remove substitution terms corresponding to anonymous variables when
+using the --pattern flag if those anonymous variables provide no information
+about the named variables in your search pattern. You can disable this behavior
+by passing --no-substitution-filtering to krun. When this flag is not passed,
+and you are using the Haskell backend, any equality in a substitution (ie, an
+#Equals under an #And under an #Or), will be hidden from the user if the
+left hand side is a variable that was anonymous in the --pattern passed by
+the user, unless that variable appears elsewhere in the substitution. If you
+want to see that variable in the substitution, you can either disable this
+filtering, or give that variable a name in the original search pattern.

+

Variable alpha renaming

+ +

K will automatically rename variables that appear in the output configuration.
+Similar to commutative collections, this is done to normalize the resulting
+configuration so that equivalent configurations will be printed identically
+regardless of how they happen to be reached. This pass can be disabled by
+passing --no-alpha-renaming to krun.

+

Macro expansion

+ +

K will apply macros in reverse on the output configuration if the macro was
+created with the alias or alias-rec attribute. See the section on macro
+expansion for more details.

+

Formatting

+ +

format attribute

+ +

K allows you to control how terms are unparsed using the format attribute.
+By default, a domain value is unparsed by printing its string value verbatim,
+and an application pattern is unparsed by printing its terminals and children
+in the sequence implied by its concrete syntax, separated by spaces. However,
+K gives you complete control over how you want to unparse the symbol.

+

A format attribute is a string containing zero or more escape sequences that
+tell K how to unparse the symbol. Escape sequences begin with a '%' and are
+followed by either an integer, or a single non-digit character. Below is a
+list of escape sequences recognized by the formatter:

Escape Sequence | Meaning
n               | Insert '\n' followed by the current indentation level
i               | Increase the current indentation level by 1
d               | Decrease the current indentation level by 1
c               | Move to the next color in the list of colors for this production
r               | Reset color to the default foreground color for the terminal (See below for more information on how colors work)
an integer      | Print a terminal or nonterminal from the production (See below for more information)
any other char  | Print that character verbatim
+

Using the integer escape sequence

+ +

In the integer escape sequence %a, the integer a is treated as a 1-based
+index into the terminals and nonterminals of the production.

+
    +
  • +

    If the offset refers to a terminal, move to the next color in the list of
    +colors for this production, print the value of that terminal, then reset the
    +color to the default foreground color for the terminal.

    +
  • +
  • +

    If the offset refers to a regular expression terminal, it is an error.

    +
  • +
  • +

    If the offset refers to a nonterminal, print the unparsed representation of
    +the corresponding child of the current term.

    +
  • +
+
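For instance, here is a sketch of a production whose format attribute combines
the escape sequences above (the Exp and Stmt sorts are hypothetical):

syntax Stmt ::= "if" "(" Exp ")" "{" Stmt "}" [format(%1 %2%3%4 %5%i%n%6%d%n%7)]

With this attribute, a term if (E) { S } unparses with the body S indented on
its own line between the braces.
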

color and colors attributes

+ +

K allows you to take advantage of ANSI terminal codes for foreground color
+in order to colorize output pretty-printed by the unparser. This is controlled
+via the color and colors attributes of productions. These attributes
+combine with the format attribute to control how a term is colorized.

+

The first thing to understand about how colorization works is that the color
+and colors attributes are used to construct a list of colors associated
+with each production, and the format attribute then uses that list to choose
+the color for each part of the production. For more information on how the
+format attribute chooses a color from the list, see above, but essentially,
+each terminal or %c in the format attribute advances the pointer in the list
+by one element, and terminals and %r reset the current color to the default
+foreground color of the terminal afterwards.

+

There are two ways you can construct a list of colors associated with a
+production:

+
    +
  • +

    The color attribute creates the entire list all with the same color, as
    +specified by the value of the attribute. When combined with the default format
    +attribute, this will color all the terminals in that production that color, but
    +more advanced techniques can be used as well.

    +
  • +
  • +

    The colors attribute creates the list from a manual, comma-separated list
    +of colors. The attribute is invalid if the length of the list is not equal to
    +the number of terminals in the production plus the number of %c substrings in
    +the format attribute.

    +
  • +
+
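A minimal sketch of both approaches (the productions are hypothetical):

// every terminal of this production is printed in red
syntax Exp ::= "error" "(" Exp ")" [color(red)]

// one color per terminal: "(" is yellow and ")" is green
syntax Exp ::= "(" Exp ")" [bracket, colors(yellow, green)]
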

Attributes Reference

+

Attribute Syntax Overview

+ +

In K, many different syntactic categories accept an optional trailing list of
+keywords known as attributes. Attribute lists have two different syntaxes,
+depending on where they occur. Each attribute also has a type which describes
+where it may occur.

+

The first syntax is a square-bracketed ([]) list of words. This syntax is
+available for the following attribute types:

+
    +
  1. module attributes - may appear immediately after the module keyword
  2. +
  3. sort attributes - may appear immediately after a sort declaration
  4. +
  5. production attributes - may appear immediately after a BNF production
    +alternative
  6. +
  7. rule attributes - may appear immediately after a rule
  8. +
  9. context attributes - may appear immediately after a context or context
    +alias
  10. +
  11. context alias attributes - may appear immediately after a context alias
  12. +
  13. claim attributes - may appear immediately after a claim
  14. +
+

The second syntax is the XML attribute syntax, i.e., a space delimited list of
+key-and-quoted-value pairs appearing inside the start tag of an XML element:
+<element key1="value" key2="value2" ... > </element>. This syntax is
+available for the following attribute types:

+
    +
  1. cell attributes - may appear inside of the cell start tag in
    +configuration declarations
  2. +
+

Unrecognized attributes are reported as an error. When we talk about
+the type of an attribute, we mean a syntactic category to which an attribute
+can be attached where the attribute has some semantic effect.
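
A small sketch showing both syntaxes in one (hypothetical) definition:

module ATTR-DEMO
  imports INT

  // production and rule attributes use the square-bracketed syntax
  syntax Int ::= double(Int) [function, total]
  rule double(I) => I +Int I [priority(50)]

  // cell attributes use the XML syntax
  configuration <k color="green"> $PGM:Int </k>
endmodule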

+

Attribute Index

+ +

We now provide an index of available attributes organized alphabetically with a
+brief description of each. Note that the same attribute may appear in the index
+multiple times to indicate its effect in different contexts or with/without
+arguments. A legend describing how to interpret the index follows.

Name | Type | Backend | Reference
alias-rec | prod | all | Macros and Aliases
alias | prod | all | Macros and Aliases
all-path | claim | haskell | all-path and one-path attributes to distinguish reachability claims
anywhere | rule | all | anywhere rules
applyPriority(_) | prod | all | Symbol priority and associativity
avoid | prod | all | Symbol priority and associativity
binder | prod | all | No reference yet.
bracket | prod | all | Parametric productions and bracket attributes
color(_) | prod | all | color and colors attributes
colors(_) | prod | all | color and colors attributes
concrete | mod | llvm | symbolic and concrete attribute
concrete(_) | rule | haskell | concrete and symbolic attributes (Haskell backend)
concrete | rule | haskell | concrete and symbolic attributes (Haskell backend)
context(_) | alias | all | Context aliases
deprecated | prod | all | deprecated attribute
exit = "" | cell | all | exit attribute
format | prod | all | format attribute
freshGenerator | prod | all | freshGenerator attribute
function | prod | all | function and total attributes
group(_) | all | all | Symbol priority and associativity
hook(_) | prod | all | No reference yet
hybrid(_) | prod | all | hybrid attribute
hybrid | prod | all | hybrid attribute
klabel(_) | prod | all | klabel(_) and symbol attributes
left | prod | all | Symbol priority and associativity
locations | sort | all | Location Information
macro-rec | prod | all | Macros and Aliases
macro | prod | all | Macros and Aliases
memo | rule | haskell | The memo attribute
multiplicity = "_" | cell | all | Collection Cells: multiplicity and type attributes
non-assoc | prod | all | Symbol priority and associativity
one-path | claim | haskell | all-path and one-path attributes to distinguish reachability claims
overload(_) | prod | all | overload(_) attribute
owise | rule | all | owise and priority attributes
prec(_) | token | all | prec attribute
prefer | prod | all | Symbol priority and associativity
priority(_) | rule | all | owise and priority attributes
private | mod | all | private attribute
private | prod | all | public and private attribute
public | mod | all | No reference yet.
public | prod | all | public and private attribute
result(_) | ctxt | all | result attribute
result(_) | rule | all | result attribute
right | prod | all | Symbol priority and associativity
seqstrict(_) | prod | all | strict and seqstrict attributes
seqstrict | prod | all | strict and seqstrict attributes
simplification | rule | haskell | simplification attribute (Haskell backend)
simplification(_) | rule | haskell | simplification attribute (Haskell backend)
smt-hook(_) | prod | haskell | SMT Translation
smtlib(_) | prod | haskell | SMT Translation
smt-lemma | rule | haskell | SMT Translation
strict | prod | all | strict and seqstrict attributes
strict(_) | prod | all | strict and seqstrict attributes
symbolic | mod | haskell | symbolic and concrete attribute
symbolic | rule | haskell | concrete and symbolic attributes (Haskell backend)
symbolic(_) | rule | haskell | concrete and symbolic attributes (Haskell backend)
symbol | prod | all | klabel(_) and symbol attributes
terminator-symbol(_) | prod | all | klabel(_) and symbol attributes
token | prod | all | token attribute
token | sort | all | token attribute
total | prod | all | function and total attributes
trusted | claim | haskell | trusted attribute
type = "_" | cell | all | Collection Cells: multiplicity and type attributes
unboundVariables(_) | rule | all | The unboundVariables attribute
unused | prod | all | unused attribute
concrete | mod | all | Specify that this module should only be included in concrete backends (LLVM backend).
symbolic | mod | all | Specify that this module should only be included in symbolic backends (Haskell backend).
stream = "_" | cell | all | Specify that this cell should be hooked up to a stream, either stdin, stdout, or stderr.
+

Internal Attribute Index

+ +

Some attributes should not generally appear in user code, except in some
+unusual or complex examples. Such attributes are typically generated by the
+compiler and used internally. We list these attributes below as a reference for
+interested readers:

Name | Type | Backend | Reference
assoc | prod | all | assoc, comm, idem and unit attributes
comm | prod | all | assoc, comm, idem and unit attributes
digest | mod | all | Contains the hash of the textual contents of the module.
idem | prod | all | assoc, comm, idem and unit attributes
unit | prod | all | assoc, comm, idem and unit attributes
userList | prod | all | Identifies the desugared form of Lst ::= List{Elm,"delim"}
predicate | prod | all | Specifies the sort of a predicate label
element | prod | all | Specifies the label of the elements in a list
bracketLabel | prod | all | Keep track of the label of a bracket production since it can't have a klabel
injective | prod | all | Label a given production as injective (unique output for each input)
internal | prod | all | Production is reserved for internal use by the compiler
cool | rule | all | strict and seqstrict attributes
heat | rule | all | strict and seqstrict attributes
+

Index Legend

+ +
    +
  • +

    Name - the attribute's name (optionally followed by an underscore _ to indicate the attribute takes arguments)

    +
  • +
  • +

    Type - the syntactic categories where this attribute is not ignored;
    +the possible values are the types mentioned above or shorthands:

    +
      +
    1. all - short for any type except cell
    2. +
    3. mod - short for module
    4. +
    5. sort
    6. +
    7. prod - short for production
    8. +
    9. rule
    10. +
    11. ctxt - short for context or context alias
    12. +
    13. claim
    14. +
    15. cell
    16. +
    +
  • +
  • +

    Backend - the backends that do not ignore this attribute; possible values:

    +
      +
    1. all - all backends
    2. +
    3. llvm - the LLVM backend
    4. +
    5. haskell - the Haskell backend
    6. +
    +
  • +
  • +

    Effect - the attribute's effect (when it applies)

    +
  • +
+

Pending Documentation

+

Backend features not yet given documentation:

+
    +
  • Parser of KORE terms and definitions
  • +
  • Term representation of K terms
  • +
  • Hooked sorts and symbols
  • +
  • Substituting a substitution into the RHS of a rule +
      +
    • domain values
    • +
    • functions
    • +
    • variables
    • +
    • symbols
    • +
    • polymorphism
    • +
    • hooks
    • +
    • injection compaction
    • +
    • overload compaction
    • +
    +
  • +
  • Pattern Matching / Unification of subject and LHS of rule +
      +
    • domain values
    • +
    • symbols
    • +
    • side conditions
    • +
    • and/or patterns
    • +
    • list patterns
    • +
    • nonlinear variables
    • +
    • map/set patterns +
        +
      • deterministic
      • +
      • nondeterministic
      • +
      +
    • +
    • modulo injections
    • +
    • modulo overloads
    • +
    +
  • +
  • Stepping +
      +
    • initialization
    • +
    • termination
    • +
    +
  • +
  • Print kore terms
  • +
  • Equality/comparison of terms
  • +
  • Owise rules
  • +
  • Strategy #STUCK axiom
  • +
  • User substitution +
      +
    • binders
    • +
    • kvar
    • +
    +
  • +
+

To get a complete list of hooks supported by K, you can run:

+
grep -P -R "(?<=[^-])hook\([^)]*\)" k-distribution/include/kframework/builtin/ \
+     --include "*.k" -ho | \
+sed 's/hook(//' | sed 's/)//' | sort | uniq | grep -v org.kframework
+

All of these hooks will also eventually need documentation.

+
+
+
    +
  1. Except for in a very limited number of special cases from the
    +K standard library. ↩︎

    +
  2. +
  3. The Maude documentation
    +has an example in a context that's somewhat similar to K; discussion of
    +ad-hoc overloading is not relevant. ↩︎

    +
  4. +
+
+

K Cheat Sheet

+

This is a quick reference of the most commonly used K tools.

+
kompile (--gen-bison-parser)? {file}                : compile definition, optionally generating an ahead-of-time parser
+krun {file}                                         : interpret file
+krun -cPGM='{string}'                               : interpret string
+kast --output (kore | kast) (-e|{file})             : parse expression or file
+kompile (--enable-search --backend haskell)? {file} : generate parser, enabling non-deterministic run
+krun (--search-all)? {file}                         : interpret file, evaluating non-deterministic runs as well
+foo-kompiled/parser_PGM {file}                      : ahead of time parse
+kompile (--main-module)? (--syntax-module)? {file}  : generate parser for {file}.k {file}-syntax.k, explicitly state main modules
+kparse <file> | kore-print -                        : parse and unparse a file
+kompile {file} --enable-llvm-debug                  : generate debuggable output for {file}.k
+krun {file} --debugger                              : debug K code
+kprove {file}                                       : Verify specs in {file}
+

During a GDB debugging session (see here for
+LLDB breakpoint syntax):

+
break {file}:{linenum}                              : add a breakpoint to {file}'s {linenum} numbered line
+k match {module}.{label} subject                    : investigate matching
+

K Tools

+

Here we document how to use some of the most commonly used K tools.

+

Minimizing Output

+

When one is working with kore-repl or the prover in general and looking at
+specific configurations using config, sometimes the configurations can be huge.

+

One tool to help print configuration compactly is the pyk print utility:

+
pyk print
+

We are going to use the --minimize option (which is actually used automatically
+when printing with pyk). This will filter out many uninteresting cells for the
+current config and make the result more compact.

+

Then, when invoking the prover, you can minimize your output by piping it into
+the pyk print ... facility with arguments for controlling the output:

+
kprove --output json --definition DEFN ... \
+    | jq .term                             \
+    | pyk print DEFN /dev/stdin --omit-labels ... --keep-labels ...
+

You can also use this in the kore-repl more easily, by making a helper script.
+In your current directory, save a new script pykprint.sh:

+
#!/bin/bash
+
+kast --input kore --output json --definition $1 /dev/stdin \
+    | jq .term                                             \
+    | pyk print $1 /dev/stdin --omit-labels $2
+

Now call config | bash pykprint.sh DEFN in Kore REPL to make the output
+smaller.

+

The options you have to control the output are as follows:

+
    +
  • --no-minimize: do not remove uninteresting cells.
  • +
  • --omit-cells: remove the selected cells from the output.
  • +
  • --keep-cells: keep only the selected cells in the output.
  • +
+

Note: Make sure that there is no whitespace around , in the omit list,
+otherwise you'll get an error (, is a list separator, so this
+requirement is strict).

+

Debugging

+

The LLVM Backend has support for integration with GDB. You can run the debugger
+on a particular program by passing the --debugger flag to krun, or by
+invoking the llvm backend interpreter directly. Below we provide a simple
+tutorial to explain some of the basic commands supported by the LLVM backend.

+

LLDB Support

+ +

GDB is not well-supported on macOS, particularly on newer OS versions and Apple
+Silicon ARM hardware. Consequently, if the --debugger option is passed to krun
+on macOS, LLDB[^1] is launched instead of GDB. However, the K-specific debugger
+scripts that GDB uses have not been ported to LLDB yet, and so the instructions
+in the rest of this section will not work.

+

The K Definition

+ +

Here is a sample K definition we will use to demonstrate debugging
+capabilities:

+
module TEST
+  imports INT
+
+  configuration <k> foo(5) </k>
+  rule [test]: I:Int => I +Int 1 requires I <Int 10
+
+  syntax Int ::= foo(Int) [function]
+  rule foo(I) => 0 -Int I
+
+endmodule
+

You should compile this definition with --backend llvm --enable-llvm-debug to
+use the debugger most effectively.

+

Stepping

+ +

Important: When you first run krun with option --debugger, GDB / LLDB
+will instruct you on how to modify ~/.gdbinit or ~/.lldbinit to enable
+printing abstract syntax of K terms in the debugger. If you do not perform this
+step, you can still use all the other features, but K terms will be printed as
+their raw address in memory.

+

GDB will need the kompiled interpreter in its safe path in order to access the
+pretty printing python script within it. A good way to do this would be to pick
+a minimum top-level path that covers all of your kompiled semantics (ie. set auto-load safe-path ~/k-semantics). LLDB has slightly different security
+policies that do not require fully-arbitrary code execution.

+

This section uses GDB syntax to demonstrate the debugging features. Please
+refer to the GDB to LLDB command map on
+macOS.

+

You can break before every step of execution is taken by setting a breakpoint
+on the k_step function.

+
(gdb) break definition.kore:k_step
+Breakpoint 1 at 0x25e340
+(gdb) run
+Breakpoint 1, 0x000000000025e340 in step (subject=`<generatedTop>{}`(`<k>{}`(`kseq{}`(`inj{Int{}, KItem{}}`(#token("0", "Int")),dotk{}(.KList))),`<generatedCounter>{}`(#token("0", "Int"))))
+(gdb) continue
+Continuing.
+
+Breakpoint 1, 0x000000000025e340 in step (subject=`<generatedTop>{}`(`<k>{}`(`kseq{}`(`inj{Int{}, KItem{}}`(#token("1", "Int")),dotk{}(.KList))),`<generatedCounter>{}`(#token("0", "Int"))))
+(gdb) continue 2
+Will ignore next crossing of breakpoint 1.  Continuing.
+
+Breakpoint 1, 0x000000000025e340 in step (subject=`<generatedTop>{}`(`<k>{}`(`kseq{}`(`inj{Int{}, KItem{}}`(#token("3", "Int")),dotk{}(.KList))),`<generatedCounter>{}`(#token("0", "Int"))))
+(gdb)
+

Breaking on a specific rule

+ +

You can break when a rule is applied by giving the rule a rule label. If the
+module name is TEST and the rule label is test, you can break when the rule
+applies by setting a breakpoint on the TEST.test.rhs function:

+
(gdb) break TEST.test.rhs
+Breakpoint 1 at 0x25e250: file /home/dwightguth/test/./test.k, line 4.
+(gdb) run
+Breakpoint 1, TEST.test.rhs (VarDotVar0=`<generatedCounter>{}`(#token("0", "Int")), VarDotVar1=dotk{}(.KList), VarI=#token("0", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb)
+

Note that the substitution associated with that rule is visible in the
+description of the frame.

+

You can also break when a side condition is applied using the TEST.test.sc
+function:

+
(gdb) break TEST.test.sc
+Breakpoint 1 at 0x25e230: file /home/dwightguth/test/./test.k, line 4.
+(gdb) run
+Breakpoint 1, TEST.test.sc (VarI=#token("0", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb)
+

Note that every variable used in the side condition can have its value
+inspected when stopped at this breakpoint, but other variables are not visible.

+

You can also break on a rule by its location:

+
(gdb) break test.k:4
+Breakpoint 1 at 0x25e230: test.k:4. (2 locations)
+(gdb) run
+Breakpoint 1, TEST.test.sc (VarI=#token("0", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb) continue
+Continuing.
+
+Breakpoint 1, TEST.test.rhs (VarDotVar0=`<generatedCounter>{}`(#token("0", "Int")), VarDotVar1=dotk{}(.KList), VarI=#token("0", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb) continue
+Continuing.
+
+Breakpoint 1, TEST.test.sc (VarI=#token("1", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb)
+

Note that this sets a breakpoint at two locations: one on the side condition
+and one on the right hand side. If the rule had no side condition, the first
+would not be set. You can also view the locations of the breakpoints and
+disable them individually:

+
(gdb) info breakpoint
+Num     Type           Disp Enb Address            What
+1       breakpoint     keep y   <MULTIPLE>
+        breakpoint already hit 3 times
+1.1                         y     0x000000000025e230 in TEST.test.sc at /home/dwightguth/test/./test.k:4
+1.2                         y     0x000000000025e250 in TEST.test.rhs at /home/dwightguth/test/./test.k:4
+(gdb) disable 1.1
+(gdb) continue
+Continuing.
+
+Breakpoint 1, TEST.test.rhs (VarDotVar0=`<generatedCounter>{}`(#token("0", "Int")), VarDotVar1=dotk{}(.KList), VarI=#token("1", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb) continue
+Continuing.
+
+Breakpoint 1, TEST.test.rhs (VarDotVar0=`<generatedCounter>{}`(#token("0", "Int")), VarDotVar1=dotk{}(.KList), VarI=#token("2", "Int")) at /home/dwightguth/test/./test.k:4
+4         rule [test]: I:Int => I +Int 1 requires I <Int 10
+(gdb)
+

Now only the breakpoint when the rule applies is enabled.

+

Breaking on a function

+ +

You can also break when a particular function in your semantics is invoked:

+
(gdb) info functions foo
+All functions matching regular expression "foo":
+
+File /home/dwightguth/test/./test.k:
+struct __mpz_struct *Lblfoo'LParUndsRParUnds'TEST'UndsUnds'Int(struct __mpz_struct *);
+(gdb) break Lblfoo'LParUndsRParUnds'TEST'UndsUnds'Int
+Breakpoint 1 at 0x25e640: file /home/dwightguth/test/./test.k, line 6.
+(gdb) run
+Breakpoint 1, Lblfoo'LParUndsRParUnds'TEST'UndsUnds'Int (_1=#token("1", "Int")) at /home/dwightguth/test/./test.k:6
+6         syntax Int ::= foo(Int) [function]
+(gdb)
+

In this case, the variables have numbers instead of names because the names of
+arguments in functions in K come from rules, and we are stopped before any
+specific rule has applied. For example, _1 is the first argument to the
+function.

+

You can also set a breakpoint in this location by setting it on the line
+associated with its production:

+
(gdb) break test.k:6
+Breakpoint 1 at 0x25e640: file /home/dwightguth/test/./test.k, line 6.
+(gdb) run
+Breakpoint 1, Lblfoo'LParUndsRParUnds'TEST'UndsUnds'Int (_1=#token("1", "Int")) at /home/dwightguth/test/./test.k:6
+6         syntax Int ::= foo(Int) [function]
+

These two syntaxes are equivalent; use whichever is easier for you.

+

You can also view the stack of function applications:

+
(gdb) bt
+#0  Lblfoo'LParUndsRParUnds'TEST'UndsUnds'Int (_1=#token("1", "Int")) at /home/dwightguth/test/./test.k:6
+#1  0x000000000025e5f8 in apply_rule_111 (VarDotVar0=`<generatedCounter>{}`(#token("0", "Int")), VarDotVar1=dotk{}(.KList)) at /home/dwightguth/test/./test.k:9
+#2  0x0000000000268a52 in take_steps ()
+#3  0x000000000026b7b4 in main ()
+(gdb)
+

Here we see that foo was invoked while applying the rule on line 9 of test.k,
+and we also can see the substitution of that rule. If foo was evaluated while
+evaluating another function, we would also be able to see the arguments of that
+function as well, unless the function was tail recursive, in which case no
+stack frame would exist once the tail call was performed.

+

Breaking on a set of rules or functions

+ +

Using rbreak <regex> you can set breakpoints on multiple functions.

+
    +
  • +

    rbreak Lbl - sets a breakpoint on all non hooked functions

    +
  • +
  • +

    rbreak Lbl.*TEST - sets a breakpoint on all functions from module TEST

    +
  • +
  • +

    rbreak hook_INT - sets a breakpoint on all hooks from module INT

    +
  • +
+

Other debugger issues

+ +
    +
  • <optimized out> try kompiling without -O1, -O2, or -O3.
  • +
  • (gdb) break definition.kore:break -> No source file named definition.kore.
+pass --enable-llvm-debug to kompile in order to generate debug info symbols.
  • +
+

Profiling your K semantics

+

The first thing to be aware of is that, in order to get meaningful data,
+you need to build the semantics and all of its dependencies with
+optimizations enabled but without the frame pointer elimination
+optimization
. For example, for EVM, this means rebuilding GMP, MPFR,
+JEMalloc, Crypto++, SECP256K1, etc with the following exports.

+
export CFLAGS="-DNDEBUG -O2 -fno-omit-frame-pointer"
+export CXXFLAGS="-DNDEBUG -O2 -fno-omit-frame-pointer"
+

You can skip this step, but if you do, any samples within these
+libraries will not have correct stack trace information, which means
+you will likely not get a meaningful set of data that will tell you
+where the majority of time is really being spent. Don't worry about
+rebuilding literally every single dependency though. Just focus on the
+ones that you expect to take a non-negligible amount of runtime. You
+will be able to tell if you haven't done enough later, and you can go
+back and rebuild more. Once this is done, you then build K with
+optimizations and debug info enabled, like so:

+
mvn package -Dproject.build.type="FastBuild"
+

Next, you build the semantics with optimizations and debug info
+enabled (i.e., kompile -ccopt -O2 --iterated -ccopt -fno-omit-frame-pointer).

+

Once all this is done, you should be ready to profile your
+application. Essentially, you should run whatever test suite you
+usually run, but with perf record -g -- prefixed to the front. For
+example, for KEVM it's the following command. (For best data, don't
+run this step in parallel.)

+
perf record -g -- make test-conformance
+

Finally, you want to filter out just the samples that landed within
+the llvm backend and view the report. For this, you need to know the
+name of the binary that was generated by your build system. Normally
+it is interpreter, but e.g. if you are building the web3 client for
+kevm, it would be kevm-client. You will want to run the following
+command.

+
perf report -g -c $binary_name
+

If all goes well, you should see a breakdown of where CPU time has
+been spent executing the application. You will know that sufficient
+time was spent rebuilding dependencies with the correct flags when the
+total time reported by the main method is close to 100%. If it's not
+close to 100%, this is probably because a decent amount of self time
+was reported in stack traces that were not built with frame pointers
+enabled, meaning that perf was unable to walk the stack. You will have
+to go back, rebuild the appropriate libraries, and then record your
+trace again.

+

Your ultimate goal is to identify the hotspots that take the most
+time, and make them execute faster. Entries like step and
+step_1234 like functions refer to the cost of matching. An entry
+like side_condition_1234 is a side condition and apply_rule_1234
+is constructing the rhs of a rule. You can convert from this rule
+ordinal to a location using the llvm-kompile-compute-loc script in
+the bin folder of the llvm backend repo. For example,

+
llvm-kompile-compute-loc 5868 evm-semantics/.build/defn/llvm/driver-kompiled
+

spits out the following text.

+
Line: 18529
+/home/dwightguth/evm-semantics/./.build/defn/llvm/driver.k:493:10
+

This is the line of definition.kore that the axiom appears on as
+well as the original location of the rule in the K semantics. You can
+use this information to figure out which rules and functions are
+taking the most time and optimize them to be more efficient.

+

Running tests - kserver

+

The kserver is a front-end tool based on Nailgun
+which helps to reduce the startup time of the JVM. Calling kserver in a terminal
+window will wait for all kompile/kprove calls and force them to run in the same process
+and share the same threads. This also reduces the thread contention significantly. kompile
+uses all the threads available to do rule parsing. Another benefit is that it saves caches,
+and each time you call kprove/kast, you can access those directly w/o extra disk usage.
+Running the regression-new integration tests on a powerful machine (32 threads) takes 8m,
+with the kserver active it takes 2m. You can start the kserver in two ways.

+
    +
  • blocking: call kserver in the command line. Close it after you are done testing. Useful for quick testing.
  • +
  • non-blocking: call spawn-kserver <log.file> and close it with stop-kserver - this is used for automation on CI
  • +
+

Because we reuse caches, you should stop and restart the server between runs.
+The Nailgun implementation hasn't been updated in the last 3-5 years, and it's not compatible with Java 18 onwards.

+

K Builtins

+

The K Builtins (also referred to as the K Prelude or the K Standard Library)
+consists of several files which contain definitions that make working with K
+simpler. These files can be found under include/kframework/builtin in your K
+installation directory, and can be imported with requires "FILENAME" (without
+the path prefix).

+
    +
  • domains: Basic datatypes which are universally useful.
  • +
  • kast: Representation of K internal data-structures (not to be
    +included in normal definitions).
  • +
  • prelude: Automatically included into every K definition.
  • +
  • ffi: FFI interface for calling out to native C code from K.
  • +
  • json: JSON datatype and parsers/unparsers for JSON strings.
  • +
  • rat: Rational number representation.
  • +
  • substitution: Hooked implementation of capture-aware
    +substitution for K definitions.
  • +
  • unification: Hooked implementation of unification
    +exposed directly to K definitions.
  • +
+
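For example, a definition that wants the JSON support listed above can require that file and import its module. This is only a minimal sketch: USES-JSON is a hypothetical module name, and it assumes the JSON module defined in json.md.

requires "json.md"

module USES-JSON
  imports JSON
endmodule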

Basic Builtin Types in K

+

A major piece of the K prelude consists of a series of modules that contain
+implementations of basic data types and language features in K. You do not need
+to require this file yourself; it is required automatically in every K
+definition unless --no-prelude is passed to kompile. K may not work correctly
+if some of these modules do not exist or do not declare certain functions.

+

Note that some functions in the K prelude are not total, that is,
+they are not defined on all possible input values. When you invoke such a
+function on an undefined input, the behavior is undefined. In particular, when
+this happens, interpreters generated by the K LLVM backend may crash.

+
requires "kast.md"
+

Default Modules

+

K declares certain modules that contain most of the builtins you usually want
+when defining a language in K. In particular, this includes integers, booleans,
+strings, identifiers, I/O, lists, maps, and sets. The DOMAINS-SYNTAX module
+is designed to be imported by the syntax module of the language and contains
+only the program-level syntax of identifiers, integers, booleans, and strings.
+The DOMAINS module contains the rest of the syntax, including builtin
+functions over those and the remaining types.

+

Note that not all modules are included in DOMAINS. A few less-common modules
+are not, including ARRAY, COLLECTIONS, FLOAT, STRING-BUFFER, BYTES,
+K-REFLECTION, and MINT.

+
module DOMAINS-SYNTAX
+  imports SORT-K
+  imports ID-SYNTAX
+  imports UNSIGNED-INT-SYNTAX
+  imports BOOL-SYNTAX
+  imports STRING-SYNTAX
+endmodule
+
+module DOMAINS
+  imports DOMAINS-SYNTAX
+  imports INT
+  imports BOOL
+  imports STRING
+  imports BASIC-K
+  imports LIST
+  imports K-IO
+  imports MAP
+  imports SET
+  imports ID
+  imports K-EQUAL
+endmodule
+
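As a rough sketch of how these two modules are typically split across a definition (MYLANG is a hypothetical language name), the syntax module imports DOMAINS-SYNTAX and the main semantics module imports DOMAINS:

module MYLANG-SYNTAX
  imports DOMAINS-SYNTAX
  // Program-level syntax can now mention Int, Bool, String, and Id tokens.
  syntax Exp ::= Int | Bool | Id
endmodule

module MYLANG
  imports MYLANG-SYNTAX
  imports DOMAINS
  // Rules here can use the builtin functions, e.g. +Int, andBool, Map, Set.
  // No explicit requires is needed: domains.md is loaded automatically.
endmodule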

Arrays

+

Provided here is an implementation for fixed-sized, contiguous maps from Int
+to KItem. In some previous versions of K, the Array type was a builtin type
+backed by mutable arrays of objects. However, in modern K, the Array type is
+implemented by means of the List type; users should not access this interface
+directly and should instead use only the functions listed below. Users of
+this module should import only the ARRAY module.

+
module ARRAY-SYNTAX
+  imports private LIST
+
+  syntax Array
+

Array lookup

+ +

You can look up an element in an Array by its index in O(log(N)) time. Note
+that the base of the logarithm is a relatively high number and thus the time is
+effectively constant.

+
  syntax KItem ::= Array "[" Int "]" [function]
+

Array update

+ +

You can create a new Array with a new value for a key in O(log(N)) time, or
+effectively constant.

+
  syntax Array ::= Array "[" key: Int "<-" value: KItem "]" [function, symbol(_[_<-_])]
+

Array reset

+ +

You can create a new Array where a particular key is reset to its default
+value in O(log(N)) time, or effectively constant.

+
  syntax Array ::= Array "[" Int "<-" "undef" "]" [function]
+

Multiple array update

+ +

You can create a new Array from a List L of size N where the N
+elements starting at index are replaced with the contents of L, in
+O(N*log(K)) time (where K is the size of the array), or effectively linear.
+Having index + N > K yields an exception.

+
  syntax Array ::= updateArray(Array, index: Int, List) [function]
+

Array fill

+ +

You can create a new Array where the length elements starting at index
+are replaced with value, in O(length*log(N)) time, or effectively linear.

+
  syntax Array ::= fillArray(Array, index: Int, length: Int, value: KItem) [function]
+

Array range check

+ +

You can test whether an integer is within the bounds of an array in O(1) time.

+
  syntax Bool ::= Int "in_keys" "(" Array ")" [function, total]
+
endmodule
+
+module ARRAY-IN-K [private]
+  imports public ARRAY-SYNTAX
+  imports private LIST
+  imports private K-EQUAL
+  imports private INT
+  imports private BOOL
+

Array creation

+ +

You can create an array with length elements where each element is
+initialized to value in O(1) time. Note that the array is stored in a manner
+where only the highest element that is actually modified is given a value
+in its internal representation, which means that subsequent array operations
+may incur a one-time O(N) resizing cost, possibly amortized across multiple
+operations.

+
  syntax Array ::= makeArray(length: Int, value: KItem) [function, public]
+

Implementation of Arrays

+ +

The remainder of this section consists of an implementation in K of the
+operations listed above. Users of the ARRAY module should not make use
+of any of the syntax defined in any of these modules.

+
  syntax Array ::= arr(List, Int, KItem)
+
+  rule makeArray(I::Int, D::KItem) => arr(.List, I, D)
+
+  rule arr(L::List, _, _       ) [ IDX::Int ] => L[IDX] requires 0 <=Int IDX andBool IDX  <Int size(L)
+  rule arr(_      , _, D::KItem) [ _        ] => D      [owise]
+
+  syntax List ::= ensureOffsetList(List, Int, KItem) [function]
+  rule ensureOffsetList(L::List, IDX::Int, D::KItem) => L makeList(IDX +Int 1 -Int size(L), D) requires         IDX >=Int size(L)
+  rule ensureOffsetList(L::List, IDX::Int, _::KItem) => L                                      requires notBool IDX >=Int size(L)
+
+  rule arr(L::List, I::Int, D::KItem) [ IDX::Int <- VAL::KItem ] => arr(ensureOffsetList(L, IDX, D) [ IDX <- VAL ], I, D)
+
+  rule arr(L::List, I::Int, D::KItem) [ IDX::Int <- undef ] => arr(L, I, D) [ IDX <- D ]
+
+  rule updateArray(arr(L::List, I::Int, D::KItem), IDX::Int, L2::List) => arr(updateList(ensureOffsetList(L, IDX +Int size(L2) -Int 1, D), IDX, L2), I, D)
+
+  rule fillArray(arr(L::List, I::Int, D::KItem), IDX::Int, LEN::Int, VAL::KItem) => arr(fillList(ensureOffsetList(L, IDX +Int LEN -Int 1, D), IDX, LEN, VAL), I, D)
+
+  rule IDX::Int in_keys(arr(_, I::Int, _)) => IDX >=Int 0 andBool IDX <Int I
+endmodule
+
+module ARRAY-SYMBOLIC [symbolic]
+  imports ARRAY-IN-K
+endmodule
+
+module ARRAY-KORE
+  imports ARRAY-IN-K
+endmodule
+
+module ARRAY
+  imports ARRAY-SYMBOLIC
+  imports ARRAY-KORE
+endmodule
+
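As a brief sketch of how a definition might use the interface described above (ARRAY-EXAMPLE and demo are hypothetical names), the following function builds a three-element array, updates index 1, and reads the element back:

module ARRAY-EXAMPLE
  imports ARRAY
  imports INT

  // makeArray(3, 0) is a 3-element array defaulting to 0;
  // A [ 1 <- 42 ] returns an updated copy; A [ 1 ] reads the element back.
  syntax Int ::= demo() [function]
  rule demo() => {makeArray(3, 0) [ 1 <- 42 ] [ 1 ]}:>Int   // evaluates to 42
endmodule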

Maps

+

Provided here is the syntax of an implementation of immutable, associative,
+commutative maps from KItem to KItem. This type is hooked to an
+implementation of maps provided by the backend. For more information on
+matching on maps and allowable patterns for doing so, refer to K's
+user documentation.

+
module MAP
+  imports private BOOL-SYNTAX
+  imports private INT-SYNTAX
+  imports private LIST
+  imports private SET
+
+  syntax Map [hook(MAP.Map)]
+

Map concatenation

+ +

The Map sort represents a generalized associative array. Each key can be
+paired with an arbitrary value, and can be used to reference its associated
+value. Multiple bindings for the same key are not allowed.

+

You can construct a new Map consisting of key/value pairs of two Maps. The
+result is #False if the maps have keys in common (in particular, this will
+yield an exception during concrete execution). This operation is O(N*log(M))
+where N is the size of the smaller map, when it appears on the right hand side.
+When it appears on the left hand side and all variables are bound, it is
+O(N*log(M)) where M is the size of the map it is matching and N is the number
+of elements being matched. When it appears on the left hand side containing
+variables not bound elsewhere in the term, it is O(N^K) where N is the size of
+the map it is matching and K is the number of unbound keys being matched. In
+other words, one unbound variable is linear, two is quadratic, three is cubic,
+etc.

+
  syntax Map ::= Map Map                        [left, function, hook(MAP.concat), symbol(_Map_), assoc, comm, unit(.Map), element(_|->_), index(0), format(%1%n%2)]
+

Map unit

+ +

The map with zero elements is represented by .Map.

+
  syntax Map ::= ".Map"                         [function, total, hook(MAP.unit), symbol(.Map)]
+

Map elements

+ +

An element of a Map is constructed via the |-> operator. The key is on the
+left and the value is on the right.

+
  syntax Map ::= KItem "|->" KItem                      [function, total, hook(MAP.element), symbol(_|->_), injective]
+
+  syntax priority _|->_ > _Map_ .Map
+  syntax non-assoc _|->_
+

Map lookup

+ +

You can look up the value associated with the key of a map in O(log(N)) time.
+Note that the base of the logarithm is a relatively high number and thus the
+time is effectively constant. The value is #False if the key is not in the
+map (in particular, this will yield an exception during concrete execution).

+
  syntax KItem ::= Map "[" KItem "]"                    [function, hook(MAP.lookup), symbol(Map:lookup)]
+

Map lookup with default

+ +

You can also look up the value associated with the key of a map using a
+total function that assigns a specific default value if the key is not present
+in the map. This operation is also O(log(N)), or effectively constant.

+
  syntax KItem ::= Map "[" KItem "]" "orDefault" KItem      [function, total, hook(MAP.lookupOrDefault), symbol(Map:lookupOrDefault)]
+

Map update

+ +

You can insert a key/value pair into a map in O(log(N)) time, or effectively
+constant.

+
  syntax Map ::= Map "[" key: KItem "<-" value: KItem "]"           [function, total, symbol(Map:update), hook(MAP.update), prefer]
+

Map delete

+ +

You can remove a key/value pair from a map via its key in O(log(N)) time, or
+effectively constant.

+
  syntax Map ::= Map "[" KItem "<-" "undef" "]"     [function, total, hook(MAP.remove), symbol(_[_<-undef])]
+

Map difference

+ +

You can remove the key/value pairs in a map that are present in another map in
+O(N*log(M)) time (where M is the size of the first map and N is the size of the
+second), or effectively linear. Note that only keys whose value is the same
+in both maps are removed. To remove all the keys in one map from another map,
+you can say removeAll(M1, keys(M2)).

+
  syntax Map ::= Map "-Map" Map                 [function, total, hook(MAP.difference)]
+

Multiple map update

+ +

You can update a map by adding all the key/value pairs in the second map in
+O(N*log(M)) time (where M is the size of the first map and N is the size of the
+second map), or effectively linear. If any keys are present in both maps, the
+value from the second map overwrites the value in the first. This function is
+total, which is distinct from map concatenation, a partial function only
+defined on maps with disjoint keys.

+
  syntax Map ::= updateMap(Map, Map)            [function, total, hook(MAP.updateAll)]
+

Multiple map removal

+ +

You can remove a Set of keys from a map in O(N*log(M)) time (where M is the
+size of the Map and N is the size of the Set), or effectively linear.

+
  syntax Map ::= removeAll(Map, Set)            [function, total, hook(MAP.removeAll)]
+

Map keys (as Set)

+ +

You can get a Set of all the keys in a Map in O(N) time.

+
  syntax Set ::= keys(Map)                      [function, total, hook(MAP.keys)]
+

Map keys (as List)

+ +

You can get a List of all the keys in a Map in O(N) time.

+
  syntax List ::= "keys_list" "(" Map ")"       [function, hook(MAP.keys_list)]
+

Map key membership

+ +

You can check whether a key is present in a map in O(1) time.

+
  syntax Bool ::= KItem "in_keys" "(" Map ")"       [function, total, hook(MAP.in_keys)]
+

Map values (as List)

+ +

You can get a List of all the values in a map in O(N) time.

+
  syntax List ::= values(Map)                   [function, hook(MAP.values)]
+

Map size

+ +

You can get the number of key/value pairs in a map in O(1) time.

+
  syntax Int ::= size(Map)                      [function, total, hook(MAP.size), symbol(sizeMap)]
+

Map inclusion

+ +

You can determine whether a Map is a strict subset of another Map in O(N)
+time (where N is the size of the first map). Only keys that are bound to the
+same value are considered equal.

+
  syntax Bool ::= Map "<=Map" Map               [function, total, hook(MAP.inclusion)]
+

Map choice

+ +

You can get an arbitrarily chosen key of a Map in O(1) time. The same key
+will always be returned for the same map, but no guarantee is given that two
+different maps will return the same element, even if they are similar.

+
  syntax KItem ::= choice(Map)                      [function, hook(MAP.choice), symbol(Map:choice)]
+

Implementation of Maps

+ +

The remainder of this section contains lemmas used by the Java and Haskell
+backends to simplify expressions of sort Map. They do not affect the semantics
+of maps, merely describing additional rules that the backend can use to
+simplify terms.

+
endmodule
+
+module MAP-KORE-SYMBOLIC [symbolic,haskell]
+  imports MAP
+  imports private K-EQUAL
+  imports private BOOL
+
+  rule #Ceil(@M:Map [@K:KItem]) => {(@K in_keys(@M)) #Equals true} #And #Ceil(@M) #And #Ceil(@K) [simplification]
+
+  // Symbolic update
+
+  // Adding the definedness condition `notBool (K in_keys(M))` in the ensures clause of the following rule would be redundant
+  // because K also appears in the rhs, preserving the case when it's #Bottom.
+  rule (K |-> _ M:Map) [ K <- V ] => (K |-> V M) [simplification]
+  rule M:Map [ K <- V ] => (K |-> V M) requires notBool (K in_keys(M)) [simplification]
+  rule M:Map [ K <- _ ] [ K <- V ] => M [ K <- V ] [simplification]
+  // Adding the definedness condition `notBool (K1 in_keys(M))` in the ensures clause of the following rule would be redundant
+  // because K1 also appears in the rhs, preserving the case when it's #Bottom.
+  rule (K1 |-> V1 M:Map) [ K2 <- V2 ] => (K1 |-> V1 (M [ K2 <- V2 ])) requires K1 =/=K K2 [simplification]
+
+  // Symbolic remove
+  rule (K |-> _ M:Map) [ K <- undef ] => M ensures notBool (K in_keys(M)) [simplification]
+  rule M:Map [ K <- undef ] => M requires notBool (K in_keys(M)) [simplification]
+  // Adding the definedness condition `notBool (K1 in_keys(M))` in the ensures clause of the following rule would be redundant
+  // because K1 also appears in the rhs, preserving the case when it's #Bottom.
+  rule (K1 |-> V1 M:Map) [ K2 <- undef ] => (K1 |-> V1 (M [ K2 <- undef ])) requires K1 =/=K K2 [simplification]
+
+  // Symbolic lookup
+  rule (K  |->  V M:Map) [ K ]  => V ensures notBool (K in_keys(M)) [simplification]
+  rule (K1 |-> _V M:Map) [ K2 ] => M [K2] requires K1 =/=K K2 ensures notBool (K1 in_keys(M)) [simplification]
+  rule (_MAP:Map [ K  <-  V1 ]) [ K ]  => V1 [simplification]
+  rule ( MAP:Map [ K1 <- _V1 ]) [ K2 ] => MAP [ K2 ] requires K1 =/=K K2 [simplification]
+
+  rule (K  |->  V M:Map) [  K ] orDefault _ => V ensures notBool (K in_keys(M)) [simplification]
+  rule (K1 |-> _V M:Map) [ K2 ] orDefault D => M [K2] orDefault D requires K1 =/=K K2 ensures notBool (K1 in_keys(M)) [simplification]
+  rule (_MAP:Map [ K  <-  V1 ]) [ K ] orDefault _ => V1 [simplification]
+  rule ( MAP:Map [ K1 <- _V1 ]) [ K2 ] orDefault D => MAP [ K2 ] orDefault D requires K1 =/=K K2 [simplification]
+  rule .Map [ _ ] orDefault D => D [simplification]
+
+  // Symbolic in_keys
+  rule K in_keys(_M [ K <- undef ]) => false [simplification]
+  rule K in_keys(_M [ K <- _ ]) => true [simplification]
+  rule K1 in_keys(M [ K2 <- _ ]) => true requires K1 ==K K2 orBool K1 in_keys(M) [simplification]
+  rule K1 in_keys(M [ K2 <- _ ]) => K1 in_keys(M) requires K1 =/=K K2 [simplification]
+
+  rule {false #Equals @Key in_keys(.Map)} => #Ceil(@Key) [simplification]
+  rule {@Key in_keys(.Map) #Equals false} => #Ceil(@Key) [simplification]
+  rule {false #Equals @Key in_keys(Key' |-> Val @M)} => #Ceil(@Key) #And #Ceil(Key' |-> Val @M) #And #Not({@Key #Equals Key'}) #And {false #Equals @Key in_keys(@M)} [simplification]
+  rule {@Key in_keys(Key' |-> Val @M) #Equals false} => #Ceil(@Key) #And #Ceil(Key' |-> Val @M) #And #Not({@Key #Equals Key'}) #And {@Key in_keys(@M) #Equals false} [simplification]
+
+/*
+// The rule below is automatically generated by the frontend for every sort
+// hooked to MAP.Map. It is left here to serve as documentation.
+
+  rule #Ceil(@M:Map (@K:KItem |-> @V:KItem)) => {(@K in_keys(@M)) #Equals false} #And #Ceil(@M) #And #Ceil(@K) #And #Ceil(@V)
+    [simplification]
+*/
+endmodule
+
+module MAP-SYMBOLIC
+  imports MAP-KORE-SYMBOLIC
+endmodule
+
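A small usage sketch (MAP-EXAMPLE and demo are hypothetical names): build a map with two bindings, rebind one key, and look it up.

module MAP-EXAMPLE
  imports MAP
  imports INT

  // (1 |-> 10) (2 |-> 20) concatenates two singleton maps;
  // M [ 2 <- 30 ] rebinds key 2; M [ 2 ] looks the value back up.
  syntax Int ::= demo() [function]
  rule demo() => {((1 |-> 10) (2 |-> 20)) [ 2 <- 30 ] [ 2 ]}:>Int   // 30
endmodule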

Range Maps

+

Provided here is the syntax of an implementation of immutable, associative,
+commutative range maps from Int to KItem. This type is hooked to an
+implementation of range maps provided by the LLVM backend.
+Currently, this type is not supported by other backends.
+Although the underlying range map data structure supports any key sort, the
+current implementation by the backend only supports Int keys due to
+limitations of the underlying ordering function.

+
module RANGEMAP
+  imports private BOOL-SYNTAX
+  imports private INT-SYNTAX
+  imports private LIST
+  imports private SET
+
+

Range, bounded inclusively below and exclusively above.

+ +
  syntax Range ::= "[" KItem "," KItem ")"    [symbol(RangeMap:Range)]
+
+  syntax RangeMap [hook(RANGEMAP.RangeMap)]
+

Range map concatenation

+ +

The RangeMap sort represents a map whose keys are stored as ranges, bounded
+inclusively below and exclusively above. Contiguous or overlapping ranges that
+map to the same value are merged into a single range.

+

You can construct a new RangeMap consisting of range/value pairs of two
+RangeMaps. If the RangeMaps have overlapping ranges an exception will be
+thrown during concrete execution. This operation is O(N*log(M)) (where N is
+the size of the smaller map and M is the size of the larger map).

+
  syntax RangeMap ::= RangeMap RangeMap                        [left, function, hook(RANGEMAP.concat), symbol(_RangeMap_), assoc, comm, unit(.RangeMap), element(_r|->_), index(0), format(%1%n%2)]
+

Range map unit

+ +

The RangeMap with zero elements is represented by .RangeMap.

+
  syntax RangeMap ::= ".RangeMap"                         [function, total, hook(RANGEMAP.unit), symbol(.RangeMap)]
+

Range map elements

+ +

An element of a RangeMap is constructed via the r|-> operator. The range
+of keys is on the left, and the value is on the right.

+
  syntax RangeMap ::= Range "r|->" KItem                      [function, hook(RANGEMAP.elementRng), symbol(_r|->_), injective]
+
+  syntax priority _r|->_ > _RangeMap_ .RangeMap
+  syntax non-assoc _r|->_
+

Range map lookup

+ +

You can look up the value associated with a key of a RangeMap in O(log(N))
+time (where N is the size of the RangeMap). This will yield an exception
+during concrete execution if the key is not in the range map.

+
  syntax KItem ::= RangeMap "[" KItem "]"                    [function, hook(RANGEMAP.lookup), symbol(RangeMap:lookup)]
+

Range map lookup with default

+ +

You can also look up the value associated with a key of a RangeMap using a
+total function that assigns a specific default value if the key is not present
+in the RangeMap. This operation is also O(log(N)) (where N is the size of
+the range map).

+
  syntax KItem ::= RangeMap "[" KItem "]" "orDefault" KItem      [function, total, hook(RANGEMAP.lookupOrDefault), symbol(RangeMap:lookupOrDefault)]
+

Range map lookup for range of key

+ +

You can look up the range in which a key of a RangeMap is stored, in
+O(log(N)) time (where N is the size of the RangeMap). This will yield an
+exception during concrete execution if the key is not in the range map.

+
  syntax Range ::= "find_range" "(" RangeMap "," KItem ")"                    [function, hook(RANGEMAP.find_range), symbol(RangeMap:find_range)]
+

Range map update

+ +

You can insert a range/value pair into a RangeMap in O(log(N)) time (where N
+is the size of the RangeMap). Any ranges adjacent to or overlapping with the
+range to be inserted will be updated accordingly.

+
  syntax RangeMap ::= RangeMap "[" keyRange: Range "<-" value: KItem "]"           [function, symbol(RangeMap:update), hook(RANGEMAP.updateRng), prefer]
+

Range map delete

+ +

You can remove a range/value pair from a RangeMap in O(log(N)) time (where N
+is the size of the RangeMap). If all or any part of the range is present in
+the range map, it will be removed.

+
  syntax RangeMap ::= RangeMap "[" Range "<-" "undef" "]"     [function, hook(RANGEMAP.removeRng), symbol(_r[_<-undef])]
+

Range map difference

+ +

You can remove the range/value pairs in a RangeMap that are also present in
+another RangeMap in O(max{M,N}*log(M)) time (where M is the size of the
+first RangeMap and N is the size of the second RangeMap). Note that only
+the parts of overlapping ranges whose value is the same in both range maps
+will be removed.

+
  syntax RangeMap ::= RangeMap "-RangeMap" RangeMap                 [function, total, hook(RANGEMAP.difference)]
+

Multiple range map update

+ +

You can update a RangeMap by adding all the range/value pairs in the second
+RangeMap in O(N*log(M+N)) time (where M is the size of the first RangeMap
+and N is the size of the second RangeMap). If any ranges are overlapping,
+the value from the second range map overwrites the value in the first for the
+parts where ranges are overlapping. This function is total, which is distinct
+from range map concatenation, a partial function only defined on range maps
+with non-overlapping ranges.

+
  syntax RangeMap ::= updateRangeMap(RangeMap, RangeMap)            [function, total, hook(RANGEMAP.updateAll)]
+

Multiple range map removal

+ +

You can remove a Set of ranges from a RangeMap in O(N*log(M)) time (where
+M is the size of the RangeMap and N is the size of the Set). For every
+range in the set, all or any part of it that is present in the range map will
+be removed.

+
  syntax RangeMap ::= removeAll(RangeMap, Set)            [function, hook(RANGEMAP.removeAll)]
+

Range map keys (as Set)

+ +

You can get a Set of all the ranges in a RangeMap in O(N) time (where N
+is the size of the RangeMap).

+
  syntax Set ::= keys(RangeMap)                      [function, total, hook(RANGEMAP.keys)]
+

Range map keys (as List)

+ +

You can get a List of all the ranges in a RangeMap in O(N) time (where N
+is the size of the RangeMap).

+
  syntax List ::= "keys_list" "(" RangeMap ")"       [function, hook(RANGEMAP.keys_list)]
+

Range map key membership

+ +

You can check whether a key is present in a RangeMap in O(log(N)) time (where
+N is the size of the RangeMap).

+
  syntax Bool ::= KItem "in_keys" "(" RangeMap ")"       [function, total, hook(RANGEMAP.in_keys)]
+

Range map values (as List)

+ +

You can get a List of all values in a RangeMap in O(N) time (where N is the
+size of the RangeMap).

+
  syntax List ::= values(RangeMap)                   [function, hook(RANGEMAP.values)]
+

Range map size

+ +

You can get the number of range/value pairs in a RangeMap in O(1) time.

+
  syntax Int ::= size(RangeMap)                      [function, total, hook(RANGEMAP.size), symbol(sizeRangeMap)]
+

Range map inclusion

+ +

You can determine whether a RangeMap is a strict subset of another RangeMap
+in O(M+N) time (where M is the size of the first RangeMap and N is the size
+of the second RangeMap). Only keys within equal or overlapping ranges that
+are bound to the same value are considered equal.

+
  syntax Bool ::= RangeMap "<=RangeMap" RangeMap               [function, total, hook(RANGEMAP.inclusion)]
+

Range map choice

+ +

You can get an arbitrarily chosen key of a RangeMap in O(1) time. The same
+key will always be returned for the same range map, but no guarantee is given
+that two different range maps will return the same element, even if they are
+similar.

+
  syntax KItem ::= choice(RangeMap)                      [function, hook(RANGEMAP.choice), symbol(RangeMap:choice)]
+endmodule
+
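As a tentative sketch of this interface (RANGEMAP-EXAMPLE and demo are hypothetical names; per the note above, this only runs on the LLVM backend):

module RANGEMAP-EXAMPLE
  imports RANGEMAP
  imports INT

  // The single range [0, 10) maps every key 0 through 9 to 7,
  // so looking up key 3 yields 7.
  syntax Int ::= demo() [function]
  rule demo() => {([ 0 , 10 ) r|-> 7) [ 3 ]}:>Int
endmodule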

Sets

+

Provided here is the syntax of an implementation of immutable, associative,
+commutative sets of KItem. This type is hooked to an implementation of sets
+provided by the backend. For more information on matching on sets and allowable
+patterns for doing so, refer to K's
+user documentation.

+
module SET
+  imports private INT-SYNTAX
+  imports private BASIC-K
+
+  syntax Set [hook(SET.Set)]
+

Set concatenation

+ +

The Set sort represents a mathematical set (a collection of unique items).
+The sets are nilpotent, i.e., the concatenation of two sets containing elements
+in common is #False (note however, this may be silently allowed during
+concrete execution). If you intend to add an element to a set that might
+already be present in the set, use the |Set operator instead.

+

The concatenation operator is O(N*log(M)) where N is the size of the smaller
+set, when it appears on the right hand side. When it appears on the left hand
+side and all variables are bound, it is O(N*log(M)) where M is the size of the
+set it is matching and N is the number of elements being matched. When it
+appears on the left hand side containing variables not bound elsewhere in the
+term, it is O(N^K) where N is the size of the set it is matching and K is the
+number of unbound keys being matched. In other words, one unbound variable is
+linear, two is quadratic, three is cubic, etc.

+
  syntax Set ::= Set Set                  [left, function, hook(SET.concat), symbol(_Set_), assoc, comm, unit(.Set), idem, element(SetItem), format(%1%n%2)]
+

Set unit

+ +

The set with zero elements is represented by .Set.

+
  syntax Set ::= ".Set"                   [function, total, hook(SET.unit), symbol(.Set)]
+

Set elements

+ +

An element of a Set is constructed via the SetItem operator.

+
  syntax Set ::= SetItem(KItem)               [function, total, hook(SET.element), symbol(SetItem), injective]
+

Set union

+ +

You can compute the union of two sets in O(N*log(M)) time (where N is the size
+of the smaller set). Note that the base of the logarithm is a relatively high
+number and thus the time is effectively linear. The union consists of all the
+elements present in either set.

+
  syntax Set ::= Set "|Set" Set              [left, function, total, hook(SET.union), comm]
+  rule S1:Set |Set S2:Set => S1 (S2 -Set S1) [concrete]
+

Set intersection

+ +

You can compute the intersection of two sets in O(N*log(M)) time (where N
+is the size of the smaller set), or effectively linear. The intersection
+consists of all the elements present in both sets.

+
  syntax Set ::= intersectSet(Set, Set)   [function, total, hook(SET.intersection), comm]
+

Set complement

+ +

You can compute the relative complement of two sets in O(N*log(M)) time (where
+N is the size of the second set), or effectively linear. This is the set of
+elements in the first set that are not present in the second set.

+
  syntax Set ::= Set "-Set" Set           [function, total, hook(SET.difference), symbol(Set:difference)]
+

Set membership

+ +

You can compute whether an element is a member of a set in O(1) time.

+
  syntax Bool ::= KItem "in" Set              [function, total, hook(SET.in), symbol(Set:in)]
+

Set inclusion

+ +

You can determine whether a Set is a strict subset of another Set in O(N)
+time (where N is the size of the first set).

+
  syntax Bool ::= Set "<=Set" Set         [function, total, hook(SET.inclusion)]
+

Set size

+ +

You can get the number of elements (the cardinality) of a set in O(1) time.

+
  syntax Int ::= size(Set)                [function, total, hook(SET.size)]
+

Set choice

+ +

You can get an arbitrarily chosen element of a Set in O(1) time. The same
+element will always be returned for the same set, but no guarantee is given
+that two different sets will return the same element, even if they are similar.

+
  syntax KItem ::= choice(Set)                [function, hook(SET.choice), symbol(Set:choice)]
+
endmodule
+
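A minimal sketch (SET-EXAMPLE and demo are hypothetical names) showing |Set and membership:

module SET-EXAMPLE
  imports SET
  imports INT
  imports BOOL

  // |Set is safe even when the element may already be present;
  // `in` then tests membership.
  syntax Bool ::= demo() [function]
  rule demo() => 2 in (SetItem(1) |Set SetItem(2))   // true
endmodule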

Implementation of Sets

+ +

The following lemmas are simplifications that the Haskell backend can
+apply to simplify expressions of sort Set.

+
module SET-KORE-SYMBOLIC [symbolic,haskell]
+  imports SET
+  imports private K-EQUAL
+  imports private BOOL
+
+  //Temporary rule for #Ceil simplification, should be generated in front-end
+
+// Matching for this version not implemented.
+  // rule #Ceil(@S1:Set @S2:Set) =>
+  //        {intersectSet(@S1, @S2) #Equals .Set} #And #Ceil(@S1) #And #Ceil(@S2)
+  //   [simplification]
+
+//simpler version
+  rule #Ceil(@S:Set SetItem(@E:KItem)) =>
+         {(@E in @S) #Equals false} #And #Ceil(@S) #And #Ceil(@E)
+    [simplification]
+
+  // -Set simplifications
+  rule S              -Set .Set           => S          [simplification]
+  rule .Set           -Set  _             => .Set       [simplification]
+  rule SetItem(X)     -Set (S SetItem(X)) => .Set
+                               ensures notBool (X in S) [simplification]
+  rule S              -Set (S SetItem(X)) => .Set
+                               ensures notBool (X in S) [simplification]
+  rule (S SetItem(X)) -Set S              => SetItem(X)
+                               ensures notBool (X in S) [simplification]
+  rule (S SetItem(X)) -Set SetItem(X)     => S
+                               ensures notBool (X in S) [simplification]
+  // rule SetItem(X)     -Set S              => SetItem(X)
+  //                            requires notBool (X in S)  [simplification]
+  // rule (S1 SetItem(X)) -Set (S2 SetItem(X))  => S1 -Set S2
+  //                             ensures notBool (X in S1)
+  //                             andBool notBool (X in S2) [simplification]
+
+
+
+  // |Set simplifications
+  rule S    |Set .Set => S    [simplification, comm]
+  rule S    |Set S    => S    [simplification]
+
+  rule (S SetItem(X)) |Set SetItem(X) => S SetItem(X)
+                             ensures notBool (X in S) [simplification, comm]
+  // Currently disabled, see runtimeverification/haskell-backend#3301
+  // rule (S SetItem(X)) |Set S          => S SetItem(X)
+  //                            ensures notBool (X in S) [simplification, comm]
+
+  // intersectSet simplifications
+  rule intersectSet(.Set, _   ) => .Set    [simplification, comm]
+  rule intersectSet( S  , S   ) =>  S      [simplification]
+
+  rule intersectSet( S SetItem(X), SetItem(X))     => SetItem(X)
+                                                        ensures notBool (X in S)      [simplification, comm]
+  // Currently disabled, see runtimeverification/haskell-backend#3294
+  // rule intersectSet( S SetItem(X) , S)             => S ensures notBool (X in S)      [simplification, comm]
+  rule intersectSet( S1 SetItem(X), S2 SetItem(X)) => intersectSet(S1, S2) SetItem(X)
+                                                        ensures notBool (X in S1)
+                                                        andBool notBool (X in S2)     [simplification]
+
+  // membership simplifications
+  rule _E in .Set           => false   [simplification]
+  rule E  in (S SetItem(E)) => true
+              ensures notBool (E in S) [simplification]
+
+// These two rules would be sound but impose a giant overhead on `in` evaluation:
+  // rule E1 in (S SetItem(E2)) => true requires E1 in S
+  //                                 ensures notBool (E2 in S) [simplification]
+  // rule E1 in (S SetItem(E2)) => E1 in S requires E1 =/=K E2
+  //                                 ensures notBool (E2 in S) [simplification]
+
+  rule X in ((SetItem(X) S) |Set  _            ) => true
+                                    ensures notBool (X in S) [simplification]
+  rule X in ( _             |Set (SetItem(X) S)) => true
+                                    ensures notBool (X in S) [simplification]
+
+endmodule
+
+module SET-SYMBOLIC
+  imports SET-KORE-SYMBOLIC
+endmodule
+

Lists

+

Provided here is the syntax of an implementation of immutable, associative
+lists of KItem. This type is hooked to an implementation of lists provided
+by the backend. For more information on matching on lists and allowable
+patterns for doing so, refer to K's
+user documentation.

+
module LIST
+  imports private INT-SYNTAX
+  imports private BASIC-K
+
+  syntax List [hook(LIST.List)]
+

List concatenation

+ +

The List sort is an ordered collection that may contain duplicate elements.
+They are backed by relaxed radix balanced trees, which means that they support
+efficiently adding elements to both sides of the list, concatenating two lists,
+indexing, and updating elements.

+

The concatenation operator is O(log(N)) (where N is the size of the longer
+list) when it appears on the right hand side. When it appears on the left hand
+side, it is O(N), where N is the number of elements matched on the front and
+back of the list.

+
  syntax List ::= List List               [left, function, total, hook(LIST.concat), symbol(_List_), smtlib(smt_seq_concat), assoc, unit(.List), element(ListItem), update(List:set), format(%1%n%2)]
+

List unit

+ +

The list with zero elements is represented by .List.

+
  syntax List ::= ".List"                 [function, total, hook(LIST.unit), symbol(.List), smtlib(smt_seq_nil)]
+

List elements

+ +

An element of a List is constructed via the ListItem operator.

+
  syntax List ::= ListItem(KItem)             [function, total, hook(LIST.element), symbol(ListItem), smtlib(smt_seq_elem)]
+

List prepend

+ +

An element can be added to the front of a List using the pushList operator.

+
  syntax List ::= pushList(KItem, List)       [function, total, hook(LIST.push), symbol(pushList)]
+  rule pushList(K::KItem, L1::List) => ListItem(K) L1
+

List indexing

+ +

You can get an element of a list by its integer offset in O(log(N)) time, or
+effectively constant. Positive indices are 0-indexed from the beginning of the
+list, and negative indices are -1-indexed from the end of the list. In other
+words, 0 is the first element and -1 is the last element.

+
  syntax KItem ::= List "[" Int "]"           [function, hook(LIST.get), symbol(List:get)]
+

List update

+ +

You can create a new List with a new value at a particular index in
+O(log(N)) time, or effectively constant.

+
  syntax List ::= List "[" index: Int "<-" value: KItem "]" [function, hook(LIST.update), symbol(List:set)]
+

List of identical elements

+ +

You can create a list with length elements, each containing value, in O(N)
+time.

+
  syntax List ::= makeList(length: Int, value: KItem) [function, hook(LIST.make)]
+

Multiple list update

+ +

You can create a new List which is equal to dest except the N elements
+starting at index are replaced with the contents of src in O(N*log(K)) time
+(where K is the size of dest and N is the size of src), or effectively linear.
+Having index + N > K yields an exception.

+
  syntax List ::= updateList(dest: List, index: Int, src: List) [function, hook(LIST.updateAll)]
+

List fill

+ +

You can create a new List where the length elements starting at index
+are replaced with value, in O(length*log(N)) time, or effectively linear.

+
  syntax List ::= fillList(List, index: Int, length: Int, value: KItem) [function, hook(LIST.fill)]
+

List slicing

+ +

You can compute a new List by removing fromFront elements from the front
+of the list and fromBack elements from the back of the list in
+O((fromFront+fromBack)*log(N)) time, or effectively linear.

+
  syntax List ::= range(List, fromFront: Int, fromBack: Int)   [function, hook(LIST.range), symbol(List:range)]
+

List membership

+ +

You can compute whether an element is in a list in O(N) time. For repeated
+comparisons, it is much better to first convert to a set using List2Set.

+
  syntax Bool ::= KItem "in" List             [function, total, hook(LIST.in), symbol(_inList_)]
+

List size

+ +

You can get the number of elements of a list in O(1) time.

+
  syntax Int ::= size(List)               [function, total, hook(LIST.size), symbol(sizeList), smtlib(smt_seq_len)]
+
endmodule
+
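A minimal sketch (LIST-EXAMPLE, lastOf, and demo are hypothetical names) of indexing from the back of a list:

module LIST-EXAMPLE
  imports LIST
  imports INT

  // L [ 0 ] is the first element and L [ -1 ] is the last one.
  syntax Int ::= lastOf(List) [function]
  rule lastOf(L:List) => {L [ -1 ]}:>Int

  syntax Int ::= demo() [function]
  rule demo() => lastOf(ListItem(1) ListItem(2) ListItem(3))   // 3
endmodule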

Collection Conversions

+

It is possible to convert from a List to a Set or from a Set to a List.
+Converting from a List to a Set and back will not provide the same list;
+duplicates will have been removed and the list may be reordered. Converting
+from a Set to a List and back will generate the same set.

+

Note that because sets are unordered and lists are ordered, converting from a
+Set to a List will generate some arbitrary ordering of elements, which may
+or may not be the natural ordering you might expect. Two
+equal sets are guaranteed to generate the same ordering, but no guarantee is
+otherwise provided about what the ordering will be. In particular, adding an
+element to a set may completely reorder the elements already in the set, when
+it is converted to a list.

+
module COLLECTIONS
+  imports LIST
+  imports SET
+  imports MAP
+
+  syntax List ::= Set2List(Set) [function, total, hook(SET.set2list)]
+  syntax Set ::= List2Set(List) [function, total, hook(SET.list2set)]
+
+endmodule
+
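A short sketch (COLLECTIONS-EXAMPLE and demo are hypothetical names): converting a list with a duplicate to a set drops the duplicate.

module COLLECTIONS-EXAMPLE
  imports COLLECTIONS
  imports INT

  // The duplicate 1 collapses, so the resulting set has two elements.
  syntax Int ::= demo() [function]
  rule demo() => size(List2Set(ListItem(1) ListItem(2) ListItem(1)))   // 2
endmodule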

Booleans

+

Provided here is the syntax of an implementation of boolean algebra in K.
+This type is hooked to an implementation of booleans provided by the backend.
+Note that this algebra is different from the builtin truth in matching logic.
+You can, however, convert from the truth of the Bool sort to the truth in
+matching logic via the expression {B #Equals true}.

+

The boolean values are true and false.

+
module SORT-BOOL
+  syntax Bool [hook(BOOL.Bool)]
+endmodule
+
+module BOOL-SYNTAX
+  imports SORT-BOOL
+  syntax Bool ::= "true"  [token]
+  syntax Bool ::= "false" [token]
+endmodule
+
+module BOOL-COMMON
+  imports private BASIC-K
+  imports BOOL-SYNTAX
+

Basic boolean arithmetic

+ +

You can:

+
    +
  • Negate a boolean value.
  • +
  • AND two boolean values.
  • +
  • XOR two boolean values.
  • +
  • OR two boolean values.
  • +
  • IMPLIES two boolean values (i.e., P impliesBool Q is the same as
    +notBool P orBool Q)
  • +
  • Check equality of two boolean values.
  • +
  • Check inequality of two boolean values.
  • +
+

Note that only andThenBool and orElseBool are short-circuiting. andBool
+and orBool may be short-circuited in concrete backends, but in symbolic
+backends, both arguments will be evaluated.

+
  syntax Bool ::= "notBool" Bool          [function, total, symbol(notBool_), smt-hook(not), group(boolOperation), hook(BOOL.not)]
+                > Bool "andBool" Bool     [function, total, symbol(_andBool_), left, smt-hook(and), group(boolOperation), hook(BOOL.and)]
+                | Bool "andThenBool" Bool [function, total, symbol(_andThenBool_), left, smt-hook(and), group(boolOperation), hook(BOOL.andThen)]
+                | Bool "xorBool" Bool     [function, total, symbol(_xorBool_), left, smt-hook(xor), group(boolOperation), hook(BOOL.xor)]
+                | Bool "orBool" Bool      [function, total, symbol(_orBool_), left, smt-hook(or), group(boolOperation), hook(BOOL.or)]
+                | Bool "orElseBool" Bool  [function, total, symbol(_orElseBool_), left, smt-hook(or), group(boolOperation), hook(BOOL.orElse)]
+                | Bool "impliesBool" Bool [function, total, symbol(_impliesBool_), left, smt-hook(=>), group(boolOperation), hook(BOOL.implies)]
+                > left:
+                  Bool "==Bool" Bool      [function, total, symbol(_==Bool_), left, comm, smt-hook(=), hook(BOOL.eq)]
+                | Bool "=/=Bool" Bool     [function, total, symbol(_=/=Bool_), left, comm, smt-hook(distinct), hook(BOOL.ne)]
+

Implementation of Booleans

+ +

The remainder of this section consists of an implementation in K of the
+operations listed above.

+
  rule notBool true => false
+  rule notBool false => true
+
+  rule true andBool B:Bool => B:Bool
+  rule B:Bool andBool true => B:Bool [simplification]
+  rule false andBool _:Bool => false
+  rule _:Bool andBool false => false [simplification]
+
+  rule true andThenBool K::Bool => K
+  rule K::Bool andThenBool true => K [simplification]
+  rule false andThenBool _ => false
+  rule _ andThenBool false => false  [simplification]
+
+  rule false xorBool B:Bool => B:Bool
+  rule B:Bool xorBool false => B:Bool [simplification]
+  rule B:Bool xorBool B:Bool => false
+
+  rule true orBool _:Bool => true
+  rule _:Bool orBool true => true [simplification]
+  rule false orBool B:Bool => B
+  rule B:Bool orBool false => B   [simplification]
+
+  rule true orElseBool _ => true
+  rule _ orElseBool true => true     [simplification]
+  rule false orElseBool K::Bool => K
+  rule K::Bool orElseBool false => K [simplification]
+
+  rule true impliesBool B:Bool => B
+  rule false impliesBool _:Bool => true
+  rule _:Bool impliesBool true => true       [simplification]
+  rule B:Bool impliesBool false => notBool B [simplification]
+
+  rule B1:Bool =/=Bool B2:Bool => notBool (B1 ==Bool B2)
+endmodule
+
+module BOOL-KORE [symbolic]
+  imports BOOL-COMMON
+
+  rule {true #Equals notBool @B} => {false #Equals @B} [simplification]
+  rule {notBool @B #Equals true} => {@B #Equals false} [simplification]
+  rule {false #Equals notBool @B} => {true #Equals @B} [simplification]
+  rule {notBool @B #Equals false} => {@B #Equals true} [simplification]
+
+  rule {true #Equals @B1 andBool @B2} => {true #Equals @B1} #And {true #Equals @B2} [simplification]
+  rule {@B1 andBool @B2 #Equals true} => {@B1 #Equals true} #And {@B2 #Equals true} [simplification]
+  rule {false #Equals @B1 orBool @B2} => {false #Equals @B1} #And {false #Equals @B2} [simplification]
+  rule {@B1 orBool @B2 #Equals false} => {@B1 #Equals false} #And {@B2 #Equals false} [simplification]
+endmodule
+
+module BOOL
+  imports BOOL-COMMON
+  imports BOOL-KORE
+endmodule
+
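To illustrate the short-circuiting note above, here is a hedged sketch (BOOL-EXAMPLE and safeDivides are hypothetical names) that guards a modulus against a zero divisor:

module BOOL-EXAMPLE
  imports BOOL
  imports INT

  // andThenBool short-circuits, so Y %Int X is never evaluated when X is 0;
  // with plain andBool, symbolic backends would evaluate both sides.
  syntax Bool ::= safeDivides(Int, Int) [function, total]
  rule safeDivides(X, Y) => (X =/=Int 0) andThenBool ((Y %Int X) ==Int 0)
endmodule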

Integers

+

Provided here is the syntax of an implementation of arbitrary-precision
+integer arithmetic in K. This type is hooked to an implementation of integers
+provided by the backend. For a fixed-width integer type, see the MINT module
+below.

+

The UNSIGNED-INT-SYNTAX module provides a syntax of whole numbers in K.
+This is useful because often programming languages implement the sign of an
+integer as a unary operator rather than part of the lexical syntax of integers.
+However, you can also directly reference integers with a sign using the
+INT-SYNTAX module.

+
module UNSIGNED-INT-SYNTAX
+  syntax Int [hook(INT.Int)]
+  syntax Int ::= r"[0-9]+" [prefer, token, prec(2)]
+endmodule
+
+module INT-SYNTAX
+  imports UNSIGNED-INT-SYNTAX
+  syntax Int ::= r"[\\+\\-]?[0-9]+" [prefer, token, prec(2)]
+endmodule
+
+module INT-COMMON
+  imports INT-SYNTAX
+  imports private BOOL
+

Integer arithmetic

+ +

You can:

+
    +
  • Compute the bitwise complement ~Int of an integer value in twos-complement.
  • +
  • Compute the exponentiation ^Int of two integers.
  • +
  • Compute the exponentiation of two integers modulo another integer (^%Int).
    +A ^%Int B C is equal in value to (A ^Int B) %Int C, but has a better
    +asymptotic complexity.
  • +
  • Compute the product *Int of two integers.
  • +
  • Compute the quotient /Int or modulus %Int of two integers using
    +t-division, which rounds towards zero. Division by zero is #False.
  • +
  • Compute the quotient divInt or modulus modInt of two integers using
    +Euclidean division, in which the remainder is always non-negative. Division
    +by zero is #False.
  • +
  • Compute the sum +Int or difference -Int of two integers.
  • +
  • Compute the arithmetic right shift >>Int of two integers. Shifting by a
    +negative quantity is #False.
  • +
  • Compute the left shift of two integers. Shifting by a negative quantity is
    +#False.
  • +
  • Compute the bitwise and of two integers in twos-complement.
  • +
  • Compute the bitwise xor of two integers in twos-complement.
  • +
  • Compute the bitwise inclusive-or of two integers in twos-complement.
  • +
+
  syntax Int ::= "~Int" Int                     [function, symbol(~Int_), total, hook(INT.not), smtlib(notInt)]
+               > left:
+                 Int "^Int" Int                 [function, symbol(_^Int_), left, smt-hook(^), hook(INT.pow)]
+               | Int "^%Int" Int Int            [function, symbol(_^%Int__), left, smt-hook((mod (^ #1 #2) #3)), hook(INT.powmod)]
+               > left:
+                 Int "*Int" Int                 [function, total, symbol(_*Int_), left, comm, smt-hook(*), hook(INT.mul)]
+               /* FIXME: translate /Int and %Int into smtlib */
+               /* /Int and %Int implement t-division, which rounds towards 0. SMT hooks need to convert from Euclidian division operations */
+               | Int "/Int" Int                 [function, symbol(_/Int_), left,
+                                                 smt-hook((ite (or (= 0 (mod #1 #2)) (>= #1 0)) (div #1 #2) (ite (> #2 0) (+ (div #1 #2) 1) (- (div #1 #2) 1)))),
+                                                 hook(INT.tdiv)]
+               | Int "%Int" Int                 [function, symbol(_%Int_), left,
+                                                 smt-hook((ite (or (= 0 (mod #1 #2)) (>= #1 0)) (mod #1 #2) (ite (> #2 0) (- (mod #1 #2) #2) (+ (mod #1 #2) #2)))),
+                                                 hook(INT.tmod)]
+               /* divInt and modInt implement e-division according to the Euclidean division theorem, therefore the remainder is always positive */
+               | Int "divInt" Int               [function, symbol(_divInt_), left, smt-hook(div), hook(INT.ediv)]
+               | Int "modInt" Int               [function, symbol(_modInt_), left, smt-hook(mod), hook(INT.emod)]
+               > left:
+                 Int "+Int" Int                 [function, total, symbol(_+Int_), left, comm, smt-hook(+), hook(INT.add)]
+               | Int "-Int" Int                 [function, total, symbol(_-Int_), left, smt-hook(-), hook(INT.sub)]
+               > left:
+                 Int ">>Int" Int                [function, symbol(_>>Int_), left, hook(INT.shr), smtlib(shrInt)]
+               | Int "<<Int" Int                [function, symbol(_<<Int_), left, hook(INT.shl), smtlib(shlInt)]
+               > left:
+                 Int "&Int" Int                 [function, total, symbol(_&Int_), left, comm, hook(INT.and), smtlib(andInt)]
+               > left:
+                 Int "xorInt" Int               [function, total, symbol(_xorInt_), left, comm, hook(INT.xor), smtlib(xorInt)]
+               > left:
+                 Int "|Int" Int                 [function, total, symbol(_|Int_), left, comm, hook(INT.or), smtlib(orInt)]
+

Integer minimum and maximum

+ +

You can compute the minimum and maximum minInt and maxInt of two integers.

+
  syntax Int ::= "minInt" "(" Int "," Int ")"   [function, total, smt-hook((ite (< #1 #2) #1 #2)), hook(INT.min)]
+               | "maxInt" "(" Int "," Int ")"   [function, total, smt-hook((ite (< #1 #2) #2 #1)), hook(INT.max)]
+

Absolute value

+ +

You can compute the absolute value absInt of an integer.

+
  syntax Int ::= absInt ( Int )                 [function, total, smt-hook((ite (< #1 0) (- 0 #1) #1)), hook(INT.abs)]
+

Log base 2

+ +

You can compute the log base 2, rounded towards zero, of an integer. The log
+base 2 of an integer is equal to the index of the highest bit set in the
+representation of a positive integer. Log base 2 of zero or a negative number
+is #False.

+
  syntax Int ::= log2Int ( Int )                [function, hook(INT.log2)]
+

Bit slicing

+ +

You can compute the value of a range of bits in the twos-complement
+representation of an integer, interpreted as either unsigned or signed.
+index is offset from 0 and length is the number of bits, starting
+with index, that should be read. The number is assumed to be represented
+in little endian notation with each byte going from least significant to
+most significant. In other words, 0 is the least-significant bit, and each
+successive bit is more significant than the last.

+
  syntax Int ::= bitRangeInt           ( Int, index: Int, length: Int ) [function, hook(INT.bitRange)]
+               | signExtendBitRangeInt ( Int, index: Int, length: Int ) [function, hook(INT.signExtendBitRange)]
+

Integer comparisons

+ +

You can compute whether two integers are less than or equal to, less than,
+greater than or equal to, greater than, equal, or unequal to another integer.

+
  syntax Bool ::= Int "<=Int" Int         [function, total, symbol(_<=Int_), smt-hook(<=), hook(INT.le)]
+                | Int "<Int" Int          [function, total, symbol(_<Int_), smt-hook(<), hook(INT.lt)]
+                | Int ">=Int" Int         [function, total, symbol(_>=Int_), smt-hook(>=), hook(INT.ge)]
+                | Int ">Int" Int          [function, total, symbol(_>Int_), smt-hook(>), hook(INT.gt)]
+                | Int "==Int" Int         [function, total, symbol(_==Int_), comm, smt-hook(=), hook(INT.eq)]
+                | Int "=/=Int" Int        [function, total, symbol(_=/=Int_), comm, smt-hook(distinct), hook(INT.ne)]
+

Divides

+ +

You can compute whether one integer evenly divides another. This is the
+case when the second integer modulo the first integer is equal to zero.

+
  syntax Bool ::= Int "dividesInt" Int    [function]
+

Random integers

+ +

You can, on concrete backends, compute a pseudorandom integer, or seed the
+pseudorandom number generator. These operations are represented as
+uninterpreted functions on symbolic backends.

+
  syntax Int ::= randInt(Int) [function, hook(INT.rand), impure]
+  syntax K ::= srandInt(Int) [function, hook(INT.srand), impure]
+

Implementation of Integers

+ +

The remainder of this section consists of an implementation in K of some
+of the operators above, as well as lemmas used by the Java and Haskell backends
+to simplify expressions of sort Int. They do not affect the semantics of
+integers, merely describing additional rules that the backend can use to
+simplify terms.

+
endmodule
+
+module INT-SYMBOLIC [symbolic]
+  imports INT-COMMON
+  imports INT-SYMBOLIC-KORE
+  imports private BOOL
+
+  // Arithmetic Normalization
+  rule I +Int 0 => I [simplification]
+  rule I -Int 0 => I [simplification]
+
+  rule X modInt N => X requires 0 <=Int X andBool X <Int N [simplification]
+  rule X   %Int N => X requires 0 <=Int X andBool X <Int N [simplification]
+
+  // Bit-shifts
+  rule X <<Int 0 => X                    [simplification, preserves-definedness]
+  rule 0 <<Int Y => 0 requires 0 <=Int Y [simplification, preserves-definedness]
+  rule X >>Int 0 => X                    [simplification, preserves-definedness]
+  rule 0 >>Int Y => 0 requires 0 <=Int Y [simplification, preserves-definedness]
+endmodule
+
+module INT-SYMBOLIC-KORE [symbolic, haskell]
+  imports INT-COMMON
+  imports ML-SYNTAX
+  imports private BOOL
+
+  // Definability Conditions
+  rule #Ceil(@I1:Int /Int   @I2:Int) => {(@I2 =/=Int 0) #Equals true} #And #Ceil(@I1) #And #Ceil(@I2) [simplification]
+  rule #Ceil(@I1:Int %Int   @I2:Int) => {(@I2 =/=Int 0) #Equals true} #And #Ceil(@I1) #And #Ceil(@I2) [simplification]
+  rule #Ceil(@I1:Int modInt @I2:Int) => {(@I2 =/=Int 0) #Equals true} #And #Ceil(@I1) #And #Ceil(@I2) [simplification]
+  rule #Ceil(@I1:Int >>Int  @I2:Int) => {(@I2 >=Int 0)  #Equals true} #And #Ceil(@I1) #And #Ceil(@I2) [simplification]
+  rule #Ceil(@I1:Int <<Int  @I2:Int) => {(@I2 >=Int 0)  #Equals true} #And #Ceil(@I1) #And #Ceil(@I2) [simplification]
+endmodule
+
+module INT-KORE [symbolic]
+  imports private K-EQUAL
+  imports private BOOL
+  imports INT-COMMON
+
+  rule [eq-k-to-eq-int]     : I1:Int ==K I2:Int            => I1 ==Int I2           [simplification]
+  rule [eq-int-true-left]   : {K1 ==Int K2 #Equals true}   => {K1 #Equals K2}       [simplification]
+  rule [eq-int-true-rigth]  : {true #Equals K1 ==Int K2}   => {K1 #Equals K2}       [simplification]
+  rule [eq-int-false-left]  : {K1 ==Int K2 #Equals false}  => #Not({K1 #Equals K2}) [simplification]
+  rule [eq-int-false-rigth] : {false #Equals K1 ==Int K2}  => #Not({K1 #Equals K2}) [simplification]
+  rule [neq-int-true-left]  : {K1 =/=Int K2 #Equals true}  => #Not({K1 #Equals K2}) [simplification]
+  rule [neq-int-true-right] : {true #Equals K1 =/=Int K2}  => #Not({K1 #Equals K2}) [simplification]
+  rule [neq-int-false-left] : {K1 =/=Int K2 #Equals false} => {K1 #Equals K2}       [simplification]
+  rule [neq-int-false-right]: {false #Equals K1 =/=Int K2} => {K1 #Equals K2}       [simplification]
+
+  // Arithmetic Normalization
+  rule I +Int B => B +Int I          [concrete(I), symbolic(B), simplification(51)]
+  rule A -Int I => A +Int (0 -Int I) [concrete(I), symbolic(A), simplification(51)]
+
+  rule (A +Int I2) +Int I3 => A +Int (I2 +Int I3) [concrete(I2, I3), symbolic(A), simplification]
+  rule I1 +Int (B +Int I3) => B +Int (I1 +Int I3) [concrete(I1, I3), symbolic(B), simplification]
+  rule I1 -Int (B +Int I3) => (I1 -Int I3) -Int B [concrete(I1, I3), symbolic(B), simplification]
+  rule I1 +Int (I2 +Int C) => (I1 +Int I2) +Int C [concrete(I1, I2), symbolic(C), simplification]
+  rule I1 +Int (I2 -Int C) => (I1 +Int I2) -Int C [concrete(I1, I2), symbolic(C), simplification]
+  rule (I1 -Int B) +Int I3 => (I1 +Int I3) -Int B [concrete(I1, I3), symbolic(B), simplification]
+  rule I1 -Int (I2 +Int C) => (I1 -Int I2) -Int C [concrete(I1, I2), symbolic(C), simplification]
+  rule I1 -Int (I2 -Int C) => (I1 -Int I2) +Int C [concrete(I1, I2), symbolic(C), simplification]
+  rule (C -Int I2) -Int I3 => C -Int (I2 +Int I3) [concrete(I2, I3), symbolic(C), simplification]
+
+  rule I1 &Int (I2 &Int C) => (I1 &Int I2) &Int C [concrete(I1, I2), symbolic(C), simplification]
+endmodule
+
+module INT
+  imports INT-COMMON
+  imports INT-SYMBOLIC
+  imports INT-KORE
+  imports private K-EQUAL
+  imports private BOOL
+
+  rule bitRangeInt(I::Int, IDX::Int, LEN::Int) => (I >>Int IDX) modInt (1 <<Int LEN)
+
+  rule signExtendBitRangeInt(I::Int, IDX::Int, LEN::Int) => (bitRangeInt(I, IDX, LEN) +Int (1 <<Int (LEN -Int 1))) modInt (1 <<Int LEN) -Int (1 <<Int (LEN -Int 1))
+
+  rule I1:Int divInt I2:Int => (I1 -Int (I1 modInt I2)) /Int I2
+  requires I2 =/=Int 0
+  rule
+    I1:Int modInt I2:Int
+  =>
+    ((I1 %Int absInt(I2)) +Int absInt(I2)) %Int absInt(I2)
+  requires I2 =/=Int 0    [concrete, simplification]
+
+  rule minInt(I1:Int, I2:Int) => I1 requires I1 <Int  I2
+  rule minInt(I1:Int, I2:Int) => I2 requires I1 >=Int I2
+
+  rule I1:Int =/=Int I2:Int => notBool (I1 ==Int I2)
+  rule (I1:Int dividesInt I2:Int) => (I2 %Int I1) ==Int 0
+
+  syntax Int ::= freshInt(Int)    [freshGenerator, function, total, private]
+  rule freshInt(I:Int) => I
+endmodule
+
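The difference between t-division and Euclidean division described above can be seen in a small sketch (INT-EXAMPLE and demo are hypothetical names):

module INT-EXAMPLE
  imports INT
  imports BOOL

  // t-division rounds toward zero:     -7 /Int 2   => -3,  -7 %Int 2   => -1
  // Euclidean division keeps r >= 0:   -7 divInt 2 => -4,  -7 modInt 2 => 1
  syntax Bool ::= demo() [function]
  rule demo() => ((-7 %Int 2) ==Int -1) andBool ((-7 modInt 2) ==Int 1)   // true
endmodule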

IEEE 754 Floating-point Numbers

+

Provided here is the syntax of an implementation of arbitrary-precision
+floating-point arithmetic in K based on a generalization of the IEEE 754
+standard. This type is hooked to an implementation of floats provided by the
+backend.

+

The syntax of ordinary floating-point values in K consists of an optional sign
+(+ or -) followed by an optional integer part, followed by a decimal point,
+followed by an optional fractional part. Either the integer part or the
+fractional part must be specified. The mantissa is followed by an optional
+exponent part, which consists of an e or E, an optional sign (+ or -),
+and an integer. The exponent is followed by an optional suffix, which can be
+either f, F, d, D, or pNxM where N and M are positive integers.
+p and x can be either upper or lowercase.

+

The value of a floating-point literal is computed as follows: First the
+mantissa is read as a rational number. Then it is multiplied by 10 to the
+power of the exponent, which is interpreted as an integer, and defaults to
+zero if it is not present. Finally, it is rounded to the nearest possible
+value in a floating-point type represented like an IEEE 754 floating-point type,
+with the number of bits of precision and exponent specified by the suffix.
+A suffix of f or F represents the IEEE binary32 format. A suffix of d
+or D, or no suffix, represents the IEEE binary64 format. A suffix of
+pNxM (either upper or lowercase) specifies exactly N bits of precision and
+M bits of exponent. The number of bits of precision is assumed to include
+any optional 1 that precedes the IEEE 754 mantissa. In other words, p24x8
+is equal to the IEEE binary32 format, and p53x11 is equal to the IEEE
+binary64 format.

+
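For illustration, here are a few literals and the values they denote (editorial
examples, not part of the modules below):

  // 3.0        binary64 (no suffix), value 3.0
  // 2.5e3f     binary32, value 2.5 * 10^3 = 2500.0
  // -.5d       binary64, value -0.5
  // 1.0p24x8   24 bits of precision, 8 bits of exponent (the binary32 format)
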
module FLOAT-SYNTAX
+  syntax Float [hook(FLOAT.Float)]
+  syntax Float ::= r"([\\+\\-]?[0-9]+(\\.[0-9]*)?|\\.[0-9]+)([eE][\\+\\-]?[0-9]+)?([fFdD]|([pP][0-9]+[xX][0-9]+))?" [token, prec(1)]
+  syntax Float ::= r"[\\+\\-]?Infinity([fFdD]|([pP][0-9]+[xX][0-9]+))?" [token, prec(3)]
+  syntax Float ::= r"NaN([fFdD]|([pP][0-9]+[xX][0-9]+))?" [token, prec(3)]
+endmodule
+
+module FLOAT
+  imports FLOAT-SYNTAX
+  imports private BOOL
+  imports private INT-SYNTAX
+

Float precision

+ +

You can retrieve the number of bits of precision in a Float.

+
  syntax Int ::= precisionFloat(Float) [function, total, hook(FLOAT.precision)]
+

Float exponent bits

+ +

You can retrieve the number of bits of exponent range in a Float.

+
  syntax Int ::= exponentBitsFloat(Float) [function, total, hook(FLOAT.exponentBits)]
+

Float exponent

+ +

You can retrieve the value of the exponent bits of a Float as an integer.

+
  syntax Int ::= exponentFloat(Float) [function, total, hook(FLOAT.exponent)]
+

Float sign

+ +

You can retrieve the value of the sign bit of a Float as a boolean. True
+means the sign bit is set.

+
  syntax Bool ::= signFloat(Float)      [function, total, hook(FLOAT.sign)]
+

Float special values

+ +

You can check whether a Float value is infinite or Not-a-Number.

+
  syntax Bool ::= isNaN(Float)          [function, total, smt-hook(fp.isNaN), hook(FLOAT.isNaN)]
+                | isInfinite(Float)     [function, total]
+

Float arithmetic

+ +

You can:

+
    +
  • Compute the unary negation --Float of a float. --Float X is distinct
    +from 0.0 -Float X. For example, 0.0 -Float 0.0 is positive zero.
    +--Float 0.0 is negative zero.
  • +
  • Compute the exponentiation ^Float of two floats.
  • +
  • Compute the product *Float, quotient /Float, or remainder %Float of two
    +floats. The remainder is computed based on rounding the quotient of the two
    +floats to the nearest integer.
  • +
  • Compute the sum +Float or difference -Float of two floats.
  • +
+
  syntax Float ::= "--Float" Float             [function, total, smt-hook(fp.neg), hook(FLOAT.neg)]
+                 > Float "^Float" Float        [function, left, hook(FLOAT.pow)]
+                 > left:
+                   Float "*Float" Float        [function, left, smt-hook((fp.mul roundNearestTiesToEven #1 #2)), hook(FLOAT.mul)]
+                 | Float "/Float" Float        [function, left, smt-hook((fp.div roundNearestTiesToEven #1 #2)), hook(FLOAT.div)]
+                 | Float "%Float" Float        [function, left, smt-hook((fp.rem roundNearestTiesToEven #1 #2)), hook(FLOAT.rem)]
+                 > left:
+                   Float "+Float" Float        [function, left, smt-hook((fp.add roundNearestTiesToEven #1 #2)), hook(FLOAT.add)]
+                 | Float "-Float" Float        [function, left, smt-hook((fp.sub roundNearestTiesToEven #1 #2)), hook(FLOAT.sub)]
+

Floating-point mathematics

+ +

You can:

+
    +
  • Compute the Nth integer root rootFloat of a float.
  • +
  • Compute the absolute value absFloat of a float.
  • +
  • Round a floating-point number to a specified precision and exponent
    +range (roundFloat). The resulting Float will yield the specified values
    +when calling precisionFloat and exponentBitsFloat and when performing
    +further computation.
  • +
  • Round a float to the next lowest floating-point value which is an integer
    +(floorFloat).
  • +
  • Round a float to the next highest floating-point value which is an integer
    +(ceilFloat).
  • +
  • Round a float to the next closest floating-point value which is an integer, in
    +the direction of zero (truncFloat).
  • +
  • Compute the natural exponential expFloat of a float (i.e. e^x).
  • +
  • Compute the natural logarithm logFloat of a float.
  • +
  • Compute the sine sinFloat of a float.
  • +
  • Compute the cosine cosFloat of a float.
  • +
  • Compute the tangent tanFloat of a float.
  • +
  • Compute the arcsine asinFloat of a float.
  • +
  • Compute the arccosine acosFloat of a float.
  • +
  • Compute the arctangent atanFloat of a float.
  • +
  • Compute the arctangent atan2Float of two floats.
  • +
  • Compute the maximum maxFloat of two floats.
  • +
  • Compute the minimum minFloat of two floats.
  • +
  • Compute the square root sqrtFloat of a float.
  • +
  • Compute the largest finite value expressible in a specified precision and
    +exponent range (maxValueFloat).
  • +
  • Compute the smallest positive finite value expressible in a specified
    +precision and exponent range (minValueFloat).
  • +
+
  syntax Float ::= rootFloat(Float, Int)        [function, hook(FLOAT.root)]
+                 | absFloat(Float)              [function, total, smt-hook(fp.abs), hook(FLOAT.abs)]
+                 | roundFloat(Float, precision: Int, exponentBits: Int)  [function, hook(FLOAT.round)]
+                 | floorFloat(Float)            [function, total, hook(FLOAT.floor)]
+                 | ceilFloat(Float)             [function, total, hook(FLOAT.ceil)]
+                 | truncFloat(Float)            [function, total, hook(FLOAT.trunc)]
+                 | expFloat(Float)              [function, total, hook(FLOAT.exp)]
+                 | logFloat(Float)              [function, hook(FLOAT.log)]
+                 | sinFloat(Float)              [function, total, hook(FLOAT.sin)]
+                 | cosFloat(Float)              [function, total, hook(FLOAT.cos)]
+                 | tanFloat(Float)              [function, hook(FLOAT.tan)]
+                 | asinFloat(Float)             [function, hook(FLOAT.asin)]
+                 | acosFloat(Float)             [function, hook(FLOAT.acos)]
+                 | atanFloat(Float)             [function, total, hook(FLOAT.atan)]
+                 | atan2Float(Float, Float)     [function, hook(FLOAT.atan2)]
+                 | maxFloat(Float, Float)       [function, smt-hook(fp.max), hook(FLOAT.max)]
+                 | minFloat(Float, Float)       [function, smt-hook(fp.min), hook(FLOAT.min)]
+                 | sqrtFloat(Float)             [function]
+                 | maxValueFloat(precision: Int, exponentBits: Int)      [function, hook(FLOAT.maxValue)]
+                 | minValueFloat(precision: Int, exponentBits: Int)      [function, hook(FLOAT.minValue)]
+

Floating-point comparisons

+ +

Compute whether a float is less than or equal to, less than, greater than or
+equal to, greater than, equal, or unequal to another float. Note that
+X ==Float Y and X ==K Y might yield different values. The latter should be
+used in cases where you want to compare whether two values of sort Float
+contain the same term. The former should be used when you want to implement
+the == operator of a programming language. In particular, NaN =/=Float NaN
+is true, because NaN compares unequal to all values, including itself, in
+IEEE 754 arithmetic. 0.0 ==Float -0.0 is also true.

+
  syntax Bool ::= Float "<=Float" Float       [function, smt-hook(fp.leq), hook(FLOAT.le)]
+                | Float "<Float" Float        [function, smt-hook(fp.lt), hook(FLOAT.lt)]
+                | Float ">=Float" Float       [function, smt-hook(fp.geq), hook(FLOAT.ge)]
Float">
+                | Float ">Float" Float        [function, smt-hook(fp.gt), hook(FLOAT.gt)]
+                | Float "==Float" Float       [function, comm, smt-hook(fp.eq), hook(FLOAT.eq), symbol(_==Float_)]
+                | Float "=/=Float" Float      [function, comm, smt-hook((not (fp.eq #1 #2)))]
+
+  rule F1:Float =/=Float F2:Float => notBool (F1 ==Float F2)
+
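For instance, the following example evaluations illustrate the distinction
(editorial examples, not rules of this module):

  // NaN =/=Float NaN   evaluates to true   (NaN is unequal to everything, itself included)
  // NaN ==K NaN        evaluates to true   (both sides are the same term)
  // 0.0 ==Float -0.0   evaluates to true   (IEEE 754 treats the two zeros as equal)
  // 0.0 ==K -0.0       evaluates to false  (the two terms are distinct)
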

Conversion between integer and float

+ +

You can convert an integer to a floating-point number with the specified
+precision and exponent range. You can also convert a floating-point number
+to the nearest integer. This operation rounds to the nearest integer, but it
+also avoids the double-rounding that is present in ceilFloat and floorFloat
+if the nearest integer is not representable in the specified floating-point
+type.

+
  syntax Float ::= Int2Float(Int, precision: Int, exponentBits: Int)    [function, hook(FLOAT.int2float)]
+  syntax Int ::= Float2Int(Float)    [function, total, hook(FLOAT.float2int)]
+
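For instance (editorial examples):

  // Int2Float(5, 53, 11)  evaluates to 5.0 as a binary64 value
  // Float2Int(2.75)       evaluates to 3, the nearest integer
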

Implementation of Floats

+ +

The remainder of this section consists of an implementation in K of some of the
+operators above.

+
  rule sqrtFloat(F:Float) => rootFloat(F, 2)
+
+  rule isInfinite(F:Float) => F >Float maxValueFloat(precisionFloat(F), exponentBitsFloat(F)) orBool F <Float --Float maxValueFloat(precisionFloat(F), exponentBitsFloat(F))
+
+endmodule
+

Strings

+

Provided here is the syntax of an implementation of Unicode strings in K. This
+type is hooked to an implementation of strings provided by the backend. The
+implementation is currently incomplete and does not fully support encodings
+and code points beyond the initial 256 code points of the Basic Latin and
+Latin-1 Supplement blocks. In the future, there may be breaking changes to
+the semantics of this module in order to support this functionality.

+

The syntax of strings in K is delineated by double quotes. Inside the double
+quotes, any character can appear verbatim except double quotes, backslash,
+newline, and carriage return. K also supports the following escape sequences:

+
    +
  • " - the " character
  • +
  • \ - the \ character
  • +
  • \n - newline character
  • +
  • \r - carriage return character
  • +
  • \t - tab character
  • +
  • \f - form feed character
  • +
  • \xFF - \x followed by two hexadecimal characters indicates a code point
    +between 0x00 and 0xff
  • +
  • \uFFFF - \u followed by four hexadecimal characters indicates a code point
    +between 0x0000 and 0xffff
  • +
  • \UFFFFFFFF - \U followed by eight hexadecimal characters indicates a code
    +point between 0x000000 and 0x10ffff
  • +
+
module STRING-SYNTAX
+  syntax String [hook(STRING.String)]
+  syntax String ::= r"[\\\"](([^\\\"\\n\\r\\\\])|([\\\\][nrtf\\\"\\\\])|([\\\\][x][0-9a-fA-F]{2})|([\\\\][u][0-9a-fA-F]{4})|([\\\\][U][0-9a-fA-F]{8}))*[\\\"]"      [token]
+endmodule
+
+module STRING-COMMON
+  imports STRING-SYNTAX
+  imports private INT
+  imports private FLOAT-SYNTAX
+  imports private K-EQUAL
+  imports private BOOL
+

String concatenation

+ +

You can concatenate two strings in O(N) time. For successive concatenation
+operations, it may be better to use the STRING-BUFFER module.

+
  syntax String ::= String "+String" String    [function, total, left, hook(STRING.concat)]
+

String length

+ +

You can get the length of a string in O(1) time.

+
  syntax Int ::= lengthString ( String ) [function, total, hook(STRING.length)]
+

Character and integer conversion

+ +

You can convert between a character (as represented by a string containing
+a single code point) and an integer in O(1) time.

+
  syntax String ::= chrChar ( Int )      [function, hook(STRING.chr)]
+  syntax Int ::= ordChar ( String )      [function, hook(STRING.ord)]
+
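For instance (editorial examples):

  // ordChar("A")  evaluates to 65
  // chrChar(97)   evaluates to "a"
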

String substring

+ +

You can compute a substring of a string in O(N) time (where N is the
+length of the substring). There are two important facts to note:

+
    +
  1. the range generated includes the character at startIndex but excludes the
    +character at endIndex, i.e., the range is [startIndex..endIndex).
  2. +
  3. this function is only defined on valid indices (i.e., it is defined when
    +startIndex < endIndex and endIndex is less than or equal to the string
    +length).
  4. +
+
  syntax String ::= substrString ( String , startIndex: Int , endIndex: Int ) [function, total, hook(STRING.substr)]
+
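For instance (editorial examples, following the [startIndex..endIndex) convention):

  // substrString("hello", 1, 3)  evaluates to "el"
  // substrString("hello", 0, 5)  evaluates to "hello"
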
+ +

You can find the first (respectively, last) occurrence of a substring, starting
+at a certain index, in another string in O(N*M) time.
+Returns -1 if the substring is not found.

+
  syntax Int ::= findString ( haystack: String , needle: String , index: Int )   [function, hook(STRING.find)]
+  syntax Int ::= rfindString ( haystack: String , needle: String , index: Int )  [function, hook(STRING.rfind)]
+
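For instance (editorial examples of the intended behavior):

  // findString("ababa", "ab", 0)   evaluates to 0
  // findString("ababa", "ab", 1)   evaluates to 2
  // findString("ababa", "xy", 0)   evaluates to -1
  // rfindString("ababa", "ab", 4)  evaluates to 2, the last occurrence
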
+ +

You can find the first (respectively, last) occurrence of one of the characters
+of the search string, starting at a certain index, in another string in
+O(N*M) time.

+
  syntax Int ::= findChar ( haystack: String , needles: String , index: Int )     [function, hook(STRING.findChar)]
+  syntax Int ::= rfindChar ( haystack: String , needles: String , index: Int )    [function, hook(STRING.rfindChar)]
+

String and Bool conversion

+ +
  syntax String ::= Bool2String(Bool) [function, total]
+  rule Bool2String(true)  => "true"
+  rule Bool2String(false) => "false"
+
  syntax Bool ::= String2Bool(String) [function]
+  rule String2Bool("true")  => true
+  rule String2Bool("false") => false
+

String and float conversion

+ +

You can convert between a String and a Float. The String will be
+represented in the syntax of the Float sort (see the section on the FLOAT
+module above for details of that syntax). Which particular string is returned
+by Float2String is determined by the backend, but the same Float is
+guaranteed to return the same String, and converting that String back to a
+Float is guaranteed to return the original Float.

+

You can also convert a Float to a string in a particular syntax using the
+variant of Float2String with a format. In this case, the resulting string
+is one which results directly from passing that format to mpfr_printf. This
+functionality may not be supported on backends that do not use Gnu MPFR to
+implement floating-point numbers.

+
  syntax String ::= Float2String ( Float )              [function, total, hook(STRING.float2string)]
+  syntax String ::= Float2String ( Float , format: String )     [function, symbol(FloatFormat), hook(STRING.floatFormat)]
+  syntax Float  ::= String2Float ( String )             [function, hook(STRING.string2float)]
+

String and integer conversions

+ +

You can convert between a String and an Int. The String will be represented
+in the syntax of the INT module (i.e., a nonempty sequence of digits
+optionally prefixed by a sign). When converting from an Int to a String,
+the sign will not be present unless the integer is negative.

+

You can also convert between a String and an Int in a particular radix.
+This radix can be anywhere between 2 and 36. For a radix 2 <= N <= 10, the
+digits 0 to N-1 will be used. For a radix 11 <= N <= 36, the digits 0 to 9
+and the first N-10 letters of the Latin alphabet will be used. Both uppercase
+and lowercase letters are supported by String2Base. Whether the letters
+returned by Base2String are upper or lowercase is determined by the backend,
+but the backend will consistently choose one or the other.

+
  syntax Int    ::= String2Int   ( String )             [function, hook(STRING.string2int)]
+  syntax String ::= Int2String   ( Int )                [function, total, hook(STRING.int2string)]
+  syntax String ::= Base2String  ( Int , base: Int )          [function, hook(STRING.base2string)]
+  syntax Int    ::= String2Base  ( String , base: Int )       [function, hook(STRING.string2base)]
+
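For instance (editorial examples):

  // String2Int("-42")      evaluates to -42
  // String2Base("ff", 16)  evaluates to 255
  // String2Base("FF", 16)  evaluates to 255
  // Base2String(255, 16)   evaluates to "ff" or "FF", depending on the backend
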

String count and replace

+ +

You can replace one, some, or all occurrences of a string within another
+string in O(N*M) time. The replaceAll, replace, and replaceFirst functions
+are identical, except that replaceFirst replaces exactly one occurrence of the
+string (the first occurrence), replace replaces the first times occurrences,
+and replaceAll replaces every occurrence.

+

You can also count the number of times a string occurs within another string
+using countAllOccurrences.

+
  syntax String ::= "replaceAll" "(" haystack: String "," needle: String "," replacement: String ")"      [function, total, hook(STRING.replaceAll)]
+  syntax String ::= "replace" "(" haystack: String "," needle: String "," replacement: String "," times: Int ")" [function, hook(STRING.replace)]
+  syntax String ::= "replaceFirst" "(" haystack: String "," needle: String "," replacement: String ")"    [function, total, hook(STRING.replaceFirst)]
+  syntax Int ::= "countAllOccurrences" "(" haystack: String "," needle: String ")"            [function, total, hook(STRING.countAllOccurrences)]
+
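For instance (editorial examples):

  // countAllOccurrences("banana", "an")  evaluates to 2
  // replaceFirst("banana", "an", "x")    evaluates to "bxana"
  // replace("banana", "an", "x", 1)      evaluates to "bxana"
  // replaceAll("banana", "an", "x")      evaluates to "bxxa"
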

String equality and lexicographic comparison

+ +

You can compare whether two strings are equal or unequal, or whether one string
+is less than, less than or equal to, greater than, or greater than or equal to
+another according to the natural lexicographic ordering of strings.

+
  syntax Bool ::= String "==String" String  [function, total, comm, hook(STRING.eq)]
+                | String "=/=String" String [function, total, comm, hook(STRING.ne)]
+  rule S1:String =/=String S2:String => notBool (S1 ==String S2)
+
+  syntax Bool ::= String  "<String" String [function, total, hook(STRING.lt)]
+                | String "<=String" String [function, total, hook(STRING.le)]
+                | String  ">String" String [function, total, hook(STRING.gt)]
+                | String ">=String" String [function, total, hook(STRING.ge)]
+

Implementation of Strings

+ +

What follows is a few String hooks that are deprecated and only supported
+on certain outdated backends of K, as well as an implementation of several
+of the above operations in K.

+
  syntax String ::= categoryChar(String)       [function, hook(STRING.category)]
+                  | directionalityChar(String) [function, hook(STRING.directionality)]
+
+  syntax String ::= "newUUID" [function, hook(STRING.uuid), impure]
+
+  rule S1:String <=String S2:String => notBool (S2 <String S1)
+  rule S1:String >String S2:String => S2 <String S1
+  rule S1:String >=String S2:String => notBool (S1 <String S2)
+
+  rule findChar(S1:String, S2:String, I:Int) => #if findString(S1, substrString(S2, 0, 1), I) ==Int -1 #then findChar(S1, substrString(S2, 1, lengthString(S2)), I) #else #if findChar(S1, substrString(S2, 1, lengthString(S2)), I) ==Int -1 #then findString(S1, substrString(S2, 0, 1), I) #else minInt(findString(S1, substrString(S2, 0, 1), I), findChar(S1, substrString(S2, 1, lengthString(S2)), I)) #fi #fi requires S2 =/=String ""
+  rule findChar(_, "", _) => -1
+  rule rfindChar(S1:String, S2:String, I:Int) => maxInt(rfindString(S1, substrString(S2, 0, 1), I), rfindChar(S1, substrString(S2, 1, lengthString(S2)), I)) requires S2 =/=String ""
+  rule rfindChar(_, "", _) => -1
+
+  rule countAllOccurrences(Source:String, ToCount:String) => 0
+            requires findString(Source, ToCount, 0) <Int 0
+  rule countAllOccurrences(Source:String, ToCount:String) => 1 +Int countAllOccurrences(substrString(Source, findString(Source, ToCount, 0) +Int lengthString(ToCount), lengthString(Source)), ToCount)
+            requires findString(Source, ToCount, 0) >=Int 0
+
+  rule replaceFirst(Source:String, ToReplace:String, Replacement:String) => substrString(Source, 0, findString(Source, ToReplace, 0))
+                +String Replacement +String substrString(Source, findString(Source, ToReplace, 0) +Int lengthString(ToReplace), lengthString(Source))
+                requires findString(Source, ToReplace, 0) >=Int 0
+  rule replaceFirst(Source:String, ToReplace:String, _:String) => Source
+        requires findString(Source, ToReplace, 0) <Int 0
+
+
+  // Note that the replace function is undefined when Count < 0. This allows different backends to
+  // implement their own behavior without contradicting these semantics. For instance, a symbolic
+  // backend can return #Bottom for that case, while a concrete backend can throw an exception.
+  rule replace(Source:String, ToReplace:String, Replacement:String, Count:Int) =>
+       substrString(Source, 0, findString(Source, ToReplace, 0)) +String Replacement +String
+       replace(substrString(Source, findString(Source, ToReplace, 0) +Int lengthString(ToReplace), lengthString(Source)), ToReplace, Replacement, Count -Int 1)
+        requires Count >Int 0 andBool findString(Source, ToReplace, 0) >=Int 0
+  rule replace(Source:String, _, _, Count) => Source
+        requires Count >=Int 0 [owise]
+  rule replaceAll(Source:String, ToReplace:String, Replacement:String) => replace(Source, ToReplace, Replacement, countAllOccurrences(Source, ToReplace))
+
+endmodule
+
+module STRING-KORE [symbolic]
+  imports private K-EQUAL
+  imports STRING-COMMON
+
+  rule S1:String ==K S2:String => S1 ==String S2 [simplification]
+
+endmodule
+
+module STRING
+  imports STRING-COMMON
+  imports STRING-KORE
+endmodule
+

String Buffers

+

It is a well-known fact that repeated string concatenation is quadratic
+in performance, whereas use of an efficient mutable representation of arrays
+can yield linear performance. We thus provide such a sort, the StringBuffer
+sort. Axiomatically, it is implemented below on symbolic backends using the
+String module. However, on concrete backends it provides an efficient
+implementation of string concatenation. There are three operations:

+
    +
  • .StringBuffer creates a new StringBuffer with current content equal
    +to the empty string.
  • +
  • +String takes a StringBuffer and a String and appends the String to
+the end of the StringBuffer.
  • +
  • StringBuffer2String converts a StringBuffer to a String. This operation
    +copies the string so that subsequent modifications to the StringBuffer
    +will not change the value of the String returned by this function.
  • +
+
module STRING-BUFFER-IN-K [symbolic]
+  imports private BASIC-K
+  imports STRING
+
+  syntax StringBuffer ::= ".StringBuffer" [function, total]
+  syntax StringBuffer ::= StringBuffer "+String" String [function, total, avoid]
+  syntax StringBuffer ::= String
+  syntax String ::= StringBuffer2String ( StringBuffer ) [function, total]
+
+  rule {SB:String +String S:String}::StringBuffer => (SB +String S)::String
+  rule .StringBuffer => ""
+  rule StringBuffer2String(S:String) => S
+endmodule
+
+module STRING-BUFFER-HOOKED [concrete]
+  imports private BASIC-K
+  imports STRING
+
+  syntax StringBuffer [hook(BUFFER.StringBuffer)]
+  syntax StringBuffer ::= ".StringBuffer" [function, total, hook(BUFFER.empty), impure]
+  syntax StringBuffer ::= StringBuffer "+String" String [function, total, hook(BUFFER.concat), avoid]
+  syntax String ::= StringBuffer2String ( StringBuffer ) [function, total, hook(BUFFER.toString)]
+endmodule
+
+module STRING-BUFFER
+  imports STRING-BUFFER-HOOKED
+  imports STRING-BUFFER-IN-K
+endmodule
+
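As a small usage sketch of these three operations, the following editorial
example module (join3 is a hypothetical helper, not part of the library)
builds a string from three pieces using a StringBuffer:

module STRING-BUFFER-EXAMPLE
  imports STRING-BUFFER

  syntax String ::= join3(String, String, String) [function, total]
  rule join3(A:String, B:String, C:String)
    => StringBuffer2String(((.StringBuffer +String A) +String B) +String C)
endmodule
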

Byte Arrays

+

Provided here is the syntax of an implementation of fixed-width arrays of Bytes
+in K. This type is hooked to an implementation of bytes provided by the backend.
+On the LLVM backend, it is possible to opt in to a faster, mutable
+representation (using the --llvm-mutable-bytes flag to kompile) where
+multiple references can occur to the same Bytes object and when one is
+modified, the others are also modified. Care should be taken when using this
+feature, however, as it is possible to experience divergent behavior with
+symbolic backends unless the Bytes type is used in a manner that preserves
+consistency.

+
module BYTES-SYNTAX
+  imports private STRING-SYNTAX
+
+  syntax Bytes [hook(BYTES.Bytes)]
+  syntax Bytes ::= r"b[\\\"](([ !#-\\[\\]-~])|([\\\\][tnfr\\\"\\\\])|([\\\\][x][0-9a-fA-F]{2}))*[\\\"]"      [token]
+endmodule
+
module BYTES-STRING-ENCODE [symbolic]
+  imports BYTES-SYNTAX
+

Encoding/decoding between Bytes and String

+ +

You can encode/decode between Bytes and String using UTF-8, UTF-16LE, UTF-16BE, UTF-32LE, and UTF-32BE.

+
    syntax String ::= decodeBytes ( encoding: String , contents: Bytes ) [function, hook(BYTES.decodeBytes)]
+    syntax Bytes ::= encodeBytes ( encoding: String , contents: String ) [function, hook(BYTES.encodeBytes)]
+endmodule
+
module BYTES-HOOKED
+  imports STRING-SYNTAX
+  imports BYTES-SYNTAX
+  imports BYTES-STRING-ENCODE
+

Empty byte array

+ +

The byte array of length zero is represented by .Bytes.

+
  syntax Bytes ::= ".Bytes" [function, total, hook(BYTES.empty)]
+

Endianness

+ +

When converting to/from an integer, byte arrays can be treated as either little
+endian (ie, least significant byte first) or big endian (ie, most significant
+byte first).

+
  syntax Endianness ::= "LE" [symbol(littleEndianBytes)]
+                      | "BE" [symbol(bigEndianBytes)]
+

Signedness

+ +

When converting to/from an integer, byte arrays can be treated as either signed
+or unsigned.

+
  syntax Signedness ::= "Signed" [symbol(signedBytes)]
+                      | "Unsigned" [symbol(unsignedBytes)]
+

Integer and Bytes conversion

+ +

You can convert from a Bytes to an Int. In order to do this, the endianness
+and signedness of the Bytes must be provided. The resulting integer is
+created by means of interpreting the Bytes as either a twos-complement
+representation, or an unsigned representation, of an integer, in the specified
+byte order.

+

You can also convert from an Int to a Bytes. This comes in two variants.
+In the first, the length of the resulting Bytes in bytes is explicitly
+specified. If the length is greater than the highest set bit in the magnitude
+of the integer, the result is padded with 0 bits if the number is positive
+and 1 bits if the number is negative. If the length is less than the highest
+bit set in the magnitude of the integer, the most-significant bits of the
+integer will be truncated. The endianness of the resulting Bytes object
+is as specified.

+

In the second variant, both endianness and signedness are specified, and
+the resulting Bytes object will be the smallest number of bytes necessary
+for the resulting Bytes object to be convertible back to the original integer
+via Bytes2Int. In other words, if the highest bit set in the magnitude of the
+integer is N, then the byte array will be at least N+1 bits long, rounded up
+to the nearest byte.

+
  syntax Int ::= Bytes2Int(Bytes, Endianness, Signedness) [function, total, hook(BYTES.bytes2int)]
+  syntax Bytes ::= Int2Bytes(length: Int, Int, Endianness) [function, total, hook(BYTES.int2bytes)]
+                 | Int2Bytes(Int, Endianness, Signedness) [function, total, symbol(Int2BytesNoLen)]
+
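For instance (editorial examples):

  // Bytes2Int(b"\x01\x00", BE, Unsigned)  evaluates to 256
  // Bytes2Int(b"\x01\x00", LE, Unsigned)  evaluates to 1
  // Bytes2Int(b"\xff", BE, Signed)        evaluates to -1
  // Int2Bytes(2, 256, BE)                 evaluates to b"\x01\x00"
  // Int2Bytes(255, LE, Unsigned)          evaluates to b"\xff"
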

String and Bytes conversion

+ +

You can convert between a Bytes and a String in O(N) time. The resulting
+value is a copy of the original and will not be affected by subsequent
+mutations of the input or output value.

+
  syntax String ::= Bytes2String(Bytes) [function, total, hook(BYTES.bytes2string)]
+  syntax Bytes ::= String2Bytes(String) [function, total, hook(BYTES.string2bytes)]
+

Bytes update

+ +

You can set the value of a particular byte in a Bytes object in O(1) time.
+The result is #False if value is not in the range [0..255] or if index
+is not a valid index (ie, less than zero or greater than or equal to the length
+of the Bytes term).

+
  syntax Bytes ::= Bytes "[" index: Int "<-" value: Int "]" [function, hook(BYTES.update)]
+

Bytes lookup

+ +

You can get the value of a particular byte in a Bytes object in O(1) time.
+The result is #False if index is not a valid index (see above).

+
  syntax Int ::= Bytes "[" Int "]" [function, hook(BYTES.get)]
+

Bytes substring

+ +

You can get a new Bytes object containing a range of bytes from the input
+Bytes in O(N) time (where N is the length of the substring). The range
+of bytes included is [startIndex..endIndex). The resulting Bytes is
+a copy and mutations to it do not affect mutations to the original Bytes.
+The result is #False if startIndex or endIndex are not valid.

+
  syntax Bytes ::= substrBytes(Bytes, startIndex: Int, endIndex: Int) [function, hook(BYTES.substr)]
+

Multiple bytes update

+ +

You can modify a Bytes to return a Bytes which is equal to dest except the
+N elements starting at index are replaced with the contents of src in O(N)
+time. If --llvm-mutable-bytes is active, this will not create a new Bytes
+object and will instead modify the original on concrete backends. The result is
+#False if index + N is not a valid index.

+
  syntax Bytes ::= replaceAtBytes(dest: Bytes, index: Int, src: Bytes) [function, hook(BYTES.replaceAt)]
+

Multiple bytes update

+ +

You can modify a Bytes to return a Bytes which is equal to dest except the
+count bytes starting at index are replaced with count bytes of value
+Int2Bytes(1, v, LE/BE) in O(count) time. This does not create a new Bytes
+object and will instead modify the original if --llvm-mutable-bytes is active.
+This will throw an exception if index + count is not a valid index. The
+acceptable range of values for v is -128 to 127. This will throw an exception
+if v is outside of this range. This is implemented only for the LLVM backend.

+
  syntax Bytes ::= memsetBytes(dest: Bytes, index: Int, count: Int, v: Int) [function, hook(BYTES.memset)]
+

Bytes padding

+ +

You can create a new Bytes object which is at least length bytes long by
+taking the input sequence and padding it on the right (respectively, on the
+left) with the specified value. If --llvm-mutable-bytes is active, this does
+not create a new Bytes object if the input is already at least length bytes
+long, and will instead return the input unchanged. The result is #False if
+value is not in the range [0..255], or if the length is negative.

+
  syntax Bytes ::= padRightBytes(Bytes, length: Int, value: Int) [function, hook(BYTES.padRight)]
+                 | padLeftBytes(Bytes, length: Int, value: Int) [function, hook(BYTES.padLeft)]
+
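For instance (editorial examples):

  // padLeftBytes(b"\x0f", 3, 0)   evaluates to b"\x00\x00\x0f"
  // padRightBytes(b"\x0f", 3, 1)  evaluates to b"\x0f\x01\x01"
  // padRightBytes(b"\x0f", 1, 0)  evaluates to b"\x0f", already long enough
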

Bytes reverse

+ +

You can reverse a Bytes object in O(N) time. If --llvm-mutable-bytes is
+active, this will not create a new Bytes object and will instead modify the
+original.

+
  syntax Bytes ::= reverseBytes(Bytes) [function, total, hook(BYTES.reverse)]
+

Bytes length

+ +

You can get the length of a Bytes term in O(1) time.

+
  syntax Int ::= lengthBytes(Bytes) [function, total, hook(BYTES.length), smtlib(lengthBytes)]
+

Bytes concatenation

+ +

You can create a new Bytes object by concatenating two Bytes objects
+together in O(N) time.

+
  syntax Bytes ::= Bytes "+Bytes" Bytes [function, total, hook(BYTES.concat), right]
+
+endmodule
+

Implementation of Bytes

+ +

The remainder of this module consists of an implementation of some of the
+operators listed above in K.

+
module BYTES-CONCRETE [concrete]
+  imports BYTES-HOOKED
+endmodule
+
+module BYTES-KORE
+  imports BYTES-HOOKED
+  imports BYTES-SYMBOLIC-CEIL
+endmodule
+
+module BYTES-SYMBOLIC-CEIL [symbolic]
+  imports BYTES-HOOKED
+  imports private INT
+  imports private BOOL
+
+  rule #Ceil(padRightBytes(_, LEN, VAL)) => {(0 <=Int LEN andBool 0 <=Int VAL andBool VAL <Int 256) #Equals true} [simplification]
+  rule #Ceil(padLeftBytes(_, LEN, VAL))  => {(0 <=Int LEN andBool 0 <=Int VAL andBool VAL <Int 256) #Equals true} [simplification]
+endmodule
+
+module BYTES
+  imports BYTES-CONCRETE
+  imports BYTES-KORE
+  imports private INT
+
+  rule Int2Bytes(I::Int, _::Endianness, _)        => .Bytes
+    requires I ==Int 0
+  rule Int2Bytes(I::Int, E::Endianness, Unsigned) => Int2Bytes((log2Int(I) +Int 8) /Int 8, I, E)
+    requires I >Int 0 [preserves-definedness]
+  rule Int2Bytes(I::Int, E::Endianness, Signed  ) => Int2Bytes((log2Int(I) +Int 9) /Int 8, I, E)
+    requires I >Int 0 [preserves-definedness]
+  rule Int2Bytes(I::Int, E::Endianness, Signed  ) => Int2Bytes((log2Int(~Int I) +Int 9) /Int 8, I, E)
+    requires I <Int -1 [preserves-definedness]
+  rule Int2Bytes(I::Int, E::Endianness, Signed  ) => Int2Bytes(1, -1, E)
+    requires I ==Int -1 [preserves-definedness]
+endmodule
+

Program identifiers

+

Provided here is an implementation for program identifiers in K. Developers
+of semantics for a particular language may wish to use their own implementation
+instead of the one provided here if their syntax differs from the syntax
+defined below. However, this is provided for convenience for developers who
+do not care about the lexical syntax of identifiers.

+

Provided are the following pieces of functionality:

+
    +
  • Id2String - Convert an Id to a String containing its name
  • +
  • String2Id - Convert a String to an Id with the specified name
  • +
  • !X:Id - You can get a fresh identifier distinct from any previous identifier
    +generated by this syntax.
  • +
+
module ID-SYNTAX-PROGRAM-PARSING
+  imports BUILTIN-ID-TOKENS
+  syntax Id ::= r"[A-Za-z\\_][A-Za-z0-9\\_]*"     [prec(1), token]
+              | #LowerId                                             [token]
+              | #UpperId                                             [token]
+endmodule
+
+module ID-SYNTAX
+  syntax Id [token]
+endmodule
+
+module ID-COMMON
+  imports ID-SYNTAX
+  imports private STRING
+
+  syntax String ::= Id2String ( Id )    [function, total, hook(STRING.token2string)]
+  syntax Id ::= String2Id (String) [function, total, hook(STRING.string2token)]
+  syntax Id ::= freshId(Int)    [freshGenerator, function, total, private]
+
+  rule freshId(I:Int) => String2Id("_" +String Int2String(I))
+endmodule
+
+module ID
+  imports ID-COMMON
+endmodule
+

Equality and conditionals

+

Provided here are implementations of two important primitives in K:

+
    +
  • ==K - the equality between two terms. Returns true if they are equal
    +and false if they are not equal.
  • +
  • #if #then #else #fi - polymorphic conditional function. If the first
    +argument evaluates to true, the second argument is returned. Otherwise,
    +the third argument is returned. Note that this does not short-circuit on
    +symbolic backends.
  • +
+
module K-EQUAL-SYNTAX
+  imports private BOOL
+  imports private BASIC-K
+
+  syntax Bool ::= left:
+                  K "==K" K           [function, total, comm, smt-hook(=), hook(KEQUAL.eq), symbol(_==K_), group(equalEqualK)]
+                | K "=/=K" K          [function, total, comm, smt-hook(distinct), hook(KEQUAL.ne), symbol(_=/=K_), group(notEqualEqualK)]
+
+  syntax priority equalEqualK notEqualEqualK > boolOperation mlOp
+
+  syntax {Sort} Sort ::= "#if" Bool "#then" Sort "#else" Sort "#fi" [function, total, symbol(ite), smt-hook(ite), hook(KEQUAL.ite)]
+
+endmodule
+
+module K-EQUAL-KORE [symbolic]
+  imports private BOOL
+  imports K-EQUAL-SYNTAX
+
+  rule K1:Bool ==K K2:Bool => K1 ==Bool K2 [simplification]
+  rule {K1 ==K K2 #Equals true} => {K1 #Equals K2} [simplification]
+  rule {true #Equals K1 ==K K2} => {K1 #Equals K2} [simplification]
+  rule {K1 ==K K2 #Equals false} => #Not({K1 #Equals K2}) [simplification]
+  rule {false #Equals K1 ==K K2} => #Not({K1 #Equals K2}) [simplification]
+  rule {K1 =/=K K2 #Equals true} => #Not({K1 #Equals K2}) [simplification]
+  rule {true #Equals K1 =/=K K2} => #Not({K1 #Equals K2}) [simplification]
+  rule {K1 =/=K K2 #Equals false} => {K1 #Equals K2} [simplification]
+  rule {false #Equals K1 =/=K K2} => {K1 #Equals K2} [simplification]
+
+endmodule
+
+module K-EQUAL
+  imports private BOOL
+  imports K-EQUAL-SYNTAX
+  imports K-EQUAL-KORE
+
+  rule K1:K =/=K K2:K => notBool (K1 ==K K2)
+
+  rule #if C:Bool #then B1::K #else _ #fi => B1 requires C
+  rule #if C:Bool #then _ #else B2::K #fi => B2 requires notBool C
+
+endmodule
+
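As a small sketch of how #if #then #else #fi can be used, the following
editorial example module (absExample is a hypothetical function, not part of
the library) computes the absolute value of an integer:

module K-EQUAL-EXAMPLE
  imports INT
  imports K-EQUAL

  syntax Int ::= absExample(Int) [function, total]
  rule absExample(I:Int) => #if I <Int 0 #then 0 -Int I #else I #fi
endmodule
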

Meta operations

+

Provided below are a few miscellaneous, mostly deprecated functions in K.
+It is not recommended to use any of them directly as they are largely
+unsupported in modern K. There are a few exceptions:

+
    +
  • #getenv - Returns the value of an environment variable
  • +
  • #kompiledDirectory - Returns the path to the current compiled K definition
    +directory.
  • +
  • #unparseKORE - Takes a K term and converts it to a string.
  • +
+
module K-REFLECTION
+  imports BASIC-K
+  imports STRING
+
+  syntax K ::= "#configuration" [function, impure, hook(KREFLECTION.configuration)]
+  syntax String ::= #sort(K) [function, hook(KREFLECTION.sort)]
+  syntax KItem ::= #fresh(String)   [function, hook(KREFLECTION.fresh), impure]
+  syntax KItem ::= getsymbol(K)  [function, hook(KREFLECTION.getKLabel)]
+
+  syntax K ::= #getenv(String) [function, impure, hook(KREFLECTION.getenv)]
+
+  syntax String ::= #kompiledDirectory() [function, hook(KREFLECTION.kompiledDir)]
+
+  // meaningful only for the purposes of compilation to a binary, otherwise
+  // undefined
+  syntax List ::= #argv() [function, hook(KREFLECTION.argv)]
+
+  syntax {Sort} String ::= #unparseKORE(Sort) [function, hook(KREFLECTION.printKORE)]
+  syntax IOError ::= "#noParse" "(" String ")" [symbol(#noParse)]
+
+endmodule
+

I/O in K

+

Concrete execution in K supports I/O operations. This functionality is not
+supported during symbolic execution, because symbolic execution must remain
+completely free of side effects, and I/O is an irreducible type of side effect.
+However, it is useful in many cases when defining concrete execution to be able
+to make reference to I/O operations.

+

The design of these I/O operations is based on the POSIX standard, for the most
+part. For example, the #read K function maps to the read POSIX function. We
+do not at this time have a higher-level API for I/O, but this may be
+implemented at some point in the future.

+

I/O operations generally return either their result, or an IOError term
+corresponding to the errno returned by the underlying system call.

+
module K-IO
+  imports private LIST
+  imports private STRING
+  imports private INT
+

I/O errors

+ +

Aside from EOF, which is returned by #getc if the file is at end-of-file, all
+of the below I/O errors correspond to possible values for errno after calling
+a library function. If the errno returned is not one of the below errnos
+known to K, #unknownIOError is returned along with the integer errno value.

+
  syntax IOError ::= "#EOF" [symbol(#EOF)]
+                   | #unknownIOError(errno: Int) [symbol(#unknownIOError)]
+                   | "#E2BIG" [symbol(#E2BIG)]
+                   | "#EACCES" [symbol(#EACCES)]
+                   | "#EAGAIN" [symbol(#EAGAIN)]
+                   | "#EBADF" [symbol(#EBADF)]
+                   | "#EBUSY" [symbol(#EBUSY)]
+                   | "#ECHILD" [symbol(#ECHILD)]
+                   | "#EDEADLK" [symbol(#EDEADLK)]
+                   | "#EDOM" [symbol(#EDOM)]
+                   | "#EEXIST" [symbol(#EEXIST)]
+                   | "#EFAULT" [symbol(#EFAULT)]
+                   | "#EFBIG" [symbol(#EFBIG)]
+                   | "#EINTR" [symbol(#EINTR)]
+                   | "#EINVAL" [symbol(#EINVAL)]
+                   | "#EIO" [symbol(#EIO)]
+                   | "#EISDIR" [symbol(#EISDIR)]
+                   | "#EMFILE" [symbol(#EMFILE)]
+                   | "#EMLINK" [symbol(#EMLINK)]
+                   | "#ENAMETOOLONG" [symbol(#ENAMETOOLONG)]
+                   | "#ENFILE" [symbol(#ENFILE)]
+                   | "#ENODEV" [symbol(#ENODEV)]
+                   | "#ENOENT" [symbol(#ENOENT)]
+                   | "#ENOEXEC" [symbol(#ENOEXEC)]
+                   | "#ENOLCK" [symbol(#ENOLCK)]
+                   | "#ENOMEM" [symbol(#ENOMEM)]
+                   | "#ENOSPC" [symbol(#ENOSPC)]
+                   | "#ENOSYS" [symbol(#ENOSYS)]
+                   | "#ENOTDIR" [symbol(#ENOTDIR)]
+                   | "#ENOTEMPTY" [symbol(#ENOTEMPTY)]
+                   | "#ENOTTY" [symbol(#ENOTTY)]
+                   | "#ENXIO" [symbol(#ENXIO)]
+                   | "#EPERM" [symbol(#EPERM)]
+                   | "#EPIPE" [symbol(#EPIPE)]
+                   | "#ERANGE" [symbol(#ERANGE)]
+                   | "#EROFS" [symbol(#EROFS)]
+                   | "#ESPIPE" [symbol(#ESPIPE)]
+                   | "#ESRCH" [symbol(#ESRCH)]
+                   | "#EXDEV" [symbol(#EXDEV)]
+                   | "#EWOULDBLOCK" [symbol(#EWOULDBLOCK)]
+                   | "#EINPROGRESS" [symbol(#EINPROGRESS)]
+                   | "#EALREADY" [symbol(#EALREADY)]
+                   | "#ENOTSOCK" [symbol(#ENOTSOCK)]
+                   | "#EDESTADDRREQ" [symbol(#EDESTADDRREQ)]
+                   | "#EMSGSIZE" [symbol(#EMSGSIZE)]
+                   | "#EPROTOTYPE" [symbol(#EPROTOTYPE)]
+                   | "#ENOPROTOOPT" [symbol(#ENOPROTOOPT)]
+                   | "#EPROTONOSUPPORT" [symbol(#EPROTONOSUPPORT)]
+                   | "#ESOCKTNOSUPPORT" [symbol(#ESOCKTNOSUPPORT)]
+                   | "#EOPNOTSUPP" [symbol(#EOPNOTSUPP)]
+                   | "#EPFNOSUPPORT" [symbol(#EPFNOSUPPORT)]
+                   | "#EAFNOSUPPORT" [symbol(#EAFNOSUPPORT)]
+                   | "#EADDRINUSE" [symbol(#EADDRINUSE)]
+                   | "#EADDRNOTAVAIL" [symbol(#EADDRNOTAVAIL)]
+                   | "#ENETDOWN" [symbol(#ENETDOWN)]
+                   | "#ENETUNREACH" [symbol(#ENETUNREACH)]
+                   | "#ENETRESET" [symbol(#ENETRESET)]
+                   | "#ECONNABORTED" [symbol(#ECONNABORTED)]
+                   | "#ECONNRESET" [symbol(#ECONNRESET)]
+                   | "#ENOBUFS" [symbol(#ENOBUFS)]
+                   | "#EISCONN" [symbol(#EISCONN)]
+                   | "#ENOTCONN" [symbol(#ENOTCONN)]
+                   | "#ESHUTDOWN" [symbol(#ESHUTDOWN)]
+                   | "#ETOOMANYREFS" [symbol(#ETOOMANYREFS)]
+                   | "#ETIMEDOUT" [symbol(#ETIMEDOUT)]
+                   | "#ECONNREFUSED" [symbol(#ECONNREFUSED)]
+                   | "#EHOSTDOWN" [symbol(#EHOSTDOWN)]
+                   | "#EHOSTUNREACH" [symbol(#EHOSTUNREACH)]
+                   | "#ELOOP" [symbol(#ELOOP)]
+                   | "#EOVERFLOW" [symbol(#EOVERFLOW)]
+

I/O result sorts

+ +

Here we see sorts defined to contain either an Int or an IOError, or
+either a String or an IOError. These sorts are used to implement the
+return sort of functions that may succeed, in which case they return a value,
+or may fail, in which case their return value indicates an error and the
+error indicated is returned via errno.

+
  syntax IOInt ::= Int | IOError
+  syntax IOString ::= String | IOError
+

Opening a file

+ +

You can open a file in K using #open. An optional mode indicates the file
+open mode, which can have any value allowed by the fopen function in C.
+The returned value is the file descriptor that was opened, or an error.

+
  syntax IOInt ::= "#open" "(" path: String ")" [function]
+               | "#open" "(" path: String "," mode: String ")" [function, hook(IO.open), impure]
+
+  rule #open(S:String) => #open(S:String, "r+")
+

Get/set position in file

+ +

You can get the current offset in a file using #tell. You can also seek
+to a particular offset using #seek or #seekEnd. #seek is implemented via
+a call to lseek with the SEEK_SET whence. #seekEnd is implemented via a
+call to lseek with the SEEK_END whence. You can emulate the SEEK_CUR
+whence by means of #seek(FD, #tell(FD) +Int Offset).

+
  syntax IOInt ::= "#tell" "(" fd: Int ")" [function, hook(IO.tell), impure]
+  syntax K ::= "#seek" "(" fd: Int "," index: Int ")" [function, hook(IO.seek), impure]
+             | "#seekEnd" "(" fd: Int "," fromEnd: Int ")" [function, hook(IO.seekEnd), impure]
+

Read from file

+ +

You can read a single character from a file using #getc. #EOF is returned
+if you are at end-of-file.

+

You can also read up to length characters in a file using #read. The
+resulting read characters are returned, which may be fewer characters than
+requested. A string of zero length being returned indicates end-of-file.

+
  syntax IOInt ::= "#getc" "(" fd: Int ")"             [function, hook(IO.getc), impure]
+  syntax IOString ::= "#read" "(" fd: Int "," length: Int ")"    [function, hook(IO.read), impure]
+

Write to file

+ +

You can write a single character to a file using #putc. You can also write
+a string to a file using #write. The returned value on success is .K.

+
  syntax K ::= "#putc" "(" fd: Int "," value: Int ")"      [function, hook(IO.putc), impure]
+             | "#write" "(" fd: Int "," value: String ")" [function, hook(IO.write), impure]
+
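For instance (editorial examples; FD stands for a file descriptor previously
returned by #open, and evaluation assumes concrete execution with I/O enabled):

  // #putc(FD, 65)          writes the single character "A"
  // #write(FD, "hello\n")  writes the string followed by a newline
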

Closing a file

+ +

You can close a file using #close. The returned value on success is .K.

+
  syntax K ::= "#close" "(" fd: Int ")" [function, hook(IO.close), impure]
+

Locking/unlocking a file

+ +

You can lock or unlock parts of a file using the #lock and #unlock
+functions. The lock starts at the beginning of the file and continues for
+endIndex bytes. Note that Unix systems do not actually prevent locked files
+from being read and modified; you will have to lock both sides of a concurrent
+access to guarantee exclusivity.

+
  syntax K ::= "#lock" "(" fd: Int "," endIndex: Int ")" [function, hook(IO.lock), impure]
+             | "#unlock" "(" fd: Int "," endIndex: Int ")" [function, hook(IO.unlock), impure]
+

Networking

+ +

You can accept a connection on a socket using #accept, or shut down the
+write end of a socket with #shutdownWrite. Note that no facility is provided
+for opening, binding, and listening on sockets. These functions are implemented
+in order to support creating stateful request/response servers where the
+request loop is implemented using rewriting in K, but the connection
+initialization is written in native code and linked into the LLVM backend.

+
  syntax IOInt ::= "#accept" "(" fd: Int ")" [function, hook(IO.accept), impure]
+  syntax K ::= "#shutdownWrite" "(" fd: Int ")" [function, hook(IO.shutdownWrite), impure]
+

Time

+ +

You can get the current time in seconds since midnight UTC on January 1, 1970
+using #time.

+
  syntax Int ::= "#time" "(" ")" [function, hook(IO.time), impure]
+

Builtin file descriptors

+ +

Provided here are functions that return the file descriptor for standard input,
+standard output, and standard error.

+
  syntax Int ::= "#stdin"   [function, total]
+                | "#stdout" [function, total]
+                | "#stderr" [function, total]
+
+  rule #stdin => 0
+  rule #stdout => 1
+  rule #stderr => 2
+

Shell access

+ +

You can execute a command using the shell using the #system operator. Care
+must be taken to sanitize inputs to this function or security issues may
+result. Note that K has no facility for reasoning about logic that happens
+outside its process, so any functionality that you wish to be able to formally
+reason about in K should not be implemented via the #system operator.

+
  syntax KItem ::= #system ( String ) [function, hook(IO.system), impure]
+                 | "#systemResult" "(" Int /* exit code */ "," String /* stdout */ "," String /* stderr */ ")" [symbol(#systemResult)]
+

Temporary files

+ +

You can get a temporary file and open it atomically using the #mkstemp
+operator. The resulting file will be closed and deleted when K rewriting ends.
+For more info on the argument to #mkstemp, see man mkstemp.

+
  syntax IOFile ::= #mkstemp(template: String) [function, hook(IO.mkstemp), impure]
+  syntax IOFile ::= IOError
+                  | "#tempFile" "(" path: String "," fd: Int ")" [symbol(#tempFile)]
+

Deleting a file

+ +

You can delete a file using its absolute or relative path using the #remove
+operator. It returns .K on success or an IOError on failure.

+
  syntax K ::= #remove(path: String) [function, total, hook(IO.remove), impure]
+

Logging

+ +

You can log information to disk using the #logToFile operator. Semantically,
+this operator returns .K. However, it has a side effect that is not reasoned
+about: value will be written to a uniquely-identified file whose name
+contains name. The file is only flushed to disk when rewriting
+finishes.

+
  syntax K ::= #logToFile(name: String, value: String) [function, total, hook(IO.log), impure, returnsUnit, symbol(#logToFile)]
+

Strings can also be logged via the logging mechanisms available to the backend.
+On the LLVM backend, this just means logging the text to standard error. On the
+Haskell backend, a log message of type InfoUserLog is created with the
+specified text.

+
  syntax K ::= #log(value: String) [function, total, hook(IO.logString), impure, returnsUnit, symbol(#log)]
+

Terms can also be logged to standard error in surface syntax, rather than as
+KORE using #trace. This operator has similar semantics to #logToFile (i.e.
+it returns .K, but prints as an impure side effect). Note that calling
+#trace is equivalent to invoking the kprint tool for the first term that is
+logged, which requires re-parsing the underlying K definition. Subsequent calls
+do not incur this overhead again; the definition is cached.

+
  syntax K ::= #trace(value: KItem) [function, total, hook(IO.traceTerm), impure, returnsUnit, symbol(#trace)]
+             | #traceK(value: K)    [function, total, hook(IO.traceTerm), impure, returnsUnit, symbol(#traceK)]
+

Implementation of high-level I/O streams in K

+ +

Below is an implementation of the stream="stdin" and stream="stdout"
+cell attributes in K. You should not refer to these symbols or modules directly
+in your definition. It is provided only so that the K compiler can make use of
+it. For more information on how to use this feature, refer to IMP++ in the K
+tutorial.

+
  syntax Stream ::= #buffer(K)
+                  | #istream(Int)
+                  | #parseInput(String, String)
+                  | #ostream(Int)
+
+endmodule
+
+// NOTE: DO NOT DIRECTLY IMPORT *-STREAM MODULES
+// These stream modules will be automatically instantiated and implicitly imported
+// into the main module when `stream` attributes appear in configuration cells.
+// Only `Stream` productions and `[stream]` rules will be imported.
+// The cell name will be replaced with the one of the main configuration.
+
+module STDIN-STREAM
+  imports K-IO
+  imports K-REFLECTION
+  imports LIST
+  imports INT
+  imports BOOL
+
+  configuration <stdin> ListItem(#buffer($STDIN:String)) ListItem($IO:String) ListItem(#istream(#stdin)) </stdin>
+
+  // read one character at a time until we read whitespace
+  rule [stdinGetc]:
+       <stdin>
+       ListItem(#parseInput(_:String, Delimiters:String))
+       ListItem(#buffer(S:String => S +String chrChar({#getc(N)}:>Int)))
+       ListItem("on")
+       ListItem(#istream(N:Int))
+       </stdin>
+    requires findChar(S, Delimiters, 0) ==Int -1 // [stdin]
+       [stream, priority(200)]
+
+  // when we reach whitespace, if it parses create a ListItem
+  rule [stdinParseString]:
+       <stdin>
+       (ListItem(#parseInput("String", Delimiters:String)) => ListItem(S))
+       ListItem(#buffer(S:String => ""))
+       _:List
+       </stdin>
+    requires findChar(S, Delimiters, 0) =/=Int -1 // [stdin]
+       [stream]
+
+  // a hack: handle the case when we read integers without the help of the IO server
+  rule [stdinParseInt]:
+       <stdin>
+       (ListItem(#parseInput("Int", Delimiters:String))
+       => ListItem(String2Int(substrString(S, 0, findChar(S, Delimiters, 0)))))
+       ListItem(#buffer(S:String => substrString(S,findChar(S, Delimiters, 0) +Int 1, lengthString(S))))
+       _:List
+       </stdin>
+    requires findChar(S, Delimiters, 0) =/=Int -1
+       andBool lengthString(S) >Int 1 // [stdin]
+       [stream]
+
+  rule [stdinTrim]:
+       <stdin>
+       ListItem(#parseInput(Sort:String, Delimiters:String))
+       ListItem(#buffer(S:String => substrString(S, 1, lengthString(S))))
+       _:List
+       </stdin>
+    requires findChar(S, Delimiters, 0) =/=Int -1
+       andBool Sort =/=String "String"
+       andBool lengthString(S) <=Int 1 // [stdin]
+       [stream]
+
+  // NOTE: This unblocking rule will be instantiated and inserted carefully
+  // when necessary according to user-defined rules, since otherwise it will
+  // lead to a diverging (i.e., non-terminating) transition system definition.
+  // Currently, it supports only a simple pattern matching on the top of the
+  // input stream cell, e.g.,
+  //   rule <k> read() => V ... </k> <in> ListItem(V:Int) => .List ...  </in>
+  // Non-supported rules that refer to the input stream cell in a sophisticated
+  // way will get stuck in concrete execution mode with real IO enabled (i.e.,
+  // under `--io on` option), while they will still work in symbolic execution
+  // mode or concrete execution mode with real IO disabled (i.e., under `--io
+  // off`, `--search`, or `--debug` options).
+  //
+  // TODO: More patterns need to be supported as well. In that case, we need to
+  // have a way to specify such patterns.
+  rule [stdinUnblock]:
+       <stdin>
+         (.List => ListItem(#parseInput(?Sort:String, ?Delimiters:String)))
+         ListItem(#buffer(_:String))
+         ...
+       </stdin>
+
+  /*
+  syntax Stream ::= "#noIO"
+
+  rule ListItem(#buffer(_))
+       (ListItem(#noIO) ListItem(#istream(_:Int)) => .List) [stdin]
+  */
+
+endmodule
+
+module STDOUT-STREAM
+  imports K-IO
+  imports LIST
+  imports STRING
+
+  configuration <stdout> ListItem(#ostream(#stdout)) ListItem($IO:String) ListItem(#buffer("")) </stdout>
+//configuration <stderr> ListItem(#ostream(#stderr)) ListItem($IO:String) ListItem(#buffer("")) </stderr>
+
+  rule [stdoutBufferFloat]:
+       <stdout>
+       ListItem(#ostream(_))
+       ListItem(_)
+       ListItem(#buffer(Buffer:String => Buffer +String Float2String(F)))
+       (ListItem(F:Float) => .List)
+       _:List
+       </stdout>
+       // [stdout, stderr]
+       [stream, priority(25)]
+  rule [stdoutBufferInt]:
+       <stdout>
+       ListItem(#ostream(_))
+       ListItem(_)
+       ListItem(#buffer(Buffer:String => Buffer +String Int2String(I)))
+       (ListItem(I:Int) => .List)
+       _:List
+       </stdout>
+       // [stdout, stderr]
+       [stream, priority(25)]
+  rule [stdoutBufferString]:
+       <stdout>
+       ListItem(#ostream(_))
+       ListItem(_)
+       ListItem(#buffer(Buffer:String => Buffer +String S))
+       (ListItem(S:String) => .List)
+       _:List
+       </stdout>
+       // [stdout, stderr]
+       [stream, priority(25)]
+
+  // Send first char from the buffer to the server
+  rule [stdoutWrite]:
+       <stdout>
+       ListItem(#ostream(N:Int => {#write(N, S) ~> N:Int}:>Int))
+       ListItem("on")
+       ListItem(#buffer(S:String => ""))
+       _:List
+       </stdout>
+    requires S =/=String "" // [stdout, stderr]
+       [stream, priority(30)]
+
+  /*
+  syntax Stream ::= "#noIO"
+
+  rule ListItem(#buffer(Buffer:String => Buffer +String Float2String(F)))
+       (ListItem(F:Float) => .List)
+       _:List [stdout, stderr]
+  rule ListItem(#buffer(Buffer:String => Buffer +String Int2String(I)))
+       (ListItem(I:Int) => .List)
+       _:List [stdout, stderr]
+  rule ListItem(#buffer(Buffer:String => Buffer +String S))
+       (ListItem(S:String) => .List)
+       _:List [stdout, stderr]
+
+  rule (ListItem(#ostream(_:Int)) ListItem(#noIO) => .List)
+       ListItem(#buffer(_))
+       _:List [stdout, stderr]
+  */
+
+endmodule
+

Machine Integers

+

Provided here is an implementation of arbitrarily large fixed-precision binary
+integers in K. This type is hooked to an implementation of integers provided
+by the backend, and in particular makes use of native machine integers for
+certain sizes of integer. For arbitrary-precision integers, see the INT
+module above.

+

The syntax of machine integers in K is the same as arbitrary-precision integers
+(i.e., an optional sign followed by a sequence of digits) except that machine
+integers always end in a suffix pN where N is an integer indicating the
+width in bits of the integer. The MInt sort is parametric, and this is
+reflected in the literals. For example, the sort of 0p8 is MInt{8}.

+
module MINT-SYNTAX
+  /*@\section{Description} The MInt implements machine integers of arbitrary
+   * bit width represented in 2's complement. */
+  syntax {Width} MInt{Width} [hook(MINT.MInt)]
+
+  /*@ Machine integer of bit width and value. */
+  syntax {Width} MInt{Width} ::= r"[\\+\\-]?[0-9]+[pP][0-9]+" [token, prec(2), hook(MINT.literal)]
+endmodule
+
+module MINT
+  imports MINT-SYNTAX
+  imports private INT
+  imports private BOOL
+

Bitwidth of MInt

+ +

You can get the number of bits of width in an MInt using bitwidthMInt.

+
  syntax {Width} Int ::= bitwidthMInt(MInt{Width})   [function, total, hook(MINT.bitwidth)]
+

Int and MInt conversions

+ +

You can convert from an MInt to an Int using the MInt2Signed and
+MInt2Unsigned functions. An MInt does not have a sign; its sign is instead
+determined by how operators interpret its value, either as a signed integer or
+as an unsigned integer. Thus, you can interpret an MInt as a signed integer
+with MInt2Signed, or as an unsigned integer with MInt2Unsigned.

+

You can also convert from an Int to an MInt using Int2MInt. Care must
+be given to ensure that the sort context where the Int2MInt operator appears
+has the correct bitwidth, as this will influence the width of the resulting
+MInt.

+
  syntax {Width} Int ::= MInt2Signed(MInt{Width})     [function, total, hook(MINT.svalue)]
+                       | MInt2Unsigned(MInt{Width})     [function, total, hook(MINT.uvalue), smt-hook(bv2int)]
+
+  syntax {Width} MInt{Width} ::= Int2MInt(Int) [function, total, hook(MINT.integer), smt-hook(int2bv)]
+

MInt min and max values

+ +

You can get the minimum and maximum values of a signed or unsigned MInt
+with a specified bit width using sminMInt, smaxMInt, uminMInt, and
+umaxMInt.

+
  syntax Int ::= sminMInt(Int)    [function]
+               | smaxMInt(Int)    [function]
+               | uminMInt(Int)    [function]
+               | umaxMInt(Int)    [function]
+  rule sminMInt(N:Int) => 0 -Int (1 <<Int (N -Int 1))
+  rule smaxMInt(N:Int) => (1 <<Int (N -Int 1)) -Int 1
+  rule uminMInt(_:Int) => 0
+  rule umaxMInt(N:Int) => (1 <<Int N) -Int 1
+

MInt bounds checking

+ +

You can check whether a specified Int would overflow (i.e., cannot be
+represented without loss of precision) in an MInt of a specified width, when
+interpreted as a signed or unsigned integer, using soverflowMInt and
+uoverflowMInt.

+
  syntax Bool ::= soverflowMInt(width: Int, Int)   [function]
+                | uoverflowMInt(width: Int, Int)   [function]
+  rule
+    soverflowMInt(N:Int, I:Int)
+  =>
+    I <Int sminMInt(N) orBool I >Int smaxMInt(N)
+  rule
+    uoverflowMInt(N:Int, I:Int)
+  =>
+    I <Int uminMInt(N) orBool I >Int umaxMInt(N)
+

MInt arithmetic

+ +

You can:

+
  • Compute the bitwise complement ~MInt of an MInt.
  • Compute the unary negation --MInt of an MInt.
  • Compute the product *MInt of two MInts.
  • Compute the quotient /sMInt of two MInts interpreted as signed integers.
  • Compute the modulus %sMInt of two MInts interpreted as signed integers.
  • Compute the quotient /uMInt of two MInts interpreted as unsigned integers.
  • Compute the modulus %uMInt of two MInts interpreted as unsigned integers.
  • Compute the sum +MInt of two MInts.
  • Compute the difference -MInt of two MInts.
  • Compute the left shift <<MInt of two MInts. The second MInt is always interpreted as positive.
  • Compute the arithmetic right shift >>aMInt of two MInts. The second MInt is always interpreted as positive.
  • Compute the logical right shift >>lMInt of two MInts. The second MInt is always interpreted as positive.
  • Compute the bitwise and &MInt of two MInts.
  • Compute the bitwise xor xorMInt of two MInts.
  • Compute the bitwise inclusive or |MInt of two MInts.
+
  syntax {Width} MInt{Width} ::= "~MInt" MInt{Width} [function, total, hook(MINT.not), smt-hook(bvnot)]
+                               | "--MInt" MInt{Width} [function, total, hook(MINT.neg), smt-hook(bvuminus)]
+                               > left:
+                                 MInt{Width} "*MInt" MInt{Width} [function, total, hook(MINT.mul), smt-hook(bvmul)]
+                               | MInt{Width} "/sMInt" MInt{Width} [function, hook(MINT.sdiv), smt-hook(bvsdiv)]
+                               | MInt{Width} "%sMInt" MInt{Width} [function, hook(MINT.srem), smt-hook(bvsrem)]
+                               | MInt{Width} "/uMInt" MInt{Width} [function, hook(MINT.udiv), smt-hook(bvudiv)]
+                               | MInt{Width} "%uMInt" MInt{Width} [function, hook(MINT.urem), smt-hook(bvurem)]
+                               > left:
+                                 MInt{Width} "+MInt" MInt{Width} [function, total, hook(MINT.add), smt-hook(bvadd)]
+                               | MInt{Width} "-MInt" MInt{Width} [function, total, hook(MINT.sub), smt-hook(bvsub)]
+                               > left:
+                                 MInt{Width} "<<MInt" MInt{Width} [function, hook(MINT.shl), smt-hook(bvshl)]
+                               | MInt{Width} ">>aMInt" MInt{Width} [function, hook(MINT.ashr), smt-hook(bvashr)]
+                               | MInt{Width} ">>lMInt" MInt{Width} [function, hook(MINT.lshr), smt-hook(bvlshr)]
+                               > left:
+                                 MInt{Width} "&MInt" MInt{Width} [function, total, hook(MINT.and), smt-hook(bvand)]
+                               > left:
+                                 MInt{Width} "xorMInt" MInt{Width} [function, total, hook(MINT.xor), smt-hook(bvxor)]
+                               > left:
+                                 MInt{Width} "|MInt" MInt{Width} [function, total, hook(MINT.or), smt-hook(bvor)]
+

MInt comparison

+ +

You can compute whether one MInt is less than, less than or equal to, greater
+than, or greater than or equal to another MInt when interpreted as signed
+or unsigned integers. You can also compute whether one MInt is equal to or
+unequal to another MInt.

+
  syntax {Width} Bool ::= MInt{Width} "<sMInt" MInt{Width} [function, total, hook(MINT.slt), smt-hook(bvslt)]
+                        | MInt{Width} "<uMInt" MInt{Width} [function, total, hook(MINT.ult), smt-hook(bvult)]
+                        | MInt{Width} "<=sMInt" MInt{Width} [function, total, hook(MINT.sle), smt-hook(bvsle)]
+                        | MInt{Width} "<=uMInt" MInt{Width} [function, total, hook(MINT.ule), smt-hook(bvule)]
+                        | MInt{Width} ">sMInt" MInt{Width} [function, total, hook(MINT.sgt), smt-hook(bvsgt)]
+                        | MInt{Width} ">uMInt" MInt{Width} [function, total, hook(MINT.ugt), smt-hook(bvugt)]
+                        | MInt{Width} ">=sMInt" MInt{Width} [function, total, hook(MINT.sge), smt-hook(bvsge)]
+                        | MInt{Width} ">=uMInt" MInt{Width} [function, total, hook(MINT.uge), smt-hook(bvuge)]
+                        | MInt{Width} "==MInt" MInt{Width} [function, total, hook(MINT.eq), smt-hook(=)]
+                        | MInt{Width} "=/=MInt" MInt{Width} [function, total, hook(MINT.ne), smt-hook(distinct)]
+

MInt min/max

+ +

You can compute the signed minimum sMinMInt, the signed maximum sMaxMInt,
+the unsigned minimum uMinMInt, and the unsigned maximum uMaxMInt of two
+MInts.

+
  syntax {Width} MInt{Width} ::= sMaxMInt(MInt{Width}, MInt{Width}) [function, total, hook(MINT.smax), smt-hook((ite (bvslt #1 #2) #2 #1))]
+                               | sMinMInt(MInt{Width}, MInt{Width}) [function, total, hook(MINT.smin), smt-hook((ite (bvslt #1 #2) #1 #2))]
+                               | uMaxMInt(MInt{Width}, MInt{Width}) [function, total, hook(MINT.umax), smt-hook((ite (bvult #1 #2) #2 #1))]
+                               | uMinMInt(MInt{Width}, MInt{Width}) [function, total, hook(MINT.umin), smt-hook((ite (bvult #1 #2) #1 #2))]
+

MInt to MInt conversion

+ +

You can convert an MInt of one width to another width with roundMInt.
+The resulting MInt will be truncated starting from the most significant bit
+if the resulting width is smaller than the input, and will be zero-extended,
+keeping the same low-order bits, if the resulting width is larger than the
+input. signExtendMInt also converts between widths, but sign-extends rather
+than zero-extends when the resulting width is larger than the input.

+
  syntax {Width1, Width2} MInt{Width1} ::= roundMInt(MInt{Width2}) [function, total, hook(MINT.round)]
+  syntax {Width1, Width2} MInt{Width1} ::= signExtendMInt(MInt{Width2}) [function, total, hook(MINT.sext)]
+
endmodule
+
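As an illustration of these operations, the following is a hedged sketch of a
client module (the constant names wrapExample and widenExample are
hypothetical):

module MINT-EXAMPLE
  imports MINT

  // 8-bit arithmetic wraps around: this evaluates to 0p8.
  syntax MInt{8} ::= "wrapExample" [function]
  rule wrapExample => 255p8 +MInt 1p8

  // Widening with roundMInt zero-extends, so this evaluates to 255p16;
  // signExtendMInt(255p8) at width 16 would instead yield 65535p16.
  syntax MInt{16} ::= "widenExample" [function]
  rule widenExample => roundMInt(255p8)
endmodule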

K Language Features

+

Defined below is a series of modules used to parse inner syntax in K (ie, the
+contents of rules, configuration declarations, and contexts).

+

Much of this file exists in tight correspondence with the K implementation, and
+K will not work correctly if it is altered without corresponding changes to the
+source code of the K tools.

+

Users should only import a few modules from this file. In particular, this
+includes SORT-K, BASIC-K, ML-SYNTAX, DEFAULT-LAYOUT,
+DEFAULT-CONFIGURATION, and K-AMBIGUITIES. The remaining modules should not
+be imported by the user; they are used implicitly by the implementation of K.

+

Basic K Sorts

+

The SORT-K module declares the K sort, and nothing else.

+
module SORT-K
+  syntax K [hook(K.K)]
+endmodule
+

The BASIC-K module declares the K, KItem, and KConfigVar sorts, and
+imports the syntax of matching logic.

+
module BASIC-K
+  imports ML-SYNTAX
+  imports SORT-BOOL
+  syntax KItem [hook(K.KItem)]
+  syntax K     ::= KItem
+  syntax KConfigVar [token]
+  syntax KItem ::= KConfigVar
+endmodule
+

KAST Abstract Syntax

+

Below is defined the abstract syntax of concrete terms in K, the KAST syntax.
+Users should rarely if ever have to refer to this syntax; in general, it
+suffices to use concrete syntax in rules, configuration declarations, contexts,
+etc.

+

This syntax is used directly by the K implementation, and exists here as a
+reference for the syntax of KAST, but it should not be imported directly by
+the user.

+
module KSTRING
+  syntax KString ::= r"[\\\"](([^\\\"\\n\\r\\\\])|([\\\\][nrtf\\\"\\\\])|([\\\\][x][0-9a-fA-F]{2})|([\\\\][u][0-9a-fA-F]{4})|([\\\\][U][0-9a-fA-F]{8}))*[\\\"]"      [token]
+  // optionally qualified strings, like in Scala "abc", i"abc", r"a*bc", etc.
+endmodule
+
+module BUILTIN-ID-TOKENS
+  syntax #LowerId ::= r"[a-z][a-zA-Z0-9]*"                    [prec(2), token]
+  syntax #UpperId ::= r"[A-Z][a-zA-Z0-9]*"                    [prec(2), token]
+endmodule
+
+module SORT-KBOTT
+  imports SORT-K
+  syntax KBott
+endmodule
+
+module KAST
+  imports BASIC-K
+  imports SORT-KBOTT
+  imports KSTRING
+  imports BUILTIN-ID-TOKENS
+
+  syntax KBott ::= "#token" "(" KString "," KString ")"  [symbol(#KToken)]
+                 | "#klabel" "(" KLabel ")"              [symbol(#WrappedKLabel)]
+                 | KLabel "(" KList ")"                  [symbol(#KApply)]
+  syntax KItem ::= KBott
+
+  syntax KLabel ::= r"`(\\\\`|\\\\\\\\|[^`\\\\\\n\\r])+`" [token]
+                  | #LowerId                                   [token]
+                  | r"[#a-z][a-zA-Z0-9]*"               [token, prec(1)]
+
+  syntax KList ::= K
+                 | ".KList"          [symbol(#EmptyKList)]
+                 | KList "," KList   [symbol(#KList), left, assoc, unit(#EmptyKList), prefer]
+endmodule
+
+
+// To be used when parsing/pretty-printing ground configurations
+module KSEQ
+  imports KAST
+  imports K-TOP-SORT
+  syntax K ::= ".K"      [symbol(#EmptyK)]
+             | "."       [symbol(#EmptyK), deprecated, unparseAvoid]
+  syntax K ::= K "~>" K  [symbol(#KSequence), left, assoc, unit(#EmptyK)]
+  syntax left #KSequence
+  syntax {Sort} Sort     ::= "(" Sort ")"    [bracket, group(defaultBracket), applyPriority(1)]
+endmodule
+

Syntax of Matching Logic

+

K provides direct access to the symbols of Matching Logic, while giving them
+their own concrete syntax distinct from the syntax of the KORE intermediate
+representation. These symbols are primarily used during symbolic execution.
+The LLVM backend has relatively little understanding of matching logic, and
+using these symbols directly in rules is likely to cause it to crash.
+However, these symbols are necessary when providing lemmas and other types of
+logical assistance to proofs and symbolic execution in the Haskell Backend.

+

The correspondence between K symbols and KORE symbols is as follows:

+
  • #Top - \top
  • #Bottom - \bottom
  • #Not - \not
  • #Ceil - \ceil
  • #Floor - \floor
  • #Equals - \equals
  • #And - \and
  • #Or - \or
  • #Implies - \implies
  • #Exists - \exists
  • #Forall - \forall
  • #AG - allPathGlobally
  • #wEF - weakExistsFinally
  • #wAF - weakAlwaysFinally
+
module ML-SYNTAX [not-lr1]
+  imports SORT-K
+
+  syntax {Sort} Sort ::= "#Top" [symbol(#Top), group(mlUnary)]
+                       | "#Bottom" [symbol(#Bottom), group(mlUnary)]
+                       | "#Not" "(" Sort ")" [symbol(#Not), mlOp, group(mlUnary, mlOp)]
+
+  syntax {Sort1, Sort2} Sort2 ::= "#Ceil" "(" Sort1 ")" [symbol(#Ceil), mlOp, group(mlUnary, mlOp)]
+                                | "#Floor" "(" Sort1 ")" [symbol(#Floor), mlOp, group(mlUnary, mlOp)]
+                                | "{" Sort1 "#Equals" Sort1 "}" [symbol(#Equals), mlOp, group(mlEquals, mlOp), comm, format(%1%i%n%2%d%n%3%i%n%4%d%n%5)]
+
+  syntax priority mlUnary > mlEquals > mlAnd
+
+  syntax {Sort} Sort ::= Sort "#And" Sort [symbol(#And), assoc, left, comm, unit(#Top), mlOp, group(mlAnd, mlOp), format(%i%1%d%n%2%n%i%3%d)]
+                       > Sort "#Or" Sort [symbol(#Or), assoc, left, comm, unit(#Bottom), mlOp, group(mlOp), format(%i%1%d%n%2%n%i%3%d)]
+                       > Sort "#Implies" Sort [symbol(#Implies), mlOp, group(mlImplies, mlOp), format(%i%1%d%n%2%n%i%3%d)]
+
+  syntax priority mlImplies > mlQuantifier
+
+  syntax {Sort1, Sort2} Sort2 ::= "#Exists" Sort1 "." Sort2 [symbol(#Exists), mlOp, mlBinder, group(mlQuantifier, mlOp)]
+                                | "#Forall" Sort1 "." Sort2 [symbol(#Forall), mlOp, mlBinder, group(mlQuantifier, mlOp)]
+
+  syntax {Sort} Sort ::= "#AG" "(" Sort ")" [symbol(#AG), mlOp, group(mlOp)]
+                       | "#wEF" "(" Sort ")" [symbol(weakExistsFinally), mlOp, group(mlOp)]
+                       | "#wAF" "(" Sort ")" [symbol(weakAlwaysFinally), mlOp, group(mlOp)]
+endmodule
+

Variables in K

+

Provided below is the syntax of variables in K. There are four types of
+variables in K:

+
  1. Regular variables. These are denoted by variables that begin with an
     underscore or a capital letter. These variables match exactly one value
     and can be used to refer to it on the right-hand side.
  2. Fresh constants. These are denoted by variables that begin with an !. This
     is a convenience syntax which can be used on the right-hand side only, and
     refers to a unique value of the specified sort which is distinct from any
     other value that has been generated or will be generated by the !X syntax.
     Note that this may not be distinct from values produced via other means.
  3. Existential variables. This refers to variables that are existentially
     quantified and begin with a ?. They are not required to appear on the
     left-hand side prior to appearing on the right-hand side, and generally
     refer to symbolic quantities that are introduced during rewriting. Refer to
     K's documentation for more details.
  4. Set variables. These are denoted by variables that begin with a @.
     These variables refer to a set of values and are generally used when writing
     simplification rules in the Haskell Backend. For more information, refer to
     K's documentation.
+

There is also a fifth type of "variable", although it is not technically a
+variable. This refers to configuration variables, which are used to insert
+values into the initial configuration that come from outside the semantics.
+The most common of these is the $PGM variable, which conventionally contains
+the program being executed and is placed in the <k> cell in the configuration
+declaration. These "variables" begin with a $ and their values are populated
+by the frontend prior to symbolic or concrete execution of a program.
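
As a small hedged illustration of the first two variable kinds and of a
configuration variable (the cell layout and the operations here are
hypothetical):

module VARIABLE-EXAMPLE
  imports INT

  configuration <k> $PGM:K </k>  // $PGM is a configuration "variable"

  // X is a regular variable: it matches one value and is reused on the right.
  syntax KItem ::= inc(Int)
  rule <k> inc(X:Int) => X +Int 1 ... </k>

  // !T is a fresh constant: each rule application produces a distinct Int.
  syntax KItem ::= "newTag"
  rule <k> newTag => !T:Int ... </k>
endmodule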

+
module KVARIABLE-SYNTAX
+  syntax #KVariable
+endmodule
+
+// To be used when parsing/pretty-printing symbolic configurations
+module KSEQ-SYMBOLIC
+  imports KSEQ
+  imports ML-SYNTAX
+  imports KVARIABLE-SYNTAX
+
+  syntax #KVariable ::= r"(\\!|\\?|@)?([A-Z][A-Za-z0-9'_]*|_|_[A-Z][A-Za-z0-9'_]*)"   [token, prec(1)]
+                      | #UpperId                                                          [token]
+  syntax KConfigVar ::= r"(\\$)([A-Z][A-Za-z0-9'_]*)"            [token]
+  syntax KBott      ::= #KVariable
+  syntax KBott      ::= KConfigVar
+endmodule
+

Syntax of Cells

+

While the backend treats cells as regular productions like any other, the
+frontend provides a significant amount of convenience notation for dealing with
+groups of cells, in order to make writing modular definitions easier. As a
+result, we need a syntax for groups of cells and for referring to cells within
+rules, configuration declarations, and functions.

+

For historical reasons, the Bag sort is used to refer to groups of cells.
+This may change in a future release. Users can combine cells in any order
+by concatenating them together, and can refer to the absence of any cells with
+the .Bag symbol. You can also refer to cells within a function by placing
+the cell context symbol [[ K ]] at the top of a rule, placing the function
+symbol inside it, and referring to cells afterwards. This implicitly gives the
+function access to the configuration as it was just before the currently
+applied rule, and that configuration can be matched on within the function.
+Functions with such a context cannot be used in the initial configuration,
+because the prior configuration does not yet exist.
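
For example, the following hedged sketch (the <store> cell and the lookup
function are hypothetical) reads the prior configuration from within a
function using the cell context notation:

  syntax Int ::= lookup(Int) [function]
  rule [[ lookup(X:Int) => V ]]
       <store> ... X |-> V:Int ... </store>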

+
module KCELLS
+  imports KAST
+
+  syntax Cell
+  syntax Bag ::= Bag Bag  [left, assoc, symbol(#cells), unit(#cells)]
+               | ".Bag"   [symbol(#cells)]
+               | ".::Bag" [symbol(#cells)]
+               | Cell
+  syntax Bag ::= "(" Bag ")" [bracket]
+  syntax KItem ::= Bag
+  syntax #RuleBody ::= "[" "[" K "]" "]" Bag    [symbol(#withConfig), avoid]
+  syntax non-assoc #withConfig
+  syntax Bag ::= KBott
+endmodule
+

Users can also refer to cells in rules. When doing so, an optional ... can
+be placed immediately after the start of the cell or immediately before the
+end. In a cell whose contents are commutative, these are equivalent to one
+another and are also equivalent to placing ... in both places. This means
+that what is placed in the cell will be combined with the cell contents'
+concatenation operator with an unnamed variable. In other words, you match on
+some number of elements in the collection and do not care about the rest of
+the collection.

+

In a cell whose contents are not commutative, the ... operators correspond
+to a variable on the respective side of the contents of the cell that the
+... appears. For example, <foo>... L </foo>, if L is a list, means
+some number of elements followed by L. Note that not all combinations are
+supported. Cells whose contents are sort K can only have ... appear at the
+tail of the cell, and cells whose contents are sort List can only have ...
+appear on at most one side in a single rule.
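
For example, the canonical variable-lookup rule below (a hedged sketch; the
cell names are hypothetical and Id comes from the builtin ID module) uses ...
at the tail of the <k> cell and on both sides of the commutative <env> map
cell:

  rule <k> X:Id => V ... </k>
       <env> ... X |-> V:Int ... </env>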

+
module RULE-CELLS
+  imports KCELLS
+  imports RULE-LISTS
+  // if this module is imported, the parser automatically
+  // generates, for all productions that have the attribute 'cell' or 'maincell',
+  // a production like below:
+  //syntax Cell ::= "<top>" #OptionalDots K #OptionalDots "</top>" [symbol(<top>)]
+
+  syntax #OptionalDots ::= "..." [symbol(#dots)]
+                         | ""    [symbol(#noDots)]
+
+  syntax Int
+  // this production will be added by the compiler to help handle bang variables,
+  // however, it is valuable to put it here because without this production, it
+  // will not exist at the point in time when rules and claims are parsed, and
+  // as a result it makes it very difficult to write proof claims over fragments
+  // of code that exercise rules containing bang variables. We put it here because
+  // this production will "vanish" after parsing finishes and not be picked up
+  // by the compiler, which is the behavior we want in this case since an actual
+  // production will be generated by the compiler later on.
+  syntax GeneratedCounterCell ::= "<generatedCounter>" Int "</generatedCounter>" [cell, symbol(<generatedCounter>), internal]
+endmodule
+

Users can also declare cells in a configuration declaration. This generates a
+specific set of productions that is used internally to implement the cell. The
+most important of these is the cell itself, and attributes on this production
+can be specified in an xml-attribute-like syntax.

+

You can also use an xml-short-tag-like syntax to compose configuration cells
+together which were defined in different modules. However, it is a requirement
+that any K definition have at most one fully-composed configuration; thus, all
+other configuration declarations must appear composed within another
+configuration declaration.

+
module CONFIG-CELLS
+  imports KCELLS
+  imports RULE-LISTS
+  syntax #CellName ::= r"[a-zA-Z][a-zA-Z0-9\\-]*"  [token, prec(1)]
+                     | #LowerId            [token]
+                     | #UpperId            [token]
+
+  syntax Cell ::= "<" #CellName #CellProperties ">" K "</" #CellName ">" [symbol(#configCell)]
+  syntax Cell ::= "<" #CellName "/>" [symbol(#externalCell)]
+
+  syntax #CellProperties ::= #CellProperty #CellProperties [symbol(#cellPropertyList)]
+                           | ""                            [symbol(#cellPropertyListTerminator)]
+  syntax #CellProperty ::= #CellName "=" KString           [symbol(#cellProperty)]
+endmodule
+
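A hedged sketch of such a declaration (the cell names are hypothetical) using
the xml-attribute-like syntax described above:

module CONFIG-EXAMPLE
  imports MAP

  configuration <threads>
                  <thread multiplicity="*" type="Set">
                    <k> $PGM:K </k>
                    <env> .Map </env>
                  </thread>
                </threads>
endmodule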

Syntax of Rules

+

Rules can have an optional requires clause and an optional ensures clause. For
+backwards compatibility, the requires clause can be introduced with either the
+requires or the when keyword; the latter, however, is deprecated and may be
+removed in a future release.

+

The requires clause specifies the preconditions that must be true in order
+for the rule to apply. The ensures clause specifies the information which
+becomes true after the rule has applied. It is a requirement that information
+present in the ensures clause refer to existential variables only.

+

When doing concrete execution, you can think of the requires clause as a
+side-condition. In other words, even if the rule matches, it will not apply
+unless the requires clause, which must be of sort Bool, evaluates to
+true.

+
module REQUIRES-ENSURES
+  imports BASIC-K
+
+  syntax #RuleBody ::= K
+
+  syntax #RuleContent ::= #RuleBody                                 [symbol("#ruleNoConditions")]
+                        | #RuleBody "requires" Bool                 [symbol("#ruleRequires")]
+                        | #RuleBody "ensures"  Bool                 [symbol("#ruleEnsures")]
+                        | #RuleBody "requires" Bool "ensures" Bool  [symbol("#ruleRequiresEnsures")]
+endmodule
+
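For example, the following hedged sketch (the function name myAbs is
hypothetical) uses requires clauses so that each rule applies only to the
intended inputs:

module REQUIRES-EXAMPLE
  imports INT
  imports BOOL

  syntax Int ::= myAbs(Int) [function, total]
  rule myAbs(I:Int) => I        requires I >=Int 0
  rule myAbs(I:Int) => 0 -Int I requires I <Int 0
endmodule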

Miscellaneous modules

+

The below modules are used in various ways as indicators to the implementation
+that certain automatically generated syntax should be created by the parser.
+These modules should not be imported directly by the user.

+
module K-TOP-SORT
+  imports SORT-KBOTT
+  syntax KItem ::= KBott
+  syntax {Sort} KItem ::= Sort
+endmodule
+
+module K-BOTTOM-SORT
+  imports SORT-KBOTT
+  syntax KItem ::= KBott
+  syntax {Sort} Sort ::= KBott
+endmodule
+
+module K-SORT-LATTICE
+  imports K-TOP-SORT
+  imports K-BOTTOM-SORT
+endmodule
+
+module AUTO-CASTS
+  // if this module is imported, the parser automatically
+  // generates, for all sorts, productions of the form:
+  // Sort  ::= Sort ":Sort"  // semantic cast - force the inner term to be `Sort` or a subsort
+  // Sort  ::= Sort "::Sort" // strict cast - force the inner term to be exactly `Sort`. Useful for disambiguation
+  // Sort ::= "{" Sort "}" "::Sort" // synonym for strict cast
+  // Sort  ::= "{" K "}"    ":>Sort" // projection cast. Allows any term to be placed in a context that expects `Sort`
+  // this is part of the mechanism that allows concrete user syntax in K
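  // For example, with a hypothetical user-defined sort Exp:
  //   E:Exp     - E has sort Exp or one of its subsorts
  //   E::Exp    - E has exactly sort Exp
  //   {T}:>Exp  - the arbitrary K term T is projected into sort Exp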
+endmodule
+
+module AUTO-FOLLOW
+  // if this module is imported, the parser automatically
+  // generates a follow restriction for every terminal which is a prefix
+  // of another terminal. This is useful to prevent ambiguities such as:
+  // syntax K ::= "a"
+  // syntax K ::= "b"
+  // syntax K ::= "ab"
+  // syntax K ::= K K
+  // #parse("ab", "K")
+  // In the above example, the terminal "a" is not allowed to be followed by a "b"
+  // because it would turn the terminal into the terminal "ab".
+endmodule
+
+module PROGRAM-LISTS
+  imports SORT-K
+  // if this module is imported, the parser automatically
+  // replaces the default productions for lists:
+  // Es ::= E "," Es [userList("*"), symbol('_,_)]
+  //      | ".Es"    [userList("*"), symbol('.Es)]
+  // into a series of productions more suitable for programs:
+  // Es#Terminator ::= ""      [symbol('.Es)]
+  // Ne#Es ::= E "," Ne#Es     [symbol('_,_)]
+  //         | E Es#Terminator [symbol('_,_)]
+  // Es ::= Ne#Es
+  //      | Es#Terminator      // if the list is *
+endmodule
+
+module RULE-LISTS
+  // if this module is imported, the parser automatically
+  // adds the subsort production to the parsing module only:
+  // Es ::= E        [userList("*")]
+
+endmodule
+
+module RECORD-PRODUCTIONS
+  // if this module is imported, prefix productions of the form
+  // syntax Sort ::= name(Args)
+  // will be able to be parsed with don't-care variables according
+  // to their nonterminal's names
+endmodule
+
+module SORT-PREDICATES
+  // if this module is imported, the Bool sort will be annotated with
+  // syntax Bool ::= isSort(K) [function]
+  // and all sorts will be annotated with
+  // syntax Sort ::= project:Sort(K) [function]
+endmodule
+

Additional Syntax for K Terms in Rules

+

Certain additional features are available when parsing the contents of rules
+and contexts. For more information on each of these, refer to K's
+documentation.

+
module KREWRITE
+  syntax {Sort} Sort ::= Sort "=>" Sort [symbol(#KRewrite)]
+  syntax non-assoc #KRewrite
+  syntax priority #KRewrite > #withConfig
+endmodule
+
+// To be used to parse semantic rules
+module K
+  imports KSEQ-SYMBOLIC
+  imports REQUIRES-ENSURES
+  imports RECORD-PRODUCTIONS
+  imports SORT-PREDICATES
+  imports K-SORT-LATTICE
+  imports AUTO-CASTS
+  imports AUTO-FOLLOW
+  imports KREWRITE
+
+  syntax {Sort} Sort ::= Sort "#as" Sort [symbol(#KAs)]
+  // functions that preserve sorts and can therefore have inner rewrites
+  syntax {Sort} Sort ::= "#fun" "(" Sort ")" "(" Sort ")" [symbol(#fun2), prefer]
+  // functions that do not preserve sort and therefore cannot have inner rewrites
+  syntax {Sort1, Sort2} Sort1 ::= "#fun" "(" Sort2 "=>" Sort1 ")" "(" Sort2 ")" [symbol(#fun3)]
+
+  syntax {Sort1, Sort2} Sort1 ::= "#let" Sort2 "=" Sort2 "#in" Sort1 [symbol(#let)]
+
+  /*@ Set membership over terms. In addition to equality over
+      concrete patterns, K also supports computing equality
+      between a concrete pattern and a symbolic pattern.
+      This is compiled efficiently down to pattern matching,
+      and can be used by putting a term with unbound variables
+      in the left child of :=K or =/=K. Note that this does not
+      bind variables used on the lhs however (although this may
+      change in the future).*/
+
+  syntax Bool ::= left:
+                  K ":=K" K           [function, total, symbol(_:=K_), group(equalEqualK)]
+                | K ":/=K" K          [function, total, symbol(_:/=K_), group(notEqualEqualK)]
+endmodule
+
+// To be used to parse terms in full K
+module K-TERM
+  imports KSEQ-SYMBOLIC
+  imports RECORD-PRODUCTIONS
+  imports SORT-PREDICATES
+  imports K-SORT-LATTICE
+  imports AUTO-CASTS
+  imports AUTO-FOLLOW
+  imports KREWRITE
+endmodule
+

Layout Information

+

When constructing a scanner for use during parsing, often you wish to ignore
+certain types of text, such as whitespace and comments. However, the specific
+syntax which each language must ignore is a little different from language
+to language, and thus you wish to specify it manually. You can do this by
+defining productions of the #Layout sort. For more information, refer to
+K's documentation. However, this module will be implicitly imported if no
+productions are declared of sort #Layout. This module will also be used
+for the purposes of parsing K rules. If you wish to declare a language with
+no layout productions, simply create a sort declaration for the #Layout sort
+in your code (e.g. syntax #Layout).

+
module DEFAULT-LAYOUT
+    syntax #Layout ::= r"(\\/\\*([^\\*]|(\\*+([^\\*\\/])))*\\*+\\/)" // C-style multi-line comments
+                     | r"(\\/\\/[^\\n\\r]*)"                         // C-style single-line comments
+                     | r"([\\ \\n\\r\\t])"                           // Whitespace
+endmodule
+
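For example, a language with shell-style line comments could declare its own
layout productions instead of the defaults (a hedged sketch):

module HASH-COMMENT-LAYOUT
  syntax #Layout ::= r"(#[^\\n\\r]*)"       // "#" single-line comments
                   | r"([\\ \\n\\r\\t])"    // whitespace
endmodule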

Default Configuration

+

If the user has no configuration declaration in their semantics, the below
+configuration declaration will be implicitly imported.

+
module DEFAULT-CONFIGURATION
+  imports BASIC-K
+
+  configuration <k> $PGM:K </k>
+endmodule
+

Parsing Ambiguous Languages

+

On occasion, it may be desirable to parse a language with an ambiguous grammar
+when parsing a program, and perform additional semantic analysis at a later
+time in order to resolve the ambiguities. A good example of this is as a
+substitute for the lexer hack in parsers of the C programming language.

+

The following module contains a declaration for ambiguities in K. Usually,
+an ambiguous parse is an error. However, when you use the --gen-glr-parser
+flag to kast, or the --gen-glr-bison-parser flag to kompile, ambiguities
+instead become instances of the below parametric production, which you can
+then disambiguate as necessary using regular K rules.

+
module K-AMBIGUITIES
+
+  syntax {Sort} Sort ::= amb(Sort, Sort) [symbol(amb)]
+
+endmodule
+
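For example, if a program parses ambiguously between an expression and a
statement reading of the same text, a hedged disambiguation rule (the sorts
Exp and Stmt are hypothetical) could pick one branch:

  rule amb(E:Exp, _:Stmt) => E [anywhere]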

Annotating Parses with Locations

+

Another feature of K's Bison parser is the ability to annotate terms parsed
+with location information about the file and line where they occurred. For
+more information about how to use this, refer to K's documentation. However,
+the below module exists to provide a user syntax for the annotations that
+are generated by the parser.

+
module K-LOCATIONS
+  imports STRING-SYNTAX
+  imports INT-SYNTAX
+
+  // filename, startLine, startCol, endLine, endCol
+  syntax {Sort} Sort ::= #location(Sort, String, Int, Int, Int, Int) [symbol(#location), format(%3)]
+
+endmodule
+

K Prelude

+

The following files, integral to defining semantics in K, are automatically
+required by every definition via this file. This behavior can be disabled
+via kompile --no-prelude; however, semantics will likely break unless
+they provide their own versions of these files, which are assumed to exist
+by the compiler. There are, however, circumstances where passing this flag is
+appropriate, such as if you are manually requiring these files in your
+definition, if your definition was automatically condensed into a single file
+with kompile -E, or if you wish to modify the inner syntax of K by providing
+your own version of these files with different syntax.

+
requires "kast.md"
+requires "domains.md"
+

K Foreign Function Interface

+

The K Foreign Function Interface (FFI) module provides a way to call native
+functions directly from a K semantics using the C ABI. It also provides
+utilities for allocating and deallocating byte buffers with static addresses
+that are suitable for being passed to native code.

+

It is built off of the underlying libffi library
+(https://sourceware.org/libffi/) and is subject to some of the same
+limitations as that library. Bear in mind that, because this library exposes
+a number of unsafe C APIs directly, misuse of the library is likely to lead
+to memory corruption in your interpreter and can cause segmentation faults or
+corrupted term representations that lead to undefined behavior at runtime.

+
requires "domains.md"
+
+module FFI-SYNTAX
+  imports private LIST
+

The FFIType sort is used to declare the native C ABI types of operands passed
+to the #ffiCall function. These types roughly correspond to the types
+declared in ffi.h by libffi.

+
  syntax FFIType ::= "#void" [symbol(#ffi_void)]
+                  | "#uint8" [symbol(#ffi_uint8)]
+                  | "#sint8" [symbol(#ffi_sint8)]
+                  | "#uint16" [symbol(#ffi_uint16)]
+                  | "#sint16" [symbol(#ffi_sint16)]
+                  | "#uint32" [symbol(#ffi_uint32)]
+                  | "#sint32" [symbol(#ffi_sint32)]
+                  | "#uint64" [symbol(#ffi_uint64)]
+                  | "#sint64" [symbol(#ffi_sint64)]
+                  | "#float" [symbol(#ffi_float)]
+                  | "#double" [symbol(#ffi_double)]
+                  | "#uchar" [symbol(#ffi_uchar)]
+                  | "#schar" [symbol(#ffi_schar)]
+                  | "#ushort" [symbol(#ffi_ushort)]
+                  | "#sshort" [symbol(#ffi_sshort)]
+                  | "#uint" [symbol(#ffi_uint)]
+                  | "#sint" [symbol(#ffi_sint)]
+                  | "#ulong" [symbol(#ffi_ulong)]
+                  | "#slong" [symbol(#ffi_slong)]
+                  | "#longdouble" [symbol(#ffi_longdouble)]
+                  | "#pointer" [symbol(#ffi_pointer)]
+                  | "#complexfloat" [symbol(#ffi_complexfloat)]
+                  | "#complexdouble" [symbol(#ffi_complexdouble)]
+                  | "#complexlongdouble" [symbol(#ffi_complexlongdouble)]
+                  | "#struct" "(" List ")" [symbol(#ffi_struct)]
+endmodule
+
+module FFI
+  imports FFI-SYNTAX
+  imports private BYTES
+  imports private STRING
+  imports private BOOL
+  imports private LIST
+  imports private INT
+
+

FFI Calls

+

The #ffiCall functions are designed to call a native C ABI function and
+return a native result. They come in three variants:

+

Non-variadic

+ +

In the first variant, #ffiCall(Address, Args, ArgTypes, ReturnType) takes
+an integer address of a function (which can be obtained from
+#functionAddress), a List of Bytes containing the arguments of the
+function, a List of FFITypes containing the types of the parameters of the
+function, and an FFIType containing the return type of the function, and
+returns the return value of the function as a Bytes.

+
  syntax Bytes ::= "#ffiCall" "(" Int "," List "," List "," FFIType ")" [function, hook(FFI.call)]
+

Variadic

+ +

In the second variant,
+#ffiCall(Address, Args, FixedTypes, VariadicTypes, ReturnType) takes an
+integer address of a function, a List of Bytes containing the arguments
+of the call, a List of FFITypes containing the types of the fixed
+parameters of the function, a List of FFITypes containing the types of the
+variadic parameters of the function, and an FFIType containing the return
+type of the function, and returns the return value of the function as a
+Bytes.

+
  syntax Bytes ::= "#ffiCall" "(" Int "," List "," List "," List "," FFIType ")" [function, hook(FFI.call_variadic)]
+

Generic

+ +

In the third variant,
+#ffiCall(IsVariadic, Address, Args, ArgTypes, NFixed, ReturnType) takes
+a boolean indicating whether the function is variadic, an integer
+address of a function, a List of Bytes containing the arguments of the
+call, a List of FFITypes containing the parameter types of the call
+followed by the types of the variadic arguments of the call, if any, an Int
+indicating how many of the arguments of the call are fixed, and an
+FFIType containing the return type of the function, and returns the return
+value of the function as a Bytes.

+
  syntax Bytes ::= "#ffiCall" "(" Bool "," Int "," List "," List "," Int "," FFIType ")" [function]
+
+  rule #ffiCall(false, Addr::Int, Args::List, Types::List, _, Ret::FFIType) => #ffiCall(Addr, Args, Types, Ret)
+  rule #ffiCall(true, Addr::Int, Args::List, Types::List, NFixed::Int, Ret::FFIType) => #ffiCall(Addr, Args, range(Types, 0, size(Types) -Int NFixed), range(Types, NFixed, 0), Ret)
+

Symbol Lookup

+

The FFI module provides a mechanism to look up any function symbol and return
+that function's address.

+
  syntax Int ::= "#functionAddress" "(" String ")" [function, hook(FFI.address)]
+

Direct Memory Management

+

Most memory used by the LLVM backend to represent terms is managed
+automatically via garbage collection. However, a consequence of this is that
+a particular term does not have a fixed address across its entire lifetime
+in most cases. Sometimes this is undesirable, especially if you intend for
+the address of the memory to be taken by the semantics or if you intend
+to pass this memory directly to native code. As a result, the FFI module
+exposes the following unsafe APIs for memory management. Note that use of
+these APIs leaves the burden of memory management completely on the user,
+and thus misuse of these functions can lead to things like use-after-free
+and other memory corruption bugs.

+

Allocation

+ +

#alloc(Key, Size, Align) will allocate Size bytes with an alignment
+requirement of Align (which must be a power of two), and return it as a
+Bytes term. The memory is uniquely identified by its key and that key will
+be used later to free the memory. The memory is not implicitly freed by garbage
+collection; failure to call #free on the memory at a later date can lead to
+memory leaks.

+
  syntax Bytes ::= "#alloc" "(" KItem "," Int "," Int ")" [function, hook(FFI.alloc)]
+

Addressing

+ +

#address(B) will return an Int representing the address of the first byte of
+B, which must be a Bytes. Unless the Bytes term was allocated by #alloc,
+the return value is unspecified and may not be the same across multiple
+invocations on the same byte buffer. However, it is guaranteed that memory
+allocated by #alloc will have the same address throughout its lifetime.

+
  syntax Int ::= "#address" "(" Bytes ")" [function, hook(FFI.bytes_address)]
+

Deallocation

+ +

#free(Key) will free the memory of the Bytes object that was allocated
+by a previous call to #alloc. If Key was not used in a previous call to
+#alloc, or the memory was already freed, no action is taken. It will generate
+undefined behavior if the Bytes term returned by the previous call to
+#alloc is still referenced by any other term in the configuration or a
+currently evaluating rule. The function returns .K.

+
  syntax K ::= "#free" "(" KItem ")" [function, hook(FFI.free)]
+

Reading

+ +

#nativeRead(Addr, Mem) will read native memory at address Addr into Mem,
+reading exactly lengthBytes(Mem) bytes. This will generate undefined behavior
+if Addr does not point to a readable segment of memory at least
+lengthBytes(Mem) bytes long.

+
  syntax K ::= "#nativeRead" "(" Int "," Bytes ")" [function, hook(FFI.read)]
+

Writing

+ +

#nativeWrite(Addr, Mem) will write the contents of Mem to native memory at
+address Addr. The memory will be read prior to being written, and a write
+will only happen if the memory has a different value than the current value of
+Mem. This will generate undefined behavior if Addr does not point to a
+readable segment of memory at least lengthBytes(Mem) bytes long, or if the
+memory at address Addr has a different value than currently contained in
+Mem, and the memory in question is not writeable.

+
  syntax K ::= "#nativeWrite" "(" Int "," Bytes ")" [function, hook(FFI.write)]
+endmodule
+
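As an illustration of how these pieces fit together, here is a hedged sketch
of a client module that wraps the C library function getpid (the wrapper name,
the choice of getpid, and the little-endian decoding are assumptions):

module FFI-EXAMPLE
  imports FFI
  imports BYTES
  imports INT

  // Call getpid() with no arguments and decode the returned native int.
  syntax Int ::= "#getpid" "(" ")" [function]
  rule #getpid() => Bytes2Int(#ffiCall(#functionAddress("getpid"), .List, .List, #sint), LE, Signed)
endmodule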

Syntax of JSON

+

K provides builtin support for reading/writing to JSON. While the JSON-SYNTAX
+module is not precisely the syntax of JSON (utilizing K's syntax for strings,
+integers, and floating point numbers rather than the syntax used by JSON),
+you can still convert directly to/from the actual syntax of JSON using
+the JSON2String and String2JSON hooks.

+
module JSON-SYNTAX
+    imports INT-SYNTAX
+    imports STRING-SYNTAX
+    imports BOOL-SYNTAX
+    imports FLOAT-SYNTAX
+
+    syntax JSONs   ::= List{JSON,","}      [symbol(JSONs)]
+    syntax JSONKey ::= String
+    syntax JSON    ::= "null"              [symbol(JSONnull)]
+                     | String | Int | Float | Bool
+                     | JSONKey ":" JSON    [symbol(JSONEntry)]
+                     | "{" JSONs "}"       [symbol(JSONObject)]
+                     | "[" JSONs "]"       [symbol(JSONList)]
+endmodule
+

Conversion between JSON and String

+

Given a string written in valid JSON, you can convert it to the JSON
+sort with the String2JSON function. Assuming the user has not extended
+the syntax of the JSON sort with their own constructors, any term of sort
+JSON can also be converted to a String using the JSON2String function.

+
module JSON
+    imports JSON-SYNTAX
+
+    syntax String ::= JSON2String(JSON) [function, symbol(JSON2String), hook(JSON.json2string)]
+
+    syntax JSON ::= String2JSON(String) [function, symbol(String2JSON), hook(JSON.string2json)]
+endmodule
+
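For example (a hedged sketch; the constant name parsedExample is
hypothetical):

module JSON-EXAMPLE
  imports JSON

  // Parses to the term { "name" : "K" , "tags" : [ 1 , 2 , 3 ] }; applying
  // JSON2String to the result yields the same document as a String.
  syntax JSON ::= "parsedExample" [function]
  rule parsedExample => String2JSON("{\"name\": \"K\", \"tags\": [1, 2, 3]}")
endmodule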

Rational Numbers in K

+

K provides support for arbitrary-precision rational numbers represented as a
+quotient between two integers. The sort representing these values is Rat.
+Int is a subsort of Rat, and it is guaranteed that any integer will be
+represented as an Int and can be matched as such on the left hand side
+of rules. K also supports the usual arithmetic operators over rational numbers.

+
module RAT-SYNTAX
+  imports INT-SYNTAX
+  imports private BOOL
+
+  syntax Rat
+
+  syntax Rat ::= Int
+

Arithmetic

+

You can:

+
  • Raise a rational number to any negative or nonnegative integer power.
  • Multiply or divide two rational numbers to obtain a product or quotient.
  • Add or subtract two rational numbers to obtain a sum or difference.
+
  syntax Rat ::= left:
+                 Rat "^Rat" Int [function, total, symbol(_^Rat_),  smtlib(ratpow), hook(RAT.pow)]
+               > left:
+                 Rat "*Rat" Rat [function, total, symbol(_*Rat_),  left, smtlib(ratmul), hook(RAT.mul)]
+               | Rat "/Rat" Rat [function,             symbol(_/Rat_),  left, smtlib(ratdiv), hook(RAT.div)]
+               > left:
+                 Rat "+Rat" Rat [function, total, symbol(_+Rat_),  left, smtlib(ratadd), hook(RAT.add)]
+               | Rat "-Rat" Rat [function, total, symbol(_-Rat_),  left, smtlib(ratsub), hook(RAT.sub)]
+

Comparison

+

You can determine whether two rational numbers are equal or unequal, and
+compare whether one is less than, less than or equal to, greater than, or
+greater than or equal to the other:

+
  syntax Bool ::= Rat  "==Rat" Rat [function, total, symbol(_==Rat_),  smtlib(rateq), hook(RAT.eq)]
+                | Rat "=/=Rat" Rat [function, total, symbol(_=/=Rat_), smtlib(ratne), hook(RAT.ne)]
+                | Rat   ">Rat" Rat [function, total, symbol(_>Rat_),   smtlib(ratgt), hook(RAT.gt)]
+                | Rat  ">=Rat" Rat [function, total, symbol(_>=Rat_),  smtlib(ratge), hook(RAT.ge)]
+                | Rat   "<Rat" Rat [function, total, symbol(_<Rat_),   smtlib(ratlt), hook(RAT.lt)]
+                | Rat  "<=Rat" Rat [function, total, symbol(_<=Rat_),  smtlib(ratle), hook(RAT.le)]
+

Min/Max

+

You can compute the minimum and maximum of two rational numbers:

+
  syntax Rat ::= minRat(Rat, Rat) [function, total, symbol(minRat), smtlib(ratmin), hook(RAT.min)]
+               | maxRat(Rat, Rat) [function, total, symbol(maxRat), smtlib(ratmax), hook(RAT.max)]
+

Conversion to Floating Point

+

You can convert a rational number to the nearest floating point number that
+is representable as a Float with a specified number of precision and exponent
+bits:

+
  syntax Float ::= Rat2Float(Rat, precision: Int, exponentBits: Int) [function]
+endmodule
+
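For example (a hedged sketch; the constant name sumExample is hypothetical):

module RAT-EXAMPLE
  imports RAT

  // Rational results are normalized, so this evaluates to 5 /Rat 6, and
  // (2 /Rat 4) ==Rat (1 /Rat 2) evaluates to true.
  syntax Rat ::= "sumExample" [function]
  rule sumExample => (1 /Rat 2) +Rat (1 /Rat 3)
endmodule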

Implementation of Rational Numbers

+

The remainder of this file consists of an implementation in K of the
+operations listed above. Users of the RAT module should not use any of the
+syntax defined in any of these modules.

+

As a point of reference for users, it is worth noting that rational numbers
+are normalized to a canonical form by this module, with the canonical form
+bearing the property that it is either an Int, or a pair of integers
+I /Rat J such that
+I =/=Int 0 andBool J >=Int 2 andBool gcdInt(I, J) ==Int 1 is always true.

+
module RAT-COMMON
+  imports RAT-SYNTAX
+
+  // invariant of < I , J >Rat : I =/= 0, J >= 2, and I and J are coprime
+  syntax Rat ::= "<" Int "," Int ">Rat" [format(%2 /Rat %4)]
+endmodule
+
+module RAT-SYMBOLIC [symbolic]
+  imports private RAT-COMMON
+  imports ML-SYNTAX
+  imports private BOOL
+
+  rule
+    #Ceil(@R1:Rat /Rat @R2:Rat)
+  =>
+    {(@R2 =/=Rat 0) #Equals true} #And #Ceil(@R1) #And #Ceil(@R2)
+  [simplification]
+endmodule
+
+module RAT-KORE
+  imports private RAT-COMMON
+  imports private K-EQUAL
+
+  /*
+   * equalities
+   */
+
+  // NOTE: the two rules below may not work correctly in non-kore backends
+
+  rule R ==Rat S => R ==K S
+
+  rule R =/=Rat S => R =/=K S
+endmodule
+
+module RAT [private]
+  imports private RAT-COMMON
+  imports public RAT-SYMBOLIC
+  imports public RAT-KORE
+  imports public RAT-SYNTAX
+  imports private INT
+  imports private BOOL
+
+  /*
+   * arithmetic
+   */
+
+  rule < I , I' >Rat +Rat < J , J' >Rat => ((I *Int J') +Int (I' *Int J)) /Rat (I' *Int J')
+  rule I:Int         +Rat < J , J' >Rat => ((I *Int J') +Int J) /Rat J'
+  rule < J , J' >Rat +Rat I:Int         => I +Rat < J , J' >Rat
+  rule I:Int         +Rat J:Int         => I +Int J
+
+  rule < I , I' >Rat *Rat < J , J' >Rat => (I *Int J) /Rat (I' *Int J')
+  rule I:Int         *Rat < J , J' >Rat => (I *Int J) /Rat J'
+  rule < J , J' >Rat *Rat I:Int         => I *Rat < J , J' >Rat
+  rule I:Int         *Rat J:Int         => I *Int J
+
+  rule < I , I' >Rat /Rat < J , J' >Rat => (I *Int J') /Rat (I' *Int J)
+  rule I:Int         /Rat < J , J' >Rat => (I *Int J') /Rat J
+  rule < I , I' >Rat /Rat J:Int         => I /Rat (I' *Int J) requires J =/=Int 0
+  rule I:Int         /Rat J:Int         => makeRat(I, J)      requires J =/=Int 0
+
+  // derived
+
+  rule R -Rat S => R +Rat (-1 *Rat S)
+
+  // normalize
+
+  syntax Rat ::= makeRat(Int, Int)      [function]
+               | makeRat(Int, Int, Int) [function]
+
+  rule makeRat(0, J) => 0 requires J =/=Int 0
+
+  rule makeRat(I, J) => makeRat(I, J, gcdInt(I,J)) requires I =/=Int 0 andBool J =/=Int 0
+
+  // makeRat(I, J, D) is defined when I =/= 0, J =/= 0, D > 0, and D = gcd(I,J)
+  rule makeRat(I, J, D) => I /Int D                       requires J ==Int D // implies J > 0 since D > 0
+  rule makeRat(I, J, D) => < I /Int D , J /Int D >Rat     requires J >Int 0 andBool J =/=Int D
+  rule makeRat(I, J, D) => makeRat(0 -Int I, 0 -Int J, D) requires J <Int 0
+
+  // gcdInt(a,b) computes the gcd of |a| and |b|, which is positive.
+  syntax Int ::= gcdInt(Int, Int) [function, public]
+
+  rule gcdInt(A, 0) => A        requires A >Int 0
+  rule gcdInt(A, 0) => 0 -Int A requires A <Int 0
+  rule gcdInt(A, B) => gcdInt(B, A %Int B) requires B =/=Int 0 // since |A %Int B| = |A| %Int |B|
+
+  /*
+   * exponentiation
+   */
+
+  rule _ ^Rat 0 => 1
+  rule 0 ^Rat N => 0 requires N =/=Int 0
+
+  rule < I , J >Rat ^Rat N => powRat(< I , J >Rat, N) requires N >Int 0
+  rule X:Int        ^Rat N => X ^Int N                requires N >Int 0
+
+  rule X ^Rat N => (1 /Rat X) ^Rat (0 -Int N) requires X =/=Rat 0 andBool N <Int 0
+
+  // exponentiation by squaring
+
+  syntax Rat ::= powRat(Rat, Int) [function]
+
+  // powRat(X, N) is defined when X =/= 0 and N > 0
+  rule powRat(X, 1) => X
+  rule powRat(X, N) => powRat(X *Rat X, N /Int 2) requires N >Int 1 andBool N %Int 2  ==Int 0
+  rule powRat(X, N) => powRat(X, N -Int 1) *Rat X requires N >Int 1 andBool N %Int 2 =/=Int 0
+
+  /*
+   * inequalities
+   */
+
+  rule R >Rat S => R -Rat S >Rat 0 requires S =/=Rat 0
+
+  rule < I , _ >Rat >Rat 0 => I >Int 0
+  rule I:Int        >Rat 0 => I >Int 0
+
+  // derived
+
+  rule R >=Rat S => notBool R <Rat S
+
+  rule R <Rat S => S >Rat R
+
+  rule R <=Rat S => S >=Rat R
+
+  rule minRat(R, S) => R requires R <=Rat S
+  rule minRat(R, S) => S requires S <=Rat R
+
+  rule maxRat(R, S) => R requires R >=Rat S
+  rule maxRat(R, S) => S requires S >=Rat R
+
+  syntax Float ::= #Rat2Float(Int, Int, Int, Int) [function, hook(FLOAT.rat2float)]
+  rule Rat2Float(Num:Int, Prec:Int, Exp:Int) => #Rat2Float(Num, 1, Prec, Exp)
+  rule Rat2Float(< Num, Dem >Rat, Prec, Exp) => #Rat2Float(Num, Dem, Prec, Exp)
+
+endmodule
+

Capture-Aware Substitution in K

+

One of the traditional ways in which functional languages are given operational
+semantics is via substitution. In particular, you can view a function as
+declaring a particular bound variable, the parameter of the function, as well
+as the body of the function, within which both bound and free variables can
+occur, and implement the process of beta-reduction (one of the axioms of the
+lambda calculus) by means of a substitution operator which is aware of the
+difference between free variables and bound variables and prevents variable
+capture.

+

In K this is implemented using two mechanisms: The KVar sort, and the
+binder attribute.

+

The KVar Sort

+

K introduces a new hooked sort, KVar, which the substitution operator
+(defined below) understands in a particular way. The syntax of KVar is the
+same as for sort Id in DOMAINS, but with a different sort name. Similarly,
+some of the same operators are defined over KVar which are defined for Id,
+such as conversion from String to KVar and support for the !Var:KVar
+syntax.

+

A KVar is simply an identifier with special meaning during substitution.
+KVars must begin with a letter or underscore,
+and can be followed by zero or more letters, numbers, or underscores.

+
module KVAR-SYNTAX-PROGRAM-PARSING
+  imports BUILTIN-ID-TOKENS
+
+  syntax KVar ::= r"[A-Za-z\\_][A-Za-z0-9\\_]*"     [prec(1), token]
+                | #LowerId                                             [token]
+                | #UpperId                                             [token]
+endmodule
+
+module KVAR-SYNTAX
+  syntax KVar [token, hook(KVAR.KVar)]
+endmodule
+
+module KVAR-COMMON
+  imports KVAR-SYNTAX
+  imports private STRING
+
+  syntax KVar ::= String2KVar (String) [function, total, hook(STRING.string2token)]
+  syntax KVar ::= freshKVar(Int)    [freshGenerator, function, total, private]
+
+  rule freshKVar(I:Int) => String2KVar("_" +String Int2String(I))
+endmodule
+
+module KVAR
+  imports KVAR-COMMON
+endmodule
+

The binder Attribute

+

A production can be given the attribute binder. Such a production must have
+at least two nonterminals. The first nonterminal from left to right must be of
+sort KVar, and contains the bound variable. The last nonterminal from left
+to right contains the term that is bound. For example, you could describe lambdas
+in the lambda calculus with the production
+syntax Val ::= "lambda" KVar "." Exp [binder].

+

Substitution

+

K provides a hooked implementation of substitution, currently only implemented
+on the Java and LLVM backends. Two variants exist: the first substitutes
+a single KVar for a single KItem. The second takes a Map with KVar
+keys and KItem values, and substitutes each element in the map atomically.

+

Internally, this is implemented in the LLVM backend by a combination of
+de Bruijn indices for bound variables and names for free variables. Free
+variables are also sometimes given a unique numeric identifier in order to
+prevent capture, and the rewriter will automatically assign unique names to
+such identifiers when rewriting finishes. The names assigned will always begin
+with the original name of the variable and be followed by a unique integer
+suffix. However, the names assigned after rewriting finishes might be different
+from the names that would be assigned if rewriting were to halt prematurely,
+for example due to krun --depth.

+
module SUBSTITUTION
+  imports private MAP
+  imports KVAR
+
+  syntax {Sort} Sort ::= Sort "[" KItem "/" KItem "]"  [function, hook(SUBSTITUTION.substOne), impure]
+  syntax {Sort} Sort ::= Sort "[" Map "]"      [function, hook(SUBSTITUTION.substMany), impure]
+endmodule
+
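Putting the pieces together, here is a hedged, trimmed sketch of a
substitution-based lambda calculus in the style of the K tutorial:

module LAMBDA-EXAMPLE
  imports SUBSTITUTION

  syntax Val ::= KVar
               | "lambda" KVar "." Exp  [binder]
  syntax Exp ::= Val
               | Exp Exp                [strict, left]
               | "(" Exp ")"            [bracket]
  syntax KResult ::= Val

  // Beta reduction via capture-avoiding substitution of V for X in E.
  rule (lambda X:KVar . E:Exp) V:Val => E[V / X]
endmodule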

K PL Tutorial

+ +

Here you will learn how to use the K tool to define languages by means of a series of screencast movies. It is recommended to do these in the indicated order, because K features already discussed in a previous language definition will likely not be rediscussed in later definitions. The screencasts follow quite closely the structure of the files under the tutorial folder in the K tool distribution. If you'd rather follow the instructions there and do the tutorial exercises yourself, then go back to https://kframework.org and download the K tool, if you have not done so already. Or, you can first watch the screencasts below and then do the exercises, or do them in parallel.

+

K Overview

+ +

Make sure you watch the K overview video before you do the K tutorial:

+ +

Learning K

+ +

[34'46"] Part 1: Defining LAMBDA

+ +

Here you will learn how to define a very simple functional language in K and the basics of how to use the K tool. The language is a call-by-value variant of lambda calculus with builtins and mu, and its definition is based on substitution.

+ +

[37'07"] Part 2: Defining IMP

+ +

Here you will learn how to define a very simple, prototypical textbook C-like imperative language, called IMP, and several new features of the K tool.

+ +

[33'10"] Part 3: Defining LAMBDA++

+ +

Here you will learn how to define constructs which abruptly change the execution control, as well as how to define functional languages using environments and closures. LAMBDA++ extends the LAMBDA language above with a callcc construct.

+ +

[46'46"] Part 4: Defining IMP++

+ +

Here you will learn how to refine configurations, how to generate fresh elements, how to tag syntactic constructs and rules, how to exhaustively search the space of non-deterministic or concurrent program executions, etc. IMP++ extends the IMP language above with increment, blocks and locals, dynamic threads, input/output, and abrupt termination.

+ +

[17'03"] Part 5: Defining Type Systems

+ +

Here you will learn how to define various kinds of type systems following various approaches or styles using K.

+ +

[??'??"] Part 6: Miscellaneous Other K Features

+ +

Here you will learn a few other K features, and better understand how features that you have already seen work.

+
  • [??'??"] ...
+

Learning Language Design and Semantics using K

+ +

[??'??"] Part 7: SIMPLE: Designing Imperative Programming Languages

+ +

Here you will learn how to design imperative programming languages using K. SIMPLE is an imperative language with functions, threads, pointers, exceptions, multi-dimensional arrays, etc. We first define an untyped version of SIMPLE, then a typed version. For the typed version, we define both a static and a dynamic semantics.

+ +

[??'??"] Part 8: KOOL: Designing Object-Oriented Programming Languages

+ +

Here you will learn how to design object-oriented programming languages using K. KOOL is an object-oriented language that extends SIMPLE with classes and objects. We first define an untyped version of KOOL, then a typed version, with both a dynamic and a static semantics.

+ +

[??'??"] Part 9: FUN: Designing Functional Programming Languages

+ +

Here you will learn how to design functional programming languages using K. FUN is a higher-order functional language with general let, letrec, pattern matching, references, lists, callcc, etc. We first define an untyped version of FUN, then a let-polymorphic type inferencer.

+ +

[??'??"] Part 10: LOGIK: Designing Logic Programming Languages

+ +

Here you will learn how to design a logic programming language using K.

+ +

K overview

+ + +

Go to Youtube mirror, if the above does not work.

+

Go back to https://kframework.org for further links, the K tool and contact information.

+

Learning K

+ +

We start by introducing the basic features of K by means of a series
+of very simple languages. The objective here is neither to learn those
+languages nor to study their underlying paradigm, but simply to learn K.

+
  • LAMBDA: Lambda calculus defined.
  • IMP: A simple imperative language.
  • LAMBDA++: LAMBDA extended with control flow.
  • IMP++: IMP extended with threads and IO.
  • TYPES: LAMBDA type system.
+

Part 1: Defining LAMBDA

+ +

Here you will learn how to define a very simple language in K and the basics
+of how to use the K tool. The language is a variant of call-by-value lambda
+calculus and its definition is based on substitution. Specifically, you will
+learn the following:

+
  • How to define a module.
  • How to define a language syntax.
  • How to use the defined syntax to parse programs.
  • How to import predefined modules.
  • How to define evaluation strategies using strictness attributes.
  • How to define semantic rules.
  • How the predefined generic substitution works.
  • How to generate PDF and HTML documentation from ASCII definitions.
  • How to include builtins (integers and Booleans) into your language.
  • How to define derived language constructs.
+

This folder contains several lessons, each adding new features to LAMBDA.

+

Syntax Modules and Basic K Commands

+ +

Here we define our first K module, which contains the initial syntax of the
+LAMBDA language, and learn how to use the basic K commands.

+

Let us create an empty working folder, and open a terminal window
+(to the left) and an editor window (to the right). We will edit our K
+definition in the right window in a file called lambda.k, and will call
+the K tool commands in the left window.

+

Let us start by defining a K module, containing the syntax of LAMBDA.

+

K modules are introduced with the keywords module ... endmodule.

+

The keyword syntax adds new productions to the syntax grammar, using a
+BNF-like notation.

+

Terminals are enclosed in double-quotes, like strings.

+

You can define multiple productions for the same non-terminal in the same
+syntax declaration using the | separator.

+

Productions can have attributes, which are enclosed in square brackets.

+

The attribute left tells the parser that we want the lambda application to be
+left associative. For example, a b c d will then parse as (((a b) c) d).

+

The attribute bracket tells the parser to not generate a node for the
+parenthesis production in the abstract syntax trees associated to programs.
+In other words, we want to allow parentheses to be used for grouping, but we
+do not want to bother to give them their obvious (ignore) semantics.

+

In our variant of lambda calculus defined here, identifiers and lambda
+abstractions are meant to be irreducible, that is, are meant to be values.
+However, so far Val is just another non-terminal, just like Exp,
+without any semantic meaning. It will get a semantic meaning later.

+
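
For reference, here is a sketch of what this first module might look like;
+it only collects the productions discussed above, and the lambda.k shipped
+with the K distribution may differ in minor details:

+
module LAMBDA
+  syntax Val ::= Id
+               | "lambda" Id "." Exp
+  syntax Exp ::= Val
+               | Exp Exp              [left]
+               | "(" Exp ")"          [bracket]
+endmodule
+
+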

After we are done typing our definition in the file lambda.k, we can kompile
+it with the command:

+
kompile lambda.k
+
+

If we get no errors then a parser has been generated. This parser will be
+called from now on by default by the krun tool. To see whether and how the
+parser works, we are going to write some LAMBDA programs and store them in
+files with the extension .lambda.

+

Let us create a file identity.lambda, which contains the identity lambda
+abstraction:

+
lambda x . x
+
+

Now let us call krun on identity.lambda:

+
krun identity.lambda
+
+

Make sure you call the krun command from the folder containing your language
+definition (otherwise type krun --help to learn how to pass a language
+definition as a parameter to krun). The krun command produces the output:

+
<k>
+  lambda x . x
+</k>
+
+

If you see such an output it means that your program has been parsed (and then
+pretty printed) correctly. If you want to see the internal abstract syntax
+tree (AST) representation of the parsed program, which we call the K AST, then
+type kast in the command instead of krun:

+
kast identity.lambda
+
+

You should normally never need to see this internal representation in your
+K definitions, so do not get scared (yes, it is ugly for humans, but it is
+very convenient for tools).

+

Note that krun placed the program in a <k> ... </k> cell. In K, computations
+happen only in cells. If you do not define a configuration in your definition,
+like we did here, then a configuration will be created automatically for you
+which contains only one cell, the default k cell, which holds the program.

+

Next, let us create a file free-variable-capture.lambda, which contains an
+expression that executes correctly in a substitution-based semantics of
+LAMBDA only if the substitution operation avoids variable capture:

+
a (((lambda x.lambda y.x) y) z)
+
+

Next, file closed-variable-capture.lambda shows an expression which also
+requires a capture-free substitution, but this expression is closed (that is,
+it has no free variables) and all its bound variables are distinct (I believe
+this is the smallest such expression):

+
(lambda z.(z z)) (lambda x.lambda y.(x y))
+
+

Finally, the file omega.lambda contains the classic omega combinator
+(or closed expression), which is the smallest expression which loops forever
+(not now, but after we define the semantics of LAMBDA):

+
(lambda x.(x x)) (lambda x.(x x))
+
+

Feel free to define and parse several other LAMBDA programs to get a feel for
+how the parser works. Parse also some incorrect programs, to see how the
+parser generates error messages.

+

In the next lesson we will see how to define semantic rules that iteratively
+rewrite expressions over the defined syntax until they evaluate to a result.
+This way, we obtain our first programming language defined using K.

+

Go to Lesson 2, LAMBDA: Module Importing, Rules, Variables

+

MOVIE (out of date) [4'07"]

+

Module Importing, Rules, Variables

+ +

We here learn how to include a predefined module (SUBSTITUTION), how to
+use it to define a K rule (the characteristic rule of lambda calculus),
+and how to make proper use of variables in rules.

+

Let us continue our lambda.k definition started in the previous lesson.

+

The requires keyword takes a .k file containing language features that
+are needed for the current definition, which can be found in the
+k-distribution/include/kframework/builtin folder. Thus, the command

+
requires "substitution.k"
+
+

says that the subsequent definition of LAMBDA needs the generic substitution,
+which is predefined in file substitution.k under the folder
+k-distribution/include/kframework/builtin. Note that substitution can be defined itself in K,
+although it uses advanced features that we have not discussed yet in this
+tutorial, so it may not be easy to understand now.

+

Using the imports keyword, we can now modify LAMBDA to import the module
+SUBSTITUTION, which is defined in the required substitution.k file.

+

Now we have all the substitution machinery available for our definition.
+However, since our substitution is generic, it cannot know which language
+constructs bind variables, and what counts as a variable; however, this
+information is critical in order to correctly solve the variable capture
+problem. Thus, you have to tell the substitution that your lambda construct
+is meant to be a binder, and that your Id terms should be treated as variables
+for substitution. The former is done using the attribute binder.
+By default, binder binds all the variables occurring anywhere in the first
+argument of the corresponding syntactic construct within its other arguments;
+you can configure which arguments are bound where, but that will be discussed
+in subsequent lectures. To tell K which terms are meant to act as variables
+for binding and substitution, we have to explicitly subsort the desired syntactic
+categories to the builtin KVariable sort.

+

Now we are ready to define our first K rule. Rules are introduced with the
+keyword rule and make use of the rewrite symbol, =>. In our case,
+the rule defines the so-called lambda calculus beta-reduction, which
+makes use of substitution in its right-hand side, as shown in lambda.k.

+
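
Putting the pieces of this lesson together, the definition might now look
+roughly as follows; this is only a sketch of lambda.k, shown to fix the
+ideas (the binder attribute, the KVariable subsorting, and the
+beta-reduction rule):

+
requires "substitution.k"
+
+module LAMBDA
+  imports SUBSTITUTION
+  syntax Val ::= Id
+               | "lambda" Id "." Exp  [binder]
+  syntax Exp ::= Val
+               | Exp Exp              [left]
+               | "(" Exp ")"          [bracket]
+  syntax KVariable ::= Id
+  rule (lambda X:Id . E:Exp) V:Val => E[V / X]
+endmodule
+
+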

By convention, variables that appear in rules start with a capital letter
+(the current implementation of the K tool may even enforce that).

+

Variables may be explicitly tagged with their syntactic category (also called
+sort). If tagged, the matching term will be checked at run-time for
+membership to the claimed sort. If not tagged, then no check will be made.
+The former is safer, but involves the generation of a side condition to the
+rule, so the resulting definition may execute slightly slower overall.

+

In our rule in lambda.k we tagged all variables with their sorts, so we chose
+the safest path. Only the V variable really needs to be tagged there,
+because we can prove (using other means, not the K tool, as the K tool is not
+yet concerned with proving) that the first two variables will always have the
+claimed sorts whenever we execute any expression that parses within our
+original grammar.

+

Let us compile the definition and then run some programs. For example,

+
krun closed-variable-capture.lambda
+
+

yields the output

+
<k>
+  lambda y . ((lambda x . (lambda y . (x  y))) y)
+</k> 
+
+

Notice that only certain programs reduce (some even yield non-termination,
+such as omega.lambda), while others do not. For example,
+free-variable-capture.lambda does not reduce its second argument expression
+to y, as we would expect. This is because the K rewrite rules between syntactic
+terms do not apply anywhere they match. They only apply where they have been
+given permission to apply by means of appropriate evaluation strategies of language
+constructs, which is done using strictness attributes, evaluation contexts,
+heating/cooling rules, etc., as discussed in the next lessons.

+

The next lesson will show how to add to LAMBDA the desired evaluation strategies
+using strictness attributes.

+

Go to Lesson 3, LAMBDA: Evaluation Strategies using Strictness

+

MOVIE (out of date) [4'03"]

+

Evaluation Strategies using Strictness

+ +

Here we learn how to use the K strict attribute to define desired evaluation
+strategies. We will also learn how to tell K which terms are already
+evaluated, so it does not attempt to evaluate them anymore and treats them
+internally as results of computations.

+

Recall from the previous lecture that the LAMBDA program
+free-variable-capture.lambda was stuck, because K was not given permission
+to evaluate the arguments of the lambda application construct.

+

You can use the attribute strict to tell K that the corresponding construct
+has a strict evaluation strategy, that is, that its arguments need to be
+evaluated before the semantics of the construct applies. The order of
+argument evaluation is purposely unspecified when using strict, and indeed
+the K tool allows us to detect all possible non-deterministic behaviors that
+result from such intended underspecification of evaluation strategies. We will
+learn how to do that when we define the IMP language later in this tutorial;
+we will also learn how to enforce a particular order of evaluation.

+

In order for the above strictness declaration to work effectively and
+efficiently, we need to tell the K tool which expressions are meant to be
+results of computations, so that it will not attempt to evaluate them anymore.
+One way to do it is to make Val a syntactic subcategory of the builtin
+KResult syntactic category. Since we use the same K parser to also parse
+the semantics, we use the same syntax keyword to define additional syntax
+needed exclusively for the semantics (like KResults). See lambda.k.

+
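
Concretely, the two changes of this lesson amount to something like the
+following sketch: the application production gains the strict attribute,
+and Val is subsorted to KResult:

+
syntax Exp ::= Val
+             | Exp Exp      [left, strict]
+             | "(" Exp ")"  [bracket]
+syntax KResult ::= Val
+
+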

Compile again and then run some programs. They should all work as expected.
+In particular, free-variable-capture.lambda now evaluates to a y.

+

We now got a complete and working semantic definition of call-by-value
+lambda-calculus. While theoretically correct, our definition is not
+easy to use and disseminate. In the next lessons we will learn how to
+generate formatted documentation for LAMBDA and how to extend LAMBDA
+in order to write human readable and interesting programs.

+

Go to Lesson 4, LAMBDA: Generating Documentation; Latex Attributes.

+

MOVIE (out of date) [2'20"]

+

Generating Documentation; Latex Attributes

+ +

In this lesson we learn how to generate formatted documentation from K
+language definitions. We also learn how to use Latex attributes to control
+the formatting of language constructs, particularly of ones which have a
+mathematical flavor and we want to display accordingly.

+

To enhance readability, we may want to replace the keyword lambda by the
+mathematical lambda symbol in the generated documentation. We can control
+the way we display language constructs in the generated documentation
+by associating Latex attributes with them.

+

This is actually quite easy. All we have to do is to associate a latex
+attribute to the production defining the construct in question, following
+the Latex syntax for defining new commands (or macros).

+

In our case, we associate the attribute latex(\lambda{#1}.{#2}) to the
+production declaring the lambda abstraction (recall that in Latex, #n refers
+to the n-th argument of the defined new command).

+
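
The lambda abstraction production might thus be declared as follows
+(a sketch; the binder attribute was added in Lesson 2):

+
syntax Val ::= "lambda" Id "." Exp  [binder, latex(\lambda{#1}.{#2})]
+
+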

We will later see, in Lesson 9, that we can add arbitrarily complex Latex
+comments and headers to our language definitions, which give us maximum
+flexibility in formatting our language definitions.

+

Now we have a simple programming language, with a nice documentation. However,
+it is not easy to write interesting programs in this language. Almost all
+programming languages build upon existing data-types and libraries. The K
+tool provides a few of these (and you can add more).

+

In the next lesson we show how we can add builtin integers and Booleans to
+LAMBDA, so we can start to evaluate meaningful expressions.

+

Go to Lesson 5, LAMBDA: Adding Builtins; Side Conditions.

+

MOVIE (out of date) [3'13"]

+

Adding Builtins; Side Conditions

+ +

We have already added the builtin identifiers (sort Id) to LAMBDA expressions,
+but those had no operations on them. In this lesson we add integers and
+Booleans to LAMBDA, and extend the builtin operations on them into
+corresponding operations on LAMBDA expressions. We will also learn how to add
+side conditions to rules, to limit the number of instances where they can
+apply.

+

The K tool provides several builtins, which are automatically included in all
+definitions. These can be used in the languages that we define, typically by
+including them in the desired syntactic categories. You can also define your
+own builtins in case the provided ones are not suitable for your language
+(e.g., the provided builtin integers and the operations on them have arbitrary
+precision, which may not match the integers of the language you define).

+

For example, to add integers and Booleans as values to our LAMBDA, we have to
+add the productions

+
syntax Val ::= Int | Bool
+
+

Int and Bool are the nonterminals that correspond to these builtins.

+

To make use of these builtins, we have to add some arithmetic operation
+constructs to our language. We prefer to use the conventional infix notation
+for these, and the usual precedences (i.e., multiplication and division bind
+tighter than addition, which binds tighter than relational operators).
+Inspired by SDF, we use > instead of
+| to state that all the previous constructs bind tighter than all the
+subsequent ones. See lambda.k.

+

The only thing left is to link the LAMBDA arithmetic operations to the
+corresponding builtin operations, when their arguments are evaluated.
+This can be easily done using trivial rewrite rules, as shown in lambda.k.
+In general, the K tool attempts to uniformly add the corresponding builtin
+name as a suffix to all the operations over builtins. For example, the
+addition over integers is an infix operation named +Int.

+
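
Building on the Val production above, here is a sketch of how the arithmetic
+syntax and the rules over builtins might look; the precise productions and
+attributes in lambda.k may differ slightly, but the shape is the same:

+
syntax Exp ::= Exp "*" Exp   [strict, left]
+             | Exp "/" Exp   [strict]
+             > Exp "+" Exp   [strict, left]
+             > Exp "<=" Exp  [strict]
+
+rule I1 * I2  => I1 *Int I2
+rule I1 / I2  => I1 /Int I2
+rule I1 + I2  => I1 +Int I2
+rule I1 <= I2 => I1 <=Int I2
+
+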

Compile the new lambda.k definition and evaluate some simple arithmetic
+expressions. For example, if arithmetic.lambda is (1+2*3)/4 <= 1, then

+
krun arithmetic.lambda
+
+

yields, as expected, true. Note that the parser took the desired operation
+precedence into account.

+

Let us now try to evaluate an expression which performs a wrong computation,
+namely a division by zero. Consider the expression arithmetic-div-zero.lambda
+which is 1/(2/3). Since division is strict and 2/3 evaluates to 0, this
+expression reduces to 1/0, which further reduces to 1 /Int 0 by the rule for
+division, which is now stuck (with the current back-end to the K tool).

+

In fact, depending upon the back-end that we use to execute K definitions and
+in particular to evaluate expressions over builtins, 1 /Int 0 can evaluate to
+anything. It just happens that the current back-end keeps it as an
+irreducible term. Other K back-ends may reduce it to an explicit error
+element, or issue a segmentation fault followed by a core dump, or throw an
+exception, etc.

+

To avoid requesting the back-end to perform an illegal operation, we may use a
+side condition in the rule of division, to make sure it only applies when the
+denominator is non-zero.

+
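
With such a side condition, the division rule might look as follows
+(requires introduces the side condition and =/=Int is the builtin
+inequality on integers):

+
rule I1 / I2 => I1 /Int I2  requires I2 =/=Int 0
+
+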

Like in other operational formalisms, the role of the K side
+conditions is to filter the instances of a rule, that is, to restrict when it
+can apply. The notion
+of a side condition comes from logics, where a sharp distinction is made
+between a side condition (cheap) and a premise (expensive). Premises are
+usually resolved using further (expensive) logical derivations, while side
+conditions are simple (cheap) conditions over the rule meta-variables within
+the underlying mathematical domains (which in K can be extended by the user,
+as we will see in future lessons). Regarded as a logic, K derives rewrite
+rules from other rewrite rules; therefore, the K side conditions cannot
+contain other rewrites in them (using =>). This contrasts other rewrite
+engines, for example Maude, which
+allow conditional rules with rewrites in conditions.
+The rationale behind this deliberate restriction in K is twofold:

+
    +
  • On the one hand, general conditional rules require a complex, and thus slower
    +rewrite engine, which starts recursive (sometimes exhaustive) rewrite sessions
    +to resolve the rewrites in conditions. In contrast, the side conditions in K
    +can be evaluated efficiently by back-ends, for example by evaluating builtin
    +expressions and/or by calling builtin functions.
  • +
  • On the other hand, the semantic definitional philosophy of K is that rule
    +premises are unnecessary, so there is no need to provide support for them.
  • +
+

Having builtin arithmetic is useful, but writing programs with just lambda
+and arithmetic constructs is still a pain. In the next two lessons we will
+add conditional (if_then_else) and binding (let and letrec) constructs,
+which will allow us to write nicer programs.

+

Go to Lesson 6, LAMBDA: Selective Strictness; Anonymous Variables.

+

MOVIE (out of date) [4'52"]

+

Selective Strictness; Anonymous Variables

+ +

We here show how to define selective strictness of language constructs,
+that is, how to state that certain language constructs are strict only
+in some arguments. We also show how to use anonymous variables.

+

We next define a conditional if construct, which takes three arguments,
+evaluates only the first one, and then reduces to either the second or the
+third, depending on whether the first one evaluated to true or to false.

+

K allows us to define selective strictness using the same strict attribute,
+but passing it a list of numbers. The numbers correspond to the arguments
+in which we want the defined construct to be strict. In our case,

+
syntax Exp ::= "if" Exp "then" Exp "else" Exp   [strict(1)]
+
+

states that the conditional construct is strict in the first argument.

+

We can now assume that its first argument will eventually reduce to a value, so
+we only write the following two semantic rules:

+
rule if true  then E else _ => E
+rule if false then _ else E => E
+
+

Thus, we assume that the first argument evaluates to either true or false.

+

Note the use of the anonymous variable _. We use such variables purely for
+structural reasons, to state that something is there but we don't care what.
+An anonymous variable is therefore completely equivalent to a normal variable
+which is unsorted and different from all the other variables in the rule. If
+you use _ multiple times in a rule, they will all be considered distinct.

+

Compile lambda.k and write and execute some interesting expressions making
+use of the conditional construct. For example, the expression

+
if 2<=1 then 3/0 else 10
+
+

evaluates to 10 and will never evaluate 3/0, thus avoiding an unwanted
+division-by-zero.

+

In the next lesson we will introduce two new language constructs, called
+let and letrec and conventionally found in functional programming
+languages, which will allow us to already write interesting LAMBDA programs.

+

Go to Lesson 7, LAMBDA: Derived Constructs; Extending Predefined Syntax.

+

MOVIE (out of date) [2'14"]

+

Derived Constructs, Extending Predefined Syntax

+ +

In this lesson we will learn how to define derived language constructs, that
+is, ones whose semantics is defined completely in terms of other language
+constructs. We will also learn how to add new constructs to predefined
+syntactic categories.

+

When defining a language, we often want certain language constructs to be
+defined in terms of other constructs. For example, a let-binding construct
+of the form

+
let x = e in e'
+
+

is nothing but syntactic sugar for

+
(lambda x . e') e
+
+

This can be easily achieved with a rule, as shown in lambda.k.

+
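
The desugaring might look as follows (a sketch of the production and rule;
+lambda.k may additionally tag the rule, e.g., as a macro):

+
syntax Exp ::= "let" Id "=" Exp "in" Exp
+rule let X = E in E' => (lambda X . E') E
+
+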

Compile lambda.k and write some programs using let binders.

+

For example, consider a lets.lambda program which takes arithmetic.lambda
+and replaces each integer by a let-bound variable. It should evaluate to
+true, just like the original arithmetic.lambda.

+

Let us now consider a more interesting program, namely one that calculates the
+factorial of 10:

+
let f = lambda x . (
+        (lambda t . lambda x . (t t x))
+        (lambda f . lambda x . (if x <= 1 then 1 else (x * (f f (x + -1)))))
+        x
+      )
+in (f 10)
+
+

This program follows a common technique to define fixed points in untyped
+lambda calculus, based on passing a function to itself.

+

We may not like to define fixed-points following the approach above, because
+it requires global changes in the body of the function meant to be recursive,
+basically to pass it to itself (f f in our case above). The approach below
+isolates the fixed-point aspect of the function in a so-called fixed-point
+combinator, which we call fix below, and then applies it to the function
+defining the body of the factorial, without any changes to it:

+
let fix = lambda f . (
+          (lambda x . (f (lambda y . (x x y))))
+          (lambda x . (f (lambda y . (x x y))))
+        )
+in let f = fix (lambda f . lambda x .
+                (if x <= 1 then 1 else (x * (f (x + -1)))))
+   in (f 10)
+
+

Although the above techniques are interesting and powerful (indeed, untyped
+lambda calculus is in fact Turing complete), programmers will probably not
+like to write programs this way.

+

We can easily define a more complex derived construct, called letrec and
+conventionally encountered in functional programming languages, whose semantics
+captures the fixed-point idea above. In order to keep its definition simple
+and intuitive, we define a simplified variant of letrec, namely one which only
+allows us to define one recursive one-argument function. See lambda.k.

+

There are two interesting observations here.

+

First, note that we have already in-lined the definition of the fix
+combinator in the definition of the factorial, to save one application of the
+beta reduction rule (and the involved substitution steps). We could have
+in-lined the definition of the remaining let, too, but we believe that the
+current definition is easier to read.

+

Second, note that we extended the predefined Id syntactic category with two
+new constants, $x and $y. The predefined identifiers cannot start with
+$, so programs that will be executed with this semantics cannot possibly
+contain the identifiers $x and $y. In other words, by adding them to Id they
+become indirectly reserved for the semantics. This is indeed desirable,
+because any possible uses of $x in the body of the function defined using
+letrec would be captured by the lambda $x declaration in the definition
+of letrec.

+

Using letrec, we can now write the factorial program as elegantly as it can
+be written in a functional language:

+
letrec f x = if x <= 1 then 1 else (x * (f (x + -1)))
+in (f 10)
+
+

In the next lesson we will discuss an alternative definition of letrec, based
+on another binder, mu, specifically designed to define fixed points.

+

Go to Lesson 8, LAMBDA: Multiple Binding Constructs.

+

MOVIE (out of date) [5'10"]

+

Multiple Binding Constructs

+ +

Here we learn how multiple language constructs that bind variables can
+coexist. We will also learn about or recall another famous binder besides
+lambda, namely mu, which can be used to elegantly define all kinds of
+interesting fixed-point constructs.

+

The mu binder has the same syntax as lambda, except that it replaces
+lambda with mu.

+

Since mu is a binder, in order for substitution to know how to deal with
+variable capture in the presence of mu, we have to tell it that mu is a
+binding construct, just like lambda. While we are at it, we also give mu
+its desired latex attribute.

+
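
Concretely, the declaration of mu might look like this sketch, mirroring
+that of lambda:

+
syntax Exp ::= "mu" Id "." Exp  [binder, latex(\mu{#1}.{#2})]
+
+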

The intuition for

+
mu x . e
+
+

is that it reduces to e, but each free occurrence of x in e behaves
+like a pointer that points back to mu x . e.

+

With that in mind, let us postpone the definition of mu and instead redefine
+letrec F X = E in E' as a derived construct, assuming mu available. The
+idea is to simply regard F as a fixed-point of the function

+
lambda X . E
+
+

that is, to first calculate

+
mu F . lambda X . E
+
+

and then to evaluate E' where F is bound to this fixed-point:

+
let F = mu F . lambda X . E in E'
+
+

This new definition of letrec may still look a bit tricky, particularly
+because F is bound twice, but it is much simpler and cleaner than our
+previous definition. Moreover, now it is done in a type-safe manner
+(this aspect goes beyond our objective in this tutorial).

+

Let us now define the semantic rule of mu.

+

The semantics of mu is actually disarmingly simple. We just have to
+substitute mu X . E for each free occurrence of X in E:

+
mu X . E => E[(mu X . E) / X]
+
+

Compile lambda.k and execute some recursive programs. They should be now
+several times faster. Write a few more recursive programs, for example ones
+for calculating the Ackermann function, for calculating the number of moves
+needed to solve the Hanoi tower problem, etc.

+

We have defined our first programming language in K, which allows us to
+write interesting functional programs. In the next lesson we will learn how
+to fully document our language definition, in order to disseminate it, to ship
+it to colleagues or friends, to publish it, to teach it, and so on.

+

Go to Lesson 9, LAMBDA: A Complete and Commented Definition.

+

MOVIE (out of date) [2'40"]

+

A Complete and Documented K Definition

+ +

In this lesson you will learn how to add formal comments to your K definition,
+in order to nicely document it. The generated document can be then used for
+various purposes: to ease understanding the K definition, to publish it,
+to send it to others, etc.

+

The K tool allows a literate programming style, where the executable
+language definition can be documented by means of annotations. One such
+annotation is the latex(_) annotation, where you can specify how to format
+the given production when producing Latex output via the --output latex
+option to krun, kast, and kprove.

+

There are three types of comments, which we discuss next.

+

Ordinary comments

+ +

These use // or /* ... */, like in various programming languages. These
+comments are completely ignored.

+

Document annotations

+ +

Use the @ symbol right after // or /* in order for the comment to be
+considered an annotation and thus be processed by the K tool when it
+generates documentation.

+

As an example, we can go ahead and add such an annotation at the beginning
+of the LAMBDA module, explaining how we define the syntax of this language.

+

Header annotations

+ +

Use the ! symbol right after // or /* if you want the comment to be
+considered a header annotation, that is, one which goes before
+\begin{document} in the generated Latex. You typically need header
+annotations to include macros, or to define a title, etc.

+

As an example, let us set a Latex length and then add a title and an
+author to this K definition.

+

Compile the documentation and take a look at the results. Notice the title.

+

Feel free to now add lots of annotations to lambda.k.

+

Then compile and check the result. Depending on your PDF viewer, you
+may also see a nice click-able table of contents, with all the sections
+of your document. This could be quite convenient when you define large
+languages, because it helps you jump to any part of the semantics.

+

Tutorial 1 is now complete. The next tutorial will take us through the
+definition of a simple imperative language and will expose us to more
+features of the K framework and the K tool.

+

MOVIE (out of date) [6'07"]

+

Part 2: Defining IMP

+ +

Here you will learn how to define a very simple imperative language in K
+and the basics of how to work with configurations, cells, and computations.
+Specifically, you will learn the following:

+
    +
  • How to define languages using multiple modules.
  • +
  • How to define sequentially strict syntactic constructs.
  • +
  • How to use K's syntactic lists.
  • +
  • How to define, initialize and configure configurations.
  • +
  • How the language syntax is swallowed by the builtin K syntactic category.
  • +
  • The additional syntax of the K syntactic category.
  • +
  • How the strictness annotations are automatically desugared into rules.
  • +
  • The first steps of the configuration abstraction mechanism.
  • +
+

Like in the previous tutorial, this folder contains several lessons, each
+adding new features to IMP. Do them in order. Also, make sure you completed
+and understood the previous tutorial.

+

Defining a More Complex Syntax

+ +

Here we learn how to define a more complex language syntax than LAMBDA's,
+namely the C-like syntax of IMP. Also, we will learn how to define languages
+using multiple modules, because we are going to separate IMP's syntax from
+its semantics using modules. Finally, we will also learn how to use K's
+builtin support for syntactic lists.

+

The K tool provides modules for grouping language features. In general, we
+can organize our languages in arbitrarily complex module structures.
+While there are no rigid requirements or even guidelines for how to group
+language features in modules, we often separate the language syntax from the
+language semantics in different modules.

+

In our case here, we start by defining two modules, IMP-SYNTAX and IMP, and
+import the first in the second, using the keyword imports. As their names
+suggest, we will place all IMP's syntax definition in IMP-SYNTAX and all its
+semantics in IMP.

+

Note, however, that K does no more than simply include all the
+contents of the imported module in the one which imports it (making sure
+that everything is only kept once, even if you import it multiple times).
+In other words, there is currently nothing fancy in the K tool's module system.

+

IMP has six syntactic categories, as shown in imp.k: AExp for arithmetic
+expressions, BExp for Boolean expressions, Block for blocks, Stmt for
+statements, Pgm for programs and Ids for comma-separated lists of
+identifiers. Blocks are special statements, whose role is to syntactically
+constrain the conditional statement and the while loop statement to only
+take blocks as branches and body, respectively.

+

There is nothing special about arithmetic and Boolean expressions. They
+are given the expected strictness attributes, except for <= and &&,
+for demonstration purposes.

+

The <= is defined to be seqstrict, which means that it evaluates its
+arguments in order, from left-to-right (recall that the strict operators
+can evaluate their arguments in any, fully interleaved, orders). Like
+strict, the seqstrict annotation can also be configured; for example, one
+can specify in which arguments and in what order. By default, seqstrict
+refers to all the arguments, in their left-to-right order. In our case here,
+it is equivalent to seqstrict(1 2).

+

The && is only strict in its first argument, because we will give it a
+short-circuited semantics (its second argument will only be evaluated when
+the first evaluates to true). Recall the K tool also allows us to associate
+LaTex attributes to constructs, telling the document generator how to display
+them. For example, we associate with <= the attribute latex({#1}\leq{#2}),
+which makes it be displayed \leq everywhere in the generated LaTex
+documentation.

+
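
As an illustration, the expression part of IMP-SYNTAX might look roughly as
+follows; this is a trimmed sketch, and imp.k itself has a few more
+productions and attributes:

+
syntax AExp ::= Int | Id
+              | AExp "/" AExp   [left, strict]
+              > AExp "+" AExp   [left, strict]
+              | "(" AExp ")"    [bracket]
+syntax BExp ::= Bool
+              | AExp "<=" AExp  [seqstrict, latex({#1}\leq{#2})]
+              | "!" BExp        [strict]
+              > BExp "&&" BExp  [left, strict(1)]
+              | "(" BExp ")"    [bracket]
+
+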

In this tutorial we take the liberty of giving the various constructs
+parsing precedences that we have already tested and know to work well, so that
+we can focus on the semantics here instead of syntax. In practice, though,
+you typically need to experiment with precedences until you obtain the desired
+parser.

+

Blocks are defined using curly brackets, and they can either be empty or
+hold a statement.

+

Nothing special about the IMP statements. Note that ; is an assignment
+statement terminator, not a statement separator. Note also that blocks are
+special statements.

+

An IMP program declares a comma-separated list of variables using the keyword
+int like in C, followed by a semicolon ;, followed by a statement.
+Syntactically, the idea here is that we can wrap any IMP program within a
+main(){...} function and get a valid C program. IMP does not allow variable
+declarations anywhere else except through this construct, at the top-level of
+the program. Other languages provided with the K distribution (see, e.g., the
+IMP++ language also discussed in this tutorial) remove this top-level program
+construct of IMP and add instead variable declaration as a statement construct,
+which can be used anywhere in the program, not only at the top level.

+

Note how we defined the comma-separated list of identifiers using
+List{Id,","}. The K tool provides builtin support for generic syntactic
+lists. In general,

+
syntax B ::= List{A,T}
+
+

declares a new non-terminal, B, corresponding to T-separated sequences of
+elements of A, where A is a non-terminal and T is a terminal. These
+lists can also be empty, that is, IMP programs declaring no variable are also
+allowed (e.g., int; {} is a valid IMP program). To instantiate and use
+the K builtin lists, you should alias each instance with a (typically fresh)
+non-terminal in your syntax, like we do with the Ids nonterminal.

+
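
In imp.k, this aliasing amounts to a one-line declaration along these lines:

+
syntax Ids ::= List{Id,","}
+
+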

Like with other K features, there are ways to configure the syntactic lists,
+but we do not discuss them here.

+

Recall from Tutorial 1 (LAMBDA) that in order for strictness to work well
+we also need to tell K which computations are meant to be results. We do
+this as well now, in the module IMP: integers and Booleans are K results.

+
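
This amounts to one more declaration in the IMP module (sketched):

+
syntax KResult ::= Int | Bool
+
+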

Kompile imp.k and test the generated parser by running some programs.
+Since IMP is a fragment of C, you may want to select the C mode in your
+editor when writing these programs. This will also give you the feel that
+you are writing programs in a real programming language.

+

For example, here is sum.imp, which sums in sum all numbers up to n:

+
int n, sum;
+n = 100;
+sum=0;
+while (!(n <= 0)) {
+  sum = sum + n;
+  n = n + -1;
+}
+
+

Now krun it and see how it looks parsed in the default k cell.

+

The program collatz.imp tests the Collatz conjecture for all numbers up to
+m and accumulates the total number of steps in s:

+
int m, n, q, r, s;
+m = 10;
+while (!(m<=2)) {
+  n = m;
+  m = m + -1;
+  while (!(n<=1)) {
+    s = s+1;
+    q = n/2;
+    r = q+q+1;
+    if (r<=n) {
+      n = n+n+n+1;         // n becomes 3*n+1 if odd
+    } else {n=q;}          //        or   n/2 if even
+  }
+}
+
+

Finally, program primes.imp counts in s all the prime numbers up to m:

+
int i, m, n, q, r, s, t, x, y, z;
+m = 10;  n = 2;
+while (n <= m) {
+  // checking primality of n and writing t to 1 or 0
+  i = 2;  q = n/i;  t = 1;
+  while (i<=q && 1<=t) {
+    x = i;
+    y = q;
+    // fast multiplication (base 2) algorithm
+    z = 0;
+    while (!(x <= 0)) {
+      q = x/2;
+      r = q+q+1;
+      if (r <= x) { z = z+y; } else {}
+      x = q;
+      y = y+y;
+    } // end fast multiplication
+    if (n <= z) { t = 0; } else { i = i+1;  q = n/i; }
+  } // end checking primality
+  if (1 <= t) { s = s+1; } else {}
+  n = n+1;
+}
+
+

All the programs above will run once we define the semantics of IMP. If you
+want to execute them now, wrap them in a main(){...} function and compile
+them and run them with your favorite C compiler.

+

Before we move to the K semantics of IMP, we would like to make some
+clarifications regarding the K builtin parser, kast. Although it is quite
+powerful, you should not expect magic from it! While the K parser can parse
+many non-trivial languages (see, for example, the KOOL language in
+pl-tutorial/2_languages in the K distribution), it was
+never meant to be a substitute for real parsers. We often call the syntax
+defined in K the syntax of the semantics, to highlight the fact that its
+role is to serve as a convenient notation when writing the semantics, not
+necessarily as a means to define concrete syntax of arbitrarily complex
+programming languages. See the KERNELC language for an example of how to connect an external parser for concrete syntax to
+the K tool.

+

The above being said, we strongly encourage you to strive to make the
+builtin parser work with your desired language syntax! Do not give up
+simply because you don't want to deal with syntactic problems. On the
+contrary, fight for your syntax! If you really cannot define your desired
+syntax because of tool limitations, we would like to know. Please tell us.

+

Until now we have only seen default configurations. In the next lesson we
+will learn how to define a K custom configuration.

+

Go to Lesson 2, IMP: Defining a Configuration.

+

MOVIE (out of date) [09'15"]

+

Defining a Configuration

+ +

Here we learn how to define a configuration in K. We also learn how to
+initialize and how to display it.

+

As explained in the overview presentation on K, configurations are quite
+important, because all semantic rules match and apply on them.
+Moreover, they are the backbone of configuration abstraction, which allows
+you to only mention the relevant cells in each semantic rule, the rest of
+the configuration context being inferred automatically. The importance of
+configuration abstraction will become clear when we define more complex
+languages (even in IMP++). IMP does not really need it. K configurations
+are constructed making use of cells, which are labeled and can be arbitrarily
+nested.

+

Configurations are defined with the keyword configuration. Cells are
+defined using an XML-ish notation stating clearly where the cell starts
+and where it ends.

+

While not enforced by the tool, we typically like to put the entire
+configuration in a top-level cell, called T. So let's define it:

+
configuration <T>...</T>
+
+

Cells can have other cells inside. In our case of IMP, we need a cell to
+hold the remaining program, cell which we typically call k, and a cell to
+hold the program state. Let us add them:

+
configuration <T> <k>...</k> <state>...</state> </T>
+
+

K allows us to also specify how to initialize a configuration at the same
+time with declaring the configuration. All we have to do is to fill in
+the contents of the cells with some terms. The syntactic categories of
+those terms will also indirectly define the types of the corresponding
+cells.

+

For example, we want the k cell to initially hold the program that is passed
+to krun. K provides a builtin configuration variable, called $PGM, which
+is specifically designed for this purpose: krun will place its program there
+(after it parses it, of course). The K tool allows users to define their own
+configuration variables, too, which can be used to develop custom
+initializations of program configurations with the help of krun; this can be
+quite useful when defining complex languages, but we do not discuss it in
+this tutorial.

+
configuration <T> <k> $PGM </k> <state>...</state>  </T>
+
+

Moreover, we want the program to be a proper Pgm term (because we do not
+want to allow krun to take fragments of programs, for example, statements).
+Therefore, we tag $PGM with the desired syntactic category, Pgm:

+
configuration <T> <k> $PGM:Pgm </k> <state>...</state>  </T>
+
+

Like for other variable tags in K, a run-time check will be performed and the
+semantics will get stuck if the passed term is not a well-formed program.

+

We next tell K that the state cell should be initialized with the empty map:

+
configuration <T> <k> $PGM:Pgm </k> <state> .Map </state>  </T>
+
+

Recall that in K . stands for nothing. However, since there are various
+types of nothing, to avoid confusion we can suffix the . with its desired
+type. K has several builtin data-types, including lists, sets, bags, and
+maps. .Map is the empty map.

+

Kompile imp.k and run several programs to see how the configuration is
+initialized as desired.

+

When configurations get large, and they do when defining large programming
+languages, you may want to color the cells in order to more easily distinguish
+them. This can be easily achieved using the color cell attribute, following
+again an XML-ish style:

+
configuration <T color="yellow">
+                <k color="green"> $PGM:Pgm </k>
+                <state color="red"> .Map </state>
+              </T>
+
+

In the next lesson we will learn how to write rules that involve cells.

+

Go to Lesson 3, IMP: Computations, Results, Strictness; Rules Involving Cells.

+

MOVIE (out of date) [04'21"]

+

Computations, Results, Strictness; Rules Involving Cells

+ +

In this lesson we will learn about the syntactic category K of computations,
+about how strictness attributes are in fact syntactic sugar for rewrite rules
+over computations, and why it is important to tell the tool which
+computations are results. We will also see a K rule that involves cells.

+

K Computations

+ +

Computation structures, or more simply computations, extend the abstract
+syntax of your language with a list structure using ~> (read followed by
+or and then, and written \curvearrowright in Latex) as a separator.
+K provides a distinguished sort, K, for computations. The extension of the
+abstract syntax of your language into computations is done automatically by
+the K tool when you declare constructs using the syntax keyword, so the K
+semantic rules can uniformly operate only on terms of sort K. The intuition
+for computation structures of the form

+
t1 ~> t2 ~> ... ~> tn
+
+

is that the listed tasks are to be processed in order. The initial
+computation typically contains the original program as its sole task, but
+rules can then modify it into task sequences, as seen shortly.

+

Strictness in Theory

+ +

The strictness attributes, used as annotations to language constructs,
+actually correspond to rules over computations. For example, the
+strict(2) attribute of the assignment statement corresponds to the
+following two opposite rules (X ranges over Id and A over AExp):

+
X=A; => A ~> X=[];
+A ~> X=[]; => X=A;
+
+

The first rule pulls A from the syntactic context X=A; and schedules it
+for processing. The second rule plugs A back into its context.
+Inspired by the chemical abstract machine, we call rules of the first
+type above heating rules and rules of the second type cooling rules.
+Similar rules are generated for other arguments in which operations are
+strict. Iterative applications of heating rules eventually bring to the
+top of the computation atomic tasks, such as a variable lookup, or a
+builtin operation, which then make computational progress by means of other
+rules. Once progress is made, cooling rules can iteratively plug the result
+back into context, so that heating rules can pick another candidate for
+reduction, and so on and so forth.

+

When operations are strict only in some of their arguments, the corresponding
+positions of the arguments in which they are strict are explicitly enumerated
+in the argument of the strict attribute, e.g., strict(2) like above, or
+strict(2 3) for an operation strict in its second and third arguments, etc.
+If an operation is simply declared strict then it means that it is strict
+in all its arguments. For example, the strictness of addition yields:

+
A1+A2 => A1 ~> []+A2
+A1 ~> []+A2 => A1+A2
+A1+A2 => A2 ~> A1+[]
+A2 ~> A1+[] => A1+A2
+
+

It can be seen that such heating/cooling rules can easily lead to
+non-determinism, since the same term may be heated many different ways;
+these different evaluation orders may lead to different behaviors in some
+languages (not in IMP, because its expressions do not have side effects,
+but we will experiment with non-determinism in its successor, IMP++).

+

A similar desugaring applies to sequential strictness, declared with the
+keyword seqstrict. While the order of arguments of strict is irrelevant,
+it matters in the case of seqstrict: they are to be evaluated in the
+specified order; if no arguments are given, then they are assumed by default
+to be evaluated from left-to-right. For example, the default heating/cooling
+rules associated to the sequentially strict <= construct above are
+(A1, A2 range over AExp and I1 over Int):

+
A1<=A2 => A1 ~> []<=A2
+A1 ~> []<=A2 => A1<=A2
+I1<=A2 => A2 ~> I1<=[]
+A2 ~> I1<=[] => I1<=A2
+
+

In other words, A2 is only heated/cooled after A1 is already evaluated.

+

While the heating/cooling rules give us a nice and uniform means to define
+all the various allowable ways in which a program can evaluate, all based
+on rewriting, the fact that they are reversible comes with a serious practical
+problem: they make the K definitions unexecutable, because they lead to
+non-termination.

+

Strictness in Practice; K Results

+ +

To break the reversibility of the theoretical heating/cooling rules, and,
+moreover, to efficiently execute K definitions, the current implementation of
+the K tool relies on users giving explicit definitions of their languages'
+results.

+

The K tool provides a predicate isKResult, which is automatically defined
+as we add syntactic constructs to KResult (in fact the K tool defines such
+predicates for all syntactic categories, which are used, for example, as
+rule side conditions to check user-declared variable memberships, such as
+V:Val stating that V belongs to Val).

+

The kompile tool, depending upon what it is requested to do, changes the
+reversible heating/cooling rules corresponding to evaluation strategy
+definitions (e.g., those corresponding to strictness attributes) to avoid
+non-termination. For example, when one is interested in obtaining an
+executable model of the language (which is the default compilation mode of
+kompile), then heating is performed only when the to-be-pulled syntactic
+fragment is not a result, and the corresponding cooling only when the
+to-be-plugged fragment is a result. In this case, e.g., the heating/cooling
+rules for assignment are modified as follows:

+
X=A; => A ~> X=[];  requires notBool isKResult(A)
+A ~> X=[]; => X=A;  requires isKResult(A)
+
+

Note that non-termination of heating/cooling is avoided now. The only thing
+lost is the number of possible behaviors that a program can manifest, but
+this is irrelevant when all we want is one behavior.

+

As will be discussed in the IMP++ tutorial, the heating/cooling rules are
+modified differently by kompile when we are interested in other aspects
+of the language definition, such as, for example, in a searchable model that
+comprises all program behaviors. This latter model is obviously more general
+from a theoretical perspective, but, in practice, it is also slower to execute.
+The kompile tool strives to give you the best model of the language for the
+task you are interested in.

+

Can't Results be Inferred Automatically?

+ +

This is a long story, but the short answer is: No! Maybe in some cases
+it is possible, but we prefer to not attempt it in the K tool. For example,
+you most likely do not want any stuck computation to count as a result,
+since some of them can happen simply because you forgot a semantic rule that
+could have further reduced it! Besides, in our experience with defining large
+languages, it is quite useful to take your time and think of what the results
+of your language's computations are. This fact in itself may help you improve
+your overall language design. We typically do it at the same time with
+defining the evaluation strategies of our languages. Although in theory K
+could infer the results of your language as the stuck computations, based on
+the above we have deliberately decided to not provide this feature, in spite
+of requests from some users. So you currently do have to explicitly define
+your K results if you want to effectively use the K tool. Note, however, that
+theoretical definitions, not meant to be executed, need not worry about
+defining results (that's because in theory semantic rules apply modulo the
+reversible heating/cooling rules, so results are not necessary).

+

A K Rule Involving Cells

+ +

All our K rules so far in the tutorial were of the form

+
rule left => right requires condition
+
+

where left and right were syntactic, or more generally computation, terms.

+

Here is our first K rule explicitly involving cells:

+
rule <k> X:Id => I ...</k> <state>... X |-> I ...</state>
+
+

Recall that the k cell holds computations, which are sequences of tasks
+separated by ~>. Also, the state cell holds a map, which is a set of
+bindings, each binding being a pair of computations (currently, the
+K builtin data-structures, like maps, are untyped; or, said differently,
+they are all over the type of computations, K).

+

Therefore, the two cells mentioned in the rule above hold collections
+of things, ordered or not. The ...s, which we also call cell frames,
+stand for more stuff there, which we do not care about.

+

The rewrite relation => is allowed in K to appear anywhere in a term, its
+meaning being that the corresponding subterm is rewritten as indicated in the
+shown context. We say that K's rewriting is local.

+

The rule above says that if the identifier X is the first task in the k
+cell, and if X is bound to I somewhere in the state, then X rewrites
+to I locally in the k cell. Therefore, IMP variables need to be already
+declared when looked up.

+

Of course, the K rule above can be translated into an ordinary rewrite rule
+of the form

+
rule <k> X ~> Rest </k> <state> Before (X |-> I) After </state>
+  => <k> I ~> Rest </k> <state> Before (X |-> I) After </state>
+
+

Besides being more verbose and thus tedious to write, this ordinary rule
+is also more error-prone; for example, we may forget the Rest variable
+in the right-hand-side, etc. Moreover, the concurrent semantics of K
+allows for its rules to be interpreted as concurrent transactions, where
+the context is the read-only component of the transaction, while the
+subterms which are rewritten are the read/write component of the transaction;
+thus, K rule instances can apply concurrently if they only overlap
+on read-only parts, while they cannot if regarded as ordinary rewrite logic
+rules. Note: our current implementation of the K tool is not concurrent,
+so K rules are in fact desugared as normal rewrite rules in the K tool.

+

Kompile imp.k using a documentation option and check out how the K rule
+looks in the generated document. The ... frames are displayed as cell
+tears, metaphorically implying that those parts of the cells that we
+do not care about are torn away. The rewrite relation is replaced by a
+horizontal line: specifically, the subterm which rewrites, X, is
+underlined, and its replacement is written underneath the line.

+

In the next lesson we define the complete K semantics of IMP and
+run the programs we parsed in the first lesson.

+

Go to Lesson 4, IMP: Configuration Abstraction, Part 1; Types of Rules.

+

MOVIE (out of date) [10'30"]

+

Configuration Abstraction, Part 1; Types of Rules

+ +

Here we will complete the K definition of IMP and, while doing so, we will
+learn the very first step of what we call configuration abstraction.

+

The IMP Semantic Rules

+ +

Let us add the remaining rules, in the order in which the language constructs
+were defined in IMP-SYNTAX.

+

The rules for the arithmetic and Boolean constructs are self-explanatory.
+Note, however, that K will infer the correct sorts of all the variables in
+these rules, because they appear as arguments of the builtin operations
+(_+Int_, etc.). Moreover, the inferred sorts will be enforced dynamically.
+Indeed, we do not want to apply the rule for addition, for example, when the
+two arguments are not integers. In the rules for &&, although we prefer to
+not do it here for simplicity, we could have eliminated the dynamic check by
+replacing B (and similarly for _) with B:K. Indeed, it can be shown
+that whenever any of these rules apply, B (or _) is a BExp anyway.
+That's because there is no rule that can touch such a B (or _); this
+will become clearer shortly, when we discuss the first step of configuration
+abstraction. Therefore, since we know that B will be a BExp anyway, we
+could save the time it takes to check its sort; such times may look minor,
+but they accumulate, so some designers may prefer to avoid run-time checks
+whenever possible.

+
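
For reference, here is a sketch of how these rules might look; imp.k is
+the authoritative source:

+
rule I1 + I2 => I1 +Int I2
+rule I1 / I2 => I1 /Int I2  requires I2 =/=Int 0
+rule I1 <= I2 => I1 <=Int I2
+rule ! T => notBool T
+rule true  && B => B
+rule false && _ => false
+
+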

The block rules are trivial. However, the rule for non-empty blocks is
+semantically correct only because we do not have local variable declarations
+in IMP. We will have to change this rule in IMP++.

+
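
The two block rules might simply be the following (a sketch; imp.k may
+additionally tag them with attributes such as structural):

+
rule {} => .
+rule {S} => S
+
+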

The assignment rule has two =>: one in the k cell dissolving the
+assignment statement, and the other in the state cell updating the value of
+the assigned variable. Note that the one in the state is surrounded by
+parentheses: (_ => I). That is because => is greedy: it matches as much
+as it can to the left and to the right, until it reaches the cell boundaries
+(closed or open). If you want to limit its scope, or for clarity, you can use
+parentheses like here.

+
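
Here is a sketch of the assignment rule, with the rewrite in the state
+parenthesized as discussed:

+
rule <k> X = I:Int; => . ...</k> <state>... X |-> (_ => I) ...</state>
+
+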

The rule for sequential composition simply desugars S1 S2 into S1 ~> S2.
+Indeed, the two have exactly the same semantics. Note that statements
+evaluate to nothing (.), so once S1 is processed in S1 ~> S2, then the
+next task is automatically S2, without wasting any step for the transition.

+
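
The rule itself is a one-liner (a sketch; imp.k may also mark it as
+structural, so that it does not count as a computational step):

+
rule S1:Stmt S2:Stmt => S1 ~> S2
+
+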

The rules for the conditional and while statements are clear. One thing to
+keep in mind now is that the while unrolling rule will not apply
+indefinitely in the positive branch of the resulting conditional, because
+of K's configuration abstraction, which will be discussed shortly.

+
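
A sketch of these rules:

+
rule if (true)  S else _ => S
+rule if (false) _ else S => S
+rule while (B) S => if (B) {S while (B) S} else {}
+
+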

An IMP program declares a set of variables and then executes a
+statement in the state obtained after initializing all those variables
+to 0. The rules for programs initialize the declared variables one by one,
+checking also that there are no duplicates. We check for duplicates only for
+demonstration purposes, to illustrate the keys predefined operation that
+returns the set of keys of a map, and the set membership operation in.
+In practice, we typically define a static type checker for our language,
+which we execute before the semantics and reject inappropriate programs.

+
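
Here is a sketch of these two rules; the first initializes one declared
+variable at a time, checking for duplicates, and the second dissolves the
+declaration once the list of identifiers is exhausted:

+
rule <k> int (X,Xs => Xs);_ </k> <state> Rho:Map (.Map => X |-> 0) </state>
+  requires notBool (X in keys(Rho))
+rule int .Ids; S => S
+
+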

The use of the .Ids in the second rule is not necessary. We could have
+written int; S instead of int .Ids; S and the K tool would parse it and
+kompile the definition correctly, because it uses the same parser used for
+parsing programs also to parse the semantics. However, we typically prefer to
+explicitly write the nothing values in the semantics, for clarity;
+the parser has been extended to accept these. Note that the first rule
+matches the entire k cell, because int_;_ is the top-level program
+construct in IMP, so there is nothing following it in the computation cell.
+The anonymous variable stands for the second argument of this top-level program
+construct, not for the rest of the computation. The second rule could have
+also been put in a complete k cell, but we preferred not to, for simplicity.

+

Our IMP semantics is now complete, but there are a few more things that we
+need to understand and do.

+

Configuration Abstraction, Part 1

+ +

First, let us briefly discuss the very first step of configuration abstraction.
+In K, all semantic rules are in fact rules between configurations. As will
+soon be explained in the IMP++ tutorial, the declared configuration cell structure is
+used to automatically complete the missing configuration parts in rules.
+However, many rules do not involve any cells, being rules between syntactic
+terms (of sort K); for example, we had only three rules involving cells in our
+IMP semantics. In this case, the k cell will be added automatically and the
+actual rewrite will happen on top of the enclosed computation. For example,
+the rule for the while loop is automatically translated into the following:

+
rule <k> while (B) S => if (B) {S while (B) S} else {} ...</k>
+
+

Since the first task in computations is what needs to be done next, the
+intuition for this rule completion is that the syntactic transition
+only happens when the term to rewrite is ready for processing. This explains,
+for example, why the while loop unrolling does not indefinitely apply in the
+positive branch of the conditional: the inner while loop is not ready for
+evaluation yet. We call this rule completion process, as well as other
+similar ones, configuration abstraction. That is because the incomplete
+rule abstracts away the configuration structure, thus being easier to read.
+As will be seen when we define IMP++, configuration abstraction is not only a
+user convenience; it actually significantly increases the modularity of our
+definitions. The k-cell-completion is only the very first step, though.

+

If you really want certain rewrites over syntactic terms to apply
+anywhere they match, then you should tag the rule with the attribute
+anywhere, which was discussed in Tutorial 1, Lesson 2.5.

+

Kompile and then krun the programs that you only parsed in Lesson 1. They
+should all execute as expected. The state cell shows the final state
+of the program. The k cell shows the final code contents, which should be
+empty whenever the IMP program executes correctly.

+

Kompile also with the documentation option and take a look at the generated
+documentation. The assignment rule should particularly be of interest,
+because it contains two local rewrites.

+

In the next lesson we comment the IMP definition and conclude this tutorial.

+

Go to Lesson 5, IMP: Completing and Documenting IMP.

+

MOVIE (out of date) [09'16"]

+

Completing and Documenting IMP

+ +

We here learn no new concepts, but it is a good moment to take a break
+and contemplate what we learned so far.

+

Let us add lots of formal annotations to imp.k.

+

Once we are done with the annotations, we kompile with the documentation
+option and then take a look at the produced document. We often call these
+documents language posters. Depending on how much information you add to
+these language posters, they can serve as standalone, formal presentations
+of your languages. For example, you can print them as large posters and
+post them on the wall, or in poster sessions at conferences.

+

This completes our second tutorial. The next tutorials will teach us more
+features of the K framework, such as how to define languages with complex
+control constructs (like callcc), languages which are concurrent, and so on.

+

MOVIE (out of date) [03'45"]

+

Part 3: Defining LAMBDA++

+ +

Here you will learn how to define language constructs which abruptly change
+the execution control flow, and how to define language semantics following
+an environment/store style. Specifically, you will learn the following:

+
    +
  • How to define constructs like callcc, which allow you to take snapshots of
    +program executions and to go back in time at any moment.
  • +
  • How to define languages in an environment/store style.
  • +
  • Some basic notions about the use of closures and closure-like semantic
    +structures to save and restore execution environments.
  • +
  • Some basic intuitions about reusing existing semantics in new languages,
    +as well as some of the pitfalls in doing so.
  • +
+

Abrupt Changes of Control

+ +

Here we add call-with-current-continuation (callcc) to the definition of
+LAMBDA completed in Tutorial 1, and call the resulting language LAMBDA++.
+While doing so, we will learn how to define language constructs that
+abruptly change the execution control flow.

+

Take over the lambda.k definition from Lesson 8 in Part 1 of this Tutorial,
+which is the complete definition of the LAMBDA language, but without the
+comments.

+

callcc is a good example for studying the capabilities of a framework to
+support abrupt changes of control, because it is one of the most
+control-intensive language constructs known. Scheme is probably the first
+programming language that incorporated the callcc construct, although
+similar constructs have been recently included in many other languages in
+one form or another.

+

Here is a quick description: callcc e passes the remaining computation
+context, packaged as a function k, to e (which is expected to be a function);
+if during its evaluation e passes any value to k, then the current
+execution context is discarded and replaced by the one encoded by k and
+the value is passed to it; if e evaluates normally to some value v and
+passes nothing to k in the process, then v is returned as a result of
+callcc e and the execution continues normally. For example, we want the
+program callcc-jump.lambda:

+
(callcc (lambda k . ((k 5) + 2))) + 10
+
+

to evaluate to 15, not 17! Indeed, the computation context [] + 10 is
+passed to callcc's argument, which then sends it a 5, so the computation
+resumes to 5 + 10. On the other hand, the program callcc-not-jump.lambda

+
(callcc (lambda k . (5 + 2))) + 10
+
+

evaluates to 17.

+

If you like playing games, you can metaphorically think of callcc e as
+saving your game state in a file and passing it to your friend e.
+Then e can decide at some moment to drop everything she was doing, load
+your game and continue to play it from where you were.

+

The behavior of many popular control-changing constructs can be obtained
+using callcc. The program callcc-return.lambda shows, for example, how to
+obtain the behavior of a return statement, which exits the current execution
+context inside a function and returns a value to the caller's context:

+
letrec f x = callcc (lambda return . (
+  f (if (x <= 0) then ((return 1) / 0) else 2)
+))
+in (f -3)
+
+

This should evaluate to 1, in spite of the recursive call to f
+and of the division by zero! Note that return is nothing but a variable
+name, but one which is bound to the current continuation at the beginning of
+the function execution. As soon as 1 is passed to return, the computation
+jumps back in time to where callcc was defined! Change -3 to 3 and the
+program will loop forever.

+

callcc is quite a powerful and beautiful language construct, although one
+which is admittedly hard to give semantics to in some frameworks.
+But not in K 😃 Here is the entire K syntax and semantics of callcc:

+
syntax Exp ::= "callcc" Exp  [strict]
+syntax Val ::= cc(K)
+rule <k> (callcc V:Val => V cc(K)) ~> K </k>
+rule <k> cc(K) V ~> _ =>  V ~> K </k>
+
+

Let us first discuss the annotated syntax. We declared callcc strict,
+because its argument may not necessarily be a function yet, so it may need
+to be evaluated. As explained above, we need to encode the remaining
+computation somehow and pass it to callcc's argument. More specifically,
+since LAMBDA is call-by-value, we have to encode the remaining computation as
+a value. We do not want to simply subsort computations to Val, because there
+are computations which we do not want to be values. A simple solution to
+achieve our goal here is to introduce a new value construct, say cc (from
+current-continuation), which holds any computation.

+

Note that, inspired by SDF,
+K allows you to define the syntax of helping semantic operations, like cc,
+more compactly. Typically, we do not need a fancy syntax for such operators;
+all we need is a name, followed by an open parenthesis, followed by a
+comma-separated list of arguments, followed by a closed parenthesis. If this
+is the syntax that you want for a particular construct, then K allows you to
+drop all the quotes surrounding the terminals, as we did above for cc.

+

The semantic rules do exactly what the English semantics of callcc says.
+Note that here, unlike in our definition of LAMBDA in Tutorial 1, we had
+to mention the cell <k/> in our rules. This is because we need to make sure
+that we match the entire remaining computation, not only a fragment of it!
+For example, if we replace the two rules above with

+
rule (callcc V:Val => V cc(K)) ~> K
+rule cc(K) V ~> _ =>  V ~> K
+
+

then we get a callcc which is allowed to non-deterministically pick a
+prefix of the remaining computation and pass it to its argument, and then
+when invoked within its argument, a non-deterministic prefix of the new
+computation is discarded and replaced by the saved one. Wow, that would
+be quite a language! Would you like to write programs in it? 😃

+

Consequently, in K we can abruptly change the execution control flow of a
+program by simply changing the contents of the <k/> cell. This is one of
+the advantages of having an explicit representation of the execution context,
+like in K or in reduction semantics with evaluation contexts. Constructs like
+callcc are very hard and non-elegant to define in frameworks such as SOS,
+because those implicitly represent the execution context as proof context,
+and the latter cannot be easily changed.

+

Now that we know how to handle cells in configurations and use them in rules,
+in the next lesson we take a fresh look at LAMBDA and define it using
+an environment-based style, which avoids the complexity of substitution
+(e.g., having to deal with variable capture) and is closer in spirit to how
+functional languages are implemented.

+

Go to Lesson 2, LAMBDA++: Semantic (Non-Syntactic) Computation Items.

+

MOVIE (out of date) [6'28"]

+

Semantic (Non-Syntactic) Computation Items

+ +

In this lesson we start another semantic definition of LAMBDA++, which
+follows a style based on environments instead of substitution. In terms of
+K, we will learn how easy it is to add new items to the syntactic category
+of computations K, even ones which do not have a syntactic nature.

+

An environment binds variable names of interest to locations where their
+values are stored. The idea of environment-based definitions is to maintain
+a global store mapping locations to values, and then have environments
+available when we evaluate expressions telling where the variables are
+located in the store. Since LAMBDA++ is a relatively simple language, we
+only need to maintain one global environment. Following a style similar to
+the one in IMP, we place all cells into a top cell T:

+
configuration <T>
+                <k> $PGM:Exp </k>
+                <env> .Map </env>
+                <store> .Map </store>
+              </T>
+
+

Recall that $PGM is where the program is placed by krun after parsing. So
+the program execution starts with an empty environment and an empty store.

+

In environment-based definitions of lambda-calculi, lambda abstractions
+evaluate to so-called closures:

+
rule <k> lambda X:Id . E => closure(Rho,X,E) ...</k>
+     <env> Rho </env>
+
+

A closure is like a lambda abstraction, but it also holds the environment
+in which it was declared. This way, when invoked, a closure knows where to
+find in the store the values of all the variables that its body expression
+refers to. We will define the lookup rule shortly.

+

Therefore, unlike in the substitution-based definitions of LAMBDA and
+LAMBDA++, neither the lambda abstractions nor the identifiers are values
+anymore here, because they both evaluate further: lambda abstractions to
+closures and identifiers to their values in the store. In fact, the only
+values at this moment are the closures, and they are purely semantic entities,
+which cannot be used explicitly in programs. That's why we modified the
+original syntax of the language to include no Val syntactic category
+anymore, and that's why we need to add closures as values now; as before,
+we add a Val syntactic category which is subsorted
+to KResult. In general, whenever you have any strictness attributes,
+you should also define some K results.

+

Invoking a closure is a bit more involved than the substitution-based
+beta-reduction: we need to switch to the closure's environment, then create a
+new, or fresh, binding for the closure's parameter to the value passed to the
+closure, then evaluate the closure's body, and then switch back to the
+caller's environment, which needs to be stored somewhere in the meanwhile.
+We can do all these with one rule:

+
rule <k> closure(Rho,X,E) V:Val => E ~> Rho' ...</k>
+     <env> Rho' => Rho[X <- !N] </env>
+     <store>... .Map => (!N:Int |-> V) ...</store>
+
+

Therefore, we atomically do all the following:

+
    +
  • switch the computation to the closure's body, E, followed by a
    +caller-environment-recovery task Rho' (note that Rho' is the
    +current environment),
  • +
  • generate a fresh location !N (the ! is important, we discuss it below),
    +bind X to !N in closure's environment and switch the current environment
    +Rho' to that one,
  • +
  • write the value passed to the closure, V, at location !N.
  • +
+

This was the most complex K rule we've seen so far in the tutorial. Note,
+however, that this one rule achieves a lot. It is, in fact, quite compact
+considering how much it does. Note also that everything that this K rule
+mentions is needed also conceptually in order to achieve this task, so it
+is minimal from that point of view. That would not be the case if we
+used, instead, a conventional rewrite rule, because we would have had to
+mention the remaining store, say Sigma, on both sides of the rule, to say
+it stays unchanged. Here we just use ....

+

The declaration of the fresh variable above, !N, is new and needs
+some explanation. First, note that !N appears only in the right-hand-side
+terms in the rule, that is, it is not matched when the rule is applied.
+Instead, a fresh Int element is generated each time the rule is applied.
+In K, we can define syntactic categories which have the capability to
+generate fresh elements like above, using unbound variables whose name starts
+with a !. The details of how to do that are beyond the scope of this
+tutorial (see Tutorial 6). All we need to know here is that an arbitrary
+fresh element of that syntactic category is generated each time the rule
+is applied. We cannot rely on the particular name or value of the generated
+element, because that can change with the next version of the K tool, or
+even from execution to execution with the same version. All you can rely
+on is that each newly generated element is distinct from the previously
+generated elements for the same syntactic category.

+

Unlike in the substitution-based definition, we now also need a lookup rule:

+
rule <k> X => V ...</k>
+     <env>... X |-> N ...</env>
+     <store>... N |-> V ...</store>
+
+

This rule speaks for itself: replace X by the value V located in the store
+at X's location N in the current environment.

+

The only thing left to define is the auxiliary environment-recovery operation:

+

rule <k> _:Val ~> (Rho => .) ...</k> <env> _ => Rho </env>

+

When the item preceding the environment recovery task Rho in the
+computation becomes a value, replace the current environment with Rho
+and dissolve Rho from the computation.

+

Let us kompile and ... fail:

+
kompile lambda
+
+

gives a parsing error saying that V:Val does not fit there in the closure
+invocation rule. That's because Val and Exp are currently completely
+disconnected, so K rightfully complains that we want to apply a value to
+another one, because application was defined to work with expressions, not
+values. What we forgot here was to state that Exp includes Val:

+
syntax Exp ::= Val
+
+

Now everything works, but it is a good time to reflect a bit.

+

So we added closures, which are inherently semantic entities, to the syntax
+of expressions. Does that mean that we can now write LAMBDA programs with
+closures in them? Interestingly, with our current definition of LAMBDA,
+which purposely did not follow the nice organization of IMP into syntax and
+semantic modules, and with K's default parser, kast, you can. But you are
+not supposed to rely on this! In fact, if you use an external parser, that
+parser will reject programs with explicit closures. Also, if we split the
+LAMBDA definition into two modules, one called LAMBDA-SYNTAX containing
+exclusively the desired program syntax and one called LAMBDA importing the
+former and defining the syntax of the auxiliary operations and the semantics,
+then even K's default parser will reject programs using auxiliary syntactic
+constructs.

+

Indeed, when you kompile a language, say lang.k, the tool will by default
+attempt to find a module LANG-SYNTAX and generate the program parser from
+that. If it cannot find it, then it will use the module LANG instead. There
+are also ways to tell kompile precisely which syntax module you want to use
+for the program parser if you don't like the default convention.
+See kompile --help.

+
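
To make the module-splitting idea concrete, here is a minimal sketch of how
+such a split could look (the productions shown are only illustrative; most of
+the definition is elided):

+
module LAMBDA-SYNTAX
+  syntax Exp ::= Id | "lambda" Id "." Exp | Exp Exp  [strict, left]
+endmodule
+
+module LAMBDA
+  imports LAMBDA-SYNTAX
+  syntax Val ::= closure(Map,Id,Exp)
+  syntax Exp ::= Val
+  syntax KResult ::= Val
+endmodule
+
+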

Another insightful thought to reflect upon is the relationship between your
+language's values and other syntactic categories. It is often the case that
+values form a subset of the original language syntax, like in IMP (Part 2 of
+the tutorial), but sometimes that is not true, like in our case here. When
+that happens, in order for the semantics to be given smoothly and uniformly
+using the original syntax, you need to extend your language's original
+syntactic categories with the new values. The same holds true in other
+semantic approaches, not only in K, even in ones which are considered purely
+syntactic. As should be clear by now, K does not force you to use a
+purely syntactic style in your definitions; nevertheless, K does allow you to
+develop purely syntactic definitions, like LAMBDA in Part 1 of the tutorial,
+if you prefer those.

+

krun some programs, such as those provided in Lesson 1 of the LAMBDA
+tutorial (Part 1). Note the closures, both as results in the <k/> cell,
+and as values in the store. Also, since variables are not values anymore,
+expressions that contain free variables may get stuck with one of those on
+top of their computation. See, for example, free-variable-capture.lambda,
+which gets stuck on z, because z is free, so it cannot be evaluated.
+If you want, you can go ahead and manually provide a configuration with
+z mapped to some location in the environment and that location mapped to
+some value in the store, and then you can also execute this program. The
+program omega.lambda should still loop.

+

Although we completely changed the definitional style of LAMBDA, the semantics
+of the other constructs do not need to change, as seen in the next lesson.

+

Go to Lesson 3, LAMBDA++: Reusing Existing Semantics.

+

MOVIE (out of date) [8'02"]

+

Reusing Existing Semantics

+ +

In this lesson we will learn that, in some cases, we can reuse existing
+semantics of language features without having to make any change!

+

Although the definitional style of the basic LAMBDA language changed quite
+radically in our previous lesson, compared to its original definition in
+Part 1 of the tutorial, we fortunately can reuse a large portion of the
+previous definition. For example, let us just cut-and-paste the rest of the
+definition from Lesson 7 in Part 1 of the tutorial.

+

Let us kompile and krun all the remaining programs from Part 1 of the
+tutorial. Everything should work fine, although the store contains lots of
+garbage. Garbage collection is an interesting topic, but we do not do it
+here. Nevertheless, much of this garbage is caused by the intricate use of
+the fixed-point combinator to define recursion. In a future lesson in this
+tutorial we will see that a different, environment-based definition of
+fixed-points will allocate much less memory.

+

One interesting question at this stage is: how do we know when we can reuse
+an existing semantics of a language feature? Well, I'm afraid the answer is:
+we don't. In the next lesson we will learn how reuse can fail for quite subtle
+reasons, which are impossible to detect statically (and some non-experts may
+fail to even detect them at all).

+

Go to Lesson 4, LAMBDA++: Do Not Reuse Blindly!.

+

MOVIE (out of date) [3'21"]

+

Do Not Reuse Blindly!

+ +

It may be tempting to base your decision to reuse an existing semantics of
+a language feature solely on syntactic considerations; for example, to reuse
+whenever the parser does not complain. As seen in this lesson, this could
+be quite risky.

+

Let's try (and fail) to reuse the definition of callcc from Lesson 1:

+
syntax Exp ::= "callcc" Exp  [strict]
+syntax Val ::= cc(K)
+rule <k> (callcc V:Val => V cc(K)) ~> K </k>
+rule <k> cc(K) V ~> _ =>  V ~> K </k>
+
+

The callcc examples that we tried in Lesson 1 work, so it may look like it works.

+

However, the problem is that cc(K) should also include an environment,
+and that environment should also be restored when cc(K) is invoked.
+Let's try to illustrate this bug with callcc-env1.lambda

+
let x = 1 in
+  ((callcc lambda k . (let x = 2 in (k x))) + x)
+
+

where the second argument of +, x, should be bound to the top x, which
+is 1. However, since callcc does not restore the environment, that x
+ends up being looked up in the wrong, callcc-inner environment (where x is 2),
+so we would expect the overall result to be 4.

+

Hm, we get the right result, 3 ... (Note: you may get 4, depending on
+your version of K and platform; but both 3 and 4 are possible results, as
+explained below and seen in the tests). How can we get 3? Well, recall that
++ is strict, which means that it can evaluate its arguments in any order.
+It just happened that, in the execution that took place above, its second
+argument was evaluated first, to 1, and then the callcc was evaluated, but
+its cc value K had already included the 1 instead of x ... In Part 4 of
+the tutorial we will see how to explore all the non-deterministic behaviors of
+a program; we could use that feature of K to debug semantics, too.
+For example, in this case, we could search for all behaviors of this program
+and we would indeed get two possible value results: 3 and 4.

+

One may think that the problem is the non-deterministic evaluation order
+of +, and thus that all we need to do is to enforce a deterministic order
+in which the arguments of + are evaluated. Let us follow this path to
+see what happens. There are two simple ways to make the evaluation order
+of +'s arguments deterministic. One is to make + seqstrict in the
+semantics, to enforce its evaluation from left-to-right. Do it and then
+run the program above again; you should get only one behavior for the
+program above, 4, which therefore shows that copying-and-pasting our old
+definition of callcc was incorrect. However, as seen shortly, that only
+fixed the problem for the particular example above, but not in general.
+Another conventional approach to enforce the desired evaluation order is to
+modify the program to enforce the left-to-right evaluation order using let
+binders, as we do in callcc-env2.lambda:

+
let x = 1 in
+  let a = callcc lambda k . (let x = 2 in (k x)) in
+    let b = x in
+      (a + b)
+
+

With your installation of K you may get the "expected" result 4 when you
+execute this program, so it may look like our non-deterministic problem is
+fixed. Unfortunately, it is not. Using the K tool to search for all the
+behaviors in the program above reveals that the final result 3 is still
+possible. Moreover, both the 3 and the 4 behaviors are possible regardless
+of whether + is declared to be seqstrict or just strict. How is that
+possible? The problem is now the non-deterministic evaluation strategy of
+the function application construct. Indeed, recall that the semantics of
+the let-in construct is defined by desugaring to lambda application:

+
rule let X = E in E' => (lambda X . E') E
+
+

With this, the program above eventually reduces to

+
(lambda a . ((lambda b . a + b) x))
+(callcc lambda k . (let x = 2 in (k x)))
+
+

in an environment where x is 1. If the first expression evaluates first,
+then it does so to a closure in which x is bound to a location holding 1,
+so when applied later on to the x inside the argument of callcc (which is
+2), it will correctly lookup x in its enclosed environment and thus the
+program will evaluate to 3. On the other hand, if the second expression
+evaluates first, then the cc value will freeze the first expression as is,
+breaking the relationship between its x and the current environment (in which
+x is bound to 1); that x is then inadvertently captured by the environment of
+the let-in construct inside the callcc, thus making the entire expression
+evaluate to 4.

+

So the morale is: Do not reuse blindly. Think!

+

In the next lesson we fix the environment-based semantics of callcc by having
+cc also wrap an environment, besides a computation. We will also give a more
+direct semantics to recursion, based on environments instead of fixed-point
+combinators.

+

Go to Lesson 5, LAMBDA++: More Semantic Computation Items.

+

MOVIE (out of date) [3'37"]

+

More Semantic Computation Items

+ +

In this lesson we see more examples of semantic (i.e., non-syntactic)
+computational items, and how useful they can be. Specifically, we fix the
+environment-based definition of callcc and give an environment-based
+definition of the mu construct for recursion.

+

Let us first fix callcc. As discussed in Lesson 4, the problem that we
+noticed there was that we only recovered the computation, but not the
+environment, when a value was passed to the current continuation. This is
+quite easy to fix: we modify cc to take both an environment and a
+computation, and its rules to take a snapshot of the current environment with
+it, and to recover it at invocation time:

+
syntax Val ::= cc(Map,K)
+rule <k> (callcc V:Val => V cc(Rho,K)) ~> K </k> <env> Rho </env>
+rule <k> cc(Rho,K) V:Val ~> _ =>  V ~> K </k> <env> _ => Rho </env>
+
+

Let us kompile and make sure it works with the callcc-env2.lambda program,
+which should evaluate to 3, not to 4.

+

Note that the cc value, which can be used as a computation item in the <k/>
+cell, is now quite semantic in nature, pretty much the same as the closures.

+

Let us next add one more closure-like semantic computational item, for mu.
+But before that, let us reuse the semantics of letrec in terms of mu that
+was defined in Lesson 8 of Part 1 of the tutorial on LAMBDA:

+
syntax Exp ::= "letrec" Id Id "=" Exp "in" Exp [macro]
+             | "mu" Id "." Exp                 [latex(\mu{#1}.{#2})]
+rule letrec F:Id X = E in E' => let F = mu F . lambda X . E in E'
+
+

We removed the binder annotation of mu, because it is not necessary
+anymore (since we do not work with substitutions anymore).

+

To save the number of locations needed to evaluate mu X . E, let us replace
+it with a special closure which already binds X to a fresh location holding
+the closure itself:

+
syntax Exp ::= muclosure(Map,Exp)
+
+rule <k> mu X . E => muclosure(Rho[X <- !N], E) ...</k>
+     <env> Rho </env>
+     <store>... .Map => (!N:Int |-> muclosure(Rho[X <- !N], E)) ...</store>
+
+

Since each time mu X . E is encountered during the evaluation it needs to
+evaluate E, we conclude that muclosure cannot be a value. We can declare
+it as either an expression or as a computation. Let's go with the former.

+

Finally, here is the rule unrolling the muclosure:

+

rule <k> muclosure(Rho,E) => E ~> Rho' ...</k>
+     <env> Rho' => Rho </env>

+

Note that the current environment Rho' needs to be saved before and
+restored after E is executed, because the fixed point may be invoked
+from a context with a completely different environment from the one
+in which mu X . E was declared.

+

We are done. Let us now kompile and krun factorial-letrec.lambda from
+Lesson 7 in Part 1 of the tutorial on LAMBDA. Recall that in the previous
+lesson this program generated a lot of garbage into the store, due to the
+need to allocate space for the arguments of all those lambda abstractions
+needed to run the fixed-point combinator. Now we need much fewer locations,
+essentially only locations for the argument of the factorial function, one at
+each recursive call. Anyway, much better than before.

+

In the next lesson we wrap up the environment definition of LAMBDA++ and
+generate its documentation.

+

Go to Lesson 6, LAMBDA++: Wrapping Up and Documenting LAMBDA++.

+

MOVIE (out of date) [5'19"]

+

Wrapping Up and Documenting LAMBDA++

+ +

In this lesson we wrap up and nicely document LAMBDA++. In doing so, we also
+take the liberty of reorganizing the semantics a bit, to make it look better.

+

See the lambda.k file, which is self-explanatory.

+

Part 3 of the tutorial is now complete. Part 4 will teach you more features
+of the K framework, in particular how to exhaustively explore the behaviors
+of non-deterministic or concurrent programs.

+

MOVIE (out of date) [6'23"]

+

Part 4: Defining IMP++

+ +

IMP++ extends IMP, which was discussed in Part 2 of this tutorial, with several
+new syntactic constructs. Also, some existing syntax is generalized, which
+requires non-modular changes of the existing IMP semantics. For example,
+global variable declarations become local declarations and can occur
+anywhere a statement can occur. In this tutorial we will learn the following:

+
    +
  • That (and how) existing syntax/semantics may change as a language evolves.
  • +
  • How to refine configurations as a language evolves.
  • +
  • How to define and use fresh elements of desired sorts.
  • +
  • How to tag syntactic constructs and rules, and how to use such tags
    +with the superheat/supercool options of kompile.
  • +
  • How the search option of krun works.
  • +
  • How to stream cells holding semantic lists to the standard input/output,
    +and thus obtain interactive interpreters for the defined languages.
  • +
  • How to delete, save and restore cell contents.
  • +
  • How to add/delete cells dynamically.
  • +
  • More details on how the configuration abstraction mechanism works.
  • +
+

Like in the previous tutorials, this folder contains several lessons, each
+adding new features to IMP++. Do them in order and make sure you completed
+and understood the previous tutorials.

+

Extending/Changing an Existing Language Syntax

+ +

Here we learn how to extend the syntax of an existing language, both with
+new syntactic constructs and with more general uses of existing constructs.
+The latter, in particular, requires changes of the existing semantics.

+

Consider the IMP language, as defined in Lesson 4 of Part 2 of the tutorial.

+

Let us first add the new syntactic constructs, with their precedences:

+
    +
  • variable increment, ++, which increments an integer variable and
    +evaluates to the new value;
  • +
  • read, which reads and evaluates to a new integer from the input buffer;
  • +
  • print, which takes a comma-separated list of arithmetic expressions and
    +evaluates and prints each of them in order, from left to right, to the
    +output buffer; we therefore define a new list syntactic category, AExps,
    +which we pass as an argument to print; note we do not want to declare
    +print to be strict, because we do not want to first evaluate the
    +arguments and then print them (for example, if the second argument performs
    +an illegal operation, say division by zero, we still want to print the first
    +argument); we also go ahead and add strings as arithmetic expressions,
    +because we intend print to also take strings, in order to print nice
    +messages to the user;
  • +
  • halt, which abruptly terminates the program; and
  • +
  • spawn, which takes a statement and creates a new concurrent thread
    +executing it and sharing its environment with the parent thread.
  • +
+

Also, we want to allow local variable declarations, which can appear anywhere
+a statement can appear. Their scope ranges from the place they are defined
+until the end of the current block, and they can shadow previous declarations,
+both inside and outside the current block. The simplest way to define the
+syntax of the new variable declarations is as ordinary statements, at the same
+time removing the previous Pgm syntactic category and its construct.
+Programs are now just statements.

+
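
To fix ideas, here is a sketch of the new and changed syntax described above
+(existing IMP productions, as well as precedences and strictness attributes,
+are elided):

+
syntax AExp  ::= "++" Id
+               | "read" "(" ")"
+               | String
+syntax AExps ::= List{AExp,","}
+syntax Stmt  ::= "int" Ids ";"
+               | "print" "(" AExps ")" ";"
+               | "halt" ";"
+               | "spawn" Stmt
+
+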

We are now done with adding the new syntax and modifying the old one.
+Note that the old syntax was modified in a way which makes the previous IMP
+programs still parse, but this time as statements. Let us then modify
+the configuration variable $PGM to have the sort Stmt instead of Pgm,
+and let us try to run the old IMP programs, for example sum.imp.

+

Note that they actually get stuck with the global declaration on the top
+of their computations. This is because variable declarations are now treated
+like any other statements; in particular, the sequential composition rule applies.
+This makes the old IMP rule for global variable declarations not match anymore.
+We can easily fix it by replacing the anonymous variable _, which used to match
+the program's statement (now turned into the remaining computation in
+the <k/> cell), with the cell frame variable ..., which matches the
+remaining computation. Similarly, we have to change the rule for the case
+where there are no variables left to declare into one that dissolves itself.

+
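
A sketch of the adjusted rules (the duplicate-declaration check from IMP is
+elided here, and the state cell is still the monolithic one from IMP):

+
rule <k> int (X,Xs => Xs); ...</k>
+     <state> Rho:Map (.Map => X |-> 0) </state>
+rule int .Ids; => .
+
+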

We can now run all the previous IMP programs, in spite of the fact that
+our IMP++ semantics is incomplete and, more interestingly, in spite of the
+fact that our current semantics of blocks is incorrect in what regards the
+semantics of local variable declarations (note that the old IMP programs do
+not declare block-local variables, which is why they still run correctly).

+

Let us also write some proper IMP++ programs, which we would like to execute
+once we give semantics to the new constructs.

+

div.imp is a program manifesting non-deterministic behaviors due to the
+desired non-deterministic evaluation strategy of division and the fact that
+expressions will have side effects once we add variable increment. We will
+be able to see all the different behaviors of this program. Challenge: can
+you identify the behavior where the program performs a division-by-zero?

+

If we run div.imp now, it will get stuck with the variable increment
+construct on top of the computation cell. Once we give it a semantics,
+div.imp will execute completely (all the other constructs in div.imp
+already have their semantics defined as part of IMP).

+

Note that some people prefer to define all their semantics in a by-need
+style, that is, they first write and parse lots of programs, and then they
+add semantics to each language construct on which any of the programs gets
+stuck, and so on and so forth until they can run all the programs.

+

io.imp is a program which exercises the input/output capabilities of the
+language: reads two integers and prints three strings and an integer.
+Note that the variable declaration is not the first statement anymore.

+

sum-io.imp is an interactive variant of the sum program.

+

spawn.imp is a program which dynamically creates two threads that interact
+with the main thread via the shared variable x. Lots of behaviors will be
+seen here once we give spawn the right semantics.

+

Finally, locals.imp tests whether variable shadowing/unshadowing works well.

+

In the next lesson we will prepare the configuration for the new constructs,
+and will see what it takes to adapt the semantics to the new configuration.
+Specifically, we will split the state cell into an environment cell and a
+store cell, like in LAMBDA++ in Part 3 of the tutorial.

+

Go to Lesson 2, IMP++: Configuration Refinement; Freshness.

+

MOVIE (out of date) [07'47"]

+

Configuration Refinement; Freshness

+ +

To prepare for the semantics of threads and local variables, in this lesson we
+split the state cell into an environment and a store. The environment and
+the store will be similar to those in the definition of LAMBDA++ in Part
+3 of the Tutorial. This configuration refinement will require us to change
+some of IMP's rules, namely those that used the state.

+

To split the state map, which binds program variables to values, into an
+environment mapping program variables to locations and a store mapping
+locations to values, we replace in the configuration declaration the cell

+
<state color="red"> .Map </state>
+
+

with two cells

+
<env color="LightSkyBlue"> .Map </env>
+<store color="red"> .Map </store>
+
+

Structurally speaking, this split of a cell into other cells is a major
+semantic change, which, unfortunately, requires us to revisit the existing
+rules that used the state cell. One could, of course, argue that we could
+have avoided this problem if we had followed from the very beginning the
+good-practice style to work with an environment and a store, instead of a
+monolithic state. While that is a valid argument, highlighting the fact that
+modularity is not only a feature of the framework alone, but one should also
+follow good practices to achieve it, it is also true that if all we wanted
+in Part 2 of the tutorial was to define IMP as is, then the split of the state
+into an environment and a store is unnecessary and not really justified.

+

The first rule which used a state cell is the lookup rule:

+
rule <k> X:Id => I ...</k> <state>... X |-> I ...</state>
+
+

We modify it as follows:

+
rule <k> X:Id => I ...</k>
+     <env>... X |-> N ...</env>
+     <store>... N |-> I ...</store>
+
+

So we first match the location N of X in the environment, then the value
+I at location N in the store, and finally we rewrite X to I into the
+computation. This rule also shows an instance of a more complex
+multiset matching, where two variables (X and N) are each matched twice.

+

The assignment rule is modified quite similarly.

+
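
For instance, here is a sketch of the adapted assignment rule:

+
rule <k> X = I:Int; => . ...</k>
+     <env>... X |-> N ...</env>
+     <store>... N |-> (_ => I) ...</store>
+
+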

The variable declaration rule is trickier, though, because we need to allocate
+a fresh location in the store and bind the newly declared variable to it.
+This is quite similar to the way we allocated space for variables in
+the environment-based definition of LAMBDA++ in Part 3 of the tutorial.

+
rule <k> int (X,Xs => Xs); ...</k>
+     <env> Rho => Rho[X <- !N:Int] </env>
+     <store>... .Map => !N |-> 0 ...</store>
+
+

Note the use of the fresh (!N) variable notation above. Recall from
+the LAMBDA++ tutorial that each time the rule with fresh (!) variables is
+applied, fresh elements of corresponding sorts are generated for the fresh
+variables, distinct from all the previously generated elements; also, we
+cannot and should not assume anything about the particular element that is
+being generated, except that it is different from the previous ones.

+

kompile and krun sum.imp to see how the fresh locations have been
+generated and used. There were two fresh locations needed, for the two
+variables. Note also that a cell holding the counter has been added to the
+configuration.

+

In the next lesson we will add the semantics of variable increment, and see
+how that yields non-deterministic behaviors in programs and how to explore
+those behaviors using the K tool.

+

Go to Lesson 3, IMP++: Tagging; Superheat/Supercool Kompilation Options.

+

MOVIE (out of date) [04'06"]

+

Variable Increment; Search

+ +

In this lesson we add the semantics of variable increment. We also learn
+how to instruct the kompile tool to instrument the language model for
+exhaustive analysis.

+

The variable increment rule is self-explanatory:

+
rule <k> ++X => I +Int 1 ...</k>
+     <env>... X |-> N ...</env>
+     <store>... N |-> (I => I +Int 1) ...</store>
+
+

We can now run programs like our div.imp program introduced in Lesson 1.
+Do it.

+

The addition of increment makes the evaluation of expressions have side
+effects. That, in combination with the non-determinism allowed by the
+strictness attributes in how expression constructs evaluate their
+arguments, makes expressions in particular and programs in general have
+non-deterministic behaviors. One possible execution of the div.imp program
+assigns 1 to y's location, for example, but this program manifests several
+other behaviors, too.

+

To see all the (final-state) behaviors that a program can have, you can kompile
+the semantics with --enable-search and call the krun tool with the option
+--search. For example:

+
krun div.imp --search
+
+

In the next lesson we add input/output to our language and learn how to
+generate a model of it which behaves like an interactive interpreter!

+

Go to Lesson 4, IMP++: Semantic Lists; Input/Output Streaming.

+

MOVIE (out of date) [06'56"]

+

Semantic Lists; Input/Output Streaming

+ +

In this lesson we add semantics to the read and print IMP++ constructs.
+In doing so, we also learn how to use semantic lists and how to connect
+cells holding semantic lists to the standard input and standard output.
+This allows us to turn the K semantics into an interactive interpreter.

+

We start by adding two new cells to the configuration,

+
<in color="magenta"> .List </in>
+<out color="Orchid"> .List </out>
+
+

each holding a semantic list, initially empty. Semantic lists are
+space-separated sequences of items, each item being a term of the form
+ListItem(t), where t is a term of sort K. Recall that the semantic maps,
+which we use for states, environments, stores, etc., are sets of pairs
+t1 |-> t2, where t1 and t2 are terms of sort K. The ListItem wrapper
+is currently needed, to avoid parsing ambiguities.

+

Since we want the print statement to also print strings, we need to tell
+K that strings are results. To make it more interesting, let us also overload
+the + symbol on arithmetic expressions to also take strings and, as a
+result, to concatenate them. Since + is already strict, we only need to add
+a rule reducing the IMP addition of strings to the builtin operation +String
+which concatenates two strings.

+
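
A sketch of what this amounts to (the exact form of the result declaration may
+differ slightly in the actual definition):

+
syntax KResult ::= String
+rule Str1:String + Str2:String => Str1 +String Str2
+
+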

The semantics of read is immediate: it reads and consumes the first integer item
+from the <in/> cell; note that our read only reads integer values (it gets
+stuck if the first item in the <in/> cell is not an integer).

+
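
A sketch of the read rule just described:

+
rule <k> read() => I ...</k> <in> ListItem(I:Int) => .List ...</in>
+
+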

The semantics of print is a bit trickier. Recall that print takes an
+arbitrary number of arithmetic expression arguments, and evaluates and outputs
+each of them in order, from left to right. For example,
+print("Hello", 3/0, "Bye"); outputs "Hello" and then gets stuck on the
+illegal division by zero operation. In other words, we do not want it to
+first evaluate all its arguments and then print them, because that would miss
+outputting potentially valuable information. So the first step is to evaluate
+the first argument of print. In some sense, what we'd like to say is that
+print has the evaluation strategy strict(1). However, strictness
+attributes only work with individual language constructs, while what we need
+is an evaluation strategy that involves two constructs: print and the list
+(comma) construct of AExps. If we naively associate print with the strict(1)
+evaluation strategy, then its first and only argument, an AExps list, will
+be scheduled for evaluation and the execution will get stuck because we have
+no rules for evaluating AExps terms. If we make the list construct of
+AExps strict then we get the wrong semantics for print which first
+evaluates all its arguments and then outputs them. The correct way to
+tell K that print should evaluate only its first argument is by using a
+context declaration:

+
context print(HOLE:AExp, _);
+
+

Note the HOLE of sort AExp above. Contexts allow us to define finer-grain
+evaluation strategies than the strictness attributes, involving potentially
+more than one language construct, like above. The HOLE indicates the
+argument which is requested to be evaluated. For example, the strict
+attribute of division corresponds to two contexts:

+
context HOLE / _
+context _ / HOLE
+
+

In their full generality, contexts can be any terms with precisely one
+occurrence of a HOLE, and with arbitrary side conditions on any variables
+occurring in the context term as well as on the HOLE. See Part 6 of the
+tutorial for more examples.

+

Once evaluated, the first argument of print is expected to become either an
+integer or a string. Since we want to print both integers and string values,
+to avoid writing two rules, one for each type of value, we instead add a new
+syntactic category, Printable, which is the union of integers and strings.

+
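
Putting these pieces together, here is a sketch of the print semantics
+described above (attributes elided):

+
syntax Printable ::= Int | String
+syntax AExp ::= Printable
+rule <k> print(P:Printable,AEs => AEs); ...</k>
+     <out>... .List => ListItem(P) </out>
+rule print(.AExps); => .
+
+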

Let us kompile and krun the io.imp program discussed in Lesson 1. As
+expected, it gets stuck with a read construct on top of the computation and
+with an empty <in/> cell. To run it, we need to provide some items in the
+<in/> cell, so that the rule of read can match. Let us add

+
<in> ListItem(3) ListItem(5) ListItem(7) </in>
+
+

Now, if we krun io.imp, we can see that its execution completes normally
+(the <k/> cell is empty), that the first two items have been removed by the
+two read constructs from the <in/> cell, and that the desired strings and
+numbers have been placed into the <out/> cell.

+

Cells holding semantic lists can be connected to the standard input and
+standard output buffers, and krun knows how to handle these appropriately.
+Let us connect the <in/> cell to the standard input using the cell attribute
+stream="stdin" and the <out/> cell to the standard output with the
+attribute stream="stdout". A cell connected to the standard input will
+take its items from the standard input and block the rewriting process when
+an input is needed until an item is available in the standard input buffer.
+A cell connected to the standard output buffer will send all its items, in
+order, to the standard output.

+
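
Concretely, the two cell declarations would now look along these lines:

+
<in color="magenta" stream="stdin"> .List </in>
+<out color="Orchid" stream="stdout"> .List </out>
+
+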

Let us kompile and krun io.imp again. It prints the message and then
+waits for your input numbers. Type in two numbers, then press <Enter>.
+A message with their sum is then printed, followed by the final configuration.
+If you do not want to see the final configuration, and thus obtain a realistic
+interpreter for our language, then call krun with the option --output none:

+
krun io.imp --output none
+
+

Let us now krun our interactive sum program, which continuously reads numbers
+from the console and prints the sum of numbers up to them:

+
krun sum-io.imp
+
+

Try a few numbers, then 0. Note that the program terminated, but with junk
+in the <k/> cell, essentially with a halt statement on its top. That is, of
+course, because halt has been reached and it has no semantics yet.

+

In the next lesson we give the semantics of halt and also fix the semantics
+of blocks with local variable declarations.

+

Go to Lesson 5, IMP++: Deleting, Saving and Restoring Cell Contents.

+

MOVIE (out of date) [05'21"]

+

Deleting, Saving and Restoring Cell Contents

+ +

In this lesson we will see how easily we can delete, save and/or restore
+contents of cells in order to achieve the desired semantics of language
+constructs that involve abrupt changes of control or environments. We have
+seen similar or related K features in the LAMBDA++ language in Part 3 of the
+tutorial.

+

Let us start by adding semantics to the halt statement. As its name says,
+what we want is to abruptly terminate the execution of the program. Moreover,
+we want the program configuration to look as if the program terminated
+normally, with an empty computation cell. The simplest way to achieve that is
+to simply empty the computation cell when halt is encountered:

+
rule <k> halt; ~> _ => . </k>
+
+

It is important to mention the entire <k/> cell here, with both its membranes
+closed, to make sure that its entire contents is discarded. Note the
+anonymous variable, which matches the rest of the computation.

+

kompile and krun sum-io.imp. Note that unlike in Lesson 4, the program
+terminates with an empty computation cell now.

+

As mentioned earlier, the semantics of blocks that was inherited from IMP is
+wrong. Program locals.imp shows it very clearly: the environments are not
+correctly restored at block exits. One way to fix the problem is to take
+a snapshot of the current environment when a block is entered and save it
+somewhere, and then to restore it when the block is left. There are many
+ways to do this, which you can explore on your own: for example you can add
+a new list cell for this task where to push/pop the environment snapshots in
+a stack style; or you can use the existing environment cell for this purpose,
+but then you need to change the variable access rules to search through the
+stacked environments for the variable.

+

My preferred solution is to follow a style similar to how we saved/restored
+LAMBDA++ environments in Part 3 of the Tutorial, namely to use the already
+existing <k/> cell for such operations. More specifically, we place a
+reminder item in the computation whenever we need to take a snapshot of
+some cell contents; the item simply consists of the entire contents of the cell.
+Then, when the reminder item is reached, we restore the contents of the cell:

+
rule <k> {S} => S ~> Rho ...</k> <env> Rho </env>
+
+

The only thing left now is to give the definition of environment restore:

+
rule <k> Rho => . ...</k> <env> _ => Rho </env>
+
+

Done. kompile and krun locals.imp. Everything should work correctly now.
+Note that the rule above is different from the one we had for LAMBDA++ in
+Part 3 of the tutorial, in that here there is no value preceding the environment
+restoration item in the computation; that's because IMP++ statements,
+unlike LAMBDA++'s expressions, evaluate to nothing (.).

+

In the next lesson we will give semantics to the spawn S construct, which
+dynamically creates a concurrent shared-memory thread executing statement S.

+

Go to Lesson 6, IMP++: Adding/Deleting Cells Dynamically; Configuration Abstraction, Part 2.

+

MOVIE (out of date) [04'30"]

+

Adding/Deleting Cells Dynamically; Configuration Abstraction, Part 2

+ +

In this lesson we add dynamic thread creation and termination to IMP, and
+while doing so we learn how to define and use configurations whose structure
+can evolve dynamically.

+

Recall that the intended semantics of spawn S is to spawn a new concurrent
+thread that executes S. The new thread is passed, at creation time,
+its parent's environment, so it can share with its parent the memory
+locations that its parent had access to at creation time. No other locations
+can be shared, and no other memory sharing mechanism is available.
+The parent and the child threads can evolve unrestricted, in particular they
+can change their environments by declaring new variables or shadowing existing
+ones, can create other threads, and so on.

+

The above suggests that each thread should have its own computation and its
+own environment. This can be elegantly achieved if we group the <k/> and
+<env/> cells in a <thread/> cell in the configuration. Since at any given
+moment during the execution of a program there could be zero, one or more
+instances of such a <thread/> cell in the configuration, it is a good idea
+to declare the <thread/> cell with multiplicity * (i.e., zero, one or more):

+
<thread multiplicity="*" color="blue">
+  <k color="green"> $PGM:Stmt </k>
+  <env color="LightSkyBlue"> .Map </env>
+</thread>
+
+

This multiplicity declaration is not necessary, but it is a good idea to do
+it for several reasons:

+
    +
  1. it may help the configuration abstraction process,
    +which may in turn significantly increase the compactness and modularity of
    +your subsequent rules;
  2. it may help various analysis and execution tools,
    +for example static analyzers to give you error messages when you create cells
    +where you should not, or K compilers to improve performance by starting
    +actual concurrent hardware threads or processes corresponding to each cell
    +instance; and
  3. it may help you better understand and control the dynamics
    +of your configuration, and thus your overall semantics.
+

For good encapsulation, I also prefer to put all thread cells into one cell,
+<threads/>. This is technically unnecessary, though; to convince yourself
+that this is indeed the case, you can remove this cell once we are done with
+the semantics and everything will work without having to make any changes.

+

Before we continue, let us kompile and krun some programs that used to
+work, say sum-io.imp. In spite of the relatively radical configuration
+reorganization, those programs execute just fine! How is that possible?
+In particular, why do rules like the lookup and assignment still work,
+unchanged, in spite of the fact that the <k/> and <env/> cells are not at
+the same level with the <store/> cell in the configuration anymore?

+

Welcome to configuration abstraction, part 2. Recall that the role of
+configuration abstraction is to allow you to only write the relevant
+information in each rule, and have the compiler fill-in the obvious and boring
+details. According to the configuration that we declared for our new
+language, there is only one reasonable way to complete rules like the lookup,
+namely to place the <k/> and <env/> cells inside a <thread/> cell,
+inside a <threads/> cell:

+
rule <threads>...
+       <thread>...
+         <k> X:Id => I ...</k>
+         <env>... X |-> N ...</env>
+       ...</thread>
+     ...</threads>
+     <store>... N |-> I ...</store>  [lookup]
+
+

This is the most direct, compact and local way to complete the configuration
+context of the lookup rule. If for some reason you wanted here to match the
+<k/> cell of one thread and the <env/> cell of another thread, then you
+would need to explicitly tell K so, by mentioning the two thread cells,
+for example:

+
rule <thread>...
+         <k> X:Id => I ...</k>
+     ...</thread>
+     <thread>...
+         <env>... X |-> N ...</env>
+     ...</thread>
+     <store>... N |-> I ...</store>  [lookup]
+
+

By default, K completes rules in a greedy style. Think this way: what is the
+minimal number of changes to my rule to make it fit the declared
+configuration? That's what the K tool will do.

+

Configuration abstraction is technically unnecessary, but once you start
+using it and get a feel for how it works, it will become your best friend.
+It allows you to focus on the essentials of your semantics, and at the same
+time gives you flexibility in changing the configuration later on without
+having to touch the rules. For example, it allows you to remove the
+<threads/> cell from the configuration, if you don't like it, without
+having to touch any rule.

+

We are now ready to give the semantics of spawn:

+
rule <k> spawn S => . ...</k> <env> Rho </env>
+     (. => <thread>... <k> S </k> <env> Rho </env> ...</thread>)
+
+

Note configuration abstraction at work, again. Taking into account
+the declared configuration, and in particular the multiplicity information
+* in the <thread/> cell, the only reasonable way to complete the rule
+above is to wrap the <k/> and <env/> cells on the first line within a
+<thread/> cell, and to fill-in the ...s in the child thread with the
+default contents of the other subcells in <thread/>. In this case there
+are no other cells, so we can get rid of those ...s, but that would
+decrease the modularity of this rule: indeed, we may later on add other
+cells within <thread/> as the language evolves, for example a function
+or an exception stack, etc.

+

In theory, we should be able to write the rule above even more compactly
+and modularly, namely as

+
rule <k> spawn S => . ...</k> <env> Rho </env>
+     (. => <k> S </k> <env> Rho </env>)
+
+

Unfortunately, this currently does not work in the K tool, due to some
+known limitations of our current configuration abstraction algorithm.
+This latter rule would be more modular, because it would not even depend
+on the cell name thread. For example, we may later decide to change
+thread into agent, and we would not have to touch this rule.
+We hope this current limitation will be eliminated soon.

+

Once a thread terminates, its computation cell becomes empty. When that
+happens, we can go ahead and remove the useless thread cell:

+
rule <thread>... <k> . </k> ...</thread> => .
+
+

Let's see what we've got. kompile and krun spawn.imp.
+Note the following:

+
    +
  • The <threads/> cell is empty, so all threads terminated normally;
  • +
  • The value printed is different from the value in the store; the store value
    +is not even the one obtained if the threads executed sequentially.
  • +
+

Therefore, interesting behaviors may happen; we would like to see them all!

+
krun spawn.imp --search
+
+

However, the above does not work.

+

spawn.imp is an interactive program, which reads a number from the
+standard input. When analyzing programs exhaustively using the search option,
+krun has to disable the streaming capabilities (just think about it and you
+will realize why). The best you can do in terms of interactivity with search
+is to pipe some input to krun: krun will flush the standard input buffer
+into the cells connected to it when creating the initial configuration (will
+do that no matter whether you run it with or without the --search option).
+For example:

+
echo 23 | krun spawn.imp --search
+
+

puts 23 in the standard input buffer, which is then transferred in the
+<in/> cell as a list item, and then the exhaustive search procedure is
+invoked.

+

However, even after piping some input, the spawn.imp program outputs
+an error:

+
[Error] krun: You must pass --enable-search to kompile to be able to use krun --search with the LLVM backend
+
+

As explained in Lesson 3, by default kompile optimizes the generated
+language model for execution. In particular, it does not insert any
+backtracking markers where transition attempts should be made, so krun
+lacks the information it needs to exhaustively search the generated language
+model.

+

kompile with the search feature enabled:

+
kompile imp --enable-search
+
+

Now echo 23 | krun spawn.imp --search gives us all 12 behaviors of the
+spawn.imp program.

+

We currently have no mechanism for thread synchronization. In the next lesson
+we add a join statement, which allows a thread to wait until another completes.

+

Go to Lesson 7, IMP++: Everything Changes: Syntax, Configuration, Semantics.

+

MOVIE (out of date) [11'40"]

+

Everything Changes: Syntax, Configuration, Semantics

+ +

In this lesson we add thread joining, one of the simplest thread
+synchronization mechanisms. In doing so, we need to add unique ids
+to threads in the configuration, and to modify the syntax to allow spawn
+to return the id of the newly created thread. This gives us an opportunity
+to make several other small syntactic and semantics changes to the language,
+which make it more powerful or more compact at a rather low cost.

+

Before we start, let us first copy and modify the previous spawn.imp program
+from Lesson 1 to make use of thread joining. Recall from Lesson 6 that in some
+runs of this program the main thread completed before the child threads,
+printing a possibly undesired value of x. What we want now is to assign
+unique ids to the two spawned threads, and then to modify the main thread to
+join the two child threads before printing. To avoid adding a new type to
+the language, let's assume that thread ids are integer numbers. So we declare
+two integers, t1 and t2, and assign them the two spawn commands. In order
+for this to parse, we will have to change the syntax of spawn to be an
+arithmetic expression construct instead of a statement. Once we do that,
+we have a slight syntactic annoyance: we need to put two consecutive ;
+after the spawn assignment, one for the assignment statement inside the spawn,
+and another for the outer assignment. To avoid the two consecutive semicolons,
+we can syntactically enforce spawn to take a block as argument, instead of a
+statement. Now it looks better. The new spawn.imp program is still
+non-deterministic, because the two threads can execute in any order and even
+continue to have a data-race on the shared variable x, but we should see fewer
+behaviors when we use the join statements. If we want to fully synchronize
+this program, we can have the second thread start with a join(t1) statement.
+Then we should only see one behavior for this program.
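
For concreteness, here is a sketch of what the modified spawn.imp might
look like; the variable names, the statements inside the spawned blocks,
and the print arguments are only illustrative assumptions, not necessarily
the exact program shipped with the tutorial:

int x, t1, t2;
x = read();
t1 = spawn { x = x + 1; };
t2 = spawn { x = x + 10; };
join(t1); join(t2);
print("x = ", x, "\n");

For the fully synchronized variant mentioned above, the second spawned
block would simply begin with join(t1);.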

+

Let us now modify the language semantics. First, we move the spawn
+construct from statements to expressions, and make it take a block.
+Second, we add one more sub-cell to the thread cell in the configuration,
+<id/>, to hold the unique identifier of the thread. We want the main
+thread to have id 0, so we initialize this cell with 0. Third, we modify
+the spawn rule to generate a fresh integer identifier, which is put in the
+<id/> cell of the child thread and returned as a result of spawn in the
+parent thread. Fourth, let us add the join statement to the language,
+both syntactically and semantically. So in order for the join(T) statement
+to execute, thread T must have its computation empty. However, in order
+for this to work we have to get rid of the thread termination cleanup rule.
+Indeed, we need to store somewhere the information that thread T terminated;
+the simplest way to do it is to not remove the terminated threads. Feel free
+to experiment with other possibilities, too, here. For example, you may add
+another cell, <done/>, in which you can store all the thread ids of the
+terminated and garbage-collected threads.
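
A sketch of the modified spawn rule and the new join rule is given below;
the fresh-integer side condition and the exact join syntax are assumptions,
so your definition may spell them slightly differently:

rule <k> spawn S => T ...</k> <env> Rho </env>
     (. => <thread>... <k> S </k> <env> Rho </env> <id> T </id> ...</thread>)
  when fresh(T:Int)

rule <k> join(T); => . ...</k>
     <thread>... <k> . </k> <id> T </id> ...</thread>

The first rule returns the fresh id T in the parent thread and stores it in
the <id/> cell of the child; the second one lets join(T) dissolve only once
the thread with id T has an empty computation.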

+

Let us now kompile imp.k and convince ourselves that the new spawn.imp
+with join statements indeed has fewer behaviors than its variant without
+join statements. Also, let us convince ourselves that the fully synchronized
+variant of it indeed has only one behavior.

+

Note that now spawn, like variable increment, makes the evaluation of
+expressions have side effects. Many programming languages in fact allow
+expressions to be evaluated only for their side effects, and not for their
+value. This is typically done by simply adding a ; after the expression
+and thus turning it into a statement. For example, ++x;. Let us also
+allow arithmetic expressions in our language to be used as statements, by
+simply adding the production AExp ";" to Stmt, with evaluation strategy
+strict and with the expected semantics discarding the value of the AExp.
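
A minimal sketch of this addition (the strict attribute triggers the
evaluation of the expression, and the rule then discards the resulting
integer):

syntax Stmt ::= AExp ";"  [strict]

rule _:Int; => .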

+

Another simple change in syntax and semantics which gives our language more
+power is to remove the ; from the syntax of variable assignments and to make
+them expression constructs instead of statement constructs. This change, combined with
+the previous one, will still allow us to parse all the programs that we could
+parse before, but will also allow us to parse more programs. For example, we
+can now do sequence assignments like in C: x = y = z = 0. The semantics
+of assignment now has to return the assigned value also to the computation,
+because we want the assignment expression to evaluate to the assigned value.

+

Let us also make another change, but this time one which only makes the
+definition more compact. Instead of defining statement sequential
+composition as a binary construct for statements, let us define a new
+syntactic construct, Stmts, as whitespace-separated lists of Stmt. This
+allows us to get rid of the empty blocks, because we can change the syntax of
+blocks to {Stmts} and Stmts also allows the empty sequence of statements.
+However, we do have to make sure that .Stmts dissolves.
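
One way to sketch this change is shown below; the structural attribute
and the precise block syntax are assumptions:

syntax Stmts ::= List{Stmt,""}
syntax Block ::= "{" Stmts "}"

rule .Stmts => .                 [structural]
rule S:Stmt Ss:Stmts => S ~> Ss  [structural]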

+

In general, unless you are defining a well-established programming language,
+it is quite likely that your definitions will suffer lots of changes like the
+ones seen in this lecture. You add a new construct, which suggests changes
+in the existing syntax making in fact your language parse more programs,
+which then requires corresponding changes in the semantics, and so on.
+Also, compact definitions are desirable in general, because they are easier
+to read and easier to change if needed later.

+

In the next lesson we wrap up and document the definition of IMP++.

+

Go to Lesson 8, IMP++: Wrapping up Larger Languages.

+

Wrapping up Larger Languages

+ +

In this lesson we wrap up IMP++'s semantics and also generate its poster.
+While doing so, we also learn how to display larger configurations in order
+to make them easier to read and print.

+

Note that we rearrange the semantics a bit, to group the semantics of the old
+IMP constructs together and separate them from the new IMP++ semantics.

+

You can go even further and manually edit the generated LaTeX document.
+You typically want to do that when you want to publish your language
+definition, or parts of it, and you need to finely tune it to fit the
+editing requirements. For example, you may want to insert some negative
+spaces, etc.

+

Part 4 of the tutorial is now complete. At this moment you should know most
+of K framework's features and how to use the K tool. You can now define or
+design your own programming languages, and then execute and analyze programs.

+

MOVIE (out of date) [06'26"]

+

Part 5: Defining Type Systems

+ +

In this part of the tutorial we will show that defining type systems for
+languages is essentially no different from defining semantics. The major
+difference is that programs and fragments of programs now rewrite to their
+types, instead of to concrete values. In terms of K, we will learn how
+to use it for a certain particular but important kind of applications.

+

Imperative, Environment-Based Type Systems

+ +

In this lesson you learn how to define a type system for an imperative
+language (the IMP++ language defined in Part 4 of the tutorial), using a style
+based on type environments.

+

Let us copy the imp.k file from Part 4 of the tutorial, Lesson 7, which holds
+the semantics of IMP++, and modify it into a type system. The resulting type
+system, when executed, yields a type checker.

+

We start by defining the new strictness attributes of the IMP++ syntax.
+While doing so, remember that programs and fragments of programs now reduce
+to their types. So types will be the new results of our new (type) semantics.
+We also clean up the semantics by removing the unnecessary tags, and also
+use strict instead of seqstrict wherever possible, because strict gives
+implementations more freedom. Interestingly, note that spawn is strict now,
+because the code of the child thread should type in the current parent's type
+environment. Note that this is not always the case for threads, see for example
+SIMPLE in the languages tutorial, but it works here for our simpler IMP++.

+

From a typing perspective, the && construct is strict in both its arguments;
+its short-circuit (concrete) semantics is irrelevant for its (static) type
+system. Similarly, both the conditional and the while loop are strict
+constructs when regarded through the typing lenses.

+

Finally, the sequential composition is now sequentially strict! Indeed,
+statements are now going to reduce to their type, stmt, and it is critical
+for sequential composition to type its argument statements left-to-right;
+for example, imagine that the second argument is a variable declaration (whose
+type semantics will modify the type environment).

+

We continue by defining the new results of computations, that is, the actual
+types. In this simple imperative language, we only have a few constant types:
+int, bool, string, block and stmt.

+

We next define the new configuration, which is actually quite simple. Besides
+the <k/> cell, all we need is a type environment cell, <tenv/>, which will
+hold a map from identifiers to their types. A type environment is therefore
+like a state in the abstract domain of type values.

+

Let us next modify the semantic rules, turning them into a type system. In
+short, the idea is to reduce the basic values to their types, and then have a
+rule for each language construct reducing it to its result type whenever its
+arguments have the expected types.

+

We write the rules in the order given by the syntax declarations, to make
+sure we do not forget any construct.

+

Integers reduce to their type, int.

+

So do the strings.
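
These two rules are as simple as they sound; a sketch:

rule _:Int => int
rule _:String => string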

+

Variables are now looked up in the type environment and reduced to their type
+there. Since we only declare integer variables in IMP++, their type in tenv
+will always be int. Nevertheless, we write the rule generically, so that we
+would not have to change it later if we add other type declarations to IMP++.
+Note that we reject programs which lookup undeclared variables. Rejection,
+in this case, means rewriting getting stuck.
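
A sketch of this generic lookup rule; it simply gets stuck, and thus
rejects the program, when X is not bound in the type environment:

rule <k> X:Id => T ...</k> <tenv>... X |-> T ...</tenv>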

+

Variable increment types to int, provided the variable has type int.

+

Read types to int, because we only allow integer input.

+

Division is only allowed on integers, so it rewrites to int provided that its
+arguments rewrite to int. Note, however, that in order to write int / int,
+we have to explicitly add int to the syntax of arithmetic expressions.
+Otherwise, the K parser rightfully complains, because / was declared on
+arithmetic expressions, not on types. One simple and generic way to allow
+types to appear anywhere, is to define Type as a syntactic subcategory of all
+the other syntactic categories. Let's do it on a by-need basis, though.
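
Assuming we simply subsort Type into AExp (one possible by-need choice),
the typing rule of division is then a one-liner:

syntax AExp ::= Type

rule int / int => int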

+

Addition is overloaded, so we add two typing rules for it: one for integers
+and another for strings.
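
A sketch of the two overloaded typing rules:

rule int + int => int
rule string + string => string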

+

As discussed, spawn types to stmt provided that its argument types to
+block.

+

The assignment construct was strict(2); its typing policy is that the declared
+type of X should be identical to the type of the assigned value. Like for
+lookup, we define this rule more generically than needed for IMP++, for any
+type, not only for int.

+

The typing rules for Boolean expression constructs are in the same spirit.
+Note that we need only one rule for &&.

+

The typing of blocks is a bit trickier. First, note that we still need to
+recover the environment after the block is typed, because we do not want the
+block-local variables to be visible in the outer type environment. We recover
+the type environment only after the block-enclosed statements type; moreover,
+we also opportunistically yield a block type on the computation when we
+discard the type environment recovery item. To account for the fact that the
+block-enclosed statement can itself be a block (e.g., {{S}}), we would need an
+additional rule. Since we do not like repetition, we instead group the types
+block and stmt into one syntactic category, BlockOrStmtType, and now we
+can have only one rule. We also include BlockOrStmtType in Type, as a
+replacement for the two basic types.

+

The expression statement types as expected. Recall that we only allow
+arithmetic expressions, which type to int, to be used as statements in IMP++.

+

The conditional was declared strict in all its arguments. Its typing policy
+is that its first argument types to bool and its two branches to block.
+If that is the case, then it yields a stmt type.

+

For while, its first argument should type to bool and its second to block.
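
Assuming the conditional and the loop keep their concrete syntax from
Part 4, their typing rules might be sketched as follows:

rule if (bool) block else block => stmt
rule while (bool) block => stmt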

+

Variable declarations add new bindings to the type environment. Recall that
+we can only declare variables of integer type in IMP++.

+

The typing policy of print is that it can only print integer or string values,
+and in that case it types to stmt. Like for BlockOrStmtType, to avoid
+having two similar rules, one for int and another for string, we prefer to
+introduce an additional syntactic category, PrintableType, which includes both
+int and string types.

+

halt types to stmt; so its subsequent code is also typed.

+

join types to stmt, provided that its argument types to int.

+

Sequential composition was declared as a whitespace-separated sequentially
+strict list. Its typing policy is that all the statements in the list should
+type to stmt or block in order for the list to type to stmt. Since
+lists are maintained internally as cons-lists, this is probably the simplest
+way to do it:

+
rule .Stmts => stmt
+rule _:BlockOrStmtType Ss => Ss
+
+

Note that the first rule, which types the empty sequence of statements to stmt,
+is needed anyway, to type empty blocks {} (together with the block rule).

+

kompile imp.k and krun all the programs in Part 4 of the tutorial. They
+should all type to stmt.

+

In the next lesson we will define a substitution-based type system for LAMBDA.

+

Go to Lesson 2, Type Systems: Substitution-Based Higher-Order Type Systems.

+

MOVIE (out of date) [10'11"]

+

Substitution-Based Higher-Order Type Systems

+ +

In this lesson you learn how to define a substitution-based type system for
+a higher-order language, namely the LAMBDA language defined in Part 1 of the
+tutorial.

+

Let us copy the definition of LAMBDA from Part 1 of the tutorial, Lesson 8.
+We are going to modify it into a type system for LAMBDA.

+

Before we start, let us clarify an important detail, namely that
+our type system will yield a type checker when executed, not a type
+inferencer. In particular, we are going to change the LAMBDA syntax
+to allow us to associate a type to each declared variable. The
+constructs which declare variables are lambda, let, letrec and mu.
+The syntax of all these will therefore change.

+

Since here we are not interested in a LAMBDA semantics anymore, we take the
+freedom to eliminate the Val syntactic category, our previous results.
+Our new results are going to be the types, because programs will now reduce
+to their types.

+

As explained, the syntax of the lambda construct needs to change, to also
+declare the type of the variable that it binds. We add the new syntactic
+category Type, with the following constructs: int, bool, the function
+type (which gives it its higher-order status), and parentheses as bracket.
+Also, we make types our K results.

+

We are now ready to define the typing rules.

+

Let us start with the typing rule for lambda abstraction: lambda X : T . E
+types to the function type T -> T', where T' is the type obtained by further
+typing E[T/X]. This can be elegantly achieved by reducing the lambda
+abstraction to T -> E[T/X], provided that we extend the function type construct
+to take expressions, not only types, as arguments, and to be strict.
+This can be easily achieved by redeclaring it as a strict expression construct
+(strictness in the second argument would suffice in this example, but it is
+more uniform to define it strict overall).
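
In other words, assuming the arrow construct has indeed been redeclared as
a strict expression construct, the typing rule might be sketched as:

rule lambda X : T . E => T -> E[T/X]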

+

The typing rule for application is as simple as it can get: (T1->T2) T1 => T2.

+

Let us now give the typing rules of arithmetic and Boolean expression
+constructs. First, let us get rid of Val. Second, rewrite each value to its
+type, similarly to the type system for IMP++ in the previous lesson. Third,
+replace each semantic rule by its typing rule. Fourth, make sure you
+do not forget to subsort Type to Exp, so your rules above will parse.

+

The typing policy of the conditional statement is that its first argument
+should type to bool and its other two arguments should type to the same type
+T, which will also be the result type of the conditional. So we make the
+conditional construct strict in all its three arguments and we write the
+obvious rule: if bool then T:Type else T => T. We want a runtime check that
+the latter arguments are actually typed, so we write T:Type.

+

There is nothing special about let, except that we have to make sure we
+change its syntax to account for the type of the variable that it binds.
+This rule is a macro, so the let is desugared statically.

+

Similarly, the syntax of letrec and mu needs to change to account for the
+type of the variable that they bind. The typing of letrec remains based on
+its desugaring to mu; we have to make sure the types are also included now.

+

The typing policy of mu is that its body should type to the same type T of
+its variable, which is also the type of the entire mu expression. This can
+be elegantly achieved by rewriting it to (T -> T) E[T/X]. Recall that
+application is strict, so E[T/X] will be eventually reduced to its type.
+Then the application types correctly only if that type is also T, and in
+that case the result type will also be T.
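
The corresponding rule can therefore be sketched as:

rule mu X : T . E => (T -> T) E[T/X]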

+

kompile and krun some programs. You can, for example, take the LAMBDA
+programs from the first tutorial, modify them by adding types to their
+variable declarations, and then type check them using krun.

+

In the next lesson we will discuss an environment-based type system
+for LAMBDA.

+

Go to Lesson 3, Type Systems: Environment-Based Higher-Order Type Systems.

+

MOVIE (out of date) [6'52"]

+

Environment-Based Higher-Order Type Systems

+ +

In this lesson you learn how to define an environment-based type system for
+a higher-order language, namely the LAMBDA language defined in Part 1 of the
+tutorial.

+

The simplest and fastest way to proceed is to copy the substitution-based
+type system of LAMBDA from the previous lesson and modify it into an
+environment-based one. A large portion of the substitution-based definition
+will remain unchanged. We only have to modify the rules that use
+substitution.

+

We do not need the substitution anymore, so we can remove the require and
+import statements. The syntax of types and expressions stays unchanged, but
+we can now remove the binder tag of lambda.

+

Like in the type system of IMP++ in Lesson 1, we need a configuration that
+contains, besides the <k/> cell, a <tenv/> cell that will hold the type
+environment.

+

In an environment-based definition, unlike in a substitution-based one, we
+need to lookup variables in the environment. So let us start with the
+type lookup rule:

+
rule <k> X:Id => T ...</k> <tenv>... X |-> T ...</tenv>
+
+

The type environment is populated by the semantic rule of lambda:

+
rule <k> lambda X : T . E => (T -> E) ~> Rho ...</k>
+     <tenv> Rho => Rho[X <- T] </tenv>
+
+

So X is bound to its type T in the type environment, and then T -> E
+is scheduled for processing. Recall that the arrow type construct has been
+extended into a strict expression construct, so E will be eventually reduced
+to its type. Like in other environment-based definitions, we need to make
+sure that we recover the type environment after the computation in the scope
+of the declared variable terminates.

+

The typing rule of application does not change, so it stays as elegant as it
+was in the substitution-based definition:

+
rule (T1 -> T2) T1 => T2
+
+

So do the rules for arithmetic and Boolean constructs, and those for the
+if, and let, and letrec.

+

The mu rule needs to change, because it was previously defined using
+substitution. We modify it in the same spirit as we modified the lambda
+rule: bind X to its type in the environment, schedule its body for typing
+in its right context, and then recover the type environment.
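
A possible sketch of the modified mu rule, in the same style as the lambda
rule above (the recovery item Rho is consumed by the rule given right
below):

rule <k> mu X : T . E => (T -> T) E ~> Rho ...</k>
     <tenv> Rho => Rho[X <- T] </tenv>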

+

Finally, we give the semantics of environment recovery, making sure
+the environment is recovered only after the preceding computation is
+reduced to a type:

+

rule <k> _:Type ~> (Rho => .) ...</k> <tenv> _ => Rho </tenv>

+

The changes that we applied to the substitution-based definition were
+therefore quite systematic: each substitution invocation was replaced with
+an appropriate type environment update/recovery.

+

Go to Lesson 4, Type Systems: A Naive Substitution-Based Type Inferencer.

+

A Naive Substitution-Based Type Inferencer

+ +

In this lesson you learn how to define a naive substitution-based type
+inferencer for a higher-order language, namely the LAMBDA language
+defined in Part 1 of the tutorial.

+

Unlike in the type checker defined in Lessons 2 and 3, where we had to
+associate a type with each declared variable, a type inferencer
+attempts to infer the types of all the variables from the way those
+variables are used. Let us take a look at this program, say plus.lambda:

+
lambda x . lambda y . x + y
+
+

Since x and y are used in an integer addition context, we can infer
+that they must have the type int and the result of the addition is
+also an int, so the type of the entire expression is int -> int -> int.
+Similarly, the program if.lambda

+
lambda x . lambda y . lambda z .
+  if x then y else z
+
+

can only make sense when x has type bool and y and z have the same
+type, say t, in which case the type of the entire expression is
+bool -> t -> t -> t. Since the type t can be anything, we say that
+the type of this expression is polymorphic. That means that the code
+above can be used in different contexts, where t can be an int, a
+bool, a function type int -> int, and so on.

+

In the identity.lambda program

+
let f = lambda x . x
+in f 1
+
+

f has such a polymorphic type, which is then applied to an integer,
+so this program is type-safe and its type is int.

+

A typical polymorphic expression is the composition

+
lambda f . lambda g . lambda x .
+  g (f x)
+
+

which has the type (t1 -> t2) -> (t2 -> t3) -> (t1 -> t3), polymorphic
+in 3 types.

+

Let us now define our naive type inferencer and then we discuss more
+examples. The idea is quite simple: we conceptually do the same
+operations like we did within the type checker defined in Lesson 2,
+with two important differences:

+
  1. instead of declaring a type with each declared variable, we assume
     a fresh type for that variable; and
  2. instead of checking that the types of expressions satisfy the
     type properties of the context in which they are used, we impose
     those properties as type equality constraints. A general-purpose
     unification-based constraint solving mechanism is then used to solve
     the generated type constraints.

Let us start with the syntax, which is essentially identical to that
+of the type checker in Lesson 2, except that bound variables are not
+declared a type anymore. Also, to keep things more compact, we put
+all the Exp syntax declarations in one syntax declaration this time.

+

Before we modify the rules, let us first define our machinery for
+adding and solving constraints. First, we require and import the
+unification procedure. We do not discuss unification here, but if you
+are interested you can consult the unification.k files under
+k-distribution/include/kframework/builtin, which contain our current generic
+definition of unification, which is itself written in K. The generic unification
+provides a sort, Mgu, for most-general-unifier, an operation
+updateMgu(Mgu,T1,T2) which updates Mgu with additional constraints
+generated by forcing the terms T1 and T2 to be equal, and an operation
+applyMgu(Mgu,T) which applies Mgu to term T. For our use
+of unification here, we do not even need to know how Mgu terms are
+represented internally.

+

We define a K item construct, =, which takes two Type terms and
+enforces them to be equal by means of updating the current Mgu.
+Once the constraints are added to the Mgu, the equality dissolves
+itself. With this semantics of = in mind, we can now go ahead and
+modify the rules of the type checker systematically into rules
+for a type inferencer. The changes are self-explanatory and
+mechanical: for example, the rule

+
rule int * int => int
+
+

changes into rule

+
rule T1:Type  * T2:Type => T1 = int ~> T2 = int ~> int
+
+

generating the constraints that the two arguments of multiplication
+have the type int, and the result type is int. Recall that each type
+equality on the <k/> cell updates the current Mgu appropriately and
+then dissolves itself; thus, the above says that after imposing the
+constraints T1=int and T2=int, multiplication yields a type int.
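
For reference, the equality construct itself might be declared and given
semantics along the following lines; this is only a sketch, and the same
rule, extended to expressions, reappears in Lesson 6:

syntax KItem ::= Type "=" Type

rule <k> T:Type = T':Type => . ...</k>
     <mgu> Theta:Mgu => updateMgu(Theta,T,T') </mgu>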

+

As mentioned above, since types of variables are not declared anymore,
+but inferred, we have to generate a fresh type for each variable at its
+declaration time, and then generate appropriately constraints for it.
+For example, the type semantics of lambda and mu become:

+
rule lambda X . E => T -> E[T/X]  when fresh(T:Type)
+rule mu X . E => (T -> T) E[T/X]  when fresh(T:Type)
+
+

that is, we add a condition stating that the previously declared type
+is now a fresh one. This type will be further constrained by how the
+variable X is being used within E.

+

Interestingly, the previous typing rule for lambda application is not
+powerful enough anymore. Indeed, since types are not given anymore,
+it may very well be the case that the inferred type of the first
+argument of the application construct is not yet a function type
+(remember, for example, the program composition.lambda above). What
+we have to do is to enforce it to be a function type, by means of
+fresh types and constraints. We can introduce a fresh type for the
+result of the application, and then write the expected rule as
+follows:

+
rule T1:Type T2:Type => T1 = (T2 -> T) ~> T  when fresh(T:Type)
+
+

The conditional requires that its first argument is a bool and its
+second and third arguments have the same type, which is also the
+result type.
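
Following the same pattern, its rule might be sketched as follows (the
variable names are arbitrary):

rule if T:Type then T1:Type else T2:Type => T = bool ~> T1 = T2 ~> T1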

+

The macros do not change, in particular let is desugared into lambda
+application. We will next see that this is a significant restriction,
+because it limits the polymorphism of our type system.

+

We are done. We have a working type inferencer for LAMBDA.

+

Let's kompile it and krun the programs above. They all work as
+expected. Let us also try some additional programs, to push it to its
+limits.

+

First, let us test mu by means of a letrec example:

+
letrec f x = 3
+in f
+
+

We can also try all the programs that we had in our first tutorial, on
+lambda, for example the factorial.imp program:

+
letrec f x = if x <= 1 then 1 else (x * (f (x + -1)))
+in (f 10)
+
+

Those programs are simple enough that they should all work as
+expected with our naive type inferencer here.

+

Let us next try to type some tricky programs, which involve more
+complex and indirect type constraints.

+

tricky-1.lambda:

+
lambda f . lambda x . lambda y . (
+  (f x y) + x + (let x = y in x)
+)
+
+

tricky-2.lambda:

+
lambda x .
+  let f = lambda y . if true then y else x
+  in (lambda x . f 0)
+
+

tricky-3.lambda:

+
lambda x . let f = lambda y . if true then x 7 else x y
+           in f
+
+

tricky-4.lambda:

+
lambda x . let f = lambda x . x
+           in let d = (f x) + 1
+              in x
+
+

tricky-5.lambda:

+
lambda x . let f = lambda y . x y
+           in let z = x 0 in f
+
+

It is now time to see the limitations of this naive type inferencer.
+Consider the program

+
let id = lambda x . x
+in if (id true) then (id 1) else (id 2)
+
+

Our type inferencer fails gracefully with a clash in the <mgu/> cell
+between int and bool. Indeed, the desugaring macro of let turns it
+into a lambda and an application, which further enforce id to have a
+type of the form t -> t for some fresh type t. The first use of id
+in the condition of if will then constrain t to be bool, while the
+other uses in the two branches will enforce t to be int. Thus the
+clash in the <mgu/> cell.

+

Similarly, the program

+
let id = lambda x . x
+in id id
+
+

yields a different kind of conflict: if id has type t -> t, in order
+to apply id to itself it must be the case that its argument, t, equals
+t -> t. These two type terms cannot be unified because there is a
+circular dependence on t, so we get a cycle in the <mgu/> cell.

+

Both limitations above will be solved when we change the semantics of
+let later on, to account for the desired polymorphism.

+

Before we conclude this lesson, let us see one more interesting
+example, where the lack of let-polymorphism leads not to a type error,
+but to a less generic type:

+
let f1 = lambda x . x in
+  let f2 = f1 in
+    let f3 = f2 in
+      let f4 = f3 in
+        let f5 = f4 in
+          if (f5 true) then f2 else f3
+
+

Our current type inferencer will infer the type bool -> bool for the
+program above. Nevertheless, since all functions f1, f2, f3, f4, f5
+are the identity function, which is polymorphic, we would expect the
+entire program to type to the same polymorphic identity function type.

+

This limitation will be also addressed when we define our
+let-polymorphic type inferencer.

+

Before that, in the next lesson we will show how easily we can turn
+the naive substitution-based type inferencer discussed in this lesson
+into a similarly naive, but environment-based type inferencer.

+

Go to Lesson 5, Type Systems: A Naive Environment-Based Type Inferencer.

+

A Naive Environment-Based Type Inferencer

+ +

In this lesson you learn how to define a naive environment-based type
+inferencer for a higher-order language. Specifically, we take the
+substitution-based type inferencer for LAMBDA defined in Lesson 4 and
+turn it into an environment-based one.

+

Recall from Lesson 3, where we defined an environment-based type
+checker for LAMBDA based on the substitution-based one in Lesson 2,
+that the transition from a substitution-based definition to an
+environment-based one was quite systematic and mechanical: each
+substitution occurrence E[T/X] is replaced by E, but at the same time
+the variable X is bound to type T in the type environment. One benefit
+of using type environments instead of substitution is that we replace
+a linear complexity operation (the substitution) with a constant
+complexity one (the variable lookup).

+

There is not much left to say which has not been already said in
+Lesson 3: we remove the unnecessary binder annotations for the
+variable binding operations, then add a <tenv/> cell to the
+configuration to hold the type environment, then add a new rule for
+variable lookup, and finally apply the transformation of substitutions
+E[T/X] into E as explained above.
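
For example, the variable lookup rule and the new lambda rule might be
sketched as below, combining the Lesson 3 lambda rule with the fresh-type
condition of Lesson 4; the environment recovery item Rho is handled exactly
as in Lesson 3:

rule <k> X:Id => T ...</k> <tenv>... X |-> T ...</tenv>

rule <k> lambda X . E => (T -> E) ~> Rho ...</k>
     <tenv> Rho => Rho[X <- T] </tenv>
  when fresh(T:Type)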

+

The resulting type inferencer should now work exactly the same way as
+the substitution-based one, except, of course, that the resulting
+configurations will contain a <tenv/> cell now.

+

As sanity check, let us consider two more LAMBDA programs that test
+the static scoping nature of the inferencer. We do that because
+faulty environment-based definitions often have this problem. The
+program

+
let x = 1
+in let f = lambda a . x
+   in let x = true
+      in f 3
+
+

should type to int, not to bool, and so it does. Similarly, the
+program

+
let y = 0
+in letrec f x = if x <= 0
+                then y
+                else let y = true
+                     in f (x + 1)
+   in f 1
+
+

should also type to int, not bool, and so it does, too.

+

The type inferencer defined in this lesson has the same limitations,
+in terms of polymorphism, as the one in Lesson 4. In the next
+lesson we will see how it can be parallelized, and in further lessons
+how to make it polymorphic.

+

Go to Lesson 6, Type Systems: Parallel Type Checkers/Inferencers.

+

Parallel Type Checkers/Inferencers

+ +

In this lesson you learn how to define parallel type checkers or
+inferencers. For the sake of a choice, we will parallelize the one in
+the previous lesson, but the ideas are general. We are using the same
+idea to define type checkers for other languages in the K tool
+distribution, such as SIMPLE and KOOL.

+

The idea is in fact quite simple. Instead of one monolithic typing
+task, we generate many smaller tasks, which can be processed in
+parallel. We use the same approach to define parallel semantics as we
+used for threads in IMP++ in Part 4 of the tutorial, that is, we add a
+cell holding all the parallel tasks, making sure we declare the cell
+holding a task with multiplicity *. For the particular type
+inferencer that we chose here, the one in Lesson 5, each task will
+hold an expression to type together with a type environment (so it
+knows where to lookup its free variables). We have the following
+configuration then:

+
configuration <tasks color="yellow">
+                <task color="orange" multiplicity="*">
+                  <k color="green"> $PGM:Exp </k>
+                  <tenv color="red"> .Map </tenv>
+                </task>
+              </tasks>
+              <mgu color="blue"> .Mgu </mgu>
+
+

Now we have to take each typing rule we had before and change it to
+yield parallel typing. For example, our rule for typing
+multiplication was the following in Lesson 5:

+
rule T1:Type * T2:Type => T1 = int ~> T2 = int ~> int
+
+

Since * was strict, its two arguments eventually type, and once that
+happens the rule above fires. Unfortunately, the strictness of
+multiplication makes the typing of the two expressions sequential in
+our previous definition. To avoid typing the two expressions
+sequentially and instead generating two parallel tasks, we remove the
+strict attribute of multiplication and replace the rule above with the
+following:

+
rule <k> E1 * E2 => int ...</k> <tenv> Rho </tenv>
+     (. => <task> <k> E1 = int </k> <tenv> Rho </tenv> </task>
+           <task> <k> E2 = int </k> <tenv> Rho </tenv> </task>)
+
+

Therefore, we generate two tasks for typing E1 and E2 in the same type
+environment as the current task, and let the current task continue by
+simply optimistically reducing E1*E2 to its expected result type, int.
+If E1 or E2 does not type to int, then either its corresponding
+task will get stuck or the <mgu/> cell will end up with a clash or a cycle,
+so the program will not type overall, in spite of the fact that we
+allowed the task containing the multiplication to continue. This is
+how we obtain maximal parallelism in this case.

+

Before we continue, note that the new tasks hold equalities in them,
+where one of its arguments is an expression, while previously the
+equality construct was declared to take types. What we want now is
+for the equality construct to possibly take any expressions, and first
+type them and then generate the type constraint like before. This can
+be done very easily by just extending the equality construct to
+expressions and declaring it strict:

+
syntax KItem ::= Exp "=" Exp  [strict]
+
+

Unlike before, where we only passed types to the equality construct,
+we now need a runtime check that its arguments are indeed types before
+we can generate the updateMgu command:

+
rule <k> T:Type = T':Type => . ...</k>
+     <mgu> Theta:Mgu => updateMgu(Theta,T,T') </mgu>
+
+

Like before, an equality will therefore update the <mgu/> cell and then
+it dissolves itself, letting the <k/> cell in the corresponding task
+empty. Such empty tasks are unnecessary, so they can be erased:

+
rule <task>... <k> . </k> ...</task> => .
+
+

We can now follow the same style as for multiplication to write the
+parallel typing rules of the other arithmetic constructs, and even for
+the conditional.

+

To parallelize the typing of lambda we generate two fresh types, one
+for the variable and one for the body, and make sure that we generate
+the correct type constraint and environment in the body task:

+
rule <k> lambda X . E => Tx -> Te ...</k> <tenv> TEnv </tenv>
+     (. => <task> <k> E = Te </k> <tenv> TEnv[Tx/X] </tenv> </task>)
+  when fresh(Tx:Type) andBool fresh(Te:Type)
+
+

Note that with the above we no longer need to change and then
+recover the type environment of the current task.

+

For function application we also need to generate two fresh types:

+
rule <k> E1 E2 => T ...</k> <tenv> Rho </tenv>
+     (. => <task> <k> E1 = T2 -> T </k> <tenv> Rho </tenv> </task>
+           <task> <k> E2 = T2 </k> <tenv> Rho </tenv> </task>)
+  when fresh(T2:Type) andBool fresh(T:Type)
+
+

The only rule left is that of mu X . E. In this case we only need one
+fresh type, because X, E and mu X . E have all the same type:

+
rule <k> mu X . E => T ...</k>  <tenv> TEnv </tenv>
+     (. => <task> <k> E = T </k> <tenv> TEnv[T/X] </tenv> </task>)
+  when fresh(T:Type)
+
+

We do not need the type environment recovery operation, so we delete it.

+

We can now kompile and krun all the programs that we typed in Lesson 5.
+Everything should work.

+

In this lesson we only aimed at parallelizing the type inferencer in
+Lesson 5, not to improve its expressiveness; it still has the same
+limitations in terms of polymorphism. The next lessons are dedicated
+to polymorphic type inferencers.

+

Go to Lesson 7, Type Systems: A Naive Substitution-based Polymorphic Type Inferencer.

+

A Naive Substitution-based Polymorphic Type Inferencer

+ +

In this lesson you learn how little it takes to turn a naive monomorphic
+type inferencer into a naive polymorphic one, basically only changing
+a few characters. In terms of the K framework, you will learn that
+you can have complex combinations of substitutions in K, both over
+expressions and over types.

+

Let us start directly with the change. All we have to do is to take
+the LAMBDA type inferencer in Lesson 4 and only change the macro

+
rule let X = E in E' => (lambda X . E') E  [macro]
+
+

as follows:

+
rule let X = E in E' => E'[E/X]  [macro]
+
+

In other words, we are inlining the beta-reduction rule of
+lambda-calculus within the original rule. In terms of typing,
+the above forces the type inferencer to type E in place for each
+occurrence of X in E'. Unlike in the first rule, where X had to get
+only one type which satisfied the constraints of all X's occurrences in
+E', we now never associate any type to X at all.

+

Let us kompile and krun some examples. Everything that worked with
+the type inferencer in Lesson 4 should still work here, although the
+types of some programs can now be more general. For example, reconsider
+the nested-lets.lambda program

+
let f1 = lambda x . x in
+  let f2 = f1 in
+    let f3 = f2 in
+      let f4 = f3 in
+        let f5 = f4 in
+          if (f5 true) then f2 else f3
+
+

which was previously typed to bool -> bool. With the new rule above,
+the sequence of lets is iteratively eliminated and we end up with the
+program

+
if (lambda x . x) true then (lambda x . x) else (lambda x . x)
+
+

which now types (with both type inferencers) to a type of the form
+t -> t, for some type variable t, which is more general than the
+previous bool -> bool type that the program typed to in Lesson 4.

+

We can also now type programs that were not typable before, such as

+
let id = lambda x . x
+in if (id true) then (id 1) else (id 2)
+
+

and

+
let id = lambda x . x
+in id id
+
+

Let us also test it on some trickier programs, also not typable
+before, such as

+
let f = lambda x . x
+in let g = lambda y . f y
+   in g g
+
+

which gives us a type of the form t -> t for some type variable t,
+and as

+
let f = let g = lambda x . x
+        in let h = lambda x . lambda x . (g g g g)
+           in h
+in f
+
+

which types to t1 -> t2 -> t3 -> t3 for some type variables t1, t2, t3.

+

Here is another program which was not typable before, which is
+trickier than the others above in that a lambda-bound variable appears
+free in a let-bound expression:

+
lambda x . (
+  let y = lambda z . x
+  in if (y true) then (y 1) else (y (lambda x . x))
+)
+
+

The above presents no problem now, because once lambda z . x gets
+substituted for y we get a well-typed expression which yields that x
+has the type bool, so the entire expression types to bool -> bool.

+

The cheap type inferencer that we obtained above therefore works as
+expected. However, it has two problems which justify a more advanced
+solution. First, substitution is typically considered an elegant
+mathematical instrument which is not too practical in implementations,
+so an implementation of this type inferencer will likely be based on
+type environments anyway. Additionally, we mix two kinds of
+substitutions in this definition, one where we substitute types and
+another where we substitute expressions, which can only make things
+harder to implement efficiently. Second, our naive substitution of E
+for X in E' can yield an exponential explosion in size of the original
+program. Consider, for example, the following classic example which
+is known to generate a type whose size is exponential in the size of
+the program (and is thus used as an argument for why let-polymorphic
+type inference is exponential in the worst-case):

+
let f00 = lambda x . lambda y . x in
+  let f01 = lambda x . f00 (f00 x) in
+    let f02 = lambda x . f01 (f01 x) in
+      let f03 = lambda x . f02 (f02 x) in
+        let f04 = lambda x . f03 (f03 x) in
+          // ... you can add more nested lets here
+          f04
+
+

The particular instance of the pattern above generates a type which
+has 17 type variables! The desugaring of each let doubles the size of
+the program and of its resulting type. While such programs are unlikely
+to appear in practice, it is often the case that functions can
+be quite complex and large while their type ends up being quite simple,
+so we should simply avoid retyping each function every time it is
+used.

+

This is precisely what we will do next. Before we present the classic
+let-polymorphic type inferencer in Lesson 9, which is based on
+environments, we first quickly discuss in Lesson 8 an intermediate
+step, namely a naive environment-based variant of the inferencer
+defined here.

+

Go to Lesson 8, Type Systems: A Naive Environment-based Polymorphic Type Inferencer.

+

A Naive Environment-based Polymorphic Type Inferencer

+ +

In this short lesson we discuss how to quickly turn a naive
+environment-based monomorphic type inferencer into a naive let-polymorphic
+one. Like in the previous lesson, we only need to change a few
+characters. In terms of the K framework, you will learn how to have
+both environments and substitution in the same definition.

+

Like in the previous lesson, all we have to do is to take the LAMBDA
+type inferencer in Lesson 5 and only change the rule

+
rule let X = E in E' => (lambda X . E') E
+
+

as follows:

+
rule let X = E in E' => E'[E/X]
+
+

The reasons why this works have already been explained in the previous
+lesson, so we do not repeat them here.

+

Since our new let rule uses substitution, we have to require the
+substitution module at the top and also import SUBSTITUTION in the
+current module, besides the already existing UNIFICATION.

+

Everything which worked with the type inferencer in Lesson 7 should
+also work now. Let us only try the exponential type example,

+
let f00 = lambda x . lambda y . x in
+  let f01 = lambda x . f00 (f00 x) in
+    let f02 = lambda x . f01 (f01 x) in
+      let f03 = lambda x . f02 (f02 x) in
+        let f04 = lambda x . f03 (f03 x) in
+          f04
+
+

As expected, this gives us precisely the same type as in Lesson 7.

+

So the only difference between this type inferencer and the one in
+Lesson 7 is that substitution is only used for LAMBDA-to-LAMBDA
+transformations, but not for infusing types within LAMBDA programs.
+Thus, the syntax of LAMBDA programs is preserved intact, which some
+may prefer. Nevertheless, this type inferencer is still expensive and
+wasteful, because the let-bound expression is typed over and over
+again in each place where the let-bound variable occurs.

+

In the next lesson we will discuss a type inferencer based on the
+classic Damas-Hindley-Milner type system, which maximizes the reuse of
+typing work by means of parametric types.

+

Go to Lesson 9, Type Systems: Let-Polymorphic Type Inferencer (Damas-Hindley-Milner).

+

Let-Polymorphic Type Inferencer (Damas-Hindley-Milner)

+ +

In this lesson we discuss a type inferencer based on what we call today
+the Damas-Hindley-Milner type system, which is at the core of many
+modern functional programming languages. The first variant of it was
+proposed by Hindley in 1969, then, interestingly, Milner rediscovered
+it in 1978 in the context of the ML language. Damas formalized it as
+a type system in his PhD thesis in 1985. More specifically, our type
+inferencer here, like many others as well as many implementations of
+it, follows more closely the syntax-driven variant proposed by Clement
+in 1987.

+

In terms of K, we will see how easily we can turn one definition which
+is considered naive (our previous type inferencer in Lesson 8) into a
+definition which is considered advanced. All we have to do is to
+change one existing rule (the rule of the let binder) and to add a new
+one. We will also learn some new predefined features of K, which make
+the above possible.

+

The main idea is to replace the rule

+
rule let X = E in E' => E'[E/X]
+
+

which creates potentially many copies of E within E' with a rule
+which types E once and then reuses that type in each place where X
+occurs free in E'. The simplest K way to type E is to declare the
+let construct strict(2). Now we cannot simply bind X to the type
+of E, because we would obtain a variant of the naive type inferencer
+we already discussed, together with its limitations, in Lesson 5 of this
+tutorial. The trick here is to parameterize the type of E in all its
+unconstrained fresh types, and then create fresh copies of those
+parameters in each free occurrence of X in E'.

+

Let us discuss some examples, before we go into the technical details.
+Consider the first let-polymorphic example which failed to be typed
+with our first naive type-inferencer:

+
let id = lambda x . x
+in if (id true) then (id 1) else (id 2)
+
+

When typing lambda x . x, we get a type of the form t -> t, for some
+fresh type t. Instead of assigning this type to id as we did in the
+naive type inferencers, we now first parametrize this type in its
+fresh variable t, written

+
(forall t) t -> t
+
+

and then bind id to this parametric type. The intuition for the
+parameter is that it can be instantiated with any other type, so this
+parametric type stands, in fact, for infinitely many non-parametric
+types. This is similar to what happens in formal logic proof systems,
+where rule schemas stand for infinitely many concrete instances of
+them. For this reason, parametric types are also called type schemas.

+

Now each time id is looked up within the let-body, we create a fresh
+copy of the parameter t, which can this way be independently
+constrained by each local context. Let's suppose that the three id
+lookups yield the types t1 -> t1, t2 -> t2, and respectively t3 -> t3.
+Then t1 will be constrained to be bool, and t2 and t3 to be int,
+so we can now safely type the program above to int.

+

Therefore, a type schema comprises a summary of all the typing work
+that has been done for typing the corresponding expression, and an
+instantiation of its parameters with fresh copies represents an
+elegant way to reuse all that typing work.

+

There are some subtleties regarding what fresh types can be made
+parameters. Let us consider another example, discussed as part of
+Lesson 7 on naive let-polymorphism:

+
lambda x . (
+  let y = lambda z . x
+  in if (y true) then (y 1) else (y (lambda x . x))
+)
+
+

This program should type to bool -> bool, as explained in Lesson 7.
+The lambda construct will bind x to some fresh type tx. Then the
+let-bound expression lambda z . x types to tz -> tx for some
+additional fresh type tz. The question now is what should the
+parameters of this type be when we generate the type schema? If we
+naively parameterize in all fresh variables, that is in both tz and
+tx obtaining the type schema (forall tz,tx) tz -> tx, then there will
+be no way to infer that the type of x, tx, must be a bool! The
+inferred type of this expression would then wrongly be tx -> t for
+some fresh types tx and t. That's because the parameters are replaced
+with fresh copies in each occurrence of y, and thus their relationship
+to the original x is completely lost. This tells us that we cannot
+parameterize in all fresh types that appear in the type of the
+let-bound expression. In particular, we cannot parameterize in those
+which some variables are already bound to in the current type
+environment (like x is bound to tx in our example above).
+In our example, the correct type schema is (forall tz) tz -> tx,
+which now allows us to correctly infer that tx is bool.

+

Let us now discuss another example, which should fail to type:

+
lambda x .
+  let f = lambda y . x y
+  in if (f true) then (f 1) else (f 2)
+
+

This should fail to type because lambda y . x y is equivalent to x,
+so the conditional imposes the conflicting constraints that x should be
+a function whose argument is either a bool or an int. Let us try to
+type it using our currently informal procedure. Like in the previous
+example, x will be bound to a fresh type tx. Then the let-bound
+expression types to ty -> tz with ty and tz fresh types, adding also
+the constraint tx = ty -> tz. What should the parameters of this type
+be? If we ignore the type constraint and simply make both ty and tz
+parameters because no variable is bound to them in the type
+environment (indeed, the only variable x in the type environment is
+bound to tx), then we can wrongly type this program to tx -> tz
+following a reasoning similar to the one in the example above.
+In fact, in this example, neither ty nor tz can be a parameter, because
+both are constrained by tx.

+

The examples above tell us two things: first, that we have to take the
+type constraints into account when deciding the parameters of the
+schema; second, that after applying the most-general-unifier solution
+given by the type constraints everywhere, the remaining fresh types
+appearing anywhere in the type environment are consequently constrained
+and cannot be turned into parameters. Since the type environment can in
+fact also hold type schemas, which already bind some types, we only need
+to ensure that none of the fresh types appearing free anywhere in the
+type environment are turned into parameters of type schemas.

+

Thanks to generic support offered by the K tool, we can easily achieve
+all the above as follows.

+

First, add syntax for type schemas:

+
syntax TypeSchema ::= "(" "forall" Set ")" Type  [binder]
+
+

The definition below will be given in such a way that the Set argument
+of a type schema will always be a set of fresh types. We also declare
+this construct to be a binder, so that we can make use of the generic
+free variable function provided by the K tool.

+

We now replace the old rule for let

+
rule let X = E in E' => E'[E/X]
+
+

with the following rule:

+
rule <k> let X = T:Type in E => E ~> tenv(TEnv) ...</k>
+     <mgu> Theta:Mgu </mgu>
+     <tenv> TEnv
+      => TEnv[(forall freeVariables(applyMgu(Theta, T)) -Set
+                      freeVariables(applyMgu(Theta, values TEnv))
+              ) applyMgu(Theta, T) / X]
+     </tenv>
+
+

So the type T of E is being parameterized and then bound to X in the
+type environment. The current mgu Theta, which comprises all the type
+constraints accumulated so far, is applied to both T and the types in
+the type environment. The remaining fresh types in T which do not
+appear free in the type environment are then turned into type parameters.
+The function freeVariables returns, as expected, the free variables of
+its argument as a Set; this is why we declared the type schema to be a
+binder above.

+

Now a LAMBDA variable in the type environment can be bound to either a
+type or a type schema. In the first case, the previous rule we had
+for variable lookup can be reused, but we have to make sure we check
+that T there is of sort Type (adding a sort membership, for example).
+In the second case, as explained above, we have to create fresh copies
+of the parameters. This can be easily achieved with another
+predefined K function, as follows:

+
rule <k> X:Id => freshVariables(Tvs,T) ...</k>
+     <tenv>... X |-> (forall Tvs) T ...</tenv>
+
+

Indeed, freshVariables takes a set of variables and a term, and returns the
+same term but with each of the given variables replaced by a fresh copy.
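
For the first case, the reused lookup rule with the sort membership
mentioned above might be sketched as:

rule <k> X:Id => T ...</k> <tenv>... X |-> T:Type ...</tenv>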

+

The operations freeVariables and freshVariables are useful in many K
+definitions, so they are predefined in module substitution.k.

+

Our definition of this let-polymorphic type inferencer is now
+complete. To test it, kompile it and then krun all the LAMBDA
+programs discussed since Lesson 4. They should all work as expected.

+

K Languages

+ +

Here we present several "real-world" language examples. These languages
+demonstrate many of the features you would expect to find in a full-fledged
+programming language.

+
  • SIMPLE: Imperative programming language with threads.
  • KOOL: SIMPLE extended with object-oriented features.
  • FUN: A functional language with algebraic data-types and pattern-matching.
  • LOGIK: A logical programming language based on clause unification.

SIMPLE — Untyped

+ +

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+ +

This is the K semantic definition of the untyped SIMPLE language.
+SIMPLE is intended to be a pedagogical and research language that captures
+the essence of the imperative programming paradigm, extended with several
+features often encountered in imperative programming languages.
+A program consists of a set of global variable declarations and
+function definitions. Like in C, function definitions cannot be
+nested and each program must have one function called main,
+which is invoked when the program is executed. To make it more
+interesting and to highlight some of K's strengths, SIMPLE includes
+the following features in addition to the conventional imperative
+expression and statement constructs:

+
    +
  • +

    Multidimensional arrays and array references. An array evaluates
    +to an array reference, which is a special value holding a location (where
    +the elements of the array start) together with the size of the array;
    +the elements of the array can be array references themselves (particularly
    +when the array is multi-dimensional). Array references are ordinary values,
    +so they can be assigned to variables and passed/received by functions.

    +
  • +
  • +

    Functions and function values. Functions can have zero or
    +more parameters and can return abruptly using a return statement.
    +SIMPLE follows a call-by-value parameter passing style, with static scoping.
    +Function names evaluate to function abstractions, which hereby become ordinary
+values in the language, just like array references.

    +
  • +
  • +

    Blocks with locals. SIMPLE variables can be declared
    +anywhere, their scope being from the place where they are declared
    +until the end of the most nested enclosing block.

    +
  • +
  • +

    Input/Output. The expression read() evaluates to the
    +next value in the input buffer, and the statement write(e)
    +evaluates e and outputs its value to the output buffer. The
    +input and output buffers are lists of values.

    +
  • +
  • +

    Exceptions. SIMPLE has parametric exceptions (the value thrown as
    +an exception can be caught and bound).

    +
  • +
  • +

    Concurrency via dynamic thread creation/termination and
    +synchronization. One can spawn a thread to execute any statement.
    +The spawned thread shares with its parent its environment at creation time.
    +Threads can be synchronized via a join command which blocks the current thread
    +until the joined thread completes, via re-entrant locks which can be acquired
    +and released, as well as through rendezvous commands.

    +
  • +
+

Like in many other languages, some of SIMPLE's constructs can be
+desugared into a smaller set of basic constructs. We do that at the end
+of the syntax module, and then we only give semantics to the core constructs.

+

Note: This definition is commented slightly more than others, because it is
+intended to be one of the first non-trivial definitions that the new
+user of K sees. We recommend that beginner users first check the
+language definitions discussed in the K tutorial.

+
module SIMPLE-UNTYPED-SYNTAX
+  imports DOMAINS-SYNTAX
+

Syntax

+ +

We start by defining the SIMPLE syntax. The language constructs discussed
+above have the expected syntax and evaluation strategies. Recall that in K
+we annotate the syntax with appropriate strictness attributes, thus giving
+each language construct the desired evaluation strategy.

+

Identifiers

+ +

Recall from the K tutorial that identifiers are builtin and come under the
+syntactic category Id. The special identifier for the function
+main belongs to all programs, and plays a special role in the semantics,
+so we declare it explicitly. This would not be necessary if the identifiers
+were all included automatically in semantic definitions, but that is not
+possible because of parsing reasons (e.g., K variables used to match
+concrete identifiers would then be ambiguously parsed as identifiers). They
+are only included in the parser generated to parse programs (and used by the
+kast tool). Consequently, we have to explicitly declare all the
+concrete identifiers that play a special role in the semantics, like
+main below.

+
  syntax Id ::= "main" [token]
+

Declarations

+ +

There are two types of declarations: for variables (including arrays) and
+for functions. We are going to allow declarations of the form
+var x=10, a[10,10], y=23;, which is why we allow the var
+keyword to take a list of expressions. The non-terminals used in the two
+productions below are defined shortly.

+
  syntax Stmt ::= "var" Exps ";"
+                | "function" Id "(" Ids ")" Block
+

Expressions

+ +

The expression constructs below are standard. Increment (++) takes
+an expression rather than a variable because it can also increment an array
+element. Recall that the syntax we define in K is what we call the syntax
+of the semantics
: while powerful enough to define non-trivial syntaxes
+(thanks to the underlying SDF technology that we use), we typically refrain
+from defining precise syntaxes, that is, ones which accept precisely the
+well-formed programs (that would not be possible anyway in general). That job
+is deferred to type systems, which can also be defined in K. In other words,
+we are not making any effort to guarantee syntactically that only variables
+or array elements are passed to the increment construct, we allow any
+expression. Nevertheless, we will only give semantics to those, so expressions
+of the form ++5, which parse (but which will be rejected by our type
+system in the typed version of SIMPLE later), will get stuck when executed.
+Arrays can be multidimensional and can hold other arrays, so their
+lookup operation takes a list of expressions as argument and applies to an
+expression (which can in particular be another array lookup), respectively.
+The construct sizeOf gives the size of an array in number of elements
+of its first dimension. Note that almost all constructs are strict. The only
+constructs which are not strict are the increment (since its first argument
+gets updated, so it cannot be evaluated), the input read which takes no
+arguments so strictness is irrelevant for it, the logical and and or constructs
+which are short-circuited, the thread spawning construct which creates a new
+thread executing the argument expression and returns its unique identifier to
+the creating thread (so it cannot just evaluate its argument in place), and the
+assignment which is only strict in its second argument (for the same reason as
+the increment).

+
  syntax Exp ::= Int | Bool | String | Id
+               | "(" Exp ")"             [bracket]
+               | "++" Exp
+               > Exp "[" Exps "]"        [strict]
+               > Exp "(" Exps ")"        [strict]
+               | "-" Exp                 [strict]
+               | "sizeOf" "(" Exp ")"    [strict]
+               | "read" "(" ")"
+               > left:
+                 Exp "*" Exp             [strict, left]
+               | Exp "/" Exp             [strict, left]
+               | Exp "%" Exp             [strict, left]
+               > left:
+                 Exp "+" Exp             [strict, left]
+               | Exp "-" Exp             [strict, left]
+               > non-assoc:
+                 Exp "<" Exp             [strict, non-assoc]
+               | Exp "<=" Exp            [strict, non-assoc]
+               | Exp ">" Exp             [strict, non-assoc]
+               | Exp ">=" Exp            [strict, non-assoc]
+               | Exp "==" Exp            [strict, non-assoc]
+               | Exp "!=" Exp            [strict, non-assoc]
+               > "!" Exp                 [strict]
+               > left:
+                 Exp "&&" Exp            [strict(1), left]
+               | Exp "||" Exp            [strict(1), left]
+               > "spawn" Block
+               > Exp "=" Exp             [strict(2), right]
+

We also need comma-separated lists of identifiers and of expressions.
+Moreover, we want them to be strict, that is, to evaluate to lists of results
+whenever requested (e.g., when they appear as strict arguments of
+the constructs above).

+
  syntax Ids  ::= List{Id,","}           [overload(Exps)]
+  syntax Exps ::= List{Exp,","}          [overload(Exps), strict]  // automatically hybrid now
+  syntax Exps ::= Ids
+  syntax Val
+  syntax Vals ::= List{Val,","}          [overload(Exps)]
+  syntax Bottom
+  syntax Bottoms ::= List{Bottom,","}    [overload(Exps)]
+  syntax Ids ::= Bottoms
+

Statements

+ +

Most of the statement constructs are standard for imperative languages.
+We syntactically distinguish between empty and non-empty blocks, because we
+chose Stmts not to be a (;-separated) list of
+Stmt. Variables can be declared anywhere inside a block, their scope
+ending with the block. Expressions are allowed to be used for their side
+effects only (followed by a semicolon ;). Functions are allowed
+to abruptly return. The exceptions are parametric, i.e., one can throw a value
+which is bound to the variable declared by catch. Threads can be
+dynamically created and terminated, and can synchronize with join,
+acquire, release and rendezvous. Note that the
+strictness attributes obey the intended evaluation strategy of the various
+constructs. In particular, the if-then-else construct is strict only in its
+first argument (the if-then construct will be desugared into if-then-else),
+while the loop constructs are not strict in any arguments. The print
+statement construct is variadic, that is, it takes an arbitrary number of
+arguments.

+
  syntax Block ::= "{" "}"
+                | "{" Stmt "}"
+
+  syntax Stmt ::= Block
+                | Exp ";"                               [strict]
+                | "if" "(" Exp ")" Block "else" Block   [avoid, strict(1)]
+                | "if" "(" Exp ")" Block                [macro]
+                | "while" "(" Exp ")" Block
+                | "for" "(" Stmt Exp ";" Exp ")" Block  [macro]
+                | "return" Exp ";"                      [strict]
+                | "return" ";"                          [macro]
+                | "print" "(" Exps ")" ";"              [strict]
+// NOTE: print strict allows non-deterministic evaluation of its arguments
+// Either keep like this but document, or otherwise make Exps seqstrict.
+// Or define and use a different expression list here, which is seqstrict.
+                | "try" Block "catch" "(" Id ")" Block
+                | "throw" Exp ";"                       [strict]
+                | "join" Exp ";"                        [strict]
+                | "acquire" Exp ";"                     [strict]
+                | "release" Exp ";"                     [strict]
+                | "rendezvous" Exp ";"                  [strict]
+

The reason we allow Stmts as the first argument of for
+instead of Stmt is because we want to allow more than one statement
+to be executed when the loop is initialized. Also, as seen shortly, macros
+may expand one statement into more statements; for example, an initialized
+variable declaration statement var x=0; desugars into two statements,
+namely var x; x=0;, so if we use Stmt instead of Stmts
+in the production of for above then we risk that the macro expansion
+of statement var x=0; happens before the macro expansion of for,
+also shown below, in which case the latter would not apply anymore because
+of syntactic mismatch.

+
  syntax Stmt ::= Stmt Stmt                          [right]
+
+// I wish I were able to write the following instead, but confuses the parser.
+//
+// syntax Stmts ::= List{Stmt,""}
+// syntax Top ::= Stmt | "function" Id "(" Ids ")" Block
+// syntax Pgm ::= List{Top,""}
+//
+// With that, I could have also eliminated the empty block
+

Desugared Syntax

+ +

This part desugars some of SIMPLE's language constructs into core ones.
+We only want to give semantics to core constructs, so we get rid of the
+derived ones before we start the semantics. All desugaring macros below are
+straightforward.

+
  rule if (E) S => if (E) S else {}
+  rule for(Start Cond; Step) {S} => {Start while (Cond) {S Step;}}
+  rule for(Start Cond; Step) {} => {Start while (Cond) {Step;}}
+  rule var E1:Exp, E2:Exp, Es:Exps; => var E1; var E2, Es;
+  rule var X:Id = E; => var X; X = E;
+

For the semantics, we can therefore assume from now on that each
+conditional has both branches, that there are only while loops, and
+that each variable is declared alone and without any initialization as part of
+the declaration.

+
endmodule
+
+
+module SIMPLE-UNTYPED
+  imports SIMPLE-UNTYPED-SYNTAX
+  imports DOMAINS
+

Basic Semantic Infrastructure

+ +

Before one starts adding semantic rules to a K definition, one needs to
+define the basic semantic infrastructure consisting of definitions for
+values and configuration. As discussed in the definitions
+in the K tutorial, the values are needed to know when to stop applying
+the heating rules and when to start applying the cooling rules corresponding
+to strictness or context declarations. The configuration serves as a backbone
+for the process of configuration abstraction which allows users to only
+mention the relevant cells in each semantic rule, the rest of the configuration
+context being inferred automatically. Although in some cases the configuration
+could be automatically inferred from the rules, we believe that it is very
+useful for language designers/semanticists to actually think of and design
+their configuration explicitly, so the current implementation of K requires
+one to define it.

+

Values

+ +

We here define the values of the language that the various fragments of
+programs evaluate to. First, integers and Booleans are values. As discussed,
+arrays evaluate to special array reference values holding (1) a location from
+where the array's elements are contiguously allocated in the store, and
+(2) the size of the array. Functions evaluate to function values as
+λ-abstractions (we do not need to evaluate functions to closures
+because each function is executed in the fixed global environment and
+function definitions cannot be nested). Like in IMP and other
+languages, we finally tell the tool that values are K results.

+
  syntax Val ::= Int | Bool | String
+               | array(Int,Int)
+               | lambda(Ids,Stmt)
+  syntax Exp ::= Val
+  syntax Exps ::= Vals
+  syntax Vals ::= Bottoms
+  syntax KResult ::= Val
+                   | Vals  // TODO: should not need this
+

The inclusion of values in expressions follows the methodology of
+syntactic definitions (like, e.g., in SOS): extend the syntax of the language
+to encompass all values and additional constructs needed to give semantics.
+In addition to that, it allows us to write the semantic rules using the
+original syntax of the language, and to parse them with the same (now extended
+with additional values) parser. If writing the semantics directly on the K
+AST, using the associated labels instead of the syntactic constructs, then one
+would not need to include values in expressions.

+

Configuration

+ +

The K configuration of SIMPLE consists of a top level cell, T,
+holding a threads cell, a global environment map cell genv
+mapping the global variables and function names to their locations, a shared
+store map cell store mapping each location to some value, a set cell
+busy holding the locks which have been acquired but not yet released
+by threads, a set cell terminated holding the unique identifiers of
+the threads which already terminated (needed for join), input
+and output list cells, and a nextLoc cell holding a natural
+number indicating the next available location. Unlike in the small languages
+in the K tutorial, where we used the fresh predicate to generate fresh
+locations, in larger languages, like SIMPLE, we prefer to explicitly manage
+memory. The location counter in nextLoc models an actual physical
+location in the store; for simplicity, we assume arbitrarily large memory and
+no garbage collection. The threads cell contains one thread
+cell for each existing thread in the program. Note that the thread cell has
+multiplicity *, which means that at any given moment there could be zero,
+one or more thread cells. Each thread cell contains a
+computation cell k, a control cell holding the various
+control structures needed to jump to certain points of interest in the program
+execution, a local environment map cell env mapping the thread local
+variables to locations in the store, and finally a holds map cell
+indicating what locks have been acquired by the thread and not released so far
+and how many times (SIMPLE's locks are re-entrant). The control cell
+currently contains only two subcells, a function stack fstack which
+is a list and an exception stack xstack which is also a list.
+One can add more control structures in the control cell, such as a
+stack for break/continue of loops, etc., if the language is extended with more
+control-changing constructs. Note that all cells except for k are
+also initialized, in that they contain a ground term of their corresponding
+sort. The k cell is initialized with the program that will be passed
+to the K tool, as indicated by the $PGM variable, followed by the
+execute task (defined shortly).

+
  // the syntax declarations below are required because the sorts are
+  // referenced directly by a production and, because of the way KIL to KORE
+  // is implemented, the configuration syntax is not available yet
+  // should simply work once KIL is removed completely
+  // check other definitions for this hack as well
+
+  syntax ControlCell
+  syntax ControlCellFragment
+
+  configuration <T color="red">
+                  <threads color="orange">
+                    <thread multiplicity="*" type="Map" color="yellow">
+                      <id color="pink"> -1 </id>
+                      <k color="green"> $PGM:Stmt ~> execute </k>
+                    //<br/> // TODO(KORE): support latex annotations #1799
+                      <control color="cyan">
+                        <fstack color="blue"> .List </fstack>
+                        <xstack color="purple"> .List </xstack>
+                      </control>
+                    //<br/> // TODO(KORE): support latex annotations #1799
+                      <env color="violet"> .Map </env>
+                      <holds color="black"> .Map </holds>
+                    </thread>
+                  </threads>
+                //<br/> // TODO(KORE): support latex annotations #1799
+                  <genv color="pink"> .Map </genv>
+                  <store color="white"> .Map </store>
+                  <busy color="cyan"> .Set </busy>
+                  <terminated color="red"> .Set </terminated>
+                //<br/> // TODO(KORE): support latex annotations #1799
+                  <input color="magenta" stream="stdin"> .List </input>
+                  <output color="brown" stream="stdout"> .List </output>
+                  <nextLoc color="gray"> 0 </nextLoc>
+                </T>
+

Declarations and Initialization

+ +

We start by defining the semantics of declarations (for variables,
+arrays and functions).

+

Variable Declaration

+ +

The SIMPLE syntax was desugared above so that each variable is
+declared alone and its initialization is done as a separate statement.
+The semantic rule below matches resulting variable declarations of the
+form var X; on top of the k cell
+(indeed, note that the k cell is complete, or round, to the
+left, and is torn, or ruptured, to the right), allocates a fresh
+location L in the store which is initialized with a special value
+⊥ (indeed, the unit ., or nothing, is matched anywhere
+in the map ‒note the tears at both sides‒ and replaced with the
+mapping L ↦ ⊥), and binds X to L in the local
+environment shadowing previous declarations of X, if any.
+This possible shadowing of X requires us to therefore update the
+entire environment map, which is expensive and can significantly slow
+down the execution of larger programs. On the other hand, since we know
+that L is not already bound in the store, we simply add the binding
+L ↦ ⊥ to the store, thus avoiding a potentially complete
+traversal of the store map in order to update it. We prefer the approach
+used for updating the store whenever possible, because, in addition to being
+faster, it offers more true concurrency than the latter; indeed, according
+to the concurrent semantics of K, the store is not frozen while
+L ↦ ⊥ is added to it, while the environment is frozen during the
+update operation Env[L/X]. The variable declaration command is
+also removed from the top of the computation cell and the fresh location
+counter is incremented. The undefined symbol added in the store
+is of sort KItem, instead of Val, on purpose; this way, the
+store lookup rules will get stuck when one attempts to lookup an
+uninitialized location. All the above happen in one transactional step,
+with the rule below. Note also how configuration abstraction allows us to
+only mention the needed cells; indeed, as the configuration above states,
+the k and env cells are actually located within a
+thread cell within the threads cell, but one needs
+not mention these: the configuration context of the rule is
+automatically transformed to match the declared configuration
+structure.

+
  syntax KItem ::= "undefined"
+
+  rule <k> var X:Id; => .K ...</k>
+       <env> Env => Env[X <- L] </env>
+       <store>... .Map => L |-> undefined ...</store>
+       <nextLoc> L => L +Int 1 </nextLoc>
+

Array Declaration

+ +

The K semantics of the uni-dimensional array declaration is somewhat similar
+to the above declaration of ordinary variables. First, note the
+context declaration below, which requests the evaluation of the array
+dimension. Once evaluated, say to a natural number N, then
+N +Int 1 locations are allocated in the store for
+an array of size N, the additional location (chosen to be the first
+one allocated) holding the array reference value. The array reference
+value array(L,N) states that the array has size N and its
+elements are located contiguously in the store starting with location
+L. The operation L … L' ↦ V, defined at the end of this
+file in the auxiliary operation section, initializes each location in
+the list L … L' to V. Note that, since the dimensions of
+array declarations can be arbitrary expressions, this virtually means
+that we can dynamically allocate memory in SIMPLE by means of array
+declarations.

+
  context var _:Id[HOLE];
+
+  rule <k> var X:Id[N:Int]; => .K ...</k>
+       <env> Env => Env[X <- L] </env>
+       <store>... .Map => L |-> array(L +Int 1, N)
+                          (L +Int 1) ... (L +Int N) |-> undefined ...</store>
+       <nextLoc> L => L +Int 1 +Int N </nextLoc>
+    requires N >=Int 0
+
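
To make this rule concrete: a hypothetical declaration var a[3]; executed
+when nextLoc holds 7 binds a to location 7, stores 7 |-> array(8, 3)
+together with locations 8, 9 and 10 all mapped to undefined, and advances
+nextLoc to 11.

+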

SIMPLE allows multi-dimensional arrays. For semantic simplicity, we
+desugar them all into uni-dimensional arrays by code transformation.
+This way, we only need to give semantics to uni-dimensional arrays.
+First, note that the context rule above actually evaluates all the array
+dimensions (that's why we defined the expression lists strict!):
+Upon evaluating the array dimensions, the code generation rule below
+desugars multi-dimensional array declaration to uni-dimensional declarations.
+To this aim, we introduce two special unique variable identifiers,
+$1 and $2. The first variable, $1, iterates
+through and initializes each element of the first dimension with an array
+of the remaining dimensions, declared as variable $2:

+
  syntax Id ::= "$1" [token] | "$2" [token]
+  rule var X:Id[N1:Int, N2:Int, Vs:Vals];
+    => var X[N1];
+       {
+         for(var $1 = 0; $1 <= N1 - 1; ++$1) {
+           var $2[N2, Vs];
+           X[$1] = $2;
+         }
+       }
+
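
For example, once its dimensions are evaluated, a (hypothetical) declaration
+var a[2,3]; is rewritten by the rule above to

+
  var a[2];
+  {
+    for(var $1 = 0; $1 <= 2 - 1; ++$1) {
+      var $2[3];
+      a[$1] = $2;
+    }
+  }
+
+

so a becomes an array of two elements, each initialized with a fresh
+uni-dimensional array of size 3.

+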

Ideally, one would like to perform syntactic desugarings like the one
+above before the actual semantics. Unfortunately, that was not possible in
+this case because the dimension expressions of the multi-dimensional array need
+to be evaluated first. Indeed, the desugaring rule above does not work if the
+dimensions of the declared array are arbitrary expressions, because they can
+have side effects (e.g., a[++x,++x]) and those side effects would be
+propagated each time the expression is evaluated in the desugaring code (note
+that both the loop condition and the nested multi-dimensional declaration
+would need to evaluate the expressions given as array dimensions).

+

Function declaration

+ +

Functions are evaluated to λ-abstractions and stored like any other
+values in the store. A binding is added into the environment for the function
+name to the location holding its body. Similarly to the C language, SIMPLE
+only allows function declarations at the top level of the program. More
+precisely, the subsequent semantics of SIMPLE only works well when one
+respects this requirement. Indeed, the simplistic context-free parser
+generated by the grammar above is more generous than we may want, in that it
+allows function declarations anywhere any declaration is allowed, including
+inside arbitrary blocks. However, as the rule below shows, we are not
+storing the declaration environment with the λ-abstraction value as
+closures do. Instead, as seen shortly, we switch to the global environment
+whenever functions are invoked, which is consistent with our requirement that
+functions should only be declared at the top. Thus, if one declares local
+functions, then one may see unexpected behaviors (e.g., when one shadows a
+global variable before declaring a local function). The type checker of
+SIMPLE, also defined in K (see examples/simple/typed/static),
+discards programs which do not respect this requirement.

+
  rule <k> function F(Xs) S => .K ...</k>
+       <env> Env => Env[F <- L] </env>
+       <store>... .Map => L |-> lambda(Xs, S) ...</store>
+       <nextLoc> L => L +Int 1 </nextLoc>
+

When we are done with the first pass (pre-processing), the computation
+cell k contains only the token execute (see the configuration
+declaration above, where the computation item execute was placed
+right after the program in the k cell of the initial configuration)
+and the cell genv is empty. In this case, we have to call
+main() and to initialize the global environment by transferring the
+contents of the local environment into it. We prefer to do it this way, as
+opposed to processing all the top level declarations directly within the global
+environment, because we want to avoid duplication of semantics: the syntax of
+the global declarations is identical to that of their corresponding local
+declarations, so the semantics of the latter suffices provided that we copy
+the local environment into the global one once we are done with the
+pre-processing. We want this separate pre-processing step precisely because
+we want to create the global environment. All (top-level) functions end up
+having their names bound in the global environment and, as seen below, they
+are executed in that same global environment; all these mean, in particular,
+that the functions "see" each other, allowing for mutual recursion, etc.

+
  syntax KItem ::= "execute"
+  rule <k> execute => main(.Exps); </k>
+       <env> Env </env>
+       <genv> .Map => Env </genv>
+

Expressions

+ +

We next define the K semantics of all the expression constructs.

+

Variable lookup

+ +

When a variable X is the first computational task, and X is bound to some
+location L in the environment, and L is mapped to some value V in the
+store, then we rewrite X into V:

+
  rule <k> X:Id => V ...</k>
+       <env>... X |-> L ...</env>
+       <store>... L |-> V:Val ...</store>
+

Note that the rule above excludes reading ⊥, because ⊥ is not
+a value and V is checked at runtime to be a value.

+

Variable/Array increment

+ +

This is tricky, because we want to allow both ++x and ++a[5].
+Therefore, we need to extract the lvalue of the expression to increment.
+To do that, we state that the expression to increment should be wrapped
+by the auxiliary lvalue operation and then evaluated. The semantics
+of this auxiliary operation is defined at the end of this file. For now, all
+we need to know is that it takes an expression and evaluates to a location
+value. Location values, also defined at the end of the file, are integers
+wrapped with the operation loc, to distinguish them from ordinary
+integers.

+
  context ++(HOLE => lvalue(HOLE))
+  rule <k> ++loc(L) => I +Int 1 ...</k>
+       <store>... L |-> (I => I +Int 1) ...</store>
+

Arithmetic operators

+ +

There is nothing special about the following rules. They rewrite the
+language constructs to their library counterparts when their arguments
+become values of expected sorts:

+
  rule I1 + I2 => I1 +Int I2
+  rule Str1 + Str2 => Str1 +String Str2
+  rule I1 - I2 => I1 -Int I2
+  rule I1 * I2 => I1 *Int I2
+  rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0
+  rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0
+  rule - I => 0 -Int I
+  rule I1 < I2 => I1 <Int I2
+  rule I1 <= I2 => I1 <=Int I2
+  rule I1 > I2 => I1 >Int I2
+  rule I1 >= I2 => I1 >=Int I2
+

The equality and inequality constructs reduce to syntactic comparison
+of the two argument values (which is what the equality on K terms does).

+
  rule V1:Val == V2:Val => V1 ==K V2
+  rule V1:Val != V2:Val => V1 =/=K V2
+

The logical negation is clear, but the logical conjunction and disjunction
+are short-circuited:

+
  rule ! T => notBool(T)
+  rule true  && E => E
+  rule false && _ => false
+  rule true  || _ => true
+  rule false || E => E
+

Array lookup

+ +

Untyped SIMPLE does not check array bounds (the dynamically typed version of
+it, in examples/simple/typed/dynamic, does check for array out of
+bounds). The first rule below desugars the multi-dimensional array access to
+uni-dimensional array access; recall that the array access operation was
+declared strict, so all sub-expressions involved are already values at this
+stage. The second rule rewrites the array access to a lookup operation at a
+precise location; we prefer to do it this way to avoid locking the store.
+The semantics of the auxiliary lookup operation is straightforward,
+and is defined at the end of the file.

+
// The [anywhere] feature is underused, because it would only be used
+// at the top of the computation or inside the lvalue wrapper. So it
+// may not be worth, or we may need to come up with a special notation
+// allowing us to enumerate contexts for [anywhere] rules.
+  rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs]
+    [anywhere]
+
+  rule array(L,_)[N:Int] => lookup(L +Int N)
+    [anywhere]
+

Size of an array

+ +

The size of the array is stored in the array reference value, and the
+sizeOf construct was declared strict, so:

+
  rule sizeOf(array(_,N)) => N
+

Function call

+ +

Function application was strict in both its arguments, so we can
+assume that both the function and its arguments are evaluated to
+values (the former expected to be a λ-abstraction). The first
+rule below matches a well-formed function application on top of the
+computation and performs the following steps atomically: it switches
+to the function body followed by return; (for the case in
+which the function does not use an explicit return statement); it
+pushes the remaining computation, the current environment, and the
+current control data onto the function stack (the remaining
+computation can thus also be discarded from the computation cell,
+because an unavoidable subsequent return statement ‒see
+above‒ will always recover it from the stack); it switches the
+current environment (which is being pushed on the function stack) to
+the global environment, which is where the free variables in the
+function body should be looked up; it binds the formal parameters to
+fresh locations in the new environment, and stores the actual
+arguments to those locations in the store (this latter step is easily
+done by reducing the problem to variable declarations, whose semantics
+we have already defined; the auxiliary operation mkDecls is
+defined at the end of the file). The second rule pops the
+computation, the environment and the control data from the function
+stack when a return statement is encountered as the next
+computational task, passing the returned value to the popped
+computation (the popped computation was the context in which the
+returning function was called). Note that the pushing/popping of the
+control data is crucial. Without it, one may have a function that
+contains an exception block with a return statement inside, which
+would put the xstack cell in an inconsistent state (since the
+exception block modifies it, but that modification should be
+irrelevant once the function returns). We add an artificial
+nothing value to the language, which is returned by the
+nullary return; statements.

+
  syntax KItem ::=  (Map,K,ControlCellFragment)
+
+  rule <k> lambda(Xs,S)(Vs:Vals) ~> K => mkDecls(Xs,Vs) S return; </k>
+       <control>
+         <fstack> .List => ListItem((Env,K,C)) ...</fstack>
+         C
+       </control>
+       <env> Env => GEnv </env>
+       <genv> GEnv </genv>
+
+  rule <k> return(V:Val); ~> _ => V ~> K </k>
+       <control>
+         <fstack> ListItem((Env,K,C)) => .List ...</fstack>
+         (_ => C)
+       </control>
+       <env> _ => Env </env>
+
+  syntax Val ::= "nothing"
+  rule return; => return nothing;
+

Like for division-by-zero, it is left unspecified what happens
+when the nothing value is used in domain calculations. For
+example, from the perspective of the language semantics,
+7 +Int nothing can evaluate to anything, or
+may not evaluate at all (be undefined). If one wants to make sure that
+such artificial values are never misused, then one needs to define a static
+checker (also using K, like our type checker in
+examples/simple/typed/static) and reject programs that do so.
+Note that, unlike the undefined symbol which had the sort K
+instead of Val, we defined nothing to be a value. That
+is because, as explained above, we do not want the program to get
+stuck when nothing is returned by a function. Instead, we want the
+behavior to be unspecified; in particular, if one is careful to never
+use the returned value in domain computation, as happens when we
+call a function for its side effects (e.g., with a statement of the
+form f(x);), then the program does not get stuck.

+

Read

+ +

The read() expression construct simply evaluates to the next
+input value, at the same time discarding the input value from the
+input cell.

+
  rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input>
+

Assignment

+ +

In SIMPLE, like in C, assignments are expression constructs and not statement
+constructs. To make it a statement all one needs to do is to follow it by a
+semi-colon ; (see the semantics for expression statements below).
+Like for the increment, we want to allow assignments not only to variables but
+also to array elements, e.g., e1[e2] = e3 where e1 evaluates
+to an array reference, e2 to a natural number, and e3 to any
+value. Thus, we first compute the lvalue of the left-hand-side expression
+that appears in an assignment, and then we do the actual assignment to the
+resulting location:

+
  context (HOLE => lvalue(HOLE)) = _
+
+  rule <k> loc(L) = V:Val => V ...</k> <store>... L |-> (_ => V) ...</store>
+

Statements

+ +

We next define the K semantics of statements.

+

Blocks

+ +

Empty blocks are simply discarded, as shown in the first rule below.
+For non-empty blocks, we schedule the enclosed statement but we have to
+make sure the environment is recovered after the enclosed statement executes.
+Recall that we allow local variable declarations, whose scope is the block
+enclosing them. That is the reason for which we have to recover the
+environment after the block. This allows us to have a very simple semantics
+for variable declarations, as we did above. One can make the two rules below
+computational if one wants them to count as computational steps.

+
  rule {} => .K
+  rule <k> { S } => S ~> setEnv(Env) ...</k>  <env> Env </env>
+

The basic definition of environment recovery is straightforward and
+given in the section on auxiliary constructs at the end of the file.

+

There are two common alternatives to the above semantics of blocks.
+One is to keep track of the variables which are declared in the block and only
+recover those at the end of the block. This way one does more work for
+variable declarations but conceptually less work for environment recovery; we
+say conceptually because it is not clear that it is indeed the case that
+one does less work when AC matching is involved. The other alternative is to
+work with a stack of environments instead of a flat environment, and push the
+current environment when entering a block and pop it when exiting it. This
+way, one does more work when accessing variables (since one has to search the
+variable in the environment stack in a top-down manner), but on the other hand
+uses smaller environments and the definition gets closer to an implementation.
+Based on experience with dozens of language semantics and other K definitions,
+we have found that our approach above is the best trade-off between elegance
+and efficiency (especially since rewrite engines have built-in techniques to
+lazily copy terms, by need, thus not creating unnecessary copies),
+so it is the one that we follow in general.

+

Sequential composition

+ +

Sequential composition is desugared into K's builtin sequentialization
+operation (recall that, like in C, the semi-colon ; is not a
+statement separator in SIMPLE — it is either a statement terminator or a
+construct for a statement from an expression). Note that K allows us
+to define the semantics of SIMPLE in such a way that statements eventually
+dissolve from the top of the computation when they are completed; this is in
+sharp contrast to (artificially) evaluating them to a special
+skip statement value and then getting rid of that special value, as
+it is the case in other semantic approaches (where everything must evaluate
+to something). This means that once S₁ completes in the rule below, S₂
+becomes automatically the next computation item without any additional
+(explicit or implicit) rules.

+
  rule S1:Stmt S2:Stmt => S1 ~> S2
+

A subtle aspect of the rule above is that S₁ is declared to have sort
+Stmts and not Stmt. That is because desugaring macros can indeed
+produce left associative sequential composition of statements. For example,
+the code var x=0; x=1; is desugared to
+(var x; x=0;) x=1;, so although originally the first term of
+the sequential composition had sort Stmt, after desugaring it became
+of sort Stmts. Note that the attribute [right] associated
+to the sequential composition production is an attribute of the syntax, and not
+of the semantics: e.g., it tells the parser to parse
+var x; x=0; x=1; as var x; (x=0; x=1;), but it
+does not tell the rewrite engine to rewrite (var x; x=0;) x=1; to
+var x; (x=0; x=1;).

+

Expression statements

+ +

Expression statements are only used for their side effects, so their result
+value is simply discarded. Common examples of expression statements are ones
+of the form ++x;, x=e;, e1[e2]=e3;, etc.

+
  rule _:Val; => .K
+

Conditional

+ +

Since the conditional was declared with the strict(1) attribute, we
+can assume that its first argument will eventually be evaluated. The rules
+below cover the only two possibilities in which the conditional is allowed to
+proceed (otherwise the rewriting process gets stuck).

+
  rule if ( true) S else _ => S
+  rule if (false) _ else S => S
+

While loop

+ +

The simplest way to give the semantics of the while loop is by unrolling.
+Note, however, that its unrolling is only allowed when the while loop reaches
+the top of the computation (to avoid non-termination of unrolling). The
+simple while loop semantics below works because our while loops in SIMPLE are
+indeed very basic. If we allowed break/continue of loops then we would need
+a completely different semantics, which would also involve the control cell.

+
  rule while (E) S => if (E) {S while(E)S}
+

Print

+ +

The print statement was strict, so all its arguments are now
+evaluated (recall that print is variadic). We append each of
+its evaluated arguments to the output buffer, and discard the residual
+print statement with an empty list of arguments.

+
  rule <k> print(V:Val, Es => Es); ...</k> <output>... .List => ListItem(V) </output>
+  rule print(.Vals); => .K
+

Exceptions

+ +

SIMPLE allows parametric exceptions, in that one can throw and catch a
+particular value. The statement try S₁ catch(X) S₂
+proceeds with the evaluation of S₁. If S₁ evaluates normally, i.e.,
+without any exception thrown, then S₂ is discarded and the execution
+continues normally. If S₁ throws an exception with a statement of the
+form throw E, then E is first evaluated to some value V
+(throw was declared to be strict), then V is bound to X, then
+S₂ is evaluated in the new environment while the remainder of S₁ is
+discarded, then the environment is recovered and the execution continues
+normally with the statement following the try S₁ catch(X) S₂ statement.
+Exceptions can be nested and the statements in the
+catch part (S₂ in our case) can throw exceptions to the
+upper level. One should be careful with how one handles the control data
+structures here, so that the abrupt changes of control due to exception
+throwing and to function returns interact correctly with each other.
+For example, we want to allow function calls inside the statement S₁ in
+a try S₁ catch(X) S₂ block which can throw an exception
+that is not caught by the function but instead is propagated to the
+try S₁ catch(X) S₂ block that called the function.
+Therefore, we have to make sure that the function stack as well as other
+potential control structures are also properly modified when the exception
+is thrown to correctly recover the execution context. This can be easily
+achieved by pushing/popping the entire current control context onto the
+exception stack. The three rules below modularly do precisely the above.

+
  syntax KItem ::= (Id,Stmt,K,Map,ControlCellFragment)
+
+  syntax KItem ::= "popx"
+
+  rule <k> (try S1 catch(X) {S2} => S1 ~> popx) ~> K </k>
+       <control>
+         <xstack> .List => ListItem((X, S2, K, Env, C)) ...</xstack>
+         C
+       </control>
+       <env> Env </env>
+
+  rule <k> popx => .K ...</k>
+       <xstack> ListItem(_) => .List ...</xstack>
+
+  rule <k> throw V:Val; ~> _ => { var X = V; S2 } ~> K </k>
+       <control>
+         <xstack> ListItem((X, S2, K, Env, C)) => .List ...</xstack>
+         (_ => C)
+       </control>
+       <env> _ => Env </env>
+

The catch statement S₂ needs to be executed in the original environment,
+but where the thrown value V is bound to the catch variable X. We here
+chose to rely on two previously defined constructs when giving semantics to
+the catch part of the statement: (1) the variable declaration with
+initialization, for binding X to V; and (2) the block construct for
+preventing X from shadowing variables in the original environment upon the
+completion of S₂.
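
+

As a small illustration (a hypothetical fragment, assuming r has been
+declared earlier), the code

+
try { throw 7; r = 0; } catch(x) { r = x; }
+
+

leaves r holding 7: the throw discards the remainder of the try block, the
+catch body runs with x bound to 7 inside a new block, and execution then
+continues with whatever follows the try statement.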

+

Threads

+ +

SIMPLE's threads can be created and terminated dynamically, and can
+synchronize by acquiring and releasing re-entrant locks and by rendezvous.
+We discuss the seven rules giving the semantics of these operations below.
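
+

As a quick preview (a hypothetical fragment, assuming x and t are declared
+and x is initially 0), the code

+
t = spawn { acquire 1; x = x + 1; release 1; };
+acquire 1; x = x + 10; release 1;
+join t;
+print(x);
+
+

always prints 11: the lock makes the two updates of x mutually exclusive, and
+the join guarantees that the spawned thread has finished before x is read.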

+

Thread creation

+ +

Threads can be created by any other threads using the spawn S
+construct. The spawn expression construct evaluates to the unique identifier
+of the newly created thread and, at the same time, a new thread cell is added
+into the configuration, initialized with the S statement and sharing the
+same environment with the parent thread. Note that the newly created
+thread cell is torn. That means that the remaining cells are added
+and initialized automatically as described in the definition of SIMPLE's
+configuration. This is part of K's configuration abstraction mechanism.

+
  rule <thread>...
+         <k> spawn S => !T:Int ...</k>
+         <env> Env </env>
+       ...</thread>
+       (.Bag => <thread>...
+               <k> S </k>
+               <env> Env </env>
+               <id> !T </id>
+             ...</thread>)
+

Thread termination

+ +

Dually to the above, when a thread terminates its assigned computation (the
+contents of its k cell) is empty, so the thread can be dissolved.
+However, since no discipline is imposed on how locks are acquired and released,
+it can be the case that a terminating thread still holds locks. Those locks
+must be released, so other threads attempting to acquire them do not deadlock.
+We achieve that by removing all the locks held by the terminating thread in its
+holds cell from the set of busy locks in the busy cell
+(keys(H) returns the domain of the map H as a set, that is, only
+the locks themselves ignoring their multiplicity). As seen below, a lock is
+added to the busy cell as soon as it is acquired for the first time
+by a thread. The unique identifier of the terminated thread is also collected
+into the terminated cell, so the join construct knows which
+threads have terminated.

+
  rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag)
+       <busy> Busy => Busy -Set keys(H) </busy>
+       <terminated>... .Set => SetItem(T) ...</terminated>
+

Thread joining

+ +

Thread joining is now straightforward: all we need to do is to check whether
+the identifier of the thread to be joined is in the terminated cell.
+If yes, then the join statement dissolves and the joining thread
+continues normally; if not, then the joining thread gets stuck.

+
  rule <k> join T:Int; => .K ...</k>
+       <terminated>... SetItem(T) ...</terminated>
+

Acquire lock

+ +

There are two cases to distinguish when a thread attempts to acquire a lock
+(in SIMPLE any value can be used as a lock):
+(1) The thread does not currently have the lock, in which case it has to
+take it provided that the lock is not already taken by another thread (see
+the side condition of the first rule).
+(2) The thread already has the lock, in which case it just increments its
+counter for the lock (the locks are re-entrant). These two cases are captured
+by the two rules below:

+
  rule <k> acquire V:Val; => .K ...</k>
+       <holds>... .Map => V |-> 0 ...</holds>
+       <busy> Busy (.Set => SetItem(V)) </busy>
+    requires (notBool(V in Busy))
+
+  rule <k> acquire V; => .K ...</k>
+       <holds>... V:Val |-> (N => N +Int 1) ...</holds>
+

Release lock

+ +

Similarly, there are two corresponding cases to distinguish when a thread
+releases a lock:
+(1) The thread holds the lock more than once, in which case all it needs to do
+is to decrement the lock counter.
+(2) The thread holds the lock only once, in which case it needs to remove it
+from its holds cell and also from the shared busy cell,
+so other threads can acquire it if they need to.

+
  rule <k> release V:Val; => .K ...</k>
+       <holds>... V |-> (N => N -Int 1) ...</holds>
+    requires N >Int 0
+
+  rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds>
+       <busy>... SetItem(V) => .Set ...</busy>
+

Rendezvous synchronization

+ +

In addition to synchronization through acquire and release of locks, SIMPLE
+also provides a construct for rendezvous synchronization. A thread whose next
+statement to execute is rendezvous(V) gets stuck until another
+thread reaches an identical statement; when that happens, the two threads
+drop their rendezvous statements and continue their executions. If three
+threads happen to have an identical rendezvous statement as their next
+statement, then precisely two of them will synchronize and the other will
+remain blocked until another thread reaches a similar rendezvous statement.
+The rule below is as simple as it can be. Note, however, that, again, it is
+K's mechanism for configuration abstraction that makes it work as desired:
+since the only cell that can multiply and that contains a k cell is
+the thread cell, the only way to concretize the rule below to the
+actual configuration of SIMPLE is to include each k cell in a
+thread cell.

+
  rule <k> rendezvous V:Val; => .K ...</k>
+       <k> rendezvous V; => .K ...</k>
+

Auxiliary declarations and operations

+ +

In this section we define all the auxiliary constructs used in the
+above semantics.

+

Making declarations

+ +

The mkDecls auxiliary construct turns a list of identifiers
+and a list of values into a sequence of corresponding variable
+declarations.

+
  syntax Stmt ::= mkDecls(Ids,Vals)  [function]
+  rule mkDecls((X:Id, Xs:Ids), (V:Val, Vs:Vals)) => var X=V; mkDecls(Xs,Vs)
+  rule mkDecls(.Ids,.Vals) => {}
+
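
For instance, mkDecls((x, y, .Ids), (1, 2, .Vals)) unfolds to
+var x=1; var y=2; {} (with hypothetical identifiers x and y), which is then
+processed by the declaration, assignment and block rules already given.

+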

Location lookup

+ +

The operation below is straightforward.

+
  syntax Exp ::= lookup(Int)
+  rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store>
+

Environment recovery

+ +

We have already discussed the environment recovery auxiliary operation in the
+IMP++ tutorial:

+
// TODO: eliminate the env wrapper, like we did in IMP++
+
+  syntax KItem ::= setEnv(Map)
+  rule <k> setEnv(Env) => .K ...</k> <env> _ => Env </env>
+

While theoretically sufficient, the basic definition for environment
+recovery alone is suboptimal. Consider a loop while (E)S,
+whose semantics (see above) was given by unrolling. S
+is a block. Then the semantics of blocks above, together with the
+unrolling semantics of the while loop, will yield a computation
+structure in the k cell that increasingly grows, adding a new
+environment recovery task right in front of the already existing sequence of
+similar environment recovery tasks (this phenomenon is similar to the "tail
+recursion" problem). Of course, when we have a sequence of environment
+recovery tasks, we only need to keep the last one. The elegant rule below
+does precisely that, thus avoiding the unnecessary computation explosion
+problem:

+
  rule (setEnv(_) => .K) ~> setEnv(_)
+

In fact, the above follows a common convention in K for recovery
+operations of cell contents: the meaning of a computation task of the form
+cell(C) that reaches the top of the computation is that the current
+contents of cell cell is discarded and gets replaced with C. We
+did not add support for these special computation tasks in our current
+implementation of K, so we need to define them as above.

+

lvalue and loc

+ +

For convenience in giving the semantics of constructs like the increment and
+the assignment, that we want to operate the same way on variables and on
+array elements, we used an auxiliary lvalue(E) construct which was
+expected to evaluate to the lvalue of the expression E. This is only
+defined when E has an lvalue, that is, when E is either a variable or
+evaluates to an array element. lvalue(E) evaluates to a value of
+the form loc(L), where L is the location where the value of E
+can be found; for clarity, we use loc to structurally distinguish
+natural numbers from location values. In giving semantics to lvalue
+there are two cases to consider. (1) If E is a variable, then all we need
+to do is to grab its location from the environment. (2) If E is an array
+element, then we first evaluate the array and its index in order to identify
+the exact location of the element of concern, and then return that location;
+the last rule below works because its preceding context declarations ensure
+that the array and its index are evaluated, and then the rule for array lookup
+(defined above) rewrites the evaluated array access construct to its
+corresponding store lookup operation.

+
// For parsing reasons, we prefer to allow lvalue to take a K
+
+  syntax Exp ::= lvalue(K)
+  syntax Val ::= loc(Int)
+
+// Local variable
+
+  rule <k> lvalue(X:Id => loc(L)) ...</k> <env>... X |-> L:Int ...</env>
+
+// Array element: evaluate the array and its index;
+// then the array lookup rule above applies.
+
+  context lvalue(_::Exp[HOLE::Exps])
+  context lvalue(HOLE::Exp[_::Exps])
+
+// Finally, return the address of the desired object member
+
+  rule lvalue(lookup(L:Int) => loc(L))
+

Initializing multiple locations

+ +

The following operation initializes a sequence of locations with the same
+value:

+
  syntax Map ::= Int "..." Int "|->" K [function]
+  rule N...M |-> _ => .Map  requires N >Int M
+  rule N...M |-> K => N |-> K (N +Int 1)...M |-> K  requires N <=Int M
+
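
For example, 3 ... 5 |-> undefined unfolds, by repeatedly applying the second
+rule and finishing with the first, into the map
+3 |-> undefined 4 |-> undefined 5 |-> undefined, which is exactly the shape
+used by the array declaration rule earlier in this file.

+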

The semantics of SIMPLE is now complete. Make sure you kompile the
+definition with the right options in order to generate the desired model.
+No kompile options are needed if you only want to execute the definition
+(and thus get an interpreter), but if you want to search for different
+program behaviors then you need to kompile with the --enable-search option.
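
+

For instance, assuming the definition is saved as simple-untyped.k (a
+hypothetical file name), one would run:

+
kompile simple-untyped.k --enable-search
+krun some-program.simple --search
+
+

where some-program.simple stands for any SIMPLE program; without --search,
+krun simply executes the program and prints its final configuration.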

+
endmodule
+

Go to Lesson 2, SIMPLE typed static

+

SIMPLE — Untyped

+ +

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+ +

This is the K semantic definition of the untyped SIMPLE language.
+SIMPLE is intended to be a pedagogical and research language that captures
+the essence of the imperative programming paradigm, extended with several
+features often encountered in imperative programming languages.
+A program consists of a set of global variable declarations and
+function definitions. Like in C, function definitions cannot be
+nested and each program must have one function called main,
+which is invoked when the program is executed. To make it more
+interesting and to highlight some of K's strengths, SIMPLE includes
+the following features in addition to the conventional imperative
+expression and statement constructs:

+
    +
  • +

    Multidimensional arrays and array references. An array evaluates
    +to an array reference, which is a special value holding a location (where
    +the elements of the array start) together with the size of the array;
    +the elements of the array can be array references themselves (particularly
    +when the array is multi-dimensional). Array references are ordinary values,
    +so they can be assigned to variables and passed/received by functions.

    +
  • +
  • +

    Functions and function values. Functions can have zero or
    +more parameters and can return abruptly using a return statement.
    +SIMPLE follows a call-by-value parameter passing style, with static scoping.
    +Function names evaluate to function abstractions, which hereby become ordinary
    +values in the language, same like the array references.

    +
  • +
  • +

    Blocks with locals. SIMPLE variables can be declared
    +anywhere, their scope being from the place where they are declared
    +until the end of the most nested enclosing block.

    +
  • +
  • +

    Input/Output. The expression read() evaluates to the
    +next value in the input buffer, and the statement write(e)
    +evaluates e and outputs its value to the output buffer. The
    +input and output buffers are lists of values.

    +
  • +
  • +

    Exceptions. SIMPLE has parametric exceptions (the value thrown as
    +an exception can be caught and bound).

    +
  • +
  • +

    Concurrency via dynamic thread creation/termination and
    +synchronization. One can spawn a thread to execute any statement.
    +The spawned thread shares with its parent its environment at creation time.
    +Threads can be synchronized via a join command which blocks the current thread
    +until the joined thread completes, via re-entrant locks which can be acquired
    +and released, as well as through rendezvous commands.

    +
  • +
+

Like in many other languages, some of SIMPLE's constructs can be
+desugared into a smaller set of basic constructs. We do that at the end
+of the syntax module, and then we only give semantics to the core constructs.

+

Note: This definition is commented slightly more than others, because it is
+intended to be one of the first non-trivial definitions that the new
+user of K sees. We recommend the beginner user to first check the
+language definitions discussed in the K tutorial.

+
module SIMPLE-UNTYPED-SYNTAX
+  imports DOMAINS-SYNTAX
+

Syntax

+ +

We start by defining the SIMPLE syntax. The language constructs discussed
+above have the expected syntax and evaluation strategies. Recall that in K
+we annotate the syntax with appropriate strictness attributes, thus giving
+each language construct the desired evaluation strategy.

+

Identifiers

+ +

Recall from the K tutorial that identifiers are builtin and come under the
+syntactic category Id. The special identifier for the function
+main belongs to all programs, and plays a special role in the semantics,
+so we declare it explicitly. This would not be necessary if the identifiers
+were all included automatically in semantic definitions, but that is not
+possible because of parsing reasons (e.g., K variables used to match
+concrete identifiers would then be ambiguously parsed as identifiers). They
+are only included in the parser generated to parse programs (and used by the
+kast tool). Consequently, we have to explicitly declare all the
+concrete identifiers that play a special role in the semantics, like
+main below.

+
  syntax Id ::= "main" [token]
+

Declarations

+ +

There are two types of declarations: for variables (including arrays) and
+for functions. We are going to allow declarations of the form
+var x=10, a[10,10], y=23;, which is why we allow the var
+keyword to take a list of expressions. The non-terminals used in the two
+productions below are defined shortly.

+
  syntax Stmt ::= "var" Exps ";"
+                | "function" Id "(" Ids ")" Block
+

Expressions

+ +

The expression constructs below are standard. Increment (++) takes
+an expression rather than a variable because it can also increment an array
+element. Recall that the syntax we define in K is what we call the syntax
+of the semantics: while powerful enough to define non-trivial syntaxes
+(thanks to the underlying SDF technology that we use), we typically refrain
+from defining precise syntaxes, that is, ones which accept precisely the
+well-formed programs (that would not be possible anyway in general). That job
+is deferred to type systems, which can also be defined in K. In other words,
+we are not making any effort to guarantee syntactically that only variables
+or array elements are passed to the increment construct, we allow any
+expression. Nevertheless, we will only give semantics to those, so expressions
+of the form ++5, which parse (but which will be rejected by our type
+system in the typed version of SIMPLE later), will get stuck when executed.
+Arrays can be multidimensional and can hold other arrays, so their
+lookup operation takes a list of expressions as argument and applies to an
+expression (which can in particular be another array lookup).
+The construct sizeOf gives the size of an array in number of elements
+of its first dimension. Note that almost all constructs are strict. The only
+constructs which are not strict are the increment (since its first argument
+gets updated, so it cannot be evaluated), the input read which takes no
+arguments so strictness is irrelevant for it, the logical and and or constructs
+which are short-circuited, the thread spawning construct which creates a new
+thread executing the argument expression and returns its unique identifier to
+the creating thread (so it cannot just evaluate its argument in place), and the
+assignment which is only strict in its second argument (for the same reason as
+the increment).

+
  syntax Exp ::= Int | Bool | String | Id
+               | "(" Exp ")"             [bracket]
+               | "++" Exp
+               > Exp "[" Exps "]"        [strict]
+               > Exp "(" Exps ")"        [strict]
+               | "-" Exp                 [strict]
+               | "sizeOf" "(" Exp ")"    [strict]
+               | "read" "(" ")"
+               > left:
+                 Exp "*" Exp             [strict, left]
+               | Exp "/" Exp             [strict, left]
+               | Exp "%" Exp             [strict, left]
+               > left:
+                 Exp "+" Exp             [strict, left]
+               | Exp "-" Exp             [strict, left]
+               > non-assoc:
+                 Exp "<" Exp             [strict, non-assoc]
+               | Exp "<=" Exp            [strict, non-assoc]
+               | Exp ">" Exp             [strict, non-assoc]
+               | Exp ">=" Exp            [strict, non-assoc]
+               | Exp "==" Exp            [strict, non-assoc]
+               | Exp "!=" Exp            [strict, non-assoc]
+               > "!" Exp                 [strict]
+               > left:
+                 Exp "&&" Exp            [strict(1), left]
+               | Exp "||" Exp            [strict(1), left]
+               > "spawn" Block
+               > Exp "=" Exp             [strict(2), right]
+

We also need comma-separated lists of identifiers and of expressions.
+Moreover, we want them to be strict, that is, to evaluate to lists of results
+whenever requested (e.g., when they appear as strict arguments of
+the constructs above).

+
  syntax Ids  ::= List{Id,","}           [overload(Exps)]
+  syntax Exps ::= List{Exp,","}          [overload(Exps), strict]  // automatically hybrid now
+  syntax Exps ::= Ids
+  syntax Val
+  syntax Vals ::= List{Val,","}          [overload(Exps)]
+  syntax Bottom
+  syntax Bottoms ::= List{Bottom,","}    [overload(Exps)]
+  syntax Ids ::= Bottoms
+

Statements

+ +

Most of the statement constructs are standard for imperative languages.
+We syntactically distinguish between empty and non-empty blocks, because we
+chose Stmts not to be a (;-separated) list of
+Stmt. Variables can be declared anywhere inside a block, their scope
+ending with the block. Expressions are allowed to be used for their side
+effects only (followed by a semicolon ;). Functions are allowed
+to abruptly return. The exceptions are parametric, i.e., one can throw a value
+which is bound to the variable declared by catch. Threads can be
+dynamically created and terminated, and can synchronize with join,
+acquire, release and rendezvous. Note that the
+strictness attributes obey the intended evaluation strategy of the various
+constructs. In particular, the if-then-else construct is strict only in its
+first argument (the if-then construct will be desugared into if-then-else),
+while the loop constructs are not strict in any arguments. The print
+statement construct is variadic, that is, it takes an arbitrary number of
+arguments.

+
  syntax Block ::= "{" "}"
+                | "{" Stmt "}"
+
+  syntax Stmt ::= Block
+                | Exp ";"                               [strict]
+                | "if" "(" Exp ")" Block "else" Block   [avoid, strict(1)]
+                | "if" "(" Exp ")" Block                [macro]
+                | "while" "(" Exp ")" Block
+                | "for" "(" Stmt Exp ";" Exp ")" Block  [macro]
+                | "return" Exp ";"                      [strict]
+                | "return" ";"                          [macro]
+                | "print" "(" Exps ")" ";"              [strict]
+// NOTE: print strict allows non-deterministic evaluation of its arguments
+// Either keep it like this but document it, or otherwise make Exps seqstrict.
+// Or define and use a different expression list here, which is seqstrict.
+                | "try" Block "catch" "(" Id ")" Block
+                | "throw" Exp ";"                       [strict]
+                | "join" Exp ";"                        [strict]
+                | "acquire" Exp ";"                     [strict]
+                | "release" Exp ";"                     [strict]
+                | "rendezvous" Exp ";"                  [strict]
+

The reason we allow Stmts as the first argument of for
+instead of Stmt is that we want to allow more than one statement
+to be executed when the loop is initialized. Also, as seen shortly, macros
+may expand one statement into more statements; for example, an initialized
+variable declaration statement var x=0; desugars into two statements,
+namely var x; x=0;, so if we use Stmt instead of Stmts
+in the production of for above then we risk that the macro expansion
+of statement var x=0; happens before the macro expansion of for,
+also shown below, in which case the latter would not apply anymore because
+of syntactic mismatch.

+
  syntax Stmt ::= Stmt Stmt                          [right]
+
+// I wish I were able to write the following instead, but it confuses the parser.
+//
+// syntax Stmts ::= List{Stmt,""}
+// syntax Top ::= Stmt | "function" Id "(" Ids ")" Block
+// syntax Pgm ::= List{Top,""}
+//
+// With that, I could have also eliminated the empty block
+

Desugared Syntax

+ +

This part desugars some of SIMPLE's language constructs into core ones.
+We only want to give semantics to core constructs, so we get rid of the
+derived ones before we start the semantics. All desugaring macros below are
+straightforward.

+
  rule if (E) S => if (E) S else {}
+  rule for(Start Cond; Step) {S} => {Start while (Cond) {S Step;}}
+  rule for(Start Cond; Step) {} => {Start while (Cond) {Step;}}
+  rule var E1:Exp, E2:Exp, Es:Exps; => var E1; var E2, Es;
+  rule var X:Id = E; => var X; X = E;
+
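
To illustrate the macros above (a hypothetical trace, with the rewritten forms shown as comments), consider the following fragment:

  var x = 0, y;
  // => var x = 0; var y;     (the declaration list is split)
  // => var x; x = 0; var y;  (the initialization becomes a separate statement)

  for (var i = 0; i < 10; ++i) { x = x + i; }
  // => { var i; i = 0; while (i < 10) { x = x + i; ++i; } }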

For the semantics, we can therefore assume from now on that each
+conditional has both branches, that there are only while loops, and
+that each variable is declared alone and without any initialization as part of
+the declaration.

+
endmodule
+
+
+module SIMPLE-UNTYPED
+  imports SIMPLE-UNTYPED-SYNTAX
+  imports DOMAINS
+

Basic Semantic Infrastructure

+ +

Before one starts adding semantic rules to a K definition, one needs to
+define the basic semantic infrastructure consisting of definitions for
+values and configuration. As discussed in the definitions
+in the K tutorial, the values are needed to know when to stop applying
+the heating rules and when to start applying the cooling rules corresponding
+to strictness or context declarations. The configuration serves as a backbone
+for the process of configuration abstraction which allows users to only
+mention the relevant cells in each semantic rule, the rest of the configuration
+context being inferred automatically. Although in some cases the configuration
+could be automatically inferred from the rules, we believe that it is very
+useful for language designers/semanticists to actually think of and design
+their configuration explicitly, so the current implementation of K requires
+one to define it.

+

Values

+ +

We here define the values of the language that the various fragments of
+programs evaluate to. First, integers and Booleans are values. As discussed,
+arrays evaluate to special array reference values holding (1) a location from
+where the array's elements are contiguously allocated in the store, and
+(2) the size of the array. Functions evaluate to function values as
+λ-abstractions (we do not need to evaluate functions to closures
+because each function is executed in the fixed global environment and
+function definitions cannot be nested). Like in IMP and other
+languages, we finally tell the tool that values are K results.

+
  syntax Val ::= Int | Bool | String
+               | array(Int,Int)
+               | lambda(Ids,Stmt)
+  syntax Exp ::= Val
+  syntax Exps ::= Vals
+  syntax Vals ::= Bottoms
+  syntax KResult ::= Val
+                   | Vals  // TODO: should not need this
+

The inclusion of values in expressions follows the methodology of
+syntactic definitions (like, e.g., in SOS): extend the syntax of the language
+to encompass all values and additional constructs needed to give semantics.
+In addition to that, it allows us to write the semantic rules using the
+original syntax of the language, and to parse them with the same (now extended
+with additional values) parser. If one wrote the semantics directly on the K
+AST, using the associated labels instead of the syntactic constructs, then one
+would not need to include values in expressions.

+

Configuration

+ +

The K configuration of SIMPLE consists of a top level cell, T,
+holding a threads cell, a global environment map cell genv
+mapping the global variables and function names to their locations, a shared
+store map cell store mapping each location to some value, a set cell
+busy holding the locks which have been acquired but not yet released
+by threads, a set cell terminated holding the unique identifiers of
+the threads which already terminated (needed for join), input
+and output list cells, and a nextLoc cell holding a natural
+number indicating the next available location. Unlike in the small languages
+in the K tutorial, where we used the fresh predicate to generate fresh
+locations, in larger languages, like SIMPLE, we prefer to explicitly manage
+memory. The location counter in nextLoc models an actual physical
+location in the store; for simplicity, we assume arbitrarily large memory and
+no garbage collection. The threads cell contains one thread
+cell for each existing thread in the program. Note that the thread cell has
+multiplicity *, which means that at any given moment there could be zero,
+one or more thread cells. Each thread cell contains a
+computation cell k, a control cell holding the various
+control structures needed to jump to certain points of interest in the program
+execution, a local environment map cell env mapping the thread local
+variables to locations in the store, and finally a holds map cell
+indicating what locks have been acquired by the thread and not released so far
+and how many times (SIMPLE's locks are re-entrant). The control cell
+currently contains only two subcells, a function stack fstack which
+is a list and an exception stack xstack which is also a list.
+One can add more control structures in the control cell, such as a
+stack for break/continue of loops, etc., if the language is extended with more
+control-changing constructs. Note that all cells except for k are
+also initialized, in that they contain a ground term of their corresponding
+sort. The k cell is initialized with the program that will be passed
+to the K tool, as indicated by the $PGM variable, followed by the
+execute task (defined shortly).

+
  // the syntax declarations below are required because the sorts are
+  // referenced directly by a production and, because of the way KIL to KORE
+  // is implemented, the configuration syntax is not available yet
+  // should simply work once KIL is removed completely
+  // check other definitions for this hack as well
+
+  syntax ControlCell
+  syntax ControlCellFragment
+
+  configuration <T color="red">
+                  <threads color="orange">
+                    <thread multiplicity="*" type="Map" color="yellow">
+                      <id color="pink"> -1 </id>
+                      <k color="green"> $PGM:Stmt ~> execute </k>
+                    //<br/> // TODO(KORE): support latex annotations #1799
+                      <control color="cyan">
+                        <fstack color="blue"> .List </fstack>
+                        <xstack color="purple"> .List </xstack>
+                      </control>
+                    //<br/> // TODO(KORE): support latex annotations #1799
+                      <env color="violet"> .Map </env>
+                      <holds color="black"> .Map </holds>
+                    </thread>
+                  </threads>
+                //<br/> // TODO(KORE): support latex annotations #1799
+                  <genv color="pink"> .Map </genv>
+                  <store color="white"> .Map </store>
+                  <busy color="cyan"> .Set </busy>
+                  <terminated color="red"> .Set </terminated>
+                //<br/> // TODO(KORE): support latex annotations #1799
+                  <input color="magenta" stream="stdin"> .List </input>
+                  <output color="brown" stream="stdout"> .List </output>
+                  <nextLoc color="gray"> 0 </nextLoc>
+                </T>
+

Declarations and Initialization

+ +

We start by defining the semantics of declarations (for variables,
+arrays and functions).

+

Variable Declaration

+ +

The SIMPLE syntax was desugared above so that each variable is
+declared alone and its initialization is done as a separate statement.
+The semantic rule below matches resulting variable declarations of the
+form var X; on top of the k cell
+(indeed, note that the k cell is complete, or round, to the
+left, and is torn, or ruptured, to the right), allocates a fresh
+location L in the store which is initialized with a special value ⊥
+ (indeed, the unit ., or nothing, is matched anywhere
+in the map ‒note the tears at both sides‒ and replaced with the
+mapping L ↦ ⊥), and binds X to L in the local
+environment shadowing previous declarations of X, if any.
+This possible shadowing of X requires us to therefore update the
+entire environment map, which is expensive and can significantly slow
+down the execution of larger programs. On the other hand, since we know
+that L is not already bound in the store, we simply add the binding
+L ↦ ⊥ to the store, thus avoiding a potentially complete
+traversal of the store map in order to update it. We prefer the approach
+used for updating the store whenever possible, because, in addition to being
+faster, it offers more true concurrency than the latter; indeed, according
+to the concurrent semantics of K, the store is not frozen while
+L ↦ ⊥ is added to it, while the environment is frozen during the
+update operation Env[L/X]. The variable declaration command is
+also removed from the top of the computation cell and the fresh location
+counter is incremented. The undefined symbol added in the store
+is of sort KItem, instead of Val, on purpose; this way, the
+store lookup rules will get stuck when one attempts to lookup an
+uninitialized location. All the above happen in one transactional step,
+with the rule below. Note also how configuration abstraction allows us to
+only mention the needed cells; indeed, as the configuration above states,
+the k and env cells are actually located within a
+thread cell within the threads cell, but one needs
+not mention these: the configuration context of the rule is
+automatically transformed to match the declared configuration
+structure.

+
  syntax KItem ::= "undefined"
+
+  rule <k> var X:Id; => .K ...</k>
+       <env> Env => Env[X <- L] </env>
+       <store>... .Map => L |-> undefined ...</store>
+       <nextLoc> L => L +Int 1 </nextLoc>
+

Array Declaration

+ +

The K semantics of the uni-dimensional array declaration is somewhat similar
+to the above declaration of ordinary variables. First, note the
+context declaration below, which requests the evaluation of the array
+dimension. Once evaluated, say to a natural number N, then
+N +Int 1 locations are allocated in the store for
+an array of size N, the additional location (chosen to be the first
+one allocated) holding the array reference value. The array reference
+value array(L,N) states that the array has size N and its
+elements are located contiguously in the store starting with location
+L. The operation L … L' ↦ V, defined at the end of this
+file in the auxiliary operation section, initializes each location in
+the list L … L' to V. Note that, since the dimensions of
+array declarations can be arbitrary expressions, this virtually means
+that we can dynamically allocate memory in SIMPLE by means of array
+declarations.

+
  context var _:Id[HOLE];
+
+  rule <k> var X:Id[N:Int]; => .K ...</k>
+       <env> Env => Env[X <- L] </env>
+       <store>... .Map => L |-> array(L +Int 1, N)
+                          (L +Int 1) ... (L +Int N) |-> undefined ...</store>
+       <nextLoc> L => L +Int 1 +Int N </nextLoc>
+    requires N >=Int 0
+

SIMPLE allows multi-dimensional arrays. For semantic simplicity, we
+desugar them all into uni-dimensional arrays by code transformation.
+This way, we only need to give semantics to uni-dimensional arrays.
+First, note that the context rule above actually evaluates all the array
+dimensions (that's why we defined the expression lists strict!):
+Upon evaluating the array dimensions, the code generation rule below
+desugars multi-dimensional array declaration to uni-dimensional declarations.
+To this aim, we introduce two special unique variable identifiers,
+$1 and $2. The first variable, $1, iterates
+through and initializes each element of the first dimension with an array
+of the remaining dimensions, declared as variable $2:

+
  syntax Id ::= "$1" [token] | "$2" [token]
+  rule var X:Id[N1:Int, N2:Int, Vs:Vals];
+    => var X[N1];
+       {
+         for(var $1 = 0; $1 <= N1 - 1; ++$1) {
+           var $2[N2, Vs];
+           X[$1] = $2;
+         }
+       }
+
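
To illustrate the rule above (a hypothetical trace), once its dimensions are evaluated to integers, a declaration like the one below unfolds into uni-dimensional declarations as follows:

  var a[2, 3];
  // => var a[2];
  //    {
  //      for (var $1 = 0; $1 <= 2 - 1; ++$1) {
  //        var $2[3];
  //        a[$1] = $2;
  //      }
  //    }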

Ideally, one would like to perform syntactic desugarings like the one
+above before the actual semantics. Unfortunately, that was not possible in
+this case because the dimension expressions of the multi-dimensional array need
+to be evaluated first. Indeed, the desugaring rule above does not work if the
+dimensions of the declared array are arbitrary expressions, because they can
+have side effects (e.g., a[++x,++x]) and those side effects would be
+propagated each time the expression is evaluated in the desugaring code (note
+that both the loop condition and the nested multi-dimensional declaration
+would need to evaluate the expressions given as array dimensions).

+

Function declaration

+ +

Functions are evaluated to λ-abstractions and stored like any other
+values in the store. A binding is added into the environment for the function
+name to the location holding its body. Similarly to the C language, SIMPLE
+only allows function declarations at the top level of the program. More
+precisely, the subsequent semantics of SIMPLE only works well when one
+respects this requirement. Indeed, the simplistic context-free parser
+generated by the grammar above is more generous than we may want, in that it
+allows function declarations anywhere any declaration is allowed, including
+inside arbitrary blocks. However, as the rule below shows, we are not
+storing the declaration environment with the λ-abstraction value as
+closures do. Instead, as seen shortly, we switch to the global environment
+whenever functions are invoked, which is consistent with our requirement that
+functions should only be declared at the top. Thus, if one declares local
+functions, then one may see unexpected behaviors (e.g., when one shadows a
+global variable before declaring a local function). The type checker of
+SIMPLE, also defined in K (see examples/simple/typed/static),
+discards programs which do not respect this requirement.

+
  rule <k> function F(Xs) S => .K ...</k>
+       <env> Env => Env[F <- L] </env>
+       <store>... .Map => L |-> lambda(Xs, S) ...</store>
+       <nextLoc> L => L +Int 1 </nextLoc>
+

When we are done with the first pass (pre-processing), the computation
+cell k contains only the token execute (see the configuration
+declaration above, where the computation item execute was placed
+right after the program in the k cell of the initial configuration)
+and the cell genv is empty. In this case, we have to call
+main() and to initialize the global environment by transferring the
+contents of the local environment into it. We prefer to do it this way, as
+opposed to processing all the top level declarations directly within the global
+environment, because we want to avoid duplication of semantics: the syntax of
+the global declarations is identical to that of their corresponding local
+declarations, so the semantics of the latter suffices provided that we copy
+the local environment into the global one once we are done with the
+pre-processing. We want this separate pre-processing step precisely because
+we want to create the global environment. All (top-level) functions end up
+having their names bound in the global environment and, as seen below, they
+are executed in that same global environment; all these mean, in particular,
+that the functions "see" each other, allowing for mutual recursion, etc.

+
  syntax KItem ::= "execute"
+  rule <k> execute => main(.Exps); </k>
+       <env> Env </env>
+       <genv> .Map => Env </genv>
+

Expressions

+ +

We next define the K semantics of all the expression constructs.

+

Variable lookup

+ +

When a variable X is the first computational task, and X is bound to some
+location L in the environment, and L is mapped to some value V in the
+store, then we rewrite X into V:

+
  rule <k> X:Id => V ...</k>
+       <env>... X |-> L ...</env>
+       <store>... L |-> V:Val ...</store>
+

Note that the rule above excludes reading ⊥, because ⊥ is not
+a value and V is checked at runtime to be a value.

+

Variable/Array increment

+ +

This is tricky, because we want to allow both ++x and ++a[5].
+Therefore, we need to extract the lvalue of the expression to increment.
+To do that, we state that the expression to increment should be wrapped
+by the auxiliary lvalue operation and then evaluated. The semantics
+of this auxiliary operation is defined at the end of this file. For now, all
+we need to know is that it takes an expression and evaluates to a location
+value. Location values, also defined at the end of the file, are integers
+wrapped with the operation loc, to distinguish them from ordinary
+integers.

+
  context ++(HOLE => lvalue(HOLE))
+  rule <k> ++loc(L) => I +Int 1 ...</k>
+       <store>... L |-> (I => I +Int 1) ...</store>
+

Arithmetic operators

+ +

There is nothing special about the following rules. They rewrite the
+language constructs to their library counterparts when their arguments
+become values of expected sorts:

+
  rule I1 + I2 => I1 +Int I2
+  rule Str1 + Str2 => Str1 +String Str2
+  rule I1 - I2 => I1 -Int I2
+  rule I1 * I2 => I1 *Int I2
+  rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0
+  rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0
+  rule - I => 0 -Int I
+  rule I1 < I2 => I1 <Int I2
+  rule I1 <= I2 => I1 <=Int I2
+  rule I1 > I2 => I1 >Int I2
+  rule I1 >= I2 => I1 >=Int I2
+

The equality and inequality constructs reduce to syntactic comparison
+of the two argument values (which is what the equality on K terms does).

+
  rule V1:Val == V2:Val => V1 ==K V2
+  rule V1:Val != V2:Val => V1 =/=K V2
+

The logical negation is clear, but the logical conjunction and disjunction
+are short-circuited:

+
  rule ! T => notBool(T)
+  rule true  && E => E
+  rule false && _ => false
+  rule true  || _ => true
+  rule false || E => E
+

Array lookup

+ +

Untyped SIMPLE does not check array bounds (the dynamically typed version of
+it, in examples/simple/typed/dynamic, does check for out-of-bounds array
+accesses). The first rule below desugars the multi-dimensional array access to
+uni-dimensional array access; recall that the array access operation was
+declared strict, so all sub-expressions involved are already values at this
+stage. The second rule rewrites the array access to a lookup operation at a
+precise location; we prefer to do it this way to avoid locking the store.
+The semantics of the auxiliary lookup operation is straightforward,
+and is defined at the end of the file.

+
// The [anywhere] feature is underused, because it would only be used
+// at the top of the computation or inside the lvalue wrapper. So it
+// may not be worth it, or we may need to come up with a special notation
+// allowing us to enumerate contexts for [anywhere] rules.
+  rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs]
+    [anywhere]
+
+  rule array(L,_)[N:Int] => lookup(L +Int N)
+    [anywhere]
+

Size of an array

+ +

The size of the array is stored in the array reference value, and the
+sizeOf construct was declared strict, so:

+
  rule sizeOf(array(_,N)) => N
+

Function call

+ +

Function application was strict in both its arguments, so we can
+assume that both the function and its arguments are evaluated to
+values (the former expected to be a λ-abstraction). The first
+rule below matches a well-formed function application on top of the
+computation and performs the following steps atomically: it switches
+to the function body followed by return; (for the case in
+which the function does not use an explicit return statement); it
+pushes the remaining computation, the current environment, and the
+current control data onto the function stack (the remaining
+computation can thus also be discarded from the computation cell,
+because an unavoidable subsequent return statement ‒see
+above‒ will always recover it from the stack); it switches the
+current environment (which is being pushed on the function stack) to
+the global environment, which is where the free variables in the
+function body should be looked up; it binds the formal parameters to
+fresh locations in the new environment, and stores the actual
+arguments to those locations in the store (this latter step is easily
+done by reducing the problem to variable declarations, whose semantics
+we have already defined; the auxiliary operation mkDecls is
+defined at the end of the file). The second rule pops the
+computation, the environment and the control data from the function
+stack when a return statement is encountered as the next
+computational task, passing the returned value to the popped
+computation (the popped computation was the context in which the
+returning function was called). Note that the pushing/popping of the
+control data is crucial. Without it, one may have a function that
+contains an exception block with a return statement inside, which
+would put the xstack cell in an inconsistent state (since the
+exception block modifies it, but that modification should be
+irrelevant once the function returns). We add an artificial
+nothing value to the language, which is returned by the
+nullary return; statement.

+
  syntax KItem ::=  (Map,K,ControlCellFragment)
+
+  rule <k> lambda(Xs,S)(Vs:Vals) ~> K => mkDecls(Xs,Vs) S return; </k>
+       <control>
+         <fstack> .List => ListItem((Env,K,C)) ...</fstack>
+         C
+       </control>
+       <env> Env => GEnv </env>
+       <genv> GEnv </genv>
+
+  rule <k> return(V:Val); ~> _ => V ~> K </k>
+       <control>
+         <fstack> ListItem((Env,K,C)) => .List ...</fstack>
+         (_ => C)
+       </control>
+       <env> _ => Env </env>
+
+  syntax Val ::= "nothing"
+  rule return; => return nothing;
+

Like for division-by-zero, it is left unspecified what happens
+when the nothing value is used in domain calculations. For
+example, from the perspective of the language semantics,
+7 +Int nothing can evaluate to anything, or
+may not evaluate at all (be undefined). If one wants to make sure that
+such artificial values are never misused, then one needs to define a static
+checker (also using K, like our type checker in
+examples/simple/typed/static) and reject programs that do so.
+Note that, unlike the undefined symbol which had the sort K
+instead of Val, we defined nothing to be a value. That
+is because, as explained above, we do not want the program to get
+stuck when nothing is returned by a function. Instead, we want the
+behavior to be unspecified; in particular, if one is careful to never
+use the returned value in domain computation, like it happens when we
+call a function for its side effects (e.g., with a statement of the
+form f(x);), then the program does not get stuck.
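
The following illustrative program (not part of the definition) shows the difference: calling a function only for its side effects is fine, while using its nothing result in an arithmetic operation gets stuck:

  function tick(x) { ++x; }    // no return statement, so it returns nothing
  function main() {
    tick(7);                   // fine: the nothing result is discarded by ;
    // var y = tick(7) + 1;    // would get stuck: nothing + 1 has no rule
  }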

+

Read

+ +

The read() expression construct simply evaluates to the next
+input value, at the same time discarding the input value from the
+input cell.

+
  rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input>
+

Assignment

+ +

In SIMPLE, like in C, assignments are expression constructs and not statement
+constructs. To make it a statement all one needs to do is to follow it by a
+semi-colon ; (see the semantics for expression statements below).
+Like for the increment, we want to allow assignments not only to variables but
+also to array elements, e.g., e1[e2] = e3 where e1 evaluates
+to an array reference, e2 to a natural number, and e3 to any
+value. Thus, we first compute the lvalue of the left-hand-side expression
+that appears in an assignment, and then we do the actual assignment to the
+resulting location:

+
  context (HOLE => lvalue(HOLE)) = _
+
+  rule <k> loc(L) = V:Val => V ...</k> <store>... L |-> (_ => V) ...</store>
+

Statements

+ +

We next define the K semantics of statements.

+

Blocks

+ +

Empty blocks are simply discarded, as shown in the first rule below.
+For non-empty blocks, we schedule the enclosed statement but we have to
+make sure the environment is recovered after the enclosed statement executes.
+Recall that we allow local variable declarations, whose scope is the block
+enclosing them. That is the reason for which we have to recover the
+environment after the block. This allows us to have a very simple semantics
+for variable declarations, as we did above. One can make the two rules below
+computational if one wants them to count as computational steps.

+
  rule {} => .K
+  rule <k> { S } => S ~> setEnv(Env) ...</k>  <env> Env </env>
+
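
For instance, in the illustrative program below the inner declaration of x shadows the outer one only until the end of its block, because the environment is recovered when the block completes:

  function main() {
    var x = 1;
    {
      var x = 2;
      print(x, "\n");   // prints 2
    }
    print(x, "\n");     // prints 1: the outer environment was recovered
  }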

The basic definition of environment recovery is straightforward and
+given in the section on auxiliary constructs at the end of the file.

+

There are two common alternatives to the above semantics of blocks.
+One is to keep track of the variables which are declared in the block and only
+recover those at the end of the block. This way one does more work for
+variable declarations but conceptually less work for environment recovery; we
+say conceptually because it is not clear that it is indeed the case that
+one does less work when AC matching is involved. The other alternative is to
+work with a stack of environments instead of a flat environment, and push the
+current environment when entering a block and pop it when exiting it. This
+way, one does more work when accessing variables (since one has to search the
+variable in the environment stack in a top-down manner), but on the other hand
+uses smaller environments and the definition gets closer to an implementation.
+Based on experience with dozens of language semantics and other K definitions,
+we have found that our approach above is the best trade-off between elegance
+and efficiency (especially since rewrite engines have built-in techniques to
+lazily copy terms, by need, thus not creating unnecessary copies),
+so it is the one that we follow in general.

+

Sequential composition

+ +

Sequential composition is desugared into K's builtin sequentialization
+operation (recall that, like in C, the semi-colon ; is not a
+statement separator in SIMPLE — it is either a statement terminator or a
+construct for building a statement from an expression). Note that K allows
+us to define the semantics of SIMPLE in such a way that statements eventually
+dissolve from the top of the computation when they are completed; this is in
+sharp contrast to (artificially) evaluating them to a special
+skip statement value and then getting rid of that special value, as
+is the case in other semantic approaches (where everything must evaluate
+to something). This means that once S₁ completes in the rule below, S₂
+becomes automatically the next computation item without any additional
+(explicit or implicit) rules.

+
  rule S1:Stmt S2:Stmt => S1 ~> S2
+

A subtle aspect of the rule above is that S₁ is declared to have sort
+Stmts and not Stmt. That is because desugaring macros can indeed
+produce left associative sequential composition of statements. For example,
+the code var x=0; x=1; is desugared to
+(var x; x=0;) x=1;, so although originally the first term of
+the sequential composition had sort Stmt, after desugaring it became
+of sort Stmts. Note that the attribute [right] associated
+to the sequential composition production is an attribute of the syntax, and not
+of the semantics: e.g., it tells the parser to parse
+var x; x=0; x=1; as var x; (x=0; x=1;), but it
+does not tell the rewrite engine to rewrite (var x; x=0;) x=1; to
+var x; (x=0; x=1;).

+

Expression statements

+ +

Expression statements are only used for their side effects, so their result
+value is simply discarded. Common examples of expression statements are ones
+of the form ++x;, x=e;, e1[e2]=e3;, etc.

+
  rule _:Val; => .K
+

Conditional

+ +

Since the conditional was declared with the strict(1) attribute, we
+can assume that its first argument will eventually be evaluated. The rules
+below cover the only two possibilities in which the conditional is allowed to
+proceed (otherwise the rewriting process gets stuck).

+
  rule if ( true) S else _ => S
+  rule if (false) _ else S => S
+

While loop

+ +

The simplest way to give the semantics of the while loop is by unrolling.
+Note, however, that its unrolling is only allowed when the while loop reaches
+the top of the computation (to avoid non-termination of unrolling). The
+simple while loop semantics below works because our while loops in SIMPLE are
+indeed very basic. If we allowed break/continue of loops then we would need
+a completely different semantics, which would also involve the control cell.

+
  rule while (E) S => if (E) {S while(E)S}
+

Print

+ +

The print statement was strict, so all its arguments are now
+evaluated (recall that print is variadic). We append each of
+its evaluated arguments to the output buffer, and discard the residual
+print statement with an empty list of arguments.

+
  rule <k> print(V:Val, Es => Es); ...</k> <output>... .List => ListItem(V) </output>
+  rule print(.Vals); => .K
+

Exceptions

+ +

SIMPLE allows parametric exceptions, in that one can throw and catch a
+particular value. The statement try S₁ catch(X) S₂
+proceeds with the evaluation of S₁. If S₁ evaluates normally, i.e.,
+without any exception thrown, then S₂ is discarded and the execution
+continues normally. If S₁ throws an exception with a statement of the
+form throw E, then E is first evaluated to some value V
+(throw was declared to be strict), then V is bound to X, then
+S₂ is evaluated in the new environment while the remainder of S₁ is
+discarded, then the environment is recovered and the execution continues
+normally with the statement following the try S₁ catch(X) S₂ statement.
+Exceptions can be nested and the statements in the
+catch part (S₂ in our case) can throw exceptions to the
+upper level. One should be careful with how one handles the control data
+structures here, so that the abrupt changes of control due to exception
+throwing and to function returns interact correctly with each other.
+For example, we want to allow function calls inside the statement S₁ in
+a try S₁ catch(X) S₂ block which can throw an exception
+that is not caught by the function but instead is propagated to the
+try S₁ catch(X) S₂ block that called the function.
+Therefore, we have to make sure that the function stack as well as other
+potential control structures are also properly modified when the exception
+is thrown to correctly recover the execution context. This can be easily
+achieved by pushing/popping the entire current control context onto the
+exception stack. The three rules below modularly do precisely the above.

+
  syntax KItem ::= (Id,Stmt,K,Map,ControlCellFragment)
+
+  syntax KItem ::= "popx"
+
+  rule <k> (try S1 catch(X) {S2} => S1 ~> popx) ~> K </k>
+       <control>
+         <xstack> .List => ListItem((X, S2, K, Env, C)) ...</xstack>
+         C
+       </control>
+       <env> Env </env>
+
+  rule <k> popx => .K ...</k>
+       <xstack> ListItem(_) => .List ...</xstack>
+
+  rule <k> throw V:Val; ~> _ => { var X = V; S2 } ~> K </k>
+       <control>
+         <xstack> ListItem((X, S2, K, Env, C)) => .List ...</xstack>
+         (_ => C)
+       </control>
+       <env> _ => Env </env>
+

The catch statement S₂ needs to be executed in the original environment,
+but where the thrown value V is bound to the catch variable X. We here
+chose to rely on two previously defined constructs when giving semantics to
+the catch part of the statement: (1) the variable declaration with
+initialization, for binding X to V; and (2) the block construct for
+preventing X from shadowing variables in the original environment upon the
+completion of S₂.
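
The following illustrative program (not part of the definition) shows an exception thrown inside a function and caught by the try/catch block surrounding the call site, which is precisely the interaction between the function stack and the exception stack discussed above:

  function safeDiv(x, y) {
    if (y == 0) { throw "division by zero"; }
    return x / y;
  }

  function main() {
    try {
      print(safeDiv(10, 0), "\n");
    } catch(msg) {
      print("caught: ", msg, "\n");
    }
  }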

+

Threads

+ +

SIMPLE's threads can be created and terminated dynamically, and can
+synchronize by acquiring and releasing re-entrant locks and by rendezvous.
+We discuss the seven rules giving the semantics of these operations below.
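
As a running illustration for this section, the program below (an illustrative example, not part of the definition) spawns a thread, protects a shared counter with a lock, and joins the thread before printing:

  function main() {
    var count = 0, lock = 0;   // the value of lock (0) serves as the lock
    var t = spawn {
      acquire lock;
      count = count + 1;
      release lock;
    };
    acquire lock;
    count = count + 10;
    release lock;
    join t;                    // wait for the spawned thread to terminate
    print(count, "\n");        // prints 11, regardless of the interleaving
  }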

+

Thread creation

+ +

Threads can be created by any other threads using the spawn S
+construct. The spawn expression construct evaluates to the unique identifier
+of the newly created thread and, at the same time, a new thread cell is added
+into the configuration, initialized with the S statement and sharing the
+same environment with the parent thread. Note that the newly created
+thread cell is torn. That means that the remaining cells are added
+and initialized automatically as described in the definition of SIMPLE's
+configuration. This is part of K's configuration abstraction mechanism.

+
  rule <thread>...
+         <k> spawn S => !T:Int ...</k>
+         <env> Env </env>
+       ...</thread>
+       (.Bag => <thread>...
+               <k> S </k>
+               <env> Env </env>
+               <id> !T </id>
+             ...</thread>)
+

Thread termination

+ +

Dually to the above, when a thread terminates, its assigned computation (the
+contents of its k cell) is empty, so the thread can be dissolved.
+However, since no discipline is imposed on how locks are acquired and released,
+it can be the case that a terminating thread still holds locks. Those locks
+must be released, so other threads attempting to acquire them do not deadlock.
+We achieve that by removing all the locks held by the terminating thread in its
+holds cell from the set of busy locks in the busy cell
+(keys(H) returns the domain of the map H as a set, that is, only
+the locks themselves ignoring their multiplicity). As seen below, a lock is
+added to the busy cell as soon as it is acquired for the first time
+by a thread. The unique identifier of the terminated thread is also collected
+into the terminated cell, so the join construct knows which
+threads have terminated.

+
  rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag)
+       <busy> Busy => Busy -Set keys(H) </busy>
+       <terminated>... .Set => SetItem(T) ...</terminated>
+

Thread joining

+ +

Thread joining is now straightforward: all we need to do is to check whether
+the identifier of the thread to be joined is in the terminated cell.
+If yes, then the join statement dissolves and the joining thread
+continues normally; if not, then the joining thread gets stuck.

+
  rule <k> join T:Int; => .K ...</k>
+       <terminated>... SetItem(T) ...</terminated>
+

Acquire lock

+ +

There are two cases to distinguish when a thread attempts to acquire a lock
+(in SIMPLE any value can be used as a lock):
+(1) The thread does not currently have the lock, in which case it has to
+take it provided that the lock is not already taken by another thread (see
+the side condition of the first rule).
+(2) The thread already has the lock, in which case it just increments its
+counter for the lock (the locks are re-entrant). These two cases are captured
+by the two rules below:

+
  rule <k> acquire V:Val; => .K ...</k>
+       <holds>... .Map => V |-> 0 ...</holds>
+       <busy> Busy (.Set => SetItem(V)) </busy>
+    requires (notBool(V in Busy))
+
+  rule <k> acquire V; => .K ...</k>
+       <holds>... V:Val |-> (N => N +Int 1) ...</holds>
+

Release lock

+ +

Similarly, there are two corresponding cases to distinguish when a thread
+releases a lock:
+(1) The thread holds the lock more than once, in which case all it needs to do
+is to decrement the lock counter.
+(2) The thread holds the lock only once, in which case it needs to remove it
+from its holds cell and also from the shared busy cell,
+so other threads can acquire it if they need to.

+
  rule <k> release V:Val; => .K ...</k>
+       <holds>... V |-> (N => N -Int 1) ...</holds>
+    requires N >Int 0
+
+  rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds>
+       <busy>... SetItem(V) => .Set ...</busy>
+

Rendezvous synchronization

+ +

In addition to synchronization through acquire and release of locks, SIMPLE
+also provides a construct for rendezvous synchronization. A thread whose next
+statement to execute is rendezvous(V) gets stuck until another
+thread reaches an identical statement; when that happens, the two threads
+drop their rendezvous statements and continue their executions. If three
+threads happen to have an identical rendezvous statement as their next
+statement, then precisely two of them will synchronize and the other will
+remain blocked until another thread reaches a similar rendezvous statement.
+The rule below is as simple as it can be. Note, however, that, again, it is
+K's mechanism for configuration abstraction that makes it work as desired:
+since the only cell which can occur multiple times and contains a k cell is
+the thread cell, the only way to concretize the rule below to the
+actual configuration of SIMPLE is to include each k cell in a
+thread cell.

+
  rule <k> rendezvous V:Val; => .K ...</k>
+       <k> rendezvous V; => .K ...</k>
+
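
For instance, in the illustrative program below neither thread can pass its rendezvous statement until the other one reaches it as well:

  function main() {
    spawn { print("ping\n"); rendezvous 42; print("pong\n"); };
    rendezvous 42;
    print("done\n");
  }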

Auxiliary declarations and operations

+ +

In this section we define all the auxiliary constructs used in the
+above semantics.

+

Making declarations

+ +

The mkDecls auxiliary construct turns a list of identifiers
+and a list of values into a sequence of corresponding variable
+declarations.

+
  syntax Stmt ::= mkDecls(Ids,Vals)  [function]
+  rule mkDecls((X:Id, Xs:Ids), (V:Val, Vs:Vals)) => var X=V; mkDecls(Xs,Vs)
+  rule mkDecls(.Ids,.Vals) => {}
+

Location lookup

+ +

The operation below is straightforward.

+
  syntax Exp ::= lookup(Int)
+  rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store>
+

Environment recovery

+ +

We have already discussed the environment recovery auxiliary operation in the
+IMP++ tutorial:

+
// TODO: eliminate the env wrapper, like we did in IMP++
+
+  syntax KItem ::= setEnv(Map)
+  rule <k> setEnv(Env) => .K ...</k> <env> _ => Env </env>
+

While theoretically sufficient, the basic definition for environment
+recovery alone is suboptimal. Consider a loop while (E)S,
+whose semantics (see above) was given by unrolling. S
+is a block. Then the semantics of blocks above, together with the
+unrolling semantics of the while loop, will yield a computation
+structure in the k cell that increasingly grows, adding a new
+environment recovery task right in front of the already existing sequence of
+similar environment recovery tasks (this phenomenon is similar to the
+"tail recursion" problem). Of course, when we have a sequence of environment
+recovery tasks, we only need to keep the last one. The elegant rule below
+does precisely that, thus avoiding the unnecessary computation explosion
+problem:

+
  rule (setEnv(_) => .K) ~> setEnv(_)
+

In fact, the above follows a common convention in K for recovery
+operations of cell contents: the meaning of a computation task of the form
+cell(C) that reaches the top of the computation is that the current
+contents of cell cell is discarded and gets replaced with C. We
+did not add support for these special computation tasks in our current
+implementation of K, so we need to define them as above.

+

lvalue and loc

+ +

For convenience in giving the semantics of constructs like the increment and
+the assignment, which we want to operate the same way on variables and on
+array elements, we used an auxiliary lvalue(E) construct which was
+expected to evaluate to the lvalue of the expression E. This is only
+defined when E has an lvalue, that is, when E is either a variable or
+evaluates to an array element. lvalue(E) evaluates to a value of
+the form loc(L), where L is the location where the value of E
+can be found; for clarity, we use loc to structurally distinguish
+natural numbers from location values. In giving semantics to lvalue
+there are two cases to consider. (1) If E is a variable, then all we need
+to do is to grab its location from the environment. (2) If E is an array
+element, then we first evaluate the array and its index in order to identify
+the exact location of the element of concern, and then return that location;
+the last rule below works because its preceding context declarations ensure
+that the array and its index are evaluated, and then the rule for array lookup
+(defined above) rewrites the evaluated array access construct to its
+corresponding store lookup operation.

+
// For parsing reasons, we prefer to allow lvalue to take a K
+
+  syntax Exp ::= lvalue(K)
+  syntax Val ::= loc(Int)
+
+// Local variable
+
+  rule <k> lvalue(X:Id => loc(L)) ...</k> <env>... X |-> L:Int ...</env>
+
+// Array element: evaluate the array and its index;
+// then the array lookup rule above applies.
+
+  context lvalue(_::Exp[HOLE::Exps])
+  context lvalue(HOLE::Exp[_::Exps])
+
+// Finally, return the address of the desired object member
+
+  rule lvalue(lookup(L:Int) => loc(L))
+
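
The illustrative program below exercises both cases: the assignment to a[0] goes through lvalue on an array element, while ++i goes through lvalue on a variable:

  function main() {
    var a[3], i = 0;
    a[0] = 7;               // lvalue(a[0]) evaluates to the loc of a's first element
    ++i;                    // lvalue(i) takes i's location from the environment
    print(a[0] + i, "\n");  // prints 8
  }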

Initializing multiple locations

+ +

The following operation initializes a sequence of locations with the same
+value:

+
  syntax Map ::= Int "..." Int "|->" K [function]
+  rule N...M |-> _ => .Map  requires N >Int M
+  rule N...M |-> K => N |-> K (N +Int 1)...M |-> K  requires N <=Int M
+

The semantics of SIMPLE is now complete. Make sure you kompile the
+definition with the right options in order to generate the desired model.
+No kompile options are needed if you only want to execute the definition
+(and thus get an interpreter), but if you want to search for different
+program behaviors then you need to kompile with the --enable-search option.

+
endmodule
+

Go to Lesson 2, SIMPLE typed static

+

SIMPLE — Typed — Static

+ +

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+ +

This is the K definition of the static semantics of the typed SIMPLE
+language, or in other words, a type system for the typed SIMPLE
+language in K. We do not re-discuss the various features of the
+SIMPLE language here. The reader is referred to the untyped version of
+the language for such discussions. We here only focus on the new and
+interesting problems raised by the addition of type declarations, and
+what it takes to devise a type system/checker for the language.

+

When designing a type system for a language, no matter in what
+paradigm, we have to decide upon the intended typing policy. Note
+that we can have multiple type systems for the same language, one for
+each typing policy. For example, should we accept programs which
+don't have a main function? Or should we allow functions that do not
+return explicitly? Or should we allow functions whose type expects
+them to return a value (say an int) to use a plain
+return; statement, which returns no value, like in C?
+And so on and so forth. Typically, there are two opposite tensions
+when designing a type system. On the one hand, you want your type
+system to be as permissive as possible, that is, to accept as many
+programs that do not get stuck when executed with the untyped
+semantics as possible; this will keep the programmers using your
+language happy. On the other hand, you want your type system to have
+a reasonable performance when implemented; this will keep both the
+programmers and the implementers of your language happy. For example,
+a type system for rejecting programs that could perform
+division-by-zero is not expected to be feasible in general. A simple
+guideline when designing typing policies is to imagine how the
+semantics of the untyped language may get stuck and try to prevent
+those situations from happening.

+

Before we give the K type system of SIMPLE formally, we discuss,
+informally, the intended typing policy:

+
    +
  • +

    Each program should contain a main() function. Indeed,
    +the untyped SIMPLE semantics will get stuck on any program which does
    +not have a main function.

    +
  • +
  • +

    Each primitive value has its own type, which can be int,
    +bool, or string. There is also a type void
    +for nonexistent values, for example for the result of a function meant
    +to return no value (but only be used for its side effects, like a
    +procedure).

    +
  • +
  • +

    The syntax of untyped SIMPLE is extended to allow type
    +declarations for all the variables, including array variables. This is
    +done in a C/Java-style. For example, int x; or
    +int x=7, y=x+3;, or int[][][] a[10,20];
    +(the latter defines a 10 × 20 matrix of arrays of integers).
    +Recall from untyped SIMPLE that, unlike in C/Java, our multi-dimensional
    +arrays use comma-separated arguments, although they have the array-of-array
    +semantics.

    +
  • +
  • +

    Functions are also typed in a C/Java style. However, since in SIMPLE
    +we allow functions to be passed to and returned by other functions, we also
    +need function types. We will use the conventional higher-order arrow-notation
    +for function types, but will separate the argument types with commas. For
    +example, a function returning an array of bool elements and
    +taking as argument an array x of two-integer-argument functions
    +returning an integer, is declared using a syntax of the form
    +bool[] f(((int,int)->int)[] x) { ... }
    +and has the type ((int,int)->int)[] -> bool[].

    +
  • +
  • +

    We allow any variable declarations at the top level. Functions
    +can only be declared at the top level. Each function can only access the
    +other functions and variables declared at the top level, or its own locally
    +declared variables. SIMPLE has static scoping.

    +
  • +
  • +

    The various expression and statement constructs take only elements of
    +the expected types.

    +
  • +
  • +

    Increment and assignment can operate both on variables and on array
    +elements. For example, if f has type int->int[][] and
    +function g has the type int->int, then the
    +increment expression ++f(7)[g(2),g(3)] is valid.

    +
  • +
  • +

    Functions should only return values of their declared result
    +type. To give the programmers more flexibility, we allow functions to
    +use return; statements to terminate without returning an
    +actual value, or to not explicitly use any return statement,
    +regardless of their declared return type. This flexibility can be
    +handy when writing programs using certain functions only for their
    +side effects. Nevertheless, as the dynamic semantics shows, a return
    +value is automatically generated when an explicit return
    +statement is not encountered.

    +
  • +
  • +

    For simplicity, we here limit exceptions to only throw and catch
    +integer values. We leave it as an exercise to the reader to extend the
    +semantics to allow throwing and catching arbitrary-type exceptions.
    +Like in programming languages like Java, one can go even further and
    +define a semantics where thrown exceptions are propagated through
    +try-catch statements until one of the corresponding type is found.
    +We will do this when we define the KOOL language, not here.
    +To keep the definition of SIMPLE simple, here we do not attempt to
    +reject programs which throw uncaught exceptions.

    +
  • +
+

Like in untyped SIMPLE, some constructs can be desugared into a
+smaller set of basic constructs. In general, it should be clear why a
+program does not type-check by looking at the top of the k cells in
+its stuck configuration.

+
module SIMPLE-TYPED-STATIC-SYNTAX
+  imports DOMAINS-SYNTAX
+

Syntax

+ +

The syntax of typed SIMPLE extends that of untyped SIMPLE with support
+for declaring types to variables and functions.

+
  syntax Id ::= "main" [token]
+

Types

+ +

Primitive, array and function types, as well as lists (or tuples) of types.
+The lists of types are useful for function arguments.

+
  syntax Type ::= "void" | "int" | "bool" | "string"
+                | Type "[" "]"
+                | "(" Type ")"             [bracket]
+                > Types "->" Type
+
+  syntax Types ::= List{Type,","}          [overload(exps)]
+

Declarations

+ +

Variable and function declarations have the expected syntax. For variables,
+we basically just replaced the var keyword of untyped SIMPLE with a
+type. For functions, besides replacing the function keyword with a
+type, we also introduce a new syntactic category for typed variables,
+Param, and lists over it.

+
  syntax Param ::= Type Id
+  syntax Params ::= List{Param,","}
+
+  syntax Stmt ::= Type Exps ";"
+                | Type Id "(" Params ")" Block
+
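
For instance, a complete typed SIMPLE program using these declaration forms might look as follows (an illustrative sketch; in particular, we declare main with result type int here):

  int factorial(int n) {
    if (n <= 0) { return 1; }
    return n * factorial(n - 1);
  }

  int main() {
    int n = read();
    print("factorial: ", factorial(n), "\n");
    return 0;
  }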

Expressions

+ +

The syntax of expressions is identical to that in untyped SIMPLE,
+except for the logical conjunction and disjunction which have
+different strictness attributes, because they now have different
+evaluation strategies.

+
  syntax Exp ::= Int | Bool | String | Id
+               | "(" Exp ")"             [bracket]
+               | "++" Exp
+               > Exp "[" Exps "]"        [strict]
+               > Exp "(" Exps ")"        [strict]
+               | "-" Exp                 [strict]
+               | "sizeOf" "(" Exp ")"    [strict]
+               | "read" "(" ")"
+               > left:
+                 Exp "*" Exp             [strict, left]
+               | Exp "/" Exp             [strict, left]
+               | Exp "%" Exp             [strict, left]
+               > left:
+                 Exp "+" Exp             [strict, left]
+               | Exp "-" Exp             [strict, left]
+               > non-assoc:
+                 Exp "<" Exp             [strict, non-assoc]
+               | Exp "<=" Exp            [strict, non-assoc]
+               | Exp ">" Exp             [strict, non-assoc]
+               | Exp ">=" Exp            [strict, non-assoc]
+               | Exp "==" Exp            [strict, non-assoc]
+               | Exp "!=" Exp            [strict, non-assoc]
+               > "!" Exp                 [strict]
+               > left:
+                 Exp "&&" Exp            [strict, left]
+               | Exp "||" Exp            [strict, left]
+               > "spawn" Block
+               > Exp "=" Exp             [strict(2), right]
+

Note that spawn has not been declared strict. This may
+seem unexpected, because the child thread shares the same environment
+with the parent thread, so from a typing perspective the spawned
+statement makes the same sense in a child thread as it makes in the
+parent thread. The reason for not declaring it strict is that we
+want to disallow programs where the spawned thread calls the
+return statement, because those programs would get stuck in
+the dynamic semantics. The type semantics of spawn below will reject
+such programs.
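For instance, a made-up fragment like the sketch below is rejected: the typing
rule for spawn given later creates a task without a return cell, so the
return inside the spawned statement cannot type and the typing process
gets stuck:

  int main() {
    spawn { return 1; };   // rejected: return inside a spawned thread
    return 0;
  }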

+

We still need lists of expressions, defined below, but note that we do
+not need lists of identifiers anymore. They have been replaced by the lists
+of parameters.

+
  syntax Exps ::= List{Exp,","}          [strict, overload(exps)]
+

Statements

+ +

The statements have the same syntax as in untyped SIMPLE, except for
+the exceptions, which now type their parameter. Note that, unlike in untyped
+SIMPLE, all statement constructs which have arguments and are not desugared
+are strict, including the conditional and the while. Indeed, from a
+typing perspective, they are all strict: first type their arguments and then
+type the actual construct.

+
  syntax Block ::= "{" "}"
+                | "{" Stmt "}"
+
+  syntax Stmt ::= Block
+                | Exp ";"                                  [strict]
+                | "if" "(" Exp ")" Block "else" Block      [avoid, strict]
+                | "if" "(" Exp ")" Block                   [macro]
+                | "while" "(" Exp ")" Block                [strict]
+                | "for" "(" Stmt Exp ";" Exp ")" Block     [macro]
+                | "return" Exp ";"                         [strict]
+                | "return" ";"
+                | "print" "(" Exps ")" ";"                 [strict]
+                | "try" Block "catch" "(" Param ")" Block  [strict(1)]
+                | "throw" Exp ";"                          [strict]
+                | "join" Exp ";"                           [strict]
+                | "acquire" Exp ";"                        [strict]
+                | "release" Exp ";"                        [strict]
+                | "rendezvous" Exp ";"                     [strict]
+

Note that the sequential composition is now sequentially strict,
+because, unlike in the dynamic semantics where statements dissolved,
+they now reduce to the stmt type, which is a result.

+
  syntax Stmt ::= Stmt Stmt                             [seqstrict, right]
+

Desugaring macros

+ +

We use the same desugaring macros as in untyped SIMPLE, but, of
+course, including the types of the involved variables.

+
  rule if (E) S => if (E) S else {}
+  rule for(Start Cond; Step) {S:Stmt} => {Start while(Cond){S Step;}}
+  rule for(Start Cond; Step) {} => {Start while(Cond){Step;}}
+  rule T:Type E1:Exp, E2:Exp, Es:Exps; => T E1; T E2, Es;               [anywhere]
+  rule T:Type X:Id = E; => T X; X = E;                                  [anywhere]
+
+endmodule
+
+
+module SIMPLE-TYPED-STATIC
+  imports SIMPLE-TYPED-STATIC-SYNTAX
+  imports DOMAINS
+

Static semantics

+ +

Here we define the type system of SIMPLE. Like concrete semantics,
+type systems defined in K are also executable. However, K type
+systems turn into type checkers instead of interpreters when executed.

+

The typing process is done in two (overlapping) phases. In the first
+phase the global environment is built, which contains type bindings
+for all the globally declared variables and functions. For functions,
+the declared types will be "trusted" during the first phase and
+simply bound to their corresponding function names and placed in the
+global type environment. At the same time, type-checking tasks verifying
+that the function bodies indeed respect their claimed types are generated.
+All these tasks are (concurrently) verified during the second phase.
+This way, all the global variable and function declarations are
+available in the global type environment and can be used in order to
+type-check each function body. This is consistent with the semantics
+of untyped SIMPLE, where functions can access all the global variables
+and can call any other function declared in the same program. The
+two phases may overlap because of the K concurrent semantics. For
+example, a function task can be started while the first phase is still
+running; moreover, it may even complete before the first phase does,
+namely when all the global variables and functions that it needs have
+already been processed and made available in the global environment by
+the first phase task.
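For example, in a made-up fragment like the one sketched below, the body of
f can be type-checked even though g and the global n are only
declared later, because their declared types are placed in the global type
environment by the first phase:

  int f(int x) { return g(x) + n; }   // uses g and n, declared below
  int n = 10;
  int g(int y) { return y * 2; }
  int main() { return f(5); }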

+

Extended syntax and results

+ +

The idea is to start with a configuration holding the program to type
+in one of its cells, then apply rewrite rules on it mixing types and
+language syntax, and eventually obtain a type instead of the original
+program. In other words, the program reduces to its type using
+the K rules giving the type system of the language. In doing so,
+additional typing tasks for function bodies are generated and solved
+the same way. If this rewriting process gets stuck, then we say that
+the program is not well-typed. Otherwise the program is well-typed
+(by definition). We did not need types for statements and for blocks
+as part of the typed SIMPLE syntax, because programmers are not allowed
+to use such types explicitly. However, we are going to need them in the
+type system, because blocks and statements reduce to them.

+

We start by allowing types to be used inside expressions and statements in
+our language. This way, types can be used together with language syntax in
+subsequent K rules without any parsing errors. Like in the type system of
+IMP++ in the K tutorial, we prefer to group the block and statement types
+under one syntactic sub-category of types, because this allows us to more
+compactly state that certain terms can be either blocks or statements. Also,
+since programs and fragments of programs will reduce to their types, in order
+for the strictness and context declarations to be executable we state that
+types are results (the same as we did in the IMP++ tutorial).

+
  syntax Exp ::= Type
+  syntax Exps ::= Types
+  syntax BlockOrStmtType ::= "block" | "stmt"
+  syntax Type ::= BlockOrStmtType
+  syntax Block ::= BlockOrStmtType
+  syntax KResult ::= Type
+                   | Types    //TODO: remove this, eventually
+

Configuration

+ +

The configuration of our type system consists of a tasks cell
+holding various typing task cells, and a global type environment.
+Each task includes a k cell holding the code to type, a tenv
+cell holding the local type environment, and a return cell holding
+the return type of the currently checked function. The latter is needed in
+order to check whether return statements return values of the expected type.
+Initially, the program is placed in a k cell inside a
+task cell. Since the cells with multiplicity ? are not
+included in the initial configuration, the task cell holding
+the original program in its k cell will contain no other
+subcells.

+
  configuration <T color="yellow">
+                  <tasks color="orange">
+                    <task multiplicity="*" color="yellow" type="Set">
+                      <k color="green"> $PGM:Stmt </k>
+                      <tenv multiplicity="?" color="cyan"> .Map </tenv>
+                      <returnType multiplicity="?" color="black"> void </returnType>
+                    </task>
+                  </tasks>
+//                  <br/>
+                  <gtenv color="blue"> .Map </gtenv>
+                </T>
+

Variable declarations

+ +

Variable declarations type as statements, that is, they reduce to the
+type stmt. There are only two cases that need to be
+considered: when a simple variable is declared and when an array
+variable is declared. The macros at the end of the syntax module
+above take care of reducing other variable declarations, including
+ones where the declared variables are initialized, to only these two
+cases. The first case has two subcases: when the variable declaration
+is global (i.e., the task cell contains only the k
+cell), in which case it is added to the global type environment,
+checking at the same time that the variable has not already been
+declared; and when the variable declaration is local (i.e., a
+tenv cell is available), in which case it is simply added to
+the local type environment, possibly shadowing previous homonymous
+variables. The second case (array declarations) reduces to the first,
+incrementally moving the array dimension into the type until the array
+becomes a simple variable.

+
  rule <task> <k> T:Type X:Id; => stmt ...</k> </task>
+       <gtenv> Rho (.Map => X |-> T) </gtenv>
+    requires notBool(X in keys(Rho))
+  rule <k> T:Type X:Id; => stmt ...</k> <tenv> Rho => Rho[X <- T] </tenv>
+
+  context _:Type _::Exp[HOLE::Exps];
+// The rule below may need to sort E to Exp in the future, if the
+// parser gets stricter; without that information, it may not be able
+// to complete the LHS into T E[int,Ts],.Exps; (and similarly for the RHS)
+  rule T:Type E:Exp[int,Ts:Types]; => T[] E[Ts];
+// I want to write the rule below as _:Type (E:Exp[.Types] => E),
+// but the list completion seems to not work well with that.
+  rule T:Type E:Exp[.Types]; => T E;
+
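For instance, here is a sketch of how a two-dimensional array declaration
reduces under the rules above (each line is one step, shown after the indexes
have been typed):

  int a[10, 20];       // original declaration
  int a[int, int];     // the indexes type to int (strict context above)
  int[] a[int];        // one dimension moved into the type
  int[][] a[.Types];   // second dimension moved into the type
  int[][] a;           // now a simple variable declaration
  stmt                 // a gets bound to int[][] in the type environment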

Function declarations

+ +

Functions are allowed to be declared only at the top level (the
+task cell holds only its k subcell). Each function
+declaration reduces to a variable declaration (a binding of its name
+to its declared function type), but also adds a task into the
+tasks cell. The task consists of a typing of the statement
+declaring all the function parameters followed by the function body,
+together with the expected return type of the function. The
+getTypes and mkDecls functions, defined at the end of
+the file in the section on auxiliary operations, extract the list of
+types and make a sequence of variable declarations from a list of
+function parameters, respectively. Note that, although in the dynamic
+semantics we include a terminating return statement at the
+end of the function body to eliminate from the analysis the case when
+the function does not provide an explicit return, we do not need to
+include a similar return statement here. That's because
+the return statements type to stmt anyway, and the
+entire code of the function body needs to type anyway.

+
  rule <task> <k> T:Type F:Id(Ps:Params) S => getTypes(Ps)->T F; ...</k> </task>
+       (.Bag => <task>
+               <k> mkDecls(Ps) S </k> <tenv> .Map </tenv> <returnType> T </returnType>
+             </task>)
+
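As an illustrative sketch, a top-level declaration int f(int x, bool b) S
reduces in the main task to the variable declaration int,bool -> int f;
(which in turn binds f in the global type environment), while a new task of
the following shape is created to type the body S:

  <task>
    <k> int x; bool b; {} S </k>
    <tenv> .Map </tenv>
    <returnType> int </returnType>
  </task>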

Checking if main() exists

+ +

Once the entire program is processed (generating appropriate tasks
+to type check its function bodies), we can dissolve the main
+task cell (the one holding only a k subcell). Since
+we want to enforce that programs include a main function, we also
+generate a function task executing main() to ensure that it
+types (remove this task creation if you do not want your type system
+to reject programs without a main function).

+
  rule <task> <k> stmt => main(.Exps); </k> (.Bag => <tenv> .Map </tenv>) </task>
+

Collecting the terminated tasks

+ +

Similarly, once a non-main task (i.e., one which contains a
+tenv subcell) is completed using the subsequent rules (i.e.,
+its k cell holds only the block or stmt
+type), we can dissolve its corresponding cell. Note that it is
+important to ensure that we only dissolve tasks containing a
+tenv cell with the rule below, because the main task should
+not dissolve this way! It should do what the above rule says.
+In the end, there should be no task cell left in the configuration
+when the program correctly type checks.

+
  rule <task>... <k> _:BlockOrStmtType </k> <tenv> _ </tenv> ...</task> => .Bag
+

Basic values

+ +

The first three rewrite rules below reduce the primitive values to
+their types, as we typically do when we define type systems in K.

+
  rule _:Int => int
+  rule _:Bool => bool
+  rule _:String => string
+

Variable lookup

+ +

There are three cases to distinguish for variable lookup: (1) if the
+variable is bound in the local type environment, then look its type up
+there; (2) if a local environment exists and the variable is not bound
+in it, then look its type up in the global environment; (3) finally,
+if there is no local environment, meaning that we are executing the
+top-level pass, then look the variable's type up in the global
+environment, too.

+
  rule <k> X:Id => T ...</k> <tenv>... X |-> T ...</tenv>
+
+  rule <k> X:Id => T ...</k> <tenv> Rho </tenv> <gtenv>... X |-> T ...</gtenv>
+    requires notBool(X in keys(Rho))
+
+  rule <task> <k> X:Id => T ...</k> </task> <gtenv>... X |-> T ...</gtenv>
+

Increment

+ +

We want the increment operation to apply to any lvalue, including
+array elements, not only to variables. For that reason, we define a
+special context extracting the type of the argument of the increment
+operation only if that argument is an lvalue. Otherwise the rewriting
+process gets stuck. The operation ltype is defined at the
+end of this file, in the auxiliary operation section. It essentially
+acts as a filter, getting stuck if its argument is not an lvalue and
+letting it reduce otherwise. The type of the lvalue is expected to be
+an integer in order to be allowed to be incremented, as seen in the
+rule ++ int => int below.

+
  context ++(HOLE => ltype(HOLE))
+  rule ++ int => int
+

Common expression constructs

+ +

The rules below are straightforward and self-explanatory:

+
  rule int + int => int
+  rule string + string => string
+  rule int - int => int
+  rule int * int => int
+  rule int / int => int
+  rule int % int => int
+  rule - int => int
+  rule int < int => bool
+  rule int <= int => bool
+  rule int > int => bool
+  rule int >= int => bool
+  rule T:Type == T => bool
+  rule T:Type != T => bool
+  rule bool && bool => bool
+  rule bool || bool => bool
+  rule ! bool => bool
+

Array access and size

+ +

Array access requires each index to type to an integer, and the
+array type to be at least as deep as the number of indexes:

+
// NOTE:
+// We used to need parentheses in the RHS, to avoid capturing Ts as an attribute
+// Let's hope that is not a problem anymore.
+
+  rule (T[])[int, Ts:Types] => T[Ts]
+  rule T:Type[.Types] => T
+
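For instance, if a types to int[][] and the indexes type to
int, an access like a[i, j] types as follows (a sketch):

  (int[][])[int, int]  =>  (int[])[int]  =>  int[.Types]  =>  int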

sizeOf only needs to check that its argument is an array:

+
  rule sizeOf(_T[]) => int
+

Input/Output

+ +

The read expression construct types to an integer, while print types
+to a statement provided that all its arguments type to integers or
+strings.

+
  rule read() => int
+
+  rule print(T:Type, Ts => Ts); requires T ==K int orBool T ==K string
+  rule print(.Types); => stmt
+

Assignment

+ +

The special context and the rule for assignment below are similar
+to those for increment: the LHS of the assignment must be an lvalue
+and, in that case, it must have the same type as the RHS, which then
+becomes the type of the assignment.

+
  context (HOLE => ltype(HOLE)) = _
+  rule T:Type = T => T
+

Function application and return

+ +

Function application requires the type of the function and the
+types of the passed values to be compatible. Note that a special case
+is needed to handle the no-argument case:

+
  rule (Ts:Types -> T)(Ts) => T requires Ts =/=K .Types
+  rule (void -> T)(.Types) => T
+
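For example (a sketch): if f types to int,bool -> int and its arguments
type to int and bool, then the application types by the first rule,
while a call with no arguments uses the second rule:

  (int,bool -> int)(int, bool)  =>  int     // first rule
  (void -> int)(.Types)         =>  int     // second rule (no arguments)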

The returned value must have the same type as the declared
+function return type. If an empty return is encountered, then
+we should check that we are in a function (and not a thread)
+context, that is, a return cell must be available:

+
  rule <k> return T:Type; => stmt ...</k> <returnType> T </returnType>
+  rule <k> return; => stmt ...</k> <returnType> _ </returnType>
+

Blocks

+ +

To avoid having to recover type environments after blocks, we prefer
+to start a new task for the block body, making sure that the new task
+is passed the same type environment and return cells. The value
+returned by return statements must have the same type as
+stated in the return cell. The print variadic
+function is allowed to only print integers and strings. The thrown
+exceptions can only have integer type.

+
  rule {} => block
+
+  rule <task> <k> {S} => block ...</k> <tenv> Rho </tenv> R </task>
+       (.Bag => <task> <k> S </k> <tenv> Rho </tenv> R </task>)
+

Expression statement

+ +
  rule _:Type; => stmt
+

Conditional and while loop

+ +
  rule if (bool) block else block => stmt
+  rule while (bool) block => stmt
+

Exceptions

+ +

We currently force the parameters of exceptions to only be integers.
+Moreover, for simplicity, we assume that integer exceptions can be
+thrown from anywhere, including from functions which do not define
+any try-catch block (with the currently unchecked expectation, also
+for simplicity, that the caller functions would catch those
+exceptions).

+
  rule try block catch(int X:Id) {S} => {int X; S}
+  rule try block catch(int X:Id) {} => {int X;}
+  rule throw int; => stmt
+

Concurrency

+ +

Nothing special about typing the concurrency constructs, except that
+we do not want the spawned thread to return, so we do not include any
+return cell in the new task cell for the thread statement.
+Same like with the functions above, we do not check for thrown
+exceptions which are not caught.

+
  rule <k> spawn S => int ...</k> <tenv> Rho </tenv>
+       (.Bag => <task> <k> S </k> <tenv> Rho </tenv> </task>)
+  rule join int; => stmt
+  rule acquire _:Type; => stmt
+  rule release _:Type; => stmt
+  rule rendezvous _:Type; => stmt
+
+  rule _:BlockOrStmtType _:BlockOrStmtType => stmt
+

Auxiliary constructs

+ +

The function mkDecls turns a list of parameters into a
+list of variable declarations.

+
  syntax Stmt ::= mkDecls(Params)  [function]
+  rule mkDecls(T:Type X:Id, Ps:Params) => T X; mkDecls(Ps)
+  rule mkDecls(.Params) => {}
+

The ltype context allows only expressions which have an
+lvalue to evaluate.

+
  syntax LValue ::= Id
+  rule isLValue(_:Exp[_:Exps]) => true
+  syntax Exp ::= LValue  // K should be able to infer this
+                         // if not added, then it gets stuck with an Id on k cell
+
+// Instead of the second LValue production above you can use a rule:
+//  rule isLValue(_:Exp[_:Exps]) => true
+
+  syntax Exp ::= ltype(Exp)
+//  context ltype(HOLE:LValue)
+// The above context does not work due to some error, so we write instead
+  context ltype(HOLE) requires isLValue(HOLE)
+

The function getTypes is the same as in SIMPLE typed dynamic.

+
  syntax Types ::= getTypes(Params)  [function]
+  rule getTypes(T:Type _:Id) => T, .Types   // I would like to not use .Types
+  rule getTypes(T:Type _:Id, P, Ps) => T, getTypes(P,Ps)
+  rule getTypes(.Params) => void, .Types
+
+endmodule
+

Go to Lesson 3, SIMPLE typed dynamic

+

SIMPLE — Typed — Dynamic

+ +

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+ +

This is the K dynamic semantics of the typed SIMPLE language.
+It is very similar to the semantics of untyped SIMPLE, the
+difference being that we now dynamically check the typing policy
+described in the static semantics of typed SIMPLE. Because of the
+dynamic nature of the semantics, we can also perform some additional
+checks which were not possible in the static semantics, such as
+catching out-of-bounds array accesses. We will
+highlight the differences between the dynamically typed and the
+untyped SIMPLE as we proceed with the semantics. We recommend that the
+reader consult the typing policy and the syntax of types discussed
+in the static semantics of the typed SIMPLE language.

+
module SIMPLE-TYPED-DYNAMIC-SYNTAX
+  imports DOMAINS-SYNTAX
+

Syntax

+ +

The syntax of typed SIMPLE extends that of untyped SIMPLE with support
+for declaring types to variables and functions.

+

The syntax below is identical to that of the static semantics of typed
+SIMPLE. However, the K strictness attributes are like those of the untyped
+SIMPLE, to capture the desired evaluation strategies of the various language
+constructs.

+
  syntax Id ::= "main" [token]
+

Types

+ +
  syntax Type ::= "void" | "int" | "bool" | "string"
+                | Type "[" "]"
+                | "(" Type ")"           [bracket]
+                > Types "->" Type
+  syntax Types ::= List{Type,","}        [overload(exps)]
+

Declarations

+ +
  syntax Param ::= Type Id
+  syntax Params ::= List{Param,","}
+
+  syntax Stmt ::= Type Exps ";"
+                | Type Id "(" Params ")" Block
+

Expressions

+ +
  syntax Exp ::= Int | Bool | String | Id
+               | "(" Exp ")"             [bracket]
+               | "++" Exp
+               > Exp "[" Exps "]"        [strict]
+               > Exp "(" Exps ")"        [strict]
+               | "-" Exp                 [strict]
+               | "sizeOf" "(" Exp ")"    [strict]
+               | "read" "(" ")"
+               > left:
+                 Exp "*" Exp             [strict, left]
+               | Exp "/" Exp             [strict, left]
+               | Exp "%" Exp             [strict, left]
+               > left:
+                 Exp "+" Exp             [strict, left]
+               | Exp "-" Exp             [strict, left]
+               > non-assoc:
+                 Exp "<" Exp             [strict, non-assoc]
+               | Exp "<=" Exp            [strict, non-assoc]
+               | Exp ">" Exp             [strict, non-assoc]
+               | Exp ">=" Exp            [strict, non-assoc]
+               | Exp "==" Exp            [strict, non-assoc]
+               | Exp "!=" Exp            [strict, non-assoc]
+               > "!" Exp                 [strict]
+               > left:
+                 Exp "&&" Exp            [strict(1), left]
+               | Exp "||" Exp            [strict(1), left]
+               > "spawn" Block
+               > Exp "=" Exp             [strict(2), right]
+

Like in the static semantics, there is no need for lists of identifiers
+(because we now have lists of parameters).

+
  syntax Exps ::= List{Exp,","}          [strict, overload(exps)]
+  syntax Val
+  syntax Vals ::= List{Val,","}          [overload(exps)]
+

Statements

+ +
  syntax Block ::= "{" "}"
+                | "{" Stmt "}"
+
+  syntax Stmt ::= Block
+                | Exp ";"                               [strict]
+                | "if" "(" Exp ")" Block "else" Block   [avoid, strict(1)]
+                | "if" "(" Exp ")" Block                [macro]
+                | "while" "(" Exp ")" Block
+                | "for" "(" Stmt Exp ";" Exp ")" Block  [macro]
+                | "print" "(" Exps ")" ";"              [strict]
+                | "return" Exp ";"                      [strict]
+                | "return" ";"
+                | "try" Block "catch" "(" Param ")" Block
+            | "throw" Exp ";"                       [strict]
+                | "join" Exp ";"                        [strict]
+                | "acquire" Exp ";"                     [strict]
+                | "release" Exp ";"                     [strict]
+                | "rendezvous" Exp ";"                  [strict]
+
+  syntax Stmt ::= Stmt Stmt                          [right]
+

The same desugaring macros as in the statically typed SIMPLE.

+
  rule if (E) S => if (E) S else {}
+  rule for(Start Cond; Step) {S:Stmt} => {Start while(Cond){S Step;}}
+  rule for(Start Cond; Step) {} => {Start while(Cond){Step;}}
+  rule T:Type E1:Exp, E2:Exp, Es:Exps; => T E1; T E2, Es;               [anywhere]
+  rule T:Type X:Id = E; => T X; X = E;                                  [anywhere]
+
+endmodule
+
+
+module SIMPLE-TYPED-DYNAMIC
+  imports SIMPLE-TYPED-DYNAMIC-SYNTAX
+  imports DOMAINS
+

Semantics

+ +

Values and results

+ +

These are similar to those of untyped SIMPLE, except that the array
+references and the function abstractions now also hold their types.
+These types are needed in order to easily compute the type of any
+value in the language (see the auxiliary typeOf operation at
+the end of this module).

+
  syntax Val ::= Int | Bool | String
+               | array(Type,Int,Int)
+               | lambda(Type,Params,Stmt)
+  syntax Exp ::= Val
+  syntax Exps ::= Vals
+  syntax KResult ::= Val
+                   | Vals  // TODO: should not need this
+

Configuration

+ +

The configuration is almost identical to that of untyped SIMPLE,
+except for a return cell inside the control cell.
+This return cell will hold, like in the static semantics of
+typed SIMPLE, the expected type of the value returned by the function
+being executed. The contents of this cell will be set whenever a
+function is invoked and will be checked whenever the evaluation of the
+function body encounters an explicit return statement.

+
  // the syntax declarations below are required because the sorts are
+  // referenced directly by a production and, because of the way KIL to KORE
+  // is implemented, the configuration syntax is not available yet
+  // should simply work once KIL is removed completely
+  // check other definitions for this hack as well
+
+  syntax ControlCell
+  syntax ControlCellFragment
+
+  configuration <T color="red">
+                  <threads color="orange">
+                    <thread multiplicity="*" color="yellow" type="Map">
+                      <id color="pink"> 0 </id>
+                      <k color="green"> ($PGM:Stmt ~> execute) </k>
+//                      <br/>
+                      <control color="cyan">
+                        <fstack color="blue"> .List </fstack>
+                        <xstack color="purple"> .List </xstack>
+                        <returnType color="LimeGreen"> void </returnType>
+                       </control>
+//                      <br/>
+                      <env color="violet"> .Map </env>
+                      <holds color="black"> .Map </holds>
+                    </thread>
+                  </threads>
+//                  <br/>
+                  <genv color="pink"> .Map </genv>
+                  <store color="white"> .Map </store>
+                  <busy color="cyan">.Set</busy>
+                  <terminated color="red"> .Set </terminated>
+                  <input color="magenta" stream="stdin"> .List </input>
+                  <output color="brown" stream="stdout"> .List </output>
+                  <nextLoc color="gray"> 0 </nextLoc>
+                </T>
+

Declarations and Initialization

+ +

Variable Declaration

+ +

The undefined construct is now parameterized by a type.
+A main difference between untyped SIMPLE and dynamically typed SIMPLE
+is that the latter assigns a type to each of its locations and that
+type cannot be changed during the execution of the program. We do not
+do any memory management in our semantic definitions here, so
+locations cannot be reclaimed, garbage collected and/or reused. Each
+location corresponds precisely to an allocated variable or array
+element, whose type was explicitly or implicitly declared in the
+program and does not change. It is therefore safe to type each
+location and then never allow that type to change. The typed
+undefined values effectively assign both a type and an undefined value
+to a location.

+
  syntax KItem ::= undefined(Type)
+
+  rule <k> T:Type X:Id; => .K ...</k>
+       <env> Env => Env[X <- L] </env>
+       <store>... .Map => L |-> undefined(T) ...</store>
+       <nextLoc> L:Int => L +Int 1 </nextLoc>
+

Array Declaration

+ +

The dynamic semantics of typed array declarations is similar to that
+in untyped SIMPLE, but we have to make sure that we associate the
+right type to the allocated locations.

+
  rule <k> T:Type X:Id[N:Int]; => .K ...</k>
+       <env> Env => Env[X <- L] </env>
+       <store>... .Map => L |-> array(T, L +Int 1, N)
+                          (L +Int 1)...(L +Int N) |-> undefined(T) ...</store>
+       <nextLoc> L:Int => L +Int 1 +Int N </nextLoc>
+    requires N >=Int 0
+
+  context _:Type _::Exp[HOLE::Exps];
+

The desugaring of multi-dimensional arrays into unidimensional
+ones is also similar to that in untyped SIMPLE, although we have to
+make sure that all the declared variables have the right types. The
+auxiliary operation T<Vs>, defined at the end of the file,
+adds as many array dimensions to the type T as the length of Vs.

+
// TODO: Check the desugaring below to be consistent with the one for untyped simple
+
+  syntax Id ::= "$1" [token] | "$2" [token]
+  rule T:Type X:Id[N1:Int, N2:Int, Vs:Vals];
+    => T[]<Vs> X[N1];
+       {
+         T[][]<Vs> $1=X;
+         for(int $2=0; $2 <= N1 - 1; ++$2) {
+           T X[N2,Vs];
+           $1[$2] = X;
+         }
+       }
+

Function declaration

+ +

Store all function parameters, as well as the return type, as part
+of the lambda abstraction. In the spirit of dynamic typing, we will
+make sure that parameters are well typed when the function is invoked.

+
  rule <k> T:Type F:Id(Ps:Params) S => .K ...</k>
+       <env> Env => Env[F <- L] </env>
+       <store>... .Map => L |-> lambda(T, Ps, S) ...</store>
+       <nextLoc> L => L +Int 1 </nextLoc>
+

Calling main()

+ +

When done with the first pass, call main().

+
  syntax KItem ::= "execute"
+  rule <k> execute => main(.Exps); </k>
+       <env> Env </env>
+       <genv> .Map => Env </genv>
+

Expressions

+ +

Variable lookup

+ +
  rule <k> X:Id => V ...</k>
+       <env>... X |-> L ...</env>
+       <store>... L |-> V:Val ...</store>
+

Variable/Array increment

+ +
  context ++(HOLE => lvalue(HOLE))
+  rule <k> ++loc(L) => I +Int 1 ...</k>
+       <store>... L |-> (I:Int => I +Int 1) ...</store>
+

Arithmetic operators

+ +
  rule I1 + I2 => I1 +Int I2
+  rule Str1 + Str2 => Str1 +String Str2
+  rule I1 - I2 => I1 -Int I2
+  rule I1 * I2 => I1 *Int I2
+  rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0
+  rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0
+  rule - I => 0 -Int I
+  rule I1 < I2 => I1 <Int I2
+  rule I1 <= I2 => I1 <=Int I2
+  rule I1 > I2 => I1 >Int I2
+  rule I1 >= I2 => I1 >=Int I2
+  rule V1:Val == V2:Val => V1 ==K V2
+  rule V1:Val != V2:Val => V1 =/=K V2
+  rule ! T => notBool(T)
+  rule true  && E => E
+  rule false && _ => false
+  rule true  || _ => true
+  rule false || E => E
+

Array lookup

+ +

Check array bounds, as part of the dynamic typing policy.

+
// Same comment as for simple untyped regarding [anywhere]
+  rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs]
+    [anywhere]
+
+// Same comment as for simple untyped regarding [anywhere]
+  rule array(_:Type, L:Int, M:Int)[N:Int] => lookup(L +Int N)
+    requires N >=Int 0 andBool N <Int M  [anywhere]
+

Size of an array

+ +
  rule sizeOf(array(_,_,N)) => N
+

Function call

+ +

Define function call and return together, to see their relationship.
+Note that the operation mkDecls now declares properly typed
+instantiated variables, and that the semantics of return also
+checks that the type of the returned value is the expected one.

+
  syntax KItem ::= (Type,Map,K,ControlCellFragment)
+
+  rule <k> lambda(T,Ps,S)(Vs:Vals) ~> K => mkDecls(Ps,Vs) S return; </k>
+       <control>
+         <fstack> .List => ListItem((T',Env,K,C)) ...</fstack>
+         <returnType> T' => T </returnType>
+         C
+       </control>
+       <env> Env => GEnv </env>
+       <genv> GEnv </genv>
+
+  rule <k> return V:Val; ~> _ => V ~> K </k>
+       <control>
+         <fstack> ListItem((T',Env,K,C)) => .List ...</fstack>
+         <returnType> T => T' </returnType>
+         (_ => C)
+       </control>
+       <env> _ => Env </env>
+    requires typeOf(V) ==K T   // check the type of the returned value
+
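For instance, in the made-up fragment below the call itself proceeds, but the
return gets stuck because the returned value does not have the declared return
type (such a program would of course also be rejected by the static semantics;
here the violation is caught dynamically):

  int f() {
    return true;     // stuck: typeOf(true) is bool, but f declares int
  }

  int main() {
    f();
    return 0;
  }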

Like the undefined above, nothing also gets
+tagged with a type now. The empty return statement is
+completed to return the nothing value tagged as expected.

+
  syntax Val ::= nothing(Type)
+  rule <k> return; => return nothing(T); ...</k> <returnType> T </returnType>
+

Read

+ +
  rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input>
+

Assignment

+ +

The assignment now checks that the type of the assigned location is
+preserved:

+
  context (HOLE => lvalue(HOLE)) = _
+
+  rule <k> loc(L) = V:Val => V ...</k> <store>... L |-> (V' => V) ...</store>
+    requires typeOf(V) ==K typeOf(V')
+
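For example (a made-up fragment), the second assignment below gets stuck
because it would change the type of the location allocated for x:

  int main() {
    int x;
    x = 7;           // fine: an int is assigned to an int location
    x = "seven";     // stuck: typeOf("seven") is string, not int
    return 0;
  }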

Statements

+ +

Blocks

+ +
  rule {} => .K
+  rule <k> { S } => S ~> setEnv(Env) ...</k>  <env> Env </env>
+

Sequential composition

+ +
  rule S1:Stmt S2:Stmt => S1 ~> S2
+

Expression statements

+ +
  rule _:Val; => .K
+

Conditional

+ +
  rule if ( true) S else _ => S
+  rule if (false) _ else S => S
+

While loop

+ +
  rule while (E) S => if (E) {S while(E)S}
+

Print

+ +

We only allow printing integers and strings:

+
  rule <k> print(V:Val, Es => Es); ...</k> <output>... .List => ListItem(V) </output>
+    requires typeOf(V) ==K int orBool typeOf(V) ==K string
+  rule print(.Vals); => .K
+

Exceptions

+ +

Exception parameters are now typed, but note that the semantics below
+works correctly only when the thrown exception has the same type as
+the innermost try-catch parameter. To keep things simple, for the time
+being we can assume that SIMPLE only throws and catches integer
+values, in which case our semantics below works fine:

+
  syntax KItem ::= (Param,Stmt,K,Map,ControlCellFragment)  // Param instead of Id
+
+  syntax KItem ::= "popx"
+
+  rule <k> (try S1 catch(P) S2 => S1 ~> popx) ~> K </k>
+       <control>
+         <xstack> .List => ListItem((P, S2, K, Env, C)) ...</xstack>
+         C
+       </control>
+       <env> Env </env>
+
+  rule <k> popx => .K ...</k>
+       <xstack> ListItem(_) => .List ...</xstack>
+
+  rule <k> throw V:Val; ~> _ => { T X = V; S2 } ~> K </k>
+       <control>
+         <xstack> ListItem((T:Type X:Id, S2, K, Env, C)) => .List ...</xstack>
+         (_ => C)
+       </control>
+       <env> _ => Env </env>
+

Threads

+ +

Thread creation

+ +
   rule <thread>...
+          <k> spawn S => !T:Int +Int 1 ...</k>
+          <env> Env </env>
+        ...</thread>
+        (.Bag => <thread>...
+                <k> S </k>
+                <env> Env </env>
+                <id> !T +Int 1 </id>
+              ...</thread>)
+

Thread termination

+ +
   rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag)
+        <busy> Busy => Busy -Set keys(H) </busy>
+        <terminated>... .Set => SetItem(T) ...</terminated>
+

Thread joining

+ +
   rule <k> join T:Int; => .K ...</k>
+        <terminated>... SetItem(T) ...</terminated>
+

Acquire lock

+ +
   rule <k> acquire V:Val; => .K ...</k>
+        <holds>... .Map => V |-> 0 ...</holds>
+        <busy> Busy (.Set => SetItem(V)) </busy>
+     requires (notBool(V in Busy:Set))
+
+   rule <k> acquire V; => .K ...</k>
+        <holds>... V:Val |-> (N:Int => N +Int 1) ...</holds>
+

Release lock

+ +
   rule <k> release V:Val; => .K ...</k>
+        <holds>... V |-> (N => N:Int -Int 1) ...</holds>
+      requires N >Int 0
+
+   rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds>
+        <busy>... SetItem(V) => .Set ...</busy>
+

Rendezvous synchronization

+ +
   rule <k> rendezvous V:Val; => .K ...</k>
+        <k> rendezvous V; => .K ...</k>
+

Auxiliary declarations and operations

+ +

Turns a list of parameters and a list of instance values for them
+into a list of variable declarations.

+
  syntax Stmt ::= mkDecls(Params,Vals)  [function]
+  rule mkDecls((T:Type X:Id, Ps:Params), (V:Val, Vs:Vals))
+    => T X=V; mkDecls(Ps,Vs)
+  rule mkDecls(.Params,.Vals) => {}
+

Location lookup.

+
  syntax Exp ::= lookup(Int)  // see NOTES.md for why Exp instead of KItem
+  rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store>
+

Environment recovery.

+
// TODO: same comment regarding setEnv(...) as for simple untyped
+
+  syntax KItem ::= setEnv(Map)
+  rule <k> setEnv(Env) => .K ...</k>  <env> _ => Env </env>
+  rule (setEnv(_) => .K) ~> setEnv(_)
+

lvalue and loc

+
  syntax Exp ::= lvalue(K)
+  syntax Val ::= loc(Int)
+
+  rule <k> lvalue(X:Id => loc(L)) ...</k>  <env>... X |-> L:Int ...</env>
+
+  //context lvalue(_[HOLE])
+  //context lvalue(HOLE[_])
+  context lvalue(_::Exp[HOLE::Exps])
+  context lvalue(HOLE::Exp[_::Exps])
+
+  rule lvalue(lookup(L:Int) => loc(L))
+

Adds the corresponding depth to an array type

+
  syntax Type ::= Type "<" Vals ">"  [function]
+  rule T:Type<_,Vs:Vals> => T[]<Vs>
+  rule T:Type<.Vals> => T
+

Sequences of locations.

+
  syntax Map ::= Int "..." Int "|->" K [function]
+  rule N...M |-> _ => .Map  requires N >Int M
+  rule N...M |-> K => N |-> K (N +Int 1)...M |-> K  requires N <=Int M
+
+// Type of a value.
+  syntax Type ::= typeOf(K)  [function]
+  rule typeOf(_:Int) => int
+  rule typeOf(_:Bool) => bool
+  rule typeOf(_:String) => string
+  rule typeOf(array(T,_,_)) => (T[])   // () needed! K parses [] as "no tags"
+  rule typeOf(lambda(T,Ps,_)) => getTypes(Ps) -> T
+  rule typeOf(undefined(T)) => T
+  rule typeOf(nothing(T)) => T
+

List of types of a parameter.

+
  syntax Types ::= getTypes(Params)  [function]
+  rule getTypes(T:Type _:Id) => T, .Types   // I would like to not use .Types
+  rule getTypes(T:Type _:Id, P, Ps) => T, getTypes(P,Ps)
+  rule getTypes(.Params) => void, .Types
+endmodule
+

KOOL — Untyped

+ +

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+ +

This is the K semantic definition of the untyped KOOL language. KOOL
+is aimed at being a pedagogical and research language that captures
+the essence of the object-oriented programming paradigm. Its untyped
+variant discussed here is simpler than the typed one, ignoring several
+intricate aspects of types in the presence of objects. A program
+consists of a set of class declarations. Each class can extend at
+most one other class (KOOL is single-inheritance). A class can
+declare a set of fields and a set of methods, all public and called
+the class' members. Specifically, KOOL includes the
+following features:

+
    +
  • +

    Class declarations, where a class may or may not explicitly
    +extend another class. If a class does not explicitly extend
    +another class, then it is assumed to extend the default top-most
    +and empty (i.e., no members) class called Object. Each class
    +is required to declare precisely one homonymous method, called its
    +constructor. Each valid program should contain one class
    +named Main, whose constructor, Main(), takes no
    +arguments. The execution of a program consists of creating an object
    +instance of class Main and invoking the constructor
    +Main() on it, that is, of executing new Main(); (a small
    +program illustrating these conventions is sketched after this list).

    +
  • +
  • +

    All features of SIMPLE (see examples/simple/untyped),
    +i.e., multidimensional arrays, function (here called "method")
    +abstractions with call-by-value parameter passing style and static
    +scoping, blocks with locals, input/output, parametric exceptions, and
    +concurrency via dynamic thread creation/termination and synchronization.
    +The only change in the syntax of SIMPLE when imported in KOOL is the
    +function declaration keyword, function, which is changed into
    +method. The exact same desugaring macros from SIMPLE are
    +also included in KOOL. We can think of KOOL's classes as embedding
    +SIMPLE programs (extended with OO constructs, as discussed next).

    +
  • +
  • +

    Object creation using the new C(e1,...,en)
    +expression construct. An object instance of class C is first
    +created and then the constructor C(e1,...,en) is implicitly
    +called on that object. KOOL only allows (and requires) one
    +constructor per class. The class constructor can be called either
    +implicitly during a new object creation for the class, or explicitly.
    +The superclass constructor is not implicitly invoked when a
    +class constructor is invoked; if you want to invoke the superclass
    +constructor from a subclass constructor then you have to do it
    +explicitly.

    +
  • +
  • +

    An expression construct this, which evaluates to the
    +current object.

    +
  • +
  • +

    An expression construct super, which is used (only) in
    +combination with member lookup (see next) to refer to a superclass
    +field or method.

    +
  • +
  • +

    A member lookup expression construct e.x, where e
    +is an expression (either an expression expected to evaluate to an object
    +or the super construct) and x is a class member name,
    +that is, a field or a method name.

    +
  • +
  • +

    Expression constructs e instanceOf C and
    +(C) e, where e is an expression expected
    +to evaluate to an object and C a class name. The former
    +tells whether the class of e is a subclass of C,
    +that is, whether e can be used as an instance of C,
    +and the latter changes the class of e to C. These
    +operations always succeed: the former returns a Boolean value, while
    +the latter changes the current class of e to C
    +regardless of whether it is safe to do so or not. The typed version
    +of KOOL will check the safety of casting by ensuring that the instance
    +class of the object is a subclass of C. In untyped KOOL we
+do not want to perform this check because we want to allow the
+programmer maximum flexibility: if one always accesses only
    +available members, then the program can execute successfully despite
    +the potentially unsafe cast.

    +
  • +
+
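To make the conventions above concrete, here is a minimal made-up KOOL
program (not one of the distributed examples), using the syntax defined
below in this module:

  class Point {
    var x, y;
    method Point(a, b) { x = a; y = b; }    // the (homonymous) constructor
    method sum() { return x + y; }
  }

  class Main {
    method Main() {
      var p = new Point(1, 2);
      print(p.sum(), "\n");                 // prints 3
    }
  }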

There are some specific aspects of KOOL that need to be discussed.

+

First, KOOL is higher-order, allowing function abstractions to be
+treated like any other values in the language. For example, if
+m is a method of object e then e.m
+evaluates to the corresponding function abstraction. The function
+abstraction is in fact a closure, because in addition to the method
+parameters and body it also encapsulates the object value (i.e., the
+environment of the object together with its current class—see below)
+that e evaluates to. This way, function abstractions can be
+invoked anywhere and have the capability to change the state of their
+object. For example, if m is a method of object e
+which increments a field c of e when invoked, and if
+getm is another method of e which simply returns
+m when invoked, then the double application
+(e.getm())() has the same effect as e.m(), that is,
+increments the counter c of e. Note that the
+higher-order nature of KOOL was not originally planned; it came as a
+natural consequence of evaluating methods to closures and we decided
+to keep it. If you do not like it then do not use it.
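A made-up sketch of the situation described above, assuming the syntax and
member-lookup rules defined below in this module:

  class Counter {
    var c;
    method Counter() { c = 0; }
    method m() { c = c + 1; return c; }
    method getm() { return m; }     // returns the closure that m evaluates to
  }

  class Main {
    method Main() {
      var e = new Counter();
      (e.getm())();                 // same effect as e.m()
      print(e.c, "\n");             // should print 1
    }
  }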

+

Second, since all the fields and methods are public in KOOL and since
+they can be redeclared in subclasses, it is not immediately clear how
+to lookup the member x when we write e.x and
+e is different from super. We distinguish two cases,
+depending on whether e.x occurs in a method invocation
+context (i.e., e.x(...)) or in a field context. KOOL has
+dynamic method dispatch, so if e.x is invoked as a method
+then x will be searched for starting with the instance class of
+the object value to which e evaluates. If e.x
+occurs in a non-method-invocation context then x will be
+treated as a field (although it may hold a method closure due to the
+higher-order nature of KOOL) and thus will be searched starting with
+the current class of the object value of e (which, because of
+this and casting, may be different from its instance class).
+In order to achieve the above, each object value will consist of a
+pair holding the current class of the object and an environment stack
+with one layer for each class in the object's instance class hierarchy.

+

Third, although KOOL has dynamic method dispatch, its capabilities
+described above are powerful enough to allow us to mimic static
+method dispatch. For example, suppose that you want to invoke method
+m() statically. Then all you need to do is to declare a
+local variable and bind it to m, for example var staticm = m;, and
+then call staticm(). This works because
+staticm is first bound to the method closure that m
+evaluates to, and then looked up as any local variable when invoked.
+We only enable the dynamic method dispatch when we have an object
+member on an application position, e.g., m().
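A made-up sketch of this trick (the names are ours); under the lookup rules
defined below, b.viaLocal() should print A.m, while b.m() prints B.m:

  class A {
    method m() { print("A.m\n"); }
    method viaLocal() {
      var staticm = m;    // bind the closure that m evaluates to here
      staticm();          // invokes that exact closure, no re-dispatch
    }
  }

  class B extends A {
    method m() { print("B.m\n"); }
  }

  class Main {
    method Main() {
      var b = new B();
      b.m();              // dynamic dispatch: prints B.m
      b.viaLocal();       // prints A.m, mimicking static dispatch
    }
  }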

+

In what follows, we limit our comments to the new, KOOL-specific
+aspects of the language. We refer the reader to the untyped SIMPLE
+language for documentation on the remaining features, because
+those were all borrowed from SIMPLE.

+
module KOOL-UNTYPED-SYNTAX
+  imports DOMAINS-SYNTAX
+

Syntax

+ +

The syntax of KOOL extends that of SIMPLE with object-oriented
+constructs. We removed from the K annotated syntax of SIMPLE two
+constructs, namely the one for function declarations (because we want
+to call them methods now) and the one for function application
+(because application is not strict in the first argument
+anymore—needs to initiate dynamic method dispatch). The additional
+syntax includes:

+
    +
  • First, we need a new dedicated identifier, Object, for
    +the default top-most class.
  • +
  • Second, we rename the function keyword of SIMPLE into method.
  • +
  • Third, we add syntax for class declarations together with a
    +macro making classes which extend nothing to extend Object.
  • +
  • Fourth, we change the strictness attribute of application
    +into strict(2).
  • +
  • Finally, we add syntax and corresponding strictness
    +for the KOOL object-oriented constructs.
  • +
+
  syntax Id ::= "Object" [token] | "Main" [token]
+
+  syntax Stmt ::= "var" Exps ";"
+                | "method" Id "(" Ids ")" Block  // called "function" in SIMPLE
+                | "class" Id Block               // KOOL
+                | "class" Id "extends" Id Block  // KOOL
+
+  syntax Exp ::= Int | Bool | String | Id
+               | "this"                                 // KOOL
+               | "super"                                // KOOL
+               | "(" Exp ")"             [bracket]
+               | "++" Exp
+               | Exp "instanceOf" Id     [strict(1)]    // KOOL
+               | "(" Id ")" Exp          [strict(2)]    // KOOL  cast
+               | "new" Id "(" Exps ")"   [strict(2)]    // KOOL
+               | Exp "." Id                             // KOOL
+               > Exp "[" Exps "]"        [strict]
+               > Exp "(" Exps ")"        [strict(2)]    // was strict in SIMPLE
+               | "-" Exp                 [strict]
+               | "sizeOf" "(" Exp ")"    [strict]
+               | "read" "(" ")"
+               > left:
+                 Exp "*" Exp             [strict, left]
+               | Exp "/" Exp             [strict, left]
+               | Exp "%" Exp             [strict, left]
+               > left:
+                 Exp "+" Exp             [strict, left]
+               | Exp "-" Exp             [strict, left]
+               > non-assoc:
+                 Exp "<" Exp             [strict, non-assoc]
+               | Exp "<=" Exp            [strict, non-assoc]
+               | Exp ">" Exp             [strict, non-assoc]
+               | Exp ">=" Exp            [strict, non-assoc]
+               | Exp "==" Exp            [strict, non-assoc]
+               | Exp "!=" Exp            [strict, non-assoc]
+               > "!" Exp                 [strict]
+               > left:
+                 Exp "&&" Exp            [strict(1), left]
+               | Exp "||" Exp            [strict(1), left]
+               > "spawn" Block
+               > Exp "=" Exp             [strict(2), right]
+
+  syntax Ids  ::= List{Id,","}
+
+  syntax Exps ::= List{Exp,","}          [strict, overload(exps)]
+  syntax Val
+  syntax Vals ::= List{Val,","}          [overload(exps)]
+
+  syntax Block ::= "{" "}"
+                | "{" Stmt "}"
+
+  syntax Stmt ::= Block
+                | Exp ";"                               [strict]
+                | "if" "(" Exp ")" Block "else" Block   [avoid, strict(1)]
+                | "if" "(" Exp ")" Block                [macro]
+                | "while" "(" Exp ")" Block
+                | "for" "(" Stmt Exp ";" Exp ")" Block  [macro]
+                | "return" Exp ";"                      [strict]
+                | "return" ";"                          [macro]
+                | "print" "(" Exps ")" ";"              [strict]
+                | "try" Block "catch" "(" Id ")" Block
+                | "throw" Exp ";"                       [strict]
+                | "join" Exp ";"                        [strict]
+                | "acquire" Exp ";"                     [strict]
+                | "release" Exp ";"                     [strict]
+                | "rendezvous" Exp ";"                  [strict]
+
+  syntax Stmt ::= Stmt Stmt                          [right]
+

Old desugaring rules, from SIMPLE

+
  rule if (E) S => if (E) S else {}
+  rule for(Start Cond; Step) {S} => {Start while (Cond) {S Step;}}
+  rule var E1::Exp, E2::Exp, Es::Exps; => var E1; var E2, Es;       [anywhere]
+  rule var X::Id = E; => var X; X = E;                              [anywhere]
+

New desugaring rule

+
  rule class C:Id S => class C extends Object S                     // KOOL
+
+endmodule
+

Semantics

+ +

We first discuss the new configuration of KOOL, which extends that of
+SIMPLE. Then we include the semantics of the constructs borrowed from
+SIMPLE unchanged; we refrain from discussing those, because they were
+already discussed in the K definition of SIMPLE. Then we discuss
+changes to SIMPLE's semantics needed for the more general meaning of
+the previous SIMPLE constructs (for example for thread spawning,
+assignment, etc.). Finally, we discuss in detail the
+semantics of the additional KOOL constructs.

+
module KOOL-UNTYPED
+  imports KOOL-UNTYPED-SYNTAX
+  imports DOMAINS
+

Configuration

+ +

KOOL removes one cell and adds two nested cells to the configuration
+of SIMPLE. The cell which is removed is the one holding the global
+environment, because a KOOL program consists of a set of classes only,
+with no global declarations. In fact, since informally speaking each
+KOOL class now includes a SIMPLE program, it is safe to say that the
+global variables in SIMPLE became class fields in KOOL. Let us now
+discuss the new cells that are added to the configuration of SIMPLE.

+
    +
  • +

    The cell crntObj holds data pertaining to the current
    +object, that is, the object environment in which the code in cell
    +k executes: crntClass holds the current class (which
    +can change as methods of the current object are invoked);
    +envStack holds the stack of environments as a list,
+each layer corresponding to one class in the object's instance class
    +hierarchy; location, which is optional, holds the location in
    +the store where the current object is or has to be located (this is
    +useful both for method closures and for the semantics of object
    +creation).

    +
  • +
  • +

    The cell classes holds all the declared classes, each
    +class being held in its own class cell which contains a name
    +(className), a parent (extends), and the actual
    +member declarations (declarations).

    +
  • +
+
  // the syntax declarations below are required because the sorts are
+  // referenced directly by a production and, because of the way KIL to KORE
+  // is implemented, the configuration syntax is not available yet
+  // should simply work once KIL is removed completely
+  // check other definitions for this hack as well
+  syntax EnvCell
+  syntax ControlCell
+  syntax EnvStackCell
+  syntax CrntObjCellFragment
+
+  configuration <T color="red">
+                  <threads color="orange">
+                    <thread multiplicity="*" type="Set" color="yellow">
+                      <k color="green"> $PGM:Stmt ~> execute </k>
+                    //<br/> // TODO(KORE): support latex annotations #1799
+                      <control color="cyan">
+                        <fstack color="blue"> .List </fstack>
+                        <xstack color="purple"> .List </xstack>
+                      //<br/> // TODO(KORE): support latex annotations #1799
+                        <crntObj color="Fuchsia">  // KOOL
+                           <crntClass> Object </crntClass>
+                           <envStack> .List </envStack>
+                           <location multiplicity="?"> .K </location>
+                        </crntObj>
+                      </control>
+                    //<br/> // TODO(KORE): support latex annotations #1799
+                      <env color="violet"> .Map </env>
+                      <holds color="black"> .Map </holds>
+                      <id color="pink"> 0 </id>
+                    </thread>
+                  </threads>
+                //<br/> // TODO(KORE): support latex annotations #1799
+                  <store color="white"> .Map </store>
+                  <busy color="cyan">.Set </busy>
+                  <terminated color="red"> .Set </terminated>
+                  <input color="magenta" stream="stdin"> .List </input>
+                  <output color="brown" stream="stdout"> .List </output>
+                  <nextLoc color="gray"> 0 </nextLoc>
+                //<br/> // TODO(KORE): support latex annotations #1799
+                  <classes color="Fuchsia">        // KOOL
+                     <classData multiplicity="*" type="Map" color="Fuchsia">
+                        // the Map has as its key the first child of the cell,
+                        // in this case the className cell.
+                        <className color="Fuchsia"> Main </className>
+                        <baseClass color="Fuchsia"> Object </baseClass>
+                        <declarations color="Fuchsia"> .K </declarations>
+                     </classData>
+                  </classes>
+                </T>
+

Unchanged Semantics from untyped SIMPLE

+ +

The semantics below is taken over from SIMPLE unchanged.
+The semantics of function declaration and invocation, including the
+use of the special lambda abstraction value, needs to change
+in order to account for the fact that methods are now invoked into
+their object's environment. The semantics of function return actually
+stays unchanged. Also, the semantics of program initialization is
+different: now we have to create an instance of the Main
+class which also calls the constructor Main(), while in
+SIMPLE we only had to invoke the function Main().
+Finally, the semantics of thread spawning needs to change, too: the
+parent thread needs to also share its object environment with the
+spawned thread (in addition to its local environment, like in SIMPLE).
+This is needed in order to be able to spawn method invocations under
+dynamic method dispatch; for example, spawn { run(); }
+will need to look up the method run() in the newly created
+thread, an operation which will most likely fail unless the child thread
+sees the object environment of the parent thread. Note that the
+spawn statement of KOOL is more permissive than the threads
+of Java. In fact, the latter can be implemented in terms of our
+spawn—see the program threads.kool for a sketch.

+

Below is a subset of the values of SIMPLE, which are also values
+of KOOL. We will add other values later in the semantics, such as
+object and method closures.

+
  syntax Val ::= Int | Bool | String
+               | array(Int,Int)
+  syntax Exp ::= Val
+  syntax Exps ::= Vals
+  syntax KResult ::= Val
+  syntax KResult ::= Vals
+

The semantics below are taken verbatim from the untyped SIMPLE
+definition.

+
  syntax KItem ::= "undefined"
+
+  rule <k> var X:Id; => .K ...</k>
+       <env> Env => Env[X <- L] </env>
+       <store>... .Map => L |-> undefined ...</store>
+       <nextLoc> L:Int => L +Int 1 </nextLoc>
+
+
+  context var _:Id[HOLE];
+
+  rule <k> var X:Id[N:Int]; => .K ...</k>
+       <env> Env => Env[X <- L] </env>
+       <store>... .Map => L |-> array(L +Int 1, N)
+                          (L +Int 1) ... (L +Int N) |-> undefined ...</store>
+       <nextLoc> L:Int => L +Int 1 +Int N </nextLoc>
+    requires N >=Int 0
+
+
+  syntax Id ::= "$1" [token] | "$2" [token]
+  rule var X:Id[N1:Int, N2:Int, Vs:Vals];
+    => var X[N1];
+       {
+         var $1=X;
+         for(var $2=0; $2 <= N1 - 1; ++$2) {
+           var X[N2,Vs];
+           $1[$2] = X;
+         }
+       }
+
+
+  rule <k> X:Id => V ...</k>
+       <env>... X |-> L ...</env>
+       <store>... L |-> V:Val ...</store>
+
+
+  context ++(HOLE => lvalue(HOLE))
+  rule <k> ++loc(L) => I +Int 1 ...</k>
+       <store>... L |-> (I:Int => I +Int 1) ...</store>
+
+
+  rule I1 + I2 => I1 +Int I2
+  rule Str1 + Str2 => Str1 +String Str2
+  rule I1 - I2 => I1 -Int I2
+  rule I1 * I2 => I1 *Int I2
+  rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0
+  rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0
+  rule - I => 0 -Int I
+  rule I1 < I2 => I1 <Int I2
+  rule I1 <= I2 => I1 <=Int I2
+  rule I1 > I2 => I1 >Int I2
+  rule I1 >= I2 => I1 >=Int I2
+
+  rule V1:Val == V2:Val => V1 ==K V2
+  rule V1:Val != V2:Val => V1 =/=K V2
+  rule ! T => notBool(T)
+  rule true  && E => E
+  rule false && _ => false
+  rule true  || _ => true
+  rule false || E => E
+
+
+  rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs]
+    [anywhere]
+
+  rule array(L,_)[N:Int] => lookup(L +Int N)
+    [anywhere]
+
+
+  rule sizeOf(array(_,N)) => N
+

The semantics of function application needs to change into dynamic
+method dispatch invocation, which is defined shortly. However,
+interestingly, the semantics of return stays unchanged.

+
  rule <k> return(V:Val); ~> _ => V ~> K </k>
+       <control>
+         <fstack> ListItem(fstackFrame(Env,K,XS,<crntObj> CO </crntObj>)) => .List ...</fstack>
+         <xstack> _ => XS </xstack>
+         <crntObj> _ => CO </crntObj>
+       </control>
+       <env> _ => Env </env>
+
+  syntax Val ::= "nothing"
+  rule return; => return nothing;
+
+
+  rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input>
+
+
+  context (HOLE => lvalue(HOLE)) = _
+
+  rule <k> loc(L) = V:Val => V ...</k> <store>... L |-> (_ => V) ...</store>
+
+  rule {} => .K
+  rule <k> { S } => S ~> setEnv(Env) ...</k>  <env> Env </env>
+
+
+  rule S1::Stmt S2::Stmt => S1 ~> S2
+
+  rule _:Val; => .K
+
+  rule if ( true) S else _ => S
+  rule if (false) _ else S => S
+
+  rule while (E) S => if (E) {S while(E)S}
+
+  rule <k> print(V:Val, Es => Es); ...</k> <output>... .List => ListItem(V) </output>
+  rule print(.Vals); => .K
+
+
+  syntax KItem ::= xstackFrame(Id,Stmt,K,Map,K)
+  // TODO(KORE): drop the additional production once parsing issue #1842 is fixed
+                 | (Id,Stmt,K,Map,K)
+
+  syntax KItem ::= "popx"
+
+  rule <k> (try S1 catch(X) {S2} => S1 ~> popx) ~> K </k>
+       <control>
+         <xstack> .List => ListItem(xstackFrame(X, S2, K, Env, C)) ...</xstack>
+         C
+       </control>
+       <env> Env </env>
+
+  rule <k> popx => .K ...</k>
+       <xstack> ListItem(_) => .List ...</xstack>
+
+  rule <k> throw V:Val; ~> _ => { var X = V; S2 } ~> K </k>
+       <control>
+         <xstack> ListItem(xstackFrame(X, S2, K, Env, C)) => .List ...</xstack>
+         (_ => C)
+       </control>
+       <env> _ => Env </env>
+

Thread spawning needs a new semantics, because we want the child
+thread to also share the object environment with its parent. The new
+semantics of thread spawning will be defined shortly. However,
+interestingly, the other concurrency constructs keep their semantics
+from SIMPLE unchanged.

+
  // TODO(KORE): ..Bag should be . throughout this definition #1772
+  rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag)
+  /*
+  rule (<thread>... <k>.</k> <holds>H</holds> <id>T</id> ...</thread> => .)
+  */
+       <busy> Busy => Busy -Set keys(H) </busy>
+       <terminated>... .Set => SetItem(T) ...</terminated>
+
+  rule <k> join T:Int; => .K ...</k>
+       <terminated>... SetItem(T) ...</terminated>
+
+  rule <k> acquire V:Val; => .K ...</k>
+       <holds>... .Map => V |-> 0 ...</holds>
+       <busy> Busy (.Set => SetItem(V)) </busy>
+    requires (notBool(V in Busy:Set))
+
+  rule <k> acquire V; => .K ...</k>
+       <holds>... V:Val |-> (N:Int => N +Int 1) ...</holds>
+
+  rule <k> release V:Val; => .K ...</k>
+       <holds>... V |-> (N => N:Int -Int 1) ...</holds>
+    requires N >Int 0
+
+  rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds>
+       <busy>... SetItem(V) => .Set ...</busy>
+
+  rule <k> rendezvous V:Val; => .K ...</k>
+       <k> rendezvous V; => .K ...</k>
+

Unchanged auxiliary operations from untyped SIMPLE

+ +
  syntax Stmt ::= mkDecls(Ids,Vals)  [function]
+  rule mkDecls((X:Id, Xs:Ids), (V:Val, Vs:Vals)) => var X=V; mkDecls(Xs,Vs)
+  rule mkDecls(.Ids,.Vals) => {}
+
+  // TODO(KORE): clarify sort inferences #1803
+  syntax Exp ::= lookup(Int)
+  /*
+  syntax KItem ::= lookup(Int)
+  */
+  rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store>
+
+  syntax KItem ::= setEnv(Map)
+  rule <k> setEnv(Env) => .K ...</k>  <env> _ => Env </env>
+  rule (setEnv(_) => .K) ~> setEnv(_)
+  // TODO: How can we make sure that the second rule above applies before the first one?
+  //       Probably we'll deal with this using strategies, eventually.
+
+  syntax Exp ::= lvalue(K)
+  syntax Val ::= loc(Int)
+
+  rule <k> lvalue(X:Id => loc(L)) ...</k> <env>... X |-> L:Int ...</env>
+
+  context lvalue(_::Exp[HOLE::Exps])
+  context lvalue(HOLE::Exp[_::Exps])
+
+  rule lvalue(lookup(L:Int) => loc(L))
+
+
+  syntax Map ::= Int "..." Int "|->" K
+    [function]
+  rule N...M |-> _ => .Map  requires N >Int M
+  rule N...M |-> K => N |-> K (N +Int 1)...M |-> K  requires N <=Int M
+
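
For instance, unfolding this helper on a small range proceeds as sketched
+below (a worked instance shown for illustration only; it is not part of the
+definition):

+
  // 3 ... 5 |-> undefined
+  //   => 3 |-> undefined  (4 ... 5 |-> undefined)
+  //   => 3 |-> undefined  4 |-> undefined  5 |-> undefined
+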

Changes to the existing untyped SIMPLE semantics

+ +

When we extend a language, sometimes we need to do more than just add
+new language constructs and semantics for them. Sometimes we want to
+also extend the semantics of existing language constructs, in order to
+get more from them.

+

Program initialization

+ +

In SIMPLE, once all the global declarations were processed, the
+function main() was invoked. In KOOL, the global
+declarations are classes, and their specific semantics is given
+shortly; essentially, they are pre-processed one by one and added
+into the class cell structure in the configuration.
+Once all the classes are processed, the computation item
+execute, which was placed right after the program in the
+initial configuration, is reached. In SIMPLE, the program was
+initialized by calling the method main(). In KOOL, the
+program is initialized by creating an object instance of class
+Main. This will also implicitly call the method
+Main() (the Main class constructor). The emptiness
+of the env cell below is just a sanity check, to make sure
+that the user has not declared anything but classes at the top level
+of the program.

+
  syntax KItem ::= "execute"
+  rule <k> execute => new Main(.Exps); </k> <env> .Map </env>
+
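
As a minimal illustration, the smallest program this initialization rule can
+act upon consists of a single Main class whose constructor does all
+the work (a hypothetical sketch, not part of the definition):

+
  // execute creates an instance of Main, which implicitly invokes Main()
+  class Main {
+    method Main() {
+      print("hello\n");
+    }
+  }
+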

The semantics of new (defined below) requires the
+execution of all the class' declarations (and also of its
+superclasses').

+

Object and method closures

+ +

Before we can define the semantics of method application (previously
+called function application in SIMPLE), we need to add two more values
+to the language, namely object and method closures:

+
  syntax Val ::= objectClosure(Id, List)
+               | methodClosure(Id,Int,Ids,Stmt)
+

An object value consists of an objectClosure-wrapped pair
+containing the current class of the object and the environment stack
+of the object. The current class of an object will always be one of
+the classes mapped to an environment in the environment stack of the
+object. A method closure encapsulates the method's parameters and
+code (last two arguments), as well as the object context in which the
+method code should execute. This object context includes the current
+class of the object (the first argument of methodClosure) and
+the object environment stack (located in the object stored at the
+location specified as the second argument of methodClosure).

+

Method application

+ +

KOOL has a complex mechanism to invoke methods, because it allows both
+dynamic method dispatch and methods as first-class-citizen values (the
+latter making it a higher-order language). The invocation mechanism
+will be defined later. What is sufficient to know for now is that
+the two arguments of the application construct eventually reduce to
+values, the first being a method closure and the latter a list of
+values. The semantics of the method closure application is then as
+expected: the local environment and control are stacked, then we
+switch to the method closure's class and object environment and execute
+the method body. The mkDecls construct is the one that came
+with the unchanged semantics of SIMPLE above.

+
  syntax KItem ::= fstackFrame(Map,K,List,K)
+  // TODO(KORE): drop the additional production once parsing issue #1842 is fixed
+                 | (Map,K,K)
+
+  rule <k> methodClosure(Class,OL,Xs,S)(Vs:Vals) ~> K
+           => mkDecls(Xs,Vs) S return; </k>
+       <env> Env => .Map </env>
+       <store>... OL |-> objectClosure(_, EnvStack)...</store>
+     //<br/> // TODO(KORE): support latex annotations #1799
+       <control>
+          <xstack> XS </xstack>
+          <fstack> .List => ListItem(fstackFrame(Env, K, XS, <crntObj> Obj' </crntObj>))
+          ...</fstack>
+          <crntObj> Obj' => <crntClass> Class </crntClass> <envStack> EnvStack </envStack> </crntObj>
+       </control>
+

Spawn

+ +

We want to extend the semantics of spawn to also share the
+current object environment with the child thread, in addition to the
+current environment. This extension will allow us to also use method
+invocations in the spawned statements, which will be thus looked up as
+expected, using dynamic method dispatch. This lookup operation would
+fail if the child thread did not have access to its parent's object
+environment.

+
  rule <thread>...
+         <k> spawn S => !T:Int ...</k>
+         <env> Env </env>
+         <crntObj> Obj </crntObj>
+       ...</thread>
+       (.Bag => <thread>...
+               <k> S </k>
+               <env> Env </env>
+               <id> !T </id>
+               <crntObj> Obj </crntObj>
+             ...</thread>)
+
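
For illustration, here is a hypothetical program sketch (class and method
+names made up) in which the spawned block invokes a method of the parent's
+object; the lookup of run() succeeds in the child thread precisely
+because the rule above copies the parent's crntObj cell:

+
  class Main {
+    method Main() {
+      spawn { run(); };     // run() is resolved via dynamic method dispatch
+      print("parent\n");    // the two threads may interleave their output
+    }
+    method run() { print("child\n"); }
+  }
+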

Semantics of the new KOOL constructs

+ +

Class declaration

+ +

Initially, the classes forming the program are moved into their
+corresponding cells:

+
  rule <k> class Class1 extends Class2 { S } => .K ...</k>
+       <classes>... (.Bag => <classData>
+                            <className> Class1 </className>
+                            <baseClass> Class2 </baseClass>
+                            <declarations> S </declarations>
+                        </classData>)
+       ...</classes>
+

Method declaration

+ +

Like in SIMPLE, method names are added to the environment and bound
+to their code. However, unlike in SIMPLE where each function was
+executed in the same environment, namely the program global
+environment, a method in KOOL needs to be executed into its object's
+environment. Thus, methods evaluate to closures, which encapsulate
+their object's context (i.e., the current class and environment stack
+of the object) in addition to the method's parameters and body. This
+approach to bind method names to method closures in the environment
+will also allow objects to pass their methods to other objects, to
+dynamically change their methods by assigning them other method
+closures, and even to allow all these to be done from other objects.
+This gives the KOOL programmer a lot of power; one should use this
+power wisely, though, because programs can easily become hard to
+understand and reason about if one overuses these features.

+
  rule <k> method F:Id(Xs:Ids) S => .K ...</k>
+       <crntClass> Class:Id </crntClass>
+       <location> OL:Int </location>
+       <env> Env => Env[F <- L] </env>
+       <store>... .Map => L |-> methodClosure(Class,OL,Xs,S) ...</store>
+       <nextLoc> L => L +Int 1 </nextLoc>
+
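
To make this concrete, the hypothetical sketch below (all names made up)
+overwrites one object's member with another object's method closure by plain
+assignment, and the subsequent invocation uses the new closure:

+
  class A {
+    method A() {}
+    method hello() { print("A\n"); }
+  }
+  class Main {
+    method Main() {
+      var a = new A();
+      a.hello = other;      // other evaluates to a method closure of Main
+      a.hello();            // now prints "Main", not "A"
+    }
+    method other() { print("Main\n"); }
+  }
+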

New

+ +

The semantics of new consists of two actions: memory
+allocation for the new object and execution of the corresponding
+constructor. Then the created object is returned as the result of the
+new operation; the value returned by the constructor, if any,
+is discarded. The current environment and object are stored onto the
+stack and recovered after new (according to the semantics of
+return borrowed from SIMPLE, when the statement
+return this; in the rule below is reached and evaluated),
+because the object creation part of new will destroy them.
+The rule below also initializes the object creation process by
+emptying the local environment and the current object, and allocating
+a location in the store where the created object will be eventually
+stored (this is what the storeObj task after the object
+creation task in the rule below will do—its rule is defined
+shortly). The location where the object will be stored is also made
+available in the crntObj cell, so that method closures can
+refer to it (see rule above).

+
  syntax KItem ::= "envStackFrame" "(" Id "," Map ")"
+
+  rule <k> new Class:Id(Vs:Vals) ~> K
+           => create(Class) ~> storeObj ~> Class(Vs); return this; </k>
+       <env> Env => .Map </env>
+       <nextLoc> L:Int => L +Int 1 </nextLoc>
+     //<br/> // TODO(KORE): support latex annotations #1799
+       <control> <xstack> XS </xstack>
+         <crntObj> Obj
+                   => <crntClass> Object </crntClass>
+                      <envStack> ListItem(envStackFrame(Object, .Map)) </envStack>
+                      <location> L </location>
+         </crntObj>
+         <fstack> .List => ListItem(fstackFrame(Env, K, XS, <crntObj> Obj </crntObj>)) ...</fstack>
+       </control>
+
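
The hypothetical sketch below (all names made up) also recalls that the
+superclass constructor runs only when it is invoked explicitly from the
+subclass constructor:

+
  class Point {
+    var x;
+    method Point(a) { x = a; }
+  }
+  class ColorPoint extends Point {
+    var color;
+    method ColorPoint(a, c) {
+      super.Point(a);       // explicit call; otherwise Point(a) never runs
+      color = c;
+    }
+  }
+  class Main {
+    method Main() {
+      var p = new ColorPoint(3, "red");
+      print(p.x, " ", p.color, "\n");   // 3 red
+    }
+  }
+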

The creation of a new object (the memory allocation part only) is
+a recursive process, which requires first creating an object for the
+superclass. A memory object representation is a layered structure:
+for each class on the path from the instance class to the root of the
+hierarchy there is a layer including the memory allocated for the
+members (both fields and methods) of that class.

+
  syntax KItem ::= create(Id)
+
+  rule <k> create(Class:Id)
+           => create(Class1) ~> setCrntClass(Class) ~> S ~> addEnvLayer ...</k>
+       <className> Class </className>
+       <baseClass> Class1:Id </baseClass>
+       <declarations> S </declarations>
+
+  rule <k> create(Object) => .K ...</k>
+

The next operation sets the current class of the current object.
+This needs to be done at each layer, because the current class
+of the object is enclosed as part of the method closures (see the
+semantics of method declarations above).

+
  syntax KItem ::= setCrntClass(Id)
+
+  rule <k> setCrntClass(C) => .K ...</k>
+       <crntClass> _ => C </crntClass>
+

The next operation adds a new tagged environment layer to the
+current object and gets ready for the next layer by clearing the
+environment (note that create expects the environment to be
+empty).

+
  syntax KItem ::= "addEnvLayer"
+
+  rule <k> addEnvLayer => .K ...</k>
+       <env> Env => .Map </env>
+       <crntClass> Class:Id </crntClass>
+       <envStack> .List => ListItem(envStackFrame(Class, Env)) ...</envStack>
+

The following operation stores the created object at the location
+reserved by new. Note that the location reserved by
+new was temporarily stored in the crntObj cell
+precisely for this purpose. Now that the newly created object is
+stored at its location and that all method closures are aware of it,
+the location is unnecessary and thus we delete it from the
+crntObj cell.

+
  syntax KItem ::= "storeObj"
+
+  rule <k> storeObj => .K ...</k>
+       <crntObj> <crntClass> CC </crntClass> <envStack> ES </envStack> (<location> L:Int </location> => .Bag) </crntObj>
+       <store>... .Map => L |-> objectClosure(CC, ES) ...</store>
+

Self reference

+ +

The semantics of this is straightforward: evaluate to the
+current object.

+
  rule <k> this => objectClosure(CC, ES) ...</k>
+       <crntObj> <crntClass> CC </crntClass> <envStack> ES </envStack> </crntObj>
+

Object member access

+ +

We can access an object member (field or method) either explicitly,
+using the construct e.x, or implicitly, using only the member
+name x directly. The borrowed semantics of SIMPLE will
+already look up a sole name in the local environment. The first rule
+below reduces implicit member access to explicit access when the name
+cannot be found in the local environment. There are two cases to
+analyze for explicit object member access, depending upon whether the
+object is a proper object or it is just a redirection to the parent
+class via the construct super. In the first case, we
+evaluate the object expression and lookup the member starting with the
+current class (static scoping). Note the use of the conditional
+evaluation context. In the second case, we just lookup the member
+starting with the superclass of the current class. In both cases,
+the lookupMember task eventually yields a lookup(L)
+task for some appropriate location L, which will be further
+solved with the corresponding rule borrowed from SIMPLE. Note that the
+current object is not altered by super, so future method
+invocations see the entire object, as needed for dynamic method dispatch.

+
  rule <k> X:Id => this . X ...</k> <env> Env:Map </env>
+    requires notBool(X in keys(Env))
+
+  context HOLE._::Id requires (HOLE =/=K super)
+
+// TODO: explain how Assoc matching has been replaced with two rules here.
+// Maybe also improve it a bit.
+
+/*  rule objectClosure(<crntClass> Class:Id </crntClass>
+                     <envStack>... envStackFrame(Class,EnvC) EStack </envStack>)
+       . X:Id
+    => lookupMember(envStackFrame(Class,EnvC) EStack, X) */
+
+  rule objectClosure(Class:Id, ListItem(envStackFrame(Class,Env)) EStack)
+       . X:Id
+    => lookupMember(ListItem(envStackFrame(Class,Env)) EStack, X)
+  rule objectClosure(Class:Id, (ListItem(envStackFrame(Class':Id,_)) => .List) _)
+       . _X:Id
+    requires Class =/=K Class'
+
+/*  rule <k> super . X => lookupMember(EStack, X) ...</k>
+       <crntClass> Class </crntClass>
+       <envStack>... envStackFrame(Class,EnvC) EStack </envStack> */
+  rule <k> super . X => lookupMember(EStack, X) ...</k>
+       <crntClass> Class:Id </crntClass>
+       <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack>
+  rule <k> super . _X ...</k>
+       <crntClass> Class </crntClass>
+       <envStack> ListItem(envStackFrame(Class':Id,_)) => .List ...</envStack>
+    requires Class =/=K Class'
+
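
A hypothetical program sketch (all names made up) illustrating these lookup
+rules: the implicit x resolves through this and the current
+class, while super.x skips the current class layer:

+
  class B {
+    var x;
+    method B() { x = 1; }
+  }
+  class C extends B {
+    var x;
+    method C() { super.B(); x = 2; }
+    method show() { print(x, " ", super.x, "\n"); }  // prints: 2 1
+  }
+  class Main {
+    method Main() {
+      var c = new C();
+      c.show();
+    }
+  }
+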

Method invocation

+ +

Unlike in SIMPLE, in KOOL application was declared strict only in its
+second argument. That is because we want to ensure dynamic method
+dispatch when the first argument is a method access. As a
+consequence, we need to consider all the cases of interest for the
+first argument and to explicitly say what to do in each case. In all
+cases except for method access in a proper object (i.e., not
+super), we want the same behavior for the first argument as
+if it was not in a method invocation position. When it is a member
+access (the third rule below), we look it up starting with the
+instance class of the corresponding object. This ensures dynamic
+dispatch for methods; it actually dynamically dispatches field
+accesses, too, which is correct in KOOL, because one can assign method
+closures to fields and the field appeared in a method invocation
+context. The last context declaration below says that method
+applications or array accesses are also allowed as first argument to
+applications; that is because methods are allowed to return methods
+and arrays are allowed to hold methods in KOOL, since it is
+higher-order. If that is the case, then we want to evaluate the
+method call or the array access.

+
  rule <k> (X:Id => V)(_:Exps) ...</k>
+       <env>... X |-> L ...</env>
+       <store>... L |-> V:Val ...</store>
+
+  rule <k> (X:Id => this . X)(_:Exps) ...</k>
+       <env> Env </env>
+    requires notBool(X in keys(Env))
+
+  context HOLE._::Id(_) requires HOLE =/=K super
+
+  rule (objectClosure(_, EStack) . X
+    => lookupMember(EStack, X:Id))(_:Exps)
+
+/*  rule <k> (super . X
+            => lookupMember(EStack,X))(_:Exps)...</k>
+       <crntClass> Class </crntClass>
+       <envStack>... envStackFrame(Class,_) EStack </envStack> */
+  rule <k> (super . X
+            => lookupMember(EStack,X))(_:Exps)...</k>
+       <crntClass> Class </crntClass>
+       <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack>
+  rule <k> (super . _X)(_:Exps) ...</k>
+       <crntClass> Class </crntClass>
+       <envStack> ListItem(envStackFrame(Class':Id,_)) => .List ...</envStack>
+    requires Class =/=K Class'
+
+  // TODO(KORE): fix getKLabel #1801
+  rule (A:Exp(B:Exps))(C:Exps) => A(B) ~> #freezerFunCall(C)
+  rule (A:Exp[B:Exps])(C:Exps) => A[B] ~> #freezerFunCall(C)
+  rule V:Val ~> #freezerFunCall(C:Exps) => V(C)
+  syntax KItem ::= "#freezerFunCall" "(" K ")"
+  /*
+  context HOLE(_:Exps)
+    when getKLabel(HOLE) ==K #klabel(`_(_)`) orBool getKLabel(HOLE) ==K #klabel(`_[_]`)
+  */
+

Eventually, each of the rules above produces a lookup(L)
+task as a replacement for the method. When that happens, we just
+lookup the value at location L:

+
  rule <k> (lookup(L) => V)(_:Exps) ...</k>  <store>... L |-> V:Val ...</store>
+

The value V looked up above is expected to be a method closure,
+in which case the semantics of method application given above will
+apply. Otherwise, the execution will get stuck.

+

Instance Of

+ +

The instanceOf construct searches the object environment for a layer corresponding to the
+desired class. It returns true iff it can find the class,
+otherwise it returns false; it only gets stuck when its first
+argument does not evaluate to an object.

+
  rule objectClosure(_, ListItem(envStackFrame(C,_)) _)
+       instanceOf C => true
+
+  rule objectClosure(_, (ListItem(envStackFrame(C,_)) => .List) _)
+       instanceOf C'  requires C =/=K C'
+//TODO: remove the sort cast ::Id of C above, when sort inference bug fixed
+
+  rule objectClosure(_, .List) instanceOf _ => false
+

Cast

+ +

In untyped KOOL, we prefer to not check the validity of casting. In
+other words, any cast is allowed on any object, simply changing the
+current class of the object to the desired class. The execution will
+get stuck later if one attempts to access a field which is not
+available. Moreover, the execution may complete successfully even
+in the presence of invalid casts, provided that each accessed member
+during the current execution is, or happens to be, available.

+
  rule (C) objectClosure(_ , EnvStack) => objectClosure(C ,EnvStack)
+
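
As a hypothetical sketch (names made up), the program below exercises both
+constructs: the unchecked cast succeeds, and instanceOf keeps
+answering according to the unchanged environment stack:

+
  class A { method A() {} }
+  class Main {
+    method Main() {
+      var a = new A();
+      print(a instanceOf A, "\n");   // true
+      var b = (Main) a;              // allowed: only the current class changes
+      print(b instanceOf A, "\n");   // still true: the instance layers are intact
+    }
+  }
+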

KOOL-specific auxiliary declarations and operations

+ +

Here we define all the auxiliary constructs used in the above
+KOOL-specific semantics (those used in the SIMPLE fragment
+have already been defined in a corresponding section above).

+

Objects as lvalues

+ +

The current machinery borrowed with the semantics of SIMPLE allows us
+to enrich the set of lvalues, thereby allowing new means to assign
+values to locations. In KOOL, we want object member names to be
+lvalues, so that we can assign values to them using the already
+existing machinery. The first rule below ensures that the object is
+always explicit, the evaluation context enforces the object to be
+evaluated, and finally the second rule initiates the lookup for the
+member's location based on the current class of the object.

+
  rule <k> lvalue(X:Id => this . X) ...</k>  <env> Env </env>
+    requires notBool(X in keys(Env))
+
+  context lvalue((HOLE . _)::Exp)
+
+/*  rule lvalue(objectClosure(<crntClass> C </crntClass>
+                            <envStack>... envStackFrame(C,EnvC) EStack </envStack>)
+              . X
+              => lookupMember(<envStack> envStackFrame(C,EnvC) EStack </envStack>,
+                              X))  */
+  rule lvalue(objectClosure(Class, ListItem(envStackFrame(Class,Env)) EStack)
+              . X
+              => lookupMember(ListItem(envStackFrame(Class,Env)) EStack,
+                              X))
+  rule lvalue(objectClosure(Class, (ListItem(envStackFrame(Class':Id,_)) => .List) _)
+              . _X)
+    requires Class =/=K Class'
+

Lookup member

+ +

It searches for the given member in the given environment stack,
+starting with the most concrete class and going up in the hierarchy.

+
  // TODO(KORE): clarify sort inferences #1803
+  syntax Exp ::= lookupMember(List, Id)  [function]
+  /*
+  syntax KItem ::= lookupMember(EnvStackCell,Id)  [function]
+  */
+
+//  rule lookupMember(<envStack> envStackFrame(_, <env>... X|->L ...</env>) ...</envStack>, X)
+//    => lookup(L)
+  rule lookupMember(ListItem(envStackFrame(_, X|->L _)) _, X)
+    => lookup(L)
+
+//  rule lookupMember(<envStack> envStackFrame(_, <env> Env </env>) => .List ...</envStack>, X)
+//    when notBool(X in keys(Env))
+  rule lookupMember(ListItem(envStackFrame(_, Env)) Rest, X) =>
+       lookupMember(Rest, X)
+    requires notBool(X in keys(Env))
+//TODO: beautify the above
+
+endmodule
+

Go to Lesson 2, KOOL typed dynamic.

+

KOOL — Untyped

+ +

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+ +

This is the K semantic definition of the untyped KOOL language. KOOL
+is aimed at being a pedagogical and research language that captures
+the essence of the object-oriented programming paradigm. Its untyped
+variant discussed here is simpler than the typed one, ignoring several
+intricate aspects of types in the presence of objects. A program
+consists of a set of class declarations. Each class can extend at
+most one other class (KOOL is single-inheritance). A class can
+declare a set of fields and a set of methods, all public and called
+the class' members. Specifically, KOOL includes the
+following features:

+
    +
  • +

    Class declarations, where a class may or may not explicitly
    +extend another class. In case a class does not explicitly extend
    +another class, then it is assumed that it extends the default top-most
    +and empty (i.e., no members) class called Object. Each class
    +is required to declare precisely one homonymous method, called its
    +constructor. Each valid program should contain one class
    +named Main, whose constructor, Main(), takes no
    +arguments. The execution of a program consists of creating an object
    +instance of class Main and invoking the constructor
    +Main() on it, that is, of executing new Main();.

    +
  • +
  • +

    All features of SIMPLE (see examples/simple/untyped),
    +i.e., multidimensional arrays, function (here called "method")
    +abstractions with call-by-value parameter passing style and static
    +scoping, blocks with locals, input/output, parametric exceptions, and
    +concurrency via dynamic thread creation/termination and synchronization.
    +The only change in the syntax of SIMPLE when imported in KOOL is the
    +function declaration keyword, function, which is changed into
    +method. The exact same desugaring macros from SIMPLE are
    +also included in KOOL. We can think of KOOL's classes as embedding
    +SIMPLE programs (extended with OO constructs, as discussed next).

    +
  • +
  • +

    Object creation using the new C(e1,...,en)
    +expression construct. An object instance of class C is first
    +created and then the constructor C(e1,...,en) is implicitly
    +called on that object. KOOL only allows (and requires) one
    +constructor per class. The class constructor can be called either
    +implicitly during a new object creation for the class, or explicitly.
    +The superclass constructor is not implicitly invoked when a
    +class constructor is invoked; if you want to invoke the superclass
    +constructor from a subclass constructor then you have to do it
    +explicitly.

    +
  • +
  • +

    An expression construct this, which evaluates to the
    +current object.

    +
  • +
  • +

    An expression construct super, which is used (only) in
    +combination with member lookup (see next) to refer to a superclass
    +field or method.

    +
  • +
  • +

    A member lookup expression construct e.x, where e
    +is an expression (either an expression expected to evaluate to an object
    +or the super construct) and x is a class member name,
    +that is, a field or a method name.

    +
  • +
  • +

    Expression constructs e instanceOf C and
    +(C) e, where e is an expression expected
    +to evaluate to an object and C a class name. The former
    +tells whether the class of e is a subclass of C,
    +that is, whether e can be used as an instance of C,
    +and the latter changes the class of e to C. These
    +operations always succeed: the former returns a Boolean value, while
    +the latter changes the current class of e to C
    +regardless of whether it is safe to do so or not. The typed version
    +of KOOL will check the safety of casting by ensuring that the instance
    +class of the object is a subclass of C. In untyped KOOL we
    +do not want to perform this check because we want to allow the
+programmer maximum flexibility: if one always accesses only
    +available members, then the program can execute successfully despite
    +the potentially unsafe cast.

    +
  • +
+

There are some specific aspects of KOOL that need to be discussed.

+

First, KOOL is higher-order, allowing function abstractions to be
+treated like any other values in the language. For example, if
+m is a method of object e then e.m
+evaluates to the corresponding function abstraction. The function
+abstraction is in fact a closure, because in addition to the method
+parameters and body it also encapsulates the object value (i.e., the
+environment of the object together with its current class—see below)
+that e evaluates to. This way, function abstractions can be
+invoked anywhere and have the capability to change the state of their
+object. For example, if m is a method of object e
+which increments a field c of e when invoked, and if
+getm is another method of e which simply returns
+m when invoked, then the double application
+(e.getm())() has the same effect as e.m(), that is,
+increments the counter c of e. Note that the
+higher-order nature of KOOL was not originally planned; it came as a
+natural consequence of evaluating methods to closures and we decided
+to keep it. If you do not like it then do not use it.

+
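
The following hypothetical program sketch (class and member names made up)
+illustrates this point: the double application (o.getm())() has the
+same effect as o.m():

+
  class Counter {
+    var c;
+    method Counter() { c = 0; }
+    method m() { c = c + 1; }
+    method getm() { return m; }   // returns the closure of m, not its result
+  }
+  class Main {
+    method Main() {
+      var o = new Counter();
+      (o.getm())();                // same effect as o.m(): increments c
+      print(o.c, "\n");            // 1
+    }
+  }
+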

Second, since all the fields and methods are public in KOOL and since
+they can be redeclared in subclasses, it is not immediately clear how
+to look up the member x when we write e.x and
+e is different from super. We distinguish two cases,
+depending on whether e.x occurs in a method invocation
+context (i.e., e.x(...)) or in a field context. KOOL has
+dynamic method dispatch, so if e.x is invoked as a method
+then x will be searched for starting with the instance class of
+the object value to which e evaluates. If e.x
+occurs in a non-method-invocation context then x will be
+treated as a field (although it may hold a method closure due to the
+higher-order nature of KOOL) and thus will be searched starting with
+the current class of the object value of e (which, because of
+this and casting, may be different from its instance class).
+In order to achieve the above, each object value will consist of a
+pair holding the current class of the object and an environment stack
+with one layer for each class in the object's instance class hierarchy.

+

Third, although KOOL uses dynamic method dispatch, its capabilities
+described above are powerful enough to allow us to mimic static
+method dispatch. For example, suppose that you want to invoke method
+m() statically. Then all you need to do is to declare a
+local variable and bind it to m, for example var staticm = m;, and
+then call staticm(). This works because
+staticm is first bound to the method closure that m
+evaluates to, and then looked up as any local variable when invoked.
+We only enable the dynamic method dispatch when we have an object
+member in an application position, e.g., m().

+
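
Concretely (a hypothetical sketch, names made up), the two calls below behave
+the same here, but only the second one would be re-dispatched if m
+were overridden in a subclass:

+
  class Main {
+    method Main() {
+      var staticm = m;   // binds the closure that m currently evaluates to
+      staticm();         // looked up as a plain local variable: no dispatch
+      m();               // member in an application position: dynamic dispatch
+    }
+    method m() { print("m\n"); }
+  }
+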

In what follows, we limit our comments to the new, KOOL-specific
+aspects of the language. We refer the reader to the untyped SIMPLE
+language for documentation on the remaining features, because
+those were all borrowed from SIMPLE.

+
module KOOL-UNTYPED-SYNTAX
+  imports DOMAINS-SYNTAX
+

Syntax

+ +

The syntax of KOOL extends that of SIMPLE with object-oriented
+constructs. We removed from the K annotated syntax of SIMPLE two
+constructs, namely the one for function declarations (because we want
+to call them methods now) and the one for function application
+(because application is not strict in the first argument
+anymore—it needs to initiate dynamic method dispatch). The additional
+syntax includes:

+
    +
  • First, we need a new dedicated identifier, Object, for
    +the default top-most class.
  • +
  • Second, we rename the function keyword of SIMPLE into method.
  • +
  • Third, we add syntax for class declarations together with a
    +macro making classes which extend nothing to extend Object.
  • +
  • Fourth, we change the strictness attribute of application
    +into strict(2).
  • +
  • Finally, we add syntax and corresponding strictness
    +for the KOOL object-oriented constructs.
  • +
+
  syntax Id ::= "Object" [token] | "Main" [token]
+
+  syntax Stmt ::= "var" Exps ";"
+                | "method" Id "(" Ids ")" Block  // called "function" in SIMPLE
+                | "class" Id Block               // KOOL
+                | "class" Id "extends" Id Block  // KOOL
+
+  syntax Exp ::= Int | Bool | String | Id
+               | "this"                                 // KOOL
+               | "super"                                // KOOL
+               | "(" Exp ")"             [bracket]
+               | "++" Exp
+               | Exp "instanceOf" Id     [strict(1)]    // KOOL
+               | "(" Id ")" Exp          [strict(2)]    // KOOL  cast
+               | "new" Id "(" Exps ")"   [strict(2)]    // KOOL
+               | Exp "." Id                             // KOOL
+               > Exp "[" Exps "]"        [strict]
+               > Exp "(" Exps ")"        [strict(2)]    // was strict in SIMPLE
+               | "-" Exp                 [strict]
+               | "sizeOf" "(" Exp ")"    [strict]
+               | "read" "(" ")"
+               > left:
+                 Exp "*" Exp             [strict, left]
+               | Exp "/" Exp             [strict, left]
+               | Exp "%" Exp             [strict, left]
+               > left:
+                 Exp "+" Exp             [strict, left]
+               | Exp "-" Exp             [strict, left]
+               > non-assoc:
+                 Exp "<" Exp             [strict, non-assoc]
+               | Exp "<=" Exp            [strict, non-assoc]
+               | Exp ">" Exp             [strict, non-assoc]
+               | Exp ">=" Exp            [strict, non-assoc]
+               | Exp "==" Exp            [strict, non-assoc]
+               | Exp "!=" Exp            [strict, non-assoc]
+               > "!" Exp                 [strict]
+               > left:
+                 Exp "&&" Exp            [strict(1), left]
+               | Exp "||" Exp            [strict(1), left]
+               > "spawn" Block
+               > Exp "=" Exp             [strict(2), right]
+
+  syntax Ids  ::= List{Id,","}
+
+  syntax Exps ::= List{Exp,","}          [strict, overload(exps)]
+  syntax Val
+  syntax Vals ::= List{Val,","}          [overload(exps)]
+
+  syntax Block ::= "{" "}"
+                | "{" Stmt "}"
+
+  syntax Stmt ::= Block
+                | Exp ";"                               [strict]
+                | "if" "(" Exp ")" Block "else" Block   [avoid, strict(1)]
+                | "if" "(" Exp ")" Block                [macro]
+                | "while" "(" Exp ")" Block
+                | "for" "(" Stmt Exp ";" Exp ")" Block  [macro]
+                | "return" Exp ";"                      [strict]
+                | "return" ";"                          [macro]
+                | "print" "(" Exps ")" ";"              [strict]
+                | "try" Block "catch" "(" Id ")" Block
+                | "throw" Exp ";"                       [strict]
+                | "join" Exp ";"                        [strict]
+                | "acquire" Exp ";"                     [strict]
+                | "release" Exp ";"                     [strict]
+                | "rendezvous" Exp ";"                  [strict]
+
+  syntax Stmt ::= Stmt Stmt                          [right]
+

Old desugaring rules, from SIMPLE

+
  rule if (E) S => if (E) S else {}
+  rule for(Start Cond; Step) {S} => {Start while (Cond) {S Step;}}
+  rule var E1::Exp, E2::Exp, Es::Exps; => var E1; var E2, Es;       [anywhere]
+  rule var X::Id = E; => var X; X = E;                              [anywhere]
+
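
A worked instance of these macros (for illustration only, not part of the
+definition):

+
  // for(var i = 0; i <= 2; ++i) { print(i); }
+  // => { var i = 0; while (i <= 2) { print(i); ++i; } }      // for-loop macro
+  // => { var i; i = 0; while (i <= 2) { print(i); ++i; } }   // var-with-init macro
+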

New desugaring rule

+
  rule class C:Id S => class C extends Object S                     // KOOL
+
+endmodule
+

Semantics

+ +

We first discuss the new configuration of KOOL, which extends that of
+SIMPLE. Then we include the semantics of the constructs borrowed from
+SIMPLE unchanged; we refrain from discussing those, because they were
+already discussed in the K definition of SIMPLE. Then we discuss
+changes to SIMPLE's semantics needed for the more general meaning of
+the previous SIMPLE constructs (for example for thread spawning,
+assignment, etc.). Finally, we discuss in detail the
+semantics of the additional KOOL constructs.

+
module KOOL-UNTYPED
+  imports KOOL-UNTYPED-SYNTAX
+  imports DOMAINS
+

Configuration

+ +

KOOL removes one cell and adds two nested cells to the configuration
+of SIMPLE. The cell which is removed is the one holding the global
+environment, because a KOOL program consists of a set of classes only,
+with no global declarations. In fact, since informally speaking each
+KOOL class now includes a SIMPLE program, it is safe to say that the
+global variables in SIMPLE became class fields in KOOL. Let us now
+discuss the new cells that are added to the configuration of SIMPLE.

+
    +
  • +

    The cell crntObj holds data pertaining to the current
    +object, that is, the object environment in which the code in cell
    +k executes: crntClass holds the current class (which
    +can change as methods of the current object are invoked);
    +envStack holds the stack of environments as a list,
    +each layer corresponding to one class in the objects' instance class
    +hierarchy; location, which is optional, holds the location in
    +the store where the current object is or has to be located (this is
    +useful both for method closures and for the semantics of object
    +creation).

    +
  • +
  • +

    The cell classes holds all the declared classes, each
+class being held in its own classData cell which contains a name
+(className), a parent (baseClass), and the actual
    +member declarations (declarations).

    +
  • +
+
  // the syntax declarations below are required because the sorts are
+  // referenced directly by a production and, because of the way KIL to KORE
+  // is implemented, the configuration syntax is not available yet
+  // should simply work once KIL is removed completely
+  // check other definitions for this hack as well
+  syntax EnvCell
+  syntax ControlCell
+  syntax EnvStackCell
+  syntax CrntObjCellFragment
+
+  configuration <T color="red">
+                  <threads color="orange">
+                    <thread multiplicity="*" type="Set" color="yellow">
+                      <k color="green"> $PGM:Stmt ~> execute </k>
+                    //<br/> // TODO(KORE): support latex annotations #1799
+                      <control color="cyan">
+                        <fstack color="blue"> .List </fstack>
+                        <xstack color="purple"> .List </xstack>
+                      //<br/> // TODO(KORE): support latex annotations #1799
+                        <crntObj color="Fuchsia">  // KOOL
+                           <crntClass> Object </crntClass>
+                           <envStack> .List </envStack>
+                           <location multiplicity="?"> .K </location>
+                        </crntObj>
+                      </control>
+                    //<br/> // TODO(KORE): support latex annotations #1799
+                      <env color="violet"> .Map </env>
+                      <holds color="black"> .Map </holds>
+                      <id color="pink"> 0 </id>
+                    </thread>
+                  </threads>
+                //<br/> // TODO(KORE): support latex annotations #1799
+                  <store color="white"> .Map </store>
+                  <busy color="cyan">.Set </busy>
+                  <terminated color="red"> .Set </terminated>
+                  <input color="magenta" stream="stdin"> .List </input>
+                  <output color="brown" stream="stdout"> .List </output>
+                  <nextLoc color="gray"> 0 </nextLoc>
+                //<br/> // TODO(KORE): support latex annotations #1799
+                  <classes color="Fuchsia">        // KOOL
+                     <classData multiplicity="*" type="Map" color="Fuchsia">
+                        // the Map has as its key the first child of the cell,
+                        // in this case the className cell.
+                        <className color="Fuchsia"> Main </className>
+                        <baseClass color="Fuchsia"> Object </baseClass>
+                        <declarations color="Fuchsia"> .K </declarations>
+                     </classData>
+                  </classes>
+                </T>
+

Unchanged Semantics from untyped SIMPLE

+ +

The semantics below is taken over from SIMPLE unchanged.
+The semantics of function declaration and invocation, including the
+use of the special lambda abstraction value, needs to change
+in order to account for the fact that methods are now invoked into
+their object's environment. The semantics of function return actually
+stays unchanged. Also, the semantics of program initialization is
+different: now we have to create an instance of the Main
+class which also calls the constructor Main(), while in
+SIMPLE we only had to invoke the function Main().
+Finally, the semantics of thread spawning needs to change, too: the
+parent thread needs to also share its object environment with the
+spawned thread (in addition to its local environment, like in SIMPLE).
+This is needed in order to be able to spawn method invocations under
+dynamic method dispatch; for example, spawn { run(); }
+will need to look up the method run() in the newly created
+thread, an operation which will most likely fail unless the child thread
+sees the object environment of the parent thread. Note that the
+spawn statement of KOOL is more permissive than the threads
+of Java. In fact, the latter can be implemented in terms of our
+spawn—see the program threads.kool for a sketch.

+

Below is a subset of the values of SIMPLE, which are also values
+of KOOL. We will add other values later in the semantics, such as
+object and method closures.

+
  syntax Val ::= Int | Bool | String
+               | array(Int,Int)
+  syntax Exp ::= Val
+  syntax Exps ::= Vals
+  syntax KResult ::= Val
+  syntax KResult ::= Vals
+

The semantics below are taken verbatim from the untyped SIMPLE
+definition.

+
  syntax KItem ::= "undefined"
+
+  rule <k> var X:Id; => .K ...</k>
+       <env> Env => Env[X <- L] </env>
+       <store>... .Map => L |-> undefined ...</store>
+       <nextLoc> L:Int => L +Int 1 </nextLoc>
+
+
+  context var _:Id[HOLE];
+
+  rule <k> var X:Id[N:Int]; => .K ...</k>
+       <env> Env => Env[X <- L] </env>
+       <store>... .Map => L |-> array(L +Int 1, N)
+                          (L +Int 1) ... (L +Int N) |-> undefined ...</store>
+       <nextLoc> L:Int => L +Int 1 +Int N </nextLoc>
+    requires N >=Int 0
+
+
+  syntax Id ::= "$1" [token] | "$2" [token]
+  rule var X:Id[N1:Int, N2:Int, Vs:Vals];
+    => var X[N1];
+       {
+         var $1=X;
+         for(var $2=0; $2 <= N1 - 1; ++$2) {
+           var X[N2,Vs];
+           $1[$2] = X;
+         }
+       }
+
+
+  rule <k> X:Id => V ...</k>
+       <env>... X |-> L ...</env>
+       <store>... L |-> V:Val ...</store>
+
+
+  context ++(HOLE => lvalue(HOLE))
+  rule <k> ++loc(L) => I +Int 1 ...</k>
+       <store>... L |-> (I:Int => I +Int 1) ...</store>
+
+
+  rule I1 + I2 => I1 +Int I2
+  rule Str1 + Str2 => Str1 +String Str2
+  rule I1 - I2 => I1 -Int I2
+  rule I1 * I2 => I1 *Int I2
+  rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0
+  rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0
+  rule - I => 0 -Int I
+  rule I1 < I2 => I1 <Int I2
+  rule I1 <= I2 => I1 <=Int I2
+  rule I1 > I2 => I1 >Int I2
+  rule I1 >= I2 => I1 >=Int I2
+
+  rule V1:Val == V2:Val => V1 ==K V2
+  rule V1:Val != V2:Val => V1 =/=K V2
+  rule ! T => notBool(T)
+  rule true  && E => E
+  rule false && _ => false
+  rule true  || _ => true
+  rule false || E => E
+
+
+  rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs]
+    [anywhere]
+
+  rule array(L,_)[N:Int] => lookup(L +Int N)
+    [anywhere]
+
+
+  rule sizeOf(array(_,N)) => N
+

The semantics of function application needs to change into dynamic
+method dispatch invocation, which is defined shortly. However,
+interestingly, the semantics of return stays unchanged.

+
  rule <k> return(V:Val); ~> _ => V ~> K </k>
+       <control>
+         <fstack> ListItem(fstackFrame(Env,K,XS,<crntObj> CO </crntObj>)) => .List ...</fstack>
+         <xstack> _ => XS </xstack>
+         <crntObj> _ => CO </crntObj>
+       </control>
+       <env> _ => Env </env>
+
+  syntax Val ::= "nothing"
+  rule return; => return nothing;
+
+
+  rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input>
+
+
+  context (HOLE => lvalue(HOLE)) = _
+
+  rule <k> loc(L) = V:Val => V ...</k> <store>... L |-> (_ => V) ...</store>
+
+  rule {} => .K
+  rule <k> { S } => S ~> setEnv(Env) ...</k>  <env> Env </env>
+
+
+  rule S1::Stmt S2::Stmt => S1 ~> S2
+
+  rule _:Val; => .K
+
+  rule if ( true) S else _ => S
+  rule if (false) _ else S => S
+
+  rule while (E) S => if (E) {S while(E)S}
+
+  rule <k> print(V:Val, Es => Es); ...</k> <output>... .List => ListItem(V) </output>
+  rule print(.Vals); => .K
+
+
+  syntax KItem ::= xstackFrame(Id,Stmt,K,Map,K)
+  // TODO(KORE): drop the additional production once parsing issue #1842 is fixed
+                 | (Id,Stmt,K,Map,K)
+
+  syntax KItem ::= "popx"
+
+  rule <k> (try S1 catch(X) {S2} => S1 ~> popx) ~> K </k>
+       <control>
+         <xstack> .List => ListItem(xstackFrame(X, S2, K, Env, C)) ...</xstack>
+         C
+       </control>
+       <env> Env </env>
+
+  rule <k> popx => .K ...</k>
+       <xstack> ListItem(_) => .List ...</xstack>
+
+  rule <k> throw V:Val; ~> _ => { var X = V; S2 } ~> K </k>
+       <control>
+         <xstack> ListItem(xstackFrame(X, S2, K, Env, C)) => .List ...</xstack>
+         (_ => C)
+       </control>
+       <env> _ => Env </env>
+

Thread spawning needs a new semantics, because we want the child
+thread to also share the object environment with its parent. The new
+semantics of thread spawning will be defined shortly. However,
+interestingly, the other concurrency constructs keep their semantics
+from SIMPLE unchanged.

+
  // TODO(KORE): ..Bag should be . throughout this definition #1772
+  rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag)
+  /*
+  rule (<thread>... <k>.</k> <holds>H</holds> <id>T</id> ...</thread> => .)
+  */
+       <busy> Busy => Busy -Set keys(H) </busy>
+       <terminated>... .Set => SetItem(T) ...</terminated>
+
+  rule <k> join T:Int; => .K ...</k>
+       <terminated>... SetItem(T) ...</terminated>
+
+  rule <k> acquire V:Val; => .K ...</k>
+       <holds>... .Map => V |-> 0 ...</holds>
+       <busy> Busy (.Set => SetItem(V)) </busy>
+    requires (notBool(V in Busy:Set))
+
+  rule <k> acquire V; => .K ...</k>
+       <holds>... V:Val |-> (N:Int => N +Int 1) ...</holds>
+
+  rule <k> release V:Val; => .K ...</k>
+       <holds>... V |-> (N => N:Int -Int 1) ...</holds>
+    requires N >Int 0
+
+  rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds>
+       <busy>... SetItem(V) => .Set ...</busy>
+
+  rule <k> rendezvous V:Val; => .K ...</k>
+       <k> rendezvous V; => .K ...</k>
+

Unchanged auxiliary operations from untyped SIMPLE

+ +
  syntax Stmt ::= mkDecls(Ids,Vals)  [function]
+  rule mkDecls((X:Id, Xs:Ids), (V:Val, Vs:Vals)) => var X=V; mkDecls(Xs,Vs)
+  rule mkDecls(.Ids,.Vals) => {}
+
+  // TODO(KORE): clarify sort inferences #1803
+  syntax Exp ::= lookup(Int)
+  /*
+  syntax KItem ::= lookup(Int)
+  */
+  rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store>
+
+  syntax KItem ::= setEnv(Map)
+  rule <k> setEnv(Env) => .K ...</k>  <env> _ => Env </env>
+  rule (setEnv(_) => .K) ~> setEnv(_)
+  // TODO: How can we make sure that the second rule above applies before the first one?
+  //       Probably we'll deal with this using strategies, eventually.
+
+  syntax Exp ::= lvalue(K)
+  syntax Val ::= loc(Int)
+
+  rule <k> lvalue(X:Id => loc(L)) ...</k> <env>... X |-> L:Int ...</env>
+
+  context lvalue(_::Exp[HOLE::Exps])
+  context lvalue(HOLE::Exp[_::Exps])
+
+  rule lvalue(lookup(L:Int) => loc(L))
+
+
+  syntax Map ::= Int "..." Int "|->" K
+    [function]
+  rule N...M |-> _ => .Map  requires N >Int M
+  rule N...M |-> K => N |-> K (N +Int 1)...M |-> K  requires N <=Int M
+

Changes to the existing untyped SIMPLE semantics

+ +

When we extend a language, sometimes we need to do more than just add
+new language constructs and semantics for them. Sometimes we want to
+also extend the semantics of existing language constructs, in order to
+get more from them.

+

Program initialization

+ +

In SIMPLE, once all the global declarations were processed, the
+function main() was invoked. In KOOL, the global
+declarations are classes, and their specific semantics is given
+shortly; essentially, they are pre-processed one by one and added
+into the class cell structure in the configuration.
+Once all the classes are processed, the computation item
+execute, which was placed right after the program in the
+initial configuration, is reached. In SIMPLE, the program was
+initialized by calling the method main(). In KOOL, the
+program is initialized by creating an object instance of class
+Main. This will also implicitly call the method
+Main() (the Main class constructor). The emptiness
+of the env cell below is just a sanity check, to make sure
+that the user has not declared anything but classes at the top level
+of the program.

+
  syntax KItem ::= "execute"
+  rule <k> execute => new Main(.Exps); </k> <env> .Map </env>
+

The semantics of new (defined below) requires the
+execution of all the class' declarations (and also of its
+superclasses').

+

Object and method closures

+ +

Before we can define the semantics of method application (previously
+called function application in SIMPLE), we need to add two more values
+to the language, namely object and method closures:

+
  syntax Val ::= objectClosure(Id, List)
+               | methodClosure(Id,Int,Ids,Stmt)
+

An object value consists of an objectClosure-wrapped pair
+containing the current class of the object and the environment stack
+of the object. The current class of an object will always be one of
+the classes mapped to an environment in the environment stack of the
+object. A method closure encapsulates the method's parameters and
+code (last two arguments), as well as the object context in which the
+method code should execute. This object context includes the current
+class of the object (the first argument of methodClosure) and
+the object environment stack (located in the object stored at the
+location specified as the second argument of methodClosure).

+

Method application

+ +

KOOL has a complex mechanism to invoke methods, because it allows both
+dynamic method dispatch and methods as first-class-citizen values (the
+latter making it a higher-order language). The invocation mechanism
+will be defined later. What is sufficient to know for now is that
+the two arguments of the application construct eventually reduce to
+values, the first being a method closure and the latter a list of
+values. The semantics of the method closure application is then as
+expected: the local environment and control are stacked, then we
+switch to the method closure's class and object environment and execute
+the method body. The mkDecls construct is the one that came
+with the unchanged semantics of SIMPLE above.

+
  syntax KItem ::= fstackFrame(Map,K,List,K)
+  // TODO(KORE): drop the additional production once parsing issue #1842 is fixed
+                 | (Map,K,K)
+
+  rule <k> methodClosure(Class,OL,Xs,S)(Vs:Vals) ~> K
+           => mkDecls(Xs,Vs) S return; </k>
+       <env> Env => .Map </env>
+       <store>... OL |-> objectClosure(_, EnvStack)...</store>
+     //<br/> // TODO(KORE): support latex annotations #1799
+       <control>
+          <xstack> XS </xstack>
+          <fstack> .List => ListItem(fstackFrame(Env, K, XS, <crntObj> Obj' </crntObj>))
+          ...</fstack>
+          <crntObj> Obj' => <crntClass> Class </crntClass> <envStack> EnvStack </envStack> </crntObj>
+       </control>
+

Spawn

+ +

We want to extend the semantics of spawn to also share the
+current object environment with the child thread, in addition to the
+current environment. This extension will allow us to also use method
+invocations in the spawned statements, which will be thus looked up as
+expected, using dynamic method dispatch. This lookup operation would
+fail if the child thread did not have access to its parent's object
+environment.

+
  rule <thread>...
+         <k> spawn S => !T:Int ...</k>
+         <env> Env </env>
+         <crntObj> Obj </crntObj>
+       ...</thread>
+       (.Bag => <thread>...
+               <k> S </k>
+               <env> Env </env>
+               <id> !T </id>
+               <crntObj> Obj </crntObj>
+             ...</thread>)
+

Semantics of the new KOOL constructs

+ +

Class declaration

+ +

Initially, the classes forming the program are moved into their
+corresponding cells:

+
  rule <k> class Class1 extends Class2 { S } => .K ...</k>
+       <classes>... (.Bag => <classData>
+                            <className> Class1 </className>
+                            <baseClass> Class2 </baseClass>
+                            <declarations> S </declarations>
+                        </classData>)
+       ...</classes>
+

Method declaration

+ +

Like in SIMPLE, method names are added to the environment and bound
+to their code. However, unlike in SIMPLE where each function was
+executed in the same environment, namely the program global
+environment, a method in KOOL needs to be executed into its object's
+environment. Thus, methods evaluate to closures, which encapsulate
+their object's context (i.e., the current class and environment stack
+of the object) in addition to the method's parameters and body. This
+approach to bind method names to method closures in the environment
+will also allow objects to pass their methods to other objects, to
+dynamically change their methods by assigning them other method
+closures, and even to allow all these to be done from other objects.
+This gives the KOOL programmer a lot of power; one should use this
+power wisely, though, because programs can easily become hard to
+understand and reason about if one overuses these features.

+
  rule <k> method F:Id(Xs:Ids) S => .K ...</k>
+       <crntClass> Class:Id </crntClass>
+       <location> OL:Int </location>
+       <env> Env => Env[F <- L] </env>
+       <store>... .Map => L |-> methodClosure(Class,OL,Xs,S) ...</store>
+       <nextLoc> L => L +Int 1 </nextLoc>
+

New

+ +

The semantics of new consists of two actions: memory
+allocation for the new object and execution of the corresponding
+constructor. Then the created object is returned as the result of the
+new operation; the value returned by the constructor, if any,
+is discarded. The current environment and object are stored onto the
+stack and recovered after new (according to the semantics of
+return borrowed from SIMPLE, when the statement
+return this; in the rule below is reached and evaluated),
+because the object creation part of new will destroy them.
+The rule below also initializes the object creation process by
+emptying the local environment and the current object, and allocating
+a location in the store where the created object will be eventually
+stored (this is what the storeObj task after the object
+creation task in the rule below will do—its rule is defined
+shortly). The location where the object will be stored is also made
+available in the crntObj cell, so that method closures can
+refer to it (see rule above).

+
  syntax KItem ::= "envStackFrame" "(" Id "," Map ")"
+
+  rule <k> new Class:Id(Vs:Vals) ~> K
+           => create(Class) ~> storeObj ~> Class(Vs); return this; </k>
+       <env> Env => .Map </env>
+       <nextLoc> L:Int => L +Int 1 </nextLoc>
+     //<br/> // TODO(KORE): support latex annotations #1799
+       <control> <xstack> XS </xstack>
+         <crntObj> Obj
+                   => <crntClass> Object </crntClass>
+                      <envStack> ListItem(envStackFrame(Object, .Map)) </envStack>
+                      <location> L </location>
+         </crntObj>
+         <fstack> .List => ListItem(fstackFrame(Env, K, XS, <crntObj> Obj </crntObj>)) ...</fstack>
+       </control>
+

The creation of a new object (the memory allocation part only) is
+a recursive process, requiring us to first create an object for the
+superclass. A memory object representation is a layered structure:
+for each class on the path from the instance class to the root of the
+hierarchy there is a layer including the memory allocated for the
+members (both fields and methods) of that class.

+
  syntax KItem ::= create(Id)
+
+  rule <k> create(Class:Id)
+           => create(Class1) ~> setCrntClass(Class) ~> S ~> addEnvLayer ...</k>
+       <className> Class </className>
+       <baseClass> Class1:Id </baseClass>
+       <declarations> S </declarations>
+
+  rule <k> create(Object) => .K ...</k>
+

The next operation sets the current class of the current object.
+This needs to be done at each layer, because the current class
+of the object is enclosed as part of the method closures (see the
+semantics of method declarations above).

+
  syntax KItem ::= setCrntClass(Id)
+
+  rule <k> setCrntClass(C) => .K ...</k>
+       <crntClass> _ => C </crntClass>
+

The next operation adds a new tagged environment layer to the
+current object and gets ready for the next layer by clearing the
+environment (note that create expects the environment to be
+empty).

+
  syntax KItem ::= "addEnvLayer"
+
+  rule <k> addEnvLayer => .K ...</k>
+       <env> Env => .Map </env>
+       <crntClass> Class:Id </crntClass>
+       <envStack> .List => ListItem(envStackFrame(Class, Env)) ...</envStack>
+

The following operation stores the created object at the location
+reserved by new. Note that the location reserved by
+new was temporarily stored in the crntObj cell
+precisely for this purpose. Now that the newly created object is
+stored at its location and that all method closures are aware of it,
+the location is unnecessary and thus we delete it from the
+crntObj cell.

+
  syntax KItem ::= "storeObj"
+
+  rule <k> storeObj => .K ...</k>
+       <crntObj> <crntClass> CC </crntClass> <envStack> ES </envStack> (<location> L:Int </location> => .Bag) </crntObj>
+       <store>... .Map => L |-> objectClosure(CC, ES) ...</store>
+
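
To illustrate (this is only a depiction derived from the rules above, with
+EnvA and EnvB standing for the member environments built while processing
+the two class bodies), creating an instance of a class B that extends a
+class A leaves in the store an object of the form

+
  objectClosure(B, ListItem(envStackFrame(B, EnvB))
+                   ListItem(envStackFrame(A, EnvA))
+                   ListItem(envStackFrame(Object, .Map)))
+

with the most concrete layer at the front of the list, which is the order
+that the member lookup operations defined below rely on.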

Self reference

+ +

The semantics of this is straightforward: evaluate to the
+current object.

+
  rule <k> this => objectClosure(CC, ES) ...</k>
+       <crntObj> <crntClass> CC </crntClass> <envStack> ES </envStack> </crntObj>
+

Object member access

+ +

We can access an object member (field or method) either explicitly,
+using the construct e.x, or implicitly, using only the member
+name x directly. The semantics borrowed from SIMPLE
+already looks up a plain name in the local environment. The first rule
+below reduces implicit member access to explicit access when the name
+cannot be found in the local environment. There are two cases to
+analyze for explicit object member access, depending upon whether the
+object is a proper object or just a redirection to the parent
+class via the super construct. In the first case, we
+evaluate the object expression and look up the member starting with the
+current class (static scoping). Note the use of the conditional
+evaluation context. In the second case, we just look up the member
+starting with the superclass of the current class. In both cases,
+the lookupMember task eventually yields a lookup(L)
+task for some appropriate location L, which will be further
+solved with the corresponding rule borrowed from SIMPLE. Note that the
+current object is not altered by super, so future method
+invocations see the entire object, as needed for dynamic method dispatch.

+
  rule <k> X:Id => this . X ...</k> <env> Env:Map </env>
+    requires notBool(X in keys(Env))
+
+  context HOLE._::Id requires (HOLE =/=K super)
+
+// TODO: explain how Assoc matching has been replaced with two rules here.
+// Maybe also improve it a bit.
+
+/*  rule objectClosure(<crntClass> Class:Id </crntClass>
+                     <envStack>... envStackFrame(Class,EnvC) EStack </envStack>)
+       . X:Id
+    => lookupMember(envStackFrame(Class,EnvC) EStack, X) */
+
+  rule objectClosure(Class:Id, ListItem(envStackFrame(Class,Env)) EStack)
+       . X:Id
+    => lookupMember(ListItem(envStackFrame(Class,Env)) EStack, X)
+  rule objectClosure(Class:Id, (ListItem(envStackFrame(Class':Id,_)) => .List) _)
+       . _X:Id
+    requires Class =/=K Class'
+
+/*  rule <k> super . X => lookupMember(EStack, X) ...</k>
+       <crntClass> Class </crntClass>
+       <envStack>... envStackFrame(Class,EnvC) EStack </envStack> */
+  rule <k> super . X => lookupMember(EStack, X) ...</k>
+       <crntClass> Class:Id </crntClass>
+       <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack>
+  rule <k> super . _X ...</k>
+       <crntClass> Class </crntClass>
+       <envStack> ListItem(envStackFrame(Class':Id,_)) => .List ...</envStack>
+    requires Class =/=K Class'
+
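
The small program below (invented for illustration; not part of the
+definition) exercises both forms of member access. Under the rules above it
+should print B A: the call to greet inside who is dynamically
+dispatched to B's definition, while super.greet starts the lookup in A's
+layer:

+
class A {
+  method A() { }
+  method greet() { return "A"; }
+  method who()   { return greet(); }      // implicit access: dynamic dispatch
+}
+
+class B extends A {
+  method B() { }
+  method greet() { return "B"; }
+  method parentGreet() { return super.greet(); }  // lookup starts in A's layer
+}
+
+class Main {
+  method Main() {
+    var b;
+    b = new B();
+    print(b.who(), " ", b.parentGreet(), "\n");
+  }
+}
+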

Method invocation

+ +

Unlike in SIMPLE, in KOOL application is declared strict only in its
+second argument. That is because we want to ensure dynamic method
+dispatch when the first argument is a method access. As a
+consequence, we need to consider all the cases of interest for the
+first argument and to explicitly say what to do in each case. In all
+cases except for method access in a proper object (i.e., not
+super), we want the same behavior for the first argument as
+if it were not in a method invocation position. When it is a member
+access (the third rule below), we look it up starting with the
+instance class of the corresponding object. This ensures dynamic
+dispatch for methods; it actually dynamically dispatches field
+accesses, too, which is correct in KOOL, because one can assign method
+closures to fields and the field appears in a method invocation
+context. The last context declaration below says that method
+applications or array accesses are also allowed as the first argument of
+applications; that is because methods are allowed to return methods
+and arrays are allowed to hold methods in KOOL, since it is
+higher-order. If that is the case, then we want to evaluate the
+method call or the array access.

+
  rule <k> (X:Id => V)(_:Exps) ...</k>
+       <env>... X |-> L ...</env>
+       <store>... L |-> V:Val ...</store>
+
+  rule <k> (X:Id => this . X)(_:Exps) ...</k>
+       <env> Env </env>
+    requires notBool(X in keys(Env))
+
+  context HOLE._::Id(_) requires HOLE =/=K super
+
+  rule (objectClosure(_, EStack) . X
+    => lookupMember(EStack, X:Id))(_:Exps)
+
+/*  rule <k> (super . X
+            => lookupMember(EStack,X))(_:Exps)...</k>
+       <crntClass> Class </crntClass>
+       <envStack>... envStackFrame(Class,_) EStack </envStack> */
+  rule <k> (super . X
+            => lookupMember(EStack,X))(_:Exps)...</k>
+       <crntClass> Class </crntClass>
+       <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack>
+  rule <k> (super . _X)(_:Exps) ...</k>
+       <crntClass> Class </crntClass>
+       <envStack> ListItem(envStackFrame(Class':Id,_)) => .List ...</envStack>
+    requires Class =/=K Class'
+
+  // TODO(KORE): fix getKLabel #1801
+  rule (A:Exp(B:Exps))(C:Exps) => A(B) ~> #freezerFunCall(C)
+  rule (A:Exp[B:Exps])(C:Exps) => A[B] ~> #freezerFunCall(C)
+  rule V:Val ~> #freezerFunCall(C:Exps) => V(C)
+  syntax KItem ::= "#freezerFunCall" "(" K ")"
+  /*
+  context HOLE(_:Exps)
+    when getKLabel(HOLE) ==K #klabel(`_(_)`) orBool getKLabel(HOLE) ==K #klabel(`_[_]`)
+  */
+
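
For example (an invented, illustrative program), the last rules above let a
+method call appear as the first argument of another call, so a method
+returned by getOp can be applied immediately:

+
class Calc {
+  method Calc() { }
+  method add(x, y) { return x + y; }
+  method getOp()   { return add; }     // returns a method closure
+}
+
+class Main {
+  method Main() {
+    var c;
+    c = new Calc();
+    print(c.getOp()(3, 4), "\n");      // should print 7
+  }
+}
+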

Eventually, each of the rules above produces a lookup(L)
+task as a replacement for the method. When that happens, we just
+look up the value at location L:

+
  rule <k> (lookup(L) => V)(_:Exps) ...</k>  <store>... L |-> V:Val ...</store>
+

The value V looked up above is expected to be a method closure,
+in which case the semantics of method application given above will
+apply. Otherwise, the execution will get stuck.

+

Instance Of

+ +

The instanceOf construct searches the object's environment stack for a
+layer corresponding to the desired class. It returns true if it
+can find the class and false otherwise; it only gets stuck when its first
+argument does not evaluate to an object.

+
  rule objectClosure(_, ListItem(envStackFrame(C,_)) _)
+       instanceOf C => true
+
+  rule objectClosure(_, (ListItem(envStackFrame(C,_)) => .List) _)
+       instanceOf C'  requires C =/=K C'
+//TODO: remove the sort cast ::Id of C above, when sort inference bug fixed
+
+  rule objectClosure(_, .List) instanceOf _ => false
+
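
For instance (an invented, illustrative program), an object contains a layer
+for each class on its inheritance path, but for no other class:

+
class Shape  { method Shape()  { } }
+class Circle extends Shape { method Circle() { } }
+
+class Main {
+  method Main() {
+    var c;
+    c = new Circle();
+    if (c instanceOf Shape) { print("a Shape\n"); }   // taken: a Shape layer exists
+    if (c instanceOf Main)  { print("a Main\n"); }    // not taken: no Main layer
+  }
+}
+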

Cast

+ +

In untyped KOOL, we prefer not to check the validity of casting. In
+other words, any cast is allowed on any object, simply changing the
+current class of the object to the desired class. The execution will
+get stuck later if one attempts to access a field which is not
+available. Moreover, the execution may complete successfully even
+in the presence of invalid casts, provided that each accessed member
+during the current execution is, or happens to be, available.

+
  rule (C) objectClosure(_ , EnvStack) => objectClosure(C ,EnvStack)
+
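
For example (an invented, illustrative program), the cast below is bogus,
+yet under the rule above the program still completes, because the only
+member accessed through the cast object happens to exist:

+
class A { method A() { }  method m() { return 1; } }
+class B { method B() { }  method m() { return 2; } }
+
+class Main {
+  method Main() {
+    var a;
+    a = new A();
+    print(((B) a).m(), "\n");   // the cast only relabels the current class;
+  }                             // m is still found in A's layer, so this
+}                               // should print 1
+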

KOOL-specific auxiliary declarations and operations

+ +

Here we define all the auxiliary constructs used in the above
+KOOL-specific semantics (those used in the SIMPLE fragment
+have already been defined in a corresponding section above).

+

Objects as lvalues

+ +

The machinery borrowed from the semantics of SIMPLE allows us
+to enrich the set of lvalues, thus providing new means to assign
+values to locations. In KOOL, we want object member names to be
+lvalues, so that we can assign values to them using the already
+existing machinery. The first rule below ensures that the object is
+always explicit, the evaluation context forces the object to be
+evaluated, and finally the second rule initiates the lookup for the
+member's location based on the current class of the object.

+
  rule <k> lvalue(X:Id => this . X) ...</k>  <env> Env </env>
+    requires notBool(X in keys(Env))
+
+  context lvalue((HOLE . _)::Exp)
+
+/*  rule lvalue(objectClosure(<crntClass> C </crntClass>
+                            <envStack>... envStackFrame(C,EnvC) EStack </envStack>)
+              . X
+              => lookupMember(<envStack> envStackFrame(C,EnvC) EStack </envStack>,
+                              X))  */
+  rule lvalue(objectClosure(Class, ListItem(envStackFrame(Class,Env)) EStack)
+              . X
+              => lookupMember(ListItem(envStackFrame(Class,Env)) EStack,
+                              X))
+  rule lvalue(objectClosure(Class, (ListItem(envStackFrame(Class':Id,_)) => .List) _)
+              . _X)
+    requires Class =/=K Class'
+
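
For instance (an invented, illustrative program), the rules above let us
+assign directly to a member of another object:

+
class Counter {
+  var n;
+  method Counter() { n = 0; }
+  method get() { return n; }
+}
+
+class Main {
+  method Main() {
+    var c;
+    c = new Counter();
+    c.n = 42;                  // c.n is used as an lvalue
+    print(c.get(), "\n");      // should print 42
+  }
+}
+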

Lookup member

+ +

The lookupMember operation searches for the given member in the given
+environment stack, starting with the most concrete class and going up
+the hierarchy.

+
  // TODO(KORE): clarify sort inferences #1803
+  syntax Exp ::= lookupMember(List, Id)  [function]
+  /*
+  syntax KItem ::= lookupMember(EnvStackCell,Id)  [function]
+  */
+
+//  rule lookupMember(<envStack> envStackFrame(_, <env>... X|->L ...</env>) ...</envStack>, X)
+//    => lookup(L)
+  rule lookupMember(ListItem(envStackFrame(_, X|->L _)) _, X)
+    => lookup(L)
+
+//  rule lookupMember(<envStack> envStackFrame(_, <env> Env </env>) => .List ...</envStack>, X)
+//    when notBool(X in keys(Env))
+  rule lookupMember(ListItem(envStackFrame(_, Env)) Rest, X) =>
+       lookupMember(Rest, X)
+    requires notBool(X in keys(Env))
+//TODO: beautify the above
+
+endmodule
+

Go to Lesson 2, KOOL typed dynamic.

+

KOOL — Typed — Dynamic

+ +

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+ +

This is the K dynamic semantics of the typed KOOL language. It is
+very similar to the semantics of the untyped KOOL, the difference
+being that we now check the typing policy dynamically. Since we
+now have to declare the types of variables and methods, we adopt a syntax
+for these declarations which is close to Java's. Like in the semantics of
+untyped KOOL, where we borrowed almost all the semantics of untyped
+SIMPLE, we also borrow much of the semantics of
+dynamically typed SIMPLE here. We will highlight the differences
+between the dynamically typed and the untyped KOOL as we proceed with
+the semantics. In general, the type policy of the typed KOOL language
+is similar to that of Java. You may find it useful to also read
+the discussion in the preamble of the static semantics of typed KOOL
+before proceeding.

+
module KOOL-TYPED-DYNAMIC-SYNTAX
+  imports DOMAINS-SYNTAX
+

Syntax

+ +

Like for the untyped KOOL language, the syntax of typed KOOL extends
+that of typed SIMPLE with object-oriented constructs.
+The syntax below was produced by copying and modifying/extending the
+syntax of dynamically typed SIMPLE. In fact, the only change we made
+to the existing syntax of dynamically typed SIMPLE was to change the
+strictness of the application construct like in untyped KOOL, from
+strict to strict(2) (because application is not
+strict in the first argument anymore due to dynamic method dispatch).
+The KOOL-specific syntactic extensions are identical to those in
+untyped KOOL.

+
  syntax Id ::= "Object" [token] | "Main" [token]
+

Types

+ +
  syntax Type ::= "void" | "int" | "bool" | "string"
+                | Id                              // KOOL class
+                | Type "[" "]"
+                | "(" Type ")"           [bracket]
+                > Types "->" Type
+  // TODO(KORE): drop klabel once issues #1913 are fixed
+  syntax Types ::= List{Type,","}   [symbol(_,_::Types)]
+  /*
+  syntax Types ::= List{Type,","}
+  */
+

Declarations

+ +
  syntax Param ::= Type Id
+  syntax Params ::= List{Param,","}
+
+  syntax Stmt ::= Type Exps ";" [avoid]
+                | Type Id "(" Params ")" Block    // stays like in typed SIMPLE
+                | "class" Id Block                // KOOL
+                | "class" Id "extends" Id Block   // KOOL
+

Expressions

+ +
  syntax Exp ::= Int | Bool | String | Id
+               | "this"                                 // KOOL
+               | "super"                                // KOOL
+               | "(" Exp ")"             [bracket]
+               | "++" Exp
+               | Exp "instanceOf" Id     [strict(1)]    // KOOL
+               | "(" Id ")" Exp          [strict(2)]    // KOOL  cast
+               | "new" Id "(" Exps ")"   [strict(2)]    // KOOL
+               | Exp "." Id                             // KOOL
+               > Exp "[" Exps "]"        [strict]
+               > Exp "(" Exps ")"        [strict(2)]    // was strict in SIMPLE
+               | "-" Exp                 [strict]
+               | "sizeOf" "(" Exp ")"    [strict]
+               | "read" "(" ")"
+               > left:
+                 Exp "*" Exp             [strict, left]
+               | Exp "/" Exp             [strict, left]
+               | Exp "%" Exp             [strict, left]
+               > left:
+                 Exp "+" Exp             [strict, left]
+               | Exp "-" Exp             [strict, left]
+               > non-assoc:
+                 Exp "<" Exp             [strict, non-assoc]
+               | Exp "<=" Exp            [strict, non-assoc]
+               | Exp ">" Exp             [strict, non-assoc]
+               | Exp ">=" Exp            [strict, non-assoc]
+               | Exp "==" Exp            [strict, non-assoc]
+               | Exp "!=" Exp            [strict, non-assoc]
+               > "!" Exp                 [strict]
+               > left:
+                 Exp "&&" Exp            [strict(1), left]
+               | Exp "||" Exp            [strict(1), left]
+               > "spawn" Block
+               > Exp "=" Exp             [strict(2), right]
+
+  syntax Exps ::= List{Exp,","}          [strict, overload(exps)]
+  syntax Val
+  syntax Vals ::= List{Val,","}          [overload(exps)]
+

Statements

+ +
  syntax Block ::= "{" "}"
+                | "{" Stmt "}"
+
+  syntax Stmt ::= Block
+                | Exp ";"                               [strict]
+                | "if" "(" Exp ")" Block "else" Block   [avoid, strict(1)]
+                | "if" "(" Exp ")" Block                [macro]
+                | "while" "(" Exp ")" Block
+                | "for" "(" Stmt Exp ";" Exp ")" Block  [macro]
+                | "print" "(" Exps ")" ";"              [strict]
+                | "return" Exp ";"                      [strict]
+                | "return" ";"
+                | "try" Block "catch" "(" Param ")" Block
+                | "throw" Exp ";"                       [strict]
+                | "join" Exp ";"                        [strict]
+                | "acquire" Exp ";"                     [strict]
+                | "release" Exp ";"                     [strict]
+                | "rendezvous" Exp ";"                  [strict]
+
+  syntax Stmt ::= Stmt Stmt                          [right]
+

Desugaring macros

+ +
  rule if (E) S => if (E) S else {}
+  rule for(Start Cond; Step) {S::Stmt} => {Start while(Cond){S Step;}}
+  rule T::Type E1::Exp, E2::Exp, Es::Exps; => T E1; T E2, Es;           [anywhere]
+  rule T::Type X::Id = E; => T X; X = E;                                [anywhere]
+
+  rule class C:Id S => class C extends Object S                     // KOOL
+
+endmodule
+

Semantics

+ +

We first discuss the new configuration, then we include the semantics of
+the constructs borrowed from SIMPLE which stay unchanged, then those
+whose semantics had to change, and finally the semantics of the
+KOOL-specific constructs.

+
module KOOL-TYPED-DYNAMIC
+  imports KOOL-TYPED-DYNAMIC-SYNTAX
+  imports DOMAINS
+

Configuration

+ +

The configuration of dynamically typed KOOL is almost identical to
+that of its untyped variant. The only difference is the
+returnType cell, inside the control cell, whose role is to
+hold the expected return type of the invoked method. That is because
+we want to dynamically check that the value that a method returns has
+the expected type.

+
  // the syntax declarations below are required because the sorts are
+  // referenced directly by a production and, because of the way KIL to KORE
+  // is implemented, the configuration syntax is not available yet
+  // should simply work once KIL is removed completely
+  // check other definitions for this hack as well
+  syntax EnvCell
+  syntax ControlCellFragment
+  syntax EnvStackCell
+  syntax CrntObjCellFragment
+
+  configuration <T color="red">
+                  <threads color="orange">
+                    <thread multiplicity="*" type="Set" color="yellow">
+                      <k color="green"> ($PGM:Stmt ~> execute) </k>
+                    //<br/> // TODO(KORE): support latex annotations #1799
+                      <control color="cyan">
+                        <fstack color="blue"> .List </fstack>
+                        <xstack color="purple"> .List </xstack>
+                        <returnType color="LimeGreen"> void </returnType>  // KOOL
+                      //<br/> // TODO(KORE): support latex annotations #1799
+                        <crntObj color="Fuchsia">  // KOOL
+                           <crntClass> Object </crntClass>
+                           <envStack> .List </envStack>
+                           <location multiplicity="?"> .K </location>
+                        </crntObj>
+                      </control>
+                    //<br/> // TODO(KORE): support latex annotations #1799
+                      <env color="violet"> .Map </env>
+                      <holds color="black"> .Map </holds>
+                      <id color="pink"> 0 </id>
+                    </thread>
+                  </threads>
+                //<br/> // TODO(KORE): support latex annotations #1799
+                  <store color="white"> .Map </store>
+                  <busy color="cyan">.Set </busy>
+                  <terminated color="red"> .Set </terminated>
+                  <input color="magenta" stream="stdin"> .List </input>
+                  <output color="brown" stream="stdout"> .List </output>
+                  <nextLoc color="gray"> 0 </nextLoc>
+                //<br/> // TODO(KORE): support latex annotations #1799
+                  <classes color="Fuchsia">        // KOOL
+                     <classData multiplicity="*" type="Map" color="Fuchsia">
+                        <className color="Fuchsia"> Main </className>
+                        <baseClass color="Fuchsia"> Object </baseClass>
+                        <declarations color="Fuchsia"> .K </declarations>
+                     </classData>
+                  </classes>
+                </T>
+

Unchanged semantics from dynamically typed SIMPLE

+ +

The semantics below is taken over from dynamically typed SIMPLE
+unchanged. Like for untyped KOOL, the semantics of function/method
+declaration and invocation, and of program initialization, needs to
+change. Moreover, due to subtyping, the semantics of several imported
+SIMPLE constructs can be made more general, such as that of the
+return statement, that of the assignment, and that of the exceptions.
+We removed all these from the imported semantics of SIMPLE below and
+give their modified semantics right after, together with the extended
+semantics of thread spawning (which is identical to that of untyped
+KOOL).

+
  syntax Val ::= Int | Bool | String
+               | array(Type,Int,Int)
+  syntax Exp ::= Val
+  syntax Exps ::= Vals
+  syntax KResult ::= Val
+  syntax KResult ::= Vals
+
+
+  syntax KItem ::= undefined(Type)
+
+  rule <k> T:Type X:Id; => .K ...</k>
+       <env> Env => Env[X <- L] </env>
+       <store>... .Map => L |-> undefined(T) ...</store>
+       <nextLoc> L:Int => L +Int 1 </nextLoc>
+
+
+  rule <k> T:Type X:Id[N:Int]; => .K ...</k>
+       <env> Env => Env[X <- L] </env>
+       <store>... .Map => L |-> array(T, L +Int 1, N)
+                          (L +Int 1)...(L +Int N) |-> undefined(T) ...</store>
+       <nextLoc> L:Int => L +Int 1 +Int N </nextLoc>
+    requires N >=Int 0
+
+  context _:Type _::Exp[HOLE::Exps];
+
+
+  syntax Id ::= "$1" [token] | "$2" [token]
+  rule T:Type X:Id[N1:Int, N2:Int, Vs:Vals];
+    => T[]<Vs> X[N1];
+       {
+         T[][]<Vs> $1=X;
+         for(int $2=0; $2 <= N1 - 1; ++$2) {
+           T X[N2,Vs];
+           $1[$2] = X;
+         }
+       }
+
+
+  rule <k> X:Id => V ...</k>
+       <env>... X |-> L ...</env>
+       <store>... L |-> V:Val ...</store>
+
+
+  context ++(HOLE => lvalue(HOLE))
+  rule <k> ++loc(L) => I +Int 1 ...</k>
+       <store>... L |-> (I:Int => I +Int 1) ...</store>
+
+
+  rule I1 + I2 => I1 +Int I2
+  rule Str1 + Str2 => Str1 +String Str2
+  rule I1 - I2 => I1 -Int I2
+  rule I1 * I2 => I1 *Int I2
+  rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0
+  rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0
+  rule - I => 0 -Int I
+  rule I1 < I2 => I1 <Int I2
+  rule I1 <= I2 => I1 <=Int I2
+  rule I1 > I2 => I1 >Int I2
+  rule I1 >= I2 => I1 >=Int I2
+  rule V1:Val == V2:Val => V1 ==K V2
+  rule V1:Val != V2:Val => V1 =/=K V2
+  rule ! T => notBool(T)
+  rule true  && E => E
+  rule false && _ => false
+  rule true  || _ => true
+  rule false || E => E
+
+
+  rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs]
+    [anywhere]
+
+  rule array(_:Type, L:Int, M:Int)[N:Int] => lookup(L +Int N)
+    requires N >=Int 0 andBool N <Int M  [anywhere]
+
+  rule sizeOf(array(_,_,N)) => N
+
+
+  syntax Val ::= nothing(Type)
+  rule <k> return; => return nothing(T); ...</k> <returnType> T </returnType>
+
+
+  rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input>
+
+
+  context (HOLE => lvalue(HOLE)) = _
+
+
+  rule {} => .K
+  rule <k> { S } => S ~> setEnv(Env) ...</k>  <env> Env </env>
+
+
+  rule S1:Stmt S2:Stmt => S1 ~> S2
+
+
+  rule _:Val; => .K
+
+
+  rule if ( true) S else _ => S
+  rule if (false) _ else S => S
+
+
+  rule while (E) S => if (E) {S while(E)S}
+
+
+  rule <k> print(V:Val, Es => Es); ...</k> <output>... .List => ListItem(V) </output>
+    requires typeOf(V) ==K int orBool typeOf(V) ==K string
+  rule print(.Vals); => .K
+
+
+  rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag)
+       <busy> Busy => Busy -Set keys(H) </busy>
+       <terminated>... .Set => SetItem(T) ...</terminated>
+
+  rule <k> join T:Int; => .K ...</k>
+       <terminated>... SetItem(T) ...</terminated>
+
+  rule <k> acquire V:Val; => .K ...</k>
+       <holds>... .Map => V |-> 0 ...</holds>
+       <busy> Busy (.Set => SetItem(V)) </busy>
+    requires (notBool(V in Busy:Set))
+
+  rule <k> acquire V; => .K ...</k>
+       <holds>... V:Val |-> (N:Int => N +Int 1) ...</holds>
+
+  rule <k> release V:Val; => .K ...</k>
+       <holds>... V |-> (N => N:Int -Int 1) ...</holds>
+    requires N >Int 0
+
+  rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds>
+       <busy>... SetItem(V) => .Set ...</busy>
+
+  rule <k> rendezvous V:Val; => .K ...</k>
+       <k> rendezvous V; => .K ...</k>
+

Unchanged auxiliary operations from dynamically typed SIMPLE

+ +
  syntax Stmt ::= mkDecls(Params,Vals)  [function]
+  rule mkDecls((T:Type X:Id, Ps:Params), (V:Val, Vs:Vals))
+    => T X=V; mkDecls(Ps,Vs)
+  rule mkDecls(.Params,.Vals) => {}
+
+  syntax Exp ::= lookup(Int)
+  rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store>
+
+  syntax KItem ::= setEnv(Map)
+  rule <k> setEnv(Env) => .K ...</k>  <env> _ => Env </env>
+  rule (setEnv(_) => .K) ~> setEnv(_)
+
+  syntax Exp ::= lvalue(K)
+  syntax Val ::= loc(Int)
+  rule <k> lvalue(X:Id => loc(L)) ...</k>  <env>... X |-> L:Int ...</env>
+
+  context lvalue(_::Exp[HOLE::Exps])
+  context lvalue(HOLE::Exp[_::Exps])
+
+  rule lvalue(lookup(L:Int) => loc(L))
+
+  syntax Type ::= Type "<" Vals ">"  [function]
+  rule T:Type<_,Vs:Vals> => T[]<Vs>
+  rule T:Type<.Vals> => T
+
+  syntax Map ::= Int "..." Int "|->" K [function]
+  rule N...M |-> _ => .Map  requires N >Int M
+  rule N...M |-> K => N |-> K (N +Int 1)...M |-> K  requires N <=Int M
+
+  syntax Type ::= typeOf(K)  [function]
+  rule typeOf(_:Int) => int
+  rule typeOf(_:Bool) => bool
+  rule typeOf(_:String) => string
+  rule typeOf(array(T,_,_)) => (T[])
+  rule typeOf(undefined(T)) => T
+  rule typeOf(nothing(T)) => T
+
+  syntax Types ::= getTypes(Params)  [function]
+  rule getTypes(T:Type _:Id) => T, .Types
+  rule getTypes(T:Type _:Id, P, Ps) => T, getTypes(P,Ps)
+  rule getTypes(.Params) => void, .Types
+

Changes to the existing dynamically typed SIMPLE semantics

+ +

We extend/change the semantics of several SIMPLE constructs in order
+to take advantage of the richer KOOL semantic infrastructure and thus
+get more from the existing SIMPLE constructs.

+

Program initialization

+ +

Like in untyped KOOL.

+
  syntax KItem ::= "execute"
+  rule <k> execute => new Main(.Exps); </k> <env> .Map </env>
+

Method application

+ +

The only change to untyped KOOL's values is that method closures are
+now typed (their first argument holds their type):

+
 syntax Val ::= objectClosure(Id,List)
+              | methodClosure(Type,Id,Int,Params,Stmt)
+

The type held by a method closure will be the entire type of the
+method, not only its result type as in the lambda-closures of typed
+SIMPLE. The reason for this change comes from the need to
+dynamically upcast values when passed to contexts where values of
+superclass types are expected; since we want method closures to be
+first-class-citizen values in our language, we have to be able to
+dynamically upcast them, and in order to do that elegantly it is
+convenient to store the entire "current type" of the method closure
+instead of just its result type. Note that this was unnecessary in
+the semantics of the dynamically typed SIMPLE language.

+

Method closure application needs to also set a new return type in
+the returnType cell, like in dynamically typed SIMPLE, in order
+for the values returned by its body to be checked against the return
+type of the method. To do this correctly, we also need to stack the
+current contents of the returnType cell and then pop it when the
+method returns. We have to do the same with the current object
+environment, so we group them together in the stack frame.

+
  syntax KItem ::= fstackFrame(Map, K, List, Type, K)
+
+  rule <k> methodClosure(_->T,Class,OL,Ps,S)(Vs:Vals) ~> K
+           => mkDecls(Ps,Vs) S return; </k>
+       <env> Env => .Map </env>
+       <store>... OL |-> objectClosure(_, EStack)...</store>
+     //<br/> // TODO(KORE): support latex annotations #1799
+       <control>
+          <fstack> .List => ListItem(fstackFrame(Env, K, XS, T', <crntObj> Obj' </crntObj>)) ...</fstack>
+          <xstack> XS </xstack>
+          <returnType> T' => T </returnType>
+          <crntObj> Obj' => <crntClass> Class </crntClass> <envStack> EStack </envStack> </crntObj>
+       </control>
+

At method return, we have to check that the type of the returned
+value is a subtype of the expected return type. Moreover, if that is
+the case, then we also upcast the returned value to the
+expected type. The computation item unsafeCast(V,T) changes
+the type of V to T without any additional checks; however, it only
+does so when V is an object or a method closure; otherwise it returns V
+unchanged.

+
  rule <k> return V:Val; ~> _
+           => subtype(typeOf(V), T) ~> true? ~> unsafeCast(V, T) ~> K
+       </k>
+       <control>
+         <fstack> ListItem(fstackFrame(Env, K, XS, RT, <crntObj> CO </crntObj>)) => .List ...</fstack>
+         <xstack> _ => XS </xstack>
+         <returnType> T:Type => RT </returnType>
+         <crntObj> _ => CO </crntObj>
+       </control>
+       <env> _ => Env </env>
+
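
For example (an invented, illustrative program; constructors are declared
+here with result type void, whose returned value new discards anyway), the
+method make declares return type Shape but returns a Circle; the subtype
+check succeeds and the returned object is upcast to Shape:

+
class Shape  { void Shape()  { } }
+class Circle extends Shape { void Circle() { } }
+
+class Main {
+  Shape make() {
+    return new Circle();   // the value has type Circle, a subtype of Shape,
+  }                        // so it is accepted and upcast to Shape
+  void Main() {
+    Shape s;
+    s = make();
+  }
+}
+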

Assignment

+ +

Typed KOOL allows assigning values of a subtype to lvalues of a
+supertype. The semantics of assignment below is similar in spirit to
+dynamically typed SIMPLE's, but a check is performed that the assigned
+value's type is a subtype of the location's type. If that is the
+case, then the assigned value is returned as a result and stored, but
+it is upcast appropriately first, so the context will continue to see
+a value of the expected type of the location. Note that the type of a
+location is implicit in the type of its contents and it never changes
+during the execution of a program; its type is assigned when the
+location is allocated and initialized, and then only type-preserving
+values are allowed to be stored in each location.

+
  rule <k> loc(L) = V:Val
+           => subtype(typeOf(V),typeOf(V')) ~> true?
+              ~> unsafeCast(V, typeOf(V')) ...</k>
+       <store>... L |-> (V' => unsafeCast(V, typeOf(V'))) ...</store>
+
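
For instance (an illustrative fragment, assuming classes Shape and Circle
+with Circle extends Shape, as in the example above), assignments in the
+subtype direction are accepted and upcast, while the opposite direction gets
+the computation stuck on the true? guard:

+
  // fragment of a method body (illustrative)
+  Shape s;
+  Circle c;
+  s = new Circle();   // accepted: Circle is a subtype of Shape; the stored
+                      // value is upcast to Shape
+  c = new Shape();    // stuck: subtype(Shape, Circle) is false, so the
+                      // true? guard is never discharged
+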

Typed exceptions

+ +

Exceptions are propagated now until a catch that can handle them is
+encountered.

+
  syntax KItem ::= xstackFrame(Param, Stmt, K, Map, K)
+  syntax KItem ::= "popx"
+
+  rule <k> (try S1 catch(P) S2 => S1 ~> popx) ~> K </k>
+       <control>
+         <xstack> .List => ListItem(xstackFrame(P, S2, K, Env, C)) ...</xstack>
+         C
+       </control>
+       <env> Env </env>
+
+  rule <k> popx => .K ...</k>
+       <xstack> ListItem(_) => .List ...</xstack>
+
+  rule <k> throw V:Val; ~> _
+        => if (subtype(typeOf(V),T)) { T X = V; S2 } else { throw V; } ~> K
+       </k>
+       <control>
+         <xstack> ListItem(xstackFrame(T:Type X:Id, S2, K, Env, C)) => .List ...</xstack>
+         (_ => C)
+       </control>
+       <env> _ => Env </env>
+
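
For example (an invented, illustrative program), a thrown object is
+propagated past handlers whose parameter type it does not match at runtime:

+
class Ex1 { void Ex1() { } }
+class Ex2 { void Ex2() { } }
+
+class Main {
+  void Main() {
+    try {
+      try { throw new Ex2(); }
+      catch (Ex1 e) { print("inner\n"); }   // skipped: Ex2 is not a subtype of Ex1
+    } catch (Ex2 e) { print("outer\n"); }   // should print outer
+  }
+}
+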

Spawn

+ +

Like in untyped KOOL.

+
  rule <thread>...
+         <k> spawn S => !T:Int ...</k>
+         <env> Env </env>
+         <crntObj> Obj </crntObj>
+       ...</thread>
+       (.Bag => <thread>...
+               <k> S </k>
+               <env> Env </env>
+               <id> !T </id>
+               <crntObj> Obj </crntObj>
+             ...</thread>)
+

Semantics of the new KOOL constructs

+ +

Class declaration

+ +

Like in untyped KOOL.

+
  rule <k> class Class1 extends Class2 { S } => .K ...</k>
+       <classes>... (.Bag => <classData>
+                            <className> Class1 </className>
+                            <baseClass> Class2 </baseClass>
+                            <declarations> S </declarations>
+                        </classData>)
+       ...</classes>
+

Method declaration

+ +

Methods are now typed and we need to store their types in their
+closures, so that their type contract can be checked at invocation
+time. The rule below is conceptually similar to that of untyped KOOL;
+the only difference is the addition of the types.

+
  rule <k> T:Type F:Id(Ps:Params) S => .K ...</k>
+       <crntClass> C </crntClass>
+       <location> OL </location>
+       <env> Env => Env[F <- L] </env>
+       <store>... .Map => L|->methodClosure(getTypes(Ps)->T,C,OL,Ps,S) ...</store>
+       <nextLoc> L => L +Int 1 </nextLoc>
+

New

+ +

The semantics of new in dynamically typed KOOL is also
+similar to that in untyped KOOL, the main difference being the
+management of the return types. Indeed, when a new object is created
+we also have to stack the current contents of the returnType cell,
+so that it can be recovered after the creation of the new object. Only the
+first rule below needs to be changed; the others are identical to
+those in untyped KOOL.

+
  syntax KItem ::= envStackFrame(Id, Map)
+
+  rule <k> new Class:Id(Vs:Vals) ~> K
+           => create(Class) ~> (storeObj ~> ((Class(Vs)); return this;)) </k>
+       <env> Env => .Map </env>
+       <nextLoc> L:Int => L +Int 1 </nextLoc>
+     //<br/> // TODO(KORE): support latex annotations #1799
+       <control>
+         <xstack> XS </xstack>
+         <crntObj> Obj
+                   => <crntClass> Object </crntClass>
+                      <envStack> ListItem(envStackFrame(Object, .Map)) </envStack>
+                      <location> L </location>
+         </crntObj>
+         <returnType> T => Class </returnType>
+         <fstack> .List => ListItem(fstackFrame(Env, K, XS, T, <crntObj>Obj</crntObj>)) ...</fstack>
+       </control>
+
+  syntax KItem ::= create(Id)
+
+  rule <k> create(Class:Id)
+           => create(Class1) ~> setCrntClass(Class) ~> S ~> addEnvLayer ...</k>
+       <className> Class </className>
+       <baseClass> Class1:Id </baseClass>
+       <declarations> S </declarations>
+
+  rule <k> create(Object) => .K ...</k>
+
+  syntax KItem ::= setCrntClass(Id)
+
+  rule <k> setCrntClass(C) => .K ...</k>
+       <crntClass> _ => C </crntClass>
+
+  syntax KItem ::= "addEnvLayer"
+
+  rule <k> addEnvLayer => .K ...</k>
+       <env> Env => .Map </env>
+       <crntClass> Class:Id </crntClass>
+       <envStack> .List => ListItem(envStackFrame(Class, Env)) ...</envStack>
+
+  syntax KItem ::= "storeObj"
+
+  rule <k> storeObj => .K ...</k>
+       <crntObj>
+         <crntClass> Class </crntClass>
+         <envStack> EStack </envStack>
+         (<location> L:Int </location> => .Bag)
+       </crntObj>
+       <store>... .Map => L |-> objectClosure(Class, EStack) ...</store>
+

Self reference

+ +

Like in untyped KOOL.

+
  rule <k> this => objectClosure(Class, EStack) ...</k>
+       <crntObj>
+         <crntClass> Class </crntClass>
+         <envStack> EStack </envStack>
+         ...
+       </crntObj>
+

Object member access

+ +

Like in untyped KOOL.

+
  rule <k> X:Id => this . X ...</k> <env> Env:Map </env>
+    requires notBool(X in keys(Env))
+
+  context HOLE . _::Id requires (HOLE =/=K super)
+
+/*  rule objectClosure(<crntObj> <crntClass> Class:Id </crntClass>
+                     <envStack>... ListItem((Class,EnvC:EnvCell)) EStack </envStack> </crntObj>)
+       . X:Id
+    => lookupMember(<envStack> ListItem((Class,EnvC)) EStack </envStack>, X) */
+  rule objectClosure(Class:Id,
+                     ListItem(envStackFrame(Class,Env)) EStack)
+       . X:Id
+    => lookupMember(ListItem(envStackFrame(Class,Env)) EStack, X)
+  rule objectClosure(Class:Id,
+                     (ListItem(envStackFrame(Class':Id,_)) => .List) _EStack)
+       . _X:Id
+    requires Class =/=K Class'
+
+/*  rule <k> super . X => lookupMember(<envStack>EStack</envStack>, X) ...</k>
+       <crntClass> Class </crntClass>
+       <envStack>... ListItem((Class,EnvC:EnvCell)) EStack </envStack> */
+  rule <k> super . X => lookupMember(EStack, X) ...</k>
+       <crntClass> Class:Id </crntClass>
+       <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack>
+  rule <k> super . _X ...</k>
+       <crntClass> Class:Id </crntClass>
+       <envStack> (ListItem(envStackFrame(Class':Id,_)) => .List) _EStack </envStack>
+    requires Class =/=K Class'
+

Method invocation

+ +

The method lookup is the same as in untyped KOOL.

+
  rule <k> (X:Id => V)(_:Exps) ...</k>
+       <env>... X |-> L ...</env>
+       <store>... L |-> V:Val ...</store>
+
+  rule <k> (X:Id => this . X)(_:Exps) ...</k>
+       <env> Env </env>
+    requires notBool(X in keys(Env))
+
+  context HOLE._::Id(_) requires HOLE =/=K super
+
+  rule (objectClosure(_, EStack) . X
+    => lookupMember(EStack, X:Id))(_:Exps)
+
+/*  rule <k> (super . X
+            => lookupMember(<envStack>EStack</envStack>,X))(_:Exps)...</k>
+       <crntClass> Class </crntClass>
+       <envStack>... ListItem((Class,_)) EStack </envStack> */
+  rule <k> (super . X
+            => lookupMember(EStack,X))(_:Exps)...</k>
+       <crntClass> Class:Id </crntClass>
+       <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack>
+  rule <k> (super . _X)(_:Exps)...</k>
+       <crntClass> Class:Id </crntClass>
+       <envStack> (ListItem(envStackFrame(Class':Id,_)) => .List) _EStack </envStack>
+    requires Class =/=K Class'
+
+  // TODO(KORE): fix getKLabel #1801
+  rule (A:Exp(B:Exps))(C:Exps) => A(B) ~> #freezerFunCall(C)
+  rule (A:Exp[B:Exps])(C:Exps) => A[B] ~> #freezerFunCall(C)
+  rule V:Val ~> #freezerFunCall(C:Exps) => V(C)
+  syntax KItem ::= "#freezerFunCall" "(" K ")"
+  /*
+  context HOLE(_:Exps)
+    requires getKLabel HOLE ==KLabel '_`(_`) orBool getKLabel HOLE ==KLabel '_`[_`]
+  */
+
+  rule <k> (lookup(L) => V)(_:Exps) ...</k>  <store>... L |-> V:Val ...</store>
+

Instance of

+ +

Like in untyped KOOL.

+
  rule objectClosure(_, ListItem(envStackFrame(C,_)) _)
+       instanceOf C => true
+
+  rule objectClosure(_, (ListItem(envStackFrame(C::Id,_)) => .List) _)
+       instanceOf C'  requires C =/=K C'
+
+  rule objectClosure(_, .List) instanceOf _ => false
+

Cast

+ +

Unlike in untyped KOOL, in typed KOOL we actually check that the object
+can indeed be cast to the claimed type.

+
  rule (C:Id) objectClosure(Irrelevant, EStack)
+    => objectClosure(Irrelevant, EStack) instanceOf C ~> true?
+       ~> objectClosure(C, EStack)
+
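
For instance (an invented, illustrative fragment), a downcast succeeds only
+when the object really has a layer for the target class:

+
  // fragment of a method body (illustrative), with Circle extends Shape
+  Shape s;
+  s = new Circle();
+  Circle c;
+  c = (Circle) s;               // accepted: the object has a Circle layer, so
+                                // instanceOf yields true and the cast relabels it
+  // c = (Circle) new Shape();  // would get stuck: instanceOf yields false
+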

KOOL-specific auxiliary declarations and operations

+ +

Objects as lvalues

+ +

Like in untyped KOOL.

+
  rule <k> lvalue(X:Id => this . X) ...</k>  <env> Env </env>
+    requires notBool(X in keys(Env))
+
+  context lvalue((HOLE . _)::Exp)
+
+/*  rule lvalue(objectClosure(<crntObj> <crntClass> C </crntClass>
+                            <envStack>... ListItem((C,EnvC:EnvCell)) EStack </envStack> </crntObj>)
+              . X
+              => lookupMember(<envStack> ListItem((C,EnvC)) EStack </envStack>,
+                              X)) */
+  rule lvalue(objectClosure(C:Id,
+                            ListItem(envStackFrame(C,Env)) EStack)
+              . X
+              => lookupMember(ListItem(envStackFrame(C,Env)) EStack,
+                              X))
+  rule lvalue(objectClosure(C,
+                            (ListItem(envStackFrame(C',_)) => .List) _EStack)
+              . _X)
+    requires C =/=K C'
+

Lookup member

+ +

Like in untyped KOOL.

+
  syntax Exp ::= lookupMember(List,Id)  [function]
+
+  rule lookupMember(ListItem(envStackFrame(_, X |-> L _)) _, X) => lookup(L)
+
+  // TODO: fix rule below as shown once we support functions with deep rewrites
+  // rule lookupMember(<envStack> ListItem((_, <env> Env </env>)) => .List
+  //                     ...</envStack>, X)
+  //   requires notBool(X in keys(Env))
+  rule lookupMember(ListItem(envStackFrame(_, Env)) L, X)
+    => lookupMember(L, X)
+    requires notBool(X in keys(Env))
+

typeOf for the additional values

+ +
  rule typeOf(objectClosure(C,_)) => C
+  rule typeOf(methodClosure(T:Type,_,_,_Ps:Params,_)) => T
+

Subtype checking

+ +

The subclass relation induces a subtyping relation.

+
  syntax Exp ::= subtype(Types,Types)
+
+  rule subtype(T:Type, T) => true
+
+  rule <k> subtype(C1:Id, C:Id) => subtype(C2, C) ...</k>
+       <className> C1 </className>
+       <baseClass> C2:Id </baseClass>
+    requires C1 =/=K C
+
+  rule subtype(Object,Class:Id) => false
+    requires Class =/=K Object
+
+  rule subtype(Ts1->T2,Ts1'->T2') => subtype(((T2)::Type,Ts1'),((T2')::Type,Ts1))
+
+// Note that the following rule would be wrong!
+//  rule subtype(T[],T'[]) => subtype(T,T')
+
+  rule subtype((T:Type,Ts),(T':Type,Ts')) => subtype(T,T') && subtype(Ts,Ts')
+    requires Ts =/=K .Types
+  rule subtype(.Types,.Types) => true
+
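
For instance (an illustration of the rules above, assuming two classes where
+B extends A), the class data drives the following reductions:

+
  subtype(B, A) => subtype(A, A)       // B's baseClass is A
+                => true                // by the reflexivity rule
+
+  subtype(A, B) => subtype(Object, B)  // A implicitly extends Object
+                => false               // Object is only a subtype of itself
+

For method types, the rule for arrow types above checks the result
+covariantly and the arguments contravariantly, as expected for a sound
+subtyping of functions.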

Unsafe Casting

+ +

The unsafeCast operation performs an unchecked cast. One should only
+use it in combination with the subtype relation above.

+
  syntax Val ::= unsafeCast(Val,Type)  [function]
+
+  rule unsafeCast(objectClosure(_,EStack), C:Id)
+    => objectClosure(C,EStack)
+
+  rule unsafeCast(methodClosure(_T',C,OL,Ps,S), T) => methodClosure(T,C,OL,Ps,S)
+
+  rule unsafeCast(V:Val, T:Type) => V  requires typeOf(V) ==K T
+

Generic guard

+ +

A generic computational guard: it allows the computation to continue
+only if a prefix guard evaluates to true.

+
  syntax KItem ::= "true?"
+  rule true ~> true? => .K
+
+endmodule
+

Go to Lesson 3, KOOL typed static.

+

KOOL — Typed — Static

+ +

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+ +

This is the K static semantics of the typed KOOL language.
+It extends the static semantics of typed SIMPLE with static semantics
+for the object-oriented constructs. Also, the static semantics of
+some of the existing SIMPLE constructs needs to change, in order to
+become more generous with regard to the set of accepted programs,
+mostly due to subtyping. For example, the assignment construct
+x = e required that both the variable x and the
+expression e had the same type in SIMPLE. In KOOL, the type
+of e can be a subtype of the type of x.
+Specifically, we define the following typing policy for KOOL,
+with everything not mentioned below borrowing its semantics from
+SIMPLE:

+
    +
  • Each class C yields a homonymous type, which can be
    +explicitly used in programs to type variables and methods, possibly in
    +combination with other types.

  • Since now we have user-defined types, we check that each type
    +used in a KOOL program is well-formed, that is, it is constructed only
    +from primitive and class types corresponding to declared classes.

  • Class members and their types form a class type
    +environment. Each class will have such a type environment.
    +Each member in a class is allowed to be declared only once. Since in
    +KOOL we allow methods to be assigned to fields, we make no distinction
    +between field and method members; in other words, we reject programs
    +declaring both a field and a method with the same name.

  • If an identifier is not found in the local type environment, it
    +will be searched for in the current class type environment. If not
    +there, then it will be searched for in its superclass' type
    +environment. And so on and so forth. If not found until the
    +Object class is reached, a typing error is reported.

  • The assignment allows variables to be assigned values of
    +more concrete types. The result type of the assignment expression
    +construct will be the (more abstract) type of the assigned variable,
    +and not the (more concrete) type of the expression, like in Java.

  • Exceptions are changed (from SIMPLE) to allow throwing and
    +catching only objects, like in Java. Also, unlike in SIMPLE, we do
    +not check whether the type of the thrown exception matches the type of
    +the caught variable, because exceptions can be caught by other
    +try/catch blocks, even by ones in other methods. To avoid
    +having to annotate each method with what exceptions it can throw, we
    +prefer to not check the type safety of exceptions (although this is an
    +excellent homework!). We only check that the try block
    +type-checks and that the catch block type-checks after we bind
    +the caught variable to its claimed type.

  • Class declarations are not allowed to have any cycles in their
    +extends relation. Such cycles would lead to non-termination of
    +new, as it actually does in the dynamic semantics of KOOL
    +where no such circularity checks are performed.

  • Methods overriding other methods should be in the right subtyping
    +relationship with the overridden methods: co-variant in the codomain
    +and contra-variant in the domain.
+
module KOOL-TYPED-STATIC-SYNTAX
+  imports DOMAINS-SYNTAX
+

Syntax

+ +

The syntax of statically typed KOOL is identical to that of
+dynamically typed KOOL, both of them taking the same programs as input.
+What differs are the K strictness attributes. Like in statically
+typed SIMPLE, almost all language constructs are now strict, since we
+want each of them to type its arguments almost all the time. Like in the
+other two KOOL definitions, we prefer to copy and then modify/extend
+the syntax of statically typed SIMPLE.

+

Note: This paragraph is old, now we can do things better. We keep
+it here only for historical reasons, to see how much we used to suffer 😃

+

Annoying K-tool technical problem:
+Currently, the K tool treats the "non-terminal" productions (i.e.,
+productions consisting of just one non-terminal), also called
+"subsorting" productions, differently from the other productions.
+Specifically, it does not insert a node in the AST for them. This may
+look desirable at first, but it has a big problem: it does not allow
+us to treat the subsort differently in different contexts. For
+example, since we want Id to be both a type (a class name) and a
+program variable, and since we want expressions to reduce to their
+types, we are in an impossible situation in which we do not know how
+to treat an identifier in the semantics: as a type, i.e., a result of
+computations, or as a program variable, i.e., a non-result. Ideally,
+we would like to tag the identifiers at parse-time with their local
+interpretation, but that, unfortunately, is not possible with the
+current parsing capabilities of the K tool, because it requires
+inserting additional information in the AST for the subsort productions.
+This will be fixed soon. Until then, unfortunately, we have to do the
+job of the parser manually. Instead of subsorting Id directly
+to Type, we "wrap" it first, say with a wrapper called
+class(...), exactly as the parser should have done.
+The major drawback of this is that all the typed KOOL programs
+in kool/typed/programs also need to be modified to always
+declare class types accordingly. The modified programs can be found
+in kool/typed/static/programs. So make sure you execute the
+static semantics of KOOL using the modified programs. To avoid seeing
+the wrapper in the generated documentation, we associate with it an
+"invisibility" latex attribute below.

+
  syntax Id ::= "Object" [token] | "Main" [token]
+

Types

+ +
  syntax Type ::= "void" | "int" | "bool" | "string"
+                | Id                     [klabel("class"), symbol, avoid]  // see next
+                | Type "[" "]"
+                | "(" Type ")"           [bracket]
+                > Types "->" Type
+
+  syntax Types ::= List{Type,","}        [overload(exps)]
+

Declarations

+ +
  syntax Param ::= Type Id
+  syntax Params ::= List{Param,","}
+
+  syntax Stmt ::= Type Exps ";" [avoid]
+                | Type Id "(" Params ")" Block
+                | "class" Id Block
+                | "class" Id "extends" Id Block
+

Expressions

+ +
  syntax FieldReference ::= Exp "." Id          [strict(1)]
+  syntax ArrayReference ::= Exp "[" Exps "]"    [strict]
+
+  syntax Exp ::= Int | Bool | String | Id
+               | "this"
+               | "super"
+               | "(" Exp ")"             [bracket]
+               | "++" Exp
+               | Exp "instanceOf" Id     [strict(1)]
+               | "(" Id ")" Exp          [strict(2)]
+               | "new" Id "(" Exps ")"   [strict(2)]
+               > Exp "(" Exps ")"        [strict]
+               | "-" Exp                 [strict]
+               | "sizeOf" "(" Exp ")"    [strict]
+               | "read" "(" ")"
+               > left:
+                 Exp "*" Exp             [strict, left]
+               | Exp "/" Exp             [strict, left]
+               | Exp "%" Exp             [strict, left]
+               > left:
+                 Exp "+" Exp             [strict, left]
+               | Exp "-" Exp             [strict, left]
+               > non-assoc:
+                 Exp "<" Exp             [strict, non-assoc]
+               | Exp "<=" Exp            [strict, non-assoc]
+               | Exp ">" Exp             [strict, non-assoc]
+               | Exp ">=" Exp            [strict, non-assoc]
+               | Exp "==" Exp            [strict, non-assoc]
+               | Exp "!=" Exp            [strict, non-assoc]
+               > "!" Exp                 [strict]
+               > left:
+                 Exp "&&" Exp            [strict, left]
+               | Exp "||" Exp            [strict, left]
+               > "spawn" Block  // not strict: to check return and exceptions
+               > Exp "=" Exp             [strict(2), right]
+
+  syntax Exp ::= FieldReference | ArrayReference
+  syntax priority _.__KOOL-TYPED-STATIC-SYNTAX > _[_]_KOOL-TYPED-STATIC-SYNTAX > _(_)_KOOL-TYPED-STATIC-SYNTAX
+
+  syntax Exps ::= List{Exp,","}          [strict, overload(exps)]
+

Statements

+ +
  syntax Block ::= "{" "}"
+                | "{" Stmt "}"
+
+  syntax Stmt ::= Block
+                | Exp ";"                                 [strict]
+                | "if" "(" Exp ")" Block "else" Block     [avoid, strict]
+                | "if" "(" Exp ")" Block                  [macro]
+                | "while" "(" Exp ")" Block               [strict]
+                | "for" "(" Stmt Exp ";" Exp ")" Block    [macro]
+                | "return" Exp ";"                        [strict]
+                | "return" ";"
+                | "print" "(" Exps ")" ";"                [strict]
+                | "try" Block "catch" "(" Param ")" Block [strict(1)]
+                | "throw" Exp ";"                         [strict]
+                | "join" Exp ";"                          [strict]
+                | "acquire" Exp ";"                       [strict]
+                | "release" Exp ";"                       [strict]
+                | "rendezvous" Exp ";"                    [strict]
+
+  syntax Stmt ::= Stmt Stmt                            [seqstrict, right]
+

Desugaring macros

+ +
  rule if (E) S => if (E) S else {}
+  rule for(Start Cond; Step) {S:Stmt} => {Start while(Cond){S Step;}}
+  rule T:Type E1:Exp, E2:Exp, Es:Exps; => T E1; T E2, Es;               [anywhere]
+  rule T:Type X:Id = E; => T X; X = E;                                  [anywhere]
+
+  rule class C:Id S => class C extends Object S
+
+endmodule
+

Static semantics

+ +

We first discuss the configuration, then give the static semantics
+taken over unchanged from SIMPLE, then discuss the static semantics of
+SIMPLE syntactic constructs that needs to change, and in the end we
+discuss the static semantics and additional checks specifically
+related to the KOOL proper syntax.

+
module KOOL-TYPED-STATIC
+  imports KOOL-TYPED-STATIC-SYNTAX
+  imports DOMAINS
+

Configuration

+ +

The configuration of our type system consists of a tasks
+cell with the same meaning as in statically typed SIMPLE, of an
+output cell streamed to the standard output that will be used to
+display typing error messages, and of a classes cell holding
+data about each class in a separate classData cell. The
+task cells now have two additional optional subcells, namely
+ctenvT and inClass. The former holds a temporary
+class type environment; its contents will be transferred into the
+ctenv cell of the corresponding class as soon as all the
+fields and methods in the task are processed. In fact, there will be
+three types of tasks in the subsequent semantics, each determined by
+the subset of cells that it holds:

+
    +
  1. Main task, holding only a k cell holding the
     +original program as a set of classes. The role of this task is to
     +process each class, generating a class task (see next) for each.

  2. Class task, holding k, ctenvT, and
     +inClass subcells. The role of this task type is to process
     +a class' contents, generating a class type environment in the
     +ctenvT cell and a method task (see next) for each method in
     +the class. To avoid interference with object member lookup rules
     +below, it is important to add the class type environment to a class
     +atomically; this is the reason for which we use ctenvT
     +temporary cells within class tasks (instead of adding each member
     +incrementally to the class' type environment).

  3. Method task, holding k, tenv and
     +return cells. These tasks are similar to SIMPLE's function
     +tasks, so we do not discuss them here any further.
+

Each classData cell holds the class' name (in the className
+cell) and the name of the class it extends (in the baseClass
+cell), as well as its type environment (in the ctenv cell)
+and the set of all its superclasses (in the baseClasses cell).
+The latter is useful, for example, for checking whether there are cycles
+in the class extends relation.

+
  configuration <T multiplicity="?" color="yellow">
+                  <tasks color="orange" multiplicity="?">
+                    <task multiplicity="*" color="yellow" type="Set">
+                      <k color="green"> $PGM:Stmt </k>
+                      <tenv multiplicity="?" color="cyan"> .Map </tenv>
+                      <ctenvT multiplicity="?" color="blue"> .Map </ctenvT>
+                      <returnType multiplicity="?" color="black"> void </returnType>
+                      <inClass multiplicity="?" color="Fuchsia"> .K </inClass>
+                    </task>
+                  </tasks>
+//                  <br/>
+                  <classes color="Fuchsia">
+                    <classData multiplicity="*" type="Map">
+                      <className color="Fuchsia"> Object </className>
+                      <baseClass color="Fuchsia"> .K </baseClass>
+                      <baseClasses color="Fuchsia"> .Set </baseClasses>
+                      <ctenv multiplicity="?" color="blue"> .Map </ctenv>
+                    </classData>
+                  </classes>
+                </T>
+                <output color="brown" stream="stdout"> .List </output>
+

Unchanged semantics from statically typed SIMPLE

+ +

The syntax and rules below are borrowed unchanged from statically
+typed SIMPLE, so we do not discuss them much here.

+
  syntax Exp ::= Type
+  syntax Exps ::= Types
+  syntax BlockOrStmtType ::= "block" | "stmt"
+  syntax Type ::= BlockOrStmtType
+  syntax Block ::= BlockOrStmtType
+  syntax KResult ::= Type
+                   | Types  // TODO: should not be needed
+
+
+  context _:Type _::Exp[HOLE::Exps];
+
+  rule T:Type E:Exp[int,Ts:Types]; => T[] E[Ts];
+  rule T:Type E:Exp[.Types]; => T E;
+
+
+  rule <task>... <k> _:BlockOrStmtType </k> <tenv> _ </tenv> ...</task> => .Bag
+
+
+  rule _:Int => int
+  rule _:Bool => bool
+  rule _:String => string
+
+
+  rule <k> X:Id => T ...</k> <tenv>... X |-> T ...</tenv>
+
+
+  context ++(HOLE => ltype(HOLE))
+  rule ++ int => int
+  rule int + int => int
+  rule string + string => string
+  rule int - int => int
+  rule int * int => int
+  rule int / int => int
+  rule int % int => int
+  rule - int => int
+  rule int < int => bool
+  rule int <= int => bool
+  rule int > int => bool
+  rule int >= int => bool
+  rule T:Type == T => bool
+  rule T:Type != T => bool
+  rule bool && bool => bool
+  rule bool || bool => bool
+  rule ! bool => bool
+
+
+  rule (T[])[int, Ts:Types] => T[Ts]
+  rule T:Type[.Types] => T
+
+  rule sizeOf(_T[]) => int
+
+
+  rule read() => int
+
+  rule print(T:Type, Ts => Ts); requires T ==K int orBool T ==K string
+  rule print(.Types); => stmt
+
+
+  context (HOLE => ltype(HOLE)) = _
+
+
+  rule <k> return; => stmt ...</k> <returnType> _ </returnType>
+
+
+  rule {} => block
+
+  rule <task> <k> {S:Stmt} => block ...</k> <tenv> Rho </tenv> R </task>
+       (.Bag => <task> <k> S </k> <tenv> Rho </tenv> R </task>)
+
+  rule _:Type; => stmt
+  rule if (bool) block else block => stmt
+  rule while (bool) block => stmt
+
+  rule join int; => stmt
+  rule acquire _:Type; => stmt
+  rule release _:Type; => stmt
+  rule rendezvous _:Type; => stmt
+
+  syntax Stmt ::= BlockOrStmtType
+  rule _:BlockOrStmtType _:BlockOrStmtType => stmt
+

Unchanged auxiliary operations from dynamically typed SIMPLE

+ +
  syntax Stmt ::= mkDecls(Params)  [function]
+  rule mkDecls(T:Type X:Id, Ps:Params) => T X; mkDecls(Ps)
+  rule mkDecls(.Params) => {}
+
+  syntax LValue ::= Id
+                  | FieldReference
+                  | ArrayReference
+  syntax Exp ::= LValue
+
+  syntax Exp ::= ltype(Exp)
+// We would like to say:
+//  context ltype(HOLE:LValue)
+// but we currently cannot type the HOLE
+  context ltype(HOLE) requires isLValue(HOLE)
+
+// OLD approach:
+//  syntax Exp ::= ltype(Exp)  [function]
+//  rule ltype(X:Id) => X
+//  rule ltype(E:Exp [Es:Exps]) => E[Es]
+
+  syntax Types ::= getTypes(Params)  [function]
+  rule getTypes(T:Type _:Id) => T, .Types
+  rule getTypes(T:Type _:Id, P, Ps) => T, getTypes(P,Ps)
+  rule getTypes(.Params) => void, .Types
+

Changes to the existing statically typed SIMPLE semantics

+ +

Below we give the new static semantics for language constructs that
+come from SIMPLE, but whose SIMPLE static semantics was too
+restrictive or too permissive and thus had to change.

+

Local variable declaration

+ +

Since we can define new types in KOOL (corresponding to classes), the
+variable declaration now needs to check that the claimed types exist.
+The operation checkType, defined at the end of this module,
+checks whether its argument type is well-formed (it actually works
+with lists of types as well).

+
  rule <k> T:Type X:Id; => checkType(T) ~> stmt ...</k>
+       <tenv> Rho => Rho[X <- T] </tenv>
+
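For instance, assuming a program that declares a class A but no class B, the
two local declarations below (an illustrative fragment, not part of the
definition) are treated differently by the rule above:

    A a;   // checkType(class(A)) succeeds, so the declaration types to stmt
    B b;   // checkType(class(B)) cannot reduce, so this declaration gets stuck
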

Class member declaration

+ +

In class tasks, variable declarations mean class member declarations.
+Since we reduce method declarations to variable declarations (see
+below), a variable declaration in a class task can mean either a field
+or a method declaration. Unlike local variable declarations, which
+can shadow previous homonymous local or member declarations, member
+declarations are regarded as a set, so we disallow multiple
+declarations of the same member (one could improve upon this, like in
+Java, by treating members with different types or numbers of arguments
+as different, etc., but we do not do that here). We also issue an error
+message if one attempts to redeclare the same class member. The
+second rule below wraps the offending declaration in the auxiliary
+stuck operation (declared at the end of this module), a unary
+operation which takes a K term as argument and does nothing with it,
+so an attempted redeclaration simply remains stuck after the error
+message is emitted.

+
  rule <k> T:Type X:Id; => checkType(T) ~> stmt ...</k>
+       <ctenvT> Rho (.Map => X |-> T) </ctenvT>
+    requires notBool(X in keys(Rho))
+
+  rule <k> T:Type X:Id; => stuck(T X;) ...</k>
+       <ctenvT>... X |-> _ ...</ctenvT>
+       <inClass> C:Id </inClass>
+//       <br/>
+       <output>... .List => ListItem("Member \"" +String Id2String(X)
+                              +String "\" declared twice in class \""
+                              +String Id2String(C) +String "\"!\n") </output>
+
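For example, in an illustrative class like the one below, the second
declaration of x matches the second rule above, gets wrapped in stuck, and the
corresponding error message is sent to the output cell:

    class C {
      int  x;
      bool x;   // reported as: Member "x" declared twice in class "C"!
    }
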

Method declaration

+ +

A method declaration requires two conceptual checks to be performed:
+first, that the method's type is consistent with the type of the
+homonymous method that it overrides, if any; and second, that its body
+types correctly. At the same time, it should also be added to the
+type environment of its class. The first conceptual task is performed
+using the checkMethod operation defined below, and the second
+by generating a corresponding method task. To add it to the class
+type environment, we take advantage of the fact that KOOL is higher
+order and reduce the problem to a field declaration problem, which we
+have already defined. The role of the ctenvT cell in the
+rule below is to structurally ensure that the method declaration takes
+place in a class task (we do not want to allow methods to be declared,
+for example, inside other methods).

+
  rule <k> T:Type F:Id(Ps:Params) S
+        => checkMethod(F, getTypes(Ps)->T, C')
+           ~> getTypes(Ps)->T F; ...</k>
+//       <br/>
+       <inClass> C </inClass>
+       <ctenvT> _ </ctenvT> // to ensure we are in a class pass
+       <className> C </className>
+       <baseClass> C' </baseClass>
+//       <br/>
+       (.Bag => <task>
+               <k> mkDecls(Ps) S </k>
+               <inClass> C </inClass>
+               <tenv> .Map </tenv>
+               <returnType> T </returnType>
+             </task>)
+
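For instance, assuming a class D has already been declared, a method
declaration like the one below (an illustrative fragment) is checked against
any homonymous method of the superclass and recorded as an ordinary member
declaration of function type, while its body is type-checked in a separate
method task:

    class C extends D {
      int f(int x, bool b) { return x; }
      // reduces to checkMethod(f, int,bool -> int, D) ~> int,bool -> int f;
      // and spawns a method task that checks the body with returnType int
    }
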

Assignment

+ +

A more concrete value is allowed to be assigned to a more abstract
+variable. The operation checkSubtype is defined at the end
+of the module and it also works with pairs of lists of types.

+
  rule T:Type = T':Type => checkSubtype(T', T) ~> T
+
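For example, assuming a class B that extends a class A, and local variables
A a; and B b; already declared (an illustrative fragment):

    a = b;   // checkSubtype(class(B), class(A)) succeeds; the assignment types to class(A)
    b = a;   // gets stuck: class(A) is not a subtype of class(B)
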

Method invocation and return

+ +

Methods can be applied to arguments of more concrete types than their
+declared parameter types:

+
  rule (Ts:Types -> T:Type) (Ts':Types) => checkSubtype(Ts',Ts) ~> T
+

Similarly, we allow values of more concrete types to be returned by
+methods:

+
  rule <k> return T:Type; => checkSubtype(T,T') ~> stmt ...</k>
+       <returnType> T':Type </returnType>
+

Exceptions

+ +

Exceptions can throw and catch values of any type. Since, unlike in
+Java, KOOL's methods do not declare the exception types that they can
+throw, we cannot check full type safety for exceptions. Instead, we
+only check that the try and the catch statements
+type correctly.

+
  rule try block catch(T:Type X:Id) S => {T X; S}
+  rule throw _T:Type ; => stmt
+

Spawn

+ +

The spawned task cell also needs to be passed the parent's class.

+
// explain why
+
+  rule <k> spawn S:Block => int ...</k>
+       <tenv> Rho </tenv>
+       <inClass> C </inClass>
+       (.Bag => <task>
+               <k> S </k>
+               <tenv> Rho </tenv>
+               <inClass> C </inClass>
+             </task>)
+

Semantics of the new KOOL constructs

+ +

Class declaration

+ +

We process each class in the main task, adding the corresponding data
+into its class cell and also adding a class task for it. We
+also perform some well-formedness checks on the class hierarchy.

+

Initiate class processing
+We create a classData cell and a class task for each class
+declaration. Also, we start the class task with a check that the class
+it extends is declared (this delays the task until that class is
+processed by another instance of this rule).

+
// There seems to be some error with the configuration concretization,
+// as the rule below does not work when rewriting . to both the task
+// and the class cells; I had to include two separate . rewrites
+
+// TODO: the following fails krun; see #2117
+  rule <task> <k> class C:Id extends C':Id { S:Stmt } => stmt ...</k> </task>
+       (.Bag => <classData>...
+               <className> C </className>
+               <baseClass> C' </baseClass>
+             ...</classData>)
+//       <br/>
+       (.Bag => <task>
+                <k> checkType(`class`(C')) ~> S </k>
+                <inClass> C </inClass>
+                <ctenvT> .Map </ctenvT>
+             </task>)
+
+// You may want to try the thing below, but that failed, too
+/*
+syntax Type ::= "stmtStop"
+
+  rule <tasks>...
+       <task> <k> class C:Id extends C':Id { S:Stmt } => stmtStop ...</k> </task>
+       (.Bag => <task>
+                <k> checkType(`class`(C')) ~> S </k>
+                <inClass> C </inClass>
+                <ctenvT> .Map </ctenvT>
+             </task>)
+       ...</tasks>
+       <classes>...
+       .Bag => <classData>...
+               <className> C </className>
+               <baseClass> C' </baseClass>
+             ...</classData>
+       ...</classes>
+//       <br/>
+*/
+

Check for unique class names

+ +
  rule (<T>...
+          <className> C </className>
+          <className> C </className>
+        ...</T> => .Bag)
+       <output>... .List => ListItem("Class \"" +String Id2String(C)
+                                  +String "\" declared twice!\n") </output>
+

Check for cycles in class hierarchy
+We check for cycles in the class hierarchy by transitively closing the
+class extends relation using the baseClasses cells, and
+checking that no class ever appears in its own baseClasses
+cell. The first rule below initiates the transitive closure of the
+superclass relation, the second transitively closes it, and the third
+checks for cycles.

+
  rule <baseClass> C </baseClass>
+       <baseClasses> .Set => SetItem(C) </baseClasses>  [priority(25)]
+
+  rule <classData>...
+         <baseClasses> SetItem(C) Cs:Set (.Set => SetItem(C')) </baseClasses>
+       ...</classData>
+       <classData>... <className>C</className> <baseClass>C'</baseClass> ...</classData>
+    requires notBool(C' in (SetItem(C) Cs))  [priority(25)]
+
+  rule (<T>...
+          <className> C </className>
+          <baseClasses>... SetItem(C) ...</baseClasses>
+        ...</T> => .Bag)
+       <output>... .List => ListItem("Class \"" +String Id2String(C)
+                                  +String "\" is in a cycle!\n") </output>
+    [priority(25)]
+

New

+ +

To type new we only need to check that the class constructor
+can be called with arguments of the given types, so we initiate a call
+to the constructor method in the corresponding class. If that
+succeeds, meaning that it types to stmt, then we discard the
+stmt type and produce instead the corresponding class type of
+the new object. The auxiliary discard operation is defined
+also at the end of this module.

+
  rule new C:Id(Ts:Types) => `class`(C) . C (Ts) ~> discard ~> `class`(C)
+
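For instance, once the arguments of a new expression have evaluated to their
types, a term such as the one below rewrites as sketched (an illustrative
trace; the resulting constructor invocation is then type-checked like any
other method call, assuming class C declares a suitable constructor):

    new C(int, bool)
      => class(C) . C (int, bool) ~> discard ~> class(C)
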

Self reference

+ +

The typing rule for this is straightforward: reduce to the
+current class type.

+
  rule <k> this => `class`(C) ...</k>
+       <inClass> C:Id </inClass>
+

Super

+ +

Similarly, super types to the parent class type.
+Note that for typing concerns, super can be considered as an object
+(recall that this was not the case in the dynamic semantics).

+
   rule <k> super => `class`(C') ...</k>
+        <inClass> C:Id </inClass>
+        <className> C </className>
+        <baseClass> C':Id </baseClass>
+

Object member access

+ +

There are several cases to consider here. First, if we are in a class
+task, we should look up the member in the temporary class type
+environment in the ctenvT cell. That is because we want to allow
+initialized field declarations in classes, such as int x=10;.
+This is desugared to a declaration of x, which is added to
+ctenvT during the class task processing, followed by an
+assignment of x to 10. In order for the assignment to type
+check, we need to know that x has been declared with type
+int; this information can only be found in the
+ctenvT cell. Second, we should redirect non-local variable
+lookups in method tasks to corresponding member accesses (the
+local variables are handled by the rule borrowed from SIMPLE).
+This is what the second rule below does. Third, we should allow
+object member accesses as lvalues, which is done by the third rule
+below. These last two rules therefore ensure that each necessary
+object member access is explicitly allowed for evaluation. Recall
+from the annotated syntax module above that the member access
+operation is strict in the object. That means that the object is
+expected to evaluate to a class type. The next two rules below define
+the actual member lookup operation, moving the search to the
+superclass when the member is not found in the current class. Note
+that this works because we create the class type environments
+atomically; thus, a class either has its complete type environment
+available, in which case these rules can safely apply, or its cell
+ctenv is not yet available, in which case these rules have to
+wait. Finally, the last rule below reports an error when the
+Object class is reached.

+
  rule <k> X:Id => T ...</k>
+       <ctenvT>... X |-> T ...</ctenvT>
+
+  rule <k> X:Id => this . X ...</k>
+       <tenv> Rho </tenv>
+    requires notBool(X in keys(Rho))
+
+// OLD approach:
+//  rule ltype(E:Exp . X:Id) => E . X
+
+  rule <k> `class`(C:Id) . X:Id => T ...</k>
+       <className> C </className>
+       <ctenv>... X |-> T:Type ...</ctenv>
+
+  rule <k> `class`(C1:Id => C2) . X:Id ...</k>
+       <className> C1 </className>
+       <baseClass> C2:Id </baseClass>
+       <ctenv> Rho </ctenv>
+    requires notBool(X in keys(Rho))
+
+  rule <k> `class`(Object) . X:Id => stuck(`class`(Object) . X) ...</k>
+       <inClass> C:Id </inClass>
+//      <br/>
+       <output>... .List => ListItem("Member \"" +String Id2String(X)
+                              +String "\" not declared! (see class \""
+                              +String Id2String(C) +String "\")\n") </output>
+
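For example, inside a method of a class C that declares a field int x, an
occurrence of x which is not a local variable is typed as sketched below (an
illustrative trace using the rules above together with the typing of this):

    x  =>  this . x        // x is not in tenv, so it becomes a member access
       =>  class(C) . x    // this types to the current class
       =>  int             // member lookup in C's ctenv cell
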

Instance of and casting

+ +

As it is hard to check statically whether casting is always safe,
+the programmer is simply trusted from a typing perspective. We only
+do some basic upcasting and downcasting checks, to reject casts which
+will absolutely fail. However, dynamic semantics or implementations
+of the language need to insert runtime checks for downcasting to be safe.

+
  rule `class`(_C1:Id) instanceOf _C2:Id => bool
+  rule (C:Id) `class`(C) => `class`(C)
+  rule <k> (C2:Id) `class`(C1:Id) => `class`(C2) ...</k>
+       <className> C1 </className>
+       <baseClasses>...SetItem(C2)...</baseClasses>    // upcast
+  rule <k> (C2:Id) `class`(C1:Id) => `class`(C2) ...</k>
+       <className> C2 </className>
+       <baseClasses>...SetItem(C1)...</baseClasses>    // downcast
+  rule <k> (C2) `class`(C1:Id) => stuck((C2) `class`(C1)) ...</k>
+       <classData>...
+         <className> C1 </className>
+         <baseClasses> S1 </baseClasses>
+       ...</classData>
+       <classData>...
+         <className> C2 </className>
+         <baseClasses> S2 </baseClasses>
+       ...</classData>
+       <output>... .List => ListItem("Classes \"" +String Id2String(C1)
+                              +String "\" and \"" +String Id2String(C2)
+                              +String "\" are incompatible!\n") </output>
+    requires notBool(C1 in S2) andBool notBool(C2 in S1)
+
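For instance, assuming a class B that extends a class A, a class C unrelated
to both, and variables A a; and B b; already declared (an illustrative
fragment):

    (A) b;   // upcast: accepted, types to class(A)
    (B) a;   // downcast: accepted statically, types to class(B)
    (C) b;   // rejected: Classes "B" and "C" are incompatible!
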

Cleanup tasks

+ +

Finally, we need to clean up the terminated tasks. Each of the three
+types of tasks is handled differently. The main task is replaced by a
+method task holding new Main();, which will ensure that a
+Main class with a Main() method actually exists
+(first rule below). A class task moves its temporary class type
+environment into its class' cell, and then it dissolves itself (second
+rule). A method task simply dissolves when terminated (third rule);
+the presence of the tenv cell in that rule ensures that that
+task is a method task.
+Finally, when all the tasks are cleaned up, we can also remove the
+tasks cell, issuing a corresponding message. Note that
+checking for cycles or duplicate methods can still be performed after
+the tasks cell has been removed.

+
// discard main task when done, issuing a "new main();" command to
+// make sure that the class main and the method main() are declared.
+
+  rule <task> <k> stmt => new Main(.Exps); </k>
+              (.Bag => <tenv> .Map </tenv>
+                    <returnType> void </returnType>
+                    <inClass> Main </inClass>)
+       </task>
+
+// discard class task when done, adding a ctenv in class
+
+  rule (<task>
+          <k> stmt </k>
+          <ctenvT> Rho </ctenvT>
+          <inClass> C:Id </inClass>
+        </task> => .Bag)
+        <className> C </className>
+        (.Bag => <ctenv> Rho </ctenv>)
+
+// discard method task when done
+
+  rule <task>...
+         <k> stmt </k>
+         <tenv> _ </tenv>  // only to ensure that this is a method task
+       ...</task> => .Bag
+
+// cleanup tasks and output a success message when done
+
+  rule (<T>... <tasks> .Bag </tasks> ...</T> => .Bag)
+       <output>... .List => ListItem("Type checked!\n") </output>
+

KOOL-specific auxiliary declarations and operations

+ +

Subtype checking

+ +

The subclass relation introduces a subtyping relation.

+
  syntax KItem ::= checkSubtype(Types,Types)
+
+  rule checkSubtype(T:Type, T) => .K
+
+  rule <k> checkSubtype(`class`(C:Id), `class`(C':Id)) => .K ...</k>
+       <className> C </className>
+       <baseClasses>... SetItem(C') ...</baseClasses>
+
+  rule checkSubtype(Ts1->T2,Ts1'->T2')
+    => checkSubtype(((T2)::Type,Ts1'),((T2')::Type,Ts1))
+
+// note that the following rule would be wrong!
+//  rule checkSubtype(T[],T'[]) => checkSubtype(T,T')
+
+  rule checkSubtype((T:Type,Ts),(T':Type,Ts'))
+    => checkSubtype(T,T') ~> checkSubtype(Ts,Ts')
+    requires Ts =/=K .Types
+
+  rule checkSubtype(.Types,.Types) => .K
+  rule checkSubtype(.Types,void) => .K
+
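For instance, assuming a class B whose baseClasses cell contains a class A,
the rules above give (illustrative uses):

    checkSubtype(int, int)            => .K   // reflexivity (first rule)
    checkSubtype(class(B), class(A))  => .K   // subclassing (second rule)
    checkSubtype(class(A), class(B))          // does not reduce: A is not a subclass of B
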

Checking well-formedness of types

+ +

Since any Id can now be used as a class type, we need to
+check that the types used in the program actually exist.

+
  syntax KItem ::= checkType(Types)
+
+  rule checkType(T:Type,Ts:Types) => checkType(T) ~> checkType(Ts)
+    requires Ts =/=K .Types
+  rule checkType(.Types) => .K
+  rule checkType(int) => .K
+  rule checkType(bool) => .K
+  rule checkType(string) => .K
+  rule checkType(void) => .K
+  rule <k> checkType(`class`(C:Id)) => .K ...</k> <className> C </className>
+  rule checkType(`class`(Object)) => .K
+  rule checkType(Ts:Types -> T:Type) => checkType(T,Ts)
+  rule checkType(T:Type[]) => checkType(T)
+

Checking correct overriding of methods

+ +

The checkMethod operation below searches to see whether
+the current method overrides some other method in some superclass.
+If yes, then it issues an additional check that the new method's type
+is more concrete than the overridden method's. The types T and T'
+below can only be function types. See the definition of
+checkSubtype on function types at the end of this module (it
+is co-variant in the codomain and contra-variant in the domain).

+
  syntax KItem ::= checkMethod(Id,Type,Id)
+
+  rule <k> checkMethod(F:Id, T:Type, C:Id) => checkSubtype(T, T') ...</k>
+       <className> C </className>
+       <ctenv>... F |-> T':Type ...</ctenv>
+
+  rule <k> checkMethod(F:Id, _T:Type, (C:Id => C')) ...</k>
+       <className> C </className>
+       <baseClass> C':Id </baseClass>
+       <ctenv> Rho </ctenv>
+    requires notBool(F in keys(Rho))
+
+  rule checkMethod(_:Id,_,Object) => .K
+

Generic operations which could be part of the K framework

+ +
  syntax KItem ::= stuck(K)
+
+  syntax KItem ::= "discard"
+  rule _:KResult ~> discard => .K
+
+endmodule
+

FUN — Untyped — Environment

+ +

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+ +

This is the K semantic definition of the untyped FUN language.
+FUN is a pedagogical and research language that captures the essence
+of the functional programming paradigm, extended with several features
+often encountered in functional programming languages.
+Like many functional languages, FUN is an expression language, that
+is, everything, including the main program, is an expression.
+Functions can be declared anywhere and are first class values in the
+language.
+FUN is call-by-value here, but it has been extended (as student
+homework assignments) with other parameter-passing styles.
+To make it more interesting and to highlight some of K's strengths,
+FUN includes the following features:

  • The basic builtin data-types of integers, booleans and strings.

  • Builtin lists, which can hold any elements, including other lists.
    Lists are enclosed in square brackets and their elements are
    comma-separated; e.g., [1,2,3].

  • User-defined data-types, by means of constructor terms.
    Constructor names start with a capital letter (while any other
    identifier in the language starts with a lowercase letter), and they
    can be followed by an arbitrary number of comma-separated arguments
    enclosed in parentheses; parentheses are not needed when the
    constructor takes no arguments.
    For example, Pair(5,7) is a constructor term holding two
    numbers, Cons(1,Cons(2,Cons(3,Nil))) is a list-like
    constructor term holding 3 elements, and
    Tree(Tree(Leaf(1), Leaf(2)), Leaf(3)) is a tree-like
    constructor term holding 3 elements.
    In the untyped version of the FUN language, no type checking or
    inference is performed to ensure that the data constructors are used
    correctly.
    The execution will simply get stuck when they are misused.
    Moreover, since no type checking is performed, the data-types are not
    even declared in the untyped version of FUN.

  • Functions and let/letrec binders can take multiple space-separated
    arguments, but these are desugared to ones that only take one
    argument, by currying. For example, the expressions

      fun x y -> x y
      let x y = y in x

    are desugared, respectively, into the following expressions:

      fun x -> fun y -> x y
      let x = fun y -> y in x

  • Functions can be defined using pattern matching over the
    available data-types. For example, the program

      letrec max = fun [h] -> h
                   |   [h|t] -> let x = max t
                                in  if h > x then h else x
      in max [1, 3, 5, 2, 4, 0, -1, -5]

    defines a function max that calculates the maximum element of
    a non-empty list, and the function

      letrec ack = fun Pair(0,n) -> n + 1
                   |   Pair(m,0) -> ack Pair(m - 1, 1)
                   |   Pair(m,n) -> ack Pair(m - 1, ack Pair(m, n - 1))
      in ack Pair(2,3)

    calculates the Ackermann function applied to a particular pair of
    numbers.
    Patterns can be nested. Patterns can currently only be used in
    function definitions, and not directly in let/letrec binders.
    For example, this is not allowed:

      letrec Pair(x,y) = Pair(1,2) in x+y

    But this is allowed:

      let f Pair(x,y) = x+y in f Pair(1,2)

    because it is first reduced to

      let f = fun Pair(x,y) -> x+y in f Pair(1,2)

    by uncurrying of the let binder, and pattern matching is
    allowed in function arguments.

  • We include a callcc construct, for two reasons: first,
    several functional languages support this construct; second, some
    semantic frameworks have difficulties defining it. Not K.

  • Finally, we include mutables by means of referencing an
    expression, getting the reference of a variable, dereferencing and
    assignment. We include these for the same reasons as above: there
    are languages which have them, and they are not easy to define in
    some semantic frameworks.

Like in many other languages, some of FUN's constructs can be
+desugared into a smaller set of basic constructs. We do that as usual,
+using macros, and then we only give semantics to the core constructs.

+

Note:
+We recommend the reader to first consult the dynamic semantics of the
+LAMBDA++ language in the first part of the K Tutorial.
+To keep the comments below small and focused, we will not re-explain
+functional or K features that have already been explained in there.

+

Syntax

+ +
//require "modules/pattern-matching.k"
+
+module FUN-UNTYPED-COMMON
+  imports DOMAINS-SYNTAX
+

FUN is an expression language. The constructs below fall into
+several categories: names, arithmetic constructs, conventional
+functional constructs, patterns and pattern matching, data constructs,
+lists, references, and call-with-current-continuation (callcc).
+The arithmetic constructs are standard; they are present in almost all
+our K language definitions. The meaning of FUN's constructs is
+discussed in more depth when we define their semantics in the next
+module.

+

The Syntactic Constructs

+ +

We start with the syntactic definition of FUN names.
+We have several categories of names: ones to be used for functions and
+variables, others to be used for data constructors, others for types and
+others for type variables. We will introduce them as needed, starting
+with the former category. We prefer the names of variables and functions
+to start with lower case letters. We take the liberty of tacitly
+introducing syntactic lists/sequences for each nonterminal for which
+we need them:

+
  syntax Name                                      [token]
+  syntax Names ::= List{Name,","}                  [overload(exps)]
+

Expression constructs will be defined throughout the syntax module.
+Below are the very basic ones, namely the builtins, the names, and the
+parentheses used as brackets for grouping. Lists of expressions are
+declared strict, so all expressions in the list get evaluated whenever
+the list is in a position which can be evaluated:

+
  syntax Exp ::= Int | Bool | String | Name
+               | "(" Exp ")"                       [bracket]
+  syntax Exps  ::= List{Exp,","}                   [strict, overload(exps)]
+  syntax Val
+  syntax Exp ::= Val
+  syntax Exps ::= Vals
+  syntax Vals ::= List{Val,","}                    [overload(exps)]
+  syntax Bottom
+  syntax Bottoms ::= List{Bottom,","}              [overload(exps)]
+

We next define the syntax of arithmetic constructs, together with
+their relative priorities and left-/non-associativities. We also
+tag all these rules as members of a new group, "arith", so we can more easily
+define global syntax priorities later (at the end of the syntax module).

+
  syntax Exp ::= left:
+                 Exp "*" Exp                       [strict, group(arith)]
+               | Exp "/" Exp                       [strict, group(arith)]
+               | Exp "%" Exp                       [strict, group(arith)]
+               > left:
+                 Exp "+" Exp                       [strict, left, group(arith)]
+               | Exp "^" Exp                       [strict, left, group(arith)]
+// left attribute should not be necessary; currently a parsing bug
+               | Exp "-" Exp                       [strict, prefer, group(arith)]
+// the "prefer" attribute above is to not parse x-1 as x(-1)
+// Due to some parsing problems, we currently cannot add unary minus:
+               | "-" Exp                           [strict, group(arith)]
+               > non-assoc:
+                 Exp "<" Exp                       [strict, group(arith)]
+               | Exp "<=" Exp                      [strict, group(arith)]
+               | Exp ">" Exp                       [strict, group(arith)]
+               | Exp ">=" Exp                      [strict, group(arith)]
+               | Exp "==" Exp                      [strict, group(arith)]
+               | Exp "!=" Exp                      [strict, group(arith)]
+               > "!" Exp                           [strict, group(arith)]
+               > Exp "&&" Exp                      [strict(1), left, group(arith)]
+               > Exp "||" Exp                      [strict(1), left, group(arith)]
+

The conditional construct has the expected evaluation strategy,
+stating that only the first argument is evaluated:

+
  syntax Exp ::= "if" Exp "then" Exp "else" Exp    [strict(1)]
+

FUN's builtin lists are formed by enclosing comma-separated
+sequences of expressions (i.e., terms of sort Exps) in square
+brackets. The list constructor cons adds a new element to the
+top of the list, head and tail get the first element
+and the tail sublist of a list if they exist, respectively, and get
+stuck otherwise, and null? tests whether a list is empty or
+not; syntactically, these are just expression constants.
+In function patterns, we are also going to allow patterns following the
+usual head/tail notation; for example, the pattern [x_1,...,x_n|t]
+binds x_1, ..., x_n to the first elements of the matched list,
+and t to the list formed with the remaining elements. We define list
+patterns as ordinary expression constructs, although we will make sure that
+we do not give them semantics if they appear in any other place than in a
+function case pattern.

+
  syntax Exp ::= "[" Exps "]"                             [strict, klabel(list)]
+               | "head" [macro] | "tail" [macro] | "null?" [macro]
+               | "[" Exps "|" Exp "]"
+  syntax Val ::= "[" Vals "]"                             [klabel(list)]
+  syntax Cons ::= "cons"
+  syntax Val ::= Cons
+  syntax Val ::= Cons Val                                 [klabel(apply)]
+

Data constructors start with capital letters and they may or may
+not have arguments. We need to use the attribute "prefer" to make
+sure that, e.g., Cons(a) parses as constructor Cons with
+argument a, and not as the expression Cons (because
+constructor names are also expressions) regarded as a function applied
+to the expression a. Also, note that the constructor is strict
+in its second argument, because we want to evaluate its arguments but
+not the constructor name itself.

+
  syntax ConstructorName                         [token]
+  syntax Exp ::= ConstructorName
+               | ConstructorName "(" Exps ")"    [prefer, strict(2), klabel(constructor)]
+  syntax Val ::= ConstructorName "(" Vals ")"    [klabel(constructor)]
+

A function is essentially a |-separated ordered
+sequence of cases, each case of the form pattern -> expression,
+preceded by the language construct fun. Patterns will be defined
+shortly, both for the builtin lists and for user-defined constructors.
+Recall that the syntax we define in K is not meant to serve as an
+ultimate parser for the defined language, but rather as a convenient
+notation for K abstract syntax trees, which we prefer when we write
+the semantic rules. It is therefore often the case that we define a
+more ``generous'' syntax than we want to allow programs to use.
+We do it here, too. Specifically, the syntax of Cases
+below allows any expressions to appear as pattern. This syntactic
+relaxation permits many wrong programs to be parsed, but that is not a
+problem because we are not going to give semantics to wrong combinations,
+so those programs will get stuck; moreover, our type inferencer will reject
+those programs anyway. Function application is just concatenation of
+expressions, without worrying about type correctness. Again, the type
+system will reject type-incorrect programs.

+
  syntax Exp ::= "fun" Cases
+               | Exp Exp                              [strict, left, klabel(apply)]
+// NOTE: We would like eventually to also have Exp "(" Exps ")
+  syntax Case  ::= Exp "->" Exp
+  syntax Cases ::= List{Case, "|"}
+

The let and letrec binders have the usual syntax
+and functional meaning. We allow multiple and-separated bindings.
+Like for the function cases above, we allow a more generous syntax for
+the left-hand sides of bindings, noting that the semantics will get stuck
+on incorrect bindings and that the type system will reject those programs.

+
  syntax Exp ::= "let" Bindings "in" Exp
+               | "letrec" Bindings "in" Exp                 [prefer]
+// The "prefer" attribute for letrec currently needed due to tool bug,
+// to make sure that "letrec" is not parsed as "let rec".
+  syntax Binding  ::= Exp "=" Exp
+  syntax Bindings ::= List{Binding,"and"}
+

References are first class values in FUN. The construct ref
+takes an expression, evaluates it, and then it stores the resulting value
+at a fresh location in the store and returns that reference. Syntactically,
+ref is just an expression constant. The construct &
+takes a name as argument and evaluates to a reference, namely the store
+reference where the variable passed as argument stores its value; this
+construct is a bit controversial and is further discussed in the
+environment-based semantics of the FUN language, where we desugar
+ref to it. The construct @ takes a reference
+and evaluates to the value stored there. The construct := takes
+two expressions, the first expected to evaluate to a reference; the value
+of its second argument will be stored at the location to which the first
+points (the old value is thus lost). Finally, since expression evaluation
+now has side effects, it makes sense to also add a sequential composition
+construct, which is sequentially strict. This evaluates to the value of
+its second argument; the value of the first argument is lost (which has
+therefore been evaluated only for its side effects).

+
  syntax Exp ::= "ref"                             [macro]
+               | "&" Name
+               | "@" Exp                                     [strict]
+               | Exp ":=" Exp                                [strict]
+               | Exp ";" Exp                       [strict(1), right]
+

Call-with-current-continuation, named callcc in FUN, is a
+powerful control operator that originated in the Scheme programming
+language, but it now exists in many other functional languages. It works
+by evaluating its argument, expected to evaluate to a function, and by
+passing the current continuation, or evaluation context (or computation,
+in K terminology), as a special value to it. When/If this special value
+is invoked, the current context is discarded and replaced with the one
+held by the special value and the computation continues from there.
+It is like taking a snapshot of the execution context at some moment
+in time and then, when desired, being able to get back in time to that
+point. If you like games, it is like saving the game now (so you can
+work on your homework!) and then continuing the game tomorrow or whenever
+you wish. To illustrate the strength of callcc, we also
+allow exceptions in FUN by means of a conventional try-catch
+construct, which will desugar to callcc. We also need to
+introduce the special expression constant throw, but we need to
+use it as a function argument name in the desugaring macro, so we define
+it as a name instead of as an expression constant:

+
  syntax Exp ::= "try" Exp "catch" "(" Name ")" Exp [macro]
+  syntax Val ::= "callcc"
+  syntax Name ::= "throw" [token]
+

Finally, FUN also allows polymorphic datatype declarations. These
+will be useful when we define the type system later on.

+
  syntax Exp ::= "datatype" Type "=" TypeCases Exp [macro]
+// NOTE: In a future version of K, we want the datatype declaration
+// to be a construct by itself, but that is not possible currently
+// because K's parser wrongly identifies the __ operation allowing
+// a declaration to appear in front of an expression with the function
+// application construct, giving ambiguous parsing errors.
+

We next need to define the syntax of types and type cases that appear
+in datatype declarations.

+

Like in many functional languages, type parameters/variables in
+user-defined types are quoted identifiers.

+
  syntax TypeVar                        [token]
+  syntax TypeVars ::= List{TypeVar,","} [overload(types)]
+

Types can be basic types, function types, or user-defined
+parametric types. In the dynamic semantics we are going to simply ignore
+all the type declarations, so here the syntax of types below is only useful
+for generating the desired parser. To avoid syntactic ambiguities with
+the arrow construct for function cases, we use the symbol --> as
+a constructor for function types:

+
  syntax TypeName [token]
+  syntax Type ::= "int" | "bool" | "string"
+                | Type "-->" Type                            [right]
+                | "(" Type ")"                             [bracket]
+                | TypeVar
+                | TypeName             [klabel(TypeName), avoid]
+                | Type TypeName   [klabel(Type-TypeName), symbol, macro]
+                | "(" Types ")" TypeName                    [prefer]
+  syntax Types ::= List{Type,","} [overload(types)]
+  syntax Types ::= TypeVars
+
+  syntax TypeCase ::= ConstructorName
+                    | ConstructorName "(" Types ")"
+  syntax TypeCases ::= List{TypeCase,"|"}     [symbol(_|TypeCase_)]
+

Additional Priorities

+ +
  syntax priority @__FUN-UNTYPED-COMMON
+                > apply
+                > arith
+                > _:=__FUN-UNTYPED-COMMON
+                > let_in__FUN-UNTYPED-COMMON
+                  letrec_in__FUN-UNTYPED-COMMON
+                  if_then_else__FUN-UNTYPED-COMMON
+                > _;__FUN-UNTYPED-COMMON
+                > fun__FUN-UNTYPED-COMMON
+                > datatype_=___FUN-UNTYPED-COMMON
+endmodule
+
+module FUN-UNTYPED-MACROS
+  imports FUN-UNTYPED-COMMON
+

Desugaring macros

+ +

We desugar the list non-constructor operations to functions matching
+over list patterns. In order to do that we need some new variables; for
+those, we follow the same convention as in the K tutorial, where we
+added them as new identifier constructs starting with the character $,
+so we can easily recognize them when we debug or trace the semantics.

+
  syntax Name ::= "$h" [token] | "$t" [token]
+  rule head => fun [$h|$t] -> $h
+  rule tail => fun [$h|$t] -> $t
+  rule null? => fun [.Exps] -> true | [$h|$t] -> false
+

Multiple-head list patterns desugar into successive one-head patterns:

+
  rule [E1,E2,Es:Exps|T] => [E1|[E2,Es|T]]                   [anywhere]
+

Uncurrying of multiple arguments in functions and binders:

+
  rule P1 P2 -> E => P1 -> fun P2 -> E                       [anywhere]
+  rule F P = E => F = fun P -> E                             [anywhere]
+

We desugar the try-catch construct into callcc:

+
  syntax Name ::= "$k" [token] | "$v" [token]
+  rule try E catch(X) E'
+    => callcc (fun $k -> (fun throw -> E)(fun X -> $k E'))
+
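For example, an illustrative expression such as try throw 7 catch(x) x + 1
desugars (with parentheses added for readability) to

    callcc (fun $k -> (fun throw -> throw 7)(fun x -> $k (x + 1)))

which, under the semantics given later, binds throw to a function that escapes
to the continuation $k, so the whole expression evaluates to 8.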

For uniformity, we reduce all types to their general form:

+
  rule `Type-TypeName`(T:Type, Tn:TypeName) => (T) Tn
+

The dynamic semantics ignores all the type declarations:

+
  rule datatype _T = _TCs E => E
+
+endmodule
+
+
+module FUN-UNTYPED-SYNTAX
+  imports FUN-UNTYPED-COMMON
+  imports BUILTIN-ID-TOKENS
+
+  syntax Name ::= r"[a-z][_a-zA-Z0-9]*"           [token, prec(2)]
+                | #LowerId                        [token]
+  syntax ConstructorName ::= #UpperId             [token]
+  syntax TypeVar  ::= r"['][a-z][_a-zA-Z0-9]*"    [token]
+  syntax TypeName ::= Name                        [token]
+endmodule
+

Semantics

+ +

The semantics below is environment-based. A substitution-based
+definition of FUN is also available, but that drops the &
+construct as explained above.

+
module FUN-UNTYPED
+  imports FUN-UNTYPED-COMMON
+  imports FUN-UNTYPED-MACROS
+  imports DOMAINS
+  //imports PATTERN-MATCHING
+

Configuration

+ +

The k, env, and store cells are standard
+(see, for example, the definition of LAMBDA++ or IMP++ in the first
+part of the K tutorial).

+
  configuration <T color="yellow">
+                  <k color="green"> $PGM:Exp </k>
+                  <env color="violet"> .Map </env>
+                  <store color="white"> .Map </store>
+                </T>
+

Values and results

+ +

We only define integers, Booleans and strings as values here, but will
+add more values later.

+
  syntax Val ::= Int | Bool | String
+  syntax Vals ::= Bottoms
+  syntax KResult ::= Val
+

Lookup

+ +
  rule <k> X:Name => V ...</k>
+       <env>... X |-> L ...</env>
+       <store>... L |-> V ...</store>
+

Arithmetic expressions

+ +
  rule I1 * I2 => I1 *Int I2
+  rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0
+  rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0
+  rule I1 + I2 => I1 +Int I2
+  rule S1 ^ S2 => S1 +String S2
+  rule I1 - I2 => I1 -Int I2
+  rule - I => 0 -Int I
+  rule I1 < I2 => I1 <Int I2
+  rule I1 <= I2 => I1 <=Int I2
+  rule I1 > I2 => I1 >Int I2
+  rule I1 >= I2 => I1 >=Int I2
+  rule V1:Val == V2:Val => V1 ==K V2
+  rule V1:Val != V2:Val => V1 =/=K V2
+  rule ! T => notBool(T)
+  rule true  && E => E
+  rule false && _ => false
+  rule true  || _ => true
+  rule false || E => E
+

Conditional

+ +
  rule if  true then E else _ => E
+  rule if false then _ else E => E
+

Lists

+ +

We have already declared the syntactic list of expressions strict, so
+we can assume that all the elements that appear in a FUN list are
+evaluated. The only thing left to do is to state that a list of
+values is a value itself, that is, that the list square-bracket
+construct is indeed a constructor, and to give the semantics of
+cons. Since cons is a builtin function and is
+expected to take two arguments, we have to also state that
+cons itself is a value (specifically, a function/closure
+value, but we do not need that level of detail here), and also that
+cons applied to a value is a value (specifically, it would be
+a function/closure value that expects the second, list argument):

+
  rule cons V:Val [Vs:Vals] => [V,Vs]
+
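For instance, under the rule above, the illustrative expression

    cons 1 [2, 3]

evaluates to [1, 2, 3]; similarly, head [1, 2, 3] first desugars (by the
macros module) to (fun [$h|$t] -> $h) [1, 2, 3] and then evaluates to 1.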

Data Constructors

+ +

Constructors take values as arguments and produce other values:

+
  syntax Val ::= ConstructorName
+

Functions and Closures

+ +

Like in the environment-based semantics of LAMBDA++ in the first part
+of the K tutorial, functions evaluate to closures. A closure includes
+the current environment besides the function contents; the environment
+will be used at execution time to lookup all the variables that appear
+free in the function body (we want static scoping in FUN).

+
  syntax Val ::= closure(Map,Cases)
+  rule <k> fun Cases => closure(Rho,Cases) ...</k>  <env> Rho </env>
+

Note: The reader may want to get familiar with
+how the pre-defined pattern matching works before proceeding.
+The best way to do that is to consult
+k/include/modules/pattern-matching.k.

+

We distinguish two cases when the closure is applied.
+If the first pattern matches, then we pick the first case: switch to
+the closed environment, get the matching map and bind all its
+variables, and finally evaluate the function body of the first case,
+making sure that the environment is properly recovered afterwards.
+If the first pattern does not match, then we drop it and thus move on
+to the next one.

+
  rule (.K => getMatching(P, V)) ~> closure(_, P->_ | _) V:Val
+  rule <k> matchResult(M:Map) ~> closure(Rho, _->E | _) _
+           => bindMap(M) ~> E ~> setEnv(Rho') ...</k>
+       <env> Rho' => Rho </env>
+  rule (matchFailure => .K) ~> closure(_, (_->_ | Cs:Cases => Cs)) _
+//  rule <k> closure(Rho, P->E | _) V:Val
+//           => bindMap(getMatching(P,V)) ~> E ~> setEnv(Rho') ...</k>
+//       <env> Rho' => Rho </env>  when isMatching(P,V)
+//  rule closure(_, (P->_ | Cs:Cases => Cs)) V:Val  when notBool isMatching(P,V)
+
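For example, applying an illustrative two-case closure such as

    (fun Pair(x,y) -> x + y  |  [h|t] -> h) Pair(3, 4)

first tries the pattern Pair(x,y), which matches with x bound to 3 and y bound
to 4, so the application evaluates to 7; had the argument been a list, the
first case would have been dropped (third rule above) and the second case
tried instead.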

Let and Letrec

+ +

To highlight the similarities and differences between let and
+letrec, we prefer to give them direct semantics instead of
+desugaring them as in LAMBDA. See the formal definitions of
+bindTo, bind, and assignTo at the end of
+this module. Informally, bindTo(Xs, Es) first
+evaluates the expressions Es in Exps in the current
+environment (i.e., it is strict in its second argument), then it binds
+the variables in Xs in Names to new locations and adds
+those bindings to the environment, and finally writes the values
+previously obtained after evaluating the expressions Es to those
+new locations; bind(Xs) does only the bindings of
+Xs to new locations and adds those bindings to the environment;
+and assignTo(Xs,Es) evaluates the expressions
+Es in the current environment and then it writes the resulting
+values to the locations to which the variables Xs are already
+bound to in the environment.

+

Therefore, let Xs = Es in E first
+evaluates Es in the current environment, then adds new
+bindings for Xs to fresh locations in the environment, then
+writes the values of Es to those locations, and finally
+evaluates E in the new environment, making sure that the
+environment is properly recovered after the evaluation of E.
+On the other hand, letrec does the same things but in a
+different order: it first adds new bindings for Xs to fresh
+locations in the environment, then it evaluates Es in the new
+environment, then it writes the resulting values to their
+corresponding locations, and finally it evaluates E and
+recovers the environment. The crucial difference is that the
+expressions Es now see the locations of the variables Xs
+in the environment, so if they are functions, which is typically the
+case with letrec, their closures will encapsulate in their
+environments the bindings of all the bound variables, including
+themselves (thus, we may have a closure value stored at location
+L, whose environment contains a binding of the form
+F ↦ L; this way, the closure can invoke
+itself).

+
  rule <k> let Bs in E
+        => bindTo(names(Bs),exps(Bs)) ~> E ~> setEnv(Rho) ...</k>
+       <env> Rho </env>
+
+  rule <k> letrec Bs in E
+        => bind(names(Bs))~>assignTo(names(Bs),exps(Bs))~>E~>setEnv(Rho)...</k>
+       <env> Rho </env>
+
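For example, in the illustrative program below the closure created for f
captures an environment in which f is already bound to a location, so the
recursive call in its body finds f:

    letrec f = fun 0 -> 1
               |   n -> n * (f (n - 1))
    in f 5

This evaluates to 120; with let instead of letrec, the occurrence of f inside
the function body would not be bound and the program would get stuck.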

Recall that our syntax allows let and letrec to
+take any expression in place of its binding. This allows us to use
+the already existing function application construct to bind names to
+functions, such as, e.g., let x y = y in ....
+The desugaring macro in the syntax module uncurries such declarations,
+and then the semantic rules above only work when the remaining
+bindings are identifiers, so the semantics will get stuck on programs
+that misuse the let and letrec binders.

+

References

+ +

The semantics of references is self-explanatory, except maybe for the
+desugaring rule of ref, which is further discussed. Note
+that &X grabs the location of X from the environment.
+Sequential composition, which is needed only to accumulate the
+side effects due to assignments, was declared strict in its first argument.
+Once evaluated, its first argument is simply discarded:

+
  syntax Name ::= "$x" [token]
+  rule ref => fun $x -> & $x
+  rule <k> & X => L ...</k>  <env>... X |-> L ...</env>
+  rule <k> @ L:Int => V:Val ...</k>  <store>... L |-> V ...</store>
+  rule <k> L:Int := V:Val => V ...</k>  <store>... L |-> (_=>V) ...</store>
+  rule _V:Val; E => E
+
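For example, the illustrative expression

    let r = ref 5 in (r := @ r + 1 ; @ r)

first allocates a fresh location holding 5 and binds r to that reference; the
assignment then stores 6 at that location, and the final dereference makes the
whole expression evaluate to 6.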

The desugaring rule of ref (first rule above) works
+because & takes a variable and returns its location (like in C).
+Note that some ``pure'' functional programming researchers strongly dislike
+the & construct, but favor ref. We refrain from having
+a personal opinion on this issue here, but support & in the
+environment-based definition of FUN because it is, technically speaking,
+more powerful than ref. From a language design perspective, it
+would be equally easy to drop & and instead give a direct
+semantics to ref. In fact, this is precisely what we do in the
+substitution-based definition of FUN, because there appears to be no way
+to give a substitution-based definition to the & construct.

+

Callcc

+ +

As we know it from the LAMBDA++ tutorial, call-with-current-continuation
+is quite easy to define in K. We first need to define a special
+value wrapping an execution context, that is, an environment saying
+where the variables should be looked up, and a computation structure
+saying what is left to execute (in a substitution-based definition,
+this special value would be even simpler, as it would only need to
+wrap the computation structure---see, for example, the
+substitution-based semantics of LAMBDA++ in the first part of the
+K tutorial, or the substitution-based definition of FUN). Then
+callcc creates such a value containing the current
+environment and the current remaining computation, and passes it to
+its argument function. When/If invoked, the special value replaces
+the current execution context with its own and continues the execution
+normally.

+
  syntax Val ::= cc(Map,K)
+  rule <k> (callcc V:Val => V cc(Rho,K)) ~> K </k>  <env> Rho </env>
+  rule <k> cc(Rho,K) V:Val ~> _ => V ~> K </k>  <env> _ => Rho </env>
+
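For example, in the illustrative expression

    1 + callcc (fun k -> 2 + k 3)

callcc passes the pending context (add the result to 1) to its argument as a
cc value; the application k 3 discards the surrounding 2 + _ computation and
resumes the saved context with 3, so the whole expression evaluates to 4.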

Auxiliary operations

+ +

Environment recovery

+ +

The environment recovery operation is the same as for the LAMBDA++
+language in the K tutorial and many other languages provided with the
+K distribution. The commented-out ``anywhere'' rule below shows an elegant
+way to achieve the benefits of tail recursion in K.

+
  syntax KItem ::= setEnv(Map)  // TODO: get rid of env
+  //rule (setEnv(_) => .) ~> setEnv(_)  [anywhere]
+  rule <k> _:Val ~> (setEnv(Rho) => .K) ...</k> <env> _ => Rho </env>
+

bindTo, bind and assignTo

+ +

The meaning of these operations has already been explained when we
+discussed the let and letrec language constructs
+above.

+
  syntax KItem ::= bindTo(Names,Exps)         [strict(2)]
+                 | bindMap(Map)
+                 | bind(Names)
+
+  rule (.K => getMatchingAux(Xs,Vs)) ~> bindTo(Xs:Names,Vs:Vals)
+  rule matchResult(M:Map) ~> bindTo(_:Names, _:Vals) => bindMap(M)
+
+  rule bindMap(.Map) => .K
+  rule <k> bindMap((X:Name |-> V:Val => .Map) _:Map) ...</k>
+       <env> Rho => Rho[X <- !L:Int] </env>
+       <store>... .Map => !L |-> V ...</store>
+
+  rule bind(.Names) => .K
+  rule <k> bind(X:Name,Xs => Xs) ...</k>
+       <env> Rho => Rho[X <- !_L:Int] </env>
+
+  syntax KItem ::= assignTo(Names,Exps)  [strict(2)]
+
+  rule <k> assignTo(.Names,.Vals) => .K ...</k>
+  rule <k> assignTo((X:Name,Xs => Xs),(V:Val,Vs:Vals => Vs)) ...</k>
+       <env>... X |-> L ...</env>
+       <store>... .Map => L |-> V ...</store>
+

Getters

+ +

The following auxiliary operations extract the list of identifiers
+and of expressions in a binding, respectively.

+
  syntax Names ::= names(Bindings)  [function]
+  rule names(.Bindings) => .Names
+  rule names(X:Name=_ and Bs) => (X,names(Bs))::Names
+
+  syntax Exps ::= exps(Bindings)  [function]
+  rule exps(.Bindings) => .Exps
+  rule exps(_:Name=E and Bs) => E,exps(Bs)
+
+  /* Extra kore stuff */
+  syntax KResult ::= Vals
+  syntax Exps ::= Names
+  syntax Names ::= Bottoms
+
+  /* Matching */
+  syntax MatchResult ::= getMatching(Exp, Val)                      [function]
+                       | getMatchingAux(Exps, Vals)                 [function]
+                       | mergeMatching(MatchResult, MatchResult)    [function]
+                       | matchResult(Map)
+                       | "matchFailure"
+
+  rule getMatching(C:ConstructorName(Es:Exps), C(Vs:Vals)) => getMatchingAux(Es, Vs)
+  rule getMatching([Es:Exps], [Vs:Vals])                   => getMatchingAux(Es, Vs)
+  rule getMatching(C:ConstructorName, C) => matchResult(.Map)
+  rule getMatching(B:Bool, B)            => matchResult(.Map)
+  rule getMatching(I:Int, I)             => matchResult(.Map)
+  rule getMatching(S:String, S)          => matchResult(.Map)
+  rule getMatching(N:Name, V:Val) => matchResult(N |-> V)
+  rule getMatching(_, _) => matchFailure        [owise]
+
+  rule getMatchingAux((E:Exp, Es:Exps), (V:Val, Vs:Vals)) => mergeMatching(getMatching(E, V), getMatchingAux(Es, Vs))
+  rule getMatchingAux(.Exps, .Vals)                       => matchResult(.Map)
+  rule getMatchingAux(_, _) => matchFailure     [owise]
+
+  rule mergeMatching(matchResult(M1:Map), matchResult(M2:Map)) => matchResult(M1 M2)
+    requires intersectSet(keys(M1), keys(M2)) ==K .Set
+  //rule mergeMatching(_, _) => matchFailure      [owise]
+  rule mergeMatching(matchResult(_:Map), matchFailure) => matchFailure
+  rule mergeMatching(matchFailure, matchResult(_:Map)) => matchFailure
+  rule mergeMatching(matchFailure, matchFailure)       => matchFailure
+

Besides the generic decomposition rules for patterns and values,
+we also want to allow [head|tail] matching for lists, so we add
+the following custom pattern decomposition rule:

+
  rule getMatching([H:Exp | T:Exp], [V:Val, Vs:Vals])
+    => getMatchingAux((H, T), (V, [Vs]))
+endmodule
+

Go to Lesson 2, FUN untyped, Substitution-Based.

+

FUN — Untyped — Environment

+ +

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+ +

This is the K semantic definition of the untyped FUN language.
+FUN is a pedagogical and research language that captures the essence
+of the functional programming paradigm, extended with several features
+often encountered in functional programming languages.
+Like many functional languages, FUN is an expression language, that
+is, everything, including the main program, is an expression.
+Functions can be declared anywhere and are first class values in the
+language.
+FUN is call-by-value here, but it has been extended (as student
+homework assignments) with other parameter-passing styles.
+To make it more interesting and to highlight some of K's strengths,
+FUN includes the following features:

+
    +
  • +

    The basic builtin data-types of integers, booleans and strings.

    +
  • +
  • +

    Builtin lists, which can hold any elements, including other lists.
    +Lists are enclosed in square brackets and their elements are
    +comma-separated; e.g., [1,2,3].

    +
  • +
  • +

    User-defined data-types, by means of constructor terms.
    +Constructor names start with a capital letter (while any other
    +identifier in the language starts with a lowercase letter), and they
    +can be followed by an arbitrary number of comma-separated arguments
    +enclosed in parentheses; parentheses are not needed when the
    +constructor takes no arguments.
    +For example, Pair(5,7) is a constructor term holding two
    +numbers, Cons(1,Cons(2,Cons(3,Nil))) is a list-like
    +constructor term holding 3 elements, and
    +Tree(Tree(Leaf(1), Leaf(2)), Leaf(3)) is a tree-like
    +constructor term holding 3 elements.
    +In the untyped version of the FUN language, no type checking or
    +inference is performed to ensure that the data constructors are used
    +correctly.
    +The execution will simply get stuck when they are misused.
    +Moreover, since no type checking is performed, the data-types are not
    +even declared in the untyped version of FUN.

    +
  • +
  • +

    Functions and let/letrec binders can take
    +multiple space-separated arguments, but these are desugared to
    +ones that only take one argument, by currying. For example, the
    +expressions

    +
    fun x y -> x y
    +let x y = y in x
    +

    are desugared, respectively, into the following expressions:

    +
    fun x -> fun y -> x y
    +let x = fun y -> y in x
    +
  • +
  • +

    Functions can be defined using pattern matching over the
    +available data-types. For example, the program

    +
    letrec max = fun [h] -> h
    +             |   [h|t] -> let x = max t
    +                          in  if h > x then h else x
    +in max [1, 3, 5, 2, 4, 0, -1, -5]
    +

    defines a function max that calculates the maximum element of
    +a non-empty list, and the function

    +
    letrec ack = fun Pair(0,n) -> n + 1
    +             |   Pair(m,0) -> ack Pair(m - 1, 1)
    +             |   Pair(m,n) -> ack Pair(m - 1, ack Pair(m, n - 1))
    +in ack Pair(2,3)
    +

    calculates the Ackermann function applied to a particular pair of numbers.
    +Patterns can be nested. Patterns can currently only be used in function
    +definitions, and not directly in let/letrec binders.
    +For example, this is not allowed:

    +
    letrec Pai(x,y) = Pair(1,2) in x+y
    +

    But this is allowed:

    +
    let f Pair(x,y) = x+y in f Pair(1,2)
    +

    because it is first reduced to

    +
    let f = fun Pair(x,y) -> x+y in f Pair(1,2)
    +

    by uncurrying of the let binder, and pattern matching is
    +allowed in function arguments.

    +
  • +
  • +

    We include a callcc construct, for two reasons: first,
    +several functional languages support this construct; second, some
    +semantic frameworks have difficulties defining it. Not K.

    +
  • +
  • +

    Finally, we include mutables by means of referencing an
    +expression, getting the reference of a variable, dereferencing and
    +assignment. We include these for the same reasons as above: there are
    +languages which have them, and they are not easy to define in some
    +semantic frameworks.

    +
  • +
+

Like in many other languages, some of FUN's constructs can be
+desugared into a smaller set of basic constructs. We do that as usual,
+using macros, and then we only give semantics to the core constructs.

+

Note:
+We recommend the reader to first consult the dynamic semantics of the
+LAMBDA++ language in the first part of the K Tutorial.
+To keep the comments below small and focused, we will not re-explain
+functional or K features that have already been explained in there.

+

Syntax

+ +
//require "modules/pattern-matching.k"
+
+module FUN-UNTYPED-COMMON
+  imports DOMAINS-SYNTAX
+

FUN is an expression language. The constructs below fall into
+several categories: names, arithmetic constructs, conventional
+functional constructs, patterns and pattern matching, data constructs,
+lists, references, and call-with-current-continuation (callcc).
+The arithmetic constructs are standard; they are present in almost all
+our K language definitions. The meaning of FUN's constructs are
+discussed in more depth when we define their semantics in the next
+module.

+

The Syntactic Constructs

+ +

We start with the syntactic definition of FUN names.
+We have several categories of names: ones to be used for functions and
+variables, others to be used for data constructors, others for types and
+others for type variables. We will introduce them as needed, starting
+with the former category. We prefer the names of variables and functions
+to start with lower case letters. We take the freedom to tacitly introduce
+syntactic lists/sequences for each nonterminal for which we need them:

+
  syntax Name                                      [token]
+  syntax Names ::= List{Name,","}                  [overload(exps)]
+

Expression constructs will be defined throughout the syntax module.
+Below are the very basic ones, namely the builtins, the names, and the
+parentheses used as brackets for grouping. Lists of expressions are
+declared strict, so all expressions in the list get evaluated whenever
+the list is in a position which can be evaluated:

+
  syntax Exp ::= Int | Bool | String | Name
+               | "(" Exp ")"                       [bracket]
+  syntax Exps  ::= List{Exp,","}                   [strict, overload(exps)]
+  syntax Val
+  syntax Exp ::= Val
+  syntax Exps ::= Vals
+  syntax Vals ::= List{Val,","}                    [overload(exps)]
+  syntax Bottom
+  syntax Bottoms ::= List{Bottom,","}              [overload(exps)]
+

We next define the syntax of arithmetic constructs, together with
+their relative priorities and left-/non-associativities. We also
+tag all these rules as members of a new group, "arith", so we can more easily
+define global syntax priorities later (at the end of the syntax module).

+
  syntax Exp ::= left:
+                 Exp "*" Exp                       [strict, group(arith)]
+               | Exp "/" Exp                       [strict, group(arith)]
+               | Exp "%" Exp                       [strict, group(arith)]
+               > left:
+                 Exp "+" Exp                       [strict, left, group(arith)]
+               | Exp "^" Exp                       [strict, left, group(arith)]
+// left attribute should not be necessary; currently a parsing bug
+               | Exp "-" Exp                       [strict, prefer, group(arith)]
+// the "prefer" attribute above is to not parse x-1 as x(-1)
+// Due to some parsing problems, we currently cannot add unary minus:
+               | "-" Exp                           [strict, group(arith)]
+               > non-assoc:
+                 Exp "<" Exp                       [strict, group(arith)]
+               | Exp "<=" Exp                      [strict, group(arith)]
+               | Exp ">" Exp                       [strict, group(arith)]
+               | Exp ">=" Exp                      [strict, group(arith)]
+               | Exp "==" Exp                      [strict, group(arith)]
+               | Exp "!=" Exp                      [strict, group(arith)]
+               > "!" Exp                           [strict, group(arith)]
+               > Exp "&&" Exp                      [strict(1), left, group(arith)]
+               > Exp "||" Exp                      [strict(1), left, group(arith)]
+

The conditional construct has the expected evaluation strategy,
+stating that only the first argument is evaluated:

+
  syntax Exp ::= "if" Exp "then" Exp "else" Exp    [strict(1)]
+

FUN's builtin lists are formed by enclosing comma-separated
+sequences of expressions (i.e., terms of sort Exps) in square
+brackets. The list constructor cons adds a new element to the
+top of the list, head and tail get the first element
+and the tail sublist of a list if they exist, respectively, and get
+stuck otherwise, and null? tests whether a list is empty or
+not; syntactically, these are just expression constants.
+In function patterns, we are also going to allow patterns following the
+usual head/tail notation; for example, the pattern [x_1,...,x_n|t]
+binds x_1, ..., x_n to the first elements of the matched list,
+and t to the list formed with the remaining elements. We define list
+patterns as ordinary expression constructs, although we will make sure that
+we do not give them semantics if they appear in any other place than in a
+function case pattern.

+
  syntax Exp ::= "[" Exps "]"                             [strict, klabel(list)]
+               | "head" [macro] | "tail" [macro] | "null?" [macro]
+               | "[" Exps "|" Exp "]"
+  syntax Val ::= "[" Vals "]"                             [klabel(list)]
+  syntax Cons ::= "cons"
+  syntax Val ::= Cons
+  syntax Val ::= Cons Val                                 [klabel(apply)]
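
For instance, under the semantics given later in this lesson, we expect the
(hypothetical) expressions below to evaluate to 1, [2,3], and true,
respectively:

  head (cons 1 [2,3])
  tail [1,2,3]
  null? []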
+

Data constructors start with capital letters and they may or may
+not have arguments. We need to use the attribute "prefer" to make
+sure that, e.g., Cons(a) parses as constructor Cons with
+argument a, and not as the expression Cons (because
+constructor names are also expressions) regarded as a function applied
+to the expression a. Also, note that the constructor is strict
+in its second argument, because we want to evaluate its arguments but
+not the constructor name itself.

+
  syntax ConstructorName                         [token]
+  syntax Exp ::= ConstructorName
+               | ConstructorName "(" Exps ")"    [prefer, strict(2), klabel(constructor)]
+  syntax Val ::= ConstructorName "(" Vals ")"    [klabel(constructor)]
+

A function is essentially a |-separated ordered
+sequence of cases, each case of the form pattern -> expression,
+preceded by the language construct fun. Patterns will be defined
+shortly, both for the builtin lists and for user-defined constructors.
+Recall that the syntax we define in K is not meant to serve as an
+ultimate parser for the defined language, but rather as a convenient
+notation for K abstract syntax trees, which we prefer when we write
+the semantic rules. It is therefore often the case that we define a
+more ``generous'' syntax than we want to allow programs to use.
+We do it here, too. Specifically, the syntax of Cases
+below allows any expression to appear as a pattern. This syntactic
+relaxation permits many wrong programs to be parsed, but that is not a
+problem because we are not going to give semantics to wrong combinations,
+so those programs will get stuck; moreover, our type inferencer will reject
+those programs anyway. Function application is just concatenation of
+expressions, without worrying about type correctness. Again, the type
+system will reject type-incorrect programs.

+
  syntax Exp ::= "fun" Cases
+               | Exp Exp                              [strict, left, klabel(apply)]
+// NOTE: We would like eventually to also have Exp "(" Exps ")
+  syntax Case  ::= Exp "->" Exp
+  syntax Cases ::= List{Case, "|"}
+

The let and letrec binders have the usual syntax
+and functional meaning. We allow multiple and-separated bindings.
+Like for the function cases above, we allow a more generous syntax for
+the left-hand sides of bindings, noting that the semantics will get stuck
+on incorrect bindings and that the type system will reject those programs.

+
  syntax Exp ::= "let" Bindings "in" Exp
+               | "letrec" Bindings "in" Exp                 [prefer]
+// The "prefer" attribute for letrec currently needed due to tool bug,
+// to make sure that "letrec" is not parsed as "let rec".
+  syntax Binding  ::= Exp "=" Exp
+  syntax Bindings ::= List{Binding,"and"}
+

References are first class values in FUN. The construct ref
+takes an expression, evaluates it, and then it stores the resulting value
+at a fresh location in the store and returns that reference. Syntactically,
+ref is just an expression constant. The construct &
+takes a name as argument and evaluates to a reference, namely the store
+reference where the variable passed as argument stores its value; this
+construct is a bit controversial and is further discussed in the
+environment-based semantics of the FUN language, where we desugar
+ref to it. The construct @ takes a reference
+and evaluates to the value stored there. The construct := takes
+two expressions, the first expected to evaluate to a reference; the value
+of its second argument will be stored at the location to which the first
+points (the old value is thus lost). Finally, since expression evaluation
+now has side effects, it makes sense to also add a sequential composition
+construct, which is sequentially strict. This evaluates to the value of
+its second argument; the value of the first argument is lost (which has
+therefore been evaluated only for its side effects).

+
  syntax Exp ::= "ref"                             [macro]
+               | "&" Name
+               | "@" Exp                                     [strict]
+               | Exp ":=" Exp                                [strict]
+               | Exp ";" Exp                       [strict(1), right]
+

Call-with-current-continuation, named callcc in FUN, is a
+powerful control operator that originated in the Scheme programming
+language, but it now exists in many other functional languages. It works
+by evaluating its argument, expected to evaluate to a function, and by
+passing the current continuation, or evaluation context (or computation,
+in K terminology), as a special value to it. When/If this special value
+is invoked, the current context is discarded and replaced with the one
+held by the special value and the computation continues from there.
+It is like taking a snapshot of the execution context at some moment
+in time and then, when desired, being able to get back in time to that
+point. If you like games, it is like saving the game now (so you can
+work on your homework!) and then continuing the game tomorrow or whenever
+you wish. To illustrate the strength of callcc, we also
+allow exceptions in FUN by means of a conventional try-catch
+construct, which will desugar to callcc. We also need to
+introduce the special expression constant throw, but we need to
+use it as a function argument name in the desugaring macro, so we define
+it as a name instead of as an expression constant:

+
  syntax Exp ::= "try" Exp "catch" "(" Name ")" Exp [macro]
+  syntax Val ::= "callcc"
+  syntax Name ::= "throw" [token]
+
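
For instance, invoking the captured continuation discards the pending
computation inside the callcc argument. Under the semantics given later in
this lesson, we expect the (hypothetical) expression

  1 + callcc (fun k -> 2 + k 3)

to evaluate to 4: invoking k 3 abandons the pending 2 + _ computation and
resumes the outer 1 + _ context with 3.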

Finally, FUN also allows polymorphic datatype declarations. These
+will be useful when we define the type system later on.

+
  syntax Exp ::= "datatype" Type "=" TypeCases Exp [macro]
+// NOTE: In a future version of K, we want the datatype declaration
+// to be a construct by itself, but that is not possible currently
+// because K's parser wrongly identifies the __ operation allowing
+// a declaration to appear in front of an expression with the function
+// application construct, giving ambiguous parsing errors.
+
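
For instance, a (hypothetical) program can declare a parametric pair type and
then use its constructor; the dynamic semantics will simply discard the
declaration and evaluate the expression that follows it:

  datatype ('a,'b) pair = Pair('a,'b)
  Pair(3,4)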

We next need to define the syntax of types and type cases that appear
+in datatype declarations.

+

Like in many functional languages, type parameters/variables in
+user-defined types are quoted identifiers.

+
  syntax TypeVar                        [token]
+  syntax TypeVars ::= List{TypeVar,","} [overload(types)]
+

Types can be basic types, function types, or user-defined
+parametric types. In the dynamic semantics we are going to simply ignore
+all the type declarations, so here the syntax of types below is only useful
+for generating the desired parser. To avoid syntactic ambiguities with
+the arrow construct for function cases, we use the symbol --> as
+a constructor for function types:

+
  syntax TypeName [token]
+  syntax Type ::= "int" | "bool" | "string"
+                | Type "-->" Type                            [right]
+                | "(" Type ")"                             [bracket]
+                | TypeVar
+                | TypeName             [klabel(TypeName), avoid]
+                | Type TypeName   [klabel(Type-TypeName), symbol, macro]
+                | "(" Types ")" TypeName                    [prefer]
+  syntax Types ::= List{Type,","} [overload(types)]
+  syntax Types ::= TypeVars
+
+  syntax TypeCase ::= ConstructorName
+                    | ConstructorName "(" Types ")"
+  syntax TypeCases ::= List{TypeCase,"|"}     [symbol(_|TypeCase_)]
+

Additional Priorities

+ +
  syntax priority @__FUN-UNTYPED-COMMON
+                > apply
+                > arith
+                > _:=__FUN-UNTYPED-COMMON
+                > let_in__FUN-UNTYPED-COMMON
+                  letrec_in__FUN-UNTYPED-COMMON
+                  if_then_else__FUN-UNTYPED-COMMON
+                > _;__FUN-UNTYPED-COMMON
+                > fun__FUN-UNTYPED-COMMON
+                > datatype_=___FUN-UNTYPED-COMMON
+endmodule
+
+module FUN-UNTYPED-MACROS
+  imports FUN-UNTYPED-COMMON
+

Desugaring macros

+ +

We desugar the list non-constructor operations to functions matching
+over list patterns. In order to do that we need some new variables; for
+those, we follow the same convention as in the K tutorial, where we
+added them as new identifier constructs starting with the character $,
+so we can easily recognize them when we debug or trace the semantics.

+
  syntax Name ::= "$h" [token] | "$t" [token]
+  rule head => fun [$h|$t] -> $h
+  rule tail => fun [$h|$t] -> $t
+  rule null? => fun [.Exps] -> true | [$h|$t] -> false
+

Multiple-head list patterns desugar into successive one-head patterns:

+
  rule [E1,E2,Es:Exps|T] => [E1|[E2,Es|T]]                   [anywhere]
+
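
For instance, the (hypothetical) pattern [x,y|t] rewrites in one step to

  [x|[y|t]]

so only one-head list patterns need to be handled by the pattern-matching
rules.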

Uncurrying of multiple arguments in functions and binders:

+
  rule P1 P2 -> E => P1 -> fun P2 -> E                       [anywhere]
+  rule F P = E => F = fun P -> E                             [anywhere]
+

We desugar the try-catch construct into callcc:

+
  syntax Name ::= "$k" [token] | "$v" [token]
+  rule try E catch(X) E'
+    => callcc (fun $k -> (fun throw -> E)(fun X -> $k E'))
+
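
For instance, the (hypothetical) expression

  try (1 + throw 7) catch(x) x

desugars to

  callcc (fun $k -> (fun throw -> (1 + throw 7)) (fun x -> $k x))

and is expected to evaluate to 7: throw 7 invokes the captured continuation
$k, abandoning the pending 1 + _ computation.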

For uniformity, we reduce all types to their general form:

+
  rule `Type-TypeName`(T:Type, Tn:TypeName) => (T) Tn
+

The dynamic semantics ignores all the type declarations:

+
  rule datatype _T = _TCs E => E
+
+endmodule
+
+
+module FUN-UNTYPED-SYNTAX
+  imports FUN-UNTYPED-COMMON
+  imports BUILTIN-ID-TOKENS
+
+  syntax Name ::= r"[a-z][_a-zA-Z0-9]*"           [token, prec(2)]
+                | #LowerId                        [token]
+  syntax ConstructorName ::= #UpperId             [token]
+  syntax TypeVar  ::= r"['][a-z][_a-zA-Z0-9]*"    [token]
+  syntax TypeName ::= Name                        [token]
+endmodule
+

Semantics

+ +

The semantics below is environment-based. A substitution-based
+definition of FUN is also available, but that drops the &
+construct as explained above.

+
module FUN-UNTYPED
+  imports FUN-UNTYPED-COMMON
+  imports FUN-UNTYPED-MACROS
+  imports DOMAINS
+  //imports PATTERN-MATCHING
+

Configuration

+ +

The k, env, and store cells are standard
+(see, for example, the definition of LAMBDA++ or IMP++ in the first
+part of the K tutorial).

+
  configuration <T color="yellow">
+                  <k color="green"> $PGM:Exp </k>
+                  <env color="violet"> .Map </env>
+                  <store color="white"> .Map </store>
+                </T>
+

Values and results

+ +

We only define integers, Booleans and strings as values here, but will
+add more values later.

+
  syntax Val ::= Int | Bool | String
+  syntax Vals ::= Bottoms
+  syntax KResult ::= Val
+

Lookup

+ +
  rule <k> X:Name => V ...</k>
+       <env>... X |-> L ...</env>
+       <store>... L |-> V ...</store>
+

Arithmetic expressions

+ +
  rule I1 * I2 => I1 *Int I2
+  rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0
+  rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0
+  rule I1 + I2 => I1 +Int I2
+  rule S1 ^ S2 => S1 +String S2
+  rule I1 - I2 => I1 -Int I2
+  rule - I => 0 -Int I
+  rule I1 < I2 => I1 <Int I2
+  rule I1 <= I2 => I1 <=Int I2
+  rule I1 > I2 => I1 >Int I2
+  rule I1 >= I2 => I1 >=Int I2
+  rule V1:Val == V2:Val => V1 ==K V2
+  rule V1:Val != V2:Val => V1 =/=K V2
+  rule ! T => notBool(T)
+  rule true  && E => E
+  rule false && _ => false
+  rule true  || _ => true
+  rule false || E => E
+

Conditional

+ +
  rule if  true then E else _ => E
+  rule if false then _ else E => E
+

Lists

+ +

We have already declared the syntactic list of expressions strict, so
+we can assume that all the elements that appear in a FUN list are
+evaluated. The only thing left to do is to state that a list of
+values is a value itself, that is, that the list square-bracket
+construct is indeed a constructor, and to give the semantics of
+cons. Since cons is a builtin function and is
+expected to take two arguments, we have to also state that
+cons itself is a value (specifically, a function/closure
+value, but we do not need that level of detail here), and also that
+cons applied to a value is a value (specifically, it would be
+a function/closure value that expects the second, list argument):

+
  rule cons V:Val [Vs:Vals] => [V,Vs]
+
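
For instance, cons 1 is itself a value (a partially applied cons), and the
(hypothetical) expression cons 1 [2,3] rewrites by the rule above to [1,2,3].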

Data Constructors

+ +

Constructors take values as arguments and produce other values:

+
  syntax Val ::= ConstructorName
+

Functions and Closures

+ +

Like in the environment-based semantics of LAMBDA++ in the first part
+of the K tutorial, functions evaluate to closures. A closure includes
+the current environment besides the function contents; the environment
+will be used at execution time to lookup all the variables that appear
+free in the function body (we want static scoping in FUN).

+
  syntax Val ::= closure(Map,Cases)
+  rule <k> fun Cases => closure(Rho,Cases) ...</k>  <env> Rho </env>
+

Note: The reader may want to get familiar with
+how the pre-defined pattern matching works before proceeding.
+The best way to do that is to consult
+k/include/modules/pattern-matching.k.

+

We distinguish two cases when the closure is applied.
+If the first pattern matches, then we pick the first case: switch to
+the closed environment, get the matching map and bind all its
+variables, and finally evaluate the function body of the first case,
+making sure that the environment is properly recovered afterwards.
+If the first pattern does not match, then we drop it and thus move on
+to the next one.

+
  rule (.K => getMatching(P, V)) ~> closure(_, P->_ | _) V:Val
+  rule <k> matchResult(M:Map) ~> closure(Rho, _->E | _) _
+           => bindMap(M) ~> E ~> setEnv(Rho') ...</k>
+       <env> Rho' => Rho </env>
+  rule (matchFailure => .K) ~> closure(_, (_->_ | Cs:Cases => Cs)) _
+//  rule <k> closure(Rho, P->E | _) V:Val
+//           => bindMap(getMatching(P,V)) ~> E ~> setEnv(Rho') ...</k>
+//       <env> Rho' => Rho </env>  when isMatching(P,V)
+//  rule closure(_, (P->_ | Cs:Cases => Cs)) V:Val  when notBool isMatching(P,V)
+
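
For instance (a hypothetical application, using lowercase h and t as FUN
names), applying closure(Rho, [h|t] -> h | Cs) to the value [1,2,3] proceeds
roughly as follows:

  closure(Rho, [h|t] -> h | Cs) [1,2,3]
  // getMatching([h|t], [1,2,3]) evaluates to matchResult(h |-> 1  t |-> [2,3])
  // then: bindMap(h |-> 1  t |-> [2,3]) ~> h ~> setEnv(Rho')
  // the body h evaluates to 1 and setEnv restores the original environment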

Let and Letrec

+ +

To highlight the similarities and differences between let and
+letrec, we prefer to give them direct semantics instead of
+to desugar them like in LAMBDA. See the formal definitions of
+bindTo, bind, and assignTo at the end of
+this module. Informally, bindTo(Xs, Es) first
+evaluates the expressions Es in Exps in the current
+environment (i.e., it is strict in its second argument), then it binds
+the variables in Xs in Names to new locations and adds
+those bindings to the environment, and finally writes the values
+previously obtained after evaluating the expressions Es to those
+new locations; bind(Xs) does only the bindings of
+Xs to new locations and adds those bindings to the environment;
+and assignTo(Xs,Es) evaluates the expressions
+Es in the current environment and then it writes the resulting
+values to the locations to which the variables Xs are already
+bound to in the environment.

+

Therefore, let Xs = Es in E first
+evaluates Es in the current environment, then adds new
+bindings for Xs to fresh locations in the environment, then
+writes the values of Es to those locations, and finally
+evaluates E in the new environment, making sure that the
+environment is properly recovered after the evaluation of E.
+On the other hand, letrec does the same things but in a
+different order: it first adds new bindings for Xs to fresh
+locations in the environment, then it evaluates Es in the new
+environment, then it writes the resulting values to their
+corresponding locations, and finally it evaluates E and
+recovers the environment. The crucial difference is that the
+expressions Es now see the locations of the variables Xs
+in the environment, so if they are functions, which is typically the
+case with letrec, their closures will encapsulate in their
+environments the bindings of all the bound variables, including
+themselves (thus, we may have a closure value stored at location
+L, whose environment contains a binding of the form
+F ↦ L; this way, the closure can invoke
+itself).

+
  rule <k> let Bs in E
+        => bindTo(names(Bs),exps(Bs)) ~> E ~> setEnv(Rho) ...</k>
+       <env> Rho </env>
+
+  rule <k> letrec Bs in E
+        => bind(names(Bs))~>assignTo(names(Bs),exps(Bs))~>E~>setEnv(Rho)...</k>
+       <env> Rho </env>
+
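
For instance, the (hypothetical) program below relies precisely on this
ordering: the closure stored for f captures an environment in which f is
already bound to its own location, so the recursive call works and the
program should evaluate to 120:

  letrec f = fun 0 -> 1
             |   n -> n * (f (n - 1))
  in f 5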

Recall that our syntax allows let and letrec to
+take any expression in place of its binding. This allows us to use
+the already existing function application construct to bind names to
+functions, such as, e.g., let x y = y in ....
+The desugaring macro in the syntax module uncurries such declarations,
+and then the semantic rules above only work when the remaining
+bindings are identifiers, so the semantics will get stuck on programs
+that misuse the let and letrec binders.

+

References

+ +

The semantics of references is self-explanatory, except maybe for the
+desugaring rule of ref, which is further discussed. Note
+that &X grabs the location of X from the environment.
+Sequential composition, which is needed only to accumulate the
+side effects due to assignments, is declared strict only in its first argument.
+Once evaluated, its first argument is simply discarded:

+
  syntax Name ::= "$x" [token]
+  rule ref => fun $x -> & $x
+  rule <k> & X => L ...</k>  <env>... X |-> L ...</env>
+  rule <k> @ L:Int => V:Val ...</k>  <store>... L |-> V ...</store>
+  rule <k> L:Int := V:Val => V ...</k>  <store>... L |-> (_=>V) ...</store>
+  rule _V:Val; E => E
+
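
For instance, the (hypothetical) program below allocates a fresh location
holding 5 (via the desugaring of ref), increments the stored value through
the reference, and then reads it back, so we expect it to evaluate to 6:

  let r = ref 5
  in  (r := @ r + 1 ; @ r)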

The desugaring rule of ref (first rule above) works
+because & takes a variable and returns its location (like in C).
+Note that some ``pure'' functional programming researchers strongly dislike
+the & construct, but favor ref. We refrain from having
+a personal opinion on this issue here, but support & in the
+environment-based definition of FUN because it is, technically speaking,
+more powerful than ref. From a language design perspective, it
+would be equally easy to drop & and instead give a direct
+semantics to ref. In fact, this is precisely what we do in the
+substitution-based definition of FUN, because there appears to be no way
+to give a substitution-based definition to the & construct.

+

Callcc

+ +

As we know it from the LAMBDA++ tutorial, call-with-current-continuation
+is quite easy to define in K. We first need to define a special
+value wrapping an execution context, that is, an environment saying
+where the variables should be looked up, and a computation structure
+saying what is left to execute (in a substitution-based definition,
+this special value would be even simpler, as it would only need to
+wrap the computation structure---see, for example, the
+substitution-based semantics of LAMBDA++ in the first part of the
+K tutorial, or the substitution-based definition of FUN). Then
+callcc creates such a value containing the current
+environment and the current remaining computation, and passes it to
+its argument function. When/If invoked, the special value replaces
+the current execution context with its own and continues the execution
+normally.

+
  syntax Val ::= cc(Map,K)
+  rule <k> (callcc V:Val => V cc(Rho,K)) ~> K </k>  <env> Rho </env>
+  rule <k> cc(Rho,K) V:Val ~> _ => V ~> K </k>  <env> _ => Rho </env>
+

Auxiliary operations

+ +

Environment recovery

+ +

The environment recovery operation is the same as for the LAMBDA++
+language in the K tutorial and many other languages provided with the
+K distribution. The first ``anywhere'' rule below shows an elegant
+way to achieve the benefits of tail recursion in K.

+
  syntax KItem ::= setEnv(Map)  // TODO: get rid of env
+  //rule (setEnv(_) => .) ~> setEnv(_)  [anywhere]
+  rule <k> _:Val ~> (setEnv(Rho) => .K) ...</k> <env> _ => Rho </env>
+

bindTo, bind and assignTo

+ +

The meaning of these operations has already been explained when we
+discussed the let and letrec language constructs
+above.

+
  syntax KItem ::= bindTo(Names,Exps)         [strict(2)]
+                 | bindMap(Map)
+                 | bind(Names)
+
+  rule (.K => getMatchingAux(Xs,Vs)) ~> bindTo(Xs:Names,Vs:Vals)
+  rule matchResult(M:Map) ~> bindTo(_:Names, _:Vals) => bindMap(M)
+
+  rule bindMap(.Map) => .K
+  rule <k> bindMap((X:Name |-> V:Val => .Map) _:Map) ...</k>
+       <env> Rho => Rho[X <- !L:Int] </env>
+       <store>... .Map => !L |-> V ...</store>
+
+  rule bind(.Names) => .K
+  rule <k> bind(X:Name,Xs => Xs) ...</k>
+       <env> Rho => Rho[X <- !_L:Int] </env>
+
+  syntax KItem ::= assignTo(Names,Exps)  [strict(2)]
+
+  rule <k> assignTo(.Names,.Vals) => .K ...</k>
+  rule <k> assignTo((X:Name,Xs => Xs),(V:Val,Vs:Vals => Vs)) ...</k>
+       <env>... X |-> L ...</env>
+       <store>... .Map => L |-> V ...</store>
+

Getters

+ +

The following auxiliary operations extract the list of identifiers
+and of expressions in a binding, respectively.

+
  syntax Names ::= names(Bindings)  [function]
+  rule names(.Bindings) => .Names
+  rule names(X:Name=_ and Bs) => (X,names(Bs))::Names
+
+  syntax Exps ::= exps(Bindings)  [function]
+  rule exps(.Bindings) => .Exps
+  rule exps(_:Name=E and Bs) => E,exps(Bs)
+
+  /* Extra kore stuff */
+  syntax KResult ::= Vals
+  syntax Exps ::= Names
+  syntax Names ::= Bottoms
+
+  /* Matching */
+  syntax MatchResult ::= getMatching(Exp, Val)                      [function]
+                       | getMatchingAux(Exps, Vals)                 [function]
+                       | mergeMatching(MatchResult, MatchResult)    [function]
+                       | matchResult(Map)
+                       | "matchFailure"
+
+  rule getMatching(C:ConstructorName(Es:Exps), C(Vs:Vals)) => getMatchingAux(Es, Vs)
+  rule getMatching([Es:Exps], [Vs:Vals])                   => getMatchingAux(Es, Vs)
+  rule getMatching(C:ConstructorName, C) => matchResult(.Map)
+  rule getMatching(B:Bool, B)            => matchResult(.Map)
+  rule getMatching(I:Int, I)             => matchResult(.Map)
+  rule getMatching(S:String, S)          => matchResult(.Map)
+  rule getMatching(N:Name, V:Val) => matchResult(N |-> V)
+  rule getMatching(_, _) => matchFailure        [owise]
+
+  rule getMatchingAux((E:Exp, Es:Exps), (V:Val, Vs:Vals)) => mergeMatching(getMatching(E, V), getMatchingAux(Es, Vs))
+  rule getMatchingAux(.Exps, .Vals)                       => matchResult(.Map)
+  rule getMatchingAux(_, _) => matchFailure     [owise]
+
+  rule mergeMatching(matchResult(M1:Map), matchResult(M2:Map)) => matchResult(M1 M2)
+    requires intersectSet(keys(M1), keys(M2)) ==K .Set
+  //rule mergeMatching(_, _) => matchFailure      [owise]
+  rule mergeMatching(matchResult(_:Map), matchFailure) => matchFailure
+  rule mergeMatching(matchFailure, matchResult(_:Map)) => matchFailure
+  rule mergeMatching(matchFailure, matchFailure)       => matchFailure
+
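
For instance, matching the (hypothetical) pattern Pair(x,y) against the value
Pair(1,2) decomposes as follows (eliding some intermediate steps):

  getMatching(Pair(x,y), Pair(1,2))
    => getMatchingAux((x,y), (1,2))
    => mergeMatching(matchResult(x |-> 1), matchResult(y |-> 2))
    => matchResult(x |-> 1  y |-> 2)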

Besides the generic decomposition rules for patterns and values,
+we also want to allow [head|tail] matching for lists, so we add
+the following custom pattern decomposition rule:

+
  rule getMatching([H:Exp | T:Exp], [V:Val, Vs:Vals])
+    => getMatchingAux((H, T), (V, [Vs]))
+endmodule
+

Go to Lesson 2, FUN untyped, Substitution-Based.

+

// NOTE: this definition is not up to date with the latest version of K, as it
+// uses both substitution and symbolic reasoning.
+// It is intended for documentation and academic purposes only.

+

FUN — Untyped — Substitution

+ +

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+ +

This is the substitution-based definition of FUN. For additional
+explanations regarding the semantics of the various FUN constructs,
+the reader should consult the environment-based definition of FUN.

+

Syntax

+ +
requires "substitution.md"
+//requires "modules/pattern-matching.k"
+
+module FUN-UNTYPED-COMMON
+  imports DOMAINS-SYNTAX
+

The Syntactic Constructs

+ +
  syntax Name
+  syntax Names ::= List{Name,","}
+
+  syntax Exp ::= Int | Bool | String | Name
+               | "(" Exp ")"                       [bracket]
+  syntax Exps  ::= List{Exp,","}                   [strict]
+  syntax Val
+  syntax Vals ::= List{Val,","}
+
+  syntax Exp ::= left:
+                 Exp "*" Exp                       [strict, arith]
+               | Exp "/" Exp                       [strict, arith]
+               | Exp "%" Exp                       [strict, arith]
+               > left:
+                 Exp "+" Exp                       [strict, left, arith]
+               | Exp "^" Exp                       [strict, left, arith]
+               | Exp "-" Exp                       [strict, prefer, arith]
+               | "-" Exp                           [strict, arith]
+               > non-assoc:
+                 Exp "<" Exp                       [strict, arith]
+               | Exp "<=" Exp                      [strict, arith]
+               | Exp ">" Exp                       [strict, arith]
+               | Exp ">=" Exp                      [strict, arith]
+               | Exp "==" Exp                      [strict, arith]
+               | Exp "!=" Exp                      [strict, arith]
+               > "!" Exp                           [strict, arith]
+               > Exp "&&" Exp                      [strict(1), left, arith]
+               > Exp "||" Exp                      [strict(1), left, arith]
+
+  syntax Exp ::= "if" Exp "then" Exp "else" Exp    [strict(1)]
+
+  syntax Exp ::= "[" Exps "]"                      [strict]
+               | "cons" |  "head" | "tail" | "null?"
+               | "[" Exps "|" Exp "]"
+  syntax Val ::= "[" Vals "]"
+
+  syntax ConstructorName
+  syntax Exp ::= ConstructorName
+               | ConstructorName "(" Exps ")"      [prefer, strict(2)]
+  syntax Val ::= ConstructorName "(" Vals ")"
+
+  syntax Exp ::= "fun" Cases
+               | Exp Exp                           [strict, left]
+  syntax Case  ::= Exp "->" Exp                    [binder]
+// NOTE: The binder attribute above is the only difference between this
+// module and the syntax module of environment-based FUN.  We need
+// to fix a bug in order to import modules and override the attributes
+// of operations.
+  syntax Cases ::= List{Case, "|"}
+
+  syntax Exp ::= "let" Bindings "in" Exp
+               | "letrec" Bindings "in" Exp                 [prefer]
+  syntax Binding  ::= Exp "=" Exp
+  syntax Bindings ::= List{Binding,"and"}
+
+  syntax Exp ::= "ref"
+               | "&" Name
+               | "@" Exp                           [strict]
+               | Exp ":=" Exp                      [strict]
+               | Exp ";" Exp                       [strict(1), right]
+
+  syntax Exp ::= "callcc"
+               | "try" Exp "catch" "(" Name ")" Exp
+  syntax Name ::= "throw" [token]
+
+  syntax Exp ::= "datatype" Type "=" TypeCases Exp
+
+  syntax TypeVar
+  syntax TypeVars ::= List{TypeVar,","}
+
+  syntax TypeName
+  syntax Type ::= "int" | "bool" | "string"
+                | Type "-->" Type                            [right]
+                | "(" Type ")"                             [bracket]
+                | TypeVar
+                | TypeName             [klabel(TypeName), avoid]
+                | Type TypeName   [klabel(Type-TypeName), onlyLabel]
+                | "(" Types ")" TypeName                    [prefer]
+  syntax Types ::= List{Type,","}
+  syntax Types ::= TypeVars
+
+  syntax TypeCase ::= ConstructorName
+                    | ConstructorName "(" Types ")"
+  syntax TypeCases ::= List{TypeCase,"|"}     [klabel(_|TypeCase_)]
+

Additional Priorities

+ +
  syntax priority @__FUN-UNTYPED-COMMON
+                > ___FUN-UNTYPED-COMMON
+                > arith
+                > _:=__FUN-UNTYPED-COMMON
+                > let_in__FUN-UNTYPED-COMMON
+                  letrec_in__FUN-UNTYPED-COMMON
+                  if_then_else__FUN-UNTYPED-COMMON
+                > _;__FUN-UNTYPED-COMMON
+                > fun__FUN-UNTYPED-COMMON
+                > datatype_=___FUN-UNTYPED-COMMON
+endmodule
+
+module FUN-UNTYPED-MACROS
+  imports FUN-UNTYPED-COMMON
+

Desugaring macros

+ +
  rule P1 P2 -> E => P1 -> fun P2 -> E                       [macro-rec]
+  rule F P = E => F = fun P -> E                             [macro-rec]
+
+  rule [E1,E2,Es:Exps|T] => [E1|[E2,Es|T]]                   [macro-rec]
+
+//  rule 'TypeName(Tn:TypeName) => (.TypeVars) Tn              [macro]
+  rule `Type-TypeName`(T:Type, Tn:TypeName) => (T) Tn          [macro]
+
+  syntax Name ::= "$h" | "$t"
+  rule head => fun [$h|$t] -> $h                             [macro]
+  rule tail => fun [$h|$t] -> $t                             [macro]
+  rule null? => fun [.Exps] -> true | [$h|$t] -> false       [macro]
+
+  syntax Name ::= "$k" | "$v"
+  rule try E catch(X) E'
+    => callcc (fun $k -> (fun throw -> E)(fun X -> $k E'))   [macro]
+
+  rule datatype _T = _TCs E => E                               [macro]
+

mu is needed for letrec, but we put it here so we can also write
+programs with mu in them, which is particularly useful for testing.

+
  syntax Exp ::= "mu" Case
+
+endmodule
+
+
+module FUN-UNTYPED-SYNTAX
+  imports FUN-UNTYPED-COMMON
+  imports BUILTIN-ID-TOKENS
+
+  syntax Name ::= r"[a-z][_a-zA-Z0-9]*"            [token, prec(2)]
+                | #LowerId                         [token]
+  syntax ConstructorName ::= #UpperId              [token]
+  syntax TypeVar  ::= r"['][a-z][_a-zA-Z0-9]*"     [token]
+  syntax TypeName ::= Name                         [token]
+endmodule
+

Semantics

+ +
module FUN-UNTYPED
+  imports FUN-UNTYPED-COMMON
+  imports FUN-UNTYPED-MACROS
+  imports DOMAINS
+  imports SUBSTITUTION
+  //imports PATTERN-MATCHING
+
+  configuration <T color="yellow">
+                  <k color="green"> $PGM:Exp </k>
+                  <store color="white"> .Map </store>
+                </T>
+

Both names and functions are values now:

+
  syntax Val ::= Int | Bool | String | Name
+  syntax Exp ::= Val
+  syntax Exps ::= Vals
+  syntax KResult ::= Val
+  syntax Exps ::= Names
+  syntax Vals ::= Names
+
+  rule I1 * I2 => I1 *Int I2
+  rule I1 / I2 => I1 /Int I2 when I2 =/=K 0
+  rule I1 % I2 => I1 %Int I2 when I2 =/=K 0
+  rule I1 + I2 => I1 +Int I2
+  rule S1 ^ S2 => S1 +String S2
+  rule I1 - I2 => I1 -Int I2
+  rule - I => 0 -Int I
+  rule I1 < I2 => I1 <Int I2
+  rule I1 <= I2 => I1 <=Int I2
+  rule I1 > I2 => I1 >Int I2
+  rule I1 >= I2 => I1 >=Int I2
+  rule V1:Val == V2:Val => V1 ==K V2
+  rule V1:Val != V2:Val => V1 =/=K V2
+  rule ! T => notBool(T)
+  rule true  && E => E
+  rule false && _ => false
+  rule true  || _ => true
+  rule false || E => E
+
+  rule if  true then E else _ => E
+  rule if false then _ else E => E
+
+  rule isVal(cons) => true
+  rule isVal(cons _V:Val) => true
+  rule cons V:Val [Vs:Vals] => [V,Vs]
+
+  syntax Val ::= ConstructorName
+
+  rule isVal(fun _) => true
+  syntax KVar ::= Name
+  syntax Name ::= freshName(Int)    [freshGenerator, function]
+  rule freshName(I:Int) => {#parseToken("Name", "#" +String Int2String(I))}:>Name
+
+  rule (. => getMatching(P, V)) ~> (fun P->_ | _) V:Val
+  rule matchResult(M:Map) ~> (fun _->E | _) _ => E[M]
+  rule (matchFailure => .) ~> (fun (_->_ | Cs:Cases => Cs)) _
+//  rule (fun P->E | _) V:Val => E[getMatching(P,V)]  when isMatching(P,V)
+//  rule (fun (P->_ | Cs:Cases => Cs)) V:Val  when notBool isMatching(P,V)
+

We can reduce multiple bindings to one list binding, and then
+apply the usual desugaring of let into function application.
+It is important that the rule below is a macro, so let is eliminated
+immediately, otherwise it may interfere in ugly ways with substitution.

+
  rule let Bs in E => ((fun [names(Bs)] -> E) [exps(Bs)])    [macro]
+
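
For instance, the (hypothetical) expression

  let x = 1 and y = 2 in x + y

becomes (fun [x,y] -> x + y) [1,2]; the list pattern [x,y] then matches the
value list [1,2] through the generic pattern-matching rules below, so the
expression should evaluate to 3.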

We only give the semantics of one-binding letrec.
+Multiple bindings are left as an exercise.

+
  // changed because of parsing error
+  //rule mu X:Name -> E => E[(mu X -> E) / X]
+  rule mu X:Name -> E => E[X |-> (mu X -> E)]
+  rule letrec F:Name = E in E' => let F = (mu F -> E) in E'  [macro]
+
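
For instance, the (hypothetical) one-binding program

  letrec f = fun 0 -> 1 | n -> n * (f (n - 1)) in f 3

desugars to let f = (mu f -> fun 0 -> 1 | n -> n * (f (n - 1))) in f 3, and
unfolding mu by substitution should yield 6.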

We cannot have & anymore, but we can give direct
+semantics to ref. We also have to declare ref to
+be a value, so that we will never heat on it.

+
//  rule <k> & X => L ...</k>  <env>... X |-> L </env>
+  rule isVal(ref) => true
+  rule <k> ref V:Val => !L:Int ...</k> <store>... .Map => !L |-> V ...</store>
+  rule <k> @ L:Int => V:Val ...</k>  <store>... L |-> V ...</store>
+  rule <k> L:Int := V:Val => V ...</k>  <store>... L |-> (_=>V) ...</store>
+  rule _V:Val; E => E
+
+  syntax Val ::= cc(K)
+  rule isVal(callcc) => true
+  rule <k> (callcc V:Val => V cc(K)) ~> K </k>
+  rule <k> cc(K) V:Val ~> _ => V ~> K </k>
+

Auxiliary getters

+
  syntax Names ::= names(Bindings)  [function]
+  rule names(.Bindings) => .Names
+  rule names(X:Name=_ and Bs) => X,names(Bs)
+
+  syntax Exps ::= exps(Bindings)  [function]
+  rule exps(.Bindings) => .Exps
+  rule exps(_:Name=E and Bs) => E,exps(Bs)
+
+  /* Extra kore stuff */
+  syntax KResult ::= Vals
+  syntax Exps ::= Names
+
+  /* Matching */
+  syntax MatchResult ::= getMatching(Exp, Val)                      [function]
+                       | getMatchingAux(Exps, Vals)                 [function]
+                       | mergeMatching(MatchResult, MatchResult)    [function]
+                       | matchResult(Map)
+                       | "matchFailure"
+
+  rule getMatching(C:ConstructorName(Es:Exps), C(Vs:Vals)) => getMatchingAux(Es, Vs)
+  rule getMatching([Es:Exps], [Vs:Vals])                   => getMatchingAux(Es, Vs)
+  rule getMatching(C:ConstructorName, C) => matchResult(.Map)
+  rule getMatching(B:Bool, B)            => matchResult(.Map)
+  rule getMatching(I:Int, I)             => matchResult(.Map)
+  rule getMatching(S:String, S)          => matchResult(.Map)
+  rule getMatching(N:Name, V:Val) => matchResult(N |-> V)
+  rule getMatching(_, _) => matchFailure        [owise]
+
+  rule getMatchingAux((E:Exp, Es:Exps), (V:Val, Vs:Vals)) => mergeMatching(getMatching(E, V), getMatchingAux(Es, Vs))
+  rule getMatchingAux(.Exps, .Vals)                       => matchResult(.Map)
+  rule getMatchingAux(_, _) => matchFailure     [owise]
+
+  rule mergeMatching(matchResult(M1:Map), matchResult(M2:Map)) => matchResult(M1 M2)
+    requires intersectSet(keys(M1), keys(M2)) ==K .Set
+  //rule mergeMatching(_, _) => matchFailure      [owise]
+  rule mergeMatching(matchResult(_:Map), matchFailure) => matchFailure
+  rule mergeMatching(matchFailure, matchResult(_:Map)) => matchFailure
+  rule mergeMatching(matchFailure, matchFailure)       => matchFailure
+

Besides the generic decomposition rules for patterns and values,
+we also want to allow [head|tail] matching for lists, so we add
+the following custom pattern decomposition rule:

+
  rule getMatching([H:Exp | T:Exp], [V:Val, Vs:Vals])
+    => getMatchingAux((H, T), (V, [Vs]))
+endmodule
+

// NOTE: this definition is not runnable as is.
+// It is intended for documentation and academic purposes only.

+

LOGIK

+ +

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+ +

This is the K semantic definition of LOGIK, a trivial language
+capturing the essence of the logic programming paradigm. In this
+definition, we explicitly focus on simplicity and mathematical
+clarity, not on advanced logic programming features or performance.
+Those are covered in the LOGIK++ extension under examples/logik++.

+

Specifically, a LOGIK program consists of a sequence of Horn clauses
+of the form

+
P :- P1, P2, ..., Pn .
+

followed by a query of the form

+
?- Q1, Q2, ..., Qm .
+

where P, P1, P2, ..., Pn, Q1, Q2,
+..., Qm are literals. The
+symbol :- is read "if". A literal has the form
+p(T1,T2,...,Tk), where p is a predicate symbol
+and where T1,T2,...,Tk are terms. Terms are built as
+usual, with operation symbols and variables. A common
+convention in logic programming languages, also adopted here, is that
+variables are capitalized and operation symbols are not. Operations
+with zero arguments are called constants and are written without
+parentheses, that is, c instead of c(). Horn
+clauses without conditions, called facts, are written
+without :-, that is, P. instead of P :- ..

+

For example, the LOGIK program below gives a few facts about a
+parent predicate, then several clauses defining some useful
+predicates including an ancestor predicate, and finally a
+query asking for those who both have ancestors and are ancestors
+themselves in the parent relation:

+
parent(david,john).
+parent(jim,david).
+parent(steve,jim).
+parent(nathan,steve).
+
+grandparent(A,B):-
+  parent(A,X),
+  parent(X,B).
+
+ancestor(A,B):-
+  parent(A,X),
+  parents(X,B).
+
+parents(X,X).
+parents(A,B):-
+  ancestor(A,B).
+
+both(X) :- ancestor(A,X), ancestor(X,B).
+
+?- both(X).
+

Above, we only have constant operation symbols, so these and variables
+are the only terms that can be used in predicates. As expected, the
+LOGIK program above will give us three solutions for X:
+david, steve, and jim. If we inline the
+both(X) predicate in the query, that is, if we replace the
+query with ?- ancestor(A,X), ancestor(X,B). then we get
+10 solutions, one for each triple A, X, and
+B satisfying both predicates ancestor(A,X) and
+ancestor(X,B).

+

As another example, the program below defines an append
+predicate followed by a simple goal:

+
append(nil,L,L).
+append(cons(H,T),L,cons(H,Z)) :- append(T,L,Z).
+
+?- append(cons(a,nil), cons(b,nil), V).
+

Besides the predicate symbol append, the program above also
+includes a constant symbol nil and a binary operation symbol
+cons. Additionally, the query also includes two more
+constants, a and b. The capitalized identifiers are
+all variables. As expected, the LOGIK program above yields only one
+solution, namely V = cons(a,cons(b,nil)). On the other hand,
+if we change the query to:

+
?- append(L1, cons(a,L2), cons(a,cons(b,cons(a,nil)))).
+

then LOGIK yields two solutions: one where L1 is
+cons(a,cons(b,nil)) and L2 is nil,
+and another where L1 is nil and L2 is
+cons(a,cons(b,nil)).

+

The programs above all generated ground solutions, that is,
+solutions where the query variables are mapped to ground terms (i.e.,
+terms without variables). Let us now consider the following query:

+
?- append(cons(a,nil), Y, Z).
+

There are obviously infinitely many ground solutions for the query
+above, e.g.,
+Y = nil and Z = cons(a,nil),
+Y = cons(a,nil) and Z = cons(a,cons(a,nil)),
+Y = cons(b,nil) and Z = cons(a,cons(b,nil)),
+Y = cons(c,cons(b,nil)) and Z = cons(a,cons(c,cons(b,nil))),
+etc. However, all the ground solutions for the query above can be
+elegantly characterized by the property that Z is bound to a list
+starting with a and followed by the list that Y is
+bound to. This property can in fact be described as a symbolic solution
+to the query: Z = cons(a,Y) or, equivalently,
+Y = Symb and Z = cons(a,Symb). It is possible to
+define a ``more general than'' relation on such symbolic solutions,
+in the sense that the more particular solution can be obtained as a
+specialization/substitution of the more general one, and then it can
+be shown that the above is the most general solution to the
+stated query. Logic programming languages, including our LOGIK,
+attempt to always compute such most general solutions.

+

Logic programming languages are highly non-deterministic, in that
+several Horn clauses may be used at the same time, each possibly
+resulting in a different solution. Implementations of logic
+programming languages consist of complex, optimized search and
+indexing algorithms, which we are not concerned with here. Instead,
+we here take advantage of K's builtin support for search.
+Specifically, to find all the solutions of a LOGIK program, we have to
+use krun with the option --search. However, note
+that some programs have infinitely many solutions which cannot relate
+to each other by the "more general" relation. For example, the query

+
?- append(L1, cons(a,L2), L3) .
+

To address such cases and terminate, logic programming languages allow
+the user to choose how many solutions to be computed and displayed.
+In LOGIK, we can use the --bound option of krun for
+this purpose.

+

Finally, note that some queries have no solution. In some cases that
+is easy to detect by exhaustive analysis, such as for the following
+query:

+
?- append(cons(a,L1), L2, cons(b,L3)).
+

Logic programming languages, including LOGIK, terminate in such cases
+and report a no solution answer. However, there are cases where
+exhaustive analysis is not sufficient, such as for the query:

+
?- append(cons(a,L), nil, L).
+

In such cases, logic programming languages do not terminate. While
+one may devise techniques to detect non-termination in some cases,
+one cannot do it in general (just as for all Turing-complete
+languages).

+
requires "unification.k"
+
+module LOGIK-COMMON
+  imports DOMAINS-SYNTAX
+

Syntax

+ +

The syntax of LOGIK is straightforward: a program is a sequence of
+Horn clauses followed by a query:

+
  syntax Literal
+  syntax Term ::= Literal | Literal "(" Terms ")"
+  syntax Terms ::= List{Term,","}
+  syntax Clause ::= Term ":-" Terms "." | Term "."
+  syntax Query ::= "?-" Terms "."
+  syntax Pgm ::= Query | Clause Pgm
+endmodule
+
+module LOGIK-SYNTAX
+  imports LOGIK-COMMON
+  imports BUILTIN-ID-TOKENS
+

Variables and literals are defined as tokens following the conventions
+used in Prolog (variables start with _ or a capital letter, while literals
+start with lowercase letters):

+
  syntax #KVariable ::= r"[A-Z_][A-Za-z0-9_]*"   [token, prec(2)]
+                      | #UpperId                 [token]
+  syntax Term ::= #KVariable [klabel(#SemanticCastToTerm)]
+  syntax Literal ::= r"[a-z][a-zA-Z0-9_]*"                 [token]
+                   | #LowerId                              [token]
+endmodule
+
+module LOGIK
+  imports LOGIK-COMMON
+  imports DOMAINS
+  imports UNIFICATION
+

Unification is at the core of logic programming. Here we are
+going to use the predefined unification procedure (the same one we
+used in the type inferencers in Tutorial 5).

+

Configuration

+ +

The configuration stores each clause in its own cell for easy access,
+and the most general unifier in a cell named mgu, just as in
+the type inferencers. The k cell holds the query and the
+fresh cell holds a fresh clause instance to be attempted on
+the next query item. To more easily read the solutions, we add a
+second top-level cell, solution. Both top cells are
+optional. Indeed, we start with the main top cell and, when a
+solution is found, we move it into the solution cell and
+discard the main cell.

+
  configuration <T color="yellow" multiplicity="?">
+                  <k color="green"> $PGM:Pgm </k>
+                  <fresh color="orange"> .K </fresh>
+                  <clauses color="red">
+                    <clause color="pink" multiplicity="*"> .K </clause>
+                  </clauses>
+                  <mgu> .K </mgu>
+                </T>
+                <solution multiplicity="?"> .K </solution>
+

Pre- and post-processing

+ +

Before we launch the semantics, we first scan the given program and
+place each clause in its own cell, and then place the query in the
+k cell and initialize the mgu with the variables from the query.

+

Note that we put a fresh instance of the clause to avoid interference with
+the query variables. By a "fresh instance" of a clause we mean one whose
+variables are renamed with fresh names; we need that in order to avoid
+undesired unification conflicts due to particular names chosen for
+variables in the original program, as well as conflicts due to
+subsequent uses of the same clause. It is safe to rename the
+variables in a clause, because clauses are universally quantified in
+their variables. This process of creating a fresh instance of a
+clause is similar to how we created fresh instances of type schemas in
+the higher-order type inferencer discussed in Tutorial 5. Indeed, we
+can safely regard clauses as "clause schemas" comprising infinitely
+many instances, one for each context.

+
  rule <k> C:Clause Pgm => Pgm </k>
+       (.Bag => <clause> #renameVariables(C) </clause>)
+
+  rule <k> ?- Ls:Terms. => Ls ...</k>
+       <mgu> _ => #variablesMap(#variables(Ls)) </mgu>
+

We also sequentialize the goals for easier processing:

+
  rule L:Term, Ls:Terms => L ~> Ls
+  rule .Terms => .
+

When all the goals are solved, indicated by the empty k
+cell, the calculated most general unifier (mgu) is in the mgu
+cell. In that case, to ease reading of the final solution we move the
+mgu in the solution cell and delete the rest of the
+configuration:

+
rule <T>... <k> . </k> <mgu> Theta </mgu> ...</T>
+  => <solution> Theta </solution>
+

Since we are not interested in seeing the failed attempts to solve
+the query, we collapse all the error configurations into an empty
+configuration (recall that both top-level cells in the configuration
+were declared optional). This way, if we see an empty configuration
+when we search for all solutions, we know that some attempts failed
+(but we do not know which ones).

+
// this would be nice, but we need feedback from the external unifier
+// for this.
+//  rule <T>... <mgu> _:MguError </mgu> ...</T> => .
+

Semantics

+ +

Once all the infrastructure is in place, the actual semantics of LOGIK
+is quite simple. All we have to do is to pick some (fresh instance of
+a) clause, then unify its conclusion with the first query literal, and
+then replace that literal with condition of the clause. The intuition
+here is the following: to satisfy the first literal in the query, we
+need to find some instance of some clause that matches it, and then to
+similarly show that we can satisfy the conditions of that clause.
+Mathematically, this is an instance of the proof principle called
+resolution: if p ∨ q and ¬ p ∨ r hold, then so does
+q ∨ r. We leave it as an exercise to the reader to see how the two
+relate (hint: assume the negation of the goal together with all the
+clauses, and then derive false).
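
For instance, with the append clauses from the beginning of this lesson, one
(hypothetical) resolution sequence for the goal
append(cons(a,nil), cons(b,nil), V) is sketched below, where H1, T1, L1, Z1,
and L2 denote fresh renamings of the clause variables:

  goal:     append(cons(a,nil), cons(b,nil), V)
  clause:   append(cons(H1,T1), L1, cons(H1,Z1)) :- append(T1,L1,Z1).
  mgu:      H1 = a, T1 = nil, L1 = cons(b,nil), V = cons(a,Z1)
  new goal: append(nil, cons(b,nil), Z1)
  fact:     append(nil, L2, L2).
  mgu:      L2 = Z1 = cons(b,nil),  hence  V = cons(a,cons(b,nil))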

+

The following two rules are tightly connected and they together
+perform the following core task: pick a fresh instance of a clause
+which unifies with the first goal item, then add its conditions as new
+goals.

+

Pick a clause and generate a fresh instance of it when the
+fresh cell is empty:

+
  rule <fresh> . => #renameVariables(C) </fresh> <clause> C </clause>
+       <k> T:Term ...</k>
+  requires #unifiable(T,head(C))
+
+  syntax Term ::= head(Clause) [function]
+  rule head(L.) => L
+  rule head(L:-_.) => L
+

If the goal is unifiable with the fresh clause's head, replace the goal
+with the clause body, and empty the fresh cell (so that
+another clause can be chosen using the rule above):

+
  rule <k> L:Term => . ...</k>
+       <fresh>  L:Term . => . </fresh>
+
+  rule <k> L:Term :KItem => Ls ...</k>
+       <fresh>  L:Term :- Ls:Terms. => . </fresh>
+

Note that there is no problem if a clause is chosen whose
+conclusion literal does not unify with the first goal literal.
+The search
+option of krun will systematically try all clauses, so no
+solution is missed. Of course, the above is not the most efficient
+way to implement a logic programming language, but recall that our
+objective here was to present a simple and mathematically clean
+solution. We encourage the interested reader to consult the LOGIK++
+language definition for a more efficient definition of a richer logic
+programming language.

+
endmodule
+

// NOTE: this definition is not runnable as is.
+// It is intended for documentation and academic purposes only.

+

LOGIK

+ +

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+ +

This is the K semantic definition of LOGIK, a trivial language
+capturing the essence of the logic programming paradigm. In this
+definition, we explicitly focus on simplicity and mathematical
+clarity, not on advanced logic programming features or performance.
+Those are covered in the LOGIK++ extension under examples/logik++.

+

Specifically, a LOGIK program consists of a sequence of Horn clauses
+of the form

+
P :- P1, P2, ..., Pn .
+

followed by a query of the form

+
?- Q1, Q2, ..., Qm .
+

where P, P1, P2, ..., Pn, Q1, Q2,
+..., Qm are literals. The
+symbol :- is read "if". A literal has the form
+p(T1,T2,...,Tk), where p is a predicate symbol
+and where T1,T2,...,Tk are terms. Terms are built as
+usual, with operation symbols and variables. A common
+convention in logic programming languages, also adopted here, is that
+variables are capitalized and operation symbols are not. Operations
+with zero arguments are called constants and are written without
+parentheses, that is, c instead of c(). Horn
+clauses without conditions, called facts, are written
+without :-, that is, P. instead of P :- ..

+

For example, the LOGIK program below gives a few facts about a
+parent predicate, then several clauses defining some useful
+predicates including an ancestor predicate, and finally a
+query asking for those who both have ancestors and are ancestors
+themselves in the parent relation:

+
parent(david,john).
+parent(jim,david).
+parent(steve,jim).
+parent(nathan,steve).
+
+grandparent(A,B):-
+  parent(A,X),
+  parent(X,B).
+
+ancestor(A,B):-
+  parent(A,X),
+  parents(X,B).
+
+parents(X,X).
+parents(A,B):-
+  ancestor(A,B).
+
+both(X) :- ancestor(A,X), ancestor(X,B).
+
+?- both(X).
+

Above, we only have constant operation symbols, so these and variables
+are the only terms that can be used in predicates. As expected, the
+LOGIK program above will give us three solutions for X:
+david, steve, and jim. If we inline the
+both(X) predicate in the query, that is, if we replace the
+query with ?- ancestor(A,X), ancestor(X,B). then we get
+10 solutions, one for each triple A, X, and
+B satisfying both predicates ancestor(A,X) and
+ancestor(X,B).

+

As another example, the program below defines an append
+predicate followed by a simple goal:

+
append(nil,L,L).
+append(cons(H,T),L,cons(H,Z)) :- append(T,L,Z).
+
+?- append(cons(a,nil), cons(b,nil), V).
+

Besides the predicate symbol append, the program above also
+includes a constant symbol nil and a binary operation symbol
+cons. Additionally, the query also includes two more
+constants, a and b. The capitalized identifiers are
+all variables. As expected, the LOGIK program above yields only one
+solution, namely V = cons(a,cons(b,nil)). On the other hand,
+if we change the query to:

+
?- append(L1, cons(a,L2), cons(a,cons(b,cons(a,nil)))).
+

then LOGIK yields two solutions: one where L1 is
+cons(a,cons(b,nil)) and L2 is nil,
+and another where L1 is nil and L2 is
+cons(a,cons(b,nil)).

+

The programs above all generated ground solutions, that is,
+solutions where the query variables are mapped to ground terms (i.e.,
+terms without variables). Let us now consider the following query:

+
?- append(cons(a,nil), Y, Z).
+

There are obviously infinitely many ground solutions for the query
+above, e.g.,
+Y = nil and Z = cons(a,nil),
+Y = cons(a,nil) and Z = cons(a,cons(a,nil)),
+Y = cons(b,nil) and Z = cons(a,cons(b,nil)),
+Y = cons(c,cons(b,nil)) and Z = cons(a,cons(c,cons(b,nil))),
+etc. However, all the ground solutions for the query above can be
+elegantly characterized by the property that Z is bound to a list
+starting with a and followed by the list that Y is
+bound to. This property can in fact be described as a symbolic solution
+to the query: Z = cons(a,Y) or, equivalently,
+Y = Symb and Z = cons(a,Symb). It is possible to
+define a "more general than" relation on such symbolic solutions,
+in the sense that the more particular solution can be obtained as a
+specialization/substitution of the more general one, and then it can
+be shown that the above is the most general solution to the
+stated query. Logic programming languages, including our LOGIK,
+attempt to always compute such most general solutions.

+

Logic programming languages are highly non-deterministic, in that
+several Horn clauses may be used at the same time, each possibly
+resulting in a different solution. Implementations of logic
+programming languages consist of complex, optimized search and
+indexing algorithms, which we are not concerned with here. Instead,
+we here take advantage of K's builtin support for search.
+Specifically, to find all the solutions of a LOGIK program, we have to
+use krun with the option --search. However, note
+that some programs have infinitely many solutions which cannot relate
+to each other by the "more general" relation. One such example is the query

+
?- append(L1, cons(a,L2), L3) .
+

To address such cases and terminate, logic programming languages allow
+the user to choose how many solutions are to be computed and displayed.
+In LOGIK, we can use the --bound option of krun for
+this purpose.

+
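
For instance (the program file name below is hypothetical), asking krun
+for at most three solutions of a LOGIK program could look like this:

+
  krun --search --bound 3 append.logik
+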

Finally, note that some queries have no solution. In some cases that
+is easy to detect by exhaustive analysis, such as for the following
+query:

+
?- append(cons(a,L1), L2, cons(b,L3)).
+

Logic programming languages, including LOGIK, terminate in such cases
+and report a no solution answer. However, there are cases where
+exhaustive analysis is not sufficient, such as for the query:

+
?- append(cons(a,L), nil, L).
+

In such cases, logic programming languages do not terminate. While
+one may devise techniques to detect non-termination in some cases,
+one cannot do it in general (as is the case for any Turing-complete
+language).

+
requires "unification.k"
+
+module LOGIK-COMMON
+  imports DOMAINS-SYNTAX
+

Syntax

+ +

The syntax of LOGIK is straightforward: a program is a sequence of
+Horn clauses followed by a query:

+
  syntax Literal
+  syntax Term ::= Literal | Literal "(" Terms ")"
+  syntax Terms ::= List{Term,","}
+  syntax Clause ::= Term ":-" Terms "." | Term "."
+  syntax Query ::= "?-" Terms "."
+  syntax Pgm ::= Query | Clause Pgm
+endmodule
+
+module LOGIK-SYNTAX
+  imports LOGIK-COMMON
+  imports BUILTIN-ID-TOKENS
+

Variables and literals are defined as tokens following the conventions
+used in Prolog (variables start with _ or a capital letter, while literals
+start with a lowercase letter):

+
  syntax #KVariable ::= r"[A-Z_][A-Za-z0-9_]*"   [token, prec(2)]
+                      | #UpperId                 [token]
+  syntax Term ::= #KVariable [klabel(#SemanticCastToTerm)]
+  syntax Literal ::= r"[a-z][a-zA-Z0-9_]*"                 [token]
+                   | #LowerId                              [token]
+endmodule
+
+module LOGIK
+  imports LOGIK-COMMON
+  imports DOMAINS
+  imports UNIFICATION
+

Unification is at the core of logic programming. Here we are
+going to use the predefined unification procedure (the same one we
+used in the type inferencers in Tutorial 5).

+

Configuration

+ +

The configuration stores each clause in its own cell for easy access,
+and the most general unifier in a cell named mgu, just like in
+the type inferencers. The k cell holds the query and the
+fresh cell holds a fresh clause instance to be attempted on
+the next query item. To more easily read the solutions, we add a
+second top-level cell, solution. Both top cells are
+optional. Indeed, we start with the main top cell and, when a
+solution is found, we move it into the solution cell and
+discard the main cell.

+
  configuration <T color="yellow" multiplicity="?">
+                  <k color="green"> $PGM:Pgm </k>
+                  <fresh color="orange"> .K </fresh>
+                  <clauses color="red">
+                    <clause color="pink" multiplicity="*"> .K </clause>
+                  </clauses>
+                  <mgu> .K </mgu>
+                </T>
+                <solution multiplicity="?"> .K </solution>
+

Pre- and post-processing

+ +

Before we launch the semantics, we first scan the given program and
+place each clause in its own cell, and then place the query in the
+k cell and initialize the mgu with the variables from the query.

+

Note that we put a fresh instance of the clause to avoid interference with
+the query variables. By a "fresh instance" of a clause we mean one whose
+variables are renamed with fresh names; we need that in order to avoid
+undesired unification conflicts due to particular names chosen for
+variables in the original program, as well as conflicts due to
+subsequent uses of the same clause. It is safe to rename the
+variables in a clause, because clauses are universally quantified in
+their variables. This process of creating a fresh instance of a
+clause is similar to how we created fresh instances of type schemas in
+the higher-order type inferencer discussed in Tutorial 5. Indeed, we
+can safely regard clauses as "clause schemas" comprising infinitely
+many instances, one for each context.

+
  rule <k> C:Clause Pgm => Pgm </k>
+       (.Bag => <clause> #renameVariables(C) </clause>)
+
+  rule <k> ?- Ls:Terms. => Ls ...</k>
+       <mgu> _ => #variablesMap(#variables(Ls)) </mgu>
+

We also sequentialize the goals for easier processing:

+
  rule L:Term, Ls:Terms => L ~> Ls
+  rule .Terms => .
+

When all the goals are solved, indicated by the empty k
+cell, the calculated most general unifier (mgu) is in the mgu
+cell. In that case, to ease reading of the final solution we move the
+mgu into the solution cell and delete the rest of the
+configuration:

+
rule <T>... <k> . </k> <mgu> Theta </mgu> ...</T>
+  => <solution> Theta </solution>
+

Since we are not interested in seeing the failed attempts to solve
+the query, we collapse all the error configurations into an empty
+configuration (recall that both top-level cells in the configuration
+were declared optional). This way, if we see an empty configuration
+when we search for all solutions, we know that some attempts failed
+(but we do not know which ones).

+
// this would be nice, but we need feedback from the external unifier
+// for this.
+//  rule <T>... <mgu> _:MguError </mgu> ...</T> => .
+

Semantics

+ +

Once all the infrastructure is in place, the actual semantics of LOGIK
+is quite simple. All we have to do is to pick some (fresh instance of
+a) clause, then unify its conclusion with the first query literal, and
+then replace that literal with the condition of the clause. The intuition
+here is the following: to satisfy the first literal in the query, we
+need to find some instance of some clause that matches it, and then to
+similarly show that we can satisfy the conditions of that clause.
+Mathematically, this is an instance of the proof principle called
+resolution: if p ∨ q and ¬ p ∨ r hold, then so does
+q ∨ r. We leave it as an exercise to the reader to see how the two
+relate (hint: assume the negation of the goal together with all the
+clauses, and then derive false).

+
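
To illustrate the above (an added example reusing the ancestor clause
+from the program discussed earlier; the renamed variables are only for
+presentation), resolving the goal ancestor(david,B) against a fresh
+instance of the ancestor clause proceeds as follows:

+
  goal:          ancestor(david,B)
+  fresh clause:  ancestor(A1,B1) :- parent(A1,X1), parents(X1,B1).
+  unifier:       A1 = david, B1 = B
+  new goals:     parent(david,X1), parents(X1,B)
+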

The following two rules are tightly connected and they together
+perform the following core task: pick a fresh instance of a clause
+which unifies with the first goal item, then add its conditions as new
+goals.

+

Pick a clause and generate a fresh instance of it when the
+fresh cell is empty:

+
  rule <fresh> . => #renameVariables(C) </fresh> <clause> C </clause>
+       <k> T:Term ...</k>
+  requires #unifiable(T,head(C))
+
+  syntax Term ::= head(Clause) [function]
+  rule head(L.) => L
+  rule head(L:-_.) => L
+

If the goal is unifiable with the fresh clause's head, replace the goal
+with the clause body, and empty the fresh cell (so that
+another clause can be chosen using the rule above):

+
  rule <k> L:Term => . ...</k>
+       <fresh>  L:Term . => . </fresh>
+
+  rule <k> L:Term :KItem => Ls ...</k>
+       <fresh>  L:Term :- Ls:Terms. => . </fresh>
+

Note that there is no problem if a clause is chosen whose
+conclusion literal does not unify with the first goal literal.
+The search
+option of krun will systematically try all clauses, so no
+solution is missed. Of course, the above is not the most efficient
+way to implement a logic programming language, but recall that our
+objective here was to present a simple and mathematically clean
+solution. We encourage the interested reader to consult the LOGIK++
+language definition for a more efficient definition of a richer logic
+programming language.

+
endmodule
+

Projects using K

+ +

A list of projects using the K framework. If you are working on something interesting and want to share it with the community,
+let us know on our socials, and we will feature you on this list.

+
+ + +
+
    +
  • +

    KAVM (Feb 2022 - Present)

    +

    The Algorand Virtual Machine and TEAL Semantics in K
    +KAVM leverages the K Framework to empower Algorand smart contract developers
    +with property-based testing and formal verification.

    +
  • +
  • +

    KPlutus (2016 - Present)

    +

    The K Semantics of Plutus-Core

    +
  • +
  • +

    Dedukti (Mar 2021 - Present)

    +

    This project aims to translate real K semantics into Dedukti.

    +
  • +
  • +

    KWasm (Aug 2015 - Present)

    +

    KWasm is the K semantics of WebAssembly.
    +WebAssembly is a low-level (but simple and streamlined) assembly language that was originally developed to provide a fast execution engine for browser-based tools.
    +More recently, it has been used in several blockchain smart-contract platforms as the underlying language for executing financial agreements.
    +KWasm has been used for measuring coverage of test-suites over Wasm code and verifying programs which are compiled to Wasm.

    +
  • +
  • +

    KEVM (Sep 2017 - Present)

    +

    KEVM is the K semantics of the Ethereum Virtual Machine.
    +It passes the entire Ethereum Test Suite and is used for verifying EVM programs.

    +
  • +
  • +

    IELE (Oct 2016 - Present)

    +

    IELE is the underlying VM integrated into the Cardano blockchain.
    +IELE is a register-based VM (inspired by LLVM), which attempts to avoid many of the missteps in design present in EVM.

    +
  • +
  • +

    K-Michelson (Oct 2019 - Present)

    +

    K-Michelson is the K semantics of the Michelson blockchain programming language, which powers the Tezos blockchain.
    +K-Michelson provides additional testing tools for developers, including a unit-testing framework which is extendable to symbolic property testing.

    +
  • +
  • +

    C (Jul 2010 - Present)

    +

    The K semantics of the C programming language specifies the translation, linking, and execution semantics of the C language according to the official C standard.
    +It has been used to build tools like RV-Match, which detects undefined behaviors in users' programs by running their test-suites through the C semantics.

    +
  • +
+

Archived

+ + +
+
+ + diff --git a/exports/K.mobi b/exports/K.mobi new file mode 100644 index 00000000000..d402ca25f50 Binary files /dev/null and b/exports/K.mobi differ diff --git a/exports/K.pdf b/exports/K.pdf new file mode 100644 index 00000000000..cff444e0065 Binary files /dev/null and b/exports/K.pdf differ diff --git a/faq/index.html b/faq/index.html new file mode 100644 index 00000000000..3202560ecd9 --- /dev/null +++ b/faq/index.html @@ -0,0 +1,475 @@ + + + + + + + + + + + + + + +FAQ | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

FAQ

+

General questions

+

What is K?

+

[13 Dec, 2013] K is a framework for defining programming languages. Once you define a language, K gives you a series of tools for that language, such as: a parser, an interpreter, a state-space explorer (like a model-checker for reachability), and even a deductive program verifier. We continuously work on making these tools better and on adding new tools.

+

What is a language definition?

+

[13 Dec, 2013] A language definition consists of two parts: syntax and semantics. The syntax is defined using a BNF-style notation, enriched with several features to ease the semantics. The semantics tells what each language construct is meant to do. This way, a language definition says both what programs in your language should look like and what they mean, or how they execute.

+
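
For illustration only (the module and sort names below are made up for this answer), a tiny K definition that pairs a BNF-style syntax with a rewrite rule giving its semantics could look like this:

+
k
module CALC + imports INT + // illustrative example, not part of any K distribution + syntax Exp ::= Int | Exp "+" Exp [strict, left] + syntax KResult ::= Int + rule I1:Int + I2:Int => I1 +Int I2 +endmodule +
+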

What is the difference between a definition and an implementation?

+

[13 Dec, 2013] There is no difference in K. We think of K definitions as formal, rigorous implementations of the language. In fact, many users of K have no background in programming language semantics; they think of K as a domain-specific language for implementing programming languages. The benefit of implementing your language in K is that you can make use of the tools that K offers, which is not possible when you implement your language in a conventional programming language.

+

Why K?

+

[13 Dec, 2013] There was and still is a considerable amount of effort spent by many scientists on developing parsing, model-checking, program verification and other formal program analysis techniques. Most of these techniques are language independent, yet a considerable amount of effort is then spent on developing language-specific tools based on these techniques. For example, developing a model-checker or a program verifier for Java, or C, or Python, is a serious endeavor that only a few highly skilled people can attempt. We believe that all these language-specific tools can be automatically derived from the K language definition, so that language designers spend the time only once to define their language and then get not only an implementation of their language, but also all the other tools, essentially for free.

+

What is the difference between K and ...

+

SDF

+

[13 Dec, 2013] SDF is a parser generator. Simply speaking, it takes as input a grammar written in the SDF format and a text, and creates the abstract syntax tree of that text corresponding to the grammar specification. K currently uses SDF for its parsing needs, but we integrated it into a more complex environment suitable for semantic definitions. Using the same language specification, we generate multiple parsers for different purposes: to parse programs, to parse rewrite rules, etc. Another difference is that we changed the syntax of the grammar specification a bit. We adopted a BNF-style notation whereas SDF uses an algebraic specification, but we kept the same disambiguation system with priorities and associativity filters.

+

Maude

+

PLT Redex

+

[16 Dec, 2013] PLT Redex is a language definitional framework based on reduction semantics with evaluation contexts, a type of Structural Operational Semantics. A PLT Redex definition consists of the syntax for the language (including the syntax of the execution configuration, if needed), followed by a syntax for evaluation contexts which allows identifying the next reducible expression (redex). The rules can specify the parts of the context (and abstract parts of it using variables), and can alter both the redex and the context. PLT Redex offers a suite of tools built on top of the Racket Scheme-based IDE to help visualize and explore executions. K borrows from PLT Redex the idea of evaluation contexts, and extends it further by allowing more complex conditions to be put on them. A distinctive difference between Redex and K is the fact that in K evaluation contexts are used only for the computational fragment of the executing configuration, with the rules applying modulo configuration abstraction. This, for example, allows K to more easily specify synchronous communication of agents or threads.

+

Spoofax

+

Rascal

+

OTT

+

ATL and Model-Driven Engineering

+

[14 Dec, 2013] ATL (Atlas Transformation Language) falls in the Model-Driven Engineering (MDE) field and includes a model transformation language and toolkit. ATL is also based on rules, which provide a means to produce a target model Mb conforming to a meta-model MMb, from a source model Ma conforming to a meta-model MMa. It should not be difficult to define such model transformations using K, this way effectively using the target meta-model MMb to give semantics to the source meta-model MMa. Moreover, if MMa and MMb have K semantics themselves, then the K tool can be used for proving the conformance of the transformation. Note, however, that K does not currently supply any explicit support for meta-model technologies, such as EMF (Eclipse Modelling Framework), etc.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + diff --git a/index.html b/index.html new file mode 100644 index 00000000000..1b2cd32a8ff --- /dev/null +++ b/index.html @@ -0,0 +1,428 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

K is a rewrite-based +executable semantic framework in which programming languages, type +systems and formal analysis tools can be defined using configurations +and rules. Configurations organize the state in units called cells, +which are labeled and can be nested. K rewrite rules make it explicit +which parts of the term are read-only, write-only, read-write, or +unused. This makes K suitable for defining truly concurrent languages +even in the presence of sharing. Computations are represented as +syntactic extensions of the original language abstract syntax, using a +nested list structure which sequentializes computational tasks, such +as program fragments. Computations are like any other terms in a +rewriting environment: they can be matched, moved from one place to +another, modified, or deleted. This makes K suitable for defining +control-intensive features such as abrupt termination, exceptions, or +call/cc.

+
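
As a small illustrative sketch (not taken from any particular K semantics; all names below are invented), a configuration with two cells and a rule that rewrites the k cell while only reading the state cell could be written as follows:

+
k
module EXAMPLE + imports DOMAINS + + // illustrative only: a computation cell and a state cell + configuration <T> + <k> $PGM:K </k> + <state> .Map </state> + </T> + + // only the k cell is rewritten; the state cell is read but left unchanged + rule <k> X:Id => V:KItem ...</k> + <state>... X |-> V ...</state> +endmodule +
+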

K Tool Download

+
    +
  • Install from the latest K GitHub Release.
  • +
  • Install pyk, K's scripting interface for Python. Check the API documentation for a complete reference of supported features.
  • +
  • Try our Editor Support page for links to K syntax highlighting definitions for various popular editors/IDEs. Please feel free to contribute.
  • +
  • Build or browse the code on GitHub, where you can also report bugs.
  • +
+

Learn K

+ +

Support

+ +

Resources

+ +
+
+ + + +
+ +
+
+ + + + + + + + + + + + diff --git a/install b/install new file mode 100644 index 00000000000..70476dbcd6f --- /dev/null +++ b/install @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +{ # Prevent execution if this script was only partially downloaded + set -e + + GREEN='\033[0;32m' + RED='\033[0;31m' + NC='\033[0m' + + NIX_POST_INSTALL_MESSAGE="To ensure that the necessary environment variables are set, either log in again, or type + + . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh + +in your shell." + + oops() { + >&2 echo -e "${RED}error:${NC} $1" + exit 1 + } + + [[ "$(id -u)" -eq 0 ]] && oops "Please run this script as a regular user" + + # Check if nix is already installed and is at least version 2.4 + if command -v nix > /dev/null; then + NIX_VERSION=$(nix --version | sed -ne 's/[^0-9]*\(\([0-9]\.\)\)/\1/p') + NIX_VERSION_MINOR=$(echo "$NIX_VERSION" | cut -d. -f2) + if [ "$NIX_VERSION_MINOR" -gt "3" ]; then + echo -e "Detected nix version $NIX_VERSION. Proceeding with K framework install." + else + oops "It appears that you have a version of nix on your system that's too old. The K framework installer requires nix >=2.4. Please update nix and try again." + fi + else + read -p "It appears that you don't have nix installed. Since the K framework needs nix for distribution, this script will attempt to install nix first. Would you like to proceed? [y/N]" -n 1 -r + echo + if [[ $REPLY =~ ^[Yy]$ ]] + then + echo "Downloading nix and running the installer..." + curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \ + --extra-conf "trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= k-framework.cachix.org-1:jeyMXB2h28gpNRjuVkehg+zLj62ma1RnyyopA/20yFE= k-framework-binary.cachix.org-1:pJedQ8iG19BW3v/DMMmiRVtwRBGO3fyMv2Ws0OpBADs=" \ + --extra-conf "substituters = https://cache.nixos.org https://k-framework.cachix.org" + if [ -e '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' ]; then + . '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' + else + oops "Could not source nix." + fi + NIX_FRESH_INSTALL=true + else + oops "Cannot proceed with the installation without nix." + fi + fi + + PREV_KUP_INSTALL=$(nix profile list --experimental-features 'nix-command flakes' | awk '/packages\..*\.kup/ {print $1}') + if ! [[ -z "$PREV_KUP_INSTALL" ]]; then + echo "Removing previous K framework installer versions ..." + GC_DONT_GC=1 nix profile remove $PREV_KUP_INSTALL \ + --experimental-features 'nix-command flakes' + fi + + echo "Installing the K framework installer utility (kup) ..." + + GC_DONT_GC=1 nix profile install github:runtimeverification/kup#kup \ + --option extra-substituters 'https://k-framework.cachix.org' \ + --option extra-trusted-public-keys 'k-framework.cachix.org-1:jeyMXB2h28gpNRjuVkehg+zLj62ma1RnyyopA/20yFE=' \ + --experimental-features 'nix-command flakes' + + echo -e "${GREEN}All set!${NC}" + if [ -n "$NIX_FRESH_INSTALL" ]; then + echo -e "$NIX_POST_INSTALL_MESSAGE" + fi + +} diff --git a/k-distribution/INSTALL/index.html b/k-distribution/INSTALL/index.html new file mode 100644 index 00000000000..3a01351761a --- /dev/null +++ b/k-distribution/INSTALL/index.html @@ -0,0 +1,604 @@ + + + + + + + + + + + + + + +Installing the K Framework | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Installing the K Framework

+

Fast Installation (preferred)

+

The preferred way to install K is with the kup tool, which is based on Nix. +No knowledge of Nix is required to use kup.

+

Install kup and K by running the following:

+
shell
bash <(curl https://kframework.org/install) +kup install k +
+

List available versions with:

+
shell
kup list k +
+

If kup indicates that there's a newer version, you can update by simply running:

+
shell
kup install k +
+

To install a specific version, run:

+
shell
kup install k --version v6.3.11 +
+

Note that the versions marked as ✅ are cached in Runtime Verification's Nix binary cache and thus are the fastest to install.

+

Install through packages

+

We currently strive to provide packages for the following platforms:

+
    +
  • Ubuntu Jammy Jellyfish (22.04)
  • +
  • macOS Ventura (13) via Homebrew
  • +
  • Docker Images
  • +
+

Pre-installation Notes

+
    +
  • +

    We do not currently support running K natively on Windows. To use K on +Windows 10, you are encouraged to install the +Windows Subsystem for Linux (version 2) +and follow the instructions for installing Ubuntu Jammy.

    +

    If you have already installed WSL, before proceeding, you will need to +enter the WSL environment. You can do this by:

    +
      +
    1. opening up the command prompt (accessible by searching cmd or +command prompt from the start menu);
    2. +
    3. using the wsl.exe command to access the WSL environment.
    4. +
    +
  • +
  • +

    To use K in other non-Linux environments (e.g. Windows 8 or earlier), +you will need to use virtual machine (VM) software. We assume you have:

    +
      +
    1. Created a virtual machine
    2. +
    3. Installed a Linux distribution (e.g. Ubuntu Jammy Jellyfish) on your +virtual machine
    4. +
    +

    Consult your virtual machine software if you need help with the above +steps. We recommend the free VirtualBox virtual machine software.

    +

    Before proceeding, follow the virtual machine software UI to start your +Linux virtual machine and enter the command line environment.

    +
  • +
  • +

    WSL and virtual machine users should be aware that, if you use your web +browser to download the package, you will need to make it accessible to +the command line environment. For this reason, we recommend downloading the +package from the command line directly using a tool like wget. For +example, you could copy the package download URL and then type:

    +
    wget <package-download-url>
    +
    +

    where <package-download-url> is replaced by the URL you just copied.

    +
  • +
  • +

    K depends on version 4.8.15 of Z3, which may not be supplied by package +managers. If this is the case, it should be built and installed from source +following the +instructions in +the Z3 repository. Other versions (older and newer) are not supported by K, +and may lead to incorrect behaviour or performance issues.

    +
  • +
+

Downloading Packages

+

Download the appropriate package from GitHub, via the +Releases page. +Releases are generated as often as possible from the master branch.

+

Installing Packages

+

For version X.Y.Z, distribution DISTRO, and package ID ID, the following +instructions tell you how to install on each system. Note that this typically +requires about 1.4 GB of dependencies and will take some time.

+
    +
  • On Linux systems, K will typically be installed under /usr.
  • +
  • On macOS/brew, K will typically be installed under /usr/local.
  • +
+

Ubuntu Jammy (22.04)

+
sh
sudo apt install ./kframework_amd64_ubuntu_jammy.deb +
+

macOS (Homebrew)

+

Homebrew (or just brew) is a third-party package manager +for macOS. +If you have not installed brew, you must do so before installing the K +Framework brew package.

+

With brew installed, do the following to install the K Framework brew package +(with build number BN):

+
sh
brew install kframework--X.Y.Z.ID.bottle.BN.tar.gz -v +
+

Homebrew Alternate Installation

+

To directly install the latest K Framework brew package without needing to +download it separately, do the following:

+
sh
brew install runtimeverification/k/kframework +
+

Or, to streamline future K Framework upgrades, you can tap the K Framework +package repository. This lets future installations/upgrades/etc... use the +unprefixed package name.

+
sh
brew tap runtimeverification/k +brew install kframework +
+

Docker Images

+

Docker images with K pre-installed are available at the +runtimeverification/kframework-k Docker Hub repository.

+

Each release at COMMIT_ID has an image associated with it at +runtimeverificationinc/kframework-k:ubuntu-jammy-COMMIT_ID.

+

To run the image directly:

+
sh
docker run -it runtimeverificationinc/kframework-k:ubuntu-jammy-COMMIT_ID +
+

and to make a Docker Image based on it, use the following line in your +Dockerfile:

+
Dockerfile
FROM runtimeverificationinc/kframework-k:ubuntu-jammy-COMMIT_ID +
+

We also create Ubuntu 22.04 images with the ubuntu-jammy-COMMIT_ID tags.

+

Testing Packages

+

The easiest way to test the K package is to copy a K tutorial language and +check if you can compile and run an included example.

+
    +
  1. +

    Start by cloning the K tutorial from the K PL Tutorial. The command will typically look like this:

    +
    sh
    $ git clone https://www.github.com/runtimeverification/pl-tutorial +
    +
  2. +
  3. +

    Now you can try to run some programs:

    +
    sh
    $ cd pl-tutorial/2_languages/1_simple/1_untyped +$ make kompile +$ krun tests/diverse/factorial.simple +
    +
  4. +
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/include/kframework/builtin/domains/index.html b/k-distribution/include/kframework/builtin/domains/index.html new file mode 100644 index 00000000000..1cbbf256c9e --- /dev/null +++ b/k-distribution/include/kframework/builtin/domains/index.html @@ -0,0 +1,3853 @@ + + + + + + + + + + + + + + +Basic Builtin Types in K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Basic Builtin Types in K

+

A major piece of the K prelude consists of a series of modules that contain +implementations of basic data types and language features in K. You do not need +to require this file yourself; it is required automatically in every K +definition unless --no-prelude is passed to kompile. K may not work correctly +if some of these modules do not exist or do not declare certain functions.

+

Note that some functions in the K prelude are not total, that is, +they are not defined on all possible input values. When you invoke such a +function on an undefined input, the behavior is undefined. In particular, when +this happens, interpreters generated by the K LLVM backend may crash.

+
k
requires "kast.md" +
+

Default Modules

+

K declares certain modules that contain most of the builtins you usually want +when defining a language in K. In particular, this includes integers, booleans, +strings, identifiers, I/O, lists, maps, and sets. The DOMAINS-SYNTAX module +is designed to be imported by the syntax module of the language and contains +only the program-level syntax of identifiers, integers, booleans, and strings. +The DOMAINS module contains the rest of the syntax, including builtin +functions over those and the remaining types.

+

Note that not all modules are included in DOMAINS. A few less-common modules +are not, including ARRAY, COLLECTIONS, FLOAT, STRING-BUFFER, BYTES, +K-REFLECTION, MINT.

+
k
module DOMAINS-SYNTAX + imports SORT-K + imports ID-SYNTAX + imports UNSIGNED-INT-SYNTAX + imports BOOL-SYNTAX + imports STRING-SYNTAX +endmodule + +module DOMAINS + imports DOMAINS-SYNTAX + imports INT + imports BOOL + imports STRING + imports BASIC-K + imports LIST + imports K-IO + imports MAP + imports SET + imports ID + imports K-EQUAL +endmodule +
+

Arrays

+

Provided here is an implementation of fixed-size, contiguous maps from Int +to KItem. In some previous versions of K, the Array type was a builtin type +backed by mutable arrays of objects. However, in modern K, the Array type is +implemented by means of the List type; users should not access this interface +directly and should instead make use only of the functions listed below. Users of +this module should import only the ARRAY module.

+
k
module ARRAY-SYNTAX + imports private LIST + + syntax Array +
+

Array lookup

+

You can look up an element in an Array by its index in O(log(N)) time. Note +that the base of the logarithm is a relatively high number and thus the time is +effectively constant.

+
k
syntax KItem ::= Array "[" Int "]" [function] +
+

Array update

+

You can create a new Array with a new value for a key in O(log(N)) time, or +effectively constant.

+
k
syntax Array ::= Array "[" key: Int "<-" value: KItem "]" [function, symbol(_[_<-_])] +
+

Array reset

+

You can create a new Array where a particular key is reset to its default +value in O(log(N)) time, or effectively constant.

+
k
syntax Array ::= Array "[" Int "<-" "undef" "]" [function] +
+

Multiple array update

+

You can create a new Array in which the N elements starting at index are +replaced with the contents of a List L of size N, in +O(N*log(K)) time (where K is the size of the array), or effectively linear. +Having index + N > K yields an exception.

+
k
syntax Array ::= updateArray(Array, index: Int, List) [function] +
+

Array fill

+

You can create a new Array where the length elements starting at index +are replaced with value, in O(length*log(N)) time, or effectively linear.

+
k
syntax Array ::= fillArray(Array, index: Int, length: Int, value: KItem) [function] +
+

Array range check

+

You can test whether an integer is within the bounds of an array in O(1) time.

+
k
syntax Bool ::= Int "in_keys" "(" Array ")" [function, total] +
+
k
endmodule + +module ARRAY-IN-K [private] + imports public ARRAY-SYNTAX + imports private LIST + imports private K-EQUAL + imports private INT + imports private BOOL +
+

Array creation

+

You can create an array with length elements where each element is +initialized to value in O(1) time. Note that the array is stored in a manner +where only the highest element that is actually modified is given a value +in its internal representation, which means that subsequent array operations +may incur a one-time O(N) resizing cost, possibly amortized across multiple +operations.

+
k
syntax Array ::= makeArray(length: Int, value: KItem) [function, public] +
+

Implementation of Arrays

+

The remainder of this section consists of an implementation in K of the +operations listed above. Users of the ARRAY module should not make use +of any of the syntax defined in any of these modules.

+
k
syntax Array ::= arr(List, Int, KItem) + + rule makeArray(I::Int, D::KItem) => arr(.List, I, D) + + rule arr(L::List, _, _ ) [ IDX::Int ] => L[IDX] requires 0 <=Int IDX andBool IDX <Int size(L) + rule arr(_ , _, D::KItem) [ _ ] => D [owise] + + syntax List ::= ensureOffsetList(List, Int, KItem) [function] + rule ensureOffsetList(L::List, IDX::Int, D::KItem) => L makeList(IDX +Int 1 -Int size(L), D) requires IDX >=Int size(L) + rule ensureOffsetList(L::List, IDX::Int, _::KItem) => L requires notBool IDX >=Int size(L) + + rule arr(L::List, I::Int, D::KItem) [ IDX::Int <- VAL::KItem ] => arr(ensureOffsetList(L, IDX, D) [ IDX <- VAL ], I, D) + + rule arr(L::List, I::Int, D::KItem) [ IDX::Int <- undef ] => arr(L, I, D) [ IDX <- D ] + + rule updateArray(arr(L::List, I::Int, D::KItem), IDX::Int, L2::List) => arr(updateList(ensureOffsetList(L, IDX +Int size(L2) -Int 1, D), IDX, L2), I, D) + + rule fillArray(arr(L::List, I::Int, D::KItem), IDX::Int, LEN::Int, VAL::KItem) => arr(fillList(ensureOffsetList(L, IDX +Int LEN -Int 1, D), IDX, LEN, VAL), I, D) + + rule IDX::Int in_keys(arr(_, I::Int, _)) => IDX >=Int 0 andBool IDX <Int I +endmodule + +module ARRAY-SYMBOLIC [symbolic] + imports ARRAY-IN-K +endmodule + +module ARRAY-KORE + imports ARRAY-IN-K +endmodule + +module ARRAY + imports ARRAY-SYMBOLIC + imports ARRAY-KORE +endmodule +
+
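
As a usage illustration only (the ARRAY-EXAMPLE module below is not part of the prelude), the operations above can be combined as follows; the right-hand side of the rule evaluates to 7:

+
k
module ARRAY-EXAMPLE + imports ARRAY + imports INT + + // illustrative only: index 2 is updated to 7, every other index keeps the default 0 + syntax KItem ::= "arrayDemo" [function] + rule arrayDemo => (makeArray(5, 0) [ 2 <- 7 ]) [ 2 ] +endmodule +
+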

Maps

+

Provided here is the syntax of an implementation of immutable, associative, +commutative maps from KItem to KItem. This type is hooked to an +implementation of maps provided by the backend. For more information on +matching on maps and allowable patterns for doing so, refer to K's +user documentation.

+
k
module MAP + imports private BOOL-SYNTAX + imports private INT-SYNTAX + imports private LIST + imports private SET + + syntax Map [hook(MAP.Map)] +
+

Map concatenation

+

The Map sort represents a generalized associative array. Each key can be +paired with an arbitrary value, and can be used to reference its associated +value. Multiple bindings for the same key are not allowed.

+

You can construct a new Map consisting of key/value pairs of two Maps. The +result is #False if the maps have keys in common (in particular, this will +yield an exception during concrete execution). This operation is O(Nlog(M)) +where N is the size of the smaller map, when it appears on the right hand side. +When it appears on the left hand side and all variables are bound, it is +O(Nlog(M)) where M is the size of the map it is matching and N is the number +of elements being matched. When it appears on the left hand side containing +variables not bound elsewhere in the term, it is O(N^K) where N is the size of +the map it is matching and K is the number of unbound keys being matched. In +other words, one unbound variable is linear, two is quadratic, three is cubic, +etc.

+
k
syntax Map ::= Map Map [left, function, hook(MAP.concat), symbol(_Map_), assoc, comm, unit(.Map), element(_|->_), index(0), format(%1%n%2)] +
+

Map unit

+

The map with zero elements is represented by .Map.

+
k
syntax Map ::= ".Map" [function, total, hook(MAP.unit), symbol(.Map)] +
+

Map elements

+

An element of a Map is constructed via the |-> operator. The key is on the +left and the value is on the right.

+
k
syntax Map ::= KItem "|->" KItem [function, total, hook(MAP.element), symbol(_|->_), injective] + + syntax priority _|->_ > _Map_ .Map + syntax non-assoc _|->_ +
+

Map lookup

+

You can look up the value associated with the key of a map in O(log(N)) time. +Note that the base of the logarithm is a relatively high number and thus the +time is effectively constant. The value is #False if the key is not in the +map (in particular, this will yield an exception during concrete execution).

+
k
syntax KItem ::= Map "[" KItem "]" [function, hook(MAP.lookup), symbol(Map:lookup)] +
+

Map lookup with default

+

You can also look up the value associated with the key of a map using a +total function that assigns a specific default value if the key is not present +in the map. This operation is also O(log(N)), or effectively constant.

+
k
syntax KItem ::= Map "[" KItem "]" "orDefault" KItem [function, total, hook(MAP.lookupOrDefault), symbol(Map:lookupOrDefault)] +
+

Map update

+

You can insert a key/value pair into a map in O(log(N)) time, or effectively +constant.

+
k
syntax Map ::= Map "[" key: KItem "<-" value: KItem "]" [function, total, symbol(Map:update), hook(MAP.update), prefer] +
+

Map delete

+

You can remove a key/value pair from a map via its key in O(log(N)) time, or +effectively constant.

+
k
syntax Map ::= Map "[" KItem "<-" "undef" "]" [function, total, hook(MAP.remove), symbol(_[_<-undef])] +
+

Map difference

+

You can remove the key/value pairs in a map that are present in another map in +O(N*log(M)) time (where M is the size of the first map and N is the size of the +second), or effectively linear. Note that only keys whose value is the same +in both maps are removed. To remove all the keys in one map from another map, +you can say removeAll(M1, keys(M2)).

+
k
syntax Map ::= Map "-Map" Map [function, total, hook(MAP.difference)] +
+

Multiple map update

+

You can update a map by adding all the key/value pairs in the second map in +O(N*log(M)) time (where M is the size of the first map and N is the size of the +second map), or effectively linear. If any keys are present in both maps, the +value from the second map overwrites the value in the first. This function is +total, which is distinct from map concatenation, a partial function only +defined on maps with disjoint keys.

+
k
syntax Map ::= updateMap(Map, Map) [function, total, hook(MAP.updateAll)] +
+

Multiple map removal

+

You can remove a Set of keys from a map in O(N*log(M)) time (where M is the +size of the Map and N is the size of the Set), or effectively linear.

+
k
syntax Map ::= removeAll(Map, Set) [function, total, hook(MAP.removeAll)] +
+

Map keys (as Set)

+

You can get a Set of all the keys in a Map in O(N) time.

+
k
syntax Set ::= keys(Map) [function, total, hook(MAP.keys)] +
+

Map keys (as List)

+

You can get a List of all the keys in a Map in O(N) time.

+
k
syntax List ::= "keys_list" "(" Map ")" [function, hook(MAP.keys_list)] +
+

Map key membership

+

You can check whether a key is present in a map in O(1) time.

+
k
syntax Bool ::= KItem "in_keys" "(" Map ")" [function, total, hook(MAP.in_keys)] +
+

Map values (as List)

+

You can get a List of all the values in a map in O(N) time.

+
k
syntax List ::= values(Map) [function, hook(MAP.values)] +
+

Map size

+

You can get the number of key/value pairs in a map in O(1) time.

+
k
syntax Int ::= size(Map) [function, total, hook(MAP.size), symbol(sizeMap)] +
+

Map inclusion

+

You can determine whether a Map is a strict subset of another Map in O(N) +time (where N is the size of the first map). Only keys that are bound to the +same value are considered equal.

+
k
syntax Bool ::= Map "<=Map" Map [function, total, hook(MAP.inclusion)] +
+

Map choice

+

You can get an arbitrarily chosen key of a Map in O(1) time. The same key +will always be returned for the same map, but no guarantee is given that two +different maps will return the same element, even if they are similar.

+
k
syntax KItem ::= choice(Map) [function, hook(MAP.choice), symbol(Map:choice)] +
+

Implementation of Maps

+

The remainder of this section contains lemmas used by the Java and Haskell +backend to simplify expressions of sort Map. They do not affect the semantics +of maps, merely describing additional rules that the backend can use to +simplify terms.

+
k
endmodule + +module MAP-KORE-SYMBOLIC [symbolic,haskell] + imports MAP + imports private K-EQUAL + imports private BOOL + + rule #Ceil(@M:Map [@K:KItem]) => {(@K in_keys(@M)) #Equals true} #And #Ceil(@M) #And #Ceil(@K) [simplification] + + // Symbolic update + + // Adding the definedness condition `notBool (K in_keys(M))` in the ensures clause of the following rule would be redundant + // because K also appears in the rhs, preserving the case when it's #Bottom. + rule (K |-> _ M:Map) [ K <- V ] => (K |-> V M) [simplification] + rule M:Map [ K <- V ] => (K |-> V M) requires notBool (K in_keys(M)) [simplification] + rule M:Map [ K <- _ ] [ K <- V ] => M [ K <- V ] [simplification] + // Adding the definedness condition `notBool (K1 in_keys(M))` in the ensures clause of the following rule would be redundant + // because K1 also appears in the rhs, preserving the case when it's #Bottom. + rule (K1 |-> V1 M:Map) [ K2 <- V2 ] => (K1 |-> V1 (M [ K2 <- V2 ])) requires K1 =/=K K2 [simplification] + + // Symbolic remove + rule (K |-> _ M:Map) [ K <- undef ] => M ensures notBool (K in_keys(M)) [simplification] + rule M:Map [ K <- undef ] => M requires notBool (K in_keys(M)) [simplification] + // Adding the definedness condition `notBool (K1 in_keys(M))` in the ensures clause of the following rule would be redundant + // because K1 also appears in the rhs, preserving the case when it's #Bottom. + rule (K1 |-> V1 M:Map) [ K2 <- undef ] => (K1 |-> V1 (M [ K2 <- undef ])) requires K1 =/=K K2 [simplification] + + // Symbolic lookup + rule (K |-> V M:Map) [ K ] => V ensures notBool (K in_keys(M)) [simplification] + rule (K1 |-> _V M:Map) [ K2 ] => M [K2] requires K1 =/=K K2 ensures notBool (K1 in_keys(M)) [simplification] + rule (_MAP:Map [ K <- V1 ]) [ K ] => V1 [simplification] + rule ( MAP:Map [ K1 <- _V1 ]) [ K2 ] => MAP [ K2 ] requires K1 =/=K K2 [simplification] + + rule (K |-> V M:Map) [ K ] orDefault _ => V ensures notBool (K in_keys(M)) [simplification] + rule (K1 |-> _V M:Map) [ K2 ] orDefault D => M [K2] orDefault D requires K1 =/=K K2 ensures notBool (K1 in_keys(M)) [simplification] + rule (_MAP:Map [ K <- V1 ]) [ K ] orDefault _ => V1 [simplification] + rule ( MAP:Map [ K1 <- _V1 ]) [ K2 ] orDefault D => MAP [ K2 ] orDefault D requires K1 =/=K K2 [simplification] + rule .Map [ _ ] orDefault D => D [simplification] + + // Symbolic in_keys + rule K in_keys(_M [ K <- undef ]) => false [simplification] + rule K in_keys(_M [ K <- _ ]) => true [simplification] + rule K1 in_keys(M [ K2 <- _ ]) => true requires K1 ==K K2 orBool K1 in_keys(M) [simplification] + rule K1 in_keys(M [ K2 <- _ ]) => K1 in_keys(M) requires K1 =/=K K2 [simplification] + + rule {false #Equals @Key in_keys(.Map)} => #Ceil(@Key) [simplification] + rule {@Key in_keys(.Map) #Equals false} => #Ceil(@Key) [simplification] + rule {false #Equals @Key in_keys(Key' |-> Val @M)} => #Ceil(@Key) #And #Ceil(Key' |-> Val @M) #And #Not({@Key #Equals Key'}) #And {false #Equals @Key in_keys(@M)} [simplification] + rule {@Key in_keys(Key' |-> Val @M) #Equals false} => #Ceil(@Key) #And #Ceil(Key' |-> Val @M) #And #Not({@Key #Equals Key'}) #And {@Key in_keys(@M) #Equals false} [simplification] + +/* +// The rule below is automatically generated by the frontend for every sort +// hooked to MAP.Map. It is left here to serve as documentation. 
+ + rule #Ceil(@M:Map (@K:KItem |-> @V:KItem)) => {(@K in_keys(@M)) #Equals false} #And #Ceil(@M) #And #Ceil(@K) #And #Ceil(@V) + [simplification] +*/ +endmodule + +module MAP-SYMBOLIC + imports MAP-KORE-SYMBOLIC +endmodule +
+
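
As a usage illustration only (the MAP-EXAMPLE module below is not part of the prelude), the operations above can be combined as follows; the right-hand side of the rule evaluates to 20:

+
k
module MAP-EXAMPLE + imports MAP + imports INT + + // illustrative only: the binding for key 2 is overwritten before the lookup + syntax KItem ::= "mapDemo" [function] + rule mapDemo => ((1 |-> 10 2 |-> 15) [ 2 <- 20 ]) [ 2 ] +endmodule +
+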

Range Maps

+

Provided here is the syntax of an implementation of immutable, associative, +commutative range maps from Int to KItem. This type is hooked to an +implementation of range maps provided by the LLVM backend. +Currently, this type is not supported by other backends. +Although the underlying range map data structure supports any key sort, the +current implementation by the backend only supports Int keys due to +limitations of the underlying ordering function.

+
k
module RANGEMAP + imports private BOOL-SYNTAX + imports private INT-SYNTAX + imports private LIST + imports private SET + +
+

Range, bounded inclusively below and exclusively above.

+
k
syntax Range ::= "[" KItem "," KItem ")" [symbol(RangeMap:Range)] + + syntax RangeMap [hook(RANGEMAP.RangeMap)] +
+

Range map concatenation

+

The RangeMap sort represents a map whose keys are stored as ranges, bounded +inclusively below and exclusively above. Contiguous or overlapping ranges that +map to the same value are merged into a single range.

+

You can construct a new RangeMap consisting of range/value pairs of two +RangeMaps. If the RangeMaps have overlapping ranges an exception will be +thrown during concrete execution. This operation is O(N*log(M)) (where N is +the size of the smaller map and M is the size of the larger map).

+
k
syntax RangeMap ::= RangeMap RangeMap [left, function, hook(RANGEMAP.concat), symbol(_RangeMap_), assoc, comm, unit(.RangeMap), element(_r|->_), index(0), format(%1%n%2)] +
+

Range map unit

+

The RangeMap with zero elements is represented by .RangeMap.

+
k
syntax RangeMap ::= ".RangeMap" [function, total, hook(RANGEMAP.unit), symbol(.RangeMap)] +
+

Range map elements

+

An element of a RangeMap is constructed via the r|-> operator. The range +of keys is on the left, and the value is on the right.

+
k
syntax RangeMap ::= Range "r|->" KItem [function, hook(RANGEMAP.elementRng), symbol(_r|->_), injective] + + syntax priority _r|->_ > _RangeMap_ .RangeMap + syntax non-assoc _r|->_ +
+

Range map lookup

+

You can look up the value associated with a key of a RangeMap in O(log(N)) +time (where N is the size of the RangeMap). This will yield an exception +during concrete execution if the key is not in the range map.

+
k
syntax KItem ::= RangeMap "[" KItem "]" [function, hook(RANGEMAP.lookup), symbol(RangeMap:lookup)] +
+

Range map lookup with default

+

You can also look up the value associated with a key of a RangeMap using a +total function that assigns a specific default value if the key is not present +in the RangeMap. This operation is also O(log(N)) (where N is the size of +the range map).

+
k
syntax KItem ::= RangeMap "[" KItem "]" "orDefault" KItem [function, total, hook(RANGEMAP.lookupOrDefault), symbol(RangeMap:lookupOrDefault)] +
+

Range map lookup for range of key

+

You can look up the range that a key of a RangeMap is stored in, in +O(log(N)) time (where N is the size of the RangeMap). This will yield an +exception during concrete execution if the key is not in the range map.

+
k
syntax Range ::= "find_range" "(" RangeMap "," KItem ")" [function, hook(RANGEMAP.find_range), symbol(RangeMap:find_range)] +
+

Range map update

+

You can insert a range/value pair into a RangeMap in O(log(N)) time (where N +is the size of the RangeMap). Any ranges adjacent to or overlapping with the +range to be inserted will be updated accordingly.

+
k
syntax RangeMap ::= RangeMap "[" keyRange: Range "<-" value: KItem "]" [function, symbol(RangeMap:update), hook(RANGEMAP.updateRng), prefer] +
+

Range map delete

+

You can remove a range/value pair from a RangeMap in O(log(N)) time (where N +is the size of the RangeMap). If all or any part of the range is present in +the range map, it will be removed.

+
k
syntax RangeMap ::= RangeMap "[" Range "<-" "undef" "]" [function, hook(RANGEMAP.removeRng), symbol(_r[_<-undef])] +
+

Range map difference

+

You can remove the range/value pairs in a RangeMap that are also present in +another RangeMap in O(max{M,N}*log(M)) time (where M is the size of the +first RangeMap and N is the size of the second RangeMap). Note that only +the parts of overlapping ranges whose value is the same in both range maps +will be removed.

+
k
syntax RangeMap ::= RangeMap "-RangeMap" RangeMap [function, total, hook(RANGEMAP.difference)] +
+

Multiple range map update

+

You can update a RangeMap by adding all the range/value pairs in the second +RangeMap in O(N*log(M+N)) time (where M is the size of the first RangeMap +and N is the size of the second RangeMap). If any ranges are overlapping, +the value from the second range map overwrites the value in the first for the +parts where ranges are overlapping. This function is total, which is distinct +from range map concatenation, a partial function only defined on range maps +with non-overlapping ranges.

+
k
syntax RangeMap ::= updateRangeMap(RangeMap, RangeMap) [function, total, hook(RANGEMAP.updateAll)] +
+

Multiple range map removal

+

You can remove a Set of ranges from a RangeMap in O(N*log(M)) time (where +M is the size of the RangeMap and N is the size of the Set). For every +range in the set, all or any part of it that is present in the range map will +be removed.

+
k
syntax RangeMap ::= removeAll(RangeMap, Set) [function, hook(RANGEMAP.removeAll)] +
+

Range map keys (as Set)

+

You can get a Set of all the ranges in a RangeMap in O(N) time (where N +is the size of the RangeMap).

+
k
syntax Set ::= keys(RangeMap) [function, total, hook(RANGEMAP.keys)] +
+

Range map keys (as List)

+

You can get a List of all the ranges in a RangeMap in O(N) time (where N +is the size of the RangeMap).

+
k
syntax List ::= "keys_list" "(" RangeMap ")" [function, hook(RANGEMAP.keys_list)] +
+

Range map key membership

+

You can check whether a key is present in a RangeMap in O(log(N)) time (where +N is the size of the RangeMap).

+
k
syntax Bool ::= KItem "in_keys" "(" RangeMap ")" [function, total, hook(RANGEMAP.in_keys)] +
+

Range map values (as List)

+

You can get a List of all values in a RangeMap in O(N) time (where N is the +size of the RangeMap).

+
k
syntax List ::= values(RangeMap) [function, hook(RANGEMAP.values)] +
+

Range map size

+

You can get the number of range/value pairs in a RangeMap in O(1) time.

+
k
syntax Int ::= size(RangeMap) [function, total, hook(RANGEMAP.size), symbol(sizeRangeMap)] +
+

Range map inclusion

+

You can determine whether a RangeMap is a strict subset of another RangeMap +in O(M+N) time (where M is the size of the first RangeMap and N is the size +of the second RangeMap). Only keys within equal or overlapping ranges that +are bound to the same value are considered equal.

+
k
syntax Bool ::= RangeMap "<=RangeMap" RangeMap [function, total, hook(RANGEMAP.inclusion)] +
+

Range map choice

+

You can get an arbitrarily chosen key of a RangeMap in O(1) time. The same +key will always be returned for the same range map, but no guarantee is given +that two different range maps will return the same element, even if they are +similar.

+
k
syntax KItem ::= choice(RangeMap) [function, hook(RANGEMAP.choice), symbol(RangeMap:choice)] +endmodule +
+
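
As a usage illustration only (the RANGEMAP-EXAMPLE module below is not part of the prelude, and RangeMap is currently supported only by the LLVM backend), a lookup in a one-range map could be written as follows; the right-hand side of the rule evaluates to 42 because the key 5 falls in the half-open range [0, 10):

+
k
module RANGEMAP-EXAMPLE + imports RANGEMAP + imports INT + + // illustrative only: 5 lies inside the range [0, 10) + syntax KItem ::= "rangeMapDemo" [function] + rule rangeMapDemo => ([ 0 , 10 ) r|-> 42) [ 5 ] +endmodule +
+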

Sets

+

Provided here is the syntax of an implementation of immutable, associative, +commutative sets of KItem. This type is hooked to an implementation of sets +provided by the backend. For more information on matching on sets and allowable +patterns for doing so, refer to K's +user documentation.

+
k
module SET + imports private INT-SYNTAX + imports private BASIC-K + + syntax Set [hook(SET.Set)] +
+

Set concatenation

+

The Set sort represents a mathematical set (A collection of unique items). +The sets are nilpotent, i.e., the concatenation of two sets containing elements +in common is #False (note however, this may be silently allowed during +concrete execution). If you intend to add an element to a set that might +already be present in the set, use the |Set operator instead.

+

The concatenation operator is O(Nlog(M)) where N is the size of the smaller +set, when it appears on the right hand side. When it appears on the left hand +side and all variables are bound, it is O(Nlog(M)) where M is the size of the +set it is matching and N is the number of elements being matched. When it +appears on the left hand side containing variables not bound elsewhere in the +term, it is O(N^K) where N is the size of the set it is matching and K is the +number of unbound keys being matched. In other words, one unbound variable is +linear, two is quadratic, three is cubic, etc.

+
k
syntax Set ::= Set Set [left, function, hook(SET.concat), symbol(_Set_), assoc, comm, unit(.Set), idem, element(SetItem), format(%1%n%2)] +
+

Set unit

+

The set with zero elements is represented by .Set.

+
k
syntax Set ::= ".Set" [function, total, hook(SET.unit), symbol(.Set)] +
+

Set elements

+

An element of a Set is constructed via the SetItem operator.

+
k
syntax Set ::= SetItem(KItem) [function, total, hook(SET.element), symbol(SetItem), injective] +
+

Set union

+

You can compute the union of two sets in O(N*log(M)) time (where N is the size +of the smaller set). Note that the base of the logarithm is a relatively high +number and thus the time is effectively linear. The union consists of all the +elements present in either set.

+
k
syntax Set ::= Set "|Set" Set [left, function, total, hook(SET.union), comm] + rule S1:Set |Set S2:Set => S1 (S2 -Set S1) [concrete] +
+

Set intersection

+

You can compute the intersection of two sets in O(N*log(M)) time (where N +is the size of the smaller set), or effectively linear. The intersection +consists of all the elements present in both sets.

+
k
syntax Set ::= intersectSet(Set, Set) [function, total, hook(SET.intersection), comm] +
+

Set complement

+

You can compute the relative complement of two sets in O(N*log(M)) time (where +N is the size of the second set), or effectively linear. This is the set of +elements in the first set that are not present in the second set.

+
k
syntax Set ::= Set "-Set" Set [function, total, hook(SET.difference), symbol(Set:difference)] +
+

Set membership

+

You can compute whether an element is a member of a set in O(1) time.

+
k
syntax Bool ::= KItem "in" Set [function, total, hook(SET.in), symbol(Set:in)] +
+

Set inclusion

+

You can determine whether a Set is a strict subset of another Set in O(N) +time (where N is the size of the first set).

+
k
syntax Bool ::= Set "<=Set" Set [function, total, hook(SET.inclusion)] +
+

Set size

+

You can get the number of elements (the cardinality) of a set in O(1) time.

+
k
syntax Int ::= size(Set) [function, total, hook(SET.size)] +
+

Set choice

+

You can get an arbitrarily chosen element of a Set in O(1) time. The same +element will always be returned for the same set, but no guarantee is given +that two different sets will return the same element, even if they are similar.

+
k
syntax KItem ::= choice(Set) [function, hook(SET.choice), symbol(Set:choice)] +
+
k
endmodule +
+

Implementation of Sets

+

The following lemmas are simplifications that the Haskell backend can +apply to simplify expressions of sort Set.

+
k
module SET-KORE-SYMBOLIC [symbolic,haskell] + imports SET + imports private K-EQUAL + imports private BOOL + + //Temporarly rule for #Ceil simplification, should be generated in front-end + +// Matching for this version not implemented. + // rule #Ceil(@S1:Set @S2:Set) => + // {intersectSet(@S1, @S2) #Equals .Set} #And #Ceil(@S1) #And #Ceil(@S2) + // [simplification] + +//simpler version + rule #Ceil(@S:Set SetItem(@E:KItem)) => + {(@E in @S) #Equals false} #And #Ceil(@S) #And #Ceil(@E) + [simplification] + + // -Set simplifications + rule S -Set .Set => S [simplification] + rule .Set -Set _ => .Set [simplification] + rule SetItem(X) -Set (S SetItem(X)) => .Set + ensures notBool (X in S) [simplification] + rule S -Set (S SetItem(X)) => .Set + ensures notBool (X in S) [simplification] + rule (S SetItem(X)) -Set S => SetItem(X) + ensures notBool (X in S) [simplification] + rule (S SetItem(X)) -Set SetItem(X) => S + ensures notBool (X in S) [simplification] + // rule SetItem(X) -Set S => SetItem(X) + // requires notBool (X in S) [simplification] + // rule (S1 SetItem(X)) -Set (S2 SetItem(X)) => S1 -Set S2 + // ensures notBool (X in S1) + // andBool notBool (X in S2) [simplification] + + + + // |Set simplifications + rule S |Set .Set => S [simplification, comm] + rule S |Set S => S [simplification] + + rule (S SetItem(X)) |Set SetItem(X) => S SetItem(X) + ensures notBool (X in S) [simplification, comm] + // Currently disabled, see runtimeverification/haskell-backend#3301 + // rule (S SetItem(X)) |Set S => S SetItem(X) + // ensures notBool (X in S) [simplification, comm] + + // intersectSet simplifications + rule intersectSet(.Set, _ ) => .Set [simplification, comm] + rule intersectSet( S , S ) => S [simplification] + + rule intersectSet( S SetItem(X), SetItem(X)) => SetItem(X) + ensures notBool (X in S) [simplification, comm] + // Currently disabled, see runtimeverification/haskell-backend#3294 + // rule intersectSet( S SetItem(X) , S) => S ensures notBool (X in S) [simplification, comm] + rule intersectSet( S1 SetItem(X), S2 SetItem(X)) => intersectSet(S1, S2) SetItem(X) + ensures notBool (X in S1) + andBool notBool (X in S2) [simplification] + + // membership simplifications + rule _E in .Set => false [simplification] + rule E in (S SetItem(E)) => true + ensures notBool (E in S) [simplification] + +// These two rules would be sound but impose a giant overhead on `in` evaluation: + // rule E1 in (S SetItem(E2)) => true requires E1 in S + // ensures notBool (E2 in S) [simplification] + // rule E1 in (S SetItem(E2)) => E1 in S requires E1 =/=K E2 + // ensures notBool (E2 in S) [simplification] + + rule X in ((SetItem(X) S) |Set _ ) => true + ensures notBool (X in S) [simplification] + rule X in ( _ |Set (SetItem(X) S)) => true + ensures notBool (X in S) [simplification] + +endmodule + +module SET-SYMBOLIC + imports SET-KORE-SYMBOLIC +endmodule +
+

Lists

+

Provided here is the syntax of an implementation of immutable, associative +lists of KItem. This type is hooked to an implementation of lists provided +by the backend. For more information on matching on lists and allowable +patterns for doing so, refer to K's +user documentation.

+
k
module LIST + imports private INT-SYNTAX + imports private BASIC-K + + syntax List [hook(LIST.List)] +
+

List concatenation

+

The List sort is an ordered collection that may contain duplicate elements. +Lists are backed by relaxed radix balanced trees, which means that they support +efficiently adding elements to both sides of the list, concatenating two lists, +indexing, and updating elements.

+

The concatenation operator is O(log(N)) (where N is the size of the longer +list) when it appears on the right hand side. When it appears on the left hand +side, it is O(N), where N is the number of elements matched on the front and +back of the list.

+
k
syntax List ::= List List [left, function, total, hook(LIST.concat), symbol(_List_), smtlib(smt_seq_concat), assoc, unit(.List), element(ListItem), update(List:set), format(%1%n%2)] +
+

List unit

+

The list with zero elements is represented by .List.

+
k
syntax List ::= ".List" [function, total, hook(LIST.unit), symbol(.List), smtlib(smt_seq_nil)] +
+

List elements

+

An element of a List is constructed via the ListItem operator.

+
k
syntax List ::= ListItem(KItem) [function, total, hook(LIST.element), symbol(ListItem), smtlib(smt_seq_elem)] +
+

List prepend

+

An element can be added to the front of a List using the pushList operator.

+
k
syntax List ::= pushList(KItem, List) [function, total, hook(LIST.push), symbol(pushList)] + rule pushList(K::KItem, L1::List) => ListItem(K) L1 +
+

List indexing

+

You can get an element of a list by its integer offset in O(log(N)) time, or +effectively constant. Positive indices are 0-indexed from the beginning of the +list, and negative indices are -1-indexed from the end of the list. In other +words, 0 is the first element and -1 is the last element.

+
k
syntax KItem ::= List "[" Int "]" [function, hook(LIST.get), symbol(List:get)] +
+
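
For example, here is a standalone sketch (hypothetical module and helper names); the projection cast {...}:>Int merely narrows the returned KItem back to Int.

+
k
module LIST-INDEX-EXAMPLE
  imports LIST
  imports INT

  syntax List ::= "threeItems" [function]
  rule threeItems => ListItem(10) ListItem(20) ListItem(30)

  // index 0 is the front element (10), index -1 is the back element (30)
  syntax Int ::= "firstItem" [function]
               | "lastItem"  [function]
  rule firstItem => {threeItems[0]}:>Int
  rule lastItem  => {threeItems[-1]}:>Int
endmodule
+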

List update

+

You can create a new List with a new value at a particular index in +O(log(N)) time, or effectively constant.

+
k
syntax List ::= List "[" index: Int "<-" value: KItem "]" [function, hook(LIST.update), symbol(List:set)] +
+

List of identical elements

+

You can create a list with length elements, each containing value, in O(N) +time.

+
k
syntax List ::= makeList(length: Int, value: KItem) [function, hook(LIST.make)] +
+

Multiple list update

+

You can create a new List which is equal to dest except the N elements +starting at index are replaced with the contents of src in O(N*log(K)) time +(where K is the size of dest and N is the size of src), or effectively linear. If index + N > K, an exception is thrown.

+
k
syntax List ::= updateList(dest: List, index: Int, src: List) [function, hook(LIST.updateAll)] +
+

List fill

+

You can create a new List where the length elements starting at index +are replaced with value, in O(length*log(N)) time, or effectively linear.

+
k
syntax List ::= fillList(List, index: Int, length: Int, value: KItem) [function, hook(LIST.fill)] +
+

List slicing

+

You can compute a new List by removing fromFront elements from the front +of the list and fromBack elements from the back of the list in +O((fromFront+fromBack)*log(N)) time, or effectively linear.

+
k
syntax List ::= range(List, fromFront: Int, fromBack: Int) [function, hook(LIST.range), symbol(List:range)] +
+

List membership

+

You can compute whether an element is in a list in O(N) time. For repeated +comparisons, it is much better to first convert to a set using List2Set.

+
k
syntax Bool ::= KItem "in" List [function, total, hook(LIST.in), symbol(_inList_)] +
+

List size

+

You can get the number of elements of a list in O(1) time.

+
k
syntax Int ::= size(List) [function, total, hook(LIST.size), symbol(sizeList), smtlib(smt_seq_len)] +
+
k
endmodule +
+

Collection Conversions

+

It is possible to convert from a List to a Set or from a Set to a List. +Converting from a List to a Set and back will not provide the same list; +duplicates will have been removed and the list may be reordered. Converting +from a Set to a List and back will generate the same set.

+

Note that because sets are unordered and lists are ordered, converting from a +Set to a List will generate some arbitrary ordering of elements, which may +or may not match the natural ordering you might expect. Two +equal sets are guaranteed to generate the same ordering, but no guarantee is +otherwise provided about what the ordering will be. In particular, adding an +element to a set may completely reorder the elements already in the set, when +it is converted to a list.

+
k
module COLLECTIONS + imports LIST + imports SET + imports MAP + + syntax List ::= Set2List(Set) [function, total, hook(SET.set2list)] + syntax Set ::= List2Set(List) [function, total, hook(SET.list2set)] + +endmodule +
+
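
As a quick illustration (hypothetical module and helper names), converting a list with a repeated element to a set removes the duplicate.

+
k
module COLLECTIONS-EXAMPLE
  imports COLLECTIONS
  imports INT

  // evaluates to 2: the duplicate ListItem(1) collapses when converted to a Set
  syntax Int ::= "distinctCount" [function]
  rule distinctCount => size(List2Set(ListItem(1) ListItem(2) ListItem(1)))
endmodule
+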

Booleans

+

Provided here is the syntax of an implementation of boolean algebra in K. +This type is hooked to an implementation of booleans provided by the backend. +Note that this algebra is different from the builtin truth in matching logic. +You can, however, convert from the truth of the Bool sort to the truth in +matching logic via the expression {B #Equals true}.

+

The boolean values are true and false.

+
k
module SORT-BOOL + syntax Bool [hook(BOOL.Bool)] +endmodule + +module BOOL-SYNTAX + imports SORT-BOOL + syntax Bool ::= "true" [token] + syntax Bool ::= "false" [token] +endmodule + +module BOOL-COMMON + imports private BASIC-K + imports BOOL-SYNTAX +
+

Basic boolean arithmetic

+

You can:

+
    +
  • Negate a boolean value.
  • +
  • AND two boolean values.
  • +
  • XOR two boolean values.
  • +
  • OR two boolean values.
  • +
  • IMPLIES two boolean values (i.e., P impliesBool Q is the same as +notBool P orBool Q)
  • +
  • Check equality of two boolean values.
  • +
  • Check inequality of two boolean values.
  • +
+

Note that only andThenBool and orElseBool are short-circuiting. andBool +and orBool may be short-circuited in concrete backends, but in symbolic +backends, both arguments will be evaluated.

+
k
syntax Bool ::= "notBool" Bool [function, total, symbol(notBool_), smt-hook(not), group(boolOperation), hook(BOOL.not)] + > Bool "andBool" Bool [function, total, symbol(_andBool_), left, smt-hook(and), group(boolOperation), hook(BOOL.and)] + | Bool "andThenBool" Bool [function, total, symbol(_andThenBool_), left, smt-hook(and), group(boolOperation), hook(BOOL.andThen)] + | Bool "xorBool" Bool [function, total, symbol(_xorBool_), left, smt-hook(xor), group(boolOperation), hook(BOOL.xor)] + | Bool "orBool" Bool [function, total, symbol(_orBool_), left, smt-hook(or), group(boolOperation), hook(BOOL.or)] + | Bool "orElseBool" Bool [function, total, symbol(_orElseBool_), left, smt-hook(or), group(boolOperation), hook(BOOL.orElse)] + | Bool "impliesBool" Bool [function, total, symbol(_impliesBool_), left, smt-hook(=>), group(boolOperation), hook(BOOL.implies)] + > left: + Bool "==Bool" Bool [function, total, symbol(_==Bool_), left, comm, smt-hook(=), hook(BOOL.eq)] + | Bool "=/=Bool" Bool [function, total, symbol(_=/=Bool_), left, comm, smt-hook(distinct), hook(BOOL.ne)] +
+
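
As a small sanity check of the equivalence noted above between impliesBool and notBool/orBool, here is a standalone sketch (the module and function names are hypothetical).

+
k
module BOOL-EXAMPLE
  imports BOOL

  // evaluates to true for any pair of booleans:
  // P impliesBool Q agrees with notBool P orBool Q
  syntax Bool ::= implySame(Bool, Bool) [function]
  rule implySame(P:Bool, Q:Bool) => (P impliesBool Q) ==Bool (notBool P orBool Q)
endmodule
+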

Implementation of Booleans

+

The remainder of this section consists of an implementation in K of the +operations listed above.

+
k
rule notBool true => false + rule notBool false => true + + rule true andBool B:Bool => B:Bool + rule B:Bool andBool true => B:Bool [simplification] + rule false andBool _:Bool => false + rule _:Bool andBool false => false [simplification] + + rule true andThenBool K::Bool => K + rule K::Bool andThenBool true => K [simplification] + rule false andThenBool _ => false + rule _ andThenBool false => false [simplification] + + rule false xorBool B:Bool => B:Bool + rule B:Bool xorBool false => B:Bool [simplification] + rule B:Bool xorBool B:Bool => false + + rule true orBool _:Bool => true + rule _:Bool orBool true => true [simplification] + rule false orBool B:Bool => B + rule B:Bool orBool false => B [simplification] + + rule true orElseBool _ => true + rule _ orElseBool true => true [simplification] + rule false orElseBool K::Bool => K + rule K::Bool orElseBool false => K [simplification] + + rule true impliesBool B:Bool => B + rule false impliesBool _:Bool => true + rule _:Bool impliesBool true => true [simplification] + rule B:Bool impliesBool false => notBool B [simplification] + + rule B1:Bool =/=Bool B2:Bool => notBool (B1 ==Bool B2) +endmodule + +module BOOL-KORE [symbolic] + imports BOOL-COMMON + + rule {true #Equals notBool @B} => {false #Equals @B} [simplification] + rule {notBool @B #Equals true} => {@B #Equals false} [simplification] + rule {false #Equals notBool @B} => {true #Equals @B} [simplification] + rule {notBool @B #Equals false} => {@B #Equals true} [simplification] + + rule {true #Equals @B1 andBool @B2} => {true #Equals @B1} #And {true #Equals @B2} [simplification] + rule {@B1 andBool @B2 #Equals true} => {@B1 #Equals true} #And {@B2 #Equals true} [simplification] + rule {false #Equals @B1 orBool @B2} => {false #Equals @B1} #And {false #Equals @B2} [simplification] + rule {@B1 orBool @B2 #Equals false} => {@B1 #Equals false} #And {@B2 #Equals false} [simplification] +endmodule + +module BOOL + imports BOOL-COMMON + imports BOOL-KORE +endmodule +
+

Integers

+

Provided here is the syntax of an implementation of arbitrary-precision +integer arithmetic in K. This type is hooked to an implementation of integers +provided by the backend. For a fixed-width integer type, see the MINT module +below.

+

The UNSIGNED-INT-SYNTAX module provides a syntax of whole numbers in K. +This is useful because often programming languages implement the sign of an +integer as a unary operator rather than part of the lexical syntax of integers. +However, you can also directly reference integers with a sign using the +INT-SYNTAX module.

+
k
module UNSIGNED-INT-SYNTAX + syntax Int [hook(INT.Int)] + syntax Int ::= r"[0-9]+" [prefer, token, prec(2)] +endmodule + +module INT-SYNTAX + imports UNSIGNED-INT-SYNTAX + syntax Int ::= r"[\\+\\-]?[0-9]+" [prefer, token, prec(2)] +endmodule + +module INT-COMMON + imports INT-SYNTAX + imports private BOOL +
+

Integer arithmetic

+

You can:

+
    +
  • Compute the bitwise complement ~Int of an integer value in twos-complement.
  • +
  • Compute the exponentiation ^Int of two integers.
  • +
  • Compute the exponentiation of two integers modulo another integer (^%Int). +A ^%Int B C is equal in value to (A ^Int B) %Int C, but has a better +asymptotic complexity.
  • +
  • Compute the product *Int of two integers.
  • +
  • Compute the quotient /Int or modulus %Int of two integers using +t-division, which rounds towards zero. Division by zero is #False.
  • +
  • Compute the quotient divInt or modulus modInt of two integers using +Euclidean division, in which the remainder is always non-negative. Division +by zero is #False.
  • +
  • Compute the sum +Int or difference -Int of two integers.
  • +
  • Compute the arithmetic right shift >>Int of two integers. Shifting by a +negative quantity is #False.
  • +
  • Compute the left shift of two integers. Shifting by a negative quantity is +#False.
  • +
  • Compute the bitwise and of two integers in twos-complement.
  • +
  • Compute the bitwise xor of two integers in twos-complement.
  • +
  • Compute the bitwise inclusive-or of two integers in twos-complement.
  • +
+
k
syntax Int ::= "~Int" Int [function, symbol(~Int_), total, hook(INT.not), smtlib(notInt)] + > left: + Int "^Int" Int [function, symbol(_^Int_), left, smt-hook(^), hook(INT.pow)] + | Int "^%Int" Int Int [function, symbol(_^%Int__), left, smt-hook((mod (^ #1 #2) #3)), hook(INT.powmod)] + > left: + Int "*Int" Int [function, total, symbol(_*Int_), left, comm, smt-hook(*), hook(INT.mul)] + /* FIXME: translate /Int and %Int into smtlib */ + /* /Int and %Int implement t-division, which rounds towards 0. SMT hooks need to convert from Euclidian division operations */ + | Int "/Int" Int [function, symbol(_/Int_), left, + smt-hook((ite (or (= 0 (mod #1 #2)) (>= #1 0)) (div #1 #2) (ite (> #2 0) (+ (div #1 #2) 1) (- (div #1 #2) 1)))), + hook(INT.tdiv)] + | Int "%Int" Int [function, symbol(_%Int_), left, + smt-hook((ite (or (= 0 (mod #1 #2)) (>= #1 0)) (mod #1 #2) (ite (> #2 0) (- (mod #1 #2) #2) (+ (mod #1 #2) #2)))), + hook(INT.tmod)] + /* divInt and modInt implement e-division according to the Euclidean division theorem, therefore the remainder is always positive */ + | Int "divInt" Int [function, symbol(_divInt_), left, smt-hook(div), hook(INT.ediv)] + | Int "modInt" Int [function, symbol(_modInt_), left, smt-hook(mod), hook(INT.emod)] + > left: + Int "+Int" Int [function, total, symbol(_+Int_), left, comm, smt-hook(+), hook(INT.add)] + | Int "-Int" Int [function, total, symbol(_-Int_), left, smt-hook(-), hook(INT.sub)] + > left: + Int ">>Int" Int [function, symbol(_>>Int_), left, hook(INT.shr), smtlib(shrInt)] + | Int "<<Int" Int [function, symbol(_<<Int_), left, hook(INT.shl), smtlib(shlInt)] + > left: + Int "&Int" Int [function, total, symbol(_&Int_), left, comm, hook(INT.and), smtlib(andInt)] + > left: + Int "xorInt" Int [function, total, symbol(_xorInt_), left, comm, hook(INT.xor), smtlib(xorInt)] + > left: + Int "|Int" Int [function, total, symbol(_|Int_), left, comm, hook(INT.or), smtlib(orInt)] +
+
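
The difference between t-division and Euclidean division only shows up on negative operands. The following standalone sketch (hypothetical names) records the expected results in comments.

+
k
module INT-DIVISION-EXAMPLE
  imports INT

  syntax Int ::= "tQuot" [function]
               | "tRem"  [function]
               | "eQuot" [function]
               | "eRem"  [function]

  rule tQuot => -7 /Int 2      // -3: /Int rounds toward zero
  rule tRem  => -7 %Int 2      // -1: the remainder takes the sign of the dividend
  rule eQuot => -7 divInt 2    // -4: Euclidean quotient
  rule eRem  => -7 modInt 2    // 1: the Euclidean remainder is always non-negative
endmodule
+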

Integer minimum and maximum

+

You can compute the minimum and maximum minInt and maxInt of two integers.

+
k
syntax Int ::= "minInt" "(" Int "," Int ")" [function, total, smt-hook((ite (< #1 #2) #1 #2)), hook(INT.min)] + | "maxInt" "(" Int "," Int ")" [function, total, smt-hook((ite (< #1 #2) #2 #1)), hook(INT.max)] +
+

Absolute value

+

You can compute the absolute value absInt of an integer.

+
k
syntax Int ::= absInt ( Int ) [function, total, smt-hook((ite (< #1 0) (- 0 #1) #1)), hook(INT.abs)] +
+

Log base 2

+

You can compute the log base 2, rounded towards zero, of an integer. The log +base 2 of an integer is equal to the index of the highest bit set in the +representation of a positive integer. Log base 2 of zero or a negative number +is #False.

+
k
syntax Int ::= log2Int ( Int ) [function, hook(INT.log2)] +
+

Bit slicing

+

You can compute the value of a range of bits in the twos-complement +representation of an integer, interpreted as either unsigned or signed. +index is offset from 0 and length is the number of bits, starting +with index, that should be read. The number is assumed to be represented +in little endian notation with each byte going from least significant to +most significant. In other words, 0 is the least-significant bit, and each +successive bit is more significant than the last.

+
k
syntax Int ::= bitRangeInt ( Int, index: Int, length: Int ) [function, hook(INT.bitRange)] + | signExtendBitRangeInt ( Int, index: Int, length: Int ) [function, hook(INT.signExtendBitRange)] +
+
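
For instance, a standalone sketch (hypothetical names) extracting the high nibble of 0xFF and sign-extending the low byte of 128:

+
k
module BITRANGE-EXAMPLE
  imports INT

  syntax Int ::= "highNibble"    [function]
               | "lowByteSigned" [function]

  rule highNibble    => bitRangeInt(255, 4, 4)            // 15: bits 4..7 of 0xFF
  rule lowByteSigned => signExtendBitRangeInt(128, 0, 8)  // -128: bit 7 is the sign bit
endmodule
+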

Integer comparisons

+

You can compute whether two integers are less than or equal to, less than, +greater than or equal to, greater than, equal, or unequal to another integer.

+
k
syntax Bool ::= Int "<=Int" Int [function, total, symbol(_<=Int_), smt-hook(<=), hook(INT.le)] + | Int "<Int" Int [function, total, symbol(_<Int_), smt-hook(<), hook(INT.lt)] + | Int ">=Int" Int [function, total, symbol(_>=Int_), smt-hook(>=), hook(INT.ge)] + | Int ">Int" Int [function, total, symbol(_>Int_), smt-hook(>), hook(INT.gt)] + | Int "==Int" Int [function, total, symbol(_==Int_), comm, smt-hook(=), hook(INT.eq)] + | Int "=/=Int" Int [function, total, symbol(_=/=Int_), comm, smt-hook(distinct), hook(INT.ne)] +
+

Divides

+

You can compute whether one integer evenly divides another. This is the +case when the second integer modulo the first integer is equal to zero.

+
k
syntax Bool ::= Int "dividesInt" Int [function] +
+

Random integers

+

You can, on concrete backends, compute a pseudorandom integer, or seed the +pseudorandom number generator. These operations are represented as +uninterpreted functions on symbolic backends.

+
k
syntax Int ::= randInt(Int) [function, hook(INT.rand), impure] + syntax K ::= srandInt(Int) [function, hook(INT.srand), impure] +
+

Implementation of Integers

+

The remainder of this section consists of an implementation in K of some +of the operators above, as well as lemmas used by the Java and Haskell backends +to simplify expressions of sort Int. They do not affect the semantics of +integers; they merely describe additional rules that the backend can use to +simplify terms.

+
k
endmodule + +module INT-SYMBOLIC [symbolic] + imports INT-COMMON + imports INT-SYMBOLIC-KORE + imports private BOOL + + // Arithmetic Normalization + rule I +Int 0 => I [simplification] + rule I -Int 0 => I [simplification] + + rule X modInt N => X requires 0 <=Int X andBool X <Int N [simplification] + rule X %Int N => X requires 0 <=Int X andBool X <Int N [simplification] + + // Bit-shifts + rule X <<Int 0 => X [simplification, preserves-definedness] + rule 0 <<Int Y => 0 requires 0 <=Int Y [simplification, preserves-definedness] + rule X >>Int 0 => X [simplification, preserves-definedness] + rule 0 >>Int Y => 0 requires 0 <=Int Y [simplification, preserves-definedness] +endmodule + +module INT-SYMBOLIC-KORE [symbolic, haskell] + imports INT-COMMON + imports ML-SYNTAX + imports private BOOL + + // Definability Conditions + rule #Ceil(@I1:Int /Int @I2:Int) => {(@I2 =/=Int 0) #Equals true} #And #Ceil(@I1) #And #Ceil(@I2) [simplification] + rule #Ceil(@I1:Int %Int @I2:Int) => {(@I2 =/=Int 0) #Equals true} #And #Ceil(@I1) #And #Ceil(@I2) [simplification] + rule #Ceil(@I1:Int modInt @I2:Int) => {(@I2 =/=Int 0) #Equals true} #And #Ceil(@I1) #And #Ceil(@I2) [simplification] + rule #Ceil(@I1:Int >>Int @I2:Int) => {(@I2 >=Int 0) #Equals true} #And #Ceil(@I1) #And #Ceil(@I2) [simplification] + rule #Ceil(@I1:Int <<Int @I2:Int) => {(@I2 >=Int 0) #Equals true} #And #Ceil(@I1) #And #Ceil(@I2) [simplification] +endmodule + +module INT-KORE [symbolic] + imports private K-EQUAL + imports private BOOL + imports INT-COMMON + + rule [eq-k-to-eq-int] : I1:Int ==K I2:Int => I1 ==Int I2 [simplification] + rule [eq-int-true-left] : {K1 ==Int K2 #Equals true} => {K1 #Equals K2} [simplification] + rule [eq-int-true-rigth] : {true #Equals K1 ==Int K2} => {K1 #Equals K2} [simplification] + rule [eq-int-false-left] : {K1 ==Int K2 #Equals false} => #Not({K1 #Equals K2}) [simplification] + rule [eq-int-false-rigth] : {false #Equals K1 ==Int K2} => #Not({K1 #Equals K2}) [simplification] + rule [neq-int-true-left] : {K1 =/=Int K2 #Equals true} => #Not({K1 #Equals K2}) [simplification] + rule [neq-int-true-right] : {true #Equals K1 =/=Int K2} => #Not({K1 #Equals K2}) [simplification] + rule [neq-int-false-left] : {K1 =/=Int K2 #Equals false} => {K1 #Equals K2} [simplification] + rule [neq-int-false-right]: {false #Equals K1 =/=Int K2} => {K1 #Equals K2} [simplification] + + // Arithmetic Normalization + rule I +Int B => B +Int I [concrete(I), symbolic(B), simplification(51)] + rule A -Int I => A +Int (0 -Int I) [concrete(I), symbolic(A), simplification(51)] + + rule (A +Int I2) +Int I3 => A +Int (I2 +Int I3) [concrete(I2, I3), symbolic(A), simplification] + rule I1 +Int (B +Int I3) => B +Int (I1 +Int I3) [concrete(I1, I3), symbolic(B), simplification] + rule I1 -Int (B +Int I3) => (I1 -Int I3) -Int B [concrete(I1, I3), symbolic(B), simplification] + rule I1 +Int (I2 +Int C) => (I1 +Int I2) +Int C [concrete(I1, I2), symbolic(C), simplification] + rule I1 +Int (I2 -Int C) => (I1 +Int I2) -Int C [concrete(I1, I2), symbolic(C), simplification] + rule (I1 -Int B) +Int I3 => (I1 +Int I3) -Int B [concrete(I1, I3), symbolic(B), simplification] + rule I1 -Int (I2 +Int C) => (I1 -Int I2) -Int C [concrete(I1, I2), symbolic(C), simplification] + rule I1 -Int (I2 -Int C) => (I1 -Int I2) +Int C [concrete(I1, I2), symbolic(C), simplification] + rule (C -Int I2) -Int I3 => C -Int (I2 +Int I3) [concrete(I2, I3), symbolic(C), simplification] + + rule I1 &Int (I2 &Int C) => (I1 &Int I2) &Int C [concrete(I1, I2), symbolic(C), 
simplification] +endmodule + +module INT + imports INT-COMMON + imports INT-SYMBOLIC + imports INT-KORE + imports private K-EQUAL + imports private BOOL + + rule bitRangeInt(I::Int, IDX::Int, LEN::Int) => (I >>Int IDX) modInt (1 <<Int LEN) + + rule signExtendBitRangeInt(I::Int, IDX::Int, LEN::Int) => (bitRangeInt(I, IDX, LEN) +Int (1 <<Int (LEN -Int 1))) modInt (1 <<Int LEN) -Int (1 <<Int (LEN -Int 1)) + + rule I1:Int divInt I2:Int => (I1 -Int (I1 modInt I2)) /Int I2 + requires I2 =/=Int 0 + rule + I1:Int modInt I2:Int + => + ((I1 %Int absInt(I2)) +Int absInt(I2)) %Int absInt(I2) + requires I2 =/=Int 0 [concrete, simplification] + + rule minInt(I1:Int, I2:Int) => I1 requires I1 <Int I2 + rule minInt(I1:Int, I2:Int) => I2 requires I1 >=Int I2 + + rule I1:Int =/=Int I2:Int => notBool (I1 ==Int I2) + rule (I1:Int dividesInt I2:Int) => (I2 %Int I1) ==Int 0 + + syntax Int ::= freshInt(Int) [freshGenerator, function, total, private] + rule freshInt(I:Int) => I +endmodule +
+

IEEE 754 Floating-point Numbers

+

Provided here is the syntax of an implementation of arbitrary-precision +floating-point arithmetic in K based on a generalization of the IEEE 754 +standard. This type is hooked to an implementation of floats provided by the +backend.

+

The syntax of ordinary floating-point values in K consists of an optional sign +(+ or -) followed by an optional integer part, followed by a decimal point, +followed by an optional fractional part. Either the integer part or the +fractional part must be specified. The mantissa is followed by an optional +exponent part, which consists of an e or E, an optional sign (+ or -), +and an integer. The exponent is followed by an optional suffix, which can be +either f, F, d, D, or pNxM where N and M are positive integers. +p and x can be either upper or lowercase.

+

The value of a floating-point literal is computed as follows: First the +mantissa is read as a rational number. Then it is multiplied by 10 to the +power of the exponent, which is interpreted as an integer, and defaults to +zero if it is not present. Finally, it is rounded to the nearest possible +value in a floating-point type represented like an IEEE754 floating-point type, +with the number of bits of precision and exponent specified by the suffix. +A suffix of f or F represents the IEEE binary32 format. A suffix of d +or D, or no suffix, represents the IEEE binary64 format. A suffix of +pNxM (either upper or lowercase) specifies exactly N bits of precision and +M bits of exponent. The number of bits of precision is assumed to include +any optional 1 that precedes the IEEE 754 mantissa. In other words, p24x8 +is equal to the IEEE binary32 format, and p53x11 is equal to the IEEE +binary64 format.

+
k
module FLOAT-SYNTAX + syntax Float [hook(FLOAT.Float)] + syntax Float ::= r"([\\+\\-]?[0-9]+(\\.[0-9]*)?|\\.[0-9]+)([eE][\\+\\-]?[0-9]+)?([fFdD]|([pP][0-9]+[xX][0-9]+))?" [token, prec(1)] + syntax Float ::= r"[\\+\\-]?Infinity([fFdD]|([pP][0-9]+[xX][0-9]+))?" [token, prec(3)] + syntax Float ::= r"NaN([fFdD]|([pP][0-9]+[xX][0-9]+))?" [token, prec(3)] +endmodule + +module FLOAT + imports FLOAT-SYNTAX + imports private BOOL + imports private INT-SYNTAX +
+

Float precision

+

You can retrieve the number of bits of precision in a Float.

+
k
syntax Int ::= precisionFloat(Float) [function, total, hook(FLOAT.precision)] +
+

Float exponent bits

+

You can retrieve the number of bits of exponent range in a Float.

+
k
syntax Int ::= exponentBitsFloat(Float) [function, total, hook(FLOAT.exponentBits)] +
+

Float exponent

+

You can retrieve the value of the exponent bits of a Float as an integer.

+
k
syntax Int ::= exponentFloat(Float) [function, total, hook(FLOAT.exponent)] +
+

Float sign

+

You can retrieve the value of the sign bit of a Float as a boolean. True +means the sign bit is set.

+
k
syntax Bool ::= signFloat(Float) [function, total, hook(FLOAT.sign)] +
+

Float special values

+

You can check whether a Float value is infinite or Not-a-Number.

+
k
syntax Bool ::= isNaN(Float) [function, total, smt-hook(fp.isNaN), hook(FLOAT.isNaN)] + | isInfinite(Float) [function, total] +
+

Float arithmetic

+

You can:

+
    +
  • Compute the unary negation --Float of a float. --Float X is distinct +from 0.0 -Float X. For example, 0.0 -Float 0.0 is positive zero. +--Float 0.0 is negative zero.
  • +
  • Compute the exponentation ^Float of two floats.
  • +
  • Compute the product *Float, quotient /Float, or remainder %Float of two +floats. The remainder is computed based on rounding the quotient of the two +floats to the nearest integer.
  • +
  • Compute the sum +Float or difference -Float of two floats.
  • +
+
k
syntax Float ::= "--Float" Float [function, total, smt-hook(fp.neg), hook(FLOAT.neg)] + > Float "^Float" Float [function, left, hook(FLOAT.pow)] + > left: + Float "*Float" Float [function, left, smt-hook((fp.mul roundNearestTiesToEven #1 #2)), hook(FLOAT.mul)] + | Float "/Float" Float [function, left, smt-hook((fp.div roundNearestTiesToEven #1 #2)), hook(FLOAT.div)] + | Float "%Float" Float [function, left, smt-hook((fp.rem roundNearestTiesToEven #1 #2)), hook(FLOAT.rem)] + > left: + Float "+Float" Float [function, left, smt-hook((fp.add roundNearestTiesToEven #1 #2)), hook(FLOAT.add)] + | Float "-Float" Float [function, left, smt-hook((fp.sub roundNearestTiesToEven #1 #2)), hook(FLOAT.sub)] +
+

Floating-point mathematics

+

You can:

+
    +
  • Compute the Nth integer root rootFloat of a float.
  • +
  • Compute the absolute value absFloat of a float.
  • +
  • Round a floating-point number to a specified precision and exponent +range (roundFloat). The resulting Float will yield the specified values +when calling precisionFloat and exponentBitsFloat and when performing +further computation.
  • +
  • Round a float to the next lowest floating-point value which is an integer +(floorFloat).
  • +
  • Round a float to the next highest floating-point value which is an integer +(ceilFloat).
  • +
  • Round a float to the next closest floating-point value which is an integer, in +the direction of zero (truncFloat).
  • +
  • Compute the natural exponential expFloat of a float (i.e. e^x).
  • +
  • Compute the natural logarithm logFloat of a float.
  • +
  • Compute the sine sinFloat of a float.
  • +
  • Compute the cosine cosFloat of a float.
  • +
  • Compute the tangent tanFloat of a float.
  • +
  • Compute the arcsine asinFloat of a float.
  • +
  • Compute the arccosine acosFloat of a float.
  • +
  • Compute the arctangent atanFloat of a float.
  • +
  • Compute the arctangent atan2Float of two floats.
  • +
  • Compute the maximum maxFloat of two floats.
  • +
  • Compute the minimum minFloat of two floats.
  • +
  • Compute the square root sqrtFloat of a float.
  • +
  • Compute the largest finite value expressible in a specified precision and +exponent range (maxValueFloat).
  • +
  • Compute the smallest positive finite value expressible in a specified +precision and exponent range (minValueFloat).
  • +
+
k
syntax Float ::= rootFloat(Float, Int) [function, hook(FLOAT.root)] + | absFloat(Float) [function, total, smt-hook(fp.abs), hook(FLOAT.abs)] + | roundFloat(Float, precision: Int, exponentBits: Int) [function, hook(FLOAT.round)] + | floorFloat(Float) [function, total, hook(FLOAT.floor)] + | ceilFloat(Float) [function, total, hook(FLOAT.ceil)] + | truncFloat(Float) [function, total, hook(FLOAT.trunc)] + | expFloat(Float) [function, total, hook(FLOAT.exp)] + | logFloat(Float) [function, hook(FLOAT.log)] + | sinFloat(Float) [function, total, hook(FLOAT.sin)] + | cosFloat(Float) [function, total, hook(FLOAT.cos)] + | tanFloat(Float) [function, hook(FLOAT.tan)] + | asinFloat(Float) [function, hook(FLOAT.asin)] + | acosFloat(Float) [function, hook(FLOAT.acos)] + | atanFloat(Float) [function, total, hook(FLOAT.atan)] + | atan2Float(Float, Float) [function, hook(FLOAT.atan2)] + | maxFloat(Float, Float) [function, smt-hook(fp.max), hook(FLOAT.max)] + | minFloat(Float, Float) [function, smt-hook(fp.min), hook(FLOAT.min)] + | sqrtFloat(Float) [function] + | maxValueFloat(precision: Int, exponentBits: Int) [function, hook(FLOAT.maxValue)] + | minValueFloat(precision: Int, exponentBits: Int) [function, hook(FLOAT.minValue)] +
+

Floating-point comparisons

+

You can compute whether a float is less than or equal to, less than, greater than or +equal to, greater than, equal, or unequal to another float. Note that +X ==Float Y and X ==K Y might yield different values. The latter should be +used in cases where you want to compare whether two values of sort Float +contain the same term. The former should be used when you want to implement +the == operator of a programming language. In particular, NaN =/=Float NaN +is true, because NaN compares unequal to all values, including itself, in +IEEE 754 arithmetic. 0.0 ==Float -0.0 is also true.

+
k
syntax Bool ::= Float "<=Float" Float [function, smt-hook(fp.leq), hook(FLOAT.le)] + | Float "<Float" Float [function, smt-hook(fp.lt), hook(FLOAT.lt)] + | Float ">=Float" Float [function, smt-hook(fp.geq), hook(FLOAT.ge)] + | Float ">Float" Float [function, smt-hook(fp.gt), hook(FLOAT.gt)] + | Float "==Float" Float [function, comm, smt-hook(fp.eq), hook(FLOAT.eq), symbol(_==Float_)] + | Float "=/=Float" Float [function, comm, smt-hook((not (fp.eq #1 #2)))] + + rule F1:Float =/=Float F2:Float => notBool (F1 ==Float F2) +
+
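
The IEEE 754 corner cases mentioned above can be checked directly; here is a standalone sketch (hypothetical names).

+
k
module FLOAT-EQ-EXAMPLE
  imports FLOAT
  imports BOOL

  syntax Bool ::= "nanIsNotItself" [function]
               | "zerosAreEqual"   [function]

  rule nanIsNotItself => NaN =/=Float NaN   // true: NaN compares unequal to everything, itself included
  rule zerosAreEqual  => 0.0 ==Float -0.0   // true: IEEE equality ignores the sign of zero
endmodule
+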

Conversion between integer and float

+

You can convert an integer to a floating-point number with the specified +precision and exponent range. You can also convert a floating-point number +to the nearest integer. This operation rounds to the nearest integer, but it +also avoids the double-rounding that is present in ceilFloat and floorFloat +if the nearest integer is not representable in the specified floating-point +type.

+
k
syntax Float ::= Int2Float(Int, precision: Int, exponentBits: Int) [function, hook(FLOAT.int2float)] + syntax Int ::= Float2Int(Float) [function, total, hook(FLOAT.float2int)] +
+

Implementation of Floats

+

The remainder of this section consists of an implementation in K of some of the +operators above.

+
k
rule sqrtFloat(F:Float) => rootFloat(F, 2) + + rule isInfinite(F:Float) => F >Float maxValueFloat(precisionFloat(F), exponentBitsFloat(F)) orBool F <Float --Float maxValueFloat(precisionFloat(F), exponentBitsFloat(F)) + +endmodule +
+

Strings

+

Provided here is the syntax of an implementation of Unicode strings in K. This +type is hooked to an implementation of strings provided by the backend. The +implementation is currently incomplete and does not fully support encodings +and code points beyond the initial 256 code points of the Basic Latin and +Latin-1 Supplement blocks. In the future, there may be breaking changes to +the semantics of this module in order to support this functionality.

+

The syntax of strings in K is delineated by double quotes. Inside the double +quotes, any character can appear verbatim except double quotes, backslash, +newline, and carriage return. K also supports the following escape sequences:

+
    +
  • " - the " character
  • +
  • \\ - the \ character
  • +
  • \n - newline character
  • +
  • \r - carriage return character
  • +
  • \t - tab character
  • +
  • \f - form feed character
  • +
  • \xFF - \x followed by two hexadecimal characters indicates a code point +between 0x00 and 0xff
  • +
  • \uFFFF - \u followed by four hexadecimal characters indicates a code point +between 0x0000 and 0xffff
  • +
  • \UFFFFFFFF - \U followed by eight hexadecimal characters indicates a code +point between 0x000000 and 0x10ffff
  • +
+
k
module STRING-SYNTAX + syntax String [hook(STRING.String)] + syntax String ::= r"[\\\"](([^\\\"\\n\\r\\\\])|([\\\\][nrtf\\\"\\\\])|([\\\\][x][0-9a-fA-F]{2})|([\\\\][u][0-9a-fA-F]{4})|([\\\\][U][0-9a-fA-F]{8}))*[\\\"]" [token] +endmodule + +module STRING-COMMON + imports STRING-SYNTAX + imports private INT + imports private FLOAT-SYNTAX + imports private K-EQUAL + imports private BOOL +
+

String concatenation

+

You can concatenate two strings in O(N) time. For successive concatenation +operations, it may be better to use the STRING-BUFFER module.

+
k
syntax String ::= String "+String" String [function, total, left, hook(STRING.concat)] +
+

String length

+

You can get the length of a string in O(1) time.

+
k
syntax Int ::= lengthString ( String ) [function, total, hook(STRING.length)] +
+

Character and integer conversion

+

You can convert between a character (as represented by a string containing +a single code point) and an integer in O(1) time.

+
k
syntax String ::= chrChar ( Int ) [function, hook(STRING.chr)] + syntax Int ::= ordChar ( String ) [function, hook(STRING.ord)] +
+

String substring

+

You can compute a substring of a string in O(N) time (where N is the +length of the substring). There are two important facts to note:

+
    +
  1. the range generated includes the character at startIndex but excludes the +character at endIndex, i.e., the range is [startIndex..endIndex).
  2. +
  3. this function is only defined on valid indices (i.e., it is defined when +startIndex < endIndex and endIndex is less than or equal to the string +length).
  4. +
+
k
syntax String ::= substrString ( String , startIndex: Int , endIndex: Int ) [function, total, hook(STRING.substr)] +
+ +

You can find the first (respectively, last) occurrence of a substring, starting +at a certain index, in another string in O(N*M) time. +The result is -1 if the substring is not found.

+
k
syntax Int ::= findString ( haystack: String , needle: String , index: Int ) [function, hook(STRING.find)] + syntax Int ::= rfindString ( haystack: String , needle: String , index: Int ) [function, hook(STRING.rfind)] +
+ +
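
For example, a standalone sketch (hypothetical names), recalling that the substring range is half-open and that the search starts at the given index:

+
k
module STRING-SEARCH-EXAMPLE
  imports STRING
  imports INT

  syntax String ::= "middle" [function]
  rule middle => substrString("hello", 1, 3)    // "el": the characters at indices 1 and 2

  syntax Int ::= "secondEll" [function]
  rule secondEll => findString("hello", "l", 3) // 3: the first "l" at or after index 3
endmodule
+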

You can find the first (respectively, last) occurrence of one of the characters +of the search string, starting at a certain index, in another string in +O(N*M) time.

+
k
syntax Int ::= findChar ( haystack: String , needles: String , index: Int ) [function, hook(STRING.findChar)] + syntax Int ::= rfindChar ( haystack: String , needles: String , index: Int ) [function, hook(STRING.rfindChar)] +
+

String and Bool conversion

+
k
syntax String ::= Bool2String(Bool) [function, total] + rule Bool2String(true) => "true" + rule Bool2String(false) => "false" +
+
k
syntax Bool ::= String2Bool(String) [function] + rule String2Bool("true") => true + rule String2Bool("false") => false +
+

String and float conversion

+

You can convert between a String and a Float. The String will be +represented in the syntax of the Float sort (see the section on the FLOAT +module above for details of that syntax). Which particular string is returned +by Float2String is determined by the backend, but the same Float is +guaranteed to return the same String, and converting that String back to a +Float is guaranteed to return the original Float.

+

You can also convert a Float to a string in a particular syntax using the +variant of Float2String with a format. In this case, the resulting string +is one which results directly from passing that format to mpfr_printf. This +functionality may not be supported on backends that do not use Gnu MPFR to +implement floating-point numbers.

+
k
syntax String ::= Float2String ( Float ) [function, total, hook(STRING.float2string)] + syntax String ::= Float2String ( Float , format: String ) [function, symbol(FloatFormat), hook(STRING.floatFormat)] + syntax Float ::= String2Float ( String ) [function, hook(STRING.string2float)] +
+

String and integer conversions

+

You can convert between a String and an Int. The String will be represented +in the syntax of the INT module (i.e., a nonempty sequence of digits +optionally prefixed by a sign). When converting from an Int to a String, +the sign will not be present unless the integer is negative.

+

You can also convert between a String and an Int in a particular radix. +This radix can be anywhere between 2 and 36. For a radix 2 <= N <= 10, the +digits 0 to N-1 will be used. For a radix 11 <= N <= 36, the digits 0 to 9 +and the first N-10 letters of the Latin alphabet will be used. Both uppercase +and lowercase letters are supported by String2Base. Whether the letters +returned by Base2String are upper or lowercase is determined by the backend, +but the backend will consistently choose one or the other.

+
k
syntax Int ::= String2Int ( String ) [function, hook(STRING.string2int)] + syntax String ::= Int2String ( Int ) [function, total, hook(STRING.int2string)] + syntax String ::= Base2String ( Int , base: Int ) [function, hook(STRING.base2string)] + syntax Int ::= String2Base ( String , base: Int ) [function, hook(STRING.string2base)] +
+
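
For instance, a standalone sketch (hypothetical names) round-tripping through radix 16:

+
k
module BASE-CONVERSION-EXAMPLE
  imports STRING
  imports INT

  syntax Int ::= "fromHex" [function]
  rule fromHex => String2Base("ff", 16)   // 255

  syntax String ::= "toHex" [function]
  rule toHex => Base2String(255, 16)      // "ff" or "FF", whichever case the backend chooses
endmodule
+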

String count and replace

+

You can replace one, some, or all occurrences of a string within another +string in O(N*M) time. The replaceAll, replace, and replaceFirst methods +are identical, except replaceFirst replaces exactly one occurrence of the +string, the first occurrence. replace replaces the first times occurrences, +and replaceAll replaces every occurrence.

+

You can also count the number of times a string occurs within another string +using countAllOccurrences.

+
k
syntax String ::= "replaceAll" "(" haystack: String "," needle: String "," replacement: String ")" [function, total, hook(STRING.replaceAll)] + syntax String ::= "replace" "(" haystack: String "," needle: String "," replacement: String "," times: Int ")" [function, hook(STRING.replace)] + syntax String ::= "replaceFirst" "(" haystack: String "," needle: String "," replacement: String ")" [function, total, hook(STRING.replaceFirst)] + syntax Int ::= "countAllOccurrences" "(" haystack: String "," needle: String ")" [function, total, hook(STRING.countAllOccurrences)] +
+
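
For example, a standalone sketch (hypothetical names) counting and replacing the non-overlapping occurrences of "an" in "banana":

+
k
module REPLACE-EXAMPLE
  imports STRING
  imports INT

  syntax Int ::= "anCount" [function]
  rule anCount => countAllOccurrences("banana", "an")   // 2

  syntax String ::= "replaced" [function]
  rule replaced => replaceAll("banana", "an", "x")      // "bxxa"
endmodule
+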

String equality and lexicographic comparison

+

You can compare whether two strings are equal or unequal, or whether one string +is less than, less than or equal to, greater than, or greater than or equal to +another according to the natural lexicographic ordering of strings.

+
k
syntax Bool ::= String "==String" String [function, total, comm, hook(STRING.eq)] + | String "=/=String" String [function, total, comm, hook(STRING.ne)] + rule S1:String =/=String S2:String => notBool (S1 ==String S2) + + syntax Bool ::= String "<String" String [function, total, hook(STRING.lt)] + | String "<=String" String [function, total, hook(STRING.le)] + | String ">String" String [function, total, hook(STRING.gt)] + | String ">=String" String [function, total, hook(STRING.ge)] +
+

Implementation of Strings

+

What follows are a few String hooks which are deprecated and only supported +on certain outdated backends of K, as well as an implementation of several +of the above operations in K.

+
k
syntax String ::= categoryChar(String) [function, hook(STRING.category)] + | directionalityChar(String) [function, hook(STRING.directionality)] + + syntax String ::= "newUUID" [function, hook(STRING.uuid), impure] + + rule S1:String <=String S2:String => notBool (S2 <String S1) + rule S1:String >String S2:String => S2 <String S1 + rule S1:String >=String S2:String => notBool (S1 <String S2) + + rule findChar(S1:String, S2:String, I:Int) => #if findString(S1, substrString(S2, 0, 1), I) ==Int -1 #then findChar(S1, substrString(S2, 1, lengthString(S2)), I) #else #if findChar(S1, substrString(S2, 1, lengthString(S2)), I) ==Int -1 #then findString(S1, substrString(S2, 0, 1), I) #else minInt(findString(S1, substrString(S2, 0, 1), I), findChar(S1, substrString(S2, 1, lengthString(S2)), I)) #fi #fi requires S2 =/=String "" + rule findChar(_, "", _) => -1 + rule rfindChar(S1:String, S2:String, I:Int) => maxInt(rfindString(S1, substrString(S2, 0, 1), I), rfindChar(S1, substrString(S2, 1, lengthString(S2)), I)) requires S2 =/=String "" + rule rfindChar(_, "", _) => -1 + + rule countAllOccurrences(Source:String, ToCount:String) => 0 + requires findString(Source, ToCount, 0) <Int 0 + rule countAllOccurrences(Source:String, ToCount:String) => 1 +Int countAllOccurrences(substrString(Source, findString(Source, ToCount, 0) +Int lengthString(ToCount), lengthString(Source)), ToCount) + requires findString(Source, ToCount, 0) >=Int 0 + + rule replaceFirst(Source:String, ToReplace:String, Replacement:String) => substrString(Source, 0, findString(Source, ToReplace, 0)) + +String Replacement +String substrString(Source, findString(Source, ToReplace, 0) +Int lengthString(ToReplace), lengthString(Source)) + requires findString(Source, ToReplace, 0) >=Int 0 + rule replaceFirst(Source:String, ToReplace:String, _:String) => Source + requires findString(Source, ToReplace, 0) <Int 0 + + + // Note that the replace function is undefined when Count < 0. This allows different backends to + // implement their own behavior without contradicting these semantics. For instance, a symbolic + // backend can return #Bottom for that case, while a concrete backend can throw an exception. + rule replace(Source:String, ToReplace:String, Replacement:String, Count:Int) => + substrString(Source, 0, findString(Source, ToReplace, 0)) +String Replacement +String + replace(substrString(Source, findString(Source, ToReplace, 0) +Int lengthString(ToReplace), lengthString(Source)), ToReplace, Replacement, Count -Int 1) + requires Count >Int 0 andBool findString(Source, ToReplace, 0) >=Int 0 + rule replace(Source:String, _, _, Count) => Source + requires Count >=Int 0 [owise] + rule replaceAll(Source:String, ToReplace:String, Replacement:String) => replace(Source, ToReplace, Replacement, countAllOccurrences(Source, ToReplace)) + +endmodule + +module STRING-KORE [symbolic] + imports private K-EQUAL + imports STRING-COMMON + + rule S1:String ==K S2:String => S1 ==String S2 [simplification] + +endmodule + +module STRING + imports STRING-COMMON + imports STRING-KORE +endmodule +
+

String Buffers

+

It is a well-known fact that repeated string concatenations are quadratic +in performance, whereas use of an efficient mutable representation of arrays +can yield linear performance. We thus provide such a sort, the StringBuffer +sort. Axiomatically, it is implemented below on symbolic backends using the +String module. However, on concrete backends it provides an efficient +implementation of string concatenation. There are three operations:

+
    +
  • .StringBuffer creates a new StringBuffer with current content equal +to the empty string.
  • +
  • +String takes a StringBuffer and a String and appends the String to +the end of the StringBuffer
  • +
  • StringBuffer2String converts a StringBuffer to a String. This operation +copies the string so that subsequent modifications to the StringBuffer +will not change the value of the String returned by this function.
  • +
+
k
module STRING-BUFFER-IN-K [symbolic] + imports private BASIC-K + imports STRING + + syntax StringBuffer ::= ".StringBuffer" [function, total] + syntax StringBuffer ::= StringBuffer "+String" String [function, total, avoid] + syntax StringBuffer ::= String + syntax String ::= StringBuffer2String ( StringBuffer ) [function, total] + + rule {SB:String +String S:String}::StringBuffer => (SB +String S)::String + rule .StringBuffer => "" + rule StringBuffer2String(S:String) => S +endmodule + +module STRING-BUFFER-HOOKED [concrete] + imports private BASIC-K + imports STRING + + syntax StringBuffer [hook(BUFFER.StringBuffer)] + syntax StringBuffer ::= ".StringBuffer" [function, total, hook(BUFFER.empty), impure] + syntax StringBuffer ::= StringBuffer "+String" String [function, total, hook(BUFFER.concat), avoid] + syntax String ::= StringBuffer2String ( StringBuffer ) [function, total, hook(BUFFER.toString)] +endmodule + +module STRING-BUFFER + imports STRING-BUFFER-HOOKED + imports STRING-BUFFER-IN-K +endmodule +
+
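
A minimal usage sketch follows (hypothetical module and function names); the parentheses make the left-to-right appends explicit.

+
k
module STRING-BUFFER-EXAMPLE
  imports STRING-BUFFER

  // builds "hello, world" by appending to a fresh buffer and then freezing it
  syntax String ::= "greeting" [function]
  rule greeting => StringBuffer2String((.StringBuffer +String "hello, ") +String "world")
endmodule
+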

Byte Arrays

+

Provided here is the syntax of an implementation of fixed-width arrays of Bytes +in K. This type is hooked to an implementation of bytes provided by the backend. +On the LLVM backend, it is possible to opt in to a faster, mutable +representation (using the --llvm-mutable-bytes flag to kompile) where +multiple references can occur to the same Bytes object and when one is +modified, the others are also modified. Care should be taken when using this +feature, however, as it is possible to experience divergent behavior with +symbolic backends unless the Bytes type is used in a manner that preserves +consistency.

+
k
module BYTES-SYNTAX + imports private STRING-SYNTAX + + syntax Bytes [hook(BYTES.Bytes)] + syntax Bytes ::= r"b[\\\"](([ !#-\\[\\]-~])|([\\\\][tnfr\\\"\\\\])|([\\\\][x][0-9a-fA-F]{2}))*[\\\"]" [token] +endmodule +
+
k
module BYTES-STRING-ENCODE [symbolic] + imports BYTES-SYNTAX +
+

Encoding/decoding between Bytes and String

+

You can encode/decode between Bytes and String using UTF-8, UTF-16LE, UTF-16BE, UTF-32LE, and UTF-32BE.

+
k
syntax String ::= decodeBytes ( encoding: String , contents: Bytes ) [function, hook(BYTES.decodeBytes)] + syntax Bytes ::= encodeBytes ( encoding: String , contents: String ) [function, hook(BYTES.encodeBytes)] +endmodule +
+
k
module BYTES-HOOKED + imports STRING-SYNTAX + imports BYTES-SYNTAX + imports BYTES-STRING-ENCODE +
+

Empty byte array

+

The byte array of length zero is represented by .Bytes.

+
k
syntax Bytes ::= ".Bytes" [function, total, hook(BYTES.empty)] +
+

Endianness

+

When converting to/from an integer, byte arrays can be treated as either little +endian (i.e., least significant byte first) or big endian (i.e., most significant +byte first).

+
k
syntax Endianness ::= "LE" [symbol(littleEndianBytes)] + | "BE" [symbol(bigEndianBytes)] +
+

Signedness

+

When converting to/from an integer, byte arrays can be treated as either signed +or unsigned.

+
k
syntax Signedness ::= "Signed" [symbol(signedBytes)] + | "Unsigned" [symbol(unsignedBytes)] +
+

Integer and Bytes conversion

+

You can convert from a Bytes to an Int. In order to do this, the endianness +and signedness of the Bytes must be provided. The resulting integer is +created by means of interpreting the Bytes as either a twos-complement +representation, or an unsigned representation, of an integer, in the specified +byte order.

+

You can also convert from an Int to a Bytes. This comes in two variants. +In the first, the length of the resulting Bytes in bytes is explicitly +specified. If the length is greater than the highest set bit in the magnitude +of the integer, the result is padded with 0 bits if the number is positive +and 1 bits if the number is negative. If the length is less than the highest +bit set in the magnitude of the integer, the most-significant bits of the +integer will be truncated. The endianness of the resulting Bytes object +is as specified.

+

In the second variant, both endianness and signedness are specified, and +the resulting Bytes object will be the smallest number of bytes necessary +for the resulting Bytes object to be convertible back to the original integer +via Bytes2Int. In other words, if the highest bit set in the magnitude of the +integer is N, then the byte array will be at least N+1 bits long, rounded up +to the nearest byte.

+
k
syntax Int ::= Bytes2Int(Bytes, Endianness, Signedness) [function, total, hook(BYTES.bytes2int)] + syntax Bytes ::= Int2Bytes(length: Int, Int, Endianness) [function, total, hook(BYTES.int2bytes)] + | Int2Bytes(Int, Endianness, Signedness) [function, total, symbol(Int2BytesNoLen)] +
+
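
For instance, 1000 is 0x03E8, so a two-byte big-endian encoding and its inverse look like the following standalone sketch (hypothetical names).

+
k
module BYTES-INT-EXAMPLE
  imports BYTES
  imports INT

  syntax Bytes ::= "thousandBE" [function]
  rule thousandBE => Int2Bytes(2, 1000, BE)               // b"\x03\xe8"

  syntax Int ::= "backAgain" [function]
  rule backAgain => Bytes2Int(b"\x03\xe8", BE, Unsigned)  // 1000
endmodule
+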

String and Bytes conversion

+

You can convert between a Bytes and a String in O(N) time. The resulting +value is a copy of the original and will not be affected by subsequent +mutations of the input or output value.

+
k
syntax String ::= Bytes2String(Bytes) [function, total, hook(BYTES.bytes2string)] + syntax Bytes ::= String2Bytes(String) [function, total, hook(BYTES.string2bytes)] +
+

Bytes update

+

You can set the value of a particular byte in a Bytes object in O(1) time. +The result is #False if value is not in the range [0..255] or if index +is not a valid index (i.e., less than zero or greater than or equal to the length +of the Bytes term).

+
k
syntax Bytes ::= Bytes "[" index: Int "<-" value: Int "]" [function, hook(BYTES.update)] +
+

Bytes lookup

+

You can get the value of a particular byte in a Bytes object in O(1) time. +The result is #False if index is not a valid index (see above).

+
k
syntax Int ::= Bytes "[" Int "]" [function, hook(BYTES.get)] +
+

Bytes substring

+

You can get a new Bytes object containing a range of bytes from the input +Bytes in O(N) time (where N is the length of the substring). The range +of bytes included is [startIndex..endIndex). The resulting Bytes is +a copy and mutations to it do not affect mutations to the original Bytes. +The result is #False if startIndex or endIndex are not valid.

+
k
syntax Bytes ::= substrBytes(Bytes, startIndex: Int, endIndex: Int) [function, hook(BYTES.substr)] +
+

Multiple bytes update

+

You can modify a Bytes to return a Bytes which is equal to dest except the +N elements starting at index are replaced with the contents of src in O(N) +time. If --llvm-mutable-bytes is active, this will not create a new Bytes +object and will instead modify the original on concrete backends. The result is +#False if index + N is not a valid index.

+
k
syntax Bytes ::= replaceAtBytes(dest: Bytes, index: Int, src: Bytes) [function, hook(BYTES.replaceAt)] +
+

Multiple bytes update

+

You can modify a Bytes to return a Bytes which is equal to dest except the +count bytes starting at index are replaced with count bytes of value +Int2Bytes(1, v, LE/BE) in O(count) time. This does not create a new Bytes +object and will instead modify the original if --llvm-mutable-bytes is active. +An exception is thrown if index + count is not a valid index, or if v is outside +the acceptable range of -128 to 127. This is implemented only for the LLVM backend.

+
k
syntax Bytes ::= memsetBytes(dest: Bytes, index: Int, count: Int, v: Int) [function, hook(BYTES.memset)] +
+

Bytes padding

+

You can create a new Bytes object which is at least length bytes long by +taking the input sequence and padding it on the right (respectively, on the +left) with the specified value. If --llvm-mutable-bytes is active, this does +not create a new Bytes object if the input is already at least length bytes +long, and will instead return the input unchanged. The result is #False if +value is not in the range [0..255], or if the length is negative.

+
k
syntax Bytes ::= padRightBytes(Bytes, length: Int, value: Int) [function, hook(BYTES.padRight)] + | padLeftBytes(Bytes, length: Int, value: Int) [function, hook(BYTES.padLeft)] +
+

Bytes reverse

+

You can reverse a Bytes object in O(N) time. If --llvm-mutable-bytes is +active, this will not create a new Bytes object and will instead modify the +original.

+
k
syntax Bytes ::= reverseBytes(Bytes) [function, total, hook(BYTES.reverse)] +
+

Bytes length

+

You can get the length of a Bytes term in O(1) time.

+
k
syntax Int ::= lengthBytes(Bytes) [function, total, hook(BYTES.length), smtlib(lengthBytes)] +
+

Bytes concatenation

+

You can create a new Bytes object by concatenating two Bytes objects +together in O(N) time.

+
k
syntax Bytes ::= Bytes "+Bytes" Bytes [function, total, hook(BYTES.concat), right] + +endmodule +
+

Implementation of Bytes

+

The remainder of this module consists of an implementation of some of the +operators listed above in K.

+
k
module BYTES-CONCRETE [concrete] + imports BYTES-HOOKED +endmodule + +module BYTES-KORE + imports BYTES-HOOKED + imports BYTES-SYMBOLIC-CEIL +endmodule + +module BYTES-SYMBOLIC-CEIL [symbolic] + imports BYTES-HOOKED + imports private INT + imports private BOOL + + rule #Ceil(padRightBytes(_, LEN, VAL)) => {(0 <=Int LEN andBool 0 <=Int VAL andBool VAL <Int 256) #Equals true} [simplification] + rule #Ceil(padLeftBytes(_, LEN, VAL)) => {(0 <=Int LEN andBool 0 <=Int VAL andBool VAL <Int 256) #Equals true} [simplification] +endmodule + +module BYTES + imports BYTES-CONCRETE + imports BYTES-KORE + imports private INT + + rule Int2Bytes(I::Int, _::Endianness, _) => .Bytes + requires I ==Int 0 + rule Int2Bytes(I::Int, E::Endianness, Unsigned) => Int2Bytes((log2Int(I) +Int 8) /Int 8, I, E) + requires I >Int 0 [preserves-definedness] + rule Int2Bytes(I::Int, E::Endianness, Signed ) => Int2Bytes((log2Int(I) +Int 9) /Int 8, I, E) + requires I >Int 0 [preserves-definedness] + rule Int2Bytes(I::Int, E::Endianness, Signed ) => Int2Bytes((log2Int(~Int I) +Int 9) /Int 8, I, E) + requires I <Int -1 [preserves-definedness] + rule Int2Bytes(I::Int, E::Endianness, Signed ) => Int2Bytes(1, -1, E) + requires I ==Int -1 [preserves-definedness] +endmodule +
+

Program identifiers

+

Provided here is an implementation for program identifiers in K. Developers +of semantics for a particular language may wish to use their own implementation +instead of the one provided here if their syntax differs from the syntax +defined below. However, this is provided for convenience for developers who +do not care about the lexical syntax of identifiers.

+

Provided are the following pieces of functionality:

+
    +
  • Id2String - Convert an Id to a String containing its name
  • +
  • String2Id - Convert a String to an Id with the specified name
  • +
  • !X:Id - You can get a fresh identifier distinct from any previous identifier +generated by this syntax.
  • +
+
k
module ID-SYNTAX-PROGRAM-PARSING + imports BUILTIN-ID-TOKENS + syntax Id ::= r"[A-Za-z\\_][A-Za-z0-9\\_]*" [prec(1), token] + | #LowerId [token] + | #UpperId [token] +endmodule + +module ID-SYNTAX + syntax Id [token] +endmodule + +module ID-COMMON + imports ID-SYNTAX + imports private STRING + + syntax String ::= Id2String ( Id ) [function, total, hook(STRING.token2string)] + syntax Id ::= String2Id (String) [function, total, hook(STRING.string2token)] + syntax Id ::= freshId(Int) [freshGenerator, function, total, private] + + rule freshId(I:Int) => String2Id("_" +String Int2String(I)) +endmodule + +module ID + imports ID-COMMON +endmodule +
+
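
A minimal sketch of how these operations might be used in a semantics; the ID-EXAMPLE module and the prefixed helper below are invented for illustration and are not part of the prelude:

k
module ID-EXAMPLE
  imports ID
  imports STRING

  // rename an identifier by prefixing its name with "tmp_"
  syntax Id ::= prefixed(Id) [function]
  rule prefixed(X:Id) => String2Id("tmp_" +String Id2String(X))
endmodule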

Equality and conditionals

+

Provided here are implementations of two important primitives in K:

+
    +
  • ==K - the equality between two terms. Returns true if they are equal +and false if they are not equal.
  • +
  • #if #then #else #fi - polymorphic conditional function. If the first +argument evaluates to true, the second argument is returned. Otherwise, +the third argument is returned. Note that this does not short-circuit on +symbolic backends.
  • +
+
k
module K-EQUAL-SYNTAX + imports private BOOL + imports private BASIC-K + + syntax Bool ::= left: + K "==K" K [function, total, comm, smt-hook(=), hook(KEQUAL.eq), symbol(_==K_), group(equalEqualK)] + | K "=/=K" K [function, total, comm, smt-hook(distinct), hook(KEQUAL.ne), symbol(_=/=K_), group(notEqualEqualK)] + + syntax priority equalEqualK notEqualEqualK > boolOperation mlOp + + syntax {Sort} Sort ::= "#if" Bool "#then" Sort "#else" Sort "#fi" [function, total, symbol(ite), smt-hook(ite), hook(KEQUAL.ite)] + +endmodule + +module K-EQUAL-KORE [symbolic] + imports private BOOL + imports K-EQUAL-SYNTAX + + rule K1:Bool ==K K2:Bool => K1 ==Bool K2 [simplification] + rule {K1 ==K K2 #Equals true} => {K1 #Equals K2} [simplification] + rule {true #Equals K1 ==K K2} => {K1 #Equals K2} [simplification] + rule {K1 ==K K2 #Equals false} => #Not({K1 #Equals K2}) [simplification] + rule {false #Equals K1 ==K K2} => #Not({K1 #Equals K2}) [simplification] + rule {K1 =/=K K2 #Equals true} => #Not({K1 #Equals K2}) [simplification] + rule {true #Equals K1 =/=K K2} => #Not({K1 #Equals K2}) [simplification] + rule {K1 =/=K K2 #Equals false} => {K1 #Equals K2} [simplification] + rule {false #Equals K1 =/=K K2} => {K1 #Equals K2} [simplification] + +endmodule + +module K-EQUAL + imports private BOOL + imports K-EQUAL-SYNTAX + imports K-EQUAL-KORE + + rule K1:K =/=K K2:K => notBool (K1 ==K K2) + + rule #if C:Bool #then B1::K #else _ #fi => B1 requires C + rule #if C:Bool #then _ #else B2::K #fi => B2 requires notBool C + +endmodule +
+
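
As a small illustration (the module and the choose function below are hypothetical), ==K and the conditional can be combined as follows:

k
module KEQUAL-EXAMPLE
  imports K-EQUAL
  imports INT

  // return X when A and B are equal terms, and Y otherwise
  syntax Int ::= choose(Int, Int, Int, Int) [function]
  rule choose(A, B, X, Y) => #if A ==K B #then X #else Y #fi
endmodule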

Meta operations

+

Provided below are a few miscellaneous, mostly deprecated functions in K. +It is not recommended to use any of them directly as they are largely +unsupported in modern K. There are a few exceptions:

+
    +
  • #getenv - Returns the value of an environment variable
  • +
  • #kompiledDirectory - Returns the path to the current compiled K definition +directory.
  • +
  • #unparseKORE - Takes a K term and converts it to a string.
  • +
+
k
module K-REFLECTION + imports BASIC-K + imports STRING + + syntax K ::= "#configuration" [function, impure, hook(KREFLECTION.configuration)] + syntax String ::= #sort(K) [function, hook(KREFLECTION.sort)] + syntax KItem ::= #fresh(String) [function, hook(KREFLECTION.fresh), impure] + syntax KItem ::= getsymbol(K) [function, hook(KREFLECTION.getKLabel)] + + syntax K ::= #getenv(String) [function, impure, hook(KREFLECTION.getenv)] + + syntax String ::= #kompiledDirectory() [function, hook(KREFLECTION.kompiledDir)] + + // meaningful only for the purposes of compilation to a binary, otherwise + // undefined + syntax List ::= #argv() [function, hook(KREFLECTION.argv)] + + syntax {Sort} String ::= #unparseKORE(Sort) [function, hook(KREFLECTION.printKORE)] + syntax IOError ::= "#noParse" "(" String ")" [symbol(#noParse)] + +endmodule +
+
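
For example, a semantics might wrap #getenv in a String-valued helper along these lines (a hypothetical sketch; note the projection cast on the result, since #getenv returns sort K):

k
module GETENV-EXAMPLE
  imports K-REFLECTION
  imports STRING

  // look up an environment variable and project the result to a String
  syntax String ::= envString(String) [function, impure]
  rule envString(Name:String) => {#getenv(Name)}:>String
endmodule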

I/O in K

+

Concrete execution in K supports I/O operations. This functionality is not +supported during symbolic execution, because symbolic execution must exist +completely free of side-effects, and I/O is an irreducible type of side effect. +However, it is useful in many cases when defining concrete execution to be able +to make reference to I/O operations.

+

The design of these I/O operations is based on the POSIX standard, for the most +part. For example, the #read K function maps to the read POSIX function. We +do not at this time have a higher-level API for I/O, but this may be +implemented at some point in the future.

+

I/O operations generally return either their result, or an IOError term +corresponding to the errno returned by the underlying system call.

+
k
module K-IO + imports private LIST + imports private STRING + imports private INT +
+

I/O errors

+

Aside from EOF, which is returned by #getc if the file is at end-of-file, all +of the below I/O errors correspond to possible values for errno after calling +a library function. If the errno returned is not one of the below errnos +known to K, #unknownIOError is returned along with the integer errno value.

+
k
syntax IOError ::= "#EOF" [symbol(#EOF)] + | #unknownIOError(errno: Int) [symbol(#unknownIOError)] + | "#E2BIG" [symbol(#E2BIG)] + | "#EACCES" [symbol(#EACCES)] + | "#EAGAIN" [symbol(#EAGAIN)] + | "#EBADF" [symbol(#EBADF)] + | "#EBUSY" [symbol(#EBUSY)] + | "#ECHILD" [symbol(#ECHILD)] + | "#EDEADLK" [symbol(#EDEADLK)] + | "#EDOM" [symbol(#EDOM)] + | "#EEXIST" [symbol(#EEXIST)] + | "#EFAULT" [symbol(#EFAULT)] + | "#EFBIG" [symbol(#EFBIG)] + | "#EINTR" [symbol(#EINTR)] + | "#EINVAL" [symbol(#EINVAL)] + | "#EIO" [symbol(#EIO)] + | "#EISDIR" [symbol(#EISDIR)] + | "#EMFILE" [symbol(#EMFILE)] + | "#EMLINK" [symbol(#EMLINK)] + | "#ENAMETOOLONG" [symbol(#ENAMETOOLONG)] + | "#ENFILE" [symbol(#ENFILE)] + | "#ENODEV" [symbol(#ENODEV)] + | "#ENOENT" [symbol(#ENOENT)] + | "#ENOEXEC" [symbol(#ENOEXEC)] + | "#ENOLCK" [symbol(#ENOLCK)] + | "#ENOMEM" [symbol(#ENOMEM)] + | "#ENOSPC" [symbol(#ENOSPC)] + | "#ENOSYS" [symbol(#ENOSYS)] + | "#ENOTDIR" [symbol(#ENOTDIR)] + | "#ENOTEMPTY" [symbol(#ENOTEMPTY)] + | "#ENOTTY" [symbol(#ENOTTY)] + | "#ENXIO" [symbol(#ENXIO)] + | "#EPERM" [symbol(#EPERM)] + | "#EPIPE" [symbol(#EPIPE)] + | "#ERANGE" [symbol(#ERANGE)] + | "#EROFS" [symbol(#EROFS)] + | "#ESPIPE" [symbol(#ESPIPE)] + | "#ESRCH" [symbol(#ESRCH)] + | "#EXDEV" [symbol(#EXDEV)] + | "#EWOULDBLOCK" [symbol(#EWOULDBLOCK)] + | "#EINPROGRESS" [symbol(#EINPROGRESS)] + | "#EALREADY" [symbol(#EALREADY)] + | "#ENOTSOCK" [symbol(#ENOTSOCK)] + | "#EDESTADDRREQ" [symbol(#EDESTADDRREQ)] + | "#EMSGSIZE" [symbol(#EMSGSIZE)] + | "#EPROTOTYPE" [symbol(#EPROTOTYPE)] + | "#ENOPROTOOPT" [symbol(#ENOPROTOOPT)] + | "#EPROTONOSUPPORT" [symbol(#EPROTONOSUPPORT)] + | "#ESOCKTNOSUPPORT" [symbol(#ESOCKTNOSUPPORT)] + | "#EOPNOTSUPP" [symbol(#EOPNOTSUPP)] + | "#EPFNOSUPPORT" [symbol(#EPFNOSUPPORT)] + | "#EAFNOSUPPORT" [symbol(#EAFNOSUPPORT)] + | "#EADDRINUSE" [symbol(#EADDRINUSE)] + | "#EADDRNOTAVAIL" [symbol(#EADDRNOTAVAIL)] + | "#ENETDOWN" [symbol(#ENETDOWN)] + | "#ENETUNREACH" [symbol(#ENETUNREACH)] + | "#ENETRESET" [symbol(#ENETRESET)] + | "#ECONNABORTED" [symbol(#ECONNABORTED)] + | "#ECONNRESET" [symbol(#ECONNRESET)] + | "#ENOBUFS" [symbol(#ENOBUFS)] + | "#EISCONN" [symbol(#EISCONN)] + | "#ENOTCONN" [symbol(#ENOTCONN)] + | "#ESHUTDOWN" [symbol(#ESHUTDOWN)] + | "#ETOOMANYREFS" [symbol(#ETOOMANYREFS)] + | "#ETIMEDOUT" [symbol(#ETIMEDOUT)] + | "#ECONNREFUSED" [symbol(#ECONNREFUSED)] + | "#EHOSTDOWN" [symbol(#EHOSTDOWN)] + | "#EHOSTUNREACH" [symbol(#EHOSTUNREACH)] + | "#ELOOP" [symbol(#ELOOP)] + | "#EOVERFLOW" [symbol(#EOVERFLOW)] +
+

I/O result sorts

+

Here we see sorts defined to contain either an Int or an IOError, or +either a String or an IOError. These sorts are used to implement the +return sort of functions that may succeed, in which case they return a value, +or may fail, in which case their return value indicates an error and the +error indicated is returned via errno.

+
k
syntax IOInt ::= Int | IOError + syntax IOString ::= String | IOError +
+

Opening a file

+

You can open a file in K using #open. An optional mode indicates the file +open mode, which can have any value allowed by the fopen function in C. +The returned value is the file descriptor that was opened, or an error.

+
k
syntax IOInt ::= "#open" "(" path: String ")" [function] + | "#open" "(" path: String "," mode: String ")" [function, hook(IO.open), impure] + + rule #open(S:String) => #open(S:String, "r+") +
+

Get/set position in file

+

You can get the current offset in a file using #tell. You can also seek +to a particular offset using #seek or #seekEnd. #seek is implemented via +a call to lseek with the SEEK_SET whence. #seekEnd is implemented via a +call to lseek with the SEEK_END whence. You can emulate the SEEK_CUR +whence by means of #seek(FD, #tell(FD) +Int Offset).

+
k
syntax IOInt ::= "#tell" "(" fd: Int ")" [function, hook(IO.tell), impure] + syntax K ::= "#seek" "(" fd: Int "," index: Int ")" [function, hook(IO.seek), impure] + | "#seekEnd" "(" fd: Int "," fromEnd: Int ")" [function, hook(IO.seekEnd), impure] +
+

Read from file

+

You can read a single character from a file using #getc. #EOF is returned if you are at end-of-file.

+

You can also read up to length characters in a file using #read. The +resulting read characters are returned, which may be fewer characters than +requested. A string of zero length being returned indicates end-of-file.

+
k
syntax IOInt ::= "#getc" "(" fd: Int ")" [function, hook(IO.getc), impure] + syntax IOString ::= "#read" "(" fd: Int "," length: Int ")" [function, hook(IO.read), impure] +
+
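
Putting the above together, a typical interaction with a file might look like the following sequence of calls (illustrative only; FD stands for the descriptor returned by #open):

k
  // #open("input.txt", "r")   returns a file descriptor FD, or an IOError
  // #read(FD, 1024)           returns up to 1024 characters as a String ("" means end-of-file)
  // #getc(FD)                 returns the code of the next character, or #EOF
  // #close(FD)                returns .K on success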

Write to file

+

You can write a single character to a file using #putc. You can also write +a string to a file using #write. The returned value on success is .K.

+
k
syntax K ::= "#putc" "(" fd: Int "," value: Int ")" [function, hook(IO.putc), impure] + | "#write" "(" fd: Int "," value: String ")" [function, hook(IO.write), impure] +
+

Closing a file

+

You can close a file using #close. The returned value on success is .K.

+
k
syntax K ::= "#close" "(" fd: Int ")" [function, hook(IO.close), impure] +
+

Locking/unlocking a file

+

You can lock or unlock parts of a file using the #lock and #unlock +functions. The lock starts at the beginning of the file and continues for +endIndex bytes. Note that Unix systems do not actually prevent locked files +from being read and modified; you will have to lock both sides of a concurrent +access to guarantee exclusivity.

+
k
syntax K ::= "#lock" "(" fd: Int "," endIndex: Int ")" [function, hook(IO.lock), impure] + | "#unlock" "(" fd: Int "," endIndex: Int ")" [function, hook(IO.unlock), impure] +
+

Networking

+

You can accept a connection on a socket using #accept, or shut down the write end of a socket with #shutdownWrite. Note that no facility is provided for opening, binding, or listening on sockets. These functions are implemented in order to support creating stateful request/response servers where the request loop is implemented using rewriting in K, but the connection initialization is written in native code and linked into the LLVM backend.

+
k
syntax IOInt ::= "#accept" "(" fd: Int ")" [function, hook(IO.accept), impure] + syntax K ::= "#shutdownWrite" "(" fd: Int ")" [function, hook(IO.shutdownWrite), impure] +
+

Time

+

You can get the current time in seconds since midnight UTC on January 1, 1970 +using #time.

+
k
syntax Int ::= "#time" "(" ")" [function, hook(IO.time), impure] +
+

Builtin file descriptors

+

Provided here are functions that return the file descriptor for standard input, +standard output, and standard error.

+
k
syntax Int ::= "#stdin" [function, total] + | "#stdout" [function, total] + | "#stderr" [function, total] + + rule #stdin => 0 + rule #stdout => 1 + rule #stderr => 2 +
+
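
For example, a semantics can print to standard output or standard error by combining these with #write (illustrative only):

k
  // #write(#stdout, "hello\n")   returns .K and writes to standard output
  // #write(#stderr, "oops\n")    likewise, but to standard error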

Shell access

+

You can execute a command using the shell using the #system operator. Care +must be taken to sanitize inputs to this function or security issues may +result. Note that K has no facility for reasoning about logic that happens +outside its process, so any functionality that you wish to be able to formally +reason about in K should not be implemented via the #system operator.

+
k
syntax KItem ::= #system ( String ) [function, hook(IO.system), impure] + | "#systemResult" "(" Int /* exit code */ "," String /* stdout */ "," String /* stderr */ ")" [symbol(#systemResult)] +
+

Temporary files

+

You can get a temporary file and open it atomically using the #mkstemp +operator. The resulting file will be closed and deleted when K rewriting ends. +For more info on the argument to #mkstemp, see man mkstemp.

+
k
syntax IOFile ::= #mkstemp(template: String) [function, hook(IO.mkstemp), impure] + syntax IOFile ::= IOError + | "#tempFile" "(" path: String "," fd: Int ")" [symbol(#tempFile)] +
+

Deleting a file

+

You can delete a file using its absolute or relative path using the #remove +operator. It returns .K on success or an IOError on failure.

+
k
syntax K ::= #remove(path: String) [function, total, hook(IO.remove), impure] +
+

Logging

+

You can log information to disk using the #logToFile operator. Semantically, this operator returns .K. However, it has a side effect that is not reasoned about: value is written to a uniquely identified file whose name contains name. The file is only flushed to disk when rewriting finishes.

+
k
syntax K ::= #logToFile(name: String, value: String) [function, total, hook(IO.log), impure, returnsUnit, symbol(#logToFile)] +
+

Strings can also be logged via the logging mechanisms available to the backend. +On the LLVM backend, this just means logging the text to standard error. On the +Haskell backend, a log message of type InfoUserLog is created with the +specified text.

+
k
syntax K ::= #log(value: String) [function, total, hook(IO.logString), impure, returnsUnit, symbol(#log)] +
+

Terms can also be logged to standard error in surface syntax, rather than as +KORE using #trace. This operator has similar semantics to #logToFile (i.e. +it returns .K, but prints as an impure side effect). Note that calling +#trace is equivalent to invoking the kprint tool for the first term that is +logged, which requires re-parsing the underlying K definition. Subsequent calls +do not incur this overhead again; the definition is cached.

+
k
syntax K ::= #trace(value: KItem) [function, total, hook(IO.traceTerm), impure, returnsUnit, symbol(#trace)] + | #traceK(value: K) [function, total, hook(IO.traceTerm), impure, returnsUnit, symbol(#traceK)] +
+

Implementation of high-level I/O streams in K

+

Below is an implementation of the stream="stdin" and stream="stdout" +cell attributes in K. You should not refer to these symbols or modules directly +in your definition. It is provided only so that the K compiler can make use of +it. For more information on how to use this feature, refer to IMP++ in the K +tutorial.

+
k
syntax Stream ::= #buffer(K) + | #istream(Int) + | #parseInput(String, String) + | #ostream(Int) + +endmodule + +// NOTE: DO NOT DIRECTLY IMPORT *-STREAM MODULES +// These stream modules will be automatically instantiated and implicitly imported +// into the main module when `stream` attributes appear in configuration cells. +// Only `Stream` productions and `[stream]` rules will be imported. +// The cell name will be replaced with the one of the main configuration. + +module STDIN-STREAM + imports K-IO + imports K-REFLECTION + imports LIST + imports INT + imports BOOL + + configuration <stdin> ListItem(#buffer($STDIN:String)) ListItem($IO:String) ListItem(#istream(#stdin)) </stdin> + + // read one character at a time until we read whitespace + rule [stdinGetc]: + <stdin> + ListItem(#parseInput(_:String, Delimiters:String)) + ListItem(#buffer(S:String => S +String chrChar({#getc(N)}:>Int))) + ListItem("on") + ListItem(#istream(N:Int)) + </stdin> + requires findChar(S, Delimiters, 0) ==Int -1 // [stdin] + [stream, priority(200)] + + // when we reach whitespace, if it parses create a ListItem + rule [stdinParseString]: + <stdin> + (ListItem(#parseInput("String", Delimiters:String)) => ListItem(S)) + ListItem(#buffer(S:String => "")) + _:List + </stdin> + requires findChar(S, Delimiters, 0) =/=Int -1 // [stdin] + [stream] + + // a hack: handle the case when we read integers without the help of the IO server + rule [stdinParseInt]: + <stdin> + (ListItem(#parseInput("Int", Delimiters:String)) + => ListItem(String2Int(substrString(S, 0, findChar(S, Delimiters, 0))))) + ListItem(#buffer(S:String => substrString(S,findChar(S, Delimiters, 0) +Int 1, lengthString(S)))) + _:List + </stdin> + requires findChar(S, Delimiters, 0) =/=Int -1 + andBool lengthString(S) >Int 1 // [stdin] + [stream] + + rule [stdinTrim]: + <stdin> + ListItem(#parseInput(Sort:String, Delimiters:String)) + ListItem(#buffer(S:String => substrString(S, 1, lengthString(S)))) + _:List + </stdin> + requires findChar(S, Delimiters, 0) =/=Int -1 + andBool Sort =/=String "String" + andBool lengthString(S) <=Int 1 // [stdin] + [stream] + + // NOTE: This unblocking rule will be instantiated and inserted carefully + // when necessary according to user-defined rules, since otherwise it will + // lead to a diverging (i.e., non-terminating) transition system definition. + // Currently, it supports only a simple pattern matching on the top of the + // input stream cell, e.g., + // rule <k> read() => V ... </k> <in> ListItem(V:Int) => .List ... </in> + // Non-supported rules that refer to the input stream cell in a sophisticated + // way will get stuck in concrete execution mode with real IO enabled (i.e., + // under `--io on` option), while they will still work in symbolic execution + // mode or concrete execution mode with real IO disabled (i.e., under `--io + // off`, `--search`, or `--debug` options). + // + // TODO: More patterns need to be supported as well. In that case, we need to + // have a way to specify such patterns. + rule [stdinUnblock]: + <stdin> + (.List => ListItem(#parseInput(?Sort:String, ?Delimiters:String))) + ListItem(#buffer(_:String)) + ... 
+ </stdin> + + /* + syntax Stream ::= "#noIO" + + rule ListItem(#buffer(_)) + (ListItem(#noIO) ListItem(#istream(_:Int)) => .List) [stdin] + */ + +endmodule + +module STDOUT-STREAM + imports K-IO + imports LIST + imports STRING + + configuration <stdout> ListItem(#ostream(#stdout)) ListItem($IO:String) ListItem(#buffer("")) </stdout> +//configuration <stderr> ListItem(#ostream(#stderr)) ListItem($IO:String) ListItem(#buffer("")) </stderr> + + rule [stdoutBufferFloat]: + <stdout> + ListItem(#ostream(_)) + ListItem(_) + ListItem(#buffer(Buffer:String => Buffer +String Float2String(F))) + (ListItem(F:Float) => .List) + _:List + </stdout> + // [stdout, stderr] + [stream, priority(25)] + rule [stdoutBufferInt]: + <stdout> + ListItem(#ostream(_)) + ListItem(_) + ListItem(#buffer(Buffer:String => Buffer +String Int2String(I))) + (ListItem(I:Int) => .List) + _:List + </stdout> + // [stdout, stderr] + [stream, priority(25)] + rule [stdoutBufferString]: + <stdout> + ListItem(#ostream(_)) + ListItem(_) + ListItem(#buffer(Buffer:String => Buffer +String S)) + (ListItem(S:String) => .List) + _:List + </stdout> + // [stdout, stderr] + [stream, priority(25)] + + // Send first char from the buffer to the server + rule [stdoutWrite]: + <stdout> + ListItem(#ostream(N:Int => {#write(N, S) ~> N:Int}:>Int)) + ListItem("on") + ListItem(#buffer(S:String => "")) + _:List + </stdout> + requires S =/=String "" // [stdout, stderr] + [stream, priority(30)] + + /* + syntax Stream ::= "#noIO" + + rule ListItem(#buffer(Buffer:String => Buffer +String Float2String(F))) + (ListItem(F:Float) => .List) + _:List [stdout, stderr] + rule ListItem(#buffer(Buffer:String => Buffer +String Int2String(I))) + (ListItem(I:Int) => .List) + _:List [stdout, stderr] + rule ListItem(#buffer(Buffer:String => Buffer +String S)) + (ListItem(S:String) => .List) + _:List [stdout, stderr] + + rule (ListItem(#ostream(_:Int)) ListItem(#noIO) => .List) + ListItem(#buffer(_)) + _:List [stdout, stderr] + */ + +endmodule +
+

Machine Integers

+

Provided here is an implementation of arbitrarily large fixed-precision binary +integers in K. This type is hooked to an implementation of integers provided +by the backend, and in particular makes use of native machine integers for +certain sizes of integer. For arbitrary-precision integers, see the INT +module above.

+

The syntax of machine integers in K is the same as arbitrary-precision integers +(i.e., an optional sign followed by a sequence of digits) except that machine +integers always end in a suffix pN where N is an integer indicating the +width in bits of the integer. The MInt sort is parametric, and this is +reflected in the literals. For example, the sort of 0p8 is MInt{8}.

+
k
module MINT-SYNTAX + /*@\section{Description} The MInt implements machine integers of arbitrary + * bit width represented in 2's complement. */ + syntax {Width} MInt{Width} [hook(MINT.MInt)] + + /*@ Machine integer of bit width and value. */ + syntax {Width} MInt{Width} ::= r"[\\+\\-]?[0-9]+[pP][0-9]+" [token, prec(2), hook(MINT.literal)] +endmodule + +module MINT + imports MINT-SYNTAX + imports private INT + imports private BOOL +
+

Bitwidth of MInt

+

You can get the number of bits of width in an MInt using bitwidthMInt.

+
k
syntax {Width} Int ::= bitwidthMInt(MInt{Width}) [function, total, hook(MINT.bitwidth)] +
+

Int and MInt conversions

+

You can convert from an MInt to an Int using the MInt2Signed and MInt2Unsigned functions. An MInt does not itself have a sign; its sign is instead reflected in how operators interpret its value, either as a signed integer or as an unsigned integer. Thus, you can interpret an MInt as a signed integer with MInt2Signed, or as an unsigned integer with MInt2Unsigned.

+

You can also convert from an Int to an MInt using Int2MInt. Care must be taken to ensure that the sort context where the Int2MInt operator appears has the correct bitwidth, as this will influence the width of the resulting MInt.

+
k
syntax {Width} Int ::= MInt2Signed(MInt{Width}) [function, total, hook(MINT.svalue)] + | MInt2Unsigned(MInt{Width}) [function, total, hook(MINT.uvalue), smt-hook(bv2int)] + + syntax {Width} MInt{Width} ::= Int2MInt(Int) [function, total, hook(MINT.integer), smt-hook(int2bv)] +
+

MInt min and max values

+

You can get the minimum and maximum values of a signed or unsigned MInt with a specified bit width using sminMInt, smaxMInt, uminMInt, and umaxMInt.

+
k
syntax Int ::= sminMInt(Int) [function] + | smaxMInt(Int) [function] + | uminMInt(Int) [function] + | umaxMInt(Int) [function] + rule sminMInt(N:Int) => 0 -Int (1 <<Int (N -Int 1)) + rule smaxMInt(N:Int) => (1 <<Int (N -Int 1)) -Int 1 + rule uminMInt(_:Int) => 0 + rule umaxMInt(N:Int) => (1 <<Int N) -Int 1 +
+
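
For example, with a width of 8 bits these evaluate as follows:

k
  // sminMInt(8) => -128    smaxMInt(8) => 127
  // uminMInt(8) => 0       umaxMInt(8) => 255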

MInt bounds checking

+

You can check whether a specified Int will be represented in an MInt +with a specified width without any loss of precision when interpreted as +a signed or unsigned integer using soverflowMInt and uoverflowMInt.

+
k
syntax Bool ::= soverflowMInt(width: Int, Int) [function] + | uoverflowMInt(width: Int, Int) [function] + rule + soverflowMInt(N:Int, I:Int) + => + I <Int sminMInt(N) orBool I >Int smaxMInt(N) + rule + uoverflowMInt(N:Int, I:Int) + => + I <Int uminMInt(N) orBool I >Int umaxMInt(N) +
+
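
For instance (hypothetical evaluations):

k
  // soverflowMInt(8, 200) => true   (200 >Int smaxMInt(8), which is 127)
  // uoverflowMInt(8, 200) => false  (0 <=Int 200 andBool 200 <=Int umaxMInt(8))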

MInt arithmetic

+

You can:

+
    +
  • Compute the bitwise complement ~MInt of an MInt.
  • +
  • Compute the unary negation --MInt of an MInt.
  • +
  • Compute the product *MInt of two MInts.
  • +
  • Compute the quotient /sMInt of two MInts interpreted as signed integers.
  • +
  • Compute the modulus %sMInt of two MInts interpreted as signed integers.
  • +
  • Compute the quotient /uMInt of two MInts interpreted as unsigned +integers.
  • +
  • Compute the modulus %uMInt of two MInts interpreted as unsigned integers.
  • +
  • Compute the sum +MInt of two MInts.
  • +
  • Compute the difference -MInt of two MInts.
  • +
  • Compute the left shift <<MInt of two MInts. The second MInt is always +interpreted as positive.
  • +
  • Compute the arithmetic right shift >>aMInt of two MInts. The second MInt is always interpreted as positive.
  • +
  • Compute the logical right shift >>lMInt of two MInts. The second MInt +is always interpreted as positive.
  • +
  • Compute the bitwise and &MInt of two MInts.
  • +
  • Compute the bitwise xor xorMInt of two MInts.
  • +
  • Compute the bitwise inclusive or |MInt of two MInts.
  • +
+
k
syntax {Width} MInt{Width} ::= "~MInt" MInt{Width} [function, total, hook(MINT.not), smt-hook(bvnot)] + | "--MInt" MInt{Width} [function, total, hook(MINT.neg), smt-hook(bvuminus)] + > left: + MInt{Width} "*MInt" MInt{Width} [function, total, hook(MINT.mul), smt-hook(bvmul)] + | MInt{Width} "/sMInt" MInt{Width} [function, hook(MINT.sdiv), smt-hook(bvsdiv)] + | MInt{Width} "%sMInt" MInt{Width} [function, hook(MINT.srem), smt-hook(bvsrem)] + | MInt{Width} "/uMInt" MInt{Width} [function, hook(MINT.udiv), smt-hook(bvudiv)] + | MInt{Width} "%uMInt" MInt{Width} [function, hook(MINT.urem), smt-hook(bvurem)] + > left: + MInt{Width} "+MInt" MInt{Width} [function, total, hook(MINT.add), smt-hook(bvadd)] + | MInt{Width} "-MInt" MInt{Width} [function, total, hook(MINT.sub), smt-hook(bvsub)] + > left: + MInt{Width} "<<MInt" MInt{Width} [function, hook(MINT.shl), smt-hook(bvshl)] + | MInt{Width} ">>aMInt" MInt{Width} [function, hook(MINT.ashr), smt-hook(bvashr)] + | MInt{Width} ">>lMInt" MInt{Width} [function, hook(MINT.lshr), smt-hook(bvlshr)] + > left: + MInt{Width} "&MInt" MInt{Width} [function, total, hook(MINT.and), smt-hook(bvand)] + > left: + MInt{Width} "xorMInt" MInt{Width} [function, total, hook(MINT.xor), smt-hook(bvxor)] + > left: + MInt{Width} "|MInt" MInt{Width} [function, total, hook(MINT.or), smt-hook(bvor)] +
+

MInt comparison

+

You can compute whether one MInt is less than, less than or equal to, greater +than, or greater than or equal to another MInt when interpreted as signed +or unsigned integers. You can also compute whether one MInt is equal to or +unequal to another MInt.

+
k
syntax {Width} Bool ::= MInt{Width} "<sMInt" MInt{Width} [function, total, hook(MINT.slt), smt-hook(bvslt)] + | MInt{Width} "<uMInt" MInt{Width} [function, total, hook(MINT.ult), smt-hook(bvult)] + | MInt{Width} "<=sMInt" MInt{Width} [function, total, hook(MINT.sle), smt-hook(bvsle)] + | MInt{Width} "<=uMInt" MInt{Width} [function, total, hook(MINT.ule), smt-hook(bvule)] + | MInt{Width} ">sMInt" MInt{Width} [function, total, hook(MINT.sgt), smt-hook(bvsgt)] + | MInt{Width} ">uMInt" MInt{Width} [function, total, hook(MINT.ugt), smt-hook(bvugt)] + | MInt{Width} ">=sMInt" MInt{Width} [function, total, hook(MINT.sge), smt-hook(bvsge)] + | MInt{Width} ">=uMInt" MInt{Width} [function, total, hook(MINT.uge), smt-hook(bvuge)] + | MInt{Width} "==MInt" MInt{Width} [function, total, hook(MINT.eq), smt-hook(=)] + | MInt{Width} "=/=MInt" MInt{Width} [function, total, hook(MINT.ne), smt-hook(distinct)] +
+

MInt min/max

+

You can compute the signed minimum sMinMInt, the signed maximum sMaxMInt, +the unsigned minimum uMinMInt, and the unsigned maximum uMaxMInt of two +MInts.

+
k
syntax {Width} MInt{Width} ::= sMaxMInt(MInt{Width}, MInt{Width}) [function, total, hook(MINT.smax), smt-hook((ite (bvslt #1 #2) #2 #1))] + | sMinMInt(MInt{Width}, MInt{Width}) [function, total, hook(MINT.smin), smt-hook((ite (bvslt #1 #2) #1 #2))] + | uMaxMInt(MInt{Width}, MInt{Width}) [function, total, hook(MINT.umax), smt-hook((ite (bvult #1 #2) #2 #1))] + | uMinMInt(MInt{Width}, MInt{Width}) [function, total, hook(MINT.umin), smt-hook((ite (bvult #1 #2) #1 #2))] +
+

MInt to MInt conversion

+

You can convert an MInt of one width to another width with roundMInt. If the resulting width is smaller than the input, the result is truncated starting from the most significant bit; if the resulting width is larger, the result is zero-extended, keeping the same low-order bits. The signExtendMInt variant declared below behaves analogously when widening, except that the new high-order bits are copies of the sign bit.

+
k
syntax {Width1, Width2} MInt{Width1} ::= roundMInt(MInt{Width2}) [function, total, hook(MINT.round)] + syntax {Width1, Width2} MInt{Width1} ::= signExtendMInt(MInt{Width2}) [function, total, hook(MINT.sext)] +
+
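
A few illustrative conversions (the target width is determined by the sort context in which the call appears):

k
  // roundMInt(258p16) at sort MInt{8}       gives 2p8     (258 = 0x0102; the high byte is truncated)
  // roundMInt(255p8) at sort MInt{16}       gives 255p16  (zero-extended)
  // signExtendMInt(255p8) at sort MInt{16}  gives 65535p16 (255p8 is -1 when read as signed)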
k
endmodule +
diff --git a/k-distribution/include/kframework/builtin/ffi/index.html b/k-distribution/include/kframework/builtin/ffi/index.html
new file mode 100644
index 00000000000..e6d94fb100a
--- /dev/null
+++ b/k-distribution/include/kframework/builtin/ffi/index.html
@@ -0,0 +1,627 @@
+K Foreign Function Interface | Runtime Verification Inc

K Foreign Function Interface

+

The K Foreign Function Interface (FFI) module provides a way to call native +functions directly from a K semantics using the C ABI. It also provides +utilities for allocating and deallocating byte buffers with static addresses +that are suitable for being passed to native code.

+

It is built on top of the libffi library (https://sourceware.org/libffi/) and is subject to some of the same limitations as that library. Bear in mind that, because this module exposes a number of unsafe C APIs directly, misuse is likely to lead to memory corruption in your interpreter and can cause segmentation faults or corrupted term representations that result in undefined behavior at runtime.

+
k
requires "domains.md" + +module FFI-SYNTAX + imports private LIST +
+

The FFIType sort is used to declare the native C ABI types of operands passed +to the #ffiCall function. These types roughly correspond to the types +declared in ffi.h by libffi.

+
k
syntax FFIType ::= "#void" [symbol(#ffi_void)] + | "#uint8" [symbol(#ffi_uint8)] + | "#sint8" [symbol(#ffi_sint8)] + | "#uint16" [symbol(#ffi_uint16)] + | "#sint16" [symbol(#ffi_sint16)] + | "#uint32" [symbol(#ffi_uint32)] + | "#sint32" [symbol(#ffi_sint32)] + | "#uint64" [symbol(#ffi_uint64)] + | "#sint64" [symbol(#ffi_sint64)] + | "#float" [symbol(#ffi_float)] + | "#double" [symbol(#ffi_double)] + | "#uchar" [symbol(#ffi_uchar)] + | "#schar" [symbol(#ffi_schar)] + | "#ushort" [symbol(#ffi_ushort)] + | "#sshort" [symbol(#ffi_sshort)] + | "#uint" [symbol(#ffi_uint)] + | "#sint" [symbol(#ffi_sint)] + | "#ulong" [symbol(#ffi_ulong)] + | "#slong" [symbol(#ffi_slong)] + | "#longdouble" [symbol(#ffi_longdouble)] + | "#pointer" [symbol(#ffi_pointer)] + | "#complexfloat" [symbol(#ffi_complexfloat)] + | "#complexdouble" [symbol(#ffi_complexdouble)] + | "#complexlongdouble" [symbol(#ffi_complexlongdouble)] + | "#struct" "(" List ")" [symbol(#ffi_struct)] +endmodule + +module FFI + imports FFI-SYNTAX + imports private BYTES + imports private STRING + imports private BOOL + imports private LIST + imports private INT + +
+

FFI Calls

+

The #ffiCall functions are designed to call a native C ABI function and +return a native result. They come in three variants:

+

Non-variadic

+

In the first variant, #ffiCall(Address, Args, ArgTypes, ReturnType) takes +an integer address of a function (which can be obtained from +#functionAddress), a List of Bytes containing the arguments of the +function, a List of FFITypes containing the types of the parameters of the +function, and an FFIType containing the return type of the function, and +returns the return value of the function as a Bytes.

+
k
syntax Bytes ::= "#ffiCall" "(" Int "," List "," List "," FFIType ")" [function, hook(FFI.call)] +
+

Variadic

+

In the second variant, #ffiCall(Address, Args, FixedTypes, VariadicTypes, ReturnType) takes an integer address of a function, a List of Bytes containing the arguments of the call, a List of FFITypes containing the types of the fixed parameters of the function, a List of FFITypes containing the types of the variadic parameters of the function, and an FFIType containing the return type of the function, and returns the return value of the function as a Bytes.

+
k
syntax Bytes ::= "#ffiCall" "(" Int "," List "," List "," List "," FFIType ")" [function, hook(FFI.call_variadic)] +
+

Generic

+

In the third variant, #ffiCall(IsVariadic, Address, Args, ArgTypes, NFixed, ReturnType) takes a boolean indicating whether the function is variadic or not, an integer address of a function, a List of Bytes containing the arguments of the call, a List of FFITypes containing the parameter types of the call followed by the types of the variadic arguments of the call, if any, an Int indicating how many of the arguments of the call are fixed, and an FFIType containing the return type of the function, and returns the return value of the function as a Bytes.

+
k
syntax Bytes ::= "#ffiCall" "(" Bool "," Int "," List "," List "," Int "," FFIType ")" [function] + + rule #ffiCall(false, Addr::Int, Args::List, Types::List, _, Ret::FFIType) => #ffiCall(Addr, Args, Types, Ret) + rule #ffiCall(true, Addr::Int, Args::List, Types::List, NFixed::Int, Ret::FFIType) => #ffiCall(Addr, Args, range(Types, 0, size(Types) -Int NFixed), range(Types, NFixed, 0), Ret) +
+

Symbol Lookup

+

The FFI module provides a mechanism to look up any function symbol and return +that function's address.

+
k
syntax Int ::= "#functionAddress" "(" String ")" [function, hook(FFI.address)] +
+
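
As a sketch, the following rule fragment calls the C library function abs through the FFI, marshalling the argument and result through 4-byte buffers. This is illustrative only: it assumes abs is resolvable by the interpreter, that a C int is 4 bytes, and the helper name cAbs is invented here.

k
  //   syntax Int ::= cAbs(Int) [function, impure]
  //   rule cAbs(I:Int)
  //     => Bytes2Int(#ffiCall(#functionAddress("abs"),
  //                           ListItem(Int2Bytes(4, I, LE)),
  //                           ListItem(#sint32),
  //                           #sint32),
  //                  LE, Signed)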

Direct Memory Management

+

Most memory used by the LLVM backend to represent terms is managed +automatically via garbage collection. However, a consequence of this is that +a particular term does not have a fixed address across its entire lifetime +in most cases. Sometimes this is undesirable, especially if you intend for +the address of the memory to be taken by the semantics or if you intend +to pass this memory directly to native code. As a result, the FFI module +exposes the following unsafe APIs for memory management. Note that use of +these APIs leaves the burden of memory management completely on the user, +and thus misuse of these functions can lead to things like use-after-free +and other memory corruption bugs.

+

Allocation

+

#alloc(Key, Size, Align) will allocate Size bytes with an alignment +requirement of Align (which must be a power of two), and return it as a +Bytes term. The memory is uniquely identified by its key and that key will +be used later to free the memory. The memory is not implicitly freed by garbage +collection; failure to call #free on the memory at a later date can lead to +memory leaks.

+
k
syntax Bytes ::= "#alloc" "(" KItem "," Int "," Int ")" [function, hook(FFI.alloc)] +
+

Addressing

+

#address(B) will return an Int representing the address of the first byte of B, which must be a Bytes. Unless the Bytes term was allocated by #alloc, the return value is unspecified and may not be the same across multiple invocations on the same byte buffer. However, it is guaranteed that memory allocated by #alloc will have the same address throughout its lifetime.

+
k
syntax Int ::= "#address" "(" Bytes ")" [function, hook(FFI.bytes_address)] +
+

Deallocation

+

#free(Key) will free the memory of the Bytes object that was allocated +by a previous call to #alloc. If Key was not used in a previous call to +#alloc, or the memory was already freed, no action is taken. It will generate +undefined behavior if the Bytes term returned by the previous call to +#alloc is still referenced by any other term in the configuration or a +currently evaluating rule. The function returns .K.

+
k
syntax K ::= "#free" "(" KItem ")" [function, hook(FFI.free)] +
+
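
A typical lifecycle for one of these buffers looks like this (illustrative only; the key "buf" is arbitrary):

k
  // #alloc("buf", 16, 8)   allocates 16 bytes, 8-byte aligned, keyed by "buf", returning a Bytes B
  // #address(B)            returns the stable native address of B (e.g. to pass to #ffiCall)
  // #free("buf")           releases the buffer once no other term references B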

Reading

+

#nativeRead(Addr, Mem) will read native memory at address Addr into Mem, +reading exactly lengthBytes(Mem) bytes. This will generate undefined behavior +if Addr does not point to a readable segment of memory at least +lengthBytes(Mem) bytes long.

+
k
syntax K ::= "#nativeRead" "(" Int "," Bytes ")" [function, hook(FFI.read)] +
+

Writing

+

#nativeWrite(Addr, Mem) will write the contents of Mem to native memory at +address Addr. The memory will be read prior to being written, and a write +will only happen if the memory has a different value than the current value of +Mem. This will generate undefined behavior if Addr does not point to a +readable segment of memory at least lengthBytes(Mem) bytes long, or if the +memory at address Addr has a different value than currently contained in +Mem, and the memory in question is not writeable.

+
k
syntax K ::= "#nativeWrite" "(" Int "," Bytes ")" [function, hook(FFI.write)] +endmodule +
+
diff --git a/k-distribution/include/kframework/builtin/json/index.html b/k-distribution/include/kframework/builtin/json/index.html
new file mode 100644
index 00000000000..c65ac03e521
--- /dev/null
+++ b/k-distribution/include/kframework/builtin/json/index.html
@@ -0,0 +1,436 @@
+Syntax of JSON | Runtime Verification Inc

Syntax of JSON

+

K provides builtin support for reading/writing to JSON. While the JSON-SYNTAX +module is not precisely the syntax of JSON (utilizing K's syntax for strings, +integers, and floating point numbers rather than the syntax used by JSON), +you can still convert directly to/from the actual syntax of JSON using +the JSON2String and String2JSON hooks.

+
k
module JSON-SYNTAX + imports INT-SYNTAX + imports STRING-SYNTAX + imports BOOL-SYNTAX + imports FLOAT-SYNTAX + + syntax JSONs ::= List{JSON,","} [symbol(JSONs)] + syntax JSONKey ::= String + syntax JSON ::= "null" [symbol(JSONnull)] + | String | Int | Float | Bool + | JSONKey ":" JSON [symbol(JSONEntry)] + | "{" JSONs "}" [symbol(JSONObject)] + | "[" JSONs "]" [symbol(JSONList)] +endmodule +
+

Conversion between JSON and String

+

Given a string written in valid JSON, you can convert it to the JSON +sort with the String2JSON function. Assuming the user has not extended +the syntax of the JSON sort with their own constructors, any term of sort +JSON can also be converted to a String using the JSON2String function.

+
k
module JSON + imports JSON-SYNTAX + + syntax String ::= JSON2String(JSON) [function, symbol(JSON2String), hook(JSON.json2string)] + + syntax JSON ::= String2JSON(String) [function, symbol(String2JSON), hook(JSON.string2json)] +endmodule +
+
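
For example (hypothetical evaluations; the exact whitespace produced by JSON2String may differ):

k
  // String2JSON("{\"x\": 1, \"y\": [true, null]}")  gives  { "x" : 1 , "y" : [ true , null ] }
  // JSON2String({ "x" : 1 })                        gives  "{\"x\": 1}"   (modulo whitespace)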
diff --git a/k-distribution/include/kframework/builtin/kast/index.html b/k-distribution/include/kframework/builtin/kast/index.html
new file mode 100644
index 00000000000..2ba387fefb3
--- /dev/null
+++ b/k-distribution/include/kframework/builtin/kast/index.html
@@ -0,0 +1,966 @@
+K Language Features | Runtime Verification Inc

K Language Features

+

Defined below is a series of modules used to parse inner syntax in K (i.e., the contents of rules, configuration declarations, and contexts).

+

Much of this file exists in tight correspondence with the K implementation, and +K will not work correctly if it is altered without corresponding changes to the +source code of the K tools.

+

Users should only import a few modules from this file. In particular, this +includes SORT-K, BASIC-K, ML-SYNTAX, DEFAULT-LAYOUT, +DEFAULT-CONFIGURATION, and K-AMBIGUITIES. The remaining modules should not +be imported by the user; they are used implicitly by the implementation of K.

+

Basic K Sorts

+

The SORT-K module declares the K sort, and nothing else.

+
k
module SORT-K + syntax K [hook(K.K)] +endmodule +
+

The BASIC-K module declares the K, KItem, and KConfigVar sorts, and +imports the syntax of matching logic.

+
k
module BASIC-K + imports ML-SYNTAX + imports SORT-BOOL + syntax KItem [hook(K.KItem)] + syntax K ::= KItem + syntax KConfigVar [token] + syntax KItem ::= KConfigVar +endmodule +
+

KAST Abstract Syntax

+

Below is defined the abstract syntax of concrete terms in K, the KAST syntax. +Users should rarely if ever have to refer to this syntax; in general, it +suffices to use concrete syntax in rules, configuration declarations, contexts, +etc.

+

This syntax is used directly by the K implementation, and exists here as a +reference for the syntax of KAST, but it should not be imported directly by +the user.

+
k
module KSTRING + syntax KString ::= r"[\\\"](([^\\\"\\n\\r\\\\])|([\\\\][nrtf\\\"\\\\])|([\\\\][x][0-9a-fA-F]{2})|([\\\\][u][0-9a-fA-F]{4})|([\\\\][U][0-9a-fA-F]{8}))*[\\\"]" [token] + // optionally qualified strings, like in Scala "abc", i"abc", r"a*bc", etc. +endmodule + +module BUILTIN-ID-TOKENS + syntax #LowerId ::= r"[a-z][a-zA-Z0-9]*" [prec(2), token] + syntax #UpperId ::= r"[A-Z][a-zA-Z0-9]*" [prec(2), token] +endmodule + +module SORT-KBOTT + imports SORT-K + syntax KBott +endmodule + +module KAST + imports BASIC-K + imports SORT-KBOTT + imports KSTRING + imports BUILTIN-ID-TOKENS + + syntax KBott ::= "#token" "(" KString "," KString ")" [symbol(#KToken)] + | "#klabel" "(" KLabel ")" [symbol(#WrappedKLabel)] + | KLabel "(" KList ")" [symbol(#KApply)] + syntax KItem ::= KBott + + syntax KLabel ::= r"`(\\\\`|\\\\\\\\|[^`\\\\\\n\\r])+`" [token] + | #LowerId [token] + | r"[#a-z][a-zA-Z0-9]*" [token, prec(1)] + + syntax KList ::= K + | ".KList" [symbol(#EmptyKList)] + | KList "," KList [symbol(#KList), left, assoc, unit(#EmptyKList), prefer] +endmodule + + +// To be used when parsing/pretty-printing ground configurations +module KSEQ + imports KAST + imports K-TOP-SORT + syntax K ::= ".K" [symbol(#EmptyK)] + | "." [symbol(#EmptyK), deprecated, unparseAvoid] + syntax K ::= K "~>" K [symbol(#KSequence), left, assoc, unit(#EmptyK)] + syntax left #KSequence + syntax {Sort} Sort ::= "(" Sort ")" [bracket, group(defaultBracket), applyPriority(1)] +endmodule +
+

Syntax of Matching Logic

+

K provides direct access to the symbols of Matching Logic, while giving them their own concrete syntax distinct from the syntax of the KORE intermediate representation. These symbols are primarily used during symbolic execution. The LLVM Backend has relatively little understanding of Matching Logic, and using these symbols directly in rules is likely to cause it to crash. However, they are necessary when providing lemmas and other forms of logical assistance to proofs and symbolic execution in the Haskell Backend.

+

The correspondence between K symbols and KORE symbols is as follows:

+
    +
  • #Top - \top
  • +
  • #Bottom - \bottom
  • +
  • #Not - \not
  • +
  • #Ceil - \ceil
  • +
  • #Floor - \floor
  • +
  • #Equals - \equals
  • +
  • #And - \and
  • +
  • #Or - \or
  • +
  • #Implies - \implies
  • +
  • #Exists - \exists
  • +
  • #Forall - \forall
  • +
  • #AG - allPathGlobally
  • +
  • #wEF - weakExistsFinally
  • +
  • #wAF - weakAlwaysFinally
  • +
+
k
module ML-SYNTAX [not-lr1] + imports SORT-K + + syntax {Sort} Sort ::= "#Top" [symbol(#Top), group(mlUnary)] + | "#Bottom" [symbol(#Bottom), group(mlUnary)] + | "#Not" "(" Sort ")" [symbol(#Not), mlOp, group(mlUnary, mlOp)] + + syntax {Sort1, Sort2} Sort2 ::= "#Ceil" "(" Sort1 ")" [symbol(#Ceil), mlOp, group(mlUnary, mlOp)] + | "#Floor" "(" Sort1 ")" [symbol(#Floor), mlOp, group(mlUnary, mlOp)] + | "{" Sort1 "#Equals" Sort1 "}" [symbol(#Equals), mlOp, group(mlEquals, mlOp), comm, format(%1%i%n%2%d%n%3%i%n%4%d%n%5)] + + syntax priority mlUnary > mlEquals > mlAnd + + syntax {Sort} Sort ::= Sort "#And" Sort [symbol(#And), assoc, left, comm, unit(#Top), mlOp, group(mlAnd, mlOp), format(%i%1%d%n%2%n%i%3%d)] + > Sort "#Or" Sort [symbol(#Or), assoc, left, comm, unit(#Bottom), mlOp, group(mlOp), format(%i%1%d%n%2%n%i%3%d)] + > Sort "#Implies" Sort [symbol(#Implies), mlOp, group(mlImplies, mlOp), format(%i%1%d%n%2%n%i%3%d)] + + syntax priority mlImplies > mlQuantifier + + syntax {Sort1, Sort2} Sort2 ::= "#Exists" Sort1 "." Sort2 [symbol(#Exists), mlOp, mlBinder, group(mlQuantifier, mlOp)] + | "#Forall" Sort1 "." Sort2 [symbol(#Forall), mlOp, mlBinder, group(mlQuantifier, mlOp)] + + syntax {Sort} Sort ::= "#AG" "(" Sort ")" [symbol(#AG), mlOp, group(mlOp)] + | "#wEF" "(" Sort ")" [symbol(weakExistsFinally), mlOp, group(mlOp)] + | "#wAF" "(" Sort ")" [symbol(weakAlwaysFinally), mlOp, group(mlOp)] +endmodule +
+
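
A typical use of these symbols is in simplification lemmas for the Haskell Backend, for example (illustrative; the second rule is the one shipped in BYTES-SYMBOLIC-CEIL above):

k
  // rule {X ==Int Y #Equals true} => {X #Equals Y} [simplification]
  // rule #Ceil(padRightBytes(_, LEN, VAL))
  //   => {(0 <=Int LEN andBool 0 <=Int VAL andBool VAL <Int 256) #Equals true} [simplification]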

Variables in K

+

Provided below is the syntax of variables in K. There are four types of +variables in K:

+
    +
  1. Regular variables. These are denoted by variables that begin with an +underscore or a capital letter. These variables match exactly one value +and can be used to refer to it on the right-hand-side.
  2. +
  3. Fresh constants. These are denoted by variables that begin with an !. This +is a convenience syntax which can be used on the right-hand-side only, and +refer to a unique value of the specified sort which is distinct from any +other value that has been generated or will be generated by the !X syntax. +Note that this may not be distinct from values produced via other means.
  4. +
  5. Existential variables. This refers to variables that are existentially +quantified and begin with a ?. They are not required to appear on the +left-hand-side prior to appearing on the right-hand-side, and generally +refer to symbolic quantities that are introduced during rewriting. Refer to +K's documentation for more details.
  6. +
  7. Set variables. These are denoted by variables that begin with a @. +These variables refer to a set of values and are generally used when writing +simplification rules in the Haskell Backend. For more information, refer to +K's documentation.
  8. +
+

There is also a fifth type of "variable", although it is not technically a +variable. This refers to configuration variables, which are used to insert +values into the initial configuration that come from outside the semantics. +The most common of these is the $PGM variable, which conventionally contains +the program being executed and is placed in the <k> cell in the configuration +declaration. These "variables" begin with a $ and their values are populated +by the frontend prior to symbolic or concrete execution of a program.

+
k
module KVARIABLE-SYNTAX + syntax #KVariable +endmodule + +// To be used when parsing/pretty-printing symbolic configurations +module KSEQ-SYMBOLIC + imports KSEQ + imports ML-SYNTAX + imports KVARIABLE-SYNTAX + + syntax #KVariable ::= r"(\\!|\\?|@)?([A-Z][A-Za-z0-9'_]*|_|_[A-Z][A-Za-z0-9'_]*)" [token, prec(1)] + | #UpperId [token] + syntax KConfigVar ::= r"(\\$)([A-Z][A-Za-z0-9'_]*)" [token] + syntax KBott ::= #KVariable + syntax KBott ::= KConfigVar +endmodule +
+
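
The following rule fragments illustrate each kind of variable described above (the cell and operation names are invented):

k
  // rule <k> inc(X:Int) => X +Int 1 ...</k>    // regular variable: matches one value
  // rule <k> newId() => !I:Int ...</k>         // fresh constant: a brand new value
  // rule <k> alloc() => ?A:Int ...</k>         // existential variable: introduced on the rhs
  // configuration <k> $PGM:K </k>              // configuration variable: filled in by the frontend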

Syntax of Cells

+

While the backend treats cells as regular productions like any other, the +frontend provides a significant amount of convenience notation for dealing with +groups of cells, in order to make writing modular definitions easier. As a +result, we need a syntax for groups of cells and for referring to cells within +rules, configuration declarations, and functions.

+

For historical reasons, the Bag sort is used to refer to groups of cells. +This may change in a future release. Users can combine cells in any order +by concatenating them together, and can refer to the absence of any cells with +the .Bag symbol. You can also refer to cells within a function by placing +the cell context symbol, [[ K ]] at the top of a rule, placing a function +symbol inside, and referring to cells afterwards. This implicitly inserts +a reference to the configuration at the time prior to the currently-applied +rule being applied which can be matched on within the function. Functions with +such context cannot be referred to in the initial configuration, because the +prior configuration does not yet exist.

+
k
module KCELLS + imports KAST + + syntax Cell + syntax Bag ::= Bag Bag [left, assoc, symbol(#cells), unit(#cells)] + | ".Bag" [symbol(#cells)] + | ".::Bag" [symbol(#cells)] + | Cell + syntax Bag ::= "(" Bag ")" [bracket] + syntax KItem ::= Bag + syntax #RuleBody ::= "[" "[" K "]" "]" Bag [symbol(#withConfig), avoid] + syntax non-assoc #withConfig + syntax Bag ::= KBott +endmodule +
+

Users can also refer to cells in rules. When doing so, an optional ... can +be placed immediately after the start of the cell or immediately before the +end. In a cell whose contents are commutative, these are equivalent to one +another and are also equivalent to placing ... in both places. This means +that what is placed in the cell will be combined with the cell contents' +concatenation operator with an unnamed variable. In other words, you match on +some number of elements in the collection and do not care about the rest of +the collection.

+

In a cell whose contents are not commutative, the ... operators correspond +to a variable on the respective side of the contents of the cell that the +... appears. For example, <foo>... L </foo>, if L is a list, means +some number of elements followed by L. Note that not all combinations are +supported. Cells whose contents are sort K can only have ... appear at the +tail of the cell, and cells whose contents are sort List can only have ... +appear on at most one side in a single rule.

+
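
For example (illustrative rule fragments; the cells are assumed to be declared elsewhere in the definition):

k
  // rule <env>... X |-> (_ => V) ...</env>         // commutative contents (a Map): match one binding
  // rule <out>... (.List => ListItem(S)) </out>    // non-commutative contents (a List): append at the end
  // rule <k> lookup(X) => V ...</k> <env>... X |-> V ...</env>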
k
module RULE-CELLS + imports KCELLS + imports RULE-LISTS + // if this module is imported, the parser automatically + // generates, for all productions that have the attribute 'cell' or 'maincell', + // a production like below: + //syntax Cell ::= "<top>" #OptionalDots K #OptionalDots "</top>" [symbol(<top>)] + + syntax #OptionalDots ::= "..." [symbol(#dots)] + | "" [symbol(#noDots)] + + syntax Int + // this production will be added by the compiler to help handle bang variables, + // however, it is valuable to put it here because without this production, it + // will not exist at the point in time when rules and claims are parsed, and + // as a result it makes it very difficult to write proof claims over fragments + // of code that exercise rules containing bang variables. We put it here because + // this production will "vanish" after parsing finishes and not be picked up + // by the compiler, which is the behavior we want in this case since an actual + // production will be generated by the compiler later on. + syntax GeneratedCounterCell ::= "<generatedCounter>" Int "</generatedCounter>" [cell, symbol(<generatedCounter>), internal] +endmodule +
+

Users can also declare cells in a configuration declaration. This generates a +specific set of productions that is used internally to implement the cell. The +most important of these is the cell itself, and attributes on this production +can be specified in an xml-attribute-like syntax.

+

You can also use an xml-short-tag-like syntax to compose configuration cells +together which were defined in different modules. However, it is a requirement +that any K definition have at most one fully-composed configuration; thus, all +other configuration declarations must appear composed within another +configuration declaration.

+
k
module CONFIG-CELLS + imports KCELLS + imports RULE-LISTS + syntax #CellName ::= r"[a-zA-Z][a-zA-Z0-9\\-]*" [token, prec(1)] + | #LowerId [token] + | #UpperId [token] + + syntax Cell ::= "<" #CellName #CellProperties ">" K "</" #CellName ">" [symbol(#configCell)] + syntax Cell ::= "<" #CellName "/>" [symbol(#externalCell)] + + syntax #CellProperties ::= #CellProperty #CellProperties [symbol(#cellPropertyList)] + | "" [symbol(#cellPropertyListTerminator)] + syntax #CellProperty ::= #CellName "=" KString [symbol(#cellProperty)] +endmodule +
+

Syntax of Rules

+

Rules can have an optional requires clause or an ensures clause. For backwards compatibility, you can refer to the requires clause with both the requires and when keywords; the latter, however, is deprecated and may be removed in a future release.

+

The requires clause specifies the preconditions that must be true in order +for the rule to apply. The ensures clause specifies the information which +becomes true after the rule has applied. It is a requirement that information +present in the ensures clause refer to existential variables only.

+

When doing concrete execution, you can think of the requires clause as a +side-condition. In other words, even if the rule matches, it will not apply +unless the requires clause, which must be of sort Bool, evaluates to +true.

+
k
module REQUIRES-ENSURES + imports BASIC-K + + syntax #RuleBody ::= K + + syntax #RuleContent ::= #RuleBody [symbol("#ruleNoConditions")] + | #RuleBody "requires" Bool [symbol("#ruleRequires")] + | #RuleBody "ensures" Bool [symbol("#ruleEnsures")] + | #RuleBody "requires" Bool "ensures" Bool [symbol("#ruleRequiresEnsures")] +endmodule +
+

Miscellaneous modules

+

The below modules are used in various ways as indicators to the implementation +that certain automatically generated syntax should be created by the parser. +These modules should not be imported directly by the user.

+
k
module K-TOP-SORT + imports SORT-KBOTT + syntax KItem ::= KBott + syntax {Sort} KItem ::= Sort +endmodule + +module K-BOTTOM-SORT + imports SORT-KBOTT + syntax KItem ::= KBott + syntax {Sort} Sort ::= KBott +endmodule + +module K-SORT-LATTICE + imports K-TOP-SORT + imports K-BOTTOM-SORT +endmodule + +module AUTO-CASTS + // if this module is imported, the parser automatically + // generates, for all sorts, productions of the form: + // Sort ::= Sort ":Sort" // semantic cast - force the inner term to be `Sort` or a subsort + // Sort ::= Sort "::Sort" // strict cast - force the inner term to be exactly `Sort`. Useful for disambiguation + // Sort ::= "{" Sort "}" "::Sort" // synonym for strict cast + // Sort ::= "{" K "}" ":>Sort" // projection cast. Allows any term to be placed in a context that expects `Sort` + // this is part of the mechanism that allows concrete user syntax in K +endmodule + +module AUTO-FOLLOW + // if this module is imported, the parser automatically + // generates a follow restriction for every terminal which is a prefix + // of another terminal. This is useful to prevent ambiguities such as: + // syntax K ::= "a" + // syntax K ::= "b" + // syntax K ::= "ab" + // syntax K ::= K K + // #parse("ab", "K") + // In the above example, the terminal "a" is not allowed to be followed by a "b" + // because it would turn the terminal into the terminal "ab". +endmodule + +module PROGRAM-LISTS + imports SORT-K + // if this module is imported, the parser automatically + // replaces the default productions for lists: + // Es ::= E "," Es [userList("*"), symbol('_,_)] + // | ".Es" [userList("*"), symbol('.Es)] + // into a series of productions more suitable for programs: + // Es#Terminator ::= "" [symbol('.Es)] + // Ne#Es ::= E "," Ne#Es [symbol('_,_)] + // | E Es#Terminator [symbol('_,_)] + // Es ::= Ne#Es + // | Es#Terminator // if the list is * +endmodule + +module RULE-LISTS + // if this module is imported, the parser automatically + // adds the subsort production to the parsing module only: + // Es ::= E [userList("*")] + +endmodule + +module RECORD-PRODUCTIONS + // if this module is imported, prefix productions of the form + // syntax Sort ::= name(Args) + // will be able to be parsed with don't-care variables according + // to their nonterminal's names +endmodule + +module SORT-PREDICATES + // if this module is imported, the Bool sort will be annotated with + // syntax Bool ::= isSort(K) [function] + // and all sorts will be annotated with + // syntax Sort ::= project:Sort(K) [function] +endmodule +
+

Additional Syntax for K Terms in Rules

+

Certain additional features are available when parsing the contents of rules +and contexts. For more information on each of these, refer to K's +documentation.

+
k
module KREWRITE + syntax {Sort} Sort ::= Sort "=>" Sort [symbol(#KRewrite)] + syntax non-assoc #KRewrite + syntax priority #KRewrite > #withConfig +endmodule + +// To be used to parse semantic rules +module K + imports KSEQ-SYMBOLIC + imports REQUIRES-ENSURES + imports RECORD-PRODUCTIONS + imports SORT-PREDICATES + imports K-SORT-LATTICE + imports AUTO-CASTS + imports AUTO-FOLLOW + imports KREWRITE + + syntax {Sort} Sort ::= Sort "#as" Sort [symbol(#KAs)] + // functions that preserve sorts and can therefore have inner rewrites + syntax {Sort} Sort ::= "#fun" "(" Sort ")" "(" Sort ")" [symbol(#fun2), prefer] + // functions that do not preserve sort and therefore cannot have inner rewrites + syntax {Sort1, Sort2} Sort1 ::= "#fun" "(" Sort2 "=>" Sort1 ")" "(" Sort2 ")" [symbol(#fun3)] + + syntax {Sort1, Sort2} Sort1 ::= "#let" Sort2 "=" Sort2 "#in" Sort1 [symbol(#let)] + + /*@ Set membership over terms. In addition to equality over + concrete patterns, K also supports computing equality + between a concrete pattern and a symbolic pattern. + This is compiled efficiently down to pattern matching, + and can be used by putting a term with unbound variables + in the left child of :=K or =/=K. Note that this does not + bind variables used on the lhs however (although this may + change in the future).*/ + + syntax Bool ::= left: + K ":=K" K [function, total, symbol(_:=K_), group(equalEqualK)] + | K ":/=K" K [function, total, symbol(_:/=K_), group(notEqualEqualK)] +endmodule + +// To be used to parse terms in full K +module K-TERM + imports KSEQ-SYMBOLIC + imports RECORD-PRODUCTIONS + imports SORT-PREDICATES + imports K-SORT-LATTICE + imports AUTO-CASTS + imports AUTO-FOLLOW + imports KREWRITE +endmodule +
+
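For example (a small, hypothetical module: the names LET-FUN-EXAMPLE, double, and incTwice are invented here, and INT is assumed to be available), #let and #fun can appear on the right-hand side of function rules like this:

.k .exclude
module LET-FUN-EXAMPLE
  imports INT

  syntax Int ::= double(Int)   [function]
               | incTwice(Int) [function]

  // #let binds D to the value of I +Int I in the body after #in
  rule double(I) => #let D = I +Int I #in D

  // #fun builds an anonymous function that is applied immediately
  rule incTwice(I) => #fun(X => X +Int 1)(#fun(X => X +Int 1)(I))
endmodule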

Layout Information

+

When constructing a scanner for use during parsing, often you wish to ignore +certain types of text, such as whitespace and comments. However, the specific +syntax which each language must ignore is a little different from language +to language, and thus you wish to specify it manually. You can do this by +defining productions of the #Layout sort. For more information, refer to +K's documentation. However, this module will be implicitly imported if no +productions are declared of sort #Layout. This module will also be used +for the purposes of parsing K rules. If you wish to declare a language with +no layout productions, simply create a sort declaration for the #Layout sort +in your code (e.g. syntax #Layout).

+
k
module DEFAULT-LAYOUT + syntax #Layout ::= r"(\\/\\*([^\\*]|(\\*+([^\\*\\/])))*\\*+\\/)" // C-style multi-line comments + | r"(\\/\\/[^\\n\\r]*)" // C-style single-line comments + | r"([\\ \\n\\r\\t])" // Whitespace +endmodule +
+

Default Configuration

+

If the user has no configuration declaration in their semantics, the below +configuration declaration will be implicitly imported.

+
k
module DEFAULT-CONFIGURATION + imports BASIC-K + + configuration <k> $PGM:K </k> +endmodule +
+

Parsing Ambiguous Languages

+

On occasion, it may be desirable to parse a language with an ambiguous grammar +when parsing a program, and perform additional semantic analysis at a later +time in order to resolve the ambiguities. A good example of this is as a +substitute for the lexer hack in parsers of the C programming language.

+

The following module contains a declaration for ambiguities in K. Usually, +an ambiguous parse is an error. However, when you use the --gen-glr-parser +flag to kast, or the --gen-glr-bison-parser flag to kompile, ambiguities +instead become instances of the below parametric production, which you can use +regular K rules to disambiguate as necessary.

+
k
module K-AMBIGUITIES + + syntax {Sort} Sort ::= amb(Sort, Sort) [symbol(amb)] + +endmodule +
+
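As a rough sketch of how such an ambiguity might then be resolved with an ordinary rule (the module name, the Exp sort, and the policy of always keeping the first alternative are all invented for illustration; this is not taken from the K distribution):

.k .exclude
module AMB-EXAMPLE
  imports K-AMBIGUITIES

  syntax Exp ::= "a" | "b"

  // hypothetical policy: whenever a parse is ambiguous between two Exps,
  // keep the first alternative wherever the ambiguity occurs in the term
  rule amb(E1:Exp, _:Exp) => E1 [anywhere]
endmodule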

Annotating Parses with Locations

+

Another feature of K's Bison parser is the ability to annotate terms parsed +with location information about the file and line where they occurred. For +more information about how to use this, refer to K's documentation. However, +the below module exists to provide a user syntax for the annotations that +are generated by the parser.

+
k
module K-LOCATIONS + imports STRING-SYNTAX + imports INT-SYNTAX + + // filename, startLine, startCol, endLine, endCol + syntax {Sort} Sort ::= #location(Sort, String, Int, Int, Int, Int) [symbol(#location), format(%3)] + +endmodule +
+
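As a rough illustration of the shape of these annotations (the term foo() and the file name are invented), a term parsed from line 3, columns 5 through 10 of test.k would be wrapped along the following lines:

.k .exclude
#location(foo(), "test.k", 3, 5, 3, 10)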
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/include/kframework/builtin/prelude/index.html b/k-distribution/include/kframework/builtin/prelude/index.html new file mode 100644 index 00000000000..afe317a4075 --- /dev/null +++ b/k-distribution/include/kframework/builtin/prelude/index.html @@ -0,0 +1,407 @@ + + + + + + + + + + + + + + +K Prelude | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

K Prelude

+

The following files, integral to defining semantics in K, are automatically +required by every definition via this file. This behavior can be disabled +via kompile --no-prelude, however, semantics will likely break unless +they provide their own versions of these files, which are assumed to exist +by the compiler. There are, however, circumstances where passing this flag is +appropriate, such as if you are manually requiring these files in your +definition, if your definition was automatically condensed into a single file +with kompile -E, or if you wish to modify the inner syntax of K by providing +your own version of these files with different syntax.

+
k
requires "kast.md" +requires "domains.md" +
+
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/include/kframework/builtin/rat/index.html b/k-distribution/include/kframework/builtin/rat/index.html new file mode 100644 index 00000000000..4ac39074159 --- /dev/null +++ b/k-distribution/include/kframework/builtin/rat/index.html @@ -0,0 +1,628 @@ + + + + + + + + + + + + + + +Rational Numbers in K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Rational Numbers in K

+

K provides support for arbitrary-precision rational numbers represented as a +quotient between two integers. The sort representing these values is Rat. +Int is a subsort of Rat, and it is guaranteed that any integer will be +represented as an Int and can be matched as such on the left hand side +of rules. K also supports the usual arithmetic operators over rational numbers.

+
k
module RAT-SYNTAX + imports INT-SYNTAX + imports private BOOL + + syntax Rat + + syntax Rat ::= Int +
+

Arithmetic

+

You can:

+
    +
  • Raise a rational number to any negative or nonnegative integer power.
  • +
  • Multiply or divide two rational numbers to obtain a product or quotient.
  • +
  • Add or subtract two rational numbers to obtain a sum or difference.
  • +
+
k
syntax Rat ::= left: + Rat "^Rat" Int [function, total, symbol(_^Rat_), smtlib(ratpow), hook(RAT.pow)] + > left: + Rat "*Rat" Rat [function, total, symbol(_*Rat_), left, smtlib(ratmul), hook(RAT.mul)] + | Rat "/Rat" Rat [function, symbol(_/Rat_), left, smtlib(ratdiv), hook(RAT.div)] + > left: + Rat "+Rat" Rat [function, total, symbol(_+Rat_), left, smtlib(ratadd), hook(RAT.add)] + | Rat "-Rat" Rat [function, total, symbol(_-Rat_), left, smtlib(ratsub), hook(RAT.sub)] +
+
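For instance, a definition importing RAT might use these operators as follows (the module name and the helper functions below are hypothetical and serve only to illustrate the syntax):

.k .exclude
module RAT-ARITHMETIC-EXAMPLE
  imports RAT
  imports BOOL

  // made-up helpers over Rat
  syntax Rat ::= half(Rat)       [function]
               | reciprocal(Rat) [function]

  rule half(R) => R /Rat 2                            // e.g. half(3) evaluates to 3 /Rat 2
  rule reciprocal(R) => 1 /Rat R requires R =/=Rat 0  // division by zero is undefined, hence the side condition
endmodule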

Comparison

+

You can determine whether two rational numbers are equal or unequal, or whether +one is less than, less than or equal to, greater than, or greater than or equal +to the other:

+
k
syntax Bool ::= Rat "==Rat" Rat [function, total, symbol(_==Rat_), smtlib(rateq), hook(RAT.eq)] + | Rat "=/=Rat" Rat [function, total, symbol(_=/=Rat_), smtlib(ratne), hook(RAT.ne)] + | Rat ">Rat" Rat [function, total, symbol(_>Rat_), smtlib(ratgt), hook(RAT.gt)] + | Rat ">=Rat" Rat [function, total, symbol(_>=Rat_), smtlib(ratge), hook(RAT.ge)] + | Rat "<Rat" Rat [function, total, symbol(_<Rat_), smtlib(ratlt), hook(RAT.lt)] + | Rat "<=Rat" Rat [function, total, symbol(_<=Rat_), smtlib(ratle), hook(RAT.le)] +
+
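These comparisons are most often used in side conditions or to define Bool-valued helpers. For example (a made-up function, assuming the RAT and BOOL modules are imported):

.k .exclude
  // true when 0 <= R < 1
  syntax Bool ::= isProperFraction(Rat) [function, total]
  rule isProperFraction(R) => 0 <=Rat R andBool R <Rat 1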

Min/Max

+

You can compute the minimum and maximum of two rational numbers:

+
k
syntax Rat ::= minRat(Rat, Rat) [function, total, symbol(minRat), smtlib(ratmin), hook(RAT.min)] + | maxRat(Rat, Rat) [function, total, symbol(maxRat), smtlib(ratmax), hook(RAT.max)] +
+

Conversion to Floating Point

+

You can convert a rational number to the nearest floating-point number that +is representable as a Float with a specified number of precision and exponent +bits:

+
k
syntax Float ::= Rat2Float(Rat, precision: Int, exponentBits: Int) [function] +endmodule +
+
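For example, to round one third to the nearest IEEE 754 double-precision value (53 bits of precision and 11 exponent bits), one might write something along these lines:

.k .exclude
  Rat2Float(1 /Rat 3, 53, 11)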

Implementation of Rational Numbers

+

The remainder of this file consists of an implementation in K of the +operations listed above. Users of the RAT module should not use any of the +syntax defined in any of these modules.

+

As a point of reference for users, it is worth noting that rational numbers +are normalized to a canonical form by this module. The canonical form has the +property that it is either an Int, or a pair of integers +I /Rat J such that +I =/=Int 0 andBool J >=Int 2 andBool gcdInt(I, J) ==Int 1 is always true. +For example, 4 /Rat 6 normalizes to 2 /Rat 3, while 4 /Rat 2 normalizes to the Int 2.

+
k
module RAT-COMMON + imports RAT-SYNTAX + + // invariant of < I , J >Rat : I =/= 0, J >= 2, and I and J are coprime + syntax Rat ::= "<" Int "," Int ">Rat" [format(%2 /Rat %4)] +endmodule + +module RAT-SYMBOLIC [symbolic] + imports private RAT-COMMON + imports ML-SYNTAX + imports private BOOL + + rule + #Ceil(@R1:Rat /Rat @R2:Rat) + => + {(@R2 =/=Rat 0) #Equals true} #And #Ceil(@R1) #And #Ceil(@R2) + [simplification] +endmodule + +module RAT-KORE + imports private RAT-COMMON + imports private K-EQUAL + + /* + * equalities + */ + + // NOTE: the two rules below may not work correctly in non-kore backends + + rule R ==Rat S => R ==K S + + rule R =/=Rat S => R =/=K S +endmodule + +module RAT [private] + imports private RAT-COMMON + imports public RAT-SYMBOLIC + imports public RAT-KORE + imports public RAT-SYNTAX + imports private INT + imports private BOOL + + /* + * arithmetic + */ + + rule < I , I' >Rat +Rat < J , J' >Rat => ((I *Int J') +Int (I' *Int J)) /Rat (I' *Int J') + rule I:Int +Rat < J , J' >Rat => ((I *Int J') +Int J) /Rat J' + rule < J , J' >Rat +Rat I:Int => I +Rat < J , J' >Rat + rule I:Int +Rat J:Int => I +Int J + + rule < I , I' >Rat *Rat < J , J' >Rat => (I *Int J) /Rat (I' *Int J') + rule I:Int *Rat < J , J' >Rat => (I *Int J) /Rat J' + rule < J , J' >Rat *Rat I:Int => I *Rat < J , J' >Rat + rule I:Int *Rat J:Int => I *Int J + + rule < I , I' >Rat /Rat < J , J' >Rat => (I *Int J') /Rat (I' *Int J) + rule I:Int /Rat < J , J' >Rat => (I *Int J') /Rat J + rule < I , I' >Rat /Rat J:Int => I /Rat (I' *Int J) requires J =/=Int 0 + rule I:Int /Rat J:Int => makeRat(I, J) requires J =/=Int 0 + + // derived + + rule R -Rat S => R +Rat (-1 *Rat S) + + // normalize + + syntax Rat ::= makeRat(Int, Int) [function] + | makeRat(Int, Int, Int) [function] + + rule makeRat(0, J) => 0 requires J =/=Int 0 + + rule makeRat(I, J) => makeRat(I, J, gcdInt(I,J)) requires I =/=Int 0 andBool J =/=Int 0 + + // makeRat(I, J, D) is defined when I =/= 0, J =/= 0, D > 0, and D = gcd(I,J) + rule makeRat(I, J, D) => I /Int D requires J ==Int D // implies J > 0 since D > 0 + rule makeRat(I, J, D) => < I /Int D , J /Int D >Rat requires J >Int 0 andBool J =/=Int D + rule makeRat(I, J, D) => makeRat(0 -Int I, 0 -Int J, D) requires J <Int 0 + + // gcdInt(a,b) computes the gcd of |a| and |b|, which is positive. 
+ syntax Int ::= gcdInt(Int, Int) [function, public] + + rule gcdInt(A, 0) => A requires A >Int 0 + rule gcdInt(A, 0) => 0 -Int A requires A <Int 0 + rule gcdInt(A, B) => gcdInt(B, A %Int B) requires B =/=Int 0 // since |A %Int B| = |A| %Int |B| + + /* + * exponentiation + */ + + rule _ ^Rat 0 => 1 + rule 0 ^Rat N => 0 requires N =/=Int 0 + + rule < I , J >Rat ^Rat N => powRat(< I , J >Rat, N) requires N >Int 0 + rule X:Int ^Rat N => X ^Int N requires N >Int 0 + + rule X ^Rat N => (1 /Rat X) ^Rat (0 -Int N) requires X =/=Rat 0 andBool N <Int 0 + + // exponentiation by squaring + + syntax Rat ::= powRat(Rat, Int) [function] + + // powRat(X, N) is defined when X =/= 0 and N > 0 + rule powRat(X, 1) => X + rule powRat(X, N) => powRat(X *Rat X, N /Int 2) requires N >Int 1 andBool N %Int 2 ==Int 0 + rule powRat(X, N) => powRat(X, N -Int 1) *Rat X requires N >Int 1 andBool N %Int 2 =/=Int 0 + + /* + * inequalities + */ + + rule R >Rat S => R -Rat S >Rat 0 requires S =/=Rat 0 + + rule < I , _ >Rat >Rat 0 => I >Int 0 + rule I:Int >Rat 0 => I >Int 0 + + // derived + + rule R >=Rat S => notBool R <Rat S + + rule R <Rat S => S >Rat R + + rule R <=Rat S => S >=Rat R + + rule minRat(R, S) => R requires R <=Rat S + rule minRat(R, S) => S requires S <=Rat R + + rule maxRat(R, S) => R requires R >=Rat S + rule maxRat(R, S) => S requires S >=Rat R + + syntax Float ::= #Rat2Float(Int, Int, Int, Int) [function, hook(FLOAT.rat2float)] + rule Rat2Float(Num:Int, Prec:Int, Exp:Int) => #Rat2Float(Num, 1, Prec, Exp) + rule Rat2Float(< Num, Dem >Rat, Prec, Exp) => #Rat2Float(Num, Dem, Prec, Exp) + +endmodule +
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/include/kframework/builtin/substitution/index.html b/k-distribution/include/kframework/builtin/substitution/index.html new file mode 100644 index 00000000000..e74b2457afc --- /dev/null +++ b/k-distribution/include/kframework/builtin/substitution/index.html @@ -0,0 +1,492 @@ + + + + + + + + + + + + + + +Capture-Aware Substitution in K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Capture-Aware Substitution in K

+

One of the traditional ways in which functional languages are given operational +semantics is via substitution. In particular, you can view a function as +declaring a particular bound variable, the parameter of the function, as well +as the body of the function, within which both bound and free variables can +occur, and implement the process of beta-reduction (one of the axioms of the +lambda calculus) by means of a substitution operator which is aware of the +difference between free variables and bound variables and prevents variable +capture.

+

In K this is implemented using two mechanisms: The KVar sort, and the +binder attribute.

+

The KVar Sort

+

K introduces a new hooked sort, KVar, which the substitution operator +(defined below) understands in a particular way. The syntax of KVar is the +same as for sort Id in DOMAINS, but with a different sort name. Similarly, +some of the same operators are defined over KVar which are defined for Id, +such as conversion from String to KVar and support for the !Var:KVar +syntax.

+

A KVar is simply an identifier with special meaning during substitution. +KVars must begin with a letter or underscore, +and can be followed by zero or more letters, numbers, or underscores.

+
k
module KVAR-SYNTAX-PROGRAM-PARSING + imports BUILTIN-ID-TOKENS + + syntax KVar ::= r"[A-Za-z\\_][A-Za-z0-9\\_]*" [prec(1), token] + | #LowerId [token] + | #UpperId [token] +endmodule + +module KVAR-SYNTAX + syntax KVar [token, hook(KVAR.KVar)] +endmodule + +module KVAR-COMMON + imports KVAR-SYNTAX + imports private STRING + + syntax KVar ::= String2KVar (String) [function, total, hook(STRING.string2token)] + syntax KVar ::= freshKVar(Int) [freshGenerator, function, total, private] + + rule freshKVar(I:Int) => String2KVar("_" +String Int2String(I)) +endmodule + +module KVAR + imports KVAR-COMMON +endmodule +
+

The binder Attribute

+

A production can be given the attribute binder. Such a production must have +at least two nonterminals. The first nonterminal from left to right must be of +sort KVar, and contains the bound variable. The last nonterminal from left +to right contains the term that is bound. For example, I could describe lambdas +in the lambda calculus with the production +syntax Val ::= "lambda" KVar "." Exp [binder].

+

Substitution

+

K provides a hooked implementation of substitution, currently only implemented +on the Java and LLVM backends. Two variants exist: the first substitutes +a single KVar for a single KItem. The second takes a Map with KVar +keys and KItem values, and substitutes each element in the map atomically.

+

Internally, this is implemented in the LLVM backend by a combination of +de Bruijn indices for bound variables and names for free variables. Free +variables are also sometimes given a unique numeric identifier in order to +prevent capture, and the rewriter will automatically assign unique names to +such identifiers when rewriting finishes. The names assigned will always begin +with the original name of the variable and be followed by a unique integer +suffix. However, the names assigned after rewriting finishes might be different +from the names that would be assigned if rewriting were to halt prematurely, +for example due to krun --depth.

+
k
module SUBSTITUTION + imports private MAP + imports KVAR + + syntax {Sort} Sort ::= Sort "[" KItem "/" KItem "]" [function, hook(SUBSTITUTION.substOne), impure] + syntax {Sort} Sort ::= Sort "[" Map "]" [function, hook(SUBSTITUTION.substMany), impure] +endmodule +
+
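Putting the binder attribute and the substitution operator together, a minimal sketch of call-by-value beta-reduction might look as follows. The module and sort names are illustrative only, and evaluation order (strictness) is deliberately omitted; this is not the LAMBDA definition shipped with K.

.k .exclude
module LAMBDA-EXAMPLE
  imports SUBSTITUTION

  syntax Val ::= "lambda" KVar "." Exp [binder]
  syntax Exp ::= Val
               | KVar
               | Exp Exp     [left]
               | "(" Exp ")" [bracket]

  // beta-reduction: substitute the argument V for the bound variable X in the body E
  rule (lambda X:KVar . E:Exp) V:Val => E[V / X]
endmodule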
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/include/kframework/builtin/timer/index.html b/k-distribution/include/kframework/builtin/timer/index.html new file mode 100644 index 00000000000..cee1412d9c0 --- /dev/null +++ b/k-distribution/include/kframework/builtin/timer/index.html @@ -0,0 +1,389 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+
k
module TIMER + syntax K ::= "timerStart" "(" ")" [function, hook(TIMER.timerStart)] + + syntax K ::= "timerStop" "(" ")" [function, hook(TIMER.timerStop)] +endmodule +
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/include/kframework/index.html b/k-distribution/include/kframework/index.html new file mode 100644 index 00000000000..2e88a5559de --- /dev/null +++ b/k-distribution/include/kframework/index.html @@ -0,0 +1,413 @@ + + + + + + + + + + + + + + +K Builtins | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

K Builtins

+

The K Builtins (also referred to as the K Prelude or the K Standard Library) +consist of several files which contain definitions that make working with K +simpler. These files can be found under include/kframework/builtin in your K +installation directory, and can be imported with requires "FILENAME" (without +the path prefix), as sketched after the list below.

+
    +
  • domains: Basic datatypes which are universally useful.
  • +
  • kast: Representation of K internal data-structures (not to be +included in normal definitions).
  • +
  • prelude: Automatically included into every K definition.
  • +
  • ffi: FFI interface for calling out to native C code from K.
  • +
  • json: JSON datatype and parsers/unparsers for JSON strings.
  • +
  • rat: Rational number representation.
  • +
  • substitution: Hooked implementation of capture-aware +substitution for K definitions.
  • +
  • unification: Hooked implementation of unification +exposed directly to K definitions.
  • +
+
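As a short sketch (the module name MY-SEMANTICS is made up; the file names follow the .md naming used by the prelude, e.g. rat.md and substitution.md), a definition using two of these libraries might begin:

.k .exclude
requires "rat.md"
requires "substitution.md"

module MY-SEMANTICS
  imports RAT
  imports SUBSTITUTION

  // ... sentences using Rat and the substitution operators ...
endmodule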
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/01_installing/index.html b/k-distribution/k-tutorial/1_basic/01_installing/index.html new file mode 100644 index 00000000000..3e9a4eef2f1 --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/01_installing/index.html @@ -0,0 +1,481 @@ + + + + + + + + + + + + + + +Lesson 1.1: Setting up a K Environment | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.1: Setting up a K Environment

+

The first step to learning K is to install K on your system, and configure your +editor for K development.

+

Installing K

+

You have two options for how to install K, depending on how you intend to +interact with the K codebase. If you are solely a user of K, and have no +interest in developing or making changes to K, you most likely will want to +install one of our binary releases of K. However, if you are going to be a K +developer, or simply want to build K from source, you should follow the +instructions for a source build of K.

+

Installing K from a binary release

+

K is developed as a rolling release, with each change to K that passes our +CI infrastructure being deployed on GitHub for download. The latest release of +K can be downloaded here. +This page also contains information on how to install K. It is recommended +that you fully uninstall the old version of K prior to installing the new one, +as K does not maintain entries in package manager databases, with the exception +of Homebrew on MacOS.

+

Installing K from source

+

You can clone K from GitHub with the following Git command:

+
git clone https://github.com/runtimeverification/k --recursive
+
+

Instructions on how to build K from source can be found +here.

+

Configuring your editor

+

K maintains a set of scripts for a variety of text editors, including vim and +emacs, in various states of maintenance. You can download these scripts with +the following Git command:

+
git clone https://github.com/kframework/k-editor-support
+
+

Because K allows users to define their own grammars for parsing K itself, +not all features of K can be effectively highlighted. However, at the cost of +occasionally highlighting things incorrectly, you can get some pretty good +results in many cases. With that being said, some of the editor scripts in the +above repository are pretty out of date. If you manage to improve them, we +welcome pull requests into the repository.

+

Troubleshooting

+

If you have problems installing K, we encourage you to reach out to us. If you +follow the above install instructions and run into a problem, you can +create a bug report on GitHub.

+

Next lesson

+

Once you have set up K on your system to your satisfaction, you can continue to +Lesson 1.2: Basics of Functional K.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/02_basics/index.html b/k-distribution/k-tutorial/1_basic/02_basics/index.html new file mode 100644 index 00000000000..71a10d0bd08 --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/02_basics/index.html @@ -0,0 +1,620 @@ + + + + + + + + + + + + + + +Lesson 1.2: Basics of Functional K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.2: Basics of Functional K

+

The purpose of this lesson is to explain the basics of productions and +rules in K. These are two types of K sentences. A K file consists of +one or more requires or modules in K. Each module consists of one or +more imports or sentences. For more information on requires, modules, and +sentences, refer to Lesson 1.5. However, for the time +being, just think of a module as a container for sentences, and don't worry +about requires or imports just yet.

+

Our first K program

+

To start with, input the following program into your editor as file +lesson-02-a.k:

+
k
module LESSON-02-A + + syntax Color ::= Yellow() | Blue() + syntax Fruit ::= Banana() | Blueberry() + syntax Color ::= colorOf(Fruit) [function] + + rule colorOf(Banana()) => Yellow() + rule colorOf(Blueberry()) => Blue() + +endmodule +
+

Save this file and then run:

+
kompile lesson-02-a.k
+
+

kompile is K's compiler. By default, it takes a program or specification +written in K and compiles it into an interpreter for that input. Right now we +are compiling a single file. A set of K files that are compiled together are +called a K definition. We will cover multiple file K definitions later on. +kompile will output a directory containing everything needed to execute +programs and perform proofs using that definition. In this case, kompile will +(by default) create the directory lesson-02-a-kompiled under the current +directory.

+

Now, save the following input file in your editor as banana.color in the same +directory as lesson-02-a.k:

+
colorOf(Banana())
+
+

We can now evaluate this K term by running (from the same directory):

+
krun banana.color
+
+

krun will use the interpreter generated by the first call to kompile to +execute this program.

+

You will get the following output:

+
<k>
+  Yellow ( ) ~> .
+</k>
+
+

For now, don't worry about the <k>, </k>, or ~> . portions of this +output file.

+

You can also execute small programs directly by specifying them on the command +line instead of putting them in a file. For example, the same program above +could also have been executed by running the following command:

+
krun -cPGM='colorOf(Banana())'
+
+

Now, let's look at what this definition and program did.

+

Productions, Constructors, and Functions

+

The first thing to realize is that this K definition contains 5 productions. +Productions are introduced with the syntax keyword, followed by a sort, +followed by the operator ::= followed by the definition of one or more +productions themselves, separated by the | operator. There are different +types of productions, but for now we only care about constructors and +functions. Each declaration separated by the | operator is individually +a single production, and the | symbol simply groups together productions that +have the same sort. For example, we could equally have written an identical K +definition lesson-02-b.k like so:

+
k
module LESSON-02-B + + syntax Color ::= Yellow() + syntax Color ::= Blue() + syntax Fruit ::= Banana() + syntax Fruit ::= Blueberry() + syntax Color ::= colorOf(Fruit) [function] + + rule colorOf(Banana()) => Yellow() + rule colorOf(Blueberry()) => Blue() + +endmodule +
+

You can try compiling and running lesson-02-b.k to see that it produces the same output as lesson-02-a.k:

+
kompile lesson-02-b.k
+krun -cPGM='colorOf(Banana())' --definition 'lesson-02-b-kompiled'
+
+

where the --definition option points to the directory containing a compiled version of LESSON-02-B. +Even the following definition is equivalent:

+
k
module LESSON-02-C + + syntax Color ::= Yellow() + | Blue() + | colorOf(Fruit) [function] + syntax Fruit ::= Banana() + | Blueberry() + + rule colorOf(Banana()) => Yellow() + rule colorOf(Blueberry()) => Blue() + +endmodule +
+

Each of these types of productions named above has the same underlying syntax, +but context and attributes are used to distinguish between the different +types. Tokens, brackets, lists, macros, aliases, and anywhere productions will +be covered in a later lesson, but this lesson does introduce us to constructors +and functions. Yellow(), Blue(), Banana(), and Blueberry() are +constructors. You can think of a constructor like a constructor for an +algebraic data type, if you're familiar with a functional language. The data +type itself is the sort that appears on the left of the ::= operator. Sorts +in K consist of uppercase identifiers.

+

Constructors can have arguments, but these ones do not. We will cover the +syntax of productions in detail in the next lesson, but for now, you can write +a production with no arguments as an uppercase or lowercase identifier followed +by the () operator.

+

A function is distinguished from a constructor by the presence of the +function attribute. Attributes appear in a comma separated list between +square brackets after any sentence, including both productions and rules. +Various attributes with built-in meanings exist in K and will be discussed +throughout the tutorial.

+

Exercise

+

Use krun to compute the return value of the colorOf function on a +Blueberry().

+

Rules, Matching, and Variables

+

Functions in K are given definitions using rules. A rule begins with the rule +keyword and contains at least one rewrite operator. The rewrite operator +is represented by the syntax =>. The rewrite operator is one of the built-in +productions in K, and we will discuss in more detail how it can be used in +future lessons, but for now, you can think of a rule as consisting of a +left-hand side and a right-hand side, separated by the rewrite +operator. On the left-hand side is the name of the function and zero or more +patterns corresponding to the parameters of the function. On the right-hand +side is another pattern. The meaning of the rule is relatively simple, having +defined these components. If the function is called with arguments that +match the patterns on the left-hand side, then the return value of the +function is the pattern on the right-hand side.

+

For example, in the above example, if the argument of the colorOf function +is Banana(), then the return value of the function is Yellow().

+

So far we have introduced that a constructor is a type of pattern in K. We +will introduce more complex patterns in later lessons, but there is one other +type of basic pattern: the variable. A variable, syntactically, consists +of an uppercase identifier. However, unlike a constructor, a variable will +match any pattern with one exception: Two variables with the same name +must match the same pattern.

+

Here is a more complex example (lesson-02-d.k):

+
k
module LESSON-02-D + + syntax Container ::= Jar(Fruit) + syntax Fruit ::= Apple() | Pear() + + syntax Fruit ::= contentsOfJar(Container) [function] + + rule contentsOfJar(Jar(F)) => F + +endmodule +
+

Here we see that Jar is a constructor with a single argument. You can write a +production with multiple arguments by putting the sorts of the arguments in a +comma-separated list inside the parentheses.

+
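For instance (a hypothetical constructor, not part of lesson-02-d.k), a basket holding two fruits could be declared like this:

.k .exclude
  syntax Container ::= Basket(Fruit, Fruit)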

In the lesson-02-d example, F is a variable. It will match either Apple() or Pear(). +The return value of the function is created by substituting the matched +values of all of the variables into the occurrences of those variables on the +right-hand side of the rule.

+

To demonstrate, compile this definition and execute the following program with +krun:

+
contentsOfJar(Jar(Apple()))
+
+

You will see when you run it that the program returns Apple(), because that +is the pattern that was matched by F.

+

Exercises

+
    +
  1. Extend the definition in lesson-02-a.k with the addition of blackberries +and kiwis. For simplicity, blackberries are black and kiwis are green. Then +compile your definition and test that your additional fruits are correctly +handled by the colorOf function.
  2. Create a new definition which defines an outfit as a multi-argument +constructor consisting of a hat, shirt, pants, and shoes. Define a new sort, +Boolean, with two constructors, true and false. Each of hat, shirt, pants, +and shoes will have a single argument (a color), either black or +white. Then define an outfitMatching function that will return true if all +the pieces of the outfit are the same color. You do not need to define the +case that returns false. Write some tests that your function behaves the way +you expect.
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.3: BNF Syntax and Parser Generation.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/03_parsing/index.html b/k-distribution/k-tutorial/1_basic/03_parsing/index.html new file mode 100644 index 00000000000..316afd6fc77 --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/03_parsing/index.html @@ -0,0 +1,788 @@ + + + + + + + + + + + + + + +Lesson 1.3: BNF Syntax and Parser Generation | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.3: BNF Syntax and Parser Generation

+

The purpose of this lesson is to explain the full syntax and semantics of +productions in K, as well as how productions and other syntactic +sentences can be used to define grammars for parsing both rules +and programs.

+

K's approach to parsing

+

K's grammar is divided into two components: the outer syntax of K and the +inner syntax of K. Outer syntax refers to the parsing of requires, +modules, imports, and sentences in a K definition. Inner syntax +refers to the parsing of rules and programs. Unlike the outer syntax of +K, which is predetermined, much of the inner syntax of K is defined by you, the +developer. When rules or programs are parsed, they are parsed within the +context of a module. Rules are parsed in the context of the module in which +they exist, whereas programs are parsed in the context of the +main syntax module of a K definition. The productions and other syntactic +sentences in a module are used to construct the grammar of the module, which +is then used to perform parsing.

+

Basic BNF productions

+

To illustrate how this works, we will consider a simple K definition which +defines a relatively basic calculator capable of evaluating Boolean expressions +containing and, or, not, and xor.

+

Input the following program into your editor as file lesson-03-a.k:

+
k
module LESSON-03-A + + syntax Boolean ::= "true" | "false" + | "!" Boolean [function] + | Boolean "&&" Boolean [function] + | Boolean "^" Boolean [function] + | Boolean "||" Boolean [function] + +endmodule +
+

You will notice that the productions in this file look a little different than +the ones from the previous lesson. In point of fact, K has two different +mechanisms for defining productions. We have previously been focused +exclusively on the first mechanism, where the ::= symbol is followed by an +alphanumeric identifier followed by a comma-separated list of sorts in +parentheses. However, this is merely a special case of a more generic mechanism +for defining the syntax of productions using a variant of +BNF Form.

+

For example, in the previous lesson, we had the following set of productions:

+
k
module LESSON-03-B + syntax Color ::= Yellow() | Blue() + syntax Fruit ::= Banana() | Blueberry() + syntax Color ::= colorOf(Fruit) [function] +endmodule +
+

It turns out that this is equivalent to the following definition which defines +the same grammar, but using BNF notation:

+
k
module LESSON-03-C + syntax Color ::= "Yellow" "(" ")" | "Blue" "(" ")" + syntax Fruit ::= "Banana" "(" ")" | "Blueberry" "(" ")" + syntax Color ::= "colorOf" "(" Fruit ")" [function] +endmodule +
+

In this example, the sorts of the argument to the function are unchanged, but +everything else has been wrapped in double quotation marks. This is because +in BNF notation, we distinguish between two types of production items: +terminals and non-terminals. A terminal represents simply a literal +string of characters that is verbatim part of the syntax of that production. +A non-terminal, conversely, represents a sort name, where the syntax of that +production accepts any valid term of that sort at that position.

+

This is why, when we wrote the program colorOf(Banana()), krun was able to +execute that program: because it represented a term of sort Color that was +parsed and interpreted by K's interpreter. In other words, krun parses and +interprets terms according to the grammar defined by the developer. It is +automatically converted into an AST of that term, and then the colorOf +function is evaluated using the function rules provided in the definition.

+

You can ask yourself: How does K match the strings between the double quotes? +The answer is that K uses Flex to generate a scanner for the grammar. Flex looks +for the longest possible match of a regular expression in the input. If there +are ambiguities between 2 or more regular expressions, it will pick the one with +the highest prec attribute. You can learn more about how Flex matching works +here.

+
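As a sketch of how prec breaks such a tie (both token productions below are invented and assume a surrounding module), when the scanner reads the string abc, both regular expressions match it with the same length, and the production with the higher prec is chosen:

.k .exclude
  syntax Id      ::= r"[a-z]+" [token, prec(1)]
  syntax Keyword ::= r"abc"    [token, prec(2)]   // wins over Id when both match "abc"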

Bringing us back to the file lesson-03-a.k, we can see that this grammar +has given a simple BNF grammar for expressions over Booleans. We have defined +constructors corresponding to the Boolean values true and false, and functions +corresponding to the Boolean operators for and, or, not, and xor. We have also +given a syntax for each of these functions based on their syntax in the C +programming language. As such, we can now write programs in the simple language +we have defined.

+

Input the following program into your editor as and.bool in the same +directory:

+
true && false
+
+

We cannot interpret this program yet, because we have not given rules defining +the meaning of the && function yet, but we can parse it. To do this, you can +run (from the same directory):

+
kast --output kore and.bool
+
+

kast is K's just-in-time parser. It will generate a grammar from your K +definition on the fly and use it to parse the program passed on the command +line. The --output flag controls how the resulting AST is represented; don't +worry about the possible values yet, just use kore.

+

You ought to get the following AST printed on standard output, minus the +formatting:

+
inj{SortBoolean{}, SortKItem{}}(
+  Lbl'UndsAnd-And-UndsUnds'LESSON-03-A'Unds'Boolean'Unds'Boolean'Unds'Boolean{}(
+    Lbltrue'Unds'LESSON-03-A'Unds'Boolean{}(),
+    Lblfalse'Unds'LESSON-03-A'Unds'Boolean{}()
+  )
+)
+
+

Don't worry about what exactly this means yet, just understand that it +represents the AST of the program that you just parsed. You ought to be able +to recognize the basic shape of it by seeing the words true, false, and +And in there. This is Kore, the intermediate representation of K, and we +will cover it in detail later.

+

Note that you can also tell kast to print the AST in other formats. For a +more direct representation of the original K, while still maintaining the +structure of an AST, you can say kast --output kast and.bool. This will +yield the following output:

+
`_&&__LESSON-03-A_Boolean_Boolean_Boolean`(
+  `true_LESSON-03-A_Boolean`(.KList),
+  `false_LESSON-03-A_Boolean`(.KList)
+)
+
+

Note how the first output is largely a name-mangled version of the second +output. The one difference is the presence of the inj symbol in the KORE +output. We will talk more about this in later lessons.

+

Exercise

+

Parse the expression false || true with --output kast. See if you can +predict approximately what the corresponding output would be with +--output kore, then run the command yourself and compare it to your +prediction.

+

Ambiguities

+

Now let's try a slightly more advanced example. Input the following program +into your editor as and-or.bool:

+
true && false || false
+
+

When you try and parse this program, you ought to see the following error:

+
[Error] Inner Parser: Parsing ambiguity.
+1: syntax Boolean ::= Boolean "||" Boolean [function]
+
+`_||__LESSON-03-A_Boolean_Boolean_Boolean`(`_&&__LESSON-03-A_Boolean_Boolean_Boolean`(`true_LESSON-03-A_Boolean`(.KList),`false_LESSON-03-A_Boolean`(.KList)),`false_LESSON-03-A_Boolean`(.KList))
+2: syntax Boolean ::= Boolean "&&" Boolean [function]
+
+`_&&__LESSON-03-A_Boolean_Boolean_Boolean`(`true_LESSON-03-A_Boolean`(.KList),`_||__LESSON-03-A_Boolean_Boolean_Boolean`(`false_LESSON-03-A_Boolean`(.KList),`false_LESSON-03-A_Boolean`(.KList)))
+        Source(./and-or.bool)
+        Location(1,1,1,23)
+
+

This error is saying that kast was unable to parse this program because it is +ambiguous. K's just-in-time parser is a GLL parser, which means it can handle +the full generality of context-free grammars, including those grammars which +are ambiguous. An ambiguous grammar is one where the same string can be parsed +as multiple distinct ASTs. In this example, it can't decide whether it should +be parsed as (true && false) || false or as true && (false || false). As a +result, it reports the error to the user.

+

Brackets

+

Currently there is no way of resolving this ambiguity, making it impossible +to write complex expressions in this language. This is obviously a problem. +The standard solution in most programming languages to this problem is to +use parentheses to indicate the appropriate grouping. K generalizes this notion +into a type of production called a bracket. A bracket production in K +is any production with the bracket attribute. It is required that such a +production only have a single non-terminal, and the sort of the production +must equal the sort of that non-terminal. However, K does not otherwise +impose restrictions on the grammar the user provides for a bracket. With that +being said, the most common type of bracket is one in which a non-terminal +is surrounded by terminals representing some type of bracket such as +(), [], {}, <>, etc. For example, we can define the most common +type of bracket, the type used by the vast majority of programming languages, +quite simply.

+

Consider the following modified definition, which we will save to +lesson-03-d.k:

+
k
module LESSON-03-D + + syntax Boolean ::= "true" | "false" + | "(" Boolean ")" [bracket] + | "!" Boolean [function] + | Boolean "&&" Boolean [function] + | Boolean "^" Boolean [function] + | Boolean "||" Boolean [function] + +endmodule +
+

In this definition, if the user does not explicitly write parentheses, the +grammar remains ambiguous and K's just-in-time parser will report an error. +However, you are now able to parse more complex programs by means of explicitly +grouping subterms with the bracket we have just defined.

+

Consider and-or-left.bool:

+
(true && false) || false
+
+

Now consider and-or-right.bool:

+
true && (false || false)
+
+

If you parse these programs with kast, you will once again get a single +unique AST with no error. If you look, you might notice that the bracket itself +does not appear in the AST. In fact, this is a property unique to brackets: +productions with the bracket attribute are not represented in the parsed AST +of a term, and the child of the bracket is folded immediately into the parent +term. This is the reason for the requirement that a bracket production have +a single non-terminal of the same sort as the production itself.

+

Exercise

+

Write out what you expect the AST to be arising from parsing these two programs +above with --output kast, then parse them yourself and compare them to the +AST you expected. Confirm for yourself that the bracket production does not +appear in the AST.

+

Tokens

+

So far we have seen how we can define the grammar of a language. However, +the grammar is not the only relevant part of parsing a language. Also relevant +is the lexical syntax of the language. Thus far, we have implicitly been using +K's automatic lexer generation to generate a token in the scanner for each +terminal in our grammar. However, sometimes we wish to define more complex +lexical syntax. For example, consider the case of integers in C: an integer +consists of a decimal, octal, or hexadecimal number followed by an optional +suffix indicating the type of the literal.

+

In theory it would be possible to define this syntax via a grammar, but not +only would it be cumbersome and tedious, you would also then have to deal with +an AST generated for the literal which is not convenient to work with.

+

Instead of doing this, K allows you to define token productions, where +a production consists of a regular expression followed by the token +attribute, and the resulting AST consists of a typed string containing the +value recognized by the regular expression.

+

For example, the builtin integers in K are defined using the following +production:

+
.k .exclude
syntax Int ::= r"[\\+\\-]?[0-9]+" [token] +
+

Here we can see that we have defined that an integer is an optional sign +followed by a non-empty sequence of digits. The r preceding the terminal +indicates that what appears inside the double quotes is a regular expression, +and the token attribute indicates that terms which parse as this production +should be converted into a token by the parser.

+

It is also possible to define tokens that do not use regular expressions. This +can be useful when you wish to declare particular identifiers for use in your +semantics later. For example:

+
.k .exclude
syntax Id ::= "main" [token] +
+

Here, we declare that main is a token of sort Id. Instead of being parsed +as a symbol, it gets parsed as a token, generating a typed string in the AST. +This is useful in a semantics of C because the parser generally does not treat +the main function in C specially; only the semantics treats it specially.

+

Of course, languages can have more complex lexical syntax. For example, if we +wish to define the syntax of integers in C, we could use the following +production:

+
.k .exclude
syntax IntConstant ::= r"(([1-9][0-9]*)|(0[0-7]*)|(0[xX][0-9a-fA-F]+))(([uU][lL]?)|([uU]((ll)|(LL)))|([lL][uU]?)|(((ll)|(LL))[uU]?))?" [token] +
+

As you may have noted above, long and complex regular expressions +can be hard to read. They also suffer from the problem that unlike a grammar, +they are not particularly modular.

+

We can get around this restriction by declaring explicit regular expressions, +giving them a name, and then referring to them in productions.

+

Consider the following (equivalent) way to define the lexical syntax of +integers in C:

+
.k .exclude
syntax IntConstant ::= r"({DecConstant}|{OctConstant}|{HexConstant})({IntSuffix}?)" [token] +syntax lexical DecConstant = r"{NonzeroDigit}({Digit}*)" +syntax lexical OctConstant = r"0({OctDigit}*)" +syntax lexical HexConstant = r"{HexPrefix}({HexDigit}+)" +syntax lexical HexPrefix = r"0x|0X" +syntax lexical NonzeroDigit = r"[1-9]" +syntax lexical Digit = r"[0-9]" +syntax lexical OctDigit = r"[0-7]" +syntax lexical HexDigit = r"[0-9a-fA-F]" +syntax lexical IntSuffix = r"{UnsignedSuffix}({LongSuffix}?)|{UnsignedSuffix}{LongLongSuffix}|{LongSuffix}({UnsignedSuffix}?)|{LongLongSuffix}({UnsignedSuffix}?)" +syntax lexical UnsignedSuffix = r"[uU]" +syntax lexical LongSuffix = r"[lL]" +syntax lexical LongLongSuffix = r"ll|LL" +
+

As you can see, this is rather more verbose, but it has the benefit of both +being much easier to read and understand, and also increased modularity. +Note that we refer to a named regular expression by putting the name in curly +brackets. Note also that only the first sentence actually declares a new piece +of syntax in the language. When the user writes syntax lexical, they are only +declaring a regular expression. To declare an actual piece of syntax in the +grammar, you still must actually declare an explicit token production.

+

One final note: K uses Flex to implement +its lexical analysis. As a result, you can refer to the +Flex Manual +for a detailed description of the regular expression syntax supported. Note +that for performance reasons, Flex's regular expressions are actually a regular +language, and thus lack some of the syntactic convenience of modern +"regular expression" libraries. If you need features that are not part of the +syntax of Flex regular expressions, you are encouraged to express them via +a grammar instead.

+

Ahead-of-time parser generation

+

So far we have been entirely focused on K's support for just-in-time parsing, +where the parser is generated on the fly prior to being used. This benefits +from being faster to generate the parser, but it suffers in performance if you +have to repeatedly parse strings with the same parser. For this reason, it is +generally encouraged that when parsing programs, you use K's ahead-of-time +parser generation. K makes use of +GNU Bison to generate parsers.

+

By default, you can enable ahead-of-time parsing via the --gen-bison-parser +flag to kompile. This will make use of Bison's LR(1) parser generator. As +such, if your grammar is not LR(1), it may not parse exactly the same as if +you were to use the just-in-time parser, because Bison will automatically pick +one of the possible branches whenever it encounters a shift-reduce or +reduce-reduce conflict. In this case, you can either modify your grammar to be +LR(1), or you can enable use of Bison's GLR support by instead passing +--gen-glr-bison-parser to kompile. Note that if your grammar is ambiguous, +the ahead-of-time parser will not provide you with particularly readable error +messages at this time.

+

If you have a K definition named foo.k, and it generates a directory when +you run kompile called foo-kompiled, you can invoke the ahead-of-time +parser you generated by running foo-kompiled/parser_PGM <file> on a file.

+
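For example, with a hypothetical definition foo.k and a program file program.foo (both names are placeholders), the two steps would look roughly like:

kompile --gen-bison-parser foo.k
foo-kompiled/parser_PGM program.foo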

Exercises

+
    +
  1. Compile lesson-03-d.k with ahead-of-time parsing enabled. Then compare +how long it takes to run kast --output kore and-or-left.bool with how long it +takes to run lesson-03-d-kompiled/parser_PGM and-or-left.bool. Confirm for +yourself that both produce the same result, but that the latter is faster.

  2. Define a simple grammar consisting of integers, brackets, addition, +subtraction, multiplication, division, and unary negation. Integers should be +in decimal form and lexically without a sign, whereas negative numbers can be +represented via unary negation. Ensure that you are able to parse some basic +arithmetic expressions using a generated ahead-of-time parser. Do not worry +about disambiguating the grammar or about writing rules to implement the +operations in this definition.

  3. Write a program where the meaning of the arithmetic expression based on +the grammar you defined above is ambiguous, and then write programs that +express each individual intended meaning using brackets.
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.4: Disambiguating Parses.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/04_disambiguation/index.html b/k-distribution/k-tutorial/1_basic/04_disambiguation/index.html new file mode 100644 index 00000000000..6f5716b324f --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/04_disambiguation/index.html @@ -0,0 +1,737 @@ + + + + + + + + + + + + + + +Lesson 1.4: Disambiguating Parses | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.4: Disambiguating Parses

+

The purpose of this lesson is to teach how to use K's builtin features for +disambiguation to transform an ambiguous grammar into an unambiguous one that +expresses the intended ASTs.

+

Priority blocks

+

In practice, very few formal languages outside the domain of natural language +processing are ambiguous. The main reason for this is that parsing unambiguous +languages is asymptotically faster than parsing ambiguous languages. +Programming language designers instead usually use the notions of operator +precedence and associativity to make expression grammars unambiguous. These +mechanisms work by instructing the parser to reject certain ASTs in favor of +others in case of ambiguities; it is often possible to remove all ambiguities +in a grammar with these techniques.

+

While it is sometimes possible to explicitly rewrite the grammar to remove +these parses, because K's grammar specification and AST generation are +inextricably linked, this is generally discouraged. Instead, we use the +approach of explicitly expressing the relative precedence of different +operators in different situations in order to resolve the ambiguity.

+

For example, in C, && binds tighter in precedence than ||, meaning that +the expression true && false || false has only one valid AST: +(true && false) || false.

+

Consider, then, the third iteration on the grammar of this definition +(lesson-04-a.k):

+
k
module LESSON-04-A + + syntax Boolean ::= "true" | "false" + | "(" Boolean ")" [bracket] + > "!" Boolean [function] + > Boolean "&&" Boolean [function] + > Boolean "^" Boolean [function] + > Boolean "||" Boolean [function] + +endmodule +
+

In this example, some of the | symbols separating productions in a single +block have been replaced with >. This serves to describe the +priority groups associated with this block of productions. +The first priority group consists of the atoms of the +language: true, false, and the bracket operator. In general, a priority +group starts either at the ::= or > operator and extends until either the +next > operator or the end of the production block. Thus, we can see that the +second, third, fourth, and fifth priority groups in this grammar all consist +of a single production.

+

The meaning of these priority groups becomes apparent when parsing programs: +A symbol with a lesser priority (i.e., one that binds looser) cannot +appear as the direct child of a symbol with a greater priority (i.e., +one that binds tighter). In this case, the > operator can be seen as a +greater-than operator describing a transitive partial ordering on the +productions in the production block, expressing their relative priority.

+

To see this more concretely, let's look again at the program +true && false || false. As noted before, previously this program was +ambiguous because the parser could either choose that && was the child of || +or vice versa. However, because a symbol with lesser priority (i.e., ||) +cannot appear as the direct child of a symbol with greater priority +(i.e., &&), the parser will reject the parse where || is under the +&& operator. As a result, we are left with the unambiguous parse +(true && false) || false. Similarly, true || false && false parses +unambiguously as true || (false && false). Conversely, if the user explicitly +wants the other parse, they can express this using brackets by explicitly +writing true && (false || false). This still parses successfully because the +|| operator is no longer the direct child of the && operator, but is +instead the direct child of the () operator, and the && operator is an +indirect parent, which is not subject to the priority restriction.

+

Astute readers, however, will already have noticed what seems to be a +contradiction: we have defined () as also having greater priority than ||. +One would think that this should mean that || cannot appear as a direct +child of (). This is a problem because priority groups are applied to every +possible parse separately. That is to say, even if the term is unambiguous +prior to this disambiguation rule, we still reject that parse if it violates +the rule of priority.

+

In fact, however, we do not reject this program as a parse error. Why is that? +Well, the rule for priority is slightly more complex than previously described. +In actual fact, it applies only conditionally. Specifically, it applies in +cases where the child is either the first or last production item in the +parent's production. For example, in the production Bool "&&" Bool, the +first Bool non-terminal is not preceded by any terminals, and the last Bool +non-terminal is not followed by any terminals. As a result of this, we apply +the priority rule to both children of &&. However, in the () operator, +the sole non-terminal is both preceded by and followed by terminals. As a +result, the priority rule is not applied when () is the parent. Because of +this, the program we mentioned above successfully parses.

+

Exercise

+

Parse the program true && false || false using kast, and confirm that the AST +places || as the top level symbol. Then modify the definition so that you +will get the alternative parse.

+

Associativity

+

Even having broken the expression grammar into priority blocks, the resulting +grammar is still ambiguous. We can see this if we try to parse the following +program (assoc.bool):

+
true && false && false
+
+

Priority blocks will not help us here: the ambiguity is between two parses +in which the direct parent and child both come from a +single priority block (in this case, && is in the same block as itself).

+

This is where the notion of associativity comes into play. Associativity +applies the following additional rules to parses:

+
    +
  • a left-associative symbol cannot appear as a direct rightmost child of a +symbol with equal priority;
  • +
  • a right-associative symbol cannot appear as a direct leftmost child of a +symbol with equal priority; and
  • +
  • a non-associative symbol cannot appear as a direct leftmost or rightmost +child of a symbol with equal priority.
  • +
+

In C, binary operators are all left-associative, meaning that the expression +true && false && false parses unambiguously as (true && false) && false, +because && cannot appear as the rightmost child of itself.

+

Consider, then, the fourth iteration on the grammar of this definition +(lesson-04-b.k):

+
k
module LESSON-04-B + + syntax Boolean ::= "true" | "false" + | "(" Boolean ")" [bracket] + > "!" Boolean [function] + > left: Boolean "&&" Boolean [function] + > left: Boolean "^" Boolean [function] + > left: Boolean "||" Boolean [function] + +endmodule +
+

Here each priority group, immediately after the ::= or > operator, can +be followed by a symbol representing the associativity of that priority group: +either left: for left associativity, right: for right associativity, or +non-assoc: for non-associativity. In this example, each priority group we +apply associativity to has only a single production, but we could equally well +write a priority block with multiple productions and an associativity.

+

For example, consider the following, different grammar (lesson-04-c.k):

+
k
module LESSON-04-C + + syntax Boolean ::= "true" | "false" + | "(" Boolean ")" [bracket] + > "!" Boolean [function] + > left: + Boolean "&&" Boolean [function] + | Boolean "^" Boolean [function] + | Boolean "||" Boolean [function] + +endmodule +
+

In this example, unlike the one above, &&, ^, and || have the same +priority. However, viewed as a group, the entire group is left associative. +This means that none of &&, ^, and || can appear as the right child of +any of &&, ^, or ||. As a result of this, this grammar is also not +ambiguous. However, it expresses a different grammar, and you are encouraged +to think about what the differences are in practice.

+

Exercise

+

Parse the program true && false && false yourself, and confirm that the AST +places the rightmost && at the top of the expression. Then modify the +definition to generate the alternative parse.

+

Explicit priority and associativity declarations

+

Previously we have only considered the case where all of the productions +which you wish to express a priority or associativity relation over are +co-located in the same block of productions. However, in practice this is not +always feasible or desirable, especially as a definition grows in size across +multiple modules.

+

As a result of this, K provides a second way of declaring priority and +associativity relations.

+

Consider the following grammar, which we will name lesson-04-d.k and which +will express the exact same grammar as lesson-04-b.k

+
k
module LESSON-04-D + + syntax Boolean ::= "true" [group(literal)] | "false" [group(literal)] + | "(" Boolean ")" [group(atom), bracket] + | "!" Boolean [group(not), function] + | Boolean "&&" Boolean [group(and), function] + | Boolean "^" Boolean [group(xor), function] + | Boolean "||" Boolean [group(or), function] + + syntax priority literal atom > not > and > xor > or + syntax left and + syntax left xor + syntax left or +endmodule +
+

This introduces a couple of new features of K. First, the group(_) attribute +is used to conceptually group together sets of sentences under a common +user-defined name. For example, literal in the syntax priority sentence is +used to refer to all the productions marked with the group(literal) attribute, +i.e., true and false. A production can belong to multiple groups using +syntax such as group(myGrp1,myGrp2).

+

Once we understand this, it becomes relatively straightforward to understand +the meaning of this grammar. Each syntax priority sentence defines a +priority relation where > separates different priority groups. Each priority +group is defined by a list of one or more group names, and consists of all +productions which are members of at least one of those named groups.

+

In the same way, a syntax left, syntax right, or syntax non-assoc sentence +defines an associativity relation among left-, right-, or non-associative +groups. Specifically, this means that:

+
syntax left a b
+
+

is different to:

+
syntax left a
syntax left b
+
+

The first sentence makes the productions in groups a and b left-associative with respect to each other (no production in either group may appear as the direct rightmost child of a production in either group), whereas the two separate sentences only make each group left-associative with respect to itself. As a consequence, syntax [left|right|non-assoc] should not be used to group together labels with different priority.

+

Prefer/avoid

+

Sometimes priority and associativity prove insufficient to disambiguate a +grammar. In particular, sometimes it is desirable to be able to choose between +two ambiguous parses directly while still not rejecting any parses if the term +parsed is unambiguous. A good example of this is the famous "dangling else" +problem in imperative C-like languages.

+

Consider the following definition (lesson-04-E.k):

+
k
module LESSON-04-E

  syntax Exp ::= "true" | "false"
  syntax Stmt ::= "if" "(" Exp ")" Stmt
                | "if" "(" Exp ")" Stmt "else" Stmt
                | "{" "}"
endmodule
+

We can write the following program (dangling-else.if):

+
if (true) if (false) {} else {}
+
+

This is ambiguous because it is unclear whether the else clause is part of +the outer if or the inner if. At first we might try to resolve this with +priorities, saying that the if without an else cannot appear as a child of +the if with an else. However, because the non-terminal in the parent symbol +is both preceded and followed by a terminal, this will not work.

+

Instead, we can resolve the ambiguity directly by telling the parser to +"prefer" or "avoid" certain productions when ambiguities arise. For example, +when we parse this program, we see the following ambiguity as an error message:

+
[Error] Inner Parser: Parsing ambiguity.
1: syntax Stmt ::= "if" "(" Exp ")" Stmt

`if(_)__LESSON-04-E_Stmt_Exp_Stmt`(`true_LESSON-04-E_Exp`(.KList),`if(_)_else__LESSON-04-E_Stmt_Exp_Stmt_Stmt`(`false_LESSON-04-E_Exp`(.KList),`;_LESSON-04-E_Stmt`(.KList),`;_LESSON-04-E_Stmt`(.KList)))
2: syntax Stmt ::= "if" "(" Exp ")" Stmt "else" Stmt

`if(_)_else__LESSON-04-E_Stmt_Exp_Stmt_Stmt`(`true_LESSON-04-E_Exp`(.KList),`if(_)__LESSON-04-E_Stmt_Exp_Stmt`(`false_LESSON-04-E_Exp`(.KList),`;_LESSON-04-E_Stmt`(.KList)),`;_LESSON-04-E_Stmt`(.KList))
        Source(./dangling-else.if)
        Location(1,1,1,30)
+
+

Roughly, we see that the ambiguity is between an if with an else and an if without an else. Since we want to pick the first parse, we can tell K to "avoid" the second parse with the avoid attribute. Consider the following modified definition (lesson-04-f.k):

+
k
module LESSON-04-F

  syntax Exp ::= "true" | "false"
  syntax Stmt ::= "if" "(" Exp ")" Stmt
                | "if" "(" Exp ")" Stmt "else" Stmt [avoid]
                | "{" "}"
endmodule
+

Here we have added the avoid attribute to the else production. As a result, when an ambiguity occurs and one or more of the possible parses has that symbol at the top of the ambiguous part of the parse, we remove those parses from consideration and consider only those remaining. The prefer attribute behaves similarly, but instead removes all parses which do not have that attribute. In both cases, no action is taken if the parse is not ambiguous.

+
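As a sketch of the alternative mentioned above (a hypothetical module, not one of the lesson files), the same disambiguation can be obtained by marking the production we want to win with prefer rather than marking the production we want to lose with avoid:

k
module LESSON-04-F-PREFER
  syntax Exp ::= "true" | "false"
  syntax Stmt ::= "if" "(" Exp ")" Stmt [prefer]
                | "if" "(" Exp ")" Stmt "else" Stmt
                | "{" "}"
endmodule

With this variant, only the parse that has the if-without-else production at the top of the ambiguity is kept, which again attaches the else to the innermost if.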

Exercises

+
  1. Parse the program if (true) if (false) {} else {} using lesson-04-f.k and
     confirm that the else clause is part of the innermost if statement. Then
     modify the definition so that you get the alternative parse.

  2. Modify your solution from Lesson 1.3, Exercise 2 so that unary negation
     binds tighter than multiplication and division, which bind tighter than
     addition and subtraction, and each binary operator is left-associative.
     Write these priority and associativity declarations explicitly, and then
     try to write them inline.

  3. Write a simple grammar containing at least one ambiguity that cannot be
     resolved via priority or associativity, and then use the prefer attribute
     to resolve that ambiguity.

  4. Explain why the following grammar is not labeled ambiguous by the K parser
     when parsing abb, then make the parser realize the ambiguity.
+
k
module EXERCISE4

  syntax Expr ::= "a" Expr "b"
                | "abb"
                | "b"

endmodule
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.5: Modules, Imports, and Requires.

+
+

Lesson 1.5: Modules, Imports, and Requires

+

The purpose of this lesson is to explain how K definitions can be broken into +separate modules and files and how these distinct components combine into a +complete K definition.

+

K's outer syntax

+

Recall from Lesson 1.3 that K's grammar is broken +into two components: the outer syntax of K and the inner syntax of K. +Outer syntax, as previously mentioned, consists of requires, modules, +imports, and sentences. A K semantics is expressed by the set of +sentences contained in the definition. The scope of what is considered +contained in that definition is determined both by the main semantics +module of a K definition, as well as the requires and imports present +in the file that contains that module.

+

Basic module syntax

+

The basic unit of grouping sentences in K is the module. A module consists +of a module name, an optional list of attributes, a list of +imports, and a list of sentences.

+

A module name consists of one or more groups of letters, numbers, or underscores, separated by hyphens. Here are some valid module names: FOO, FOO-BAR, foo0, foo0_bar-Baz9. Here are some invalid module names: -, -FOO, BAR-, FOO--BAR. Stylistically, module names are usually all uppercase with hyphens separating words, but this is not strictly enforced.

+

Some example modules include an empty module:

+
k
module LESSON-05-A

endmodule
+

A module with some attributes:

+
k
module LESSON-05-B [group(attr1,attr2), private]

endmodule
+

A module with some sentences:

+
k
module LESSON-05-C
  syntax Boolean ::= "true" | "false"
  syntax Boolean ::= "not" Boolean [function]
  rule not true => false
  rule not false => true
endmodule
+

Imports

+

Thus far we have only discussed definitions containing a single module. +Definitions can also contain multiple modules, in which one module imports +others.

+

An import in K appears at the top of a module, prior to any sentences. It can +be specified with the imports keyword, followed by a module name.

+

For example, here is a simple definition with two modules (lesson-05-d.k):

+
k
module LESSON-05-D-1
  syntax Boolean ::= "true" | "false"
  syntax Boolean ::= "not" Boolean [function]
endmodule

module LESSON-05-D
  imports LESSON-05-D-1

  rule not true => false
  rule not false => true
endmodule
+

This K definition is equivalent to the definition expressed by the single module LESSON-05-C. Essentially, by importing a module, we include all of the sentences of the imported module in the importing module. There are a few minor differences between importing a module and simply including its sentences in another module directly, but we will cover these differences later. For now, you can think of modules as a way of conceptually grouping sentences in a larger K definition.

+

Exercise

+

Modify lesson-05-d.k to include four modules: one containing the syntax; two modules, each containing one rule, that import the first module; and a final module LESSON-05-D, containing no sentences, that imports the second and third modules. Check to make sure the definition still compiles and that you can still evaluate the not function.

+

Parsing in the presence of multiple modules

+

As you may have noticed, each module in a definition can express a distinct set +of syntax. When parsing the sentences in a module, we use the syntax +of that module, enriched with the basic syntax of K, in order to parse +rules in that module. For example, the following definition is a parser error +(lesson-05-e.k):

+
.k .error
module LESSON-05-E-1
  rule not true => false
  rule not false => true
endmodule

module LESSON-05-E-2
  syntax Boolean ::= "true" | "false"
  syntax Boolean ::= "not" Boolean [function]
endmodule
+

This is because the syntax referenced in module LESSON-05-E-1, namely, not, +true, and false, is not imported by that module. You can solve this problem +by simply importing the modules containing the syntax you want to use in your +sentences.

+
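For instance, a minimal fix (sketched here under a hypothetical module name) is to add the missing import to the module containing the rules:

k
module LESSON-05-E-1-FIXED
  imports LESSON-05-E-2

  rule not true => false
  rule not false => true
endmodule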

Main syntax and semantics modules

+

When we are compiling a K definition, we need to know where to start. We designate two specific entry-point modules: the main syntax module and the main semantics module. The main syntax module, as well as all the modules it imports recursively, are used to create the parser for the programs that you execute with krun. The main semantics module, as well as all the modules it imports recursively, are used to determine the rules that can be applied at runtime in order to execute a program. For example, in the above example, if the main semantics module is LESSON-05-D-1, then not is an uninterpreted function (i.e., it has no rules associated with it), and the rules in module LESSON-05-D are not included.

+

While you can specify the entry point modules explicitly by passing the +--main-module and --syntax-module flags to kompile, by default, if you +type kompile foo.k, then the main semantics module will be FOO and the +main syntax module will be FOO-SYNTAX.

+
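For example, to compile lesson-05-d.k while naming both entry points explicitly, one might write (an illustrative command line):

kompile lesson-05-d.k --main-module LESSON-05-D --syntax-module LESSON-05-D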

Splitting a definition into multiple files

+

So far we have discussed ways to break definitions into separate conceptual components (modules). K also provides a mechanism for combining multiple files into a single K definition: the requires directive.

+

In K, the requires keyword has two meanings. The first, the requires +statement, appears at the top of a K file, prior to any module declarations. It +consists of the keyword requires followed by a double-quoted string. The +second meaning of the requires keyword will be covered in a later lesson, +but it is distinguished because the second case occurs only inside modules.

+

The string passed to the requires statement contains a filename. When you run +kompile on a file, it will look at all of the requires statements in that +file, look up those files on disk, parse them, and then recursively process all +the requires statements in those files. It then combines all the modules in all +of those files together, and uses them collectively as the set of modules to +which imports statements can refer.

+

Putting it all together

+

Putting it all together, here is one possible way in which we could break the +definition lesson-02-c.k from Lesson 1.2 into +multiple files and modules:

+

colors.k:

+
k
module COLORS
  syntax Color ::= Yellow()
                 | Blue()
endmodule
+

fruits.k:

+
k
module FRUITS
  syntax Fruit ::= Banana()
                 | Blueberry()
endmodule
+

colorOf.k:

+
.k .exclude
requires "fruits.k"
requires "colors.k"

module COLOROF-SYNTAX
  imports COLORS
  imports FRUITS

  syntax Color ::= colorOf(Fruit) [function]
endmodule

module COLOROF
  imports COLOROF-SYNTAX

  rule colorOf(Banana()) => Yellow()
  rule colorOf(Blueberry()) => Blue()
endmodule
+

You would then compile this definition with kompile colorOf.k and use it the +same way as the original, single-module definition.

+

Exercise

+

Modify the name of the COLOROF module, and then recompile the definition. +Try to understand why you now get a compiler error. Then, resolve this compiler +error by passing the --main-module and --syntax-module flags to kompile.

+

Include path

+

A note is in order about how paths are resolved in requires statements.

+

By default, the path you specify is allowed to be an absolute or a relative +path. If the path is absolute, that exact file is imported. If the path is +relative, a matching file is looked for within all of the +include directories specified to the compiler. By default, the include +directories include the current working directory, followed by the +include/kframework/builtin directory within your installation of K. You can +also pass one or more directories to kompile via the -I command line flag, +in which case these directories are prepended to the beginning of the list.

+
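For example, if colors.k and fruits.k lived in a sibling directory deps/ (a hypothetical layout), the definition could be compiled with (an illustrative command line):

kompile colorOf.k -I deps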

Exercises

+
  1. Take the solution to Lesson 1.4, Exercise 2 which included the explicit
     priority and associativity declarations, and modify the definition so that
     the syntax of integers and brackets is in one module, the syntax of
     addition, subtraction, and unary negation is in another module, and the
     syntax of multiplication and division is in a third module. Make sure you
     can still parse the same set of expressions as before. Place the priority
     declarations in the main module.

  2. Modify lesson-02-d.k from Lesson 1.2 so that the rules and syntax are in
     separate modules in separate files.

  3. Place the file containing the syntax from Exercise 2 in another directory,
     then recompile the definition. Observe why a compilation error occurs. Then
     fix the compiler error by passing -I to kompile.
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.6: Integers and Booleans.

+
+

Lesson 1.6: Integers and Booleans

+

The purpose of this lesson is to explain the two most basic builtin sorts in K, the Int sort and the Bool sort, representing arbitrary-precision integers and Boolean values, respectively.

+

Builtin sorts in K

+

K provides definitions of some useful sorts in +domains.md, found in the +include/kframework/builtin directory of the K installation. This file is +defined via a +Literate programming +style that we will discuss in a future lesson. We will not cover all of the +sorts found there immediately, however, this lesson discusses some of the +details surrounding integers and Booleans, as well as providing information +about how to look up more detailed knowledge about builtin functions in K's +documentation.

+

Booleans in K

+

The most basic builtin sort K provides is the Bool sort, representing +Boolean values (i.e., true and false). You have already seen how we were +able to create this type ourselves using K's parsing and disambiguation +features. However, in the vast majority of cases, we prefer instead to import +the version of Boolean algebra defined by K itself. Most simply, you can do +this by importing the module BOOL in your definition. For example +(lesson-06-a.k):

+
k
module LESSON-06-A
  imports BOOL

  syntax Fruit ::= Blueberry() | Banana()
  syntax Bool ::= isBlue(Fruit) [function]

  rule isBlue(Blueberry()) => true
  rule isBlue(Banana()) => false
endmodule
+

Here we have defined a simple predicate, i.e., a function returning a Boolean value. We are now able to perform the usual Boolean operations of and, or, and not over these values. For example (lesson-06-b.k):

+
k
module LESSON-06-B
  imports BOOL

  syntax Fruit ::= Blueberry() | Banana()
  syntax Bool ::= isBlue(Fruit) [function]

  rule isBlue(Blueberry()) => true
  rule isBlue(Banana()) => false

  syntax Bool ::= isYellow(Fruit) [function]
                | isBlueOrYellow(Fruit) [function]

  rule isYellow(Banana()) => true
  rule isYellow(Blueberry()) => false

  rule isBlueOrYellow(F) => isBlue(F) orBool isYellow(F)
endmodule
+

In the above example, Boolean inclusive or is performed via the orBool +function, which is defined in the BOOL module. As a matter of convention, +many functions over builtin sorts in K are suffixed with the name of the +primary sort over which those functions are defined. This happens so that the +syntax of K does not (generally) conflict with the syntax of any other +programming language, which would make it harder to define that programming +language in K.

+

Exercise

+

Write a function isBlueAndNotYellow which computes the appropriate Boolean +expression. If you are unsure what the appropriate syntax is to use, you +can refer to the BOOL module in +domains.md. Add a term of +sort Fruit for which isBlue and isYellow both return true, and test that +the isBlueAndNotYellow function behaves as expected on all three Fruits.

+

Syntax Modules

+

For most sorts in domains.md, K defines more than one module that can be +imported by users. For example, for the Bool sort, K defines the BOOL +module that has previously already been discussed, but also provides the +BOOL-SYNTAX module. This module, unlike the BOOL module, only declares the +values true and false, but not any of the functions that operate over the +Bool sort. The rationale is that you may want to import this module into the +main syntax module of your definition in some cases, whereas you generally do +not want to do this with the version of the module that includes all the +functions over the Bool sort. For example, if you were defining the semantics +of C++, you might import BOOL-SYNTAX into the syntax module of your +definition, because true and false are part of the grammar of C++, but +you would only import the BOOL module into the main semantics module, because +C++ defines its own syntax for and, or, and not that is different from the +syntax defined in the BOOL module.

+

Here, for example, is how we might redefine our Boolean expression calculator +to use the Bool sort while maintaining an idiomatic structure of modules +and imports, for the first time including the rules to calculate the values of +expressions themselves (lesson-06-c.k):

+
k
module LESSON-06-C-SYNTAX
  imports BOOL-SYNTAX

  syntax Bool ::= "(" Bool ")" [bracket]
                > "!" Bool [function]
                > left:
                  Bool "&&" Bool [function]
                | Bool "^" Bool [function]
                | Bool "||" Bool [function]
endmodule

module LESSON-06-C
  imports LESSON-06-C-SYNTAX
  imports BOOL

  rule ! B => notBool B
  rule A && B => A andBool B
  rule A ^ B => A xorBool B
  rule A || B => A orBool B
endmodule
+

Note the encapsulation of syntax: the LESSON-06-C-SYNTAX module contains +exactly the syntax of our Boolean expressions, and no more, whereas any other +syntax needed to implement those functions is in the LESSON-06-C module +instead.

+

Exercise

+

Add an "implies" function to the above Boolean expression calculator, using the +-> symbol to represent implication. You can look up K's builtin "implies" +function in the BOOL module in domains.md.

+

Integers in K

+

Unlike most programming languages, where the most basic integer type is a fixed-precision integer type, the most commonly used integer sort in K is the Int sort, which represents the mathematical integers, i.e., arbitrary-precision integers.

+

K provides three main modules for import when using the Int sort. The first, +containing all the syntax of integers as well as all of the functions over +integers, is the INT module. The second, which provides just the syntax +of integer literals themselves, is the INT-SYNTAX module. However, unlike +most builtin sorts in K, K also provides a third module for the Int sort: +the UNSIGNED-INT-SYNTAX module. This module provides only the syntax of +non-negative integers, i.e., natural numbers. The reasons for this involve +lexical ambiguity. Generally speaking, in most programming languages, -1 is +not a literal, but instead a literal to which the unary negation operator is +applied. K thus provides this module to ease in specifying the syntax of such +languages.

+
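As a sketch of why this matters (a hypothetical module, not part of the lesson files), a language that defines its own unary negation operator would import only the non-negative literals, so that -1 parses as negation applied to the literal 1 rather than as a single literal:

k
module LESSON-06-NEGATION
  imports UNSIGNED-INT-SYNTAX

  syntax Exp ::= Int
               | "-" Exp
endmodule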

For detailed information about the functions available over the Int sort, +refer to domains.md. Note again how we append Int to the end of most of the +integer operations to ensure they do not collide with the syntax of other +programming languages.

+

Exercises

+
  1. Extend your solution from Lesson 1.4, Exercise 2 to implement the rules
     that define the behavior of addition, subtraction, multiplication, and
     division. Do not worry about the case when the user tries to divide by
     zero at this time. Use /Int to implement division. Test your new
     calculator implementation by executing the arithmetic expressions you
     wrote as part of Lesson 1.3, Exercise 2. Check to make sure each computes
     the value you expected.

  2. Combine the Boolean expression calculator from this lesson with your
     solution to Exercise 1, and then extend the combined calculator with the
     <, <=, >, >=, ==, and != expressions. Write some Boolean expressions that
     combine integer and Boolean operations, and test to ensure that these
     expressions return the expected truth value.

  3. Compute the following expressions using your solution from Exercise 2:
     7 / 3, 7 / -3, -7 / 3, -7 / -3. Then replace the /Int function in your
     definition with divInt instead, and observe how the value of the above
     expressions changes. Why does this occur?
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.7: Side Conditions and Rule Priority.

+
+

Lesson 1.7: Side Conditions and Rule Priority

+

The purpose of this lesson is to explain how to write conditional rules in K, +and to explain how to control the order in which rules are tried.

+

Side Conditions

+

So far, all of the rules we have discussed have been unconditional rules. +If the left-hand side of the rule matches the arguments to the function, the +rule applies. However, there is another type of rule, a conditional rule. +A conditional rule consists of a rule body containing the patterns to +match, and a side condition representing a Boolean expression that must +evaluate to true in order for the rule to apply.

+

Side conditions in K are introduced via the requires keyword immediately +following the rule body. For example, here is a rule with a side condition +(lesson-07-a.k):

+
k
module LESSON-07-A
  imports BOOL
  imports INT

  syntax Grade ::= "letter-A"
                 | "letter-B"
                 | "letter-C"
                 | "letter-D"
                 | "letter-F"
                 | gradeFromPercentile(Int) [function]

  rule gradeFromPercentile(I) => letter-A requires I >=Int 90
endmodule
+

In this case, the gradeFromPercentile function takes a single integer argument. The function evaluates to letter-A if the argument passed is greater than or equal to 90. Note that the side condition is allowed to refer to variables that appear on the left-hand side of the rule. In the same manner as variables appearing on the right-hand side, variables that appear in the side condition evaluate to the value that was matched on the left-hand side. The functions in the side condition are then evaluated, which returns a term of sort Bool. If that term is equal to true, then the rule applies. Bear in mind that the side condition is only evaluated at all if the patterns on the left-hand side of the rule match the term being evaluated.

+

Exercise

+

Write a rule that evaluates gradeFromPercentile to letter-B if the argument +to the function is in the range [80,90). Test that the function correctly +evaluates various numbers between 80 and 100.

+

owise Rules

+

So far, all the rules we have introduced have had the same priority. What +this means is that K does not necessarily enforce an order in which the rules +are tried. We have only discussed functions so far in K, so it is not +immediately clear why this choice was made, given that a function is not +considered well-defined if multiple rules for evaluating it are capable of +evaluating the same arguments to different results. However, in future lessons +we will discuss other types of rules in K, some of which can be +non-deterministic. What this means is that if more than one rule is capable +of matching, then K will explore both possible rules in parallel, and consider +each of their respective results when executing your program. Don't worry too +much about this right now, but just understand that because of the potential +later for nondeterminism, we don't enforce a total ordering on the order in +which rules are attempted to be applied.

+

However, sometimes this is not practical; it can be very convenient to express that a particular rule applies only if no other rules for that function are applicable. This can be expressed by adding the owise attribute to a rule. In practice, this means that the rule has lower priority than other rules, and will only be tried after all the other, higher-priority rules have been tried and have failed.

+

For example, in the above exercise, we had to add a side condition containing +two Boolean comparisons to the rule we wrote to handle letter-B grades. +However, in practice this meant that we compare the percentile to 90 twice. We +can more efficiently and more idiomatically write the letter-B case for the +gradeFromPercentile rule using the owise attribute (lesson-07-b.k):

+
k
module LESSON-07-B
  imports BOOL
  imports INT

  syntax Grade ::= "letter-A"
                 | "letter-B"
                 | "letter-C"
                 | "letter-D"
                 | "letter-F"
                 | gradeFromPercentile(Int) [function]

  rule gradeFromPercentile(I) => letter-A requires I >=Int 90
  rule gradeFromPercentile(I) => letter-B requires I >=Int 80 [owise]
endmodule
+

This rule is saying, "if all the other rules do not apply, then the grade is a +B if the percentile is greater than or equal to 80." Note here that we use both +a side condition and an owise attribute on the same rule. This is not +required (as we will see later), but it is allowed. What this means is that the +side condition is only tried if the other rules did not apply and the +left-hand side of the rule matched. You can even use more complex matching on +the left-hand side than simply a variable. More generally, you can also have +multiple higher-priority rules, or multiple owise rules. What this means in +practice is that all of the non-owise rules are tried first, in any order, +followed by all the owise rules, in any order.

+
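For instance (a hypothetical sketch, not part of the lesson files), an owise rule needs no side condition at all, and the higher-priority rules may match on constructors rather than plain variables:

k
module LESSON-07-OWISE-SKETCH
  imports BOOL

  syntax Grade ::= "letter-A" | "letter-B" | "letter-C" | "letter-D" | "letter-F"
  syntax Bool ::= isPassing(Grade) [function]

  // the constructor pattern is tried first; everything else falls through
  rule isPassing(letter-F) => false
  rule isPassing(_)        => true  [owise]
endmodule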

Exercise

+

The grades D and F correspond to the percentile ranges [60, 70) and [0, 60) +respectively. Write another implementation of gradeFromPercentile which +handles only these cases, and uses the owise attribute to avoid redundant +Boolean comparisons. Test that various percentiles in the range [0, 70) are +evaluated correctly.

+

Rule Priority

+

As it happens, the owise attribute is a specific case of a more general +concept we call rule priority. In essence, each rule is assigned an integer +priority. Rules are tried in increasing order of priority, starting with a +rule with priority zero, and trying each increasing numerical value +successively.

+

By default, a rule is assigned a priority of 50. If the rule has the owise +attribute, it is instead given the priority 200. You can see why this will +cause owise rules to be tried after regular rules.

+

However, it is also possible to directly assign a numerical priority to a rule +via the priority attribute. For example, here is an alternative way +we could express the same two rules in the gradeFromPercentile function +(lesson-07-c.k):

+
k
module LESSON-07-C
  imports BOOL
  imports INT

  syntax Grade ::= "letter-A"
                 | "letter-B"
                 | "letter-C"
                 | "letter-D"
                 | "letter-F"
                 | gradeFromPercentile(Int) [function]

  rule gradeFromPercentile(I) => letter-A requires I >=Int 90 [priority(50)]
  rule gradeFromPercentile(I) => letter-B requires I >=Int 80 [priority(200)]
endmodule
+

We can, of course, assign a priority equal to any non-negative integer. For +example, here is a more complex example that handles the remaining grades +(lesson-07-d.k):

+
k
module LESSON-07-D
  imports BOOL
  imports INT

  syntax Grade ::= "letter-A"
                 | "letter-B"
                 | "letter-C"
                 | "letter-D"
                 | "letter-F"
                 | gradeFromPercentile(Int) [function]

  rule gradeFromPercentile(I) => letter-A requires I >=Int 90 [priority(50)]
  rule gradeFromPercentile(I) => letter-B requires I >=Int 80 [priority(51)]
  rule gradeFromPercentile(I) => letter-C requires I >=Int 70 [priority(52)]
  rule gradeFromPercentile(I) => letter-D requires I >=Int 60 [priority(53)]
  rule gradeFromPercentile(_) => letter-F                     [priority(54)]
endmodule
+

Note that we have introduced a new piece of syntax here: _. This is actually +just a variable. However, as a special case, when a variable is named _, it +does not bind a value that can be used on the right-hand side of the rule, or +in a side condition. Effectively, _ is a placeholder variable that means "I +don't care about this term."

+

In this example, we have explicitly expressed the order in which the rules of +this function are tried. Since rules are tried in increasing numerical +priority, we first try the rule with priority 50, then 51, then 52, 53, and +finally 54.

+

As a final note, remember that if you assign a rule a priority higher than 200, +it will be tried after a rule with the owise attribute, and if you assign +a rule a priority less than 50, it will be tried before a rule with no +explicit priority.

+

Exercises

+
  1. Write a function isEven that returns whether an integer is an even number.
     Use two rules and one side condition. The right-hand side of the rules
     should be Boolean literals. Refer back to domains.md for the relevant
     integer operations.

  2. Modify the calculator application from Lesson 1.6, Exercise 2, so that
     division by zero will no longer make krun crash with a "Division by zero"
     exception. Instead, the / function should not match any of its rules if
     the denominator is zero.

  3. Write your own implementations of ==, <, <=, >, and >= for integers, and
     modify your solution from Exercise 2 to use them. You can use any
     arithmetic operations in the INT module, but do not use any of the
     built-in Boolean functions for comparing integers.

     Hint: Use pattern matching and recursive definitions with rule priorities.
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.8: Literate Programming with Markdown.

+
+

Lesson 1.8: Literate Programming with Markdown

+

The purpose of this lesson is to teach a paradigm for performing literate +programming in K, and explain how this can be used to create K definitions +that are also documentation.

+

Markdown and K

+

The K tutorial so far has been written in +Markdown. Markdown, +for those not already familiar, is a lightweight plain-text format for styling +text. From this point onward, we assume you are familiar with Markdown and how +to write Markdown code. You can refer to the above link for a tutorial if you +are not already familiar.

+

What you may not necessarily realize, however, is that the K tutorial is also +a sequence of K definitions written in the manner of +Literate Programming. +For detailed information about Literate Programming, you can read the linked +Wikipedia article, but the short summary is that literate programming is a way +of intertwining documentation and code together in a manner that allows +executable code to also be, simultaneously, a documented description of that +code.

+

K is provided with built-in support for literate programming using Markdown. +By default, if you pass a file with the .md file extension to kompile, it +will look for any code blocks containing k code in that file, extract out +that K code into pure K, and then compile it as if it were a .k file.

+

A K code block begins with a line of text containing the keyword ```k, +and ends when it encounters another ``` keyword.

+

For example, if you view the markdown source of this document, this is a K +code block:

+
k
module LESSON-08
  imports INT
+

Only the code inside K code blocks will actually be sent to the compiler. The +rest, while it may appear in the document when rendered by a markdown viewer, +is essentially a form of code comment.

+

When you have multiple K code blocks in a document, K will append each one +together into a single file before passing it off to the outer parser.

+

For example, the following code block contains sentences that are part of the +LESSON-08 module that we declared the beginning of above:

+
k
  syntax Int ::= Int "+" Int [function]
  rule I1 + I2 => I1 +Int I2
+

Exercise

+

Compile this file with kompile README.md --main-module LESSON-08. Confirm +that you can use the resulting compiled definition to evaluate the + +function.

+

Markdown Selectors

+

On occasion, you may want to generate multiple K definitions from a single +Markdown file. You may also wish to include a block of syntax-highlighted K +code that nonetheless does not appear as part of your K definition. It is +possible to accomplish this by means of the built-in support for syntax +highlighting in Markdown. Markdown allows a code block that was begun with +``` to be immediately followed by a string which is used to signify what +programming language the following code is written in. However, this feature +actually allows arbitrary text to appear describing that code block. Markdown +parsers are able to parse this text and render the code block differently +depending on what text appears after the backticks.

+

In K, you can use this functionality to specify one or more +Markdown selectors which are used to describe the code block. A Markdown +selector consists of a sequence of characters containing letters, numbers, and +underscores. A code block can be designated with a single selector by appending +the selector immediately following the backticks that open the code block.

+

For example, here is a code block with the foo selector:

+
foo
foo bar
+

Note that this is not K code. By convention, K code should have the k +selector on it. You can express multiple selectors on a code block by putting +them between curly braces and prepending each with the . character. For +example, here is a code block with the foo and k selectors:

+
.k .foo
  syntax Int ::= foo(Int) [function]
  rule foo(0) => 0
+

Because this code block contains the k Markdown selector, by default it is +included as part of the K definition being compiled.

+

Exercise

+

Confirm this fact by using krun to evaluate foo(0).

+

Markdown Selector Expressions

+

By default, as previously stated, K includes in the definition any code block +with the k selector. However, this is merely a specific instance of a general +principle, namely, that K allows you to control which selectors get included +in your K definition. This is done by means of the --md-selector flag to +kompile. This flag accepts a Markdown selector expression, which you +can essentially think of as a kind of Boolean algebra over Markdown selectors. +Each selector becomes an atom, and you can combine these atoms via the &, +|, !, and () operators.

+
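For example, to build a definition from a Markdown file that includes both the k code blocks and blocks marked with a hypothetical literate selector, one might invoke (an illustrative command line):

kompile lesson.md --md-selector "k | literate"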

Here is a grammar, written in K, of the language of Markdown selector +expressions:

+
.k .selector
  syntax Selector ::= r"[0-9a-zA-Z_]+" [token]
  syntax SelectorExp ::= Selector
                       | "(" SelectorExp ")" [bracket]
                       > right:
                         "!" SelectorExp
                       > right:
                         SelectorExp "&" SelectorExp
                       > right:
                         SelectorExp "|" SelectorExp
+

Here is a selector expression that selects all the K code blocks in this +definition except the one immediately above:

+
k & (! selector)
+
+

Addendum

+

This code block exists in order to make the above lesson a syntactically valid +K definition. Consider why it is necessary.

+
k
endmodule
+

Exercises

+
  1. Compile this lesson with the selector expression k & (! foo) and confirm
     that you get a parser error if you try to evaluate the foo function with
     the resulting definition.

  2. Compile Lesson 1.3 as a K definition. Identify why it fails to compile.
     Then pass an appropriate --md-selector to the compiler in order to make it
     compile.

  3. Modify your calculator application from Lesson 1.7, Exercise 2, to be
     written in a literate style. Consider what text might be appropriate to
     turn the resulting markdown file into documentation for your calculator.
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.9: Unparsing and the format and color attributes.

+
+

Lesson 1.9: Unparsing and the format and color attributes

+

The purpose of this lesson is to teach the user about how terms are +pretty-printed in K, and how the user can make adjustments to the default +settings for how to print specific terms.

+

Parsing, Execution, and Unparsing

+

When you use krun to interpret a program, the tool passes through three major +phases. In the first, parsing, the program itself is parsed using either kast +or an ahead-of-time parser generated via Bison, and the resulting AST becomes +the input to the interpreter. In the second phase, execution, K evaluates +functions and (as we will discuss in depth later) performs rewrite steps to +iteratively transform the program state. The third and final phase is called +unparsing, because it consists of taking the final state of the application +after the program has been interpreted, and converting it from an AST back into +text that (in theory, anyway) could be parsed back into the same AST that was +the output of the execution phase.

+

In practice, unparsing is not always a precise inverse of parsing. It turns out (although we are not going to cover exactly why here) that constructing a sound algorithm which takes a grammar and an AST and emits text that parses, via that grammar, back to the original AST is an NP-hard problem. As a result, in the interest of avoiding exponential-time algorithms when users rarely care about unparsing being completely sound, we take certain shortcuts that provide a linear-time algorithm which approximates a sound solution, while sacrificing the guarantee that the result can be parsed into the exact original term in all cases.

+

This is a lot of theoretical explanation, but at root, the unparsing process +is fairly simple: it takes a K term that is the output of execution and pretty +prints it according to the syntax defined by the user in their K definition. +This is useful because the original AST is not terribly user-readable, and it +is difficult to visualize the entire term or decipher information about the +final state of the program at a quick glance. Of course, in rare cases, the +pretty-printed configuration loses information of relevance, which is why K +allows you to obtain the original AST on request.

+

As an example of all of this, consider the following K definition +(lesson-09-a.k):

+
k
module LESSON-09-A
  imports BOOL

  syntax Exp ::= "(" Exp ")" [bracket]
               | Bool
               > "!" Exp
               > left:
                 Exp "&&" Exp
               | Exp "^" Exp
               | Exp "||" Exp

  syntax Exp ::= id(Exp) [function]
  rule id(E) => E
endmodule
+

This is similar to the grammar we defined in LESSON-06-C, with the difference +that the Boolean expressions are now constructors of sort Exp and we define a +trivial function over expressions that returns its argument unchanged.

+

We can now parse a simple program in this definition and use it to unparse some +Boolean expressions. For example (exp.bool):

+
id(true&&false&&!true^(false||true))
+
+

Here is a program that is not particularly legible at first glance, because all +extraneous whitespace has been removed. However, if we run krun exp.bool, we +see that the result of the unparser will pretty-print this expression rather +nicely:

+
<k>
  true && false && ! true ^ ( false || true ) ~> .
</k>
+
+

Notably, not only does K insert whitespace where appropriate, it is also smart +enough to insert parentheses where necessary in order to ensure the correct +parse. For example, without those parentheses, the expression above would parse +equivalent to the following one:

+
(((true && false) && ! true) ^ false) || true
+
+

Indeed, you can confirm this by passing that exact expression to the id +function and evaluating it, then looking at the result of the unparser:

+
<k>
  true && false && ! true ^ false || true ~> .
</k>
+
+

Here, because the meaning of the AST is the same both with and without +parentheses, K does not insert any parentheses when unparsing.

+

Exercise

+

Modify the grammar of LESSON-09-A above so that the binary operators are +right associative. Try unparsing exp.bool again, and note how the result is +different. Explain the reason for the difference.

+

Custom unparsing of terms

+

You may have noticed that right now, the unparsing of terms is not terribly +imaginative. All it is doing is taking each child of the term, inserting it +into the non-terminal positions of the production, then printing the production +with a space between each terminal or non-terminal. It is easy to see why this +might not be desirable in some cases. Consider the following K definition +(lesson-09-b.k):

+
k
module LESSON-09-B
  imports BOOL

  syntax Stmt ::= "{" Stmt "}" | "{" "}"
                > right:
                  Stmt Stmt
                | "if" "(" Bool ")" Stmt
                | "if" "(" Bool ")" Stmt "else" Stmt [avoid]
endmodule
+

This is a statement grammar, simplified to the point of meaninglessness, but +still useful as an object lesson in unparsing. Consider the following program +in this grammar (if.stmt):

+
if (true) {
  if (true) {}
  if (false) {}
  if (true) {
    if (false) {} else {}
  } else {
    if (false) {}
  }
}
+
+

This is how that term would be unparsed if it appeared in the output of krun:

+
if ( true ) { if ( true ) { } if ( false ) { } if ( true ) { if ( false ) { } else { } } else { if ( false ) { } } }
+
+

This is clearly much less legible than we started with! What are we to do? +Well, K provides an attribute, format, that can be applied to any production, +which controls how that production gets unparsed. You've seen how it gets +unparsed by default, but via this attribute, the developer has complete control +over how the term is printed. Of course, the user can trivially create ways to +print terms that would not parse back into the same term. Sometimes this is +even desirable. But in most cases, what you are interested in is controlling +the line breaking, indentation, and spacing of the production.

+

Here is an example of how you might choose to apply the format attribute +to improve how the above term is unparsed (lesson-09-c.k):

+
k
module LESSON-09-C
  imports BOOL

  syntax Stmt ::= "{" Stmt "}" [format(%1%i%n%2%d%n%3)] | "{" "}" [format(%1%2)]
                > right:
                  Stmt Stmt [format(%1%n%2)]
                | "if" "(" Bool ")" Stmt [format(%1 %2%3%4 %5)]
                | "if" "(" Bool ")" Stmt "else" Stmt [avoid, format(%1 %2%3%4 %5 %6 %7)]
endmodule
+

If we compile this new definition and unparse the same term, this is the +result we get:

+
if (true) {
  if (true) {}
  if (false) {}
  if (true) {
    if (false) {} else {}
  } else {
    if (false) {}
  }
}
+
+

This is the exact same text we started with! By adding the format attributes, +we were able to indent the body of code blocks, adjust the spacing of if +statements, and put each statement on a new line.

+

How exactly was this achieved? Well, each time the unparser reaches a term, +it looks at the format attribute of that term. That format attribute is a +mix of characters and format codes. Format codes begin with the % +character. Each character in the format attribute other than a format code is +appended verbatim to the output, and each format code is handled according to +its meaning, transformed (possibly recursively) into a string of text, and +spliced into the output at the position the format code appears in the format +string.

+

Provided for reference is a table with a complete list of all valid format +codes, followed by their meaning:

Format Code   Meaning

n             Insert '\n' followed by the current indentation level

i             Increase the current indentation level by 1

d             Decrease the current indentation level by 1

c             Move to the next color in the list of colors for this production
              (see next section)

r             Reset color to the default foreground color for the terminal
              (see next section)

an integer    Print a terminal or non-terminal from the production. The integer
              is treated as a 1-based index into the terminals and non-terminals
              of the production.

              If the offset refers to a terminal, move to the next color in the
              list of colors for this production, print the value of that
              terminal, then reset the color to the default foreground color for
              the terminal.

              If the offset refers to a regular expression terminal, it is an
              error.

              If the offset refers to a non-terminal, unparse the corresponding
              child of the current term (starting with the current indentation
              level) and print the resulting text, then set the current color
              and indentation level to the color and indentation level following
              unparsing that term.

other char    Print that character verbatim
+
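As a worked example of reading this table, the format string %1%i%n%2%d%n%3 on the production "{" Stmt "}" in LESSON-09-C prints the opening brace (offset 1), increases the indentation level, starts a new line, unparses the statement between the braces (offset 2), decreases the indentation level, starts a new line, and finally prints the closing brace (offset 3). This is exactly why, in the output above, each block's body appears indented on its own lines between the braces.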

Exercise

+

Change the format attributes for LESSON-09-C so that if.stmt will unparse +as follows:

+
if (true)
{
  if (true)
  {
  }
  if (false)
  {
  }
  if (true)
  {
    if (false)
    {
    }
    else
    {
    }
  }
  else
  {
    if (false)
    {
    }
  }
}
+
+

Output coloring

+

When the output of unparsing is displayed on a terminal supporting colors, K +is capable of coloring the output, similar to what is possible with a syntax +highlighter. This is achieved via the color and colors attributes.

+

Essentially, both the color and colors attributes are used to construct a +list of colors associated with each production, and then the format attribute +is used to control how those colors are used to unparse the term. At its most +basic level, you can set the color attribute to color all the terminals in +the production a certain color, or you can use the colors attribute to +specify a comma-separated list of colors for each terminal in the production. +At a more advanced level, the %c and %r format codes control how the +formatter interacts with the list of colors specified by the colors +attribute. You can essentially think of the color attribute as a way of +specifying that you want all the colors in the list to be the same color.

+

Note that the %c and %r format codes are relatively primitive in nature. +The color and colors attributes merely maintain a list of colors, whereas +the %c and %r format codes merely control how to advance through that list +and how individual text is colored.

+

It is an error if the colors attribute does not provide all the colors needed +by the terminals and escape codes in the production. %r does not change the +position in the list of colors at all, so the next %c will advance to the +following color.

+

As a complete example, here is a variant of LESSON-09-A which colors the +various boolean operators:

+
k
module LESSON-09-D
  imports BOOL

  syntax Exp ::= "(" Exp ")" [bracket]
               | Bool
               > "!" Exp [color(yellow)]
               > left:
                 Exp "&&" Exp [color(red)]
               | Exp "^" Exp [color(blue)]
               | Exp "||" Exp [color(green)]

  syntax Exp ::= id(Exp) [function]
  rule id(E) => E
endmodule
+

For a complete list of allowed colors, see +here.

+

Exercises

+
  1. Use the color attribute on LESSON-09-C to color the keywords true and
     false one color, the keywords if and else another color, and the operators
     (, ), {, and } a third color.

  2. Use the format, color, and colors attributes to tell the unparser to style
     the expression grammar from Lesson 1.8, Exercise 3 according to your own
     personal preferences for syntax highlighting and code formatting. You can
     view the result of the unparser on a function term without evaluating that
     function by means of the command kparse <file> | kore-print -.
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.10: Strings.

+
+

Lesson 1.10: Strings

+

The purpose of this lesson is to explain how to use the String sort in K to +represent sequences of characters, and explain where to find additional +information about builtin functions over strings.

+

The String Sort

+

In addition to the Int and Bool sorts covered in +Lesson 1.6, K provides, among others, the +String sort to represent sequences of characters. You can import this +functionality via the STRING-SYNTAX module, which contains the syntax of +string literals in K, and the STRING module, which contains all the functions +that operate over the String type.

+

Strings in K are double-quoted. The following list of escape sequences is +supported:

Escape Sequence   Meaning

\"                The literal character "
\\                The literal character \
\n                The newline character (ASCII code 0x0a)
\r                The carriage return character (ASCII code 0x0d)
\t                The tab character (ASCII code 0x09)
\f                The form feed character (ASCII code 0x0c)
\x00              \x followed by 2 hexadecimal digits indicates a code point between 0x00 and 0xFF
\u0000            \u followed by 4 hexadecimal digits indicates a code point between 0x0000 and 0xFFFF
\U00000000        \U followed by 8 hexadecimal digits indicates a code point between 0x000000 and 0x10FFFF
+

Please note that, at present, K's Unicode support is not complete, so you may run into errors using code points greater than 0xff.

+

As an example, you can construct a string literal containing the following +block of text:

+
This is an example block of text.
Here is a quotation: "Hello world."
	This line is indented.
ÁÉÍÓÚ
+
+

Like so:

+
"This is an example block of text.\nHere is a quotation: \"Hello world.\"\n\tThis line is indented.\n\xc1\xc9\xcd\xd3\xda\n"
+
+

Basic String Functions

+

The full list of functions provided for the String sort can be found in +domains.md, but here we +describe a few of the more basic ones.

+

String concatenation

+

The concatenation operator for strings is +String. For example, consider +the following K rule that constructs a string from component parts +(lesson-10.k):

+
k
module LESSON-10
  imports STRING

  syntax String ::= msg(String) [function]
  rule msg(S) => "The string you provided: " +String S +String "\nHave a nice day!"
endmodule
+

Note that this operator is O(N), so repeated concatenations are inefficient. +For information about efficient string concatenation, refer to +Lesson 2.14.

+

String length

+

The function to return the length of a string is lengthString. For example, +lengthString("foo") will return 3, and lengthString("") will return 0. +The return value is the length of the string in code points.

+

Substring computation

+

The function to compute the substring of a string is substrString. It +takes two string indices, starting from 0, and returns the substring within the +range [start..end). It is only defined if end >= start, start >= 0, and +end <= length of string. Here, for example, we return the first 5 characters +of a string:

+
substrString(S, 0, 5)
+
+

Here we return all but the first 3 characters:

+
substrString(S, 3, lengthString(S))
+
+
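Putting the two functions together (a hypothetical helper, not part of the lesson files), here is a small function that drops the first N code points of a string:

k
module LESSON-10-DROP
  imports STRING
  imports INT

  // everything after the first N code points of S;
  // defined (per substrString) when 0 <= N <= lengthString(S)
  syntax String ::= dropPrefix(String, Int) [function]
  rule dropPrefix(S, N) => substrString(S, N, lengthString(S))
endmodule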

Exercises

+
  1. Write a function that takes a paragraph of text (i.e., a sequence of
     sentences, each ending in a period), and constructs a new (nonsense)
     sentence composed of the first word of each sentence, followed by a
     period. Do not worry about capitalization or about periods within a
     sentence which do not end the sentence (e.g., "Dr."). You can assume that
     all whitespace within the paragraph consists of spaces. For more
     information about the functions over strings required to implement such a
     function, refer to domains.md.
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.11: Casting Terms.

+
+

Lesson 1.11: Casting Terms

+

The purpose of this lesson is to explain how to use cast expressions in +order to disambiguate terms using sort information. We also explain how the +variable sort inference algorithm works in K, and how to change the default +behavior by casting variables to a particular sort.

+

Casting in K

+

Sometimes the grammar you write for your rules in K can be a little bit +ambiguous on purpose. While grammars for programming languages may be +unambiguous when considered in their entirety, K allows you to write rules +involving arbitrary fragments of that grammar, and those fragments can +sometimes be ambiguous by themselves, or similar enough to other fragments +of the grammar to trigger ambiguity. As a result, in addition to the tools +covered in Lesson 1.4, K provides one +additional powerful tool for disambiguation: cast expressions.

+

K provides three main types of casts: the semantic cast, the strict cast, and +the projection cast. We will cover each of them, and their similarities and +differences, in turn.

+

Semantic casts

+

The most basic, and most common, type of cast in K is called the +semantic cast. For every sort S declared in a module, K provides the +following (implicit) production for use in sentences:

+
  syntax S ::= S ":S"
+
+

Note that S simply represents the name of the sort. For example, if we +defined a sort Exp, the actual production for that sort would be:

+
  syntax Exp ::= Exp ":Exp"
+
+

At runtime, this expression will not actually exist; it is merely an annotation +to the compiler describing the sort of the term inside the cast. It is telling +the compiler that the term inside the cast must be of sort Exp. For example, +if we had the following grammar:

+
k
module LESSON-11-A + imports INT + + syntax Exp ::= Int | Exp "+" Exp + syntax Stmt ::= "if" "(" Exp ")" Stmt | "{" "}" +endmodule +
+

Then we would be able to write 1:Exp, or (1 + 2):Exp, but not {}:Exp.

+

You can also restrict the sort that a variable in a rule will match by casting +it. For example, consider the following additional module:

+
k
module LESSON-11-B + imports LESSON-11-A + imports BOOL + + syntax Term ::= Exp | Stmt + syntax Bool ::= isExpression(Term) [function] + + rule isExpression(_E:Exp) => true + rule isExpression(_) => false [owise] +endmodule +
+

Here we have defined a very simple function that decides whether a term is +an expression or a statement. It does this by casting the variable inside the +isExpression rule to sort Exp. As a result, that variable will only match terms +of sort Exp. Thus, isExpression(1) will return true, as will isExpression(1 + 2), but +isExpression({}) will return false.

+

Exercise

+

Verify this fact for yourself by running isExpression on the above examples. Then +write an isStatement function, and test that it works as expected.

+

Strict casts

+

On occasion, a semantic cast is not strict enough. It might be that you want +to, for disambiguation purposes, say exactly what sort a term is. For +example, consider the following definition:

+
k
module LESSON-11-C + imports INT + + syntax Exp ::= Int + | "add[" Exp "," Exp "]" [group(exp)] + syntax Exp2 ::= Exp + | "add[" Exp2 "," Exp2 "]" [group(exp2)] +endmodule +
+

This grammar is a little ambiguous and contrived, but it serves to demonstrate +how a semantic cast might be insufficient to disambiguate a term. If we were +to write the term add[ I1:Int , I2:Int ]:Exp2, the term would be ambiguous, +because the cast is not sufficiently strict to determine whether you mean +to derive the "add" production defined in group exp or the one in group exp2.

+

In this situation, there is a solution: the strict cast. For every sort +S in your grammar, K also defines the following production:

+
  syntax S ::= S "::S"
+
+

This may at first glance seem the same as the previous cast. And indeed, +from the perspective of the grammar and from the perspective of rewriting, +they are in fact identical. However, the second variant has a unique meaning +in the type system of K: namely, the term inside the cast cannot be of a +strict subsort, i.e., it cannot be a term of another sort S2 such that the production +syntax S ::= S2 exists.

+

As a result, if we were to write in the above grammar the term +add[ I1:Int , I2:Int ]::Exp2, then we would know that the second derivation above +should be chosen, whereas if we want the first derivation, we could write +add[ I1:Int , I2:Int ]::Exp.

+

Care must be taken when using a strict cast with brackets. For example, consider a +similar grammar but using an infix "+":

+
k
module LESSON-11-D + imports INT + + syntax Exp ::= Int + | Exp "+" Exp [group(exp)] + syntax Exp2 ::= Exp + | Exp2 "+" Exp2 [group(exp2)] + | "(" Exp2 ")" [bracket] +endmodule +
+

The term I1:Int + I2:Int is ambiguous and could refer to either the production +in group exp or the one in group exp2. To differentiate, you might try to write +(I1:Int + I2:Int)::Exp2 similarly to the previous example.

+

Unfortunately though, this is still ambiguous. Here, the strict cast ::Exp2 applies +directly to the brackets themselves rather than the underlying term within those brackets. +As a result, it enforces that (I1:Int + I2:Int) cannot be of a strict subsort of Exp2, but +it has no effect on the sort of the subterm I1:Int + I2:Int.

+

For cases like this, K provides an alternative syntax for strict casts:

+
  syntax S ::= "{" S "}::S"
+
+

The ambiguity can then be resolved with {I1:Int + I2:Int}::Exp or {I1:Int + I2:Int}::Exp2.

+

Projection casts

+

Thus far we have focused entirely on casts which exist solely to inform the +compiler about the sort of terms. However, sometimes when dealing with grammars +containing subsorts, it can be desirable to reason with the subsort production +itself, which injects one sort into another. Remember from above that such +a production looks like syntax S ::= S2. This type of production, called a +subsort production, can be thought of as a type of inheritance involving +constructors. If we have the above production in our grammar, we say that S2 +is a subsort of S, or that any S2 is also an S. K implicitly maintains a +symbol at runtime which keeps track of where such subsortings occur; this +symbol is called an injection.

+

Sometimes, when one sort is a subsort of another, it can be the case that +a function returns one sort, but you actually want to cast the result of +calling that function to another sort which is a subsort of the first sort. +This is similar to what happens with inheritance in an object-oriented +language, where you might cast a superclass to a subclass if you know for +sure the object at runtime is in fact an instance of that class.

+

K provides something similar for subsorts: the projection cast.

+

For each pair of sorts S and S2, K provides the following production:

+
  syntax S ::= "{" S2 "}" ":>S"
+
+

What this means is that you take any term of sort S2 and cast it to sort +S. If the term of sort S2 consists of an injection containing a term of sort +S, then this will return that term. Otherwise, an error occurs and rewriting +fails, returning the projection function which failed to apply. The sort is +not actually checked at compilation time; rather, it is a runtime check +inserted into the code that runs when the rule applies.

+

For example, here is a module that makes use of projection casts:

+
k
module LESSON-11-E + imports INT + imports BOOL + + syntax Exp ::= Int | Bool | Exp "+" Exp | Exp "&&" Exp + + syntax Exp ::= eval(Exp) [function] + rule eval(I:Int) => I + rule eval(B:Bool) => B + rule eval(E1 + E2) => {eval(E1)}:>Int +Int {eval(E2)}:>Int + rule eval(E1 && E2) => {eval(E1)}:>Bool andBool {eval(E2)}:>Bool +endmodule +
+

Here we have defined constructors for a simple expression language over +Booleans and integers, as well as a function eval that evaluates these +expressions to a value. Because that value could be an integer or a Boolean, +we need the casts in the last two rules in order to meet the type signature of ++Int and andBool. Of course, the user can write ill-formed expressions like +1 && true or false + true, but these will cause errors at runtime, because +the projection cast will fail.
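
For example (a rough illustration of how these rules behave, not literal krun output):

  eval(1 + 2)       // rewrites to 3
  eval(1 && true)   // gets stuck: the projection {eval(1)}:>Bool fails at runtime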

+

Exercises

+
    +
  1. +

    Extend the eval function in LESSON-11-E to include Strings and add a . +operator which concatenates them.

    +
  2. +
  3. +

    Modify your solution from Lesson 1.9, Exercise 2 by using an Exp sort to +express the integer and Boolean expressions that it supports, in the same style +as LESSON-11-E. Then write an eval function that evaluates all terms of +sort Exp to either a Bool or an Int.

    +
  4. +
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.12: Syntactic Lists.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/12_syntactic_lists/index.html b/k-distribution/k-tutorial/1_basic/12_syntactic_lists/index.html new file mode 100644 index 00000000000..9aead23bf2d --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/12_syntactic_lists/index.html @@ -0,0 +1,585 @@ + + + + + + + + + + + + + + +Lesson 1.12: Syntactic Lists | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.12: Syntactic Lists

+

The purpose of this lesson is to explain how K provides support for syntactic +repetition through the use of the List{} and NeList{} constructs, +generally called syntactic lists.

+

The List{} construct

+

Sometimes, when defining a grammar in K, it is useful to define a syntactic +construct consisting of an arbitrary-length sequence of items. For example, +you might wish to define a function call construct, and need to express a way +of passing arguments to the function. You can in theory simply define these +productions using ordinary constructors, but it can be tricky to get the syntax +exactly right in K without a lot of tedious glue code.

+

For this reason, K provides a way of specifying that a non-terminal represents +a syntactic list (lesson-12-a.k):

+
k
module LESSON-12-A-SYNTAX + imports INT-SYNTAX + + syntax Ints ::= List{Int,","} +endmodule + +module LESSON-12-A + imports LESSON-12-A-SYNTAX +endmodule +
+

Note that instead of a sequence of terminals and non-terminals, the right hand +side of the Ints production contains the symbol List followed by two items +in curly braces. The first item is the non-terminal which is the element type +of the list, and the second item is a terminal representing the separator of +the list. As a special case, lists which are separated only by whitespace can +be specified with a separator of "".

+

This List{} construct is roughly equivalent to the following definition +(lesson-12-b.k):

+
k
module LESSON-12-B-SYNTAX + imports INT-SYNTAX + + syntax Ints ::= Int "," Ints | ".Ints" +endmodule + +module LESSON-12-B + imports LESSON-12-B-SYNTAX +endmodule +
+

As you can see, the List{} construct represents a cons-list with an element +at the head and another list at the tail. The empty list is represented by +a . followed by the sort of the list.
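
For instance, the three-element list that you would write as 1, 2, 3 corresponds to the fully terminated cons-list term:

  1, 2, 3, .Ints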

+

However, the List{} construct provides several key syntactic conveniences +over the above definition. First of all, when writing a list in a rule, +explicitly writing the terminator is not always required. For example, consider +the following additional module (lesson-12-c.k):

+
k
module LESSON-12-C + imports LESSON-12-A + imports INT + + syntax Int ::= sum(Ints) [function] + rule sum(I:Int) => I + rule sum(I1:Int, I2:Int, Is:Ints) => sum(I1 +Int I2, Is) +endmodule +
+

Here we see a function that sums together a non-empty list of integers. Note in +particular the first rule. We do not explicitly mention .Ints, but in fact, +the rule in question is equivalent to the following rule:

+
  rule sum(I:Int, .Ints) => I
+
+

The reason for this is that K will automatically insert a list terminator +anywhere a syntactic list is expected, but an element of that list appears +instead. This works even with lists of more than one element:

+
  rule sum(I1:Int, I2:Int) => I1 +Int I2
+
+

This rule is redundant, but here we explicitly match a list of exactly two +elements, because the .Ints is implicitly added after I2.

+

Parsing Syntactic Lists in Programs

+

An additional syntactic convenience takes place when you want to express a +syntactic list in the input to krun. In this case, K will automatically +transform the grammar in LESSON-12-B-SYNTAX into the following +(lesson-12-d.k):

+
k
module LESSON-12-D + imports INT-SYNTAX + + syntax Ints ::= #NonEmptyInts | #IntsTerminator + syntax #NonEmptyInts ::= Int "," #NonEmptyInts + | Int #IntsTerminator + syntax #IntsTerminator ::= "" +endmodule +
+

This allows you to express the usual comma-separated list of arguments where +an empty list is represented by the empty string, and you don't have to +explicitly terminate the list. Because of this, we can write the syntax +of function calls in C very easily (lesson-12-e.k):

+
k
module LESSON-12-E + syntax Id ::= r"[a-zA-Z_][a-zA-Z0-9_]*" [token] + syntax Exp ::= Id | Exp "(" Exps ")" + syntax Exps ::= List{Exp,","} +endmodule +
+

Exercise

+

Write a function concat which takes a list of String and concatenates them +all together. Do not worry if the function is O(n^2). +Test your implementation using the syntactic sugar for lists added by the parser.

+

Then write some function call expressions using identifiers in C and verify with +kast that the above grammar captures the intended syntax. Make sure to test +with function calls with zero, one, and two or more arguments.

+

The NeList{} construct

+

One limitation of the List{} construct is that it is always possible to +write a list of zero elements where a List{} is expected. While this is +desirable in a number of cases, it is sometimes not what the grammar expects.

+

For example, in C, it is not allowable for an enum definition to have zero +members. In other words, if we were to write the grammar for enumerations like +so (lesson-12-f.k):

+
k
module LESSON-12-F + syntax Id ::= r"[a-zA-Z_][a-zA-Z0-9_]*" [token] + syntax Exp ::= Id + + syntax EnumSpecifier ::= "enum" Id "{" Ids "}" + syntax Ids ::= List{Id,","} +endmodule +
+

Then we would be syntactically allowed to write enum X {}, which +ought instead to be a syntax error.

+

For this reason, we introduce the additional NeList{} construct. The syntax +is identical to List{}, except with NeList instead of List before the +curly braces. When parsing rules, it behaves identically to the List{} +construct. However, when parsing inputs to krun, the above grammar, if we +replaced syntax Ids ::= List{Id,","} with syntax Ids ::= NeList{Id,","}, +would become equivalent to the following (lesson-12-g.k):

+
k
module LESSON-12-G + syntax Id ::= r"[a-zA-Z_][a-zA-Z0-9_]*" [token] + syntax Exp ::= Id + + syntax EnumSpecifier ::= "enum" Id "{" Ids "}" + syntax Ids ::= Id | Id "," Ids +endmodule +
+

In other words, only non-empty lists of Id would be allowed.

+

Exercises

+
    +
  1. +

    Modify the sum function in LESSON-12-C so that the Ints sort is an +NeList{}. Verify that calling sum() with no arguments is now a syntax +error.

    +
  2. +
  3. +

    Write a modified sum function with the List construct that can also sum +up an empty list of arguments. In such a case, the sum ought to be 0.

    +
  4. +
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.13: Basics of K Rewriting.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/13_rewrite_rules/index.html b/k-distribution/k-tutorial/1_basic/13_rewrite_rules/index.html new file mode 100644 index 00000000000..07a90f367ba --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/13_rewrite_rules/index.html @@ -0,0 +1,704 @@ + + + + + + + + + + + + + + +Lesson 1.13: Basics of K Rewriting | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.13: Basics of K Rewriting

+

The purpose of this lesson is to explain how rewrite rules that are not the +definition of a function behave, and how, using these rules, you can construct +a semantics of programs in a programming language in K.

+

Recap: Function rules in K

+

Recall from Lesson 1.2 that we have, thus far, +introduced two types of productions in K: constructors and functions. +A function is identified by the function attribute placed on the +production. As you may recall, when we write a rule with a function on the +left-hand side of the => operator, we are defining the meaning of that +function for inputs which match the patterns on the left-hand side of the rule. +If the arguments to the function match the patterns, then the function is +evaluated to the value constructed by substituting the bindings for the +variables into the right-hand side of the rule.

+

Top-level rules

+

However, function rules are not the only type of rule permissible in K, nor +even the most frequently used. K also has a concept of a +top-level rewrite rule. The simplest way to ensure that a rule is treated +as a top-level rule is for the left-hand side of the rule to mention one or +more cells. We will cover how cells work and are declared in more detail +in a later lesson, but for now, what you should know is that when we ran krun +in our very first example in Lesson 1.2 and got the following output:

+
<k>
+  Yellow ( ) ~> .
+</k>
+
+

<k> is a cell, known by convention as the K cell. This cell is available +by default in any definition without needing to be explicitly declared.

+

The K cell contains a single term of sort K. K is a predefined sort in K +with two constructors, that can be roughly represented by the following +grammar:

+
  syntax K ::= KItem "~>" K
+             | "."
+
+

As a syntactic convenience, K allows you to treat ~> like it is an +associative list (i.e., as if it were defined as syntax K ::= K "~>" K). +When a definition is compiled, it will automatically transform the rules you +write so that they treat the K sort as a cons-list. Another syntactic +convenience is that, for disambiguation purposes, you can write .K anywhere +you would otherwise write . and the meaning is identical.

+

Now, you may notice that the above grammar mentions the sort KItem. This is +another built-in sort in K. For every sort S declared in a definition (with +the exception of K and KItem), K will implicitly insert the following +production:

+
  syntax KItem ::= S
+
+

In other words, every sort is a subsort of the sort KItem, and thus a term +of any sort can be injected as an element of a term of sort K, also called +a K sequence.

+
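
For instance (just to illustrate the notation), a K sequence may freely mix elements of different sorts; the following is a K sequence whose elements are an Int and a Bool:

  1 ~> true ~> .K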

By default, when you krun a program, the AST of the program is inserted as +the sole element of a K sequence into the <k> cell. This explains why we +saw the output we did in Lesson 1.2.

+

With these preliminaries in mind, we can now explain how top-level rewrite +rules work in K. Put simply, any rule where there is a cell (such as the K +cell) at the top on the left-hand side will be a top-level rewrite rule. Once +the initial program has been inserted into the K cell, the resulting term, +called the configuration, will be matched against all the top-level +rewrite rules in the definition. If only one rule matches, the substitution +generated by the matching will be applied to the right-hand side of the rule +and the resulting term is rewritten to be the new configuration. Rewriting +proceeds by iteratively applying rules, also called taking steps, until +no top-level rewrite rule can be applied. At this point the configuration +becomes the final configuration and is output by krun.

+

If more than one top-level rule applies, by default, K will pick just one +of those rules, apply it, and continue rewriting. However, it is +non-deterministic which rule applies. In theory, it could be any of them. +By passing the --search flag to krun, you are able to tell krun to +explore all possible non-deterministic choices, and generate a complete list of +all possible final configurations reachable by each non-deterministic choice that +can be made. Note that the --search flag to krun only works if you pass +--enable-search to kompile first.

+

Unlike top-level rewrite rules, function rules are not associated with any +particular set of cells in the configuration (although they can contain cells +in their function arguments and return value). While top-level rewrite rules +apply to the entire term being rewritten, function rules apply anywhere a +function application for that function appears, and are immediately rewritten +to their return value in that position.

+

Another key distinction between top-level rules and function rules is that +function symbols, i.e., productions with the function attribute, are +mathematical functions rather than constructors. While a constructor is +logically distinct from any other constructor of the same sort, and can be +matched against unconditionally, a function does not necessarily have the +same property unless it happens to be an injective function. Thus, two +function symbols with different arguments may still ultimately produce the +same value and thus compare equal to one another. Due to this, concrete +execution (i.e., all K definitions introduced thus far; see Lesson 1.21) +introduces the restriction that you cannot match on a function symbol on the +left-hand side of a rule, except as the top symbol on the left-hand side of +a function rule. This restriction will later be lifted when we introduce the +Haskell Backend, which performs symbolic execution.

+

Exercise

+

Pass a program containing no functions to krun. You can use a term of sort +Exp from LESSON-11-E. Observe the output and try to understand why you get +the output you do. Then write two rules that rewrite that program to another. +Run krun --search on that program and observe both results. Then add a third +rule that rewrites one of those results again. Test that that rule applies as +well.

+

Using top-level rules to evaluate expressions

+

Thus far, we have focused primarily on defining functions over constructors +in K. However, now that we have a basic understanding of top-level rules, +it is possible to introduce a rewrite system to our definitions. A rewrite +system is a collection of top-level rewrite rules which performs an organized +transformation of a particular program into a result which expresses the +meaning of that program. For example, we might rewrite an expression in a +programming language into a value representing the result of evaluating that +expression.

+

Recall in Lesson 1.11, we wrote a simple grammar of Boolean and integer +expressions that looked roughly like this (lesson-13-a.k):

+
k
module LESSON-13-A + imports INT + + syntax Exp ::= Int + | Bool + | Exp "+" Exp + | Exp "&&" Exp +endmodule +
+

In that lesson, we defined a function eval which evaluated such expressions +to either an integer or Boolean.

+

However, it is more idiomatic to evaluate such expressions using top-level +rewrite rules. Here is how one might do so in K (lesson-13-b.k):

+
k
module LESSON-13-B-SYNTAX + imports UNSIGNED-INT-SYNTAX + imports BOOL-SYNTAX + + syntax Val ::= Int | Bool + syntax Exp ::= Val + > left: Exp "+" Exp + > left: Exp "&&" Exp +endmodule + +module LESSON-13-B + imports LESSON-13-B-SYNTAX + imports INT + imports BOOL + + rule <k> I1:Int + I2:Int ~> K:K </k> => <k> I1 +Int I2 ~> K </k> + rule <k> B1:Bool && B2:Bool ~> K:K </k> => <k> B1 andBool B2 ~> K </k> + + syntax KItem ::= freezer1(Val) | freezer2(Exp) + | freezer3(Val) | freezer4(Exp) + + rule <k> E1:Val + E2:Exp ~> K:K </k> => <k> E2 ~> freezer1(E1) ~> K </k> [priority(51)] + rule <k> E1:Exp + E2:Exp ~> K:K </k> => <k> E1 ~> freezer2(E2) ~> K </k> [priority(52)] + rule <k> E1:Val && E2:Exp ~> K:K </k> => <k> E2 ~> freezer3(E1) ~> K </k> [priority(51)] + rule <k> E1:Exp && E2:Exp ~> K:K </k> => <k> E1 ~> freezer4(E2) ~> K </k> [priority(52)] + + rule <k> E2:Val ~> freezer1(E1) ~> K:K </k> => <k> E1 + E2 ~> K </k> + rule <k> E1:Val ~> freezer2(E2) ~> K:K </k> => <k> E1 + E2 ~> K </k> + rule <k> E2:Val ~> freezer3(E1) ~> K:K </k> => <k> E1 && E2 ~> K </k> + rule <k> E1:Val ~> freezer4(E2) ~> K:K </k> => <k> E1 && E2 ~> K </k> +endmodule +
+

This is of course rather cumbersome currently, but we will soon introduce +syntactic convenience which makes writing definitions of this type considerably +easier. For now, notice that there are roughly 3 types of rules here: the first +matches a K cell in which the first element of the K sequence is an Exp whose +arguments are values, and rewrites the first element of the sequence to the +result of that expression. The second also matches a K cell with an Exp in +the first element of its K sequence, but it matches when one or both arguments +of the Exp are not values, and replaces the first element of the K sequence +with two new elements: one being an argument to evaluate, and the other being +a special constructor called a freezer. Finally, the third matches a K +sequence where a Val is first, and a freezer is second, and replaces them +with a partially evaluated expression.

+

This general pattern is what is known as heating an expression, +evaluating its arguments, cooling the arguments into the expression +again, and evaluating the expression itself. By repeatedly performing +this sequence of actions, we can evaluate an entire AST containing a complex +expression down into its resulting value.
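
To make this concrete, here is a rough hand-worked sketch of the successive configurations LESSON-13-B passes through when evaluating the program 1 + 2 + 3, which parses left-associatively (the parentheses below are only for readability, and this is not literal krun output):

  <k> (1 + 2) + 3 ~> . </k>            // initial configuration
  <k> 1 + 2 ~> freezer2(3) ~> . </k>   // heat the left operand
  <k> 3 ~> freezer2(3) ~> . </k>       // evaluate 1 + 2
  <k> 3 + 3 ~> . </k>                  // cool the value back into the expression
  <k> 6 ~> . </k>                      // evaluate the remaining addition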

+

Exercise

+

Write an addition expression with integers. Use krun --depth 1 to see the +result of rewriting after applying a single top-level rule. Gradually increase +the value of --depth to see successive states. Observe how this combination +of rules is eventually able to evaluate the entire expression.

+

Simplifying the evaluator: Local rewrites and cell ellipses

+

As you saw above, the definition we wrote is rather cumbersome. Over the +remainder of Lessons 1.13 and 1.14, we will greatly simplify it. The first step +in doing so is to teach a bit more about the rewrite operator, =>. Thus far, +all the rules we have written look like rule LHS => RHS. However, this is not +the only way the rewrite operator can be used. It is actually possible to place +a constructor or function at the very top of the rule, and place rewrite +operators inside that term. While a rewrite operator cannot appear nested +inside another rewrite operator, by doing this, we can express that some parts +of what we are matching are not changed by the rewrite operator. For +example, consider the following rule from above:

+
  rule <k> I1:Int + I2:Int ~> K:K </k> => <k> I1 +Int I2 ~> K </k>
+
+

We can equivalently write it as follows:

+
  rule <k> (I1:Int + I2:Int => I1 +Int I2) ~> _:K </k>
+
+

When you put a rewrite inside a term like this, in essence, you are telling +the rule to only rewrite part of the left-hand side to the right-hand side. +In practice, this is implemented by lifting the rewrite operator to the top of +the rule by means of duplicating the surrounding context.

+

There is a way that the above rule can be simplified further, however. K +provides a special syntax for each cell containing a term of sort K, indicating +that we want to match only on some prefix of the K sequence. For example, the +above rule can be simplified further like so:

+
  rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k>
+
+

Here we have placed the symbol ... immediately prior to the </k> which ends +the cell. What this tells the compiler is to take the contents of the cell, +treat it as the prefix of a K sequence, and insert an anonymous variable of +sort K at the end. Thus we can think of ... as a way of saying we +don't care about the part of the K sequence after the beginning, leaving +it unchanged.

+

Putting all this together, we can rewrite LESSON-13-B like so +(lesson-13-c.k):

+
k
module LESSON-13-C-SYNTAX + imports UNSIGNED-INT-SYNTAX + imports BOOL-SYNTAX + + syntax Val ::= Int | Bool + syntax Exp ::= Val + > left: Exp "+" Exp + > left: Exp "&&" Exp +endmodule + +module LESSON-13-C + imports LESSON-13-C-SYNTAX + imports INT + imports BOOL + + rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k> + rule <k> B1:Bool && B2:Bool => B1 andBool B2 ...</k> + + syntax KItem ::= freezer1(Val) | freezer2(Exp) + | freezer3(Val) | freezer4(Exp) + + rule <k> E1:Val + E2:Exp => E2 ~> freezer1(E1) ...</k> [priority(51)] + rule <k> E1:Exp + E2:Exp => E1 ~> freezer2(E2) ...</k> [priority(52)] + rule <k> E1:Val && E2:Exp => E2 ~> freezer3(E1) ...</k> [priority(51)] + rule <k> E1:Exp && E2:Exp => E1 ~> freezer4(E2) ...</k> [priority(52)] + + rule <k> E2:Val ~> freezer1(E1) => E1 + E2 ...</k> + rule <k> E1:Val ~> freezer2(E2) => E1 + E2 ...</k> + rule <k> E2:Val ~> freezer3(E1) => E1 && E2 ...</k> + rule <k> E1:Val ~> freezer4(E2) => E1 && E2 ...</k> +endmodule +
+

This is still rather cumbersome, but it is already greatly simplified. In the +next lesson, we will see how additional features of K can be used to specify +heating and cooling rules much more compactly.

+

Exercises

+
    +
  1. Modify LESSON-13-C to add rules to evaluate integer subtraction.
  2. +
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.14: Defining Evaluation Order.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/14_evaluation_order/index.html b/k-distribution/k-tutorial/1_basic/14_evaluation_order/index.html new file mode 100644 index 00000000000..2c3e592d998 --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/14_evaluation_order/index.html @@ -0,0 +1,715 @@ + + + + + + + + + + + + + + +Lesson 1.14: Defining Evaluation Order | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.14: Defining Evaluation Order

+

The purpose of this lesson is to explain how to use the heat and cool +attributes, context and context alias sentences, and the strict and +seqstrict attributes to more compactly express heating and cooling in K, +and to express more advanced evaluation strategies in K.

+

The heat and cool attributes

+

Thus far, we have been using rule priority and casts to express when to heat +an expression and when to cool it. For example, the rules for heating have +lower priority, so they do not apply if the term could be evaluated instead, +and the rules for heating are expressly written only to apply if the argument +of the expression is a value.

+

However, K has built-in support for deciding when to heat and when to cool. +This support comes in the form of the rule attributes heat and cool as +well as the specially named function isKResult.

+

Consider the following definition, which is equivalent to LESSON-13-C +(lesson-14-a.k):

+
k
module LESSON-14-A-SYNTAX + imports UNSIGNED-INT-SYNTAX + imports BOOL-SYNTAX + + syntax Exp ::= Int + | Bool + > left: Exp "+" Exp + > left: Exp "&&" Exp +endmodule + +module LESSON-14-A + imports LESSON-14-A-SYNTAX + imports INT + imports BOOL + + rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k> + rule <k> B1:Bool && B2:Bool => B1 andBool B2 ...</k> + + syntax KItem ::= freezer1(Exp) | freezer2(Exp) + | freezer3(Exp) | freezer4(Exp) + + rule <k> E:Exp + HOLE:Exp => HOLE ~> freezer1(E) ...</k> + requires isKResult(E) [heat] + rule <k> HOLE:Exp + E:Exp => HOLE ~> freezer2(E) ...</k> [heat] + rule <k> E:Exp && HOLE:Exp => HOLE ~> freezer3(E) ...</k> + requires isKResult(E) [heat] + rule <k> HOLE:Exp && E:Exp => HOLE ~> freezer4(E) ...</k> [heat] + + rule <k> HOLE:Exp ~> freezer1(E) => E + HOLE ...</k> [cool] + rule <k> HOLE:Exp ~> freezer2(E) => HOLE + E ...</k> [cool] + rule <k> HOLE:Exp ~> freezer3(E) => E && HOLE ...</k> [cool] + rule <k> HOLE:Exp ~> freezer4(E) => HOLE && E ...</k> [cool] + + syntax Bool ::= isKResult(K) [function, symbol] + rule isKResult(_:Int) => true + rule isKResult(_:Bool) => true + rule isKResult(_) => false [owise] +endmodule +
+

We have introduced three major changes to this definition. First, we have +removed the Val sort. We replace it instead with a function isKResult. +The function in question must have the same signature and attributes as seen in +this example. It ought to return true whenever a term should not be heated +(because it is a value) and false when it should be heated (because it is not +a value). We thus also insert isKResult calls in the side condition of two +of the heating rules, where the Val sort was previously used.

+

Second, we have removed the rule priorities on the heating rules and the use of +the Val sort on the cooling rules, and replaced them with the heat and +cool attributes. These attributes instruct the compiler that these rules are +heating and cooling rules, and thus should implicitly apply only when certain +terms on the LHS either are or are not a KResult (i.e., isKResult returns +true versus false).

+

Third, we have renamed some of the variables in the heating and cooling rules +to the special variable HOLE. Syntactically, HOLE is just a special name +for a variable, but it is treated specially by the compiler. By naming a +variable HOLE, we have informed the compiler which term is being heated +or cooled. The compiler will automatically add the side condition +requires isKResult(HOLE) to cooling rules and the side condition +requires notBool isKResult(HOLE) to heating rules.
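
Spelled out, the second heating rule and its matching cooling rule above behave as if they had been written with these side conditions (a sketch of the transformation just described, not literal compiler output):

  rule <k> HOLE:Exp + E:Exp => HOLE ~> freezer2(E) ...</k>
    requires notBool isKResult(HOLE)
  rule <k> HOLE:Exp ~> freezer2(E) => HOLE + E ...</k>
    requires isKResult(HOLE)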

+

Exercise

+

Modify LESSON-14-A to add rules to evaluate integer subtraction.

+

Simplifying further with Contexts

+

The above example is still rather cumbersome to write. We must explicitly write +both the heating and the cooling rule separately, even though they are +essentially inverses of one another. It would be nice to instead simply +indicate which terms should be heated and cooled, and what part of them to +operate on.

+

To do this, K introduces a new type of sentence, the context. Contexts +begin with the context keyword instead of the rule keyword, and usually +do not contain a rewrite operator.

+

Consider the following definition which is equivalent to LESSON-14-A +(lesson-14-b.k):

+
k
module LESSON-14-B-SYNTAX + imports UNSIGNED-INT-SYNTAX + imports BOOL-SYNTAX + + syntax Exp ::= Int + | Bool + > left: Exp "+" Exp + > left: Exp "&&" Exp +endmodule + +module LESSON-14-B + imports LESSON-14-B-SYNTAX + imports INT + imports BOOL + + rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k> + rule <k> B1:Bool && B2:Bool => B1 andBool B2 ...</k> + + context <k> E:Exp + HOLE:Exp ...</k> + requires isKResult(E) + context <k> HOLE:Exp + _:Exp ...</k> + context <k> E:Exp && HOLE:Exp ...</k> + requires isKResult(E) + context <k> HOLE:Exp && _:Exp ...</k> + + syntax Bool ::= isKResult(K) [function, symbol] + rule isKResult(_:Int) => true + rule isKResult(_:Bool) => true + rule isKResult(_) => false [owise] +endmodule +
+

In this example, the heat and cool rules have been removed entirely, as +have been the productions defining the freezers. Don't worry, they still exist +under the hood; the compiler is just generating them automatically. For each +context sentence like above, the compiler generates a #freezer production, +a heat rule, and a cool rule. The generated form is equivalent to the +rules we wrote manually in LESSON-14-A. However, we are now starting to +considerably simplify the definition. Instead of 3 sentences, we just have one.
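
For instance, the single sentence context <k> HOLE:Exp + _:Exp ...</k> stands in for a generated freezer production plus a heat/cool pair much like the following two rules from LESSON-14-A (the name of the generated freezer will differ):

  rule <k> HOLE:Exp + E:Exp => HOLE ~> freezer2(E) ...</k> [heat]
  rule <k> HOLE:Exp ~> freezer2(E) => HOLE + E ...</k> [cool]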

+

context alias sentences and the strict and seqstrict attributes

+

Notice that the contexts we included in LESSON-14-B still seem rather +similar in form. For each expression we want to evaluate, we are declaring +one context for each operand of that expression, and they are each rather +similar to one another. We would like to be able to simplify further by +simply annotating each expression production with information about how +it is to be evaluated instead. We can do this with the seqstrict attribute.

+

Consider the following definition, once again equivalent to those above +(lesson-14-c.k):

+
k
module LESSON-14-C-SYNTAX + imports UNSIGNED-INT-SYNTAX + imports BOOL-SYNTAX + + syntax Exp ::= Int + | Bool + > left: Exp "+" Exp [seqstrict(exp; 1, 2)] + > left: Exp "&&" Exp [seqstrict(exp; 1, 2)] +endmodule + +module LESSON-14-C + imports LESSON-14-C-SYNTAX + imports INT + imports BOOL + + rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k> + rule <k> B1:Bool && B2:Bool => B1 andBool B2 ...</k> + + context alias [exp]: <k> HERE ...</k> + + syntax Bool ::= isKResult(K) [function, symbol] + rule isKResult(_:Int) => true + rule isKResult(_:Bool) => true + rule isKResult(_) => false [owise] +endmodule +
+

This definition has two important changes from the one above. The first is +that the individual context sentences have been removed and have been +replaced with a single context alias sentence. You may notice that this +sentence begins with an identifier in square brackets followed by a colon. This +syntax is a way of naming individual sentences in K for reference by the tool +or by other sentences. The context alias sentence also has a special variable +HERE.

+

The second is that the productions in LESSON-14-C-SYNTAX have been given a +seqstrict attribute. The value of this attribute has two parts. The first +is the name of a context alias sentence. The second is a comma-separated list +of integers. Each integer represents an index of a non-terminal in the +production, counting from 1. For each integer present, the compiler implicitly +generates a new context sentence according to the following rules:

+
    +
  1. The compiler starts by looking for the context alias sentence named. If +there is more than one, then one context sentence is created per +context alias sentence with that name.
  2. +
  3. For each context created, the variable HERE in the context alias is +substituted with an instance of the production the seqstrict attribute is +attached to. Each child of that production is a variable. The non-terminal +indicated by the integer offset of the seqstrict attribute is given the name +HOLE.
  4. +
  5. For each integer offset that appears earlier in the list than the one currently being +processed, the predicate isKResult(E) is conjoined and included +as a side condition, where E is the child of the production term at that +offset, counting from 1. For example, if the attribute lists 1, 2, then +the rule generated for the 2 will include isKResult(E1) where E1 is the +first child of the production.
  6. +
+

As you can see if you work through the process, the above code will ultimately +generate the same contexts present in LESSON-14-B.
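
Concretely, working through those steps for the "+" production with seqstrict(exp; 1, 2) yields the same two contexts we wrote by hand in LESSON-14-B (up to variable names):

  context <k> HOLE:Exp + _:Exp ...</k>
  context <k> E:Exp + HOLE:Exp ...</k>
    requires isKResult(E)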

+

Finally, note that there are a few minor syntactic conveniences provided by the +seqstrict attribute. First, in the special case of the context alias sentence +being <k> HERE ...</k>, you can omit both the context alias sentence +and the name from the seqstrict attribute.

+

Second, if the numbered list of offsets contains every non-terminal in the +production, it can be omitted from the attribute value.

+

Thus, we can finally produce the idiomatic K definition for this example +(lesson-14-d.k):

+
k
module LESSON-14-D-SYNTAX + imports UNSIGNED-INT-SYNTAX + imports BOOL-SYNTAX + + syntax Exp ::= Int + | Bool + > left: Exp "+" Exp [seqstrict] + > left: Exp "&&" Exp [seqstrict] +endmodule + +module LESSON-14-D + imports LESSON-14-D-SYNTAX + imports INT + imports BOOL + + rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k> + rule <k> B1:Bool && B2:Bool => B1 andBool B2 ...</k> + + syntax Bool ::= isKResult(K) [function, symbol] + rule isKResult(_:Int) => true + rule isKResult(_:Bool) => true + rule isKResult(_) => false [owise] +endmodule +
+

Exercise

+

Modify LESSON-14-D to add a production and rule to evaluate integer +subtraction.

+

Nondeterministic evaluation order with the strict attribute

+

Thus far, we have focused entirely on deterministic evaluation order. However, +not all languages are deterministic in the order they evaluate expressions. +For example, in C, the expression a() + b() + c() is guaranteed to parse +to (a() + b()) + c(), but it is not guaranteed that a will be called before +b, or that b will be called before c. In fact, this evaluation order is non-deterministic.

+

We can express non-deterministic evaluation orders with the strict attribute. +Its behavior is identical to the seqstrict attribute, except that step 3 in +the above list (with the side condition automatically added) does not take +place. In other words, if we wrote syntax Exp ::= Exp "+" Exp [strict] +instead of syntax Exp ::= Exp "+" Exp [seqstrict], it would generate the +following two contexts instead of the ones found in LESSON-14-B:

+
  context <k> _:Exp + HOLE:Exp ...</k>
+  context <k> HOLE:Exp + _:Exp ...</k>
+
+

As you can see, these contexts will generate heating rules that can both +apply to the same term. As a result, the choice of which heating rule +applies first is non-deterministic, and as we saw in Lesson 1.13, we can +get all possible behaviors by passing --search to krun.

+

Exercises

+
    +
  1. +

    Add integer division to LESSON-14-D. Make division and addition strict +instead of seqstrict, and write a rule evaluating integer division with a +side condition that the denominator is non-zero. Run krun --search on the +program 1 / 0 + 2 / 1 and observe all possible outputs of the program. How +many are there total, and why?

    +
  2. +
  3. +

    Rework your solution from Lesson 1.9, Exercise 2 to evaluate expressions from left to right using the seqstrict attribute.

    +
  4. +
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.15: Configuration Declarations and Cell Nesting.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/15_configurations/index.html b/k-distribution/k-tutorial/1_basic/15_configurations/index.html new file mode 100644 index 00000000000..54e39b31841 --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/15_configurations/index.html @@ -0,0 +1,671 @@ + + + + + + + + + + + + + + +Lesson 1.15: Configuration Declarations and Cell Nesting | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.15: Configuration Declarations and Cell Nesting

+

The purpose of this lesson is to explain how to store additional information +about the state of your interpreter by declaring cells using the +configuration sentence, as well as how to add additional inputs to your +definition.

+

Cells and Configuration Declarations

+

We have already covered the absolute basics of cells in K by looking at the +<k> cell. As explained in Lesson 1.13, the +<k> cell is available without being explicitly declared. It turns out this is +because, if the user does not explicitly specify a configuration sentence +anywhere in the main module of their definition, the configuration sentence +from the DEFAULT-CONFIGURATION module of +kast.md is imported +automatically. Here is what that sentence looks like:

+
  configuration <k> $PGM:K </k>
+
+

This configuration declaration declares a single cell, the <k> cell. It also +declares that at the start of rewriting, the contents of that cell should be +initialized with the value of the $PGM configuration variable. +Configuration variables function as inputs to krun. These terms are supplied +to krun in the form of ASTs parsed using a particular module. By default, the +$PGM configuration variable uses the main syntax module of the definition.

+

The cast on the configuration variable also specifies the sort that is used as +the entry point to the parser, in this case the K sort. It is often +useful to cast to other sorts there as well for better control over the accepted +language. The sort used for the $PGM variable is referred to as the start +symbol. During parsing, the default start symbol K subsumes all user-defined +sorts except for syntactic lists. These are excluded because they will always +produce an ambiguity error when parsing a single element.
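
For example, a definition whose programs are expressions of some sort Exp could restrict the parser's start symbol like this (a sketch; Exp here stands for whatever sort your main syntax module defines):

  configuration <k> $PGM:Exp </k>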

+

Note that we did not explicitly specify the $PGM configuration variable when +we invoked krun on a file. This is because krun handles the $PGM variable +specially, and allows you to pass the term for that variable via a file passed +as a positional argument to krun. We did, however, specify the PGM name +explicitly when we called krun with the -cPGM command line argument in +Lesson 1.2. This is the other, explicit, way of +specifying an input to krun.

+

This explains the most basic use of configuration declarations in K. We can, +however, declare multiple cells and multiple configuration variables. We can +also specify the initial values of cells statically, rather than dynamically +via krun.

+

For example, consider the following definition (lesson-15-a.k):

+
k
module LESSON-15-A-SYNTAX + imports INT-SYNTAX + + syntax Ints ::= List{Int,","} +endmodule + +module LESSON-15-A + imports LESSON-15-A-SYNTAX + imports INT + + configuration <k> $PGM:Ints </k> + <sum> 0 </sum> + + rule <k> I:Int, Is:Ints => Is ...</k> + <sum> SUM:Int => SUM +Int I </sum> +endmodule +
+

This simple definition takes a list of integers as input and sums them +together. Here we have declared two cells: <k> and <sum>. Unlike <k>, +<sum> does not get initialized via a configuration variable, but instead +is initialized statically with the value 0.

+

Note the rule in the second module: we have explicitly specified multiple +cells in a single rule. K will expect each of these cells to match in order for +the rule to apply.
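
For instance (a sketch of the expected final state; the surrounding top cell and the exact formatting of krun's output are omitted here), running this definition on a program file containing 1, 2, 3 should consume the list and leave the accumulated sum behind:

  <k>
    .Ints
  </k>
  <sum>
    6
  </sum>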

+

Here is a second example (lesson-15-b.k):

+
k
module LESSON-15-B-SYNTAX + imports INT-SYNTAX +endmodule + +module LESSON-15-B + imports LESSON-15-B-SYNTAX + imports INT + imports BOOL + + configuration <k> . </k> + <first> $FIRST:Int </first> + <second> $SECOND:Int </second> + + rule <k> . => FIRST >Int SECOND </k> + <first> FIRST </first> + <second> SECOND </second> +endmodule +
+

This definition takes two integers as command-line arguments and populates the +<k> cell with a Boolean indicating whether the first integer is greater than +the second. Notice that we have specified no $PGM configuration variable +here. As a result, we cannot invoke krun via the syntax krun $file. +Instead, we must explicitly pass values for each configuration variable via the +-cFIRST and -cSECOND command line flags. For example, if we invoke +krun -cFIRST=0 -cSECOND=1, we will get the value false in the K cell.

+

You can also specify both a $PGM configuration variable and other +configuration variables in a single configuration declaration, in which case +you would be able to initialize $PGM with either a positional argument or the +-cPGM command line flag, but the other configuration variables would need +to be explicitly initialized with -c.

+

Exercise

+

Modify your solution to Lesson 1.14, Exercise 2 to add a new cell with a +configuration variable of sort Bool. This variable should determine whether +the / operator is evaluated using /Int or divInt. Test that by specifying +different values for this variable, you can change the behavior of rounding on +division of negative numbers.

+

Cell Nesting

+

It is possible to nest cells inside one another. A cell that contains other +cells must contain only other cells, but in doing this, you are able to +create a hierarchical structure to the configuration. Consider the following +definition (lesson-15-c.k), which is equivalent to the one in LESSON-15-B:

+
k
module LESSON-15-C-SYNTAX + imports INT-SYNTAX +endmodule + +module LESSON-15-C + imports LESSON-15-C-SYNTAX + imports INT + imports BOOL + + configuration <T> + <k> . </k> + <state> + <first> $FIRST:Int </first> + <second> $SECOND:Int </second> + </state> + </T> + + rule <k> . => FIRST >Int SECOND </k> + <first> FIRST </first> + <second> SECOND </second> +endmodule +
+

Note that we have added some new cells to the configuration declaration: +the <T> cell wraps the entire configuration, and the <state> cell is +introduced around the <first> and <second> cells.

+

However, we have not changed the rule in this definition. This is because of +a concept in K called configuration abstraction. K allows you to specify +any number of cells in a rule (except zero) in any order you want, and K will +compile the rules into a form that matches the structure of the configuration +specified by the configuration declaration.

+

Here then, is how this rule would look after the configuration abstraction +has been resolved:

+
  rule <T>
+         <k> . => FIRST >Int SECOND </k>
+         <state>
+           <first> FIRST </first>
+           <second> SECOND </second>
+         </state>
+       </T>
+
+

In other words, K will complete cells to the top of the configuration by +inserting parent cells where appropriate based on the declared structure of +the configuration. This is useful because as a definition evolves, the +configuration may change, but you don't want to have to modify every single +rule each time. Thus, K follows the principle that you should only mention the +cells in a rule that are actually needed in order to accomplish its specific +goal. By following this best practice, you can significantly increase the +modularity of the definition and make it easier to maintain and modify.

+

Note that unlike top-level rewrite rules, cells that appear inside function +rules are not necessarily completed to the top of the configuration. They still +participate in cell completion in the sense that you can mention cell +structure loosely inside a function rule and it will be completed into the +correct cell structure specified by the configuration declaration. However, +they do not complete all the way to the top, instead completing only up to +the top-most cell mentioned in the rule.

+

For example, if I write the following function rule in the above definition:

+
  rule doStuff(<first> FIRST </first>) => FIRST
+
+

The function will only match on the first cell, rather than the entire +configuration. However, if we had mentioned a parent cell in the rule, it still +would have completed the children of that parent cell as needed to ensure that +the resulting term is well formed.

+

Exercise

+

Modify your definition from the previous exercise in this lesson to wrap the +two cells you have declared in a top cell <T>. You should not have to change +any other rules in the definition.

+

Cell Variables

+

Sometimes it is desirable to explicitly match a variable against certain +fragments of the configuration. Because K's configuration is hierarchical, +we can grab subsets of the configuration as if they were just another term. +However, configuration abstraction applies here as well. +In particular, for each cell you specify in a configuration declaration, a +unique sort is assigned for that cell with a single constructor (the cell +itself). The sort name is taken by removing all special characters, +capitalizing the first letter and each letter after a hyphen, and adding the +word Cell at the end. For example, in the above example, the cell sorts are +TCell, KCell, StateCell, FirstCell, and SecondCell. If we had declared +a cell as <first-number>, then the cell sort name would be FirstNumberCell.

+

You can explicitly reference a variable of one of these sorts anywhere you +might instead write that cell. For example, consider the following rule:

+
  rule <k> true => S </k>
+       (S:StateCell => <state>... .Bag ...</state>)
+
+

Here we have introduced two new concepts. The first is the variable of sort +StateCell, which matches the entire <state> part of the configuration. The +second is that we have introduced the concept of ... once again. When a cell +contains other cells, it is also possible to specify ... on either the left, +right, or both sides of the cell term. All three of these syntaxes are +equivalent in this case. When they appear on the left-hand side of a rule, they +indicate that we don't care what value any cells not explicitly named might +have. For example, we might write <state>... <first> 0 </first> ...</state> on +the left-hand side of a rule in order to indicate that we want to match the +rule when the <first> cell contains a zero, regardless of what the <second> +cell contains. If we had not included this ellipsis, it would have been a +syntax error, because K would have expected you to provide a value for each of +the child cells.

+

However, if, as in the example above, the ... appeared on the right-hand side +of a rule, this instead indicates that the cells not explicitly mentioned under +the cell should be initialized with their default value from the configuration +declaration. In other words, that rule will set the value of <first> and +<second> to zero.

+

You may note the presence of the phrase .Bag here. You can think of this as +the empty set of cells. It is used as the child of a cell when you want to +indicate that no cells should be explicitly named. We will cover other uses +of this term in later lessons.

+

Exercises

+
    +
  1. Modify the definition from the previous exercise in this lesson so that the +Boolean cell you created is initialized to false. Then add a production +syntax Stmt ::= Bool ";" Exp, and a rule that uses this Stmt to set the +value of the Boolean flag. Then add another production +syntax Stmt ::= "reset" ";" Exp which sets the value of the Boolean flag back +to its default value via a ... on the right-hand side. You will need to add +an additional cell around the Boolean cell to make this work.
  2. +
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.16: Maps, Semantic Lists, and Sets.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/16_collections/index.html b/k-distribution/k-tutorial/1_basic/16_collections/index.html new file mode 100644 index 00000000000..086034812af --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/16_collections/index.html @@ -0,0 +1,750 @@ + + + + + + + + + + + + + + +Lesson 1.16: Maps, Semantic Lists, and Sets | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.16: Maps, Semantic Lists, and Sets

+

The purpose of this lesson is to explain how to use the data structure sorts +provided by K: maps, lists, and sets.

+

Maps

+

The most frequently used type of data structure in K is the map. The sort +provided by K for this purpose is the Map sort, and it is provided in +domains.md in the MAP +module. This type is not (currently) polymorphic. All Map terms are maps that +map terms of sort KItem to other terms of sort KItem. A KItem can contain +a term of any sort except a K sequence. If you need to store such a term in a +map, you can always use a wrapper such as syntax KItem ::= kseq(K).

+

A Map pattern consists of zero or more map elements (as represented by the +symbol syntax Map ::= KItem "|->" KItem), mixed in any order, separated by +whitespace, with zero or one variables of sort Map. The empty map is +represented by .Map. If all of the bindings for the variables in the keys +of the map can be deterministically chosen, these patterns can be matched in +O(1) time. If they cannot, then each map element that cannot be +deterministically constructed contributes a single dimension of polynomial +time to the cost of the matching. In other words, a single such element is +linear, two are quadratic, three are cubic, etc.

+

Patterns like the above are the only type of Map pattern that can appear +on the left-hand-side of a rule. In other words, you are not allowed to write +a Map pattern on the left-hand-side with more than one variable of sort Map +in it. You are, however, allowed to write such patterns on the right-hand-side +of a rule. You can also write a function pattern in the key of a map element +so long as all the variables in the function pattern can be deterministically +chosen.

+

Note the meaning of matching on a Map pattern: a map pattern with no +variables of sort Map will match if the map being matched has exactly as +many bindings as |-> symbols in the pattern. It will then match if each +binding in the map pattern matches exactly one distinct binding in the map +being matched. A map pattern with one Map variable will also match any map +that contains such a map as a subset. The variable of sort Map will be bound +to whatever bindings are left over (.Map if there are no bindings left over).
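
To illustrate both kinds of patterns, here is a hypothetical helper (remove is a name made up for this sketch, not one of the built-in Map functions in domains.md) that deletes a binding that is known to be present:

  syntax Map ::= remove(Map, KItem) [function]
  // Key is determined by the second argument, so the lookup is O(1);
  // Rest is bound to all of the remaining bindings in the map.
  rule remove(Key |-> _ Rest:Map, Key) => Rest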

+

Here is an example of a simple definition that implements a very basic +variable declaration semantics using a Map to store the value of variables +(lesson-16-a.k):

+
k
module LESSON-16-A-SYNTAX + imports INT-SYNTAX + imports ID-SYNTAX + + syntax Exp ::= Id | Int + syntax Decl ::= "int" Id "=" Exp ";" [strict(2)] + syntax Pgm ::= List{Decl,""} +endmodule + +module LESSON-16-A + imports LESSON-16-A-SYNTAX + imports BOOL + + configuration <T> + <k> $PGM:Pgm </k> + <state> .Map </state> + </T> + + // declaration sequence + rule <k> D:Decl P:Pgm => D ~> P ...</k> + rule <k> .Pgm => . ...</k> + + // variable declaration + rule <k> int X:Id = I:Int ; => . ...</k> + <state> STATE => STATE [ X <- I ] </state> + + // variable lookup + rule <k> X:Id => I ...</k> + <state>... X |-> I ...</state> + + syntax Bool ::= isKResult(K) [symbol, function] + rule isKResult(_:Int) => true + rule isKResult(_) => false [owise] +endmodule +
+

There are several new features in this definition. First, note we import +the module ID-SYNTAX. This module is defined in domains.md and provides a +basic syntax for identifiers. We are using the Id sort provided by this +module in this definition to implement the names of program variables. This +syntax is only imported when parsing programs, not when parsing rules. Later in +this lesson we will see how to reference specific concrete identifiers in a +rule.

+

Second, we introduce a single new function over the Map sort. This function, +which is represented by the symbol +syntax Map ::= Map "[" KItem "<-" KItem "]", represents the map update +operation. Other functions over the Map sort can be found in domains.md.

+

Finally, we have used the ... syntax on a cell containing a Map. In this +case, the meanings of <state>... Pattern ...</state>, +<state>... Pattern </state>, and <state> Pattern ...</state> are the same: +each is equivalent to writing <state> (Pattern) _:Map </state>.

+
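
For example, the variable lookup rule from lesson-16-a.k above could equally have been written with an explicit anonymous Map variable instead of the ... syntax. This is only a sketch of the desugaring described above, not an additional rule to add to the definition:

+
// these two rules are equivalent ways of writing the same lookup
+rule <k> X:Id => I ...</k>
+     <state>... X |-> I ...</state>
+
+rule <k> X:Id => I ...</k>
+     <state> (X |-> I) _:Map </state>
+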

Consider the following program (a.decl):

+
int x = 0;
+int y = 1;
+int a = x;
+
+

If we run this program with krun, we will get the following result:

+
<T>
+  <k>
+    .
+  </k>
+  <state>
+    a |-> 0
+    x |-> 0
+    y |-> 1
+  </state>
+</T>
+
+

Note that krun has automatically sorted the collection for you. This doesn't +happen at runtime, so you still get the performance of a hash map, but it will +help make the output more readable.

+

Exercise

+

Create a sort Stmt that is a subsort of Decl. Create a production of sort +Stmt for variable assignment in addition to the variable declaration +production. Feel free to use the syntax syntax Stmt ::= Id "=" Exp ";". Write +a rule that implements variable assignment using a map update function. Then +write the same rule using a map pattern. Test your implementations with some +programs to ensure they behave as expected.

+

Semantic Lists

+

In a previous lesson, we explained how to represent lists in the AST of a +program. However, this is not the only context where lists can be used. We also +frequently use lists in the configuration of an interpreter in order to +represent certain types of program state. For this purpose, it is generally +useful to have an associative-list sort, rather than the cons-list sorts +provided in Lesson 1.12.

+

The type provided by K for this purpose is the List sort, and it is also +provided in domains.md, in the LIST module. This type is also not +(currently) polymorphic. Like Map, all List terms are lists of terms of the +KItem sort.

+

A List pattern in K consists of zero or more list elements (as represented by +the ListItem symbol), followed by zero or one variables of sort List, +followed by zero or more list elements. An empty list is represented by +.List. These patterns can be matched in O(log(N)) time. This is the only +type of List pattern that can appear on the left-hand-side of a rule. In +other words, you are not allowed to write a List pattern on the +left-hand-side with more than one variable of sort List in it. You are, +however, allowed to write such patterns on the right-hand-side of a rule.

+

Note the meaning of matching on a List pattern: a list pattern with no +variables of sort List will match if the list being matched has exactly as +many elements as ListItem symbols in the pattern. It will then match if each +element in sequence matches the pattern contained in the ListItem symbol. A +list pattern with one variable of sort List operates the same way, except +that it can match any list with at least as many elements as ListItem +symbols, so long as the prefix and suffix of the list match the patterns inside +the ListItem symbols. The variable of sort List will be bound to whatever +elements are left over (.List if there are no elements left over).

+

The ... syntax is allowed on cells containing lists as well. In this case, +the meaning of <cell>... Pattern </cell> is the same as +<cell> _:List (Pattern) </cell>, the meaning of <cell> Pattern ...</cell> +is the same as <cell> (Pattern) _:List</cell>. Because list patterns with +multiple variables of sort List are not allowed, it is an error to write +<cell>... Pattern ...</cell>.

+
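
For example, the return rule from lesson-16-b.k below could equivalently be written with an explicit anonymous List variable standing for the rest of the stack instead of the ... syntax; this is only a sketch of the equivalence, not an extra rule:

+
// equivalent ways of popping the top stack frame
+rule <k> return I:Int ; ~> _ => I ~> K </k>
+     <fstack> ListItem(stackFrame(K)) => .List ...</fstack>
+
+rule <k> return I:Int ; ~> _ => I ~> K </k>
+     <fstack> (ListItem(stackFrame(K)) => .List) _:List </fstack>
+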

Here is an example of a simple definition that implements a very basic +function-call semantics using a List as a function stack (lesson-16-b.k):

+
k
module LESSON-16-B-SYNTAX
+  imports INT-SYNTAX
+  imports ID-SYNTAX
+
+  syntax Exp ::= Id "(" ")" | Int
+  syntax Stmt ::= "return" Exp ";" [strict]
+  syntax Decl ::= "fun" Id "(" ")" "{" Stmt "}"
+  syntax Pgm ::= List{Decl,""}
+  syntax Id ::= "main" [token]
+endmodule
+
+module LESSON-16-B
+  imports LESSON-16-B-SYNTAX
+  imports BOOL
+  imports LIST
+
+  configuration <T>
+                  <k> $PGM:Pgm ~> main () </k>
+                  <functions> .Map </functions>
+                  <fstack> .List </fstack>
+                </T>
+
+  // declaration sequence
+  rule <k> D:Decl P:Pgm => D ~> P ...</k>
+  rule <k> .Pgm => . ...</k>
+
+  // function definitions
+  rule <k> fun X:Id () { S } => . ...</k>
+       <functions>... .Map => X |-> S ...</functions>
+
+  // function call
+  syntax KItem ::= stackFrame(K)
+  rule <k> X:Id () ~> K => S </k>
+       <functions>... X |-> S ...</functions>
+       <fstack> .List => ListItem(stackFrame(K)) ...</fstack>
+
+  // return statement
+  rule <k> return I:Int ; ~> _ => I ~> K </k>
+       <fstack> ListItem(stackFrame(K)) => .List ...</fstack>
+
+  syntax Bool ::= isKResult(K) [function, symbol]
+  rule isKResult(_:Int) => true
+  rule isKResult(_) => false [owise]
+endmodule
+
+

Notice that we have declared the production syntax Id ::= "main" [token]. +Since we use the ID-SYNTAX module, this declaration is necessary in order to +be able to refer to the main identifier directly in the configuration +declaration. Our <k> cell now contains a K sequence initially: first we +process all the declarations in the program, then we call the main function.

+

Consider the following program (foo.func):

+
fun foo() { return 5; }
+fun main() { return foo(); }
+
+

When we krun this program, we should get the following output:

+
<T>
+  <k>
+    5 ~> .
+  </k>
+  <functions>
+    foo |-> return 5 ;
+    main |-> return foo ( ) ;
+  </functions>
+  <fstack>
+    .List
+  </fstack>
+</T>
+
+

Note that we have successfully placed the value returned by the +main function on the <k> cell.

+

Exercise

+

Add a term of sort Id to the stackFrame operator to keep track of the +name of the function in that stack frame. Then write a function +syntax String ::= printStackTrace(List) that takes the contents of the +<fstack> cell and pretty prints the current stack trace. You can concatenate +strings with +String in the STRING module in domains.md, and you can +convert an Id to a String with the Id2String function in the ID module. +Test this function by creating a new expression that returns the current stack +trace as a string. Make sure to update isKResult and the Exp sort as +appropriate to allow strings as values.

+

Sets

+

The final primary data structure sort in K is a set, i.e., an idempotent +unordered collection where elements are deduplicated. The sort provided by K +for this purpose is the Set sort and it is provided in domains.md in the +SET module. Like maps and lists, this type is not (currently) polymorphic. +Like Map and List, all Set terms are sets of terms of the KItem sort.

+

A Set pattern has the exact same restrictions as a Map pattern, except that +its elements are treated like keys, and there are no values. It has the same +performance characteristics as well. However, syntactically it is more similar +to the List sort: an empty Set is represented by .Set, while a set element +is represented by the SetItem symbol.

+

Matching behaves similarly to the Map sort: a set pattern with no variables +of sort Set will match if the set has exactly as many elements as SetItem +symbols, and if each element pattern matches one distinct element in the set. +A set pattern with a variable of sort Set also matches any superset of such a set. +As with maps, the elements left over will be bound to the Set variable (or +.Set if no elements are left over).

+

Like Map, the ... syntax on a set is syntactic sugar for an anonymous +variable of sort Set.

+
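
For example, the variable lookup rule in the module below checks membership in the <declared> cell with a set pattern; as a sketch, the <declared> part of that rule is equivalent to writing an explicit anonymous Set variable:

+
// equivalent ways of requiring that X is a member of the declared set
+rule <k> X:Id => I ...</k>
+     <state>... X |-> I ...</state>
+     <declared>... SetItem(X) ...</declared>
+
+rule <k> X:Id => I ...</k>
+     <state>... X |-> I ...</state>
+     <declared> SetItem(X) _:Set </declared>
+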

Here is an example of a simple modification to LESSON-16-A which uses a Set +to ensure that variables are never declared more than once. In practice, you +would likely just use the in_keys symbol over maps to test for this, but +it's still useful as an example of sets in practice:

+
k
module LESSON-16-C-SYNTAX
+  imports LESSON-16-A-SYNTAX
+endmodule
+
+module LESSON-16-C
+  imports LESSON-16-C-SYNTAX
+  imports BOOL
+  imports SET
+
+  configuration <T>
+                  <k> $PGM:Pgm </k>
+                  <state> .Map </state>
+                  <declared> .Set </declared>
+                </T>
+
+  // declaration sequence
+  rule <k> D:Decl P:Pgm => D ~> P ...</k>
+  rule <k> .Pgm => . ...</k>
+
+  // variable declaration
+  rule <k> int X:Id = I:Int ; => . ...</k>
+       <state> STATE => STATE [ X <- I ] </state>
+       <declared> D => D SetItem(X) </declared>
+    requires notBool X in D
+
+  // variable lookup
+  rule <k> X:Id => I ...</k>
+       <state>... X |-> I ...</state>
+       <declared>... SetItem(X) ...</declared>
+
+  syntax Bool ::= isKResult(K) [symbol, function]
+  rule isKResult(_:Int) => true
+  rule isKResult(_) => false [owise]
+endmodule
+
+

Now if we krun a program containing duplicate declarations, it will get +stuck on the duplicate declaration.

+

Exercises

+
    +
  1. Modify your solution to Lesson 1.14, Exercise 2 and introduce the sorts +Decls, Decl, and Stmt which include variable and function declaration +(without function parameters), and return and assignment statements, as well +as call expressions. Use List and Map to implement these operators, making +sure to consider the interactions between components, such as saving and +restoring the environment of variables at each call site. Don't worry about +local function definitions or global variables for now. Make sure to test the +resulting interpreter.
  2. +
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.17: Cell Multiplicity and Cell Collections.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/17_cell_multiplicity/index.html b/k-distribution/k-tutorial/1_basic/17_cell_multiplicity/index.html new file mode 100644 index 00000000000..5d0aabdabba --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/17_cell_multiplicity/index.html @@ -0,0 +1,585 @@ + + + + + + + + + + + + + + +Lesson 1.17: Cell Multiplicity and Cell Collections | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.17: Cell Multiplicity and Cell Collections

+

The purpose of this lesson is to explain how you can create optional cells +and cells that repeat multiple times in a configuration using a feature called +cell multiplicity.

+

Cell Multiplicity

+

K allows you to specify attributes for cell productions as part of the syntax +of configuration declarations. Unlike regular productions, which use the [] +syntax for attributes, configuration cells use an XML-like attribute syntax:

+
configuration <k color="red"> $PGM:K </k>
+
+

This configuration declaration gives the <k> cell the color red during +unparsing using the color attribute as discussed in +Lesson 1.9.

+

However, in addition to the usual attributes for productions, there are some +other attributes that can be applied to cells with special meaning. One such +attribute is the multiplicity attribute. By default, each cell that is +declared occurs exactly once in every configuration term. However, using the +multiplicity attribute, this default behavior can be changed. There are two +values that this attribute can have: ? and *.

+

Optional cells

+

The first cell multiplicity we will discuss is ?. Similar to a regular +expression language, this attribute tells the compiler that this cell can +appear 0 or 1 times in the configuration. In other words, it is an +optional cell. By default, K does not create optional cells in the initial +configuration, unless that optional cell has a configuration variable inside +it. However, it is possible to override the default behavior and create that +cell initially by adding the additional cell attribute initial="".

+
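
For example, here is a hypothetical sketch (not part of any lesson file) of an optional cell that is nevertheless created in the initial configuration because of the initial="" attribute:

+
configuration <k> $PGM:K </k>
+              <optional multiplicity="?" initial=""> 0 </optional>
+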

K uses the .Bag symbol to represent the absence of any cells in a particular +rule. Consider the following module:

+
k
module LESSON-17-A
+  imports INT
+
+  configuration <k> $PGM:K </k>
+                <optional multiplicity="?"> 0 </optional>
+
+  syntax KItem ::= "init" | "destroy"
+
+  rule <k> init => . ...</k>
+       (.Bag => <optional> 0 </optional>)
+  rule <k> destroy => . ...</k>
+       (<optional> _ </optional> => .Bag)
+
+endmodule
+
+

In this definition, when the init symbol is executed, the <optional> cell +is added to the configuration, and when the destroy symbol is executed, it +is removed. Any rule that matches on that cell will only match if that cell is +present in the configuration.

+

Exercise

+

Create a simple definition with a Stmts sort that is a List{Stmt,""} and +a Stmt sort with the constructors +syntax Stmt ::= "enable" | "increment" | "decrement" | "disable". The +configuration should have an optional cell containing an integer; the cell +is created by the enable command, destroyed by the disable command, +and its value is incremented or decremented by the increment and decrement +commands.

+

Cell collections

+

The second type of cell multiplicity we will discuss is *. Similar to a +regular expression language, this attribute tells the compiler that this cell +can appear 0 or more times in the configuration. In other words, it is a +cell collection. Cells with multiplicity * must be the only child of +their parent cell. As a convention, the inner cell is usually named with the +singular form of what it contains, and the outer cell with the plural form, for +example, "thread" and "threads".

+

All cell collections are required to have the type attribute set to either +Set or Map. A Set cell collection is represented as a set and behaves +internally the same as the Set sort, although it actually declares a new +sort. A Map cell collection is represented as a Map in which the first +subcell of the cell collection is the key and the remaining cells are the +value.

+
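
Before the larger Map-type example below, here is a minimal hypothetical sketch of a Set-type cell collection; each <task> cell, taken as a whole, acts as one element of the underlying set:

+
configuration <tasks>
+                <task multiplicity="*" type="Set"> .K </task>
+              </tasks>
+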

For example, consider the following module:

+
k
module LESSON-17-B
+  imports INT
+  imports BOOL
+  imports ID-SYNTAX
+
+  syntax Stmt ::= Id "=" Exp ";" [strict(2)]
+                | "return" Exp ";" [strict]
+  syntax Stmts ::= List{Stmt,""}
+  syntax Exp ::= Id
+               | Int
+               | Exp "+" Exp [seqstrict]
+               | "spawn" "{" Stmts "}"
+               | "join" Exp ";" [strict]
+
+  configuration <threads>
+                  <thread multiplicity="*" type="Map">
+                    <id> 0 </id>
+                    <k> $PGM:K </k>
+                  </thread>
+                </threads>
+                <state> .Map </state>
+                <next-id> 1 </next-id>
+
+  rule <k> X:Id => I:Int ...</k>
+       <state>... X |-> I ...</state>
+  rule <k> X:Id = I:Int ; => . ...</k>
+       <state> STATE => STATE [ X <- I ] </state>
+  rule <k> S:Stmt Ss:Stmts => S ~> Ss ...</k>
+  rule <k> I1:Int + I2:Int => I1 +Int I2 ...</k>
+
+  rule <thread>...
+         <k> spawn { Ss } => NEXTID ...</k>
+       ...</thread>
+       <next-id> NEXTID => NEXTID +Int 1 </next-id>
+       (.Bag =>
+       <thread>
+         <id> NEXTID </id>
+         <k> Ss </k>
+       </thread>)
+
+  rule <thread>...
+         <k> join ID:Int ; => I ...</k>
+       ...</thread>
+       (<thread>
+         <id> ID </id>
+         <k> return I:Int ; ...</k>
+       </thread> => .Bag)
+
+  syntax Bool ::= isKResult(K) [function, symbol]
+  rule isKResult(_:Int) => true
+  rule isKResult(_) => false [owise]
+endmodule
+
+

This module implements a very basic fork/join semantics. The spawn expression +spawns a new thread to execute a sequence of statements and returns a thread +id, and the join statement waits until a thread executes return and then +returns the return value of the thread.

+

Note something quite novel here: the <k> cell is inside a cell of +multiplicity *. Since the <k> cell is just a regular cell (mostly), this +is perfectly allowable. Rules that don't mention a specific thread are +automatically completed to match any thread.

+

When you execute programs in this language, the cells in the cell collection +get sorted and printed like any other collection, but they still display like +cells. Rules in this language also benefit from all the structural power of +cells, allowing you to omit cells you don't care about or complete the +configuration automatically. This allows you to have the power of cells while +still being a collection under the hood.

+

Exercises

+
    +
  1. Modify the solution from Lesson 1.16, Exercise 1 so that the cell you use to +keep track of functions in a Map is now a cell collection. Run some programs +and compare how they get unparsed before and after this change.
  2. +
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.18: Term Equality and the Ternary Operator.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/18_equality_and_conditionals/index.html b/k-distribution/k-tutorial/1_basic/18_equality_and_conditionals/index.html new file mode 100644 index 00000000000..87bde96a6ca --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/18_equality_and_conditionals/index.html @@ -0,0 +1,491 @@ + + + + + + + + + + + + + + +Lesson 1.18: Term Equality and the Ternary Operator | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.18: Term Equality and the Ternary Operator

+

The purpose of this lesson is to introduce how to compare equality of terms in +K, and how to put conditional expressions directly into the right-hand side of +rules.

+

Term Equality

+

One major way you can compare whether two terms are equal in K is to simply +match both terms against a variable with the same name. This will only succeed +in matching if the two terms are structurally equal. However, sometimes this +is impractical, and it is useful to have access to a way to actually compare +whether two terms in K are equal. The operator for this is found in +domains.md in the K-EQUAL +module. The operator is ==K and takes two terms of sort K and returns a +Bool. It returns true if they are equal. This includes equality over builtin +types such as Map and Set where equality is not purely structural in +nature. However, it does not include any notion of semantic equality over +user-defined syntax. The corresponding inequality symbol is =/=K.

+
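
As a brief sketch (the sameTerm function is hypothetical and not part of any lesson file), ==K and =/=K are typically used in side conditions like the following:

+
syntax Bool ::= sameTerm(K, K) [function]
+// the two rules are distinguished purely by their equality side conditions
+rule sameTerm(A, B) => true  requires A ==K B
+rule sameTerm(A, B) => false requires A =/=K B
+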

Ternary Operator

+

One way to introduce conditional logic in K is to have two separate rules, +each with a side condition (or one rule with a side condition and another with +the owise attribute). However, sometimes it is useful to explicitly write +a conditional expression directly in the right-hand side of a rule. For this +purpose, K defines one more operator in the K-EQUAL module, which corresponds +to the usual ternary operator found in many languages. Here is an example of its +usage (lesson-18.k):

+
k
module LESSON-18
+  imports INT
+  imports BOOL
+  imports K-EQUAL
+
+  syntax Exp ::= Int | Bool | "if" "(" Exp ")" Exp "else" Exp [strict(1)]
+
+  syntax Bool ::= isKResult(K) [function, symbol]
+  rule isKResult(_:Int) => true
+  rule isKResult(_:Bool) => true
+
+  rule if (B:Bool) E1:Exp else E2:Exp => #if B #then E1 #else E2 #fi
+endmodule
+
+

Note the symbol on the right-hand side of the final rule. This symbol is +polymorphic: B must be of sort Bool, but E1 and E2 could have been +any sort so long as both were of the same sort, and the sort of the entire +expression becomes equal to that sort. K supports polymorphic built-in +operators, but does not yet allow users to write their own polymorphic +productions.

+

The behavior of this function is to evaluate the Boolean expression to a +Boolean, then pick one of the two children and return it based on whether the +Boolean is true or false. Please note that it is not a good idea to use this +symbol in cases where one or both of the children is potentially undefined +(for example, an integer expression that divides by zero). While the default +implementation is smart enough to only evaluate the branch that happens to be +picked, this will not be true when we begin to do program verification. If +you need short circuiting behavior, it is better to use a side condition.

+
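
For example, the final rule of LESSON-18 above could instead be written as the following pair of rules with side conditions (a sketch of the alternative just mentioned); only the selected branch ever appears on the right-hand side, so the other branch is never evaluated:

+
rule if (B:Bool) E1:Exp else _       => E1 requires B
+rule if (B:Bool) _       else E2:Exp => E2 requires notBool B
+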

Exercises

+
    +
  1. Write a function in K that takes two terms of sort K and returns an +Int: the Int should be 0 if the terms are equal and 1 if the terms are +unequal.
  2. Modify your solution to Lesson 1.16, Exercise 1 and introduce an if +Stmt to the syntax of the language, then implement it using the #if symbol. +Make sure to write tests for the resulting interpreter.
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.19: Debugging with GDB.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/19_debugging/index.html b/k-distribution/k-tutorial/1_basic/19_debugging/index.html new file mode 100644 index 00000000000..881f0ffc618 --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/19_debugging/index.html @@ -0,0 +1,920 @@ + + + + + + + + + + + + + + +Lesson 1.19: Debugging with GDB or LLDB | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.19: Debugging with GDB or LLDB

+

The purpose of this lesson is to teach how to debug your K interpreter using +the K-language support provided in GDB or +LLDB.

+

Caveats

+

This lesson has been written with GDB support on Linux in mind. Unfortunately, +on macOS, GDB has limited support. To address this, we have introduced early +experimental support for debugging with LLDB on macOS. In some cases, the +features supported by LLDB are slightly different from those supported by GDB; the +tutorial text will make this clear where necessary. If you are using macOS with an +LLVM version older than 15, you may need to upgrade LLVM to use LLDB +correctly. If you encounter an issue on either operating system, please open an +issue against the K repository.

+

Getting started

+

On Linux, you will need GDB in order to complete this lesson. If you do not +already have GDB installed, then do so. Steps to install GDB are outlined in +this GDB Tutorial.

+

On macOS, LLDB should already have been installed with K's build dependencies +(whether you have built K from source, or installed it using kup or Homebrew).

+

The first thing necessary in order to debug a K interpreter is to build the +interpreter with full debugging support enabled. This can be done relatively +simply. First, run kompile with the command line flag --enable-llvm-debug. +The resulting compiled K definition will be ready to support debugging.

+

Once you have a compiled K definition and a program you wish to debug, you can +start the debugger by passing the --debugger flag to krun. This will +automatically load the program you are executing into GDB and drop you into a +GDB shell ready to start executing the program.

+

As an example, consider the following K definition (lesson-19-a.k):

+
k
module LESSON-19-A
+  imports INT
+
+  rule I => I +Int 1
+    requires I <Int 100
+endmodule
+
+

If we compile this definition with kompile lesson-19-a.k --enable-llvm-debug, +and run the program 0 in the debugger with krun -cPGM=0 --debugger, we will +see the following output (roughly, and depending on which platform you are +using):

+

GDB / Linux

+
GNU gdb (Ubuntu 9.2-0ubuntu1~20.04) 9.2
+Copyright (C) 2020 Free Software Foundation, Inc.
+License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.
+Type "show copying" and "show warranty" for details.
+This GDB was configured as "x86_64-linux-gnu".
+Type "show configuration" for configuration details.
+For bug reporting instructions, please see:
+<http://www.gnu.org/software/gdb/bugs/>.
+Find the GDB manual and other documentation resources online at:
+    <http://www.gnu.org/software/gdb/documentation/>.
+
+For help, type "help".
+Type "apropos word" to search for commands related to "word"...
+Reading symbols from ./lesson-19-a-kompiled/interpreter...
+warning: File "/home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-a-kompiled/interpreter" auto-loading has been declined by your `auto-load safe-path' set to "$debugdir:$datadir/auto-load".
+To enable execution of this file add
+        add-auto-load-safe-path /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-a-kompiled/interpreter
+line to your configuration file "/home/dwightguth/.gdbinit".
+To completely disable this security protection add
+        set auto-load safe-path /
+line to your configuration file "/home/dwightguth/.gdbinit".
+For more information about this security protection see the
+"Auto-loading safe path" section in the GDB manual.  E.g., run from the shell:
+        info "(gdb)Auto-loading safe path"
+(gdb)
+
+

To take full advantage of the GDB features of K, you should follow the first +command listed in this output message and add the corresponding +add-auto-load-safe-path command to your ~/.gdbinit file as prompted. +Please note that the path will be different on your machine than the one +listed above. Adding directories to the "load safe path" effectively tells GDB +to trust those directories. All content under a given directory will be recursively +trusted, so if you want to avoid having to add paths to the "load safe path" every +time you kompile a different K definition, then you can just trust a minimal +directory containing all your kompiled files; however, do not choose a top-level +directory containing arbitrary files, as this amounts to trusting arbitrary files +and is a security risk. More info on the load safe path +can be found here.

+
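
Concretely, for the session shown above this means adding a line like the following to your ~/.gdbinit (the path is the one from the example output and will differ on your machine):

+
add-auto-load-safe-path /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-a-kompiled/interpreter
+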

LLDB / macOS

+
(lldb) target create "./lesson-19-a-kompiled/interpreter"
+warning: 'interpreter' contains a debug script. To run this script in this debug session:
+
+    command script import "/Users/brucecollie/code/scratch/lesson-19-a-kompiled/interpreter.dSYM/Contents/Resources/Python/interpreter.py"
+
+To run all discovered debug scripts in this session:
+
+    settings set target.load-script-from-symbol-file true
+
+Current executable set to '/Users/brucecollie/code/scratch/lesson-19-a-kompiled/interpreter' (x86_64).
+(lldb) settings set -- target.run-args  ".krun-2023-03-20-11-22-46-TcYt9ffhb2/tmp.in.RupiLwHNfn" "-1" ".krun-2023-03-20-11-22-46-TcYt9ffhb2/result.kore"
+(lldb) 
+
+

LLDB applies slightly different security policies from GDB. To load K's debugging +scripts for this session only, you can run the command script import line at +the LLDB prompt. The loaded scripts will not persist across debugging sessions +if you do this. It is also possible to configure LLDB to automatically load the +K scripts when an interpreter is started in LLDB; doing so requires a slightly +less broad permission than GDB does.

+

On macOS, the .dSYM directory that contains debugging symbols for an +executable can also contain Python scripts in Contents/Resources/Python. If +there is a Python script with a name matching the name of the current executable +(here, interpreter and interpreter.py), it will be automatically loaded if +the target.load-script-from-symbol-file setting is enabled. You can therefore add +the settings set command to your ~/.lldbinit without enabling full arbitrary +code execution, but you should be aware of the paths from which code can be +executed if you do so.

+
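
Concretely, this means adding the single line below (taken from the LLDB output above) to your ~/.lldbinit:

+
settings set target.load-script-from-symbol-file true
+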

Basic commands

+
+

LLDB Note: the k start and k step commands are currently not +implemented in the K LLDB scripts. To work around this limitation temporarily, +you can run process launch --stop-at-entry instead of k start. To emulate +k step, first run rbreak k_step once, then run continue instead of each +k step. We hope to address these limitations soon.

+
+

The most basic commands you can execute in the K GDB session are to run your +program or to step through it. The first can be accomplished using GDB's +built-in run command. This will automatically start the program and begin +executing it. It will continue until the program aborts or finishes, or the +debugger is interrupted with Ctrl-C.

+

Sometimes you want finer-grained control over how you proceed through the +program you are debugging. To step through the rule applications in your +program, you can use the k start and k step GDB commands.

+

k start is similar to the built-in start command in that it starts the +program and then immediately breaks before doing any work. However, unlike +the start command, which breaks as soon as the main function of +the program is entered, the k start command will initialize the rewriter, +evaluate the initial configuration, and break immediately prior to applying +any rewrite steps.

+

In the example above, here is what we see when we run the k start command:

+
Temporary breakpoint 1 at 0x239210
+Starting program: /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-a-kompiled/interpreter .krun-2021-08-13-14-10-50-sMwBkbRicw/tmp.in.01aQt85TaA -1 .krun-2021-08-13-14-10-50-sMwBkbRicw/result.kore
+[Thread debugging using libthread_db enabled]
+Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
+
+Temporary breakpoint 1, 0x0000000000239210 in main ()
+0x0000000000231890 in step (subject=<k>
+  0 ~> .
+</k>)
+(gdb)
+
+

As you can see, we are stopped at the step function in the interpreter. +This function is responsible for taking top-level rewrite steps. The subject +parameter to this function is the current K configuration.

+

We can step through K rewrite steps one at a time by running the k step +command. By default, this takes a single rewrite step (including any function +rule applications that are part of that step).

+

Here is what we see when we run that command:

+
Continuing.
+
+Temporary breakpoint -22, 0x0000000000231890 in step (subject=<k>
+  1 ~> .
+</k>)
+(gdb)
+
+

As we can see, we have taken a single rewrite step. We can also pass a number +to the k step command which indicates the number of rewrite steps to take.

+

Here is what we see if we run k step 10:

+
Continuing.
+
+Temporary breakpoint -23, 0x0000000000231890 in step (subject=<k>
+  11 ~> .
+</k>)
+(gdb)
+
+

As we can see, ten rewrite steps were taken.

+

Breakpoints

+

The next important step in debugging an application in GDB is to be able to +set breakpoints. Generally speaking, there are three types of breakpoints we +are interested in when debugging a K semantics: setting a breakpoint when a particular +function is called, setting a breakpoint when a particular rule is applied, +and setting a breakpoint when a side condition of a rule is evaluated.

+

The easiest way to do the first two things is to set a breakpoint on the +line of code containing the function or rule.

+

For example, consider the following K definition (lesson-19-b.k):

+
k
module LESSON-19-B
+  imports BOOL
+
+  syntax Bool ::= isBlue(Fruit) [function]
+  syntax Fruit ::= Blueberry() | Banana()
+  rule isBlue(Blueberry()) => true
+  rule isBlue(Banana()) => false
+
+  rule F:Fruit => isBlue(F)
+endmodule
+
+

Once this program has been compiled for debugging, we can run the program +Blueberry(). We can then set a breakpoint that stops when the isBlue +function is called with the following command in GDB:

+
break lesson-19-b.k:4
+
+

Similarly, in LLDB, run:

+
breakpoint set --file lesson-19-b.k --line 4
+
+

Here is what we see if we set this breakpoint and then run the interpreter:

+
(gdb) break lesson-19-b.k:4
+Breakpoint 1 at 0x231040: file /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b.k, line 4.
+(gdb) run
+Starting program: /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b-kompiled/interpreter .krun-2021-08-13-14-20-27-vXOQmV6lwS/tmp.in.fga98yqXlc -1 .krun-2021-08-13-14-20-27-vXOQmV6lwS/result.kore
+[Thread debugging using libthread_db enabled]
+Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
+
+Breakpoint 1, LblisBlue'LParUndsRParUnds'LESSON-19-B'Unds'Bool'Unds'Fruit (_1=Blueberry ( )) at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b.k:4
+4         syntax Bool ::= isBlue(Fruit) [function]
+(gdb)
+
+
(lldb) breakpoint set --file lesson-19-b.k --line 4
+Breakpoint 1: where = interpreter`LblisBlue'LParUndsRParUnds'LESSON-19-B'Unds'Bool'Unds'Fruit + 20 at lesson-19-b.k:4:19, address = 0x0000000100003ff4
+(lldb) run
+Process 50546 launched: '/Users/brucecollie/code/scratch/lesson-19-b-kompiled/interpreter' (x86_64)
+Process 50546 stopped
+* thread #1, queue = 'com.apple.main-thread', stop reason = breakpoint 1.1
+    frame #0: 0x0000000100003ff4 interpreter`LblisBlue'LParUndsRParUnds'LESSON-19-B'Unds'Bool'Unds'Fruit(_1=Blueberry ( )) at lesson-19-b.k:4:19
+   1   	module LESSON-19-B
+   2   	  imports BOOL
+   3   	
+-> 4   	  syntax Bool ::= isBlue(Fruit) [function]
+   5   	  syntax Fruit ::= Blueberry() | Banana()
+   6   	  rule isBlue(Blueberry()) => true
+   7   	  rule isBlue(Banana()) => false
+(lldb)
+
+

As we can see, we have stopped at the point where we are evaluating that +function. The value _1 that is a parameter to that function shows the +value passed to the function by the caller.

+

We can also break when the isBlue(Blueberry()) => true rule applies by simply +changing the line number to the line number of that rule:

+
(gdb) break lesson-19-b.k:6
+Breakpoint 1 at 0x2af710: file /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b.k, line 6.
+(gdb) run
+Starting program: /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b-kompiled/interpreter .krun-2021-08-13-14-32-36-7kD0ic7XwD/tmp.in.8JNH5Qtmow -1 .krun-2021-08-13-14-32-36-7kD0ic7XwD/result.kore
+[Thread debugging using libthread_db enabled]
+Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
+
+Breakpoint 1, apply_rule_138 () at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b.k:6
+6         rule isBlue(Blueberry()) => true
+(gdb)
+
+
(lldb) breakpoint set --file lesson-19-b.k --line 6
+Breakpoint 1: where = interpreter`apply_rule_140 at lesson-19-b.k:6:8, address = 0x0000000100004620
+(lldb) run
+Process 50681 launched: '/Users/brucecollie/code/scratch/lesson-19-b-kompiled/interpreter' (x86_64)
+Process 50681 stopped
+* thread #1, queue = 'com.apple.main-thread', stop reason = breakpoint 1.1
+    frame #0: 0x0000000100004620 interpreter`apply_rule_140 at lesson-19-b.k:6:8
+   3   	
+   4   	  syntax Bool ::= isBlue(Fruit) [function]
+   5   	  syntax Fruit ::= Blueberry() | Banana()
+-> 6   	  rule isBlue(Blueberry()) => true
+   7   	  rule isBlue(Banana()) => false
+   8   	
+   9   	  rule F:Fruit => isBlue(F)
+(lldb) 
+
+

We can also do the same with a top-level rule:

+
(gdb) break lesson-19-b.k:9
+Breakpoint 1 at 0x2aefa0: lesson-19-b.k:9. (2 locations)
+(gdb) run
+Starting program: /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b-kompiled/interpreter .krun-2021-08-13-14-33-13-9fC8Sz4aO3/tmp.in.jih1vtxSiQ -1 .krun-2021-08-13-14-33-13-9fC8Sz4aO3/result.kore
+[Thread debugging using libthread_db enabled]
+Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
+
+Breakpoint 1, apply_rule_107 (Var'Unds'DotVar0=<generatedCounter>
+  0
+</generatedCounter>, Var'Unds'DotVar1=., VarF=Blueberry ( )) at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-b.k:9
+9         rule F:Fruit => isBlue(F)
+(gdb)
+
+
(lldb) breakpoint set --file lesson-19-b.k --line 9
+Breakpoint 1: 2 locations.
+(lldb) run
+Process 50798 launched: '/Users/brucecollie/code/scratch/lesson-19-b-kompiled/interpreter' (x86_64)
+Process 50798 stopped
+* thread #1, queue = 'com.apple.main-thread', stop reason = breakpoint 1.1
+    frame #0: 0x0000000100003f2e interpreter`apply_rule_109(Var'Unds'DotVar0=<generatedCounter>
+  0
+</generatedCounter>, Var'Unds'DotVar1=., VarF=Blueberry ( )) at lesson-19-b.k:9:8
+   6   	  rule isBlue(Blueberry()) => true
+   7   	  rule isBlue(Banana()) => false
+   8   	
+-> 9   	  rule F:Fruit => isBlue(F)
+   10  	endmodule
+(lldb)  
+
+

Unlike the function rule above, we see several parameters to this function. +These parameters are the substitution that was matched for the rule. Variables only +appear in this substitution if they are actually used on the right-hand side +of the rule.

+

Advanced breakpoints

+

Sometimes it is inconvenient to set the breakpoint based on a line number.

+

It is also possible to set a breakpoint based on the rule label of a particular +rule. Consider the following definition (lesson-19-c.k):

+
k
module LESSON-19-C
+  imports INT
+  imports BOOL
+
+  syntax Bool ::= isEven(Int) [function]
+  rule [isEven]: isEven(I) => true requires I %Int 2 ==Int 0
+  rule [isOdd]: isEven(I) => false requires I %Int 2 =/=Int 0
+
+endmodule
+
+

We will run the program isEven(4). We can set a breakpoint for when a rule +applies by means of the MODULE-NAME.label.rhs syntax:

+
(gdb) break LESSON-19-C.isEven.rhs
+Breakpoint 1 at 0x2afda0: file /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c.k, line 6.
+(gdb) run
+Starting program: /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c-kompiled/interpreter .krun-2021-08-13-14-40-29-LNNT8YEZ61/tmp.in.ZG93vWCGGC -1 .krun-2021-08-13-14-40-29-LNNT8YEZ61/result.kore
+[Thread debugging using libthread_db enabled]
+Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
+
+Breakpoint 1, LESSON-19-C.isEven.rhs () at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c.k:6
+6         rule [isEven]: isEven(I) => true requires I %Int 2 ==Int 0
+(gdb)
+
+
(lldb) breakpoint set --name LESSON-19-C.isEven.rhs
+Breakpoint 1: where = interpreter`LESSON-19-C.isEven.rhs at lesson-19-c.k:6:18, address = 0x00000001000038e0
+(lldb) run
+Process 51205 launched: '/Users/brucecollie/code/scratch/lesson-19-c-kompiled/interpreter' (x86_64)
+Process 51205 stopped
+* thread #1, queue = 'com.apple.main-thread', stop reason = breakpoint 1.1
+    frame #0: 0x00000001000038e0 interpreter`LESSON-19-C.isEven.rhs at lesson-19-c.k:6:18
+   3   	  imports BOOL
+   4   	
+   5   	  syntax Bool ::= isEven(Int) [function]
+-> 6   	  rule [isEven]: isEven(I) => true requires I %Int 2 ==Int 0
+   7   	  rule [isOdd]: isEven(I) => false requires I %Int 2 =/=Int 0
+   8   	
+   9   	endmodule
+(lldb) 
+
+

We can also set a breakpoint for when a rule's side condition is evaluated +by means of the MODULE-NAME.label.sc syntax:

+
(gdb) break LESSON-19-C.isEven.sc
+Breakpoint 1 at 0x2afd70: file /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c.k, line 6.
+(gdb) run
+Starting program: /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c-kompiled/interpreter .krun-2021-08-13-14-41-48-1BoGfJRbYc/tmp.in.kg4F8cwfCe -1 .krun-2021-08-13-14-41-48-1BoGfJRbYc/result.kore
+[Thread debugging using libthread_db enabled]
+Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
+
+Breakpoint 1, LESSON-19-C.isEven.sc (VarI=4) at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c.k:6
+6         rule [isEven]: isEven(I) => true requires I %Int 2 ==Int 0
+(gdb) finish
+Run till exit from #0  LESSON-19-C.isEven.sc (VarI=4) at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c.k:6
+0x00000000002b2662 in LblisEven'LParUndsRParUnds'LESSON-19-C'Unds'Bool'Unds'Int (_1=4) at /home/dwightguth/kframework-5.0.0/k-distribution/k-tutorial/1_basic/19_debugging/lesson-19-c.k:5
+5         syntax Bool ::= isEven(Int) [function]
+Value returned is $1 = true
+(gdb)
+
+
(lldb) breakpoint set --name LESSON-19-C.isEven.sc
+Breakpoint 1: where = interpreter`LESSON-19-C.isEven.sc + 1 at lesson-19-c.k:6:18, address = 0x00000001000038c1
+(lldb) run
+Process 52530 launched: '/Users/brucecollie/code/scratch/lesson-19-c-kompiled/interpreter' (x86_64)
+Process 52530 stopped
+* thread #1, queue = 'com.apple.main-thread', stop reason = breakpoint 1.1
+    frame #0: 0x00000001000038c1 interpreter`LESSON-19-C.isEven.sc(VarI=0x0000000101800088) at lesson-19-c.k:6:18
+   3   	  imports BOOL
+   4   	
+   5   	  syntax Bool ::= isEven(Int) [function]
+-> 6   	  rule [isEven]: isEven(I) => true requires I %Int 2 ==Int 0
+   7   	  rule [isOdd]: isEven(I) => false requires I %Int 2 =/=Int 0
+   8   	
+   9   	endmodule
+(lldb) finish
+Process 52649 stopped
+* thread #1, queue = 'com.apple.main-thread', stop reason = step out
+Return value: (bool) $0 = true
+
+    frame #0: 0x00000001000069e5 interpreter`LblisEven'LParUndsRParUnds'LESSON-19-C'Unds'Bool'Unds'Int(_1=0x0000000101800088) at lesson-19-c.k:5:19
+   2   	  imports INT
+   3   	  imports BOOL
+   4   	
+-> 5   	  syntax Bool ::= isEven(Int) [function]
+   6   	  rule [isEven]: isEven(I) => true requires I %Int 2 ==Int 0
+   7   	  rule [isOdd]: isEven(I) => false requires I %Int 2 =/=Int 0
+   8
+(lldb)
+
+

Here we have used the built-in command finish to tell us whether the side +condition returned true or not. Note that once again, we see the substitution +that was matched from the left-hand side. Like before, a variable will only +appear here if it is used in the side condition.

+

Debugging rule matching

+

Sometimes it is useful to try to determine why a particular rule did or did +not apply. K provides some basic debugging commands which make it easier +to determine this.

+

Consider the following K definition (lesson-19-d.k):

+
k
module LESSON-19-D
+
+  syntax Foo ::= foo(Bar)
+  syntax Bar ::= bar(Baz) | bar2(Baz)
+  syntax Baz ::= baz() | baz2()
+
+  rule [baz]: foo(bar(baz())) => .K
+
+endmodule
+
+

Suppose we try to run the program foo(bar(baz2())). It is obvious from this +example why the rule in this definition will not apply. However, in practice, +such cases are not always obvious. You might look at a rule and not immediately +spot why it didn't apply on a particular term. For this reason, it can be +useful to get the debugger to provide a log about how it tried to match that +term. You can do this with the k match command. If you are stopped after +having run k start or k step, you can obtain this log for any rule after +any step by running the command k match MODULE.label subject for a particular +top-level rule label.

+

For example, with the baz rule above, we get the following output:

+
(gdb) k match LESSON-19-D.baz subject
+Subject:
+baz2 ( )
+does not match pattern:
+baz ( )
+
+
(lldb) k match LESSON-19-D.baz subject
+Subject:
+baz2 ( )
+does not match pattern:
+baz ( )
+
+

As we can see, it provided the exact subterm which did not match against the +rule, as well as the particular subpattern it ought to have matched against.

+

This command does not actually take any rewrite steps. In the event that +matching actually succeeds, you will still need to run the k step command +to advance to the next step.

+

Final notes

+

In addition to the functionality provided above, you have the full power of +GDB or LLDB at your disposal when debugging. Some features are not particularly +well-adapted to K code and may require more advanced knowledge of the +term representation or implementation to use effectively, but anything that +can be done in GDB or LLDB can in theory be done using this debugging functionality. +We suggest you refer to the +GDB Documentation or +LLDB Tutorial if you +want to try to do something and are unsure as to how.

+

Exercises

+
    +
  1. Compile your solution to Lesson 1.18, Exercise 2 with debugging support +enabled and step through several programs you have previously used to test. +Then set a breakpoint on the isKResult function and observe the state of the +interpreter when stopped at that breakpoint. Set a breakpoint on the rule for +addition and run a program that causes it to be stopped at that breakpoint. +Finally, step through the program until the addition symbol is at the top +of the K cell, and then use the k match command to report the reason why +the subtraction rule does not apply. You may need to modify the definition +to insert some rule labels.
  2. +
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.20: K Backends and the Haskell Backend.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/20_backends/index.html b/k-distribution/k-tutorial/1_basic/20_backends/index.html new file mode 100644 index 00000000000..87c6ceb6672 --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/20_backends/index.html @@ -0,0 +1,511 @@ + + + + + + + + + + + + + + +Lesson 1.20: K Backends and the Haskell Backend | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.20: K Backends and the Haskell Backend

+

The purpose of this lesson is to teach about the multiple backends of K, +in particular the Haskell Backend, which complements the backend we +have been using so far.

+

K Backends

+

Thus far, we have not discussed the distinction between the K frontend and +the K backends at all. We have simply assumed that if you run kompile on a +K definition, there will be a compiler backend that will allow you to execute +the K definition you have compiled.

+

K actually has multiple different backends. The one we have been using so far +implicitly, the default backend, is called the LLVM Backend. It is +designed to support efficient, optimized concrete execution and search. It +does this by compiling your K definition to LLVM bitcode and then using LLVM +to generate machine code for it, which is then linked and executed. +However, K is a formal methods toolkit at the end of the day, and the primary +goal many people have when defining a programming language in K is to +ultimately be able to perform more advanced verification on programs in their +programming language.

+

It is for this purpose that K also provides the Haskell Backend, so called +because it is implemented in Haskell. While we will cover the features of the +Haskell Backend in more detail in the next two lessons, the important thing to +understand is that it is a separate backend which is optimized for more formal +reasoning about programming languages. While it is capable of performing +concrete execution, it does not do so as efficiently as the LLVM Backend. +In exchange, it provides more advanced features.

+

Choosing a backend

+

You can choose which backend to use to compile a K definition by means of the +--backend flag to kompile. By default, if you do not specify this flag, it +is equivalent to if you had specified --backend llvm. However, to use the +Haskell Backend instead, you can simply say kompile --backend haskell on a +particular K definition.

+

As an example, here is a simple K definition that we have seen before in the +previous lesson (lesson-20.k):

+
k
module LESSON-20
+  imports INT
+
+  rule I => I +Int 1
+    requires I <Int 100
+endmodule
+
+

Previously we compiled this definition using the LLVM Backend, but if we +instead execute the command kompile lesson-20.k --backend haskell, we +will get an interpreter for this K definition that is implemented in Haskell +instead. Unlike the default LLVM Backend, the Haskell Backend is not a +compiler per se. It does not generate new Haskell code corresponding to your +programming language and then compile and execute it. Instead, it is an +interpreter, written in Haskell, which reads the IR generated by kompile and +is capable of interpreting any K definition.

+

Note that on arm64 macOS (Apple Silicon), there is a known issue with the Compact +library that causes crashes in the Haskell backend. Pass the additional flag +--no-haskell-binary to kompile to resolve this. +This flag is also needed when using krun.

+
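
On such machines, the commands would look roughly as follows (a sketch; the program 0 matches the exercise below):

+
kompile lesson-20.k --backend haskell --no-haskell-binary
+krun -cPGM=0 --no-haskell-binary
+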

Exercise

+

Try running the program 0 in this K definition on the Haskell Backend and +compare the final configuration to what you would get compiling the same +definition with the LLVM Backend.

+

Legacy backends

+

As a quick note, K does provide one other backend, which exists primarily as +legacy code which should be considered deprecated. This is the +Java Backend. The Java Backend is essentially a precursor to the Haskell +Backend. We will not cover this backend in any detail since it is deprecated, +but we still mention it here for the purposes of understanding.

+

Exercises

+
    +
  1. Compile your solution to Lesson 1.18, Exercise 2 with the Haskell Backend +and execute some programs. Compare the resulting configurations with the +output of the same program on the LLVM Backend. Note that if you are getting +different behaviors on the Haskell backend, you might have some luck debugging +by passing --search to krun when using the LLVM backend.
  2. +
+

Next lesson

+

Once you have completed the above exercises, you can continue to +Lesson 1.21: Unification and Symbolic Execution.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/21_symbolic_execution/index.html b/k-distribution/k-tutorial/1_basic/21_symbolic_execution/index.html new file mode 100644 index 00000000000..72e4a54a980 --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/21_symbolic_execution/index.html @@ -0,0 +1,591 @@ + + + + + + + + + + + + + + +Lesson 1.21: Unification and Symbolic Execution | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.21: Unification and Symbolic Execution

+

The purpose of this lesson is to teach the basic concepts of symbolic execution +in order to introduce the unique capabilities of the Haskell Backend at a +conceptual level.

+

Symbolic Execution

+

Thus far, all of the programs we have run using K have been concrete +configurations. What this means is that the configuration we use to initialize +the K rewrite engine is concrete; in other words, it contains no logical +variables. The LLVM Backend is a concrete execution engine, meaning that +it is only capable of rewriting concrete configurations.

+

By contrast, the Haskell Backend performs symbolic execution, which is +capable of rewriting any configuration, including those where parts of the +configuration are symbolic, i.e., they contain variables or uninterpreted +functions.

+

Unification

+

Previously, we have introduced the concept that K rewrite rules operate by +means of pattern matching: the current configuration being rewritten is pattern +matched against the left-hand side of the rewrite rule, and the substitution +is used in order to construct a new term from the right-hand side. In symbolic +execution, we use +unification +instead of pattern matching. To summarize, unification behaves akin to a +two-way pattern matching where both the configuration and the left-hand side +of the rule can contain variables, and the algorithm generates a +most general unifier containing substitutions for the variables in both +which will make both terms equal.

+
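
As a small illustration (a sketch that reuses the second rule of LESSON-21 shown later in this lesson, not an additional definition), consider unifying a symbolic configuration with the left-hand side of a rule:

+
configuration being rewritten:  <k> ?Y:Int ~> . </k>
+rule left-hand side:            <k> X:Int => 5 ... </k> requires X >=Int 10
+
+Pattern matching alone would fail, because ?Y is not a concrete integer.
+Unification instead produces the most general unifier { X |-> ?Y } and
+records the constraint ?Y >=Int 10 on the resulting configuration.
+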

Feasibility

+

Unification by itself cannot completely solve the problem of symbolic +execution. One task symbolic execution must perform is to identify whether +a particular symbolic term is feasible, that is to say, that there actually +exists a concrete instantiation of that term such that all the logical +constraints on that term can actually be satisfied. The Haskell Backend +delegates this task to Z3, an +SMT solver. +This solver is used to periodically trim configurations that are determined +to be mathematically infeasible.

+

Symbolic terms

+

The final component of symbolic execution consists of the task of introducing +symbolic terms into the configuration. This can be done in one of two different +ways. First, the term being passed to krun can actually be symbolic. This +is less frequently used because it requires the user to construct an AST +that contains variables, something which our current parsing capabilities are +not well-equipped to do. The second, more common, way of introducing symbolic +terms into a configuration consists of writing rules where there exists an +existentially quantified variable on the right-hand side of the rule that does +not exist on the left-hand side of the rule.

+

In order to prevent users from writing such rules by accident, K requires +that such variables begin with the ? prefix. For example, here is a rule +that rewrites a constructor foo to a symbolic integer:

+
rule <k> foo => ?X:Int ...</k>
+
+

When this rule applies, a fresh variable is introduced to the configuration, which +then is unified against the rules that might apply in order to symbolically +execute that configuration.

+

ensures clauses

+

We also introduce here a new feature of K rules that applies when a rule +has this type of variable on the right-hand side: the ensures clause. +An ensures clause is similar to a requires clause and can appear after +a rule body, or after a requires clause. The ensures clause is used to +introduce constraints that might apply to the variable that was introduced by +that rule. For example, we could write the rule above with the additional +constraint that the symbolic integer that was introduced must be less than +five, by means of the following rule:

+
rule <k> foo => ?X:Int ...</k> ensures ?X <Int 5
+
+

Putting it all together

+

Putting all these pieces together, it is possible to use the Haskell Backend +to perform symbolic reasoning about a particular K module, determining all the +possible states that can be reached by a symbolic configuration.

+

For example, consider the following K definition (lesson-21.k):

+
k
module LESSON-21
+  imports INT
+
+  rule <k> 0 => ?X:Int ... </k> ensures ?X =/=Int 0
+  rule <k> X:Int => 5 ... </k> requires X >=Int 10
+endmodule
+
+

When we symbolically execute the program 0, we get the following output +from the Haskell Backend:

+
    <k>
+      5 ~> .
+    </k>
+  #And
+    {
+      true
+    #Equals
+      ?X:Int >=Int 10
+    }
+  #And
+    #Not ( {
+      ?X:Int
+    #Equals
+      0
+    } )
+#Or
+    <k>
+      ?X:Int ~> .
+    </k>
+  #And
+    #Not ( {
+      true
+    #Equals
+      ?X:Int >=Int 10
+    } )
+  #And
+    #Not ( {
+      ?X:Int
+    #Equals
+      0
+    } )
+
+

Note some new symbols introduced by this configuration: #And, #Or, and +#Equals. While andBool, orBool, and ==K represent functions of sort +Bool, #And, #Or, and #Equals are matching logic connectives. We +will discuss matching logic in more detail later in the tutorial, but the basic +idea is that these symbols represent Boolean operators over the domain of +configurations and constraints, as opposed to over the Bool sort.

+

Notice that the configuration listed above is a disjunction of conjunctions. +This is the most common form of output that can be produced by the Haskell +Backend. In this case, each conjunction consists of a configuration and a set +of constraints. What this conjunction describes, essentially, is a +configuration and a set of information that was derived to be true while +rewriting that configuration.

+

Similar to how we saw --search in a previous lesson, the reason we have +multiple disjuncts is because there are multiple possible output states +for this program, depending on whether or not the second rule applied. In the +first case, we see that ?X is greater than or equal to 10, so the second rule +applied, rewriting the symbolic integer to the concrete integer 5. In the +second case, we see that the second rule did not apply because ?X is less +than 10. Moreover, because of the ensures clause on the first rule, we know +that ?X is not zero, therefore the first rule will not apply a second time. +If we had omitted this constraint, we would have ended up infinitely applying +the first rule, leading to krun not terminating.

+

In the next lesson, we will cover how symbolic execution forms the backbone +of deductive program verification in K and how we can use K to prove programs +correct against a specification.

+

Exercises

+
    +
  1. Create another rule in LESSON-21 that rewrites odd integers greater than +ten to a symbolic even integer less than 10 and greater than 0. This rule will +now apply nondeterministically along with the existing rules. Predict what the +resulting output configuration will be from rewriting 0 after adding this +rule. Then run the program and see whether your prediction is correct.
  2. +
+

Once you have completed the above exercises, you can continue to +Lesson 1.22: Basics of Deductive Program Verification using K.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/22_proofs/index.html b/k-distribution/k-tutorial/1_basic/22_proofs/index.html new file mode 100644 index 00000000000..1fca0bb7179 --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/22_proofs/index.html @@ -0,0 +1,827 @@ + + + + + + + + + + + + + + +Lesson 1.22: Basics of Deductive Program Verification using K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 1.22: Basics of Deductive Program Verification using K

+

In this lesson, you will familiarize yourself with the basics of using K for +deductive program verification.

+

1. Setup: Simple Programming Language with Function Calls

+

We base this lesson on a simple programming language with functions, +assignment, if conditionals, and while loops. Take your time to study its +formalization below (lesson-22.k):

+
module LESSON-22-SYNTAX
+    imports INT-SYNTAX
+    imports BOOL-SYNTAX
+    imports ID-SYNTAX
+
+    syntax Exp ::= IExp | BExp
+
+    syntax IExp ::= Id | Int
+
+    syntax KResult ::= Int | Bool | Ints
+
+    // Take this sort structure:
+    //
+    //     IExp
+    //    /    \
+    // Int      Id
+    //
+    // Through the List{_, ","} functor.
+    // Must add a `Bot`, for a common subsort for the empty list.
+
+    syntax Bot
+    syntax Bots ::= List{Bot, ","} [klabel(exps)]
+    syntax Ints ::= List{Int, ","} [klabel(exps)]
+                  | Bots
+    syntax Ids  ::= List{Id, ","}  [klabel(exps)]
+                  | Bots
+    syntax Exps ::= List{Exp, ","} [klabel(exps), seqstrict]
+                  | Ids | Ints
+
+    syntax IExp ::= "(" IExp ")" [bracket]
+                  | IExp "+" IExp [seqstrict]
+                  | IExp "-" IExp [seqstrict]
+                  > IExp "*" IExp [seqstrict]
+                  | IExp "/" IExp [seqstrict]
+                  > IExp "^" IExp [seqstrict]
+                  | Id "(" Exps ")" [strict(2)]
+
+    syntax BExp ::= Bool
+
+    syntax BExp ::= "(" BExp ")" [bracket]
+                  | IExp "<=" IExp [seqstrict]
+                  | IExp "<"  IExp [seqstrict]
+                  | IExp ">=" IExp [seqstrict]
+                  | IExp ">"  IExp [seqstrict]
+                  | IExp "==" IExp [seqstrict]
+                  | IExp "!=" IExp [seqstrict]
+
+    syntax BExp ::= BExp "&&" BExp
+                  | BExp "||" BExp
+
+    syntax Stmt ::=
+         Id "=" IExp ";" [strict(2)]                        // Assignment
+       | Stmt Stmt [left]                                   // Sequence
+       | Block                                              // Block
+       | "if" "(" BExp ")" Block "else" Block [strict(1)]   // If conditional
+       | "while" "(" BExp ")" Block                         // While loop
+       | "return" IExp ";"                    [seqstrict]   // Return statement
+       | "def" Id "(" Ids ")" Block                         // Function definition
+
+    syntax Block ::=
+         "{" Stmt "}"    // Block with statement
+       | "{" "}"         // Empty block
+endmodule
+
+module LESSON-22
+    imports INT
+    imports BOOL
+    imports LIST
+    imports MAP
+    imports LESSON-22-SYNTAX
+
+    configuration
+      <k> $PGM:Stmt </k>
+      <store> .Map </store>
+      <funcs> .Map </funcs>
+      <stack> .List </stack>
+
+ // -----------------------------------------------
+    rule <k> I1 + I2 => I1 +Int I2 ... </k>
+    rule <k> I1 - I2 => I1 -Int I2 ... </k>
+    rule <k> I1 * I2 => I1 *Int I2 ... </k>
+    rule <k> I1 / I2 => I1 /Int I2 ... </k>
+    rule <k> I1 ^ I2 => I1 ^Int I2 ... </k>
+
+    rule <k> I:Id => STORE[I] ... </k>
+         <store> STORE </store>
+
+ // ------------------------------------------------
+    rule <k> I1 <= I2 => I1  <=Int I2 ... </k>
+    rule <k> I1  < I2 => I1   <Int I2 ... </k>
+    rule <k> I1 >= I2 => I1  >=Int I2 ... </k>
+    rule <k> I1  > I2 => I1   >Int I2 ... </k>
+    rule <k> I1 == I2 => I1  ==Int I2 ... </k>
+    rule <k> I1 != I2 => I1 =/=Int I2 ... </k>
+
+    rule <k> B1 && B2 => B1 andBool B2 ... </k>
+    rule <k> B1 || B2 => B1  orBool B2 ... </k>
+
+    rule <k> S1:Stmt S2:Stmt => S1 ~> S2 ... </k>
+
+    rule <k> ID = I:Int ; => . ... </k>
+         <store> STORE => STORE [ ID <- I ] </store>
+
+    rule <k> { S } => S ... </k>
+    rule <k> {   } => . ... </k>
+
+    rule <k> if (true)   THEN else _ELSE => THEN ... </k>
+    rule <k> if (false) _THEN else  ELSE => ELSE ... </k>
+
+    rule <k> while ( BE ) BODY => if ( BE ) { BODY while ( BE ) BODY } else { } ... </k>
+
+    rule <k> def FNAME ( ARGS ) BODY => . ... </k>
+         <funcs> FS => FS [ FNAME <- def FNAME ( ARGS ) BODY ] </funcs>
+
+    rule <k> FNAME ( IS:Ints ) ~> CONT => #makeBindings(ARGS, IS) ~> BODY </k>
+         <funcs> ... FNAME |-> def FNAME ( ARGS ) BODY ... </funcs>
+         <store> STORE => .Map </store>
+         <stack> .List => ListItem(state(CONT, STORE)) ... </stack>
+
+    rule <k> return I:Int ; ~> _ => I ~> CONT </k>
+         <stack> ListItem(state(CONT, STORE)) => .List ... </stack>
+         <store> _ => STORE </store>
+
+    rule <k> return I:Int ; ~> . => I </k>
+         <stack> .List </stack>
+
+    syntax KItem ::= #makeBindings(Ids, Ints)
+                   | state(continuation: K, store: Map)
+ // ----------------------------------------------------
+    rule <k> #makeBindings(.Ids, .Ints) => . ... </k>
+    rule <k> #makeBindings((I:Id, IDS => IDS), (IN:Int, INTS => INTS)) ... </k>
+         <store> STORE => STORE [ I <- IN ] </store>
+endmodule
+
+

Next, compile this example using kompile lesson-22.k --backend haskell. If +your processor is an Apple Silicon processor, add the --no-haskell-binary +flag if the compilation fails.
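In other words, the compilation step is the following (the second command is only needed as a fallback):

kompile lesson-22.k --backend haskell
# on Apple Silicon, add the extra flag only if the command above fails:
kompile lesson-22.k --backend haskell --no-haskell-binary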

+

2. Setup: Proof Environment

+

Next, take the following snippet of K code and save it in lesson-22-spec.k. +This is a skeleton of the proof environment, and we will complete it as the +lesson progresses.

+
requires "lesson-22.k"
+requires "domains.md"
+
+module LESSON-22-SPEC-SYNTAX
+    imports LESSON-22-SYNTAX
+
+endmodule
+
+module VERIFICATION
+    imports K-EQUAL
+    imports LESSON-22-SPEC-SYNTAX
+    imports LESSON-22
+    imports MAP-SYMBOLIC
+
+endmodule
+
+module LESSON-22-SPEC
+    imports VERIFICATION
+
+endmodule
+
+

3. Claims

+
    +
  1. The first claim we will ask K to prove is that 3 + 4, in fact, equals 7. +Claims are stated using the claim keyword, followed by the claim +statement:
+
claim <k> 3 + 4 => 7 ... </k>
+
+

Add this claim to the LESSON-22-SPEC module and run the K prover using the +command kprove lesson-22-spec.k. You should get back the output #Top, +which denotes the Matching Logic equivalent of true and means, in this +context, that all claims have been proven correctly.

+
    +
  2. The second claim reasons about the if statement that has a concrete condition:
+
claim <k> if ( 3 + 4 == 7 ) {
+            $a = 1 ;
+            } else {
+            $a = 2 ;
+            }
+        => . ... </k>
+        <store> STORE => STORE [ $a <- 1 ] </store>
+
+

stating that the given program terminates (=> .), and when it does, the value +of the variable $a is set to 1, meaning that the execution will have taken +the then branch. Add this claim to the LESSON-22-SPEC module, but also add

+
syntax Id ::= "$a" [token]
+
+

to the LESSON-22-SPEC-SYNTAX module in order to declare $a as a token so +that it can be used as a program variable. Re-run the K prover, which should +again return #Top.
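With that addition in place, the spec's syntax module would look roughly as follows (a sketch; only the token production is new):

module LESSON-22-SPEC-SYNTAX
    imports LESSON-22-SYNTAX

    syntax Id ::= "$a" [token]
endmodule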

+
    +
  3. Our third claim demonstrates how to reason about both branches of an if +statement at the same time:
+
claim <k> $a = A:Int ; $b = B:Int ;
+          if ($a < $b) {
+            $c = $b ;
+          } else {
+            $c = $a ;
+          }
+        => . ... </k>
+        <store> STORE => STORE [ $a <- A ] [ $b <- B ] [ $c <- ?C:Int ] </store>
+    ensures (?C ==Int A) orBool (?C ==Int B)
+
+

The program in question first assigns symbolic integers A and B to program +variables $a and $b, respectively, and then executes the given if +statement, which has a symbolic condition (A < B), updating the value of the +program variable $c in both branches. The specification we give states that +the if statement terminates, with $a and $b updated, respectively, to A +and B, and $c updated to some symbolic integer value ?C. Via the +ensures clause, which is used to specify additional constraints that hold +after execution, we also state that this existentially quantified ?C equals +either A or B.

+

Add the productions declaring $b and $c as tokens to the +LESSON-22-SPEC-SYNTAX module, the claim to the LESSON-22-SPEC module, run +the K prover again, and observe the output, which should not be #Top this +time. This means that K was not able to prove the claim, and we now need to +understand why. We do so by examining the output, which should look as follows:

+
    (InfoReachability) while checking the implication:
+    The configuration's term unifies with the destination's term,
+    but the implication check between the conditions has failed.
+
+  #Not (
+    #Exists ?C . {
+        STORE [ $a <- A:Int ] [ $b <- B:Int ] [ $c <- ?C:Int ]
+      #Equals
+        STORE [ $a <- A:Int ] [ $b <- B:Int ] [ $c <- B:Int ]
+    }
+  #And
+    {
+      true
+    #Equals
+      ?C ==Int A orBool ?C ==Int B
+    }
+  )
+#And
+  <generatedTop>
+    <k>
+      _DotVar1
+    </k>
+    <store>
+      STORE [ $a <- A:Int ] [ $b <- B:Int ] [ $c <- B:Int ]
+    </store>
+    <funcs>
+      _Gen3
+    </funcs>
+    <stack>
+      _Gen5
+    </stack>
+  </generatedTop>
+#And
+  {
+    true
+  #Equals
+    A <Int B
+  }
+
+

This output starts with a message telling us at which point the proof failed, +followed by the final state, which consists of three parts: some negative +Matching Logic (ML) constraints, the final configuration (<generatedTop> ... </generatedTop>), and some positive ML constraints. Generally speaking, +these positive and negative constraints could arise from various sources, +such as (but not limited to) branches taken by the execution +(e.g. { true #Equals A <Int B } or #Not ( { true #Equals A <Int B } ) ), +or ensures constraints.

+

First, we examine the message:

+
(InfoReachability) while checking the implication:
+The configuration's term unifies with the destination's term,
+but the implication check between the conditions has failed.
+
+

which tells us that the structure of the final configuration is as expected, +but that some of the associated constraints cannot be proven. We next look at +the final configuration, in which the relevant item is the <store> ... </store> cell, because it is the only one that we are reasoning about. By +inspecting its contents:

+
STORE [ $a <- A:Int ] [ $b <- B:Int ] [ $c <- B:Int ]
+
+

we see that we should be within the constraints of the ensures, since the +value of $c in the store equals B in this branch. We next examine the +negative and positive constraints of the output and, more often than not, the +goal is to instruct K how to use the information from the final configuration +and the positive constraints to falsify one of the negative constraints. This +is done through simplifications.

+

So, the positive constraint that we have is

+
{ true #Equals A <Int B }
+
+

meaning that A <Int B holds. Given the analysed program, this tells us that +we are in the then branch of the if. The negative constraint is

+
  #Not (
+    #Exists ?C . {
+        STORE [ $a <- A:Int ] [ $b <- B:Int ] [ $c <- ?C:Int ]
+      #Equals
+        STORE [ $a <- A:Int ] [ $b <- B:Int ] [ $c <- B:Int ]
+    }
+  #And
+    { true #Equals ?C ==Int A orBool ?C ==Int B }
+  )
+
+

and we observe, from the first equality, that the existential ?C should be +instantiated with B. This would make both branches of the #And true, +falsifying the outside #Not. We just need to show K how to conclude that +?C ==Int B. We do so by introducing the following simplification into the +VERIFICATION module:

+
rule { M:Map [ K <- V ] #Equals M [ K <- V' ] } => { V #Equals V' } [simplification]
+
+

which formalizes our internal understanding of ?C ==Int B. The rule states +that when we update the same key in the same map with two values, and the +resulting maps are equal, then the two values must be equal as well. The +[simplification] attribute tells K to use this rule to simplify the +state when trying to prove claims. Like function rules, simplification rules +do not complete to the top of the configuration, but instead apply anywhere +their left-hand-side matches. Re-run the K prover, which should now return +#Top, indicating that K was able to use the simplification and prove the +required claims.

+
    +
  4. Next, we show how to state and prove properties of while loops. In +particular, we consider the following loop
+
claim
+    <k>
+        while ( 0 < $n ) {
+            $s = $s + $n;
+            $n = $n - 1;
+            } => . ...
+    </k>
+    <store>
+        $s |-> (S:Int => S +Int ((N +Int 1) *Int N /Int 2))
+        $n |-> (N:Int => 0)
+    </store>
+    requires N >=Int 0
+
+

which adds the sum of the first $n integers to $s, assuming the value of $n +is non-negative to begin with. This is reflected in the store by stating that, +after the execution of the loop, the original value of $s (which is set to +equal some symbolic integer S) is incremented by ((N +Int 1) *Int N /Int 2), and the value of $n always equals 0. Add $n and $s as tokens in +the LESSON-22-SPEC-SYNTAX module, the above claim to the LESSON-22-SPEC +module, and run the K prover, which should return #Top.
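The extra token declarations for the LESSON-22-SPEC-SYNTAX module follow the same pattern as before (a sketch):

    syntax Id ::= "$n" [token]
                | "$s" [token]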

+
    +
  5. Finally, our last claim is about a program that uses function calls:
+
claim
+    <k>
+        def $sum($n, .Ids) {
+            $s = 0 ;
+            while (0 < $n) {
+                $s = $s + $n;
+                $n = $n - 1;
+            }
+            return $s;
+        }
+
+        $s = $sum(N:Int, .Ints);
+    => . ... </k>
+    <funcs> .Map => ?_ </funcs>
+    <store> $s |-> (_ => ((N +Int 1) *Int N /Int 2)) </store>
+    <stack> .List </stack>
+    requires N >=Int 0
+
+

Essentially, we have wrapped the while loop from claim 3.4 into a function +$sum, and then called that function with a symbolic integer N, storing the +return value in the variable $s. The specification states that this program +ends up storing the sum of the first N integers in the variable $s. Add $sum +to the LESSON-22-SPEC-SYNTAX module, the above claim to the +LESSON-22-SPEC module, and run the K prover, which should again return +#Top.
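Likewise, the token declaration to add for the function name would be (a sketch, following the pattern above):

    syntax Id ::= "$sum" [token]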

+

Exercises

+
    +
  1. Change the condition of the if statement in part 3.2 to take the else +branch and adjust the claim so that the proof passes.

    +
  2. The post-condition of the specification in part 3.3 loses some information. +In particular, the value of ?C is in fact the maximum of A and B. +Prove the same claim as in 3.3, but with the post-condition ensures (?C ==Int maxInt(A, B)). For this, you will need to extend the VERIFICATION +module with two simplifications that capture the meaning of maxInt(A:Int, B:Int). Keep in mind that any rewriting rule can be used as a +simplification; in particular, that simplifications can have requires +clauses.

    +
  3. Following the pattern shown in part 3.4, assuming a non-negative initial +value of $b, specify and verify the following while loop:

    +
+
while ( 0 < $b ) {
+    $a = $a + $c;
+    $b = $b - 1;
+    $c = $c - 1;
+}
+
+

Hint: You will not need additional simplifications---once you've got the +specification right, the proof will go through.

+
    +
  4. Write an arbitrary yet not-too-complex function (or several functions +interacting with each other), and try to specify and verify it (them) in K.
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/1_basic/index.html b/k-distribution/k-tutorial/1_basic/index.html new file mode 100644 index 00000000000..c3e23dea58d --- /dev/null +++ b/k-distribution/k-tutorial/1_basic/index.html @@ -0,0 +1,409 @@ + + + + + + + + + + + + + + +Section 1: Basic K Concepts | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Section 1: Basic K Concepts

+

The goal of this first section of the K tutorial is to teach the basic +principles of K to someone with no prior experience with K as a programming +language. However, it is not written for someone who is a complete beginner +to programming. We assume that the reader has a firm grounding in computer +science broadly and has prior experience writing code in functional +programming languages.

+

By the end of this section, the reader ought to be able to write specifications +of simple languages in K, use these specifications to generate a fast +interpreter for their programming language, as well as write basic deductive +program verification proofs over programs in their language. This should give +them the theoretical grounding they need to begin expanding their knowledge +of K in Section 2: Intermediate K Concepts.

+

To begin this section, refer to +Lesson 1.1: Setting up a K Environment.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/01_macros/index.html b/k-distribution/k-tutorial/2_intermediate/01_macros/index.html new file mode 100644 index 00000000000..16558516034 --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/01_macros/index.html @@ -0,0 +1,575 @@ + + + + + + + + + + + + + + +Lesson 2.1: Macros, Aliases, and Anywhere Rules | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.1: Macros, Aliases, and Anywhere Rules

+

The purpose of this lesson is to explain the behavior of the macro, +macro-rec, alias, and alias-rec production attributes, as well as the +anywhere rule attribute. These attributes control the meaning of how rules +associated with them are applied.

+

Macros

+

Thus far in the K tutorial, we have described three different types of rules:

+
    +
  1. Top-level rewrite rules, which rewrite a configuration composed of cells to +another configuration;
  2. Function rules, which define the behavior of a function written over +arbitrary input and output types; and
  3. Simplification rules, which describe ways in which the symbolic execution +engine ought to simplify terms containing symbolic values.
+

This lesson introduces three more types of rules, the first of which are +macros. A production is a macro if it has the macro attribute, and all +rules whose top symbol on the left hand side is a macro are macro rules +which define the behavior of the macro. Like function rules and simplification +rules, macro rules do not participate in cell completion. However, unlike +function rules and simplification rules, macro rules are applied statically +before rewriting begins, and the macro symbol is expected to no longer appear +in the initial configuration for rewriting once all macros in that +configuration are rewritten.

+

The rationale behind macros is that they allow you to define one piece of syntax +in terms of another piece of syntax without any runtime overhead associated +with the cost of rewriting one to the other. This process is a common one in +programming language design and specification and is referred to as +desugaring; the syntax that is transformed is typically also referred to as +syntactic sugar for another type of syntax. For example, in a language with +if statements and curly braces, you could write the following fragment +(lesson-01.k):

+
module LESSON-01
+  imports BOOL
+
+  syntax Stmt ::= "if" "(" Exp ")" Stmt [macro]
+                | "if" "(" Exp ")" Stmt "else" Stmt
+                | "{" Stmts "}"
+  syntax Stmts ::= List{Stmt,""}
+  syntax Exp ::= Bool
+
+  rule if ( E ) S => if ( E ) S else { .Stmts }
+endmodule
+

In this example, we see that an if statement without an else clause is +defined in terms of one with an else clause. As a result, we would only +need to give a single rule for how to rewrite if statements, rather than +two separate rules for two types of if statements. This is a common pattern +for dealing with program syntax that contains an optional component to it.

+

It is worth noting that by default, macros are not applied recursively. To be +more precise, by default a macro that arises as a result of the expansion of +the same macro is not rewritten further. This is primarily to simplify the +macro expansion process and reduce the risk that improperly defined macros will +lead to non-terminating behavior.

+

It is possible, however, to tell K to expand a macro recursively. To do this, +simply replace the macro attribute with the macro-rec attribute. Note that +K does not do any kind of checking to ensure termination here, so it is +important that rules be defined correctly to always terminate, otherwise the +macro expansion phase will run forever. Fortunately, in practice it is very +simple to ensure this property for most of the types of macros that are +typically used in real-world semantics.

+

Exercise

+

Using a Nat sort containing the constructors 0 and S (i.e., a +Peano-style axiomatization of the +natural numbers where S(N) = N + 1, S(S(N)) = N + 2, etc), write a macro +that will compute the sum of two numbers.

+

Aliases

+

NOTE: This lesson introduces the concept of "aliases", which are a variant +of macros. While similar, this is different from the concept of "aliases" in +matching logic, which is introduced in Lesson 2.16.

+

Macros can be very useful in helping you define a programming language. +However, they can be disruptive while pretty printing a configuration. For +example, you might write a set of macros that transforms the code the user +wrote into equivalent code that is slightly harder to read. This can make it +more difficult to understand the code when it is pretty printed as part of the +output of rewriting.

+

K defines a relatively straightforward but novel solution to this problem, +which is known as a K alias. An alias in K is very similar to a macro, +with the exception that the rewrite rule will also be applied backwards +during the pretty-printing process.

+

It is very simple to make a production be an alias instead of a macro: simply +use the alias or alias-rec attributes instead of the macro or macro-rec +attributes. For example, if the example involving if statements above was +declared using an alias instead of a macro, the Stmt term if (E) {} else {} +would be pretty-printed as if (E) {}. This is because during pretty-printing, +the term participates in another macro-expansion pass. However, this macro +expansion step will only apply rules with the alias or alias-rec attribute, +and, critically, it will reverse the rule by treating the left-hand side as if +it were the right-hand side, and vice versa.
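Concretely, the only change to the LESSON-01 fragment above would be the attribute on the production (a sketch):

  syntax Stmt ::= "if" "(" Exp ")" Stmt [alias]
                | "if" "(" Exp ")" Stmt "else" Stmt
                | "{" Stmts "}"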

+

This can be very useful to allow you to define one construct in terms of +another while still being able to pretty-print the result as if it were +the original term in question. This can be especially useful for applications +of K where we are taking the output of rewriting and attempting to use it as +a code fragment that we then execute, such as with test generation.

+

Exercise

+

Modify LESSON-01 above to use an alias instead of a macro and experiment +with how various terms are pretty-printed by invoking krun on them.

+

anywhere rules

+

The last type of rule introduced in this lesson is the anywhere rule. An +anywhere rule is specified by adding the anywhere attribute to a rule. Such a +rule is similar to a function rule in that it does not participate in cell +completion, and will apply anywhere that the left-hand-side matches in the +configuration, but distinct in that the symbol in question can still be matched +against in the left-hand side of other rules, even during concrete rewriting. +The reasoning behind this is that instead of the symbol in question being a +constructor, it is a constructor modulo the axioms defined with the +anywhere attribute. Essentially, the rules with the anywhere attribute will +apply as soon as they appear in the right-hand side of a rule being applied, +but the symbol in question will still be treated as a symbol that can be +matched on if it is not completely removed by those rules.

+

This can be useful in certain cases to allow you to define transformations over +particular pieces of syntax while still generally giving those pieces of syntax +another meaning when the anywhere rule does not apply. For example, the ISO C +standard defines the semantics of *&x as exactly equal to x, with no +reading or writing of memory taking place, and the K semantics of C implements +this functionality using an anywhere rule that is applied at compilation time.
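As a rough illustration of the idea (a sketch using hypothetical syntax, not the actual K semantics of C), an anywhere rule of this shape could collapse *&x to x purely at the term level:

module ANYWHERE-EXAMPLE
  imports ID-SYNTAX

  syntax Exp ::= Id
               | "*" Exp
               | "&" Exp

  // Collapse a dereference of an address-of wherever it occurs,
  // without ever reading or writing memory.
  rule * & E:Exp => E [anywhere]
endmodule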

+

NOTE: the anywhere attribute is only implemented on the LLVM backend +currently. Attempting to use it in a semantics that is compiled with the +Haskell backend will result in an error being reported by the compiler. This +should be remembered when using this attribute, as it may not be suitable for +a segment of a semantics which is intended to be symbolically executed.

+

Exercises

+
    +
  1. Write a version of the calculator from Lesson 1.14 Exercise 1, which uses +the same syntax for evaluating expressions, but defines its arithmetic logic +using anywhere rules rather than top-level rewrite rules.
+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/02_fresh_constants/index.html b/k-distribution/k-tutorial/2_intermediate/02_fresh_constants/index.html new file mode 100644 index 00000000000..911a95f22cb --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/02_fresh_constants/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.2: Fresh Constants | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.2: Fresh Constants

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/03_klabels/index.html b/k-distribution/k-tutorial/2_intermediate/03_klabels/index.html new file mode 100644 index 00000000000..c39d563b19b --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/03_klabels/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.3: KLabels and Abstract Syntax | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.3: KLabels and Abstract Syntax

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/04_overloading/index.html b/k-distribution/k-tutorial/2_intermediate/04_overloading/index.html new file mode 100644 index 00000000000..1eb6653172e --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/04_overloading/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.4: Overloaded Symbols | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.4: Overloaded Symbols

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/05_matching_logic/index.html b/k-distribution/k-tutorial/2_intermediate/05_matching_logic/index.html new file mode 100644 index 00000000000..39ccaec6d51 --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/05_matching_logic/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.5: Matching Logic Connectives and #Or Patterns | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.5: Matching Logic Connectives and #Or Patterns

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/06_function_context/index.html b/k-distribution/k-tutorial/2_intermediate/06_function_context/index.html new file mode 100644 index 00000000000..00e1523fa5b --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/06_function_context/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.6: Function Context | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.6: Function Context

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/07_record_productions/index.html b/k-distribution/k-tutorial/2_intermediate/07_record_productions/index.html new file mode 100644 index 00000000000..ca6851326b2 --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/07_record_productions/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.7: Record Productions and Named Nonterminals | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.7: Record Productions and Named Nonterminals

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/08_fun_and_let/index.html b/k-distribution/k-tutorial/2_intermediate/08_fun_and_let/index.html new file mode 100644 index 00000000000..9aa233f615e --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/08_fun_and_let/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.8: #fun and #let | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.8: #fun and #let

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/09_as/index.html b/k-distribution/k-tutorial/2_intermediate/09_as/index.html new file mode 100644 index 00000000000..e701aaac0ce --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/09_as/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.9: #as Patterns | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.9: #as Patterns

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/10_matching_operator/index.html b/k-distribution/k-tutorial/2_intermediate/10_matching_operator/index.html new file mode 100644 index 00000000000..a957f06fb7e --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/10_matching_operator/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.10: The Matching Operators, :=K and :/=K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.10: The Matching Operators, :=K and :/=K

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/11_evaluation_order/index.html b/k-distribution/k-tutorial/2_intermediate/11_evaluation_order/index.html new file mode 100644 index 00000000000..dc58889d7c8 --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/11_evaluation_order/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.11: Uncommon Evaluation Order Concepts | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.11: Uncommon Evaluation Order Concepts

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/12_floats_and_machine_ints/index.html b/k-distribution/k-tutorial/2_intermediate/12_floats_and_machine_ints/index.html new file mode 100644 index 00000000000..6ba7522c112 --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/12_floats_and_machine_ints/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.12: IEEE 754 Floating Point and Fixed Width Integers | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.12: IEEE 754 Floating Point and Fixed Width Integers

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/13_substitution/index.html b/k-distribution/k-tutorial/2_intermediate/13_substitution/index.html new file mode 100644 index 00000000000..fa0b1db0e44 --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/13_substitution/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.13: Alpha-renaming-aware Substitution | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.13: Alpha-renaming-aware Substitution

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/14_io/index.html b/k-distribution/k-tutorial/2_intermediate/14_io/index.html new file mode 100644 index 00000000000..39d61df9685 --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/14_io/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.14: File I/O | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.14: File I/O

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/15_string_buffers_and_bytes/index.html b/k-distribution/k-tutorial/2_intermediate/15_string_buffers_and_bytes/index.html new file mode 100644 index 00000000000..1f37dc47c8d --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/15_string_buffers_and_bytes/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.15: String Buffers and Byte Sequences | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.15: String Buffers and Byte Sequences

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/16_kore/index.html b/k-distribution/k-tutorial/2_intermediate/16_kore/index.html new file mode 100644 index 00000000000..c1cfc6a0c27 --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/16_kore/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.16: The Intermediate Language of K, KORE | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.16: The Intermediate Language of K, KORE

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/17_debugging_proofs/index.html b/k-distribution/k-tutorial/2_intermediate/17_debugging_proofs/index.html new file mode 100644 index 00000000000..13a205a76d5 --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/17_debugging_proofs/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Lesson 2.17: Debugging Proofs using the Haskell Backend REPL | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 2.17: Debugging Proofs using the Haskell Backend REPL

+

Return to Top

+

Click here to return to the Table of Contents for Section 2.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/2_intermediate/index.html b/k-distribution/k-tutorial/2_intermediate/index.html new file mode 100644 index 00000000000..ad204eb87dc --- /dev/null +++ b/k-distribution/k-tutorial/2_intermediate/index.html @@ -0,0 +1,438 @@ + + + + + + + + + + + + + + +Section 2: Intermediate K Concepts | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Section 2: Intermediate K Concepts

+

The goal of this second section is to supplement a beginning developer's +knowledge of K after they have gained a basic understanding of K. Each lesson +in this section can be completed independently in order to learn about a +particular facet of the K language. The lessons are written to provide basic +understanding of less commonly-used features of K to someone who is still +learning K. For more complete references of these features, the reader ought to +consult the User Manual.

+

The reader ought to be able to complete lessons in this section as needed in +order to learn about specific features of interest, but if desired, can also +complete the entire section in one go. Someone who has completed this entire +section ought to be able to read and understand most K specifications, as well +as write their own specifications of some complexity, and use them to perform +most common K-related tasks. They can then read about specific lessons in +Section 3: Advanced K Concepts if they want to +learn more.

+

Table of Contents

+
    +
  1. Macros, Aliases, and Anywhere Rules
  2. Fresh Constants
  3. KLabels and Abstract Syntax
  4. Overloaded Symbols
  5. Matching Logic Connectives and #Or Patterns
  6. Function Context
  7. Record Productions and Named Nonterminals
  8. #fun and #let
  9. #as patterns
  10. The Matching Operators, :=K and :/=K
  11. Uncommon Evaluation Order Concepts
  12. IEEE 754 Floating Point and Fixed Width Integers
  13. Alpha-renaming-aware Substitution
  14. File I/O
  15. String Buffers and Byte Sequences
  16. The Intermediate Language of K, KORE
  17. Debugging Proofs using the Haskell Backend REPL
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/3_advanced/1_parsing/1_layout/index.html b/k-distribution/k-tutorial/3_advanced/1_parsing/1_layout/index.html new file mode 100644 index 00000000000..13e0216ea1f --- /dev/null +++ b/k-distribution/k-tutorial/3_advanced/1_parsing/1_layout/index.html @@ -0,0 +1,395 @@ + + + + + + + + + + + + + + +Lesson 3.1.1: Using #Layout to define the syntax of comments and whitespace | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 3.1.1: Using #Layout to define the syntax of comments and whitespace

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/3_advanced/1_parsing/2_ambiguities/index.html b/k-distribution/k-tutorial/3_advanced/1_parsing/2_ambiguities/index.html new file mode 100644 index 00000000000..b022bed2601 --- /dev/null +++ b/k-distribution/k-tutorial/3_advanced/1_parsing/2_ambiguities/index.html @@ -0,0 +1,395 @@ + + + + + + + + + + + + + + +Lesson 3.1.2: Using amb for parsing context-sensitive languages | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 3.1.2: Using amb for parsing context-sensitive languages

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/3_advanced/1_parsing/3_locations/index.html b/k-distribution/k-tutorial/3_advanced/1_parsing/3_locations/index.html new file mode 100644 index 00000000000..88d0bc27fda --- /dev/null +++ b/k-distribution/k-tutorial/3_advanced/1_parsing/3_locations/index.html @@ -0,0 +1,395 @@ + + + + + + + + + + + + + + +Lesson 3.1.3: Using #location to annotate terms with file, line, and column information | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 3.1.3: Using #location to annotate terms with file, line, and column information

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/3_advanced/2_libraries/1_json/index.html b/k-distribution/k-tutorial/3_advanced/2_libraries/1_json/index.html new file mode 100644 index 00000000000..d5b17bc7ca2 --- /dev/null +++ b/k-distribution/k-tutorial/3_advanced/2_libraries/1_json/index.html @@ -0,0 +1,395 @@ + + + + + + + + + + + + + + +Lesson 3.2.1: The JSON Module | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 3.2.1: The JSON Module

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/3_advanced/2_libraries/2_rat/index.html b/k-distribution/k-tutorial/3_advanced/2_libraries/2_rat/index.html new file mode 100644 index 00000000000..35883a4bdb8 --- /dev/null +++ b/k-distribution/k-tutorial/3_advanced/2_libraries/2_rat/index.html @@ -0,0 +1,395 @@ + + + + + + + + + + + + + + +Lesson 3.2.2: The RAT Module | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 3.2.2: The RAT Module

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/3_advanced/2_libraries/3_ffi/index.html b/k-distribution/k-tutorial/3_advanced/2_libraries/3_ffi/index.html new file mode 100644 index 00000000000..9e5172e8b68 --- /dev/null +++ b/k-distribution/k-tutorial/3_advanced/2_libraries/3_ffi/index.html @@ -0,0 +1,395 @@ + + + + + + + + + + + + + + +Lesson 3.2.3: The FFI Module | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 3.2.3: The FFI Module

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/3_advanced/3_extending_k/1_custom_hooks/index.html b/k-distribution/k-tutorial/3_advanced/3_extending_k/1_custom_hooks/index.html new file mode 100644 index 00000000000..0dcdf7ba460 --- /dev/null +++ b/k-distribution/k-tutorial/3_advanced/3_extending_k/1_custom_hooks/index.html @@ -0,0 +1,395 @@ + + + + + + + + + + + + + + +Lesson 3.3.1: Extending K by adding new builtin functions | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 3.3.1: Extending K by adding new builtin functions

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/3_advanced/3_extending_k/2_scripting_k/index.html b/k-distribution/k-tutorial/3_advanced/3_extending_k/2_scripting_k/index.html new file mode 100644 index 00000000000..2726f605067 --- /dev/null +++ b/k-distribution/k-tutorial/3_advanced/3_extending_k/2_scripting_k/index.html @@ -0,0 +1,395 @@ + + + + + + + + + + + + + + +Lessonm 3.3.2: Scripting K to provide language-specific tooling | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Lesson 3.3.2: Scripting K to provide language-specific tooling

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/3_advanced/index.html b/k-distribution/k-tutorial/3_advanced/index.html new file mode 100644 index 00000000000..6f6a6f73785 --- /dev/null +++ b/k-distribution/k-tutorial/3_advanced/index.html @@ -0,0 +1,395 @@ + + + + + + + + + + + + + + +Section 3: Advanced K Concepts | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Section 3: Advanced K Concepts

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/k-tutorial/index.html b/k-distribution/k-tutorial/index.html new file mode 100644 index 00000000000..741ff022f41 --- /dev/null +++ b/k-distribution/k-tutorial/index.html @@ -0,0 +1,406 @@ + + + + + + + + + + + + + + +K Tutorial | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

K Tutorial

+

The purpose of this series of lessons is to teach developers how to program in +K. While the primary use of K is in the specification of operational semantics +of programming languages, this tutorial is agnostic on how the knowledge of K +is used. For a more detailed tutorial explaining the basic principles of +programming language design, refer to the +K PL Tutorial. Note that that tutorial is somewhat +out of date presently.

+

This K tutorial is a work in progress. Many lessons are currently simply +placeholders for future content.

+

To start the K tutorial, begin with +Section 1: Basic Programming in K.

+
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/index.html new file mode 100644 index 00000000000..bf606fe5a03 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/index.html @@ -0,0 +1,412 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Part 1: Defining LAMBDA

+

Here you will learn how to define a very simple language in K and the basics +of how to use the K tool. The language is a variant of call-by-value lambda +calculus and its definition is based on substitution. Specifically, you will +learn the following:

+
    +
  • How to define a module.
  • How to define a language syntax.
  • How to use the defined syntax to parse programs.
  • How to import predefined modules.
  • How to define evaluation strategies using strictness attributes.
  • How to define semantic rules.
  • How the predefined generic substitution works.
  • How to generate PDF and HTML documentation from ASCII definitions.
  • How to include builtins (integers and Booleans) into your language.
  • How to define derived language constructs.
+

This folder contains several lessons, each adding new features to LAMBDA.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_1/NOTES/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_1/NOTES/index.html new file mode 100644 index 00000000000..1d7c8cf81de --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_1/NOTES/index.html @@ -0,0 +1,387 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

We now support adding the following line to the syntax module:

+
syntax priority lambda_._ > __  // exact syntax subject to change
+
+

This will allow for fewer parentheses in programs.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_1/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_1/index.html new file mode 100644 index 00000000000..84f4c9343a1 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_1/index.html @@ -0,0 +1,479 @@ + + + + + + + + + + + + + + +Syntax Modules and Basic K Commands | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Syntax Modules and Basic K Commands

+

Here we define our first K module, which contains the initial syntax of the +LAMBDA language, and learn how to use the basic K commands.

+

Let us create an empty working folder, and open a terminal window +(to the left) and an editor window (to the right). We will edit our K +definition in the right window in a file called lambda.k, and will call +the K tool commands in the left window.

+

Let us start by defining a K module, containing the syntax of LAMBDA.

+

K modules are introduced with the keywords module ... endmodule.

+

The keyword syntax adds new productions to the syntax grammar, using a +BNF-like notation.

+

Terminals are enclosed in double-quotes, like strings.

+

You can define multiple productions for the same non-terminal in the same +syntax declaration using the | separator.

+

Productions can have attributes, which are enclosed in square brackets.

+

The attribute left tells the parser that we want the lambda application to be +left associative. For example, a b c d will then parse as (((a b) c) d).

+

The attribute bracket tells the parser to not generate a node for the +parenthesis production in the abstract syntax trees associated to programs. +In other words, we want to allow parentheses to be used for grouping, but we +do not want to bother to give them their obvious (ignore) semantics.

+

In our variant of lambda calculus defined here, identifiers and lambda +abstractions are meant to be irreducible, that is, are meant to be values. +However, so far Val is just another non-terminal, just like Exp, +without any semantic meaning. It will get a semantic meaning later.
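Putting the pieces described so far together, a minimal sketch of what lambda.k might contain at this point is the following (module name and layout are only one possible choice):

module LAMBDA
  // Id is the builtin identifier sort; recent K versions may require
  // importing ID-SYNTAX (or DOMAINS-SYNTAX) to make it available.
  syntax Val ::= Id
               | "lambda" Id "." Exp
  syntax Exp ::= Val
               | Exp Exp        [left]     // application, left associative
               | "(" Exp ")"    [bracket]  // grouping only, no AST node
endmodule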

+

After we are done typing our definition in the file lambda.k, we can kompile +it with the command:

+
kompile lambda.k
+
+

If we get no errors then a parser has been generated. This parser will be +called from now on by default by the krun tool. To see whether and how the +parser works, we are going to write some LAMBDA programs and store them in +files with the extension .lambda.

+

Let us create a file identity.lambda, which contains the identity lambda +abstraction:

+
lambda x . x
+
+

Now let us call krun on identity.lambda:

+
krun identity.lambda
+
+

Make sure you call the krun command from the folder containing your language +definition (otherwise type krun --help to learn how to pass a language +definition as a parameter to krun). The krun command produces the output:

+
<k>
+  lambda x . x
+</k>
+
+

If you see such an output it means that your program has been parsed (and then +pretty printed) correctly. If you want to see the internal abstract syntax +tree (AST) representation of the parsed program, which we call the K AST, then +type kast in the command instead of krun:

+
kast identity.lambda
+
+

You should normally never need to see this internal representation in your +K definitions, so do not get scared (yes, it is ugly for humans, but it is +very convenient for tools).

+

Note that krun placed the program in a <k> ... </k> cell. In K, computations +happen only in cells. If you do not define a configuration in your definition, +like we did here, then a configuration will be created automatically for you +which contains only one cell, the default k cell, which holds the program.

+

Next, let us create a file free-variable-capture.lambda, which contains an +expression which, in order to execute correctly in a substitution-based +semantics of LAMBDA, the substitution operation needs to avoid +variable-capture:

+
a (((lambda x.lambda y.x) y) z)
+
+

Next, file closed-variable-capture.lambda shows an expression which also +requires a capture-free substitution, but this expression is closed (that is, +it has no free variables) and all its bound variables are distinct (I believe +this is the smallest such expression):

+
(lambda z.(z z)) (lambda x.lambda y.(x y))
+
+

Finally, the file omega.lambda contains the classic omega combinator +(or closed expression), which is the smallest expression which loops forever +(not now, but after we define the semantics of LAMBDA):

+
(lambda x.(x x)) (lambda x.(x x))
+
+

Feel free to define and parse several other LAMBDA programs to get a feel for +how the parser works. Parse also some incorrect programs, to see how the +parser generates error messages.

+

In the next lesson we will see how to define semantic rules that iteratively +rewrite expressions over the defined syntax until they evaluate to a result. +This way, we obtain our first programming language defined using K.

+

Go to Lesson 2, LAMBDA: Module Importing, Rules, Variables

+

MOVIE (out of date) [4'07"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_2.5/NOTES/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_2.5/NOTES/index.html new file mode 100644 index 00000000000..a9f610a517c --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_2.5/NOTES/index.html @@ -0,0 +1,387 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

This folder has been added after the original tutorial was made +and after the videos were recorded. Eventually we will renumber +the lessons and redo the videos. A README.md file is also needed +here.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_2/NOTES/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_2/NOTES/index.html new file mode 100644 index 00000000000..c477bf1d9d5 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_2/NOTES/index.html @@ -0,0 +1,393 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Substitution has since been reimplemented so that fresh variables are +resolved locally. The global counter for fresh variables shown in the video +therefore no longer exists, and fewer variable renamings take place.

+

When calling krun on the programs in lesson_1, a different path is +shown than in the README.md.

+

Marking the beta-reduction rule with [anywhere] will give us the +conventional lambda-calculus. A new lesson has been added, 2.5, +showing that. The README.md file should be changed at the end to +point to lesson 2.5.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_2/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_2/index.html new file mode 100644 index 00000000000..b936214d257 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_2/index.html @@ -0,0 +1,461 @@ + + + + + + + + + + + + + + +Module Importing, Rules, Variables | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Module Importing, Rules, Variables

+

We here learn how to include a predefined module (SUBSTITUTION), how to +use it to define a K rule (the characteristic rule of lambda calculus), +and how to make proper use of variables in rules.

+

Let us continue our lambda.k definition started in the previous lesson.

+

The requires keyword takes a .k file containing language features that +are needed for the current definition, which can be found in the +k-distribution/include/kframework/builtin folder. Thus, the command

+
requires "substitution.k"
+
+

says that the subsequent definition of LAMBDA needs the generic substitution, +which is predefined in file substitution.k under the folder +k-distribution/include/kframework/builtin. Note that substitution can be defined itself in K, +although it uses advanced features that we have not discussed yet in this +tutorial, so it may not be easy to understand now.

+

Using the imports keyword, we can now modify LAMBDA to import the module +SUBSTITUTION, which is defined in the required substitution.k file.

+

Now we have all the substitution machinery available for our definition. +However, since our substitution is generic, it cannot know which language +constructs bind variables, and what counts as a variable; however, this +information is critical in order to correctly solve the variable capture +problem. Thus, you have to tell the substitution that your lambda construct +is meant to be a binder, and that your Id terms should be treated as variables +for substitution. The former is done using the attribute binder. +By default, binder binds all the variables occurring anywhere in the first +argument of the corresponding syntactic construct within its other arguments; +you can configure which arguments are bound where, but that will be discussed +in subsequent lectures. To tell K which terms are meant to act as variables +for binding and substitution, we have to explicitly subsort the desired syntactic +categories to the builtin KVariable sort.

+

Now we are ready to define our first K rule. Rules are introduced with the +keyword rule and make use of the rewrite symbol, =>. In our case, +the rule defines the so-called lambda calculus beta-reduction, which +makes use of substitution in its right-hand side, as shown in lambda.k.

+

By convention, variables that appear in rules start with a capital letter +(the current implementation of the K tool may even enforce that).

+

Variables may be explicitly tagged with their syntactic category (also called +sort). If tagged, the matching term will be checked at run-time for +membership to the claimed sort. If not tagged, then no check will be made. +The former is safer, but involves the generation of a side condition to the +rule, so the resulting definition may execute slightly slower overall.

+

In our rule in lambda.k we tagged all variables with their sorts, so we chose +the safest path. Only the V variable really needs to be tagged there, +because we can prove (using other means, not the K tool, as the K tool is not +yet concerned with proving) that the first two variables will always have the +claimed sorts whenever we execute any expression that parses within our +original grammar.
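Putting these pieces together, a sketch of the lesson's lambda.k (assuming the Val/Exp syntax from the previous lesson) would look roughly like this:

requires "substitution.k"

module LAMBDA
  imports SUBSTITUTION

  syntax Val ::= Id
               | "lambda" Id "." Exp  [binder]   // binds Id in the body
  syntax Exp ::= Val
               | Exp Exp              [left]
               | "(" Exp ")"          [bracket]
  syntax KVariable ::= Id   // Ids act as variables for substitution

  // Beta reduction: capture-free substitution of V for X in E
  rule (lambda X:Id . E:Exp) V:Val => E[V / X]
endmodule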

+

Let us compile the definition and then run some programs. For example,

+
krun closed-variable-capture.lambda
+
+

yields the output

+
<k>
+  lambda y . ((lambda x . (lambda y . (x  y))) y)
+</k> 
+
+

Notice that only certain programs reduce (some even yield non-termination, +such as omega.lambda), while others do not. For example, +free-variable-capture.lambda does not reduce its second argument expression +to y, as we would expect. This is because the K rewrite rules between syntactic +terms do not apply anywhere they match. They only apply where they have been +given permission to apply by means of appropriate evaluation strategies of language +constructs, which is done using strictness attributes, evaluation contexts, +heating/cooling rules, etc., as discussed in the next lessons.

+

The next lesson will show how to add to LAMBDA the desired evaluation strategies using strictness attributes.

+

Go to Lesson 3, LAMBDA: Evaluation Strategies using Strictness

+

MOVIE (out of date) [4'03"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_3/NOTES/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_3/NOTES/index.html new file mode 100644 index 00000000000..267e4db8cf2 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_3/NOTES/index.html @@ -0,0 +1,385 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

When we say "previous lesson" we refer to lesson 2. This will need to change +when we incorporate lesson 2.5 properly.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_3/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_3/index.html new file mode 100644 index 00000000000..a237f99b525 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_3/index.html @@ -0,0 +1,426 @@ + + + + + + + + + + + + + + +Evaluation Strategies using Strictness | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Evaluation Strategies using Strictness

+

Here we learn how to use the K strict attribute to define desired evaluation +strategies. We will also learn how to tell K which terms are already +evaluated, so it does not attempt to evaluate them anymore and treats them +internally as results of computations.

+

Recall from the previous lecture that the LAMBDA program +free-variable-capture.lambda was stuck, because K was not given permission +to evaluate the arguments of the lambda application construct.

+

You can use the attribute strict to tell K that the corresponding construct +has a strict evaluation strategy, that is, that its arguments need to be +evaluated before the semantics of the construct applies. The order of +argument evaluation is purposely unspecified when using strict, and indeed +the K tool allows us to detect all possible non-deterministic behaviors that +result from such intended underspecification of evaluation strategies. We will +learn how to do that when we define the IMP language later in this tutorial; +we will also learn how to enforce a particular order of evaluation.

+

In order for the above strictness declaration to work effectively and +efficiently, we need to tell the K tool which expressions are meant to be +results of computations, so that it will not attempt to evaluate them anymore. +One way to do it is to make Val a syntactic subcategory of the builtin +KResult syntactic category. Since we use the same K parser to also parse +the semantics, we use the same syntax keyword to define additional syntax +needed exclusively for the semantics (like KResults). See lambda.k.
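Concretely, the relevant declarations in lambda.k look roughly like this (a sketch; see lambda.k for the exact productions):

syntax Exp ::= Val
             | Exp Exp   [strict, left]

syntax KResult ::= Val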

+

Compile again and then run some programs. They should all work as expected. +In particular, free-variable-capture.lambda now evaluates to a y.

+

We now have a complete and working semantic definition of call-by-value lambda-calculus. While theoretically correct, our definition is not easy to use and disseminate. In the next lessons we will learn how to generate formatted documentation for LAMBDA and how to extend LAMBDA in order to write human-readable and interesting programs.

+

Go to Lesson 4, LAMBDA: Generating Documentation; Latex Attributes.

+

MOVIE (out of date) [2'20"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_4/NOTES/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_4/NOTES/index.html new file mode 100644 index 00000000000..4fd1984bdfc --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_4/NOTES/index.html @@ -0,0 +1,384 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

README.md refers to Lesson 9. This will need to be updated.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_4/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_4/index.html new file mode 100644 index 00000000000..4f2a65f2676 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_4/index.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + +Generating Documentation; Latex Attributes | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Generating Documentation; Latex Attributes

+

In this lesson we learn how to generate formatted documentation from K +language definitions. We also learn how to use Latex attributes to control +the formatting of language constructs, particularly of ones which have a +mathematical flavor and we want to display accordingly.

+

To enhance readability, we may want to replace the keyword lambda by the mathematical lambda symbol in the generated documentation. We can control the way we display language constructs in the generated documentation by associating Latex attributes to them.

+

This is actually quite easy. All we have to do is to associate a latex +attribute to the production defining the construct in question, following +the Latex syntax for defining new commands (or macros).

+

In our case, we associate the attribute latex(\lambda{#1}.{#2}) to the +production declaring the lambda abstraction (recall that in Latex, #n refers +to the n-th argument of the defined new command).
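In lambda.k this amounts to something like the following (a sketch; the binder attribute comes from Lesson 2):

syntax Val ::= "lambda" Id "." Exp   [binder, latex(\lambda{#1}.{#2})]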

+

We will later see, in Lesson 9, that we can add arbitrarily complex Latex +comments and headers to our language definitions, which give us maximum +flexibility in formatting our language definitions.

+

Now we have a simple programming language, with a nice documentation. However, +it is not easy to write interesting programs in this language. Almost all +programming languages build upon existing data-types and libraries. The K +tool provides a few of these (and you can add more).

+

In the next lesson we show how we can add builtin integers and Booleans to +LAMBDA, so we can start to evaluate meaningful expressions.

+

Go to Lesson 5, LAMBDA: Adding Builtins; Side Conditions.

+

MOVIE (out of date) [3'13"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_5/NOTES/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_5/NOTES/index.html new file mode 100644 index 00000000000..54cce2a9a20 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_5/NOTES/index.html @@ -0,0 +1,389 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

The builtins have changed, they are now generic for all backends.

+

Talk about sort inference for variables, for example from I1 +Int I2 +we infer the sort of I1 and I2 is Int.

+

Check the entire tutorial for instances where we give the sort of a +variable but we don't have to. Many of those are artifacts since we were +not able to infer sorts that well.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_5/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_5/index.html new file mode 100644 index 00000000000..d8252bced14 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_5/index.html @@ -0,0 +1,473 @@ + + + + + + + + + + + + + + +Adding Builtins; Side Conditions | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Adding Builtins; Side Conditions

+

We have already added the builtin identifiers (sort Id) to LAMBDA expressions, +but those had no operations on them. In this lesson we add integers and +Booleans to LAMBDA, and extend the builtin operations on them into +corresponding operations on LAMBDA expressions. We will also learn how to add +side conditions to rules, to limit the number of instances where they can +apply.

+

The K tool provides several builtins, which are automatically included in all +definitions. These can be used in the languages that we define, typically by +including them in the desired syntactic categories. You can also define your +own builtins in case the provided ones are not suitable for your language +(e.g., the provided builtin integers and operations on them are arbitrary +precision).

+

For example, to add integers and Booleans as values to our LAMBDA, we have to +add the productions

+
syntax Val ::= Int | Bool
+
+

Int and Bool are the nonterminals that correspond to these builtins.

+

To make use of these builtins, we have to add some arithmetic operation constructs to our language. We prefer to use the conventional infix notation for these, and the usual precedences (i.e., multiplication and division bind tighter than addition, which binds tighter than the relational operators). Inspired by SDF, we use > instead of | to state that all the previous constructs bind tighter than all the subsequent ones. See lambda.k.
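For instance, the expression syntax in lambda.k looks roughly like this (a sketch; consult lambda.k for the exact attributes):

syntax Exp ::= Exp "*" Exp    [strict, left]
             | Exp "/" Exp    [strict]
             > Exp "+" Exp    [strict, left]
             > Exp "<=" Exp   [strict]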

+

The only thing left is to link the LAMBDA arithmetic operations to the +corresponding builtin operations, when their arguments are evaluated. +This can be easily done using trivial rewrite rules, as shown in lambda.k. +In general, the K tool attempts to uniformly add the corresponding builtin +name as a suffix to all the operations over builtins. For example, the +addition over integers is an infix operation named +Int.
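For example (a sketch; the division rule will be refined with a side condition below):

rule I1 * I2 => I1 *Int I2
rule I1 / I2 => I1 /Int I2
rule I1 + I2 => I1 +Int I2
rule I1 <= I2 => I1 <=Int I2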

+

Compile the new lambda.k definition and evaluate some simple arithmetic +expressions. For example, if arithmetic.lambda is (1+2*3)/4 <= 1, then

+
krun arithmetic.lambda
+
+

yields, as expected, true. Note that the parser took the desired operation +precedence into account.

+

Let us now try to evaluate an expression which performs a wrong computation, +namely a division by zero. Consider the expression arithmetic-div-zero.lambda +which is 1/(2/3). Since division is strict and 2/3 evaluates to 0, this +expression reduces to 1/0, which further reduces to 1 /Int 0 by the rule for +division, which is now stuck (with the current back-end to the K tool).

+

In fact, depending upon the back-end that we use to execute K definitions and +in particular to evaluate expressions over builtins, 1 /Int 0 can evaluate to +anything. It just happens that the current back-end keeps it as an +irreducible term. Other K back-ends may reduce it to an explicit error +element, or issue a segmentation fault followed by a core dump, or throw an +exception, etc.

+

To avoid requesting the back-end to perform an illegal operation, we may use a +side condition in the rule of division, to make sure it only applies when the +denominator is non-zero.
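In K this is just a requires clause added to the division rule, as in lambda.k:

rule I1 / I2 => I1 /Int I2  requires I2 =/=Int 0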

+

Like in other operational formalisms, the role of the K side conditions is to filter the instances to which a rule applies. The notion of a side condition comes from logics, where a sharp distinction is made between a side condition (cheap) and a premise (expensive). Premises are usually resolved using further (expensive) logical derivations, while side conditions are simple (cheap) conditions over the rule meta-variables within the underlying mathematical domains (which in K can be extended by the user, as we will see in future lessons). Regarded as a logic, K derives rewrite rules from other rewrite rules; therefore, the K side conditions cannot contain other rewrites in them (using =>). This contrasts with other rewrite engines, for example Maude, which allow conditional rules with rewrites in conditions. The rationale behind this deliberate restriction in K is twofold:

+
    +
  • On the one hand, general conditional rules require a complex, and thus slower +rewrite engine, which starts recursive (sometimes exhaustive) rewrite sessions +to resolve the rewrites in conditions. In contrast, the side conditions in K +can be evaluated efficiently by back-ends, for example by evaluating builtin +expressions and/or by calling builtin functions.
  • +
  • On the other hand, the semantic definitional philosophy of K is that rule +premises are unnecessary, so there is no need to provide support for them.
  • +
+

Having builtin arithmetic is useful, but writing programs with just lambda +and arithmetic constructs is still a pain. In the next two lessons we will +add conditional (if_then_else) and binding (let and letrec) constructs, +which will allow us to write nicer programs.

+

Go to Lesson 6, LAMBDA: Selective Strictness; Anonymous Variables.

+

MOVIE (out of date) [4'52"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_6/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_6/index.html new file mode 100644 index 00000000000..d9f13768887 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_6/index.html @@ -0,0 +1,429 @@ + + + + + + + + + + + + + + +Selective Strictness; Anonymous Variables | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Selective Strictness; Anonymous Variables

+

We here show how to define selective strictness of language constructs, +that is, how to state that certain language constructs are strict only +in some arguments. We also show how to use anonymous variables.

+

We next define a conditional if construct, which takes three arguments, +evaluates only the first one, and then reduces to either the second or the +third, depending on whether the first one evaluated to true or to false.

+

K allows us to define selective strictness using the same strict attribute, by passing it a list of numbers. The numbers correspond to the arguments in which we want the defined construct to be strict. In our case,

+
syntax Exp ::= "if" Exp "then" Exp "else" Exp   [strict(1)]
+
+

states that the conditional construct is strict in the first argument.

+

We can now assume that its first argument will eventually reduce to a value, so +we only write the following two semantic rules:

+
rule if true  then E else _ => E
+rule if false then _ else E => E
+
+

Thus, we assume that the first argument evaluates to either true or false.

+

Note the use of the anonymous variable _. We use such variables purely for +structural reasons, to state that something is there but we don't care what. +An anonymous variable is therefore completely equivalent to a normal variable +which is unsorted and different from all the other variables in the rule. If +you use _ multiple times in a rule, they will all be considered distinct.

+

Compile lambda.k and write and execute some interesting expressions making +use of the conditional construct. For example, the expression

+
if 2<=1 then 3/0 else 10
+
+

evaluates to 10 and will never evaluate 3/0, thus avoiding an unwanted +division-by-zero.

+

In the next lesson we will introduce two new language constructs, called +let and letrec and conventionally found in functional programming +languages, which will allow us to already write interesting LAMBDA programs.

+

Go to Lesson 7, LAMBDA: Derived Constructs; Extending Predefined Syntax.

+

MOVIE (out of date) [2'14"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_7/NOTES/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_7/NOTES/index.html new file mode 100644 index 00000000000..672ba84bad7 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_7/NOTES/index.html @@ -0,0 +1,387 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

In more recent definitions, we prefer to make some [macro] rules. +Macros apply statically, before the program is executed, thus +increasing the execution performance. The let and letrec constructs +here could be made into [macro].

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_7/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_7/index.html new file mode 100644 index 00000000000..10439d1b8d7 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_7/index.html @@ -0,0 +1,467 @@ + + + + + + + + + + + + + + +Derived Constructs, Extending Predefined Syntax | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Derived Constructs, Extending Predefined Syntax

+

In this lesson we will learn how to define derived language constructs, that +is, ones whose semantics is defined completely in terms of other language +constructs. We will also learn how to add new constructs to predefined +syntactic categories.

+

When defining a language, we often want certain language constructs to be +defined in terms of other constructs. For example, a let-binding construct +of the form

+
let x = e in e'
+
+

is nothing but syntactic sugar for

+
(lambda x . e') e
+
+

This can be easily achieved with a rule, as shown in lambda.k.
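A sketch of that desugaring rule (see lambda.k for the exact form):

syntax Exp ::= "let" Id "=" Exp "in" Exp
rule let X = E in E' => (lambda X . E') E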

+

Compile lambda.k and write some programs using let binders.

+

For example, consider a lets.lambda program which takes arithmetic.lambda +and replaces each integer by a let-bound variable. It should evaluate to +true, just like the original arithmetic.lambda.

+

Let us now consider a more interesting program, namely one that calculates the +factorial of 10:

+
let f = lambda x . (
+        (lambda t . lambda x . (t t x))
+        (lambda f . lambda x . (if x <= 1 then 1 else (x * (f f (x + -1)))))
+        x
+      )
+in (f 10)
+
+

This program follows a common technique to define fixed points in untyped +lambda calculus, based on passing a function to itself.

+

We may not like to define fixed-points following the approach above, because +it requires global changes in the body of the function meant to be recursive, +basically to pass it to itself (f f in our case above). The approach below +isolates the fixed-point aspect of the function in a so-called fixed-point +combinator, which we call fix below, and then apply it to the function +defining the body of the factorial, without any changes to it:

+
let fix = lambda f . (
+          (lambda x . (f (lambda y . (x x y))))
+          (lambda x . (f (lambda y . (x x y))))
+        )
+in let f = fix (lambda f . lambda x .
+                (if x <= 1 then 1 else (x * (f (x + -1)))))
+   in (f 10)
+
+

Although the above techniques are interesting and powerful (indeed, untyped +lambda calculus is in fact Turing complete), programmers will probably not +like to write programs this way.

+

We can easily define a more complex derived construct, called letrec and conventionally encountered in functional programming languages, whose semantics captures the fixed-point idea above. In order to keep its definition simple and intuitive, we define a simplified variant of letrec, namely one which only allows us to define one recursive one-argument function. See lambda.k.

+

There are two interesting observations here.

+

First, note that we have already in-lined the definition of the fix +combinator in the definition of the factorial, to save one application of the +beta reduction rule (and the involved substitution steps). We could have +in-lined the definition of the remaining let, too, but we believe that the +current definition is easier to read.

+

Second, note that we extended the predefined Id syntactic category with two new constants, $x and $y. The predefined identifiers cannot start with $, so programs that will be executed with this semantics cannot possibly contain the identifiers $x and $y. In other words, by adding them to Id they become indirectly reserved for the semantics. This is indeed desirable, because any possible uses of $x in the body of the function defined using letrec would be captured by the lambda $x declaration in the definition of letrec.
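Putting the two observations together, the desugaring rule looks roughly as follows: a sketch obtained by in-lining the fix combinator from above, using the reserved identifiers $x and $y; consult lambda.k for the exact form.

syntax Exp ::= "letrec" Id Id "=" Exp "in" Exp
rule letrec F:Id X:Id = E in E'
  => let F = (lambda $x . ((lambda F . lambda X . E) (lambda $y . ($x $x $y))))
             (lambda $x . ((lambda F . lambda X . E) (lambda $y . ($x $x $y))))
     in E'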

+

Using letrec, we can now write the factorial program as elegantly as it can +be written in a functional language:

+
letrec f x = if x <= 1 then 1 else (x * (f (x + -1)))
+in (f 10)
+
+

In the next lesson we will discuss an alternative definition of letrec, based +on another binder, mu, specifically designed to define fixed points.

+

Go to Lesson 8, LAMBDA: Multiple Binding Constructs.

+

MOVIE (out of date) [5'10"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_8/exercises/SK-combinators/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_8/exercises/SK-combinators/index.html new file mode 100644 index 00000000000..5b9c1b5030a --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_8/exercises/SK-combinators/index.html @@ -0,0 +1,423 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Define LAMBDA using the S/K combinators instead of substitution. Your new definition will not require the substitution.k module anymore, and will not use environments (discussed in future lectures), either.

+

Recall that the S and K combinators are defined as follows:

+
K E1 E2 = E1
+S E1 E2 E3 = E1 E3 (E2 E3)
+
+

where the application is that of LAMBDA (left associative binary operation), +and that the lambda construct can be desugared to combinators using the +following simple rules:

+
lambda X . X = S K K
+lambda X . Y = K Y    when Y is a name different from X
+lambda X . (E1 E2) = S (lambda X . E1) (lambda X . E2)
+lambda X . B = K B    when B is any constant, including S or K
+
+

To distinguish the S and K combinators from K variables and make them +more visible, we prefer to write them as SS and KK instead of S and K.

+

If defined correctly and completely, all the tests should pass when you call +ktest on the provided config.xml file. The tests include all the programs +previously executed using LAMBDA (lesson_8), plus the additional program of +the mu-derived exercise, plus a few more simple programs given with this +exercise to help you better test your definition and nail down the notation.

+

The syntax of the new LAMBDA should be the same as before, although +mu needs to be desugared as in the mu-desugared exercise (using a macro). +The tricky part is how to deal with the builtin operations. For example, +lambda x . if x then y else z cannot be transformed into combinators as is, +but it can if we assume a builtin conditional function constant, say cond, +and desugar if_then_else_ to it. Then this expression becomes +lambda x . (((cond x) y) z), which we know how to transform. The drawback +of this cond constant approach is that it may induce non-termination +in recursive programs, but that appears to not be a problem in our examples.
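A minimal sketch of such a desugaring, in the style of the let macro from Lesson 7 (the constant cond and the exact production are assumptions for this exercise, not part of the provided files):

syntax Val ::= "cond"
syntax Exp ::= "if" Exp "then" Exp "else" Exp  [macro]
rule if E then E1 else E2 => ((cond E) E1) E2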

+

You will have to do the same for all builtin functions, and you will have to make sure that you define your values correctly! In our previous definition we were able to say that lambda x . e was a value, but now that is not possible anymore, because the lambda construct will be eliminated. Instead, you will have to state explicitly, using the isVal membership predicate, that all the expressions that involve builtin functions and yield functions are values; for example, isVal(cond V:Val) => true and isVal(cond V1:Val V2:Val) => true need to be added, but obviously not isVal(cond V1:Val V2:Val V3:Val) => true.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_8/exercises/mu-derived/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_8/exercises/mu-derived/index.html new file mode 100644 index 00000000000..c97c0c8e16e --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_8/exercises/mu-derived/index.html @@ -0,0 +1,388 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

The current K LAMBDA semantics of mu (in Lesson 8) is based on +substitution, and then letrec is defined as a derived operation using +mu. Give mu a different semantics, as a derived construct by +translation into other LAMBDA constructs, like we defined letrec in +Lesson 7.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_8/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_8/index.html new file mode 100644 index 00000000000..3f438898251 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_8/index.html @@ -0,0 +1,440 @@ + + + + + + + + + + + + + + +Multiple Binding Constructs | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Multiple Binding Constructs

+

Here we learn how multiple language constructs that bind variables can +coexist. We will also learn about or recall another famous binder besides +lambda, namely mu, which can be used to elegantly define all kinds of +interesting fixed-point constructs.

+

The mu binder has the same syntax as lambda, except that it replaces +lambda with mu.

+

Since mu is a binder, in order for substitution to know how to deal with variable capture in the presence of mu, we have to tell it that mu is a binding construct, just like lambda. While we are at it, we also give mu its desired latex attribute.

+

The intuition for

+
mu x . e
+
+

is that it reduces to e, but each free occurrence of x in e behaves +like a pointer that points back to mu x . e.

+

With that in mind, let us postpone the definition of mu and instead redefine +letrec F X = E in E' as a derived construct, assuming mu available. The +idea is to simply regard F as a fixed-point of the function

+
lambda X . E
+
+

that is, to first calculate

+
mu F . lambda X . E
+
+

and then to evaluate E' where F is bound to this fixed-point:

+
let F = mu F . lambda X . E in E'
+
+

This new definition of letrec may still look a bit tricky, particularly +because F is bound twice, but it is much simpler and cleaner than our +previous definition. Moreover, now it is done in a type-safe manner +(this aspect goes beyond our objective in this tutorial).

+

Let us now define the semantic rule of mu.

+

The semantics of mu is actually disarmingly simple. We just have to +substitute mu X . E for each free occurrence of X in E:

+
mu X . E => E[(mu X . E) / X]
+
+

Compile lambda.k and execute some recursive programs. They should be now +several times faster. Write a few more recursive programs, for example ones +for calculating the Ackermann function, for calculating the number of moves +needed to solve the Hanoi tower problem, etc.

+

We have defined our first programming language in K, which allows us to +write interesting functional programs. In the next lesson we will learn how +to fully document our language definition, in order to disseminate it, to ship +it to colleagues or friends, to publish it, to teach it, and so on.

+

Go to Lesson 9, LAMBDA: A Complete and Commented Definition.

+

MOVIE (out of date) [2'40"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_9/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_9/index.html new file mode 100644 index 00000000000..06fcbd1d2bd --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_9/index.html @@ -0,0 +1,453 @@ + + + + + + + + + + + + + + +A Complete and Documented K Definition | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

A Complete and Documented K Definition

+

In this lesson you will learn how to add formal comments to your K definition, +in order to nicely document it. The generated document can be then used for +various purposes: to ease understanding the K definition, to publish it, +to send it to others, etc.

+

The K tool allows a literate programming style, where the executable +language definition can be documented by means of annotations. One such +annotation is the latex(_) annotation, where you can specify how to format +the given production when producing Latex output via the --output latex +option to krun, kast, and kprove.

+

There are three types of comments, which we discuss next.

+

Ordinary comments

+

These use // or /* ... */, like in various programming languages. These +comments are completely ignored.

+

Document annotations

+

Use the @ symbol right after // or /* in order for the comment to be +considered an annotation and thus be processed by the K tool when it +generates documentation.

+

As an example, we can go ahead and add such an annotation at the beginning +of the LAMBDA module, explaining how we define the syntax of this language.
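For instance, such an annotation could look as follows (the exact wording below is ours, purely for illustration):

/*@ \section{Syntax}
    We define here the syntax of the call-by-value LAMBDA language. */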

+

Header annotations

+

Use the ! symbol right after // or /* if you want the comment to be +considered a header annotation, that is, one which goes before +\begin{document} in the generated Latex. You typically need header +annotations to include macros, or to define a title, etc.

+

As an example, let us set a Latex length and then add a title and an +author to this K definition.
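For instance (the particular length, title, and author below are illustrative assumptions):

//! \setlength{\parindent}{1em}
//! \title{LAMBDA}
//! \author{Grigore Ro\c{s}u}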

+

Compile the documentation and take a look at the results. Notice the title.

+

Feel free to now add lots of annotations to lambda.k.

+

Then compile and check the result. Depending on your PDF viewer, you +may also see a nice click-able table of contents, with all the sections +of your document. This could be quite convenient when you define large +languages, because it helps you jump to any part of the semantics.

+

Tutorial 1 is now complete. The next tutorial will take us through the definition of a simple imperative language and will expose us to more features of the K framework and the K tool.

+

MOVIE (out of date) [6'07"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/1_lambda/lesson_9/lambda/index.html b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_9/lambda/index.html new file mode 100644 index 00000000000..082e78445eb --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/1_lambda/lesson_9/lambda/index.html @@ -0,0 +1,548 @@ + + + + + + + + + + + + + + +Tutorial 1 --- LAMBDA | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

K code can be nested inside Markdown using annotated code blocks. +Use the tag k to tell the compiler which blocks to select.

+

Inside .k files, C/Java-like comments are available.

+
k
// Single line comment
/* Multiline
comments */
+

Tutorial 1 --- LAMBDA

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Abstract

+

This file defines a simple functional language in K, called LAMBDA, +using a substitution style. The explicit objective here is to teach some +K concepts and how they work in the K tool, and not to teach +λ-calculus or to argue for one definitional style against another +(e.g., some may prefer environment/closure-based definitions of such +languages).

+

Note that the subsequent definition is so simple, that it hardly shows any +of the strengths of K. Perhaps the most interesting K aspect it shows is +that substitution can be defined fully generically, and then used to give +semantics to various constructs in various languages.

+

Note: +K follows the +literate programming +approach. The various semantic features defined in a K +module can be reordered at will and can be commented using normal +comments like in C/C++/Java. +While comments are useful in general, they can annoy the expert user +of K. To turn them off, you can do one of the following (unless you +want to remove them manually):
+(1) Use an editor which can +hide or highlight Markdown and conventional C-like comments; or
+(2) Run kompile --debug <def>. Inside ./.kompiled-xxx/.md2.k/ you will find +all the K code extracted from the markdown files as used for compilation.

+

Substitution

+

We need the predefined substitution module, so we require it with the command +below. Then we should make sure we import its module called SUBSTITUTION +in our LAMBDA module below.

+
k
module LAMBDA-SYNTAX
  imports DOMAINS-SYNTAX
  imports ID-SYNTAX
+

Basic Call-by-value λ-Calculus Syntax

+

We first define the syntax of conventional call-by-value λ-calculus, making +sure we declare the lambda abstraction construct to be a binder, the +lambda application to be strict, and the parentheses used for grouping as +a bracket.

+

Note: +Syntax in K is defined using the familiar BNF notation, with +terminals enclosed in quotes and nonterminals starting with capital +letters. K actually extends BNF with several attributes, which will be +described in this tutorial.

+

Note: +The strict constructs can evaluate their arguments in any (fully +interleaved) order.

+

The initial syntax of our λ-calculus:

+
k
  syntax Val ::= Id
               | "lambda" Id "." Exp
  syntax Exp ::= Val
               | Exp Exp              [left, strict]
               | "(" Exp ")"          [bracket]
+

Integer and Boolean Builtins Syntax

+

The LAMBDA arithmetic and Boolean expression constructs are simply rewritten +to their builtin counterparts once their arguments are evaluated. +The annotated operators in the right-hand side of the rules below are +builtin and come with the corresponding builtin sort. Note that the +variables appearing in these rules have integer sort. That means that these +rules will only be applied after the arguments of the arithmetic constructs +are fully evaluated to K results; this will happen thanks to their strictness +attributes declared as annotations to their syntax declarations (below).

+
k
  syntax Val ::= Int | Bool
  syntax Exp ::= "-" Int
               > Exp "*" Exp          [strict, left]
               | Exp "/" Exp          [strict]
               > Exp "+" Exp          [strict, left]
               > Exp "<=" Exp         [strict]
+

Conditional Syntax

+

Note that the if construct is strict only in its first argument.

+
k
syntax Exp ::= "if" Exp "then" Exp "else" Exp [strict(1)] +
+

Let Binder

+

The let binder is a derived construct, because it can be defined using λ.

+
k
syntax Exp ::= "let" Id "=" Exp "in" Exp [macro] + rule let X = E in E':Exp => (lambda X . E') E +
+

Letrec Binder

+

We prefer a definition based on the μ construct. Note that μ is not +really necessary, but it makes the definition of letrec easier to understand +and faster to execute.

+
k
syntax Exp ::= "letrec" Id Id "=" Exp "in" Exp [macro] + | "mu" Id "." Exp + rule letrec F:Id X:Id = E in E' => let F = mu F . lambda X . E in E' +endmodule +
+

LAMBDA module

+
k
module LAMBDA
  imports LAMBDA-SYNTAX
  imports DOMAINS

  syntax KResult ::= Val
+

β-reduction

+
k
  syntax Set ::= freeVars( Exp )  [function]
  rule freeVars( _ ) => .Set  [owise]
  rule freeVars( V:Id ) => SetItem(V)
  rule freeVars( lambda X . E ) => freeVars( E ) -Set SetItem(X)
  rule freeVars( E1 E2 ) => freeVars(E1) freeVars(E2)
  rule freeVars( E1 * E2 ) => freeVars(E1) freeVars(E2)
  rule freeVars( E1 / E2 ) => freeVars(E1) freeVars(E2)
  rule freeVars( E1 + E2 ) => freeVars(E1) freeVars(E2)
  rule freeVars( E1 <= E2 ) => freeVars(E1) freeVars(E2)
  rule freeVars( if B then E1 else E2) => freeVars(B) freeVars(E1) freeVars(E2)

  syntax Id ::= freshVar(Id, Int, Set)  [function]
  rule freshVar(V, I, S) => #let X = String2Id(Id2String(V) +String Int2String(I)) #in #if X in S #then freshVar(V, I +Int 1, S) #else X #fi

  syntax Exp ::= Exp "[" Exp "/" Id "]"  [function]

  rule X:Exp [_ / _] => X  [owise]
  rule X [V / X] => V

  rule (lambda Y . E) [_ / Y] => lambda Y . E
  rule (lambda Y . E) [V / X] => lambda Y . (E[V / X]) requires Y =/=K X andBool notBool (Y in freeVars(V))
  rule (lambda Y . E) [V / X] => #let Z = freshVar(Y, 0, freeVars(E) freeVars(V)) #in lambda Z . (E[Z / Y] [V / X])
       requires Y =/=K X andBool Y in freeVars(V)

  rule (E1:Exp E2:Exp) [V / X] => E1[V / X] (E2[V / X])

  rule (E1:Exp * E2:Exp) [V / X] => E1[V / X] * (E2[V / X])
  rule (E1:Exp / E2:Exp) [V / X] => E1[V / X] / (E2[V / X])
  rule (E1:Exp + E2:Exp) [V / X] => E1[V / X] + (E2[V / X])
  rule (E1:Exp <= E2:Exp) [V / X] => E1[V / X] <= (E2[V / X])

  rule (if C then E1 else E2) [V / X] => if C[V / X] then E1[V / X] else (E2[V / X])

  rule (lambda X:Id . E:Exp) V:Val => E[V / X]
+

Integer Builtins

+
k
  rule - I => 0 -Int I
  rule I1 * I2 => I1 *Int I2
  rule I1 / I2 => I1 /Int I2  requires I2 =/=Int 0
  rule I1 + I2 => I1 +Int I2
  rule I1 <= I2 => I1 <=Int I2
+

Conditional

+
k
  rule if true then E else _ => E
  rule if false then _ else E => E
+

Mu

+
k
  rule mu X . E => E[(mu X . E) / X]
endmodule
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/2_imp/NOTES/index.html b/k-distribution/pl-tutorial/1_k/2_imp/NOTES/index.html new file mode 100644 index 00000000000..0d293a48122 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/2_imp/NOTES/index.html @@ -0,0 +1,387 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+
    +
  • Revise the change of S1 S2 into S1:Stmt S2:Stmt, if needed; only S2 +really needs to be sorted.
  • +
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/2_imp/index.html b/k-distribution/pl-tutorial/1_k/2_imp/index.html new file mode 100644 index 00000000000..0e306d15637 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/2_imp/index.html @@ -0,0 +1,411 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Part 2: Defining IMP

+

Here you will learn how to define a very simple imperative language in K +and the basics of how to work with configurations, cells, and computations. +Specifically, you will learn the following:

+
    +
  • How to define languages using multiple modules.
  • +
  • How to define sequentially strict syntactic constructs.
  • +
  • How to use K's syntactic lists.
  • +
  • How to define, initialize and configure configurations.
  • +
  • How the language syntax is swallowed by the builtin K syntactic category.
  • +
  • The additional syntax of the K syntactic category.
  • +
  • How the strictness annotations are automatically desugared into rules.
  • +
  • The first steps of the configuration abstraction mechanism.
  • +
+

Like in the previous tutorial, this folder contains several lessons, each +adding new features to IMP. Do them in order. Also, make sure you completed +and understood the previous tutorial.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/2_imp/lesson_1/index.html b/k-distribution/pl-tutorial/1_k/2_imp/lesson_1/index.html new file mode 100644 index 00000000000..5bca1d45cfd --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/2_imp/lesson_1/index.html @@ -0,0 +1,549 @@ + + + + + + + + + + + + + + +Defining a More Complex Syntax | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Defining a More Complex Syntax

+

Here we learn how to define a more complex language syntax than LAMBDA's, +namely the C-like syntax of IMP. Also, we will learn how to define languages +using multiple modules, because we are going to separate IMP's syntax from +its semantics using modules. Finally, we will also learn how to use K's +builtin support for syntactic lists.

+

The K tool provides modules for grouping language features. In general, we +can organize our languages in arbitrarily complex module structures. +While there are no rigid requirements or even guidelines for how to group +language features in modules, we often separate the language syntax from the +language semantics in different modules.

+

In our case here, we start by defining two modules, IMP-SYNTAX and IMP, and +import the first in the second, using the keyword imports. As their names +suggest, we will place all IMP's syntax definition in IMP-SYNTAX and all its +semantics in IMP.

+

Note, however, that K does no more than simply include all the contents of the imported module in the one which imports it (making sure that everything is only kept once, even if you import it multiple times). In other words, there is currently nothing fancy in the K tool's module system.

+

IMP has six syntactic categories, as shown in imp.k: AExp for arithmetic +expressions, BExp for Boolean expressions, Block for blocks, Stmt for +statements, Pgm for programs and Ids for comma-separated lists of +identifiers. Blocks are special statements, whose role is to syntactically +constrain the conditional statement and the while loop statement to only +take blocks as branches and body, respectively.

+

There is nothing special about arithmetic and Boolean expressions. They +are given the expected strictness attributes, except for <= and &&, +for demonstration purposes.

+

The <= is defined to be seqstrict, which means that it evaluates its +arguments in order, from left-to-right (recall that the strict operators +can evaluate their arguments in any, fully interleaved, orders). Like +strict, the seqstrict annotation can also be configured; for example, one +can specify in which arguments and in what order. By default, seqstrict +refers to all the arguments, in their left-to-right order. In our case here, +it is equivalent with seqstrict(1 2).

+

The && is only strict in its first argument, because we will give it a short-circuited semantics (its second argument will only be evaluated when the first evaluates to true). Recall that the K tool also allows us to associate LaTex attributes to constructs, telling the document generator how to display them. For example, we associate to <= the attribute latex({#1}\leq{#2}), which makes it display as \leq everywhere in the generated LaTex documentation.
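For reference, the Boolean expression syntax in imp.k looks roughly like this (a sketch; see imp.k for the exact productions and precedences):

syntax BExp ::= Bool
              | AExp "<=" AExp   [seqstrict, latex({#1}\leq{#2})]
              | "!" BExp         [strict]
              > BExp "&&" BExp   [strict(1), left]
              | "(" BExp ")"     [bracket]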

+

In this tutorial we take the freedom to associate the various constructs +parsing precedences that we have already tested and we know work well, so that +we can focus on the semantics here instead of syntax. In practice, though, +you typically need to experiment with precedences until you obtain the desired +parser.

+

Blocks are defined using curly brackets, and they can either be empty or +hold a statement.

+

Nothing special about the IMP statements. Note that ; is an assignment +statement terminator, not a statement separator. Note also that blocks are +special statements.

+

An IMP program declares a comma-separated list of variables using the keyword +int like in C, followed by a semicolon ;, followed by a statement. +Syntactically, the idea here is that we can wrap any IMP program within a +main(){...} function and get a valid C program. IMP does not allow variable +declarations anywhere else except through this construct, at the top-level of +the program. Other languages provided with the K distribution (see, e.g., the +IMP++ language also discussed in this tutorial) remove this top-level program +construct of IMP and add instead variable declaration as a statement construct, +which can be used anywhere in the program, not only at the top level.

+

Note how we defined the comma-separated list of identifiers using +List{Id,","}. The K tool provides builtin support for generic syntactic +lists. In general,

+
syntax B ::= List{A,T}
+
+

declares a new non-terminal, B, corresponding to T-separated sequences of +elements of A, where A is a non-terminal and T is a terminal. These +lists can also be empty, that is, IMP programs declaring no variable are also +allowed (e.g., int; {} is a valid IMP program). To instantiate and use +the K builtin lists, you should alias each instance with a (typically fresh) +non-terminal in your syntax, like we do with the Ids nonterminal.
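In imp.k this looks as follows; the Pgm production mirrors the description above:

syntax Ids ::= List{Id,","}
syntax Pgm ::= "int" Ids ";" Stmt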

+

Like with other K features, there are ways to configure the syntactic lists, +but we do not discuss them here.

+

Recall from Tutorial 1 (LAMBDA) that in order for strictness to work well +we also need to tell K which computations are meant to be results. We do +this as well now, in the module IMP: integers and Booleans are K results.
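Concretely:

syntax KResult ::= Int | Bool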

+

Kompile imp.k and test the generated parser by running some programs. Since IMP is a fragment of C, you may want to select the C mode in your editor when writing these programs. This will also give you the feel that you are writing programs in a real programming language.

+

For example, here is sum.imp, which sums in sum all numbers up to n:

+
int n, sum;
+n = 100;
+sum=0;
+while (!(n <= 0)) {
+  sum = sum + n;
+  n = n + -1;
+}
+
+

Now krun it and see how it looks parsed in the default k cell.

+

The program collatz.imp tests the Collatz conjecture for all numbers up to +m and accumulates the total number of steps in s:

+
int m, n, q, r, s;
+m = 10;
+while (!(m<=2)) {
+  n = m;
+  m = m + -1;
+  while (!(n<=1)) {
+    s = s+1;
+    q = n/2;
+    r = q+q+1;
+    if (r<=n) {
+      n = n+n+n+1;         // n becomes 3*n+1 if odd
+    } else {n=q;}          //        or   n/2 if even
+  }
+}
+
+

Finally, program primes.imp counts in s all the prime numbers up to m:

+
int i, m, n, q, r, s, t, x, y, z;
+m = 10;  n = 2;
+while (n <= m) {
+  // checking primality of n and writing t to 1 or 0
+  i = 2;  q = n/i;  t = 1;
+  while (i<=q && 1<=t) {
+    x = i;
+    y = q;
+    // fast multiplication (base 2) algorithm
+    z = 0;
+    while (!(x <= 0)) {
+      q = x/2;
+      r = q+q+1;
+      if (r <= x) { z = z+y; } else {}
+      x = q;
+      y = y+y;
+    } // end fast multiplication
+    if (n <= z) { t = 0; } else { i = i+1;  q = n/i; }
+  } // end checking primality
+  if (1 <= t) { s = s+1; } else {}
+  n = n+1;
+}
+
+

All the programs above will run once we define the semantics of IMP. If you +want to execute them now, wrap them in a main(){...} function and compile +them and run them with your favorite C compiler.

+

Before we move to the K semantics of IMP, we would like to make some clarifications regarding the K builtin parser, kast. Although it is quite powerful, you should not expect magic from it! While the K parser can parse many non-trivial languages (see, for example, the KOOL language in pl-tutorial/2_languages in the K distribution), it was never meant to be a substitute for real parsers. We often call the syntax defined in K the syntax of the semantics, to highlight the fact that its role is to serve as a convenient notation when writing the semantics, not necessarily as a means to define the concrete syntax of arbitrarily complex programming languages. See the KERNELC language for an example of how to connect an external parser for concrete syntax to the K tool.

+

The above being said, we strongly encourage you to strive to make the +builtin parser work with your desired language syntax! Do not give up +simply because you don't want to deal with syntactic problems. On the +contrary, fight for your syntax! If you really cannot define your desired +syntax because of tool limitations, we would like to know. Please tell us.

+

Until now we have only seen default configurations. In the next lesson we +will learn how to define a K custom configuration.

+

Go to Lesson 2, IMP: Defining a Configuration.

+

MOVIE (out of date) [09'15"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/2_imp/lesson_2/index.html b/k-distribution/pl-tutorial/1_k/2_imp/lesson_2/index.html new file mode 100644 index 00000000000..7ef6b31b44c --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/2_imp/lesson_2/index.html @@ -0,0 +1,461 @@ + + + + + + + + + + + + + + +Defining a Configuration | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Defining a Configuration

+

Here we learn how to define a configuration in K. We also learn how to +initialize and how to display it.

+

As explained in the overview presentation on K, configurations are quite +important, because all semantic rules match and apply on them. +Moreover, they are the backbone of configuration abstraction, which allows +you to only mention the relevant cells in each semantic rule, the rest of +the configuration context being inferred automatically. The importance of +configuration abstraction will become clear when we define more complex +languages (even in IMP++). IMP does not really need it. K configurations +are constructed making use of cells, which are labeled and can be arbitrarily +nested.

+

Configurations are defined with the keyword configuration. Cells are +defined using an XML-ish notation stating clearly where the cell starts +and where it ends.

+

While not enforced by the tool, we typically like to put the entire +configuration in a top-level cell, called T. So let's define it:

+
configuration <T>...</T>
+
+

Cells can have other cells inside. In our case of IMP, we need a cell to +hold the remaining program, cell which we typically call k, and a cell to +hold the program state. Let us add them:

+
configuration <T> <k>...</k> <state>...</state> </T>
+
+

K allows us to also specify how to initialize a configuration at the same +time with declaring the configuration. All we have to do is to fill in +the contents of the cells with some terms. The syntactic categories of +those terms will also indirectly define the types of the corresponding +cells.

+

For example, we want the k cell to initially hold the program that is passed to krun. K provides a builtin configuration variable, called $PGM, which is specifically designed for this purpose: krun will place its program there (after it parses it, of course). The K tool allows users to define their own configuration variables, too, which can be used to develop custom initializations of program configurations with the help of krun; this can be quite useful when defining complex languages, but we do not discuss it in this tutorial.

+
configuration <T> <k> $PGM </k> <state>...</state>  </T>
+
+

Moreover, we want the program to be a proper Pgm term (because we do not +want to allow krun to take fragments of programs, for example, statements). +Therefore, we tag $PGM with the desired syntactic category, Pgm:

+
configuration <T> <k> $PGM:Pgm </k> <state>...</state>  </T>
+
+

Like for other variable tags in K, a run-time check will be performed and the +semantics will get stuck if the passed term is not a well-formed program.

+

We next tell K that the state cell should be initialized with the empty map:

+
configuration <T> <k> $PGM:Pgm </k> <state> .Map </state>  </T>
+
+

Recall that in K . stands for nothing. However, since there are various +types of nothing, to avoid confusion we can suffix the . with its desired +type. K has several builtin data-types, including lists, sets, bags, and +maps. .Map is the empty map.

+

Kompile imp.k and run several programs to see how the configuration is +initialized as desired.

+

When configurations get large, and they do when defining large programming +languages, you may want to color the cells in order to more easily distinguish +them. This can be easily achieved using the color cell attribute, following +again an XML-ish style:

+
configuration <T color="yellow">
+                <k color="green"> $PGM:Pgm </k>
+                <state color="red"> .Map </state>
+              </T>
+
+

In the next lesson we will learn how to write rules that involve cells.

+

Go to Lesson 3, IMP: Computations, Results, Strictness; Rules Involving Cells.

+

MOVIE (out of date) [04'21"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/2_imp/lesson_3/index.html b/k-distribution/pl-tutorial/1_k/2_imp/lesson_3/index.html new file mode 100644 index 00000000000..b6a16bdd256 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/2_imp/lesson_3/index.html @@ -0,0 +1,599 @@ + + + + + + + + + + + + + + +Computations, Results, Strictness; Rules Involving Cells | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Computations, Results, Strictness; Rules Involving Cells

+

In this lesson we will learn about the syntactic category K of computations, +about how strictness attributes are in fact syntactic sugar for rewrite rules +over computations, and why it is important to tell the tool which +computations are results. We will also see a K rule that involves cells.

+

K Computations

+

Computation structures, or more simply computations, extend the abstract +syntax of your language with a list structure using ~> (read followed +by or and then, and written \curvearrowright in Latex) as a separator. +K provides a distinguished sort, K, for computations. The extension of the +abstract syntax of your language into computations is done automatically by +the K tool when you declare constructs using the syntax keyword, so the K +semantic rules can uniformly operate only on terms of sort K. The intuition +for computation structures of the form

+
t1 ~> t2 ~> ... ~> tn
+
+

is that the listed tasks are to be processed in order. The initial +computation typically contains the original program as its sole task, but +rules can then modify it into task sequences, as seen shortly.

+

Strictness in Theory

+

The strictness attributes, used as annotations to language constructs, +actually correspond to rules over computations. For example, the +strict(2) attribute of the assignment statement corresponds to the +following two opposite rules (X ranges over Id and A over AExp):

+
X=A; => A ~> X=[];
+A ~> X=[]; => X=A;
+
+

The first rule pulls A from the syntactic context X=A; and schedules it +for processing. The second rule plugs A back into its context. +Inspired from the chemical abstract machine, we call rules of the first +type above heating rules and rules of the second type cooling rules. +Similar rules are generated for other arguments in which operations are +strict. Iterative applications of heating rules eventually bring to the +top of the computation atomic tasks, such as a variable lookup, or a +builtin operation, which then make computational progress by means of other +rules. Once progress is made, cooling rules can iteratively plug the result +back into context, so that heating rules can pick another candidate for +reduction, and so on and so forth.

+

When operations are strict only in some of their arguments, the corresponding +positions of the arguments in which they are strict are explicitly enumerated +in the argument of the strict attribute, e.g., strict(2) like above, or +strict(2 3) for an operation strict in its second and third arguments, etc. +If an operation is simply declared strict then it means that it is strict +in all its arguments. For example, the strictness of addition yields:

+
A1+A2 => A1 ~> []+A2
+A1 ~> []+A2 => A1+A2
+A1+A2 => A2 ~> A1+[]
+A2 ~> A1+[] => A1+A2
+
+

It can be seen that such heating/cooling rules can easily lead to +non-determinism, since the same term may be heated many different ways; +these different evaluation orders may lead to different behaviors in some +languages (not in IMP, because its expressions do not have side effects, +but we will experiment with non-determinism in its successor, IMP++).

+

A similar desugaring applies to sequential strictness, declared with the +keyword seqstrict. While the order of arguments of strict is irrelevant, +it matters in the case of seqstrict: they are to be evaluated in the +specified order; if no arguments are given, then they are assumed by default +to be evaluated from left-to-right. For example, the default heating/cooling +rules associated to the sequentially strict <= construct above are +(A1, A2 range over AExp and I1 over Int):

+
A1<=A2 => A1 ~> []<=A2
+A1 ~> []<=A2 => A1<=A2
+I1<=A2 => A2 ~> I1<=[]
+A2 ~> I1<=[] => I1<=A2
+
+

In other words, A2 is only heated/cooled after A1 is already evaluated.

+

While the heating/cooling rules give us a nice and uniform means to define +all the various allowable ways in which a program can evaluate, all based +on rewriting, the fact that they are reversible comes with a serious practical +problem: they make the K definitions unexecutable, because they lead to +non-termination.

+

Strictness in Practice; K Results

+

To break the reversibility of the theoretical heating/cooling rules, and, +moreover, to efficiently execute K definitions, the current implementation of +the K tool relies on users giving explicit definitions of their languages' +results.

+

The K tool provides a predicate isKResult, which is automatically defined +as we add syntactic constructs to KResult (in fact the K tool defines such +predicates for all syntactic categories, which are used, for example, as +rule side conditions to check user-declared variable memberships, such as +V:Val stating that V belongs to Val).

+

The kompile tool, depending upon what it is requested to do, changes the +reversible heating/cooling rules corresponding to evaluation strategy +definitions (e.g., those corresponding to strictness attributes) to avoid +non-termination. For example, when one is interested in obtaining an +executable model of the language (which is the default compilation mode of +kompile), then heating is performed only when the to-be-pulled syntactic +fragment is not a result, and the corresponding cooling only when the +to-be-plugged fragment is a result. In this case, e.g., the heating/cooling +rules for assignment are modified as follows:

+
X=A; => A ~> X=[];  requires notBool isKResult(A)
+A ~> X=[]; => X=A;  requires isKResult(A)
+
+

Note that non-termination of heating/cooling is avoided now. The only thing +lost is the number of possible behaviors that a program can manifest, but +this is irrelevant when all we want is one behavior.

+

As will be discussed in the IMP++ tutorial, the heating/cooling rules are modified differently by kompile when we are interested in other aspects of the language definition, such as, for example, a searchable model that comprises all program behaviors. This latter model is obviously more general from a theoretical perspective, but, in practice, it is also slower to execute. The kompile tool strives to give you the best model of the language for the task you are interested in.

+

Can't Results be Inferred Automatically?

+

This is a long story, but the short answer is: No! Maybe in some cases it is possible, but we prefer not to attempt it in the K tool. For example, you most likely do not want any stuck computation to count as a result, since some of them can happen simply because you forgot a semantic rule that could have further reduced them! Besides, in our experience with defining large languages, it is quite useful to take your time and think of what the results of your language's computations are. This fact in itself may help you improve your overall language design. We typically do it at the same time as defining the evaluation strategies of our languages. Although in theory K could infer the results of your language as the stuck computations, based on the above we have deliberately decided not to provide this feature, in spite of requests from some users. So you currently do have to explicitly define your K results if you want to effectively use the K tool. Note, however, that theoretical definitions, not meant to be executed, need not worry about defining results (that's because in theory semantic rules apply modulo the reversible heating/cooling rules, so results are not necessary).

+

A K Rule Involving Cells

+

All our K rules so far in the tutorial were of the form

+
rule left => right requires condition
+
+

where left and right were syntactic, or more generally computation, terms.

+

Here is our first K rule explicitly involving cells:

+
rule <k> X:Id => I ...</k> <state>... X |-> I ...</state>
+
+

Recall that the k cell holds computations, which are sequences of tasks +separated by ~>. Also, the state cell holds a map, which is a set of +bindings, each binding being a pair of computations (currently, the +K builtin data-structures, like maps, are untyped; or, said differently, +they are all over the type of computations, K).
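
As an illustration (a hypothetical snapshot, not taken from the tutorial files), while executing the assignment Y = X ; the two cells may look as follows, with the lookup of X as the first task in the k cell and the heated assignment context following it:

<k> X ~> Y = [] ; </k>  <state> X |-> 1  Y |-> 0 </state>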

+

Therefore, the two cells mentioned in the rule above hold collections +of things, ordered or not. The ...s, which we also call cell frames, +stand for more stuff there, which we do not care about.

+

The rewrite relation => is allowed in K to appear anywhere in a term, its +meaning being that the corresponding subterm is rewritten as indicated in the +shown context. We say that K's rewriting is local.

+

The rule above says that if the identifier X is the first task in the k +cell, and if X is bound to I somewhere in the state, then X rewrites +to I locally in the k cell. Therefore, IMP variables need to be already +declared when looked up.

+

Of course, the K rule above can be translated into an ordinary rewrite rule +of the form

+
rule <k> X ~> Rest </k> <state> Before (X |-> I) After </state>
+  => <k> I ~> Rest </k> <state> Before (X |-> I) After </state>
+
+

Besides being more verbose and thus tedious to write, this ordinary rule is also more error-prone; for example, we may forget the Rest variable in the right-hand side, etc. Moreover, the concurrent semantics of K allows its rules to be interpreted as concurrent transactions, where the context is the read-only component of the transaction, while the subterms which are rewritten form the read/write component; thus, K rule instances can apply concurrently if they only overlap on read-only parts, while they cannot if regarded as ordinary rewrite logic rules. Note: our current implementation of the K tool is not concurrent, so K rules are in fact desugared as normal rewrite rules in the K tool.

+

Kompile imp.k using a documentation option and check out how the K rule +looks in the generated document. The ... frames are displayed as cell +tears, metaphorically implying that those parts of the cells that we +do not care about are torn away. The rewrite relation is replaced by a +horizontal line: specifically, the subterm which rewrites, X, is +underlined, and its replacement is written underneath the line.

+

In the next lesson we define the complete K semantics of IMP and +run the programs we parsed in the first lesson.

+

Go to Lesson 4, IMP: Configuration Abstraction, Part 1; Types of Rules.

+

MOVIE (out of date) [10'30"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/2_imp/lesson_4/exercises/purely-syntactic/index.html b/k-distribution/pl-tutorial/1_k/2_imp/lesson_4/exercises/purely-syntactic/index.html new file mode 100644 index 00000000000..6acf836fad1 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/2_imp/lesson_4/exercises/purely-syntactic/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Modify IMP so that the K "followed by" arrow, ~>, does not explicitly occur in the definition (it currently occurs in the semantics of sequential composition).

+

Hint: make sequential composition strict(1) or seqstrict, and have +statements reduce to {} instead of .; and don't forget to make +{} a KResult (you may need a new syntactic category for that, which +only includes {} and is included in KResult).
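
A minimal sketch of what the hint suggests (our own guess at a solution; the productions below replace the corresponding ones in imp.k, EmptyBlock is a name we chose, and the remaining statement rules would also need to reduce to {} instead of .):

syntax EmptyBlock ::= "{" "}"
syntax Block ::= EmptyBlock | "{" Stmt "}"
syntax KResult ::= EmptyBlock
syntax Stmt ::= Stmt Stmt  [left, strict(1)]

rule {} S:Stmt => S    // once the first statement is a result, drop it
rule <k> X = I:Int; => {} ...</k> <state>... X |-> (_ => I) ...</state>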

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/2_imp/lesson_4/exercises/uninitialized-variables/index.html b/k-distribution/pl-tutorial/1_k/2_imp/lesson_4/exercises/uninitialized-variables/index.html new file mode 100644 index 00000000000..a890083be42 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/2_imp/lesson_4/exercises/uninitialized-variables/index.html @@ -0,0 +1,389 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Modify the K definition of IMP to not automatically initialize +variables to 0. Instead, declared variables should stay uninitialized +until assigned a value, and the execution should get stuck when an +uninitialized variable is looked up. Specifically, you should add a +new undefined construct of sort K, and initialize all the declared +variables with it.
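
A rough sketch of this change (our own guess, hedged; undefined is the new construct the exercise asks for, and the rule below replaces the initialization rule from imp.k):

syntax KItem ::= "undefined"   // assumption: a new, non-result computation item

rule <k> int (X,Xs => Xs);_ </k> <state> Rho:Map (.Map => X |-> undefined) </state>
  requires notBool (X in keys(Rho))

Since undefined is not a KResult, looking up such a variable leaves undefined stuck at the top of the k cell, as the exercise requires.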

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/2_imp/lesson_4/index.html b/k-distribution/pl-tutorial/1_k/2_imp/lesson_4/index.html new file mode 100644 index 00000000000..11d850da01e --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/2_imp/lesson_4/index.html @@ -0,0 +1,505 @@ + + + + + + + + + + + + + + +Configuration Abstraction, Part 1; Types of Rules | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Configuration Abstraction, Part 1; Types of Rules

+

Here we will complete the K definition of IMP and, while doing so, we will +learn the very first step of what we call configuration abstraction.

+

The IMP Semantic Rules

+

Let us add the remaining rules, in the order in which the language constructs +were defined in IMP-SYNTAX.

+

The rules for the arithmetic and Boolean constructs are self-explanatory. +Note, however, that K will infer the correct sorts of all the variables in +these rules, because they appear as arguments of the builtin operations +(_+Int_, etc.). Moreover, the inferred sorts will be enforced dynamically. +Indeed, we do not want to apply the rule for addition, for example, when the +two arguments are not integers. In the rules for &&, although we prefer to +not do it here for simplicity, we could have eliminated the dynamic check by +replacing B (and similarly for _) with B:K. Indeed, it can be shown +that whenever any of these rules apply, B (or _) is a BExp anyway. +That's because there is no rule that can touch such a B (or _); this +will become clearer shortly, when we discuss the first step of configuration +abstraction. Therefore, since we know that B will be a BExp anyway, we +could save the time it takes to check its sort; such times may look minor, +but they accumulate, so some designers may prefer to avoid run-time checks +whenever possible.
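
Concretely, the short-circuit rules given later in this lesson are rule true && B => B and rule false && _ => false; the variant alluded to above, which skips the dynamic sort check, would look as follows (a sketch only, not used in the actual definition):

rule true  && B:K => B
rule false && _:K => false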

+

The block rules are trivial. However, the rule for non-empty blocks is +semantically correct only because we do not have local variable declarations +in IMP. We will have to change this rule in IMP++.

+

The assignment rule has two =>: one in the k cell dissolving the +assignment statement, and the other in the state cell updating the value of +the assigned variable. Note that the one in the state is surrounded by +parentheses: (_ => I). That is because => is greedy: it matches as much +as it can to the left and to the right, until it reaches the cell boundaries +(closed or open). If you want to limit its scope, or for clarity, you can use +parentheses like here.
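
For reference, here is the assignment rule as it appears in the complete definition later in this document:

rule <k> X = I:Int; => .K ...</k> <state>... X |-> (_ => I) ...</state>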

+

The rule for sequential composition simply desugars S1 S2 into S1 ~> S2. +Indeed, the two have exactly the same semantics. Note that statements +evaluate to nothing (.), so once S1 is processed in S1 ~> S2, then the +next task is automatically S2, without wasting any step for the transition.
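
The rule, taken from the complete definition below, is simply:

rule S1:Stmt S2:Stmt => S1 ~> S2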

+

The rules for the conditional and while statements are clear. One thing to +keep in mind now is that the while unrolling rule will not apply +indefinitely in the positive branch of the resulting conditional, because +of K's configuration abstraction, which will be discussed shortly.
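
For reference, the unrolling rule from the complete definition below is:

rule while (B) S => if (B) {S while (B) S} else {}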

+

An IMP program declares a set of variables and then executes a +statement in the state obtained after initializing all those variables +to 0. The rules for programs initialize the declared variables one by one, +checking also that there are no duplicates. We check for duplicates only for +demonstration purposes, to illustrate the keys predefined operation that +returns the set of keys of a map, and the set membership operation in. +In practice, we typically define a static type checker for our language, +which we execute before the semantics and reject inappropriate programs.

+

The use of the .Ids in the second rule is not necessary. We could have written int; S instead of int .Ids; S and the K tool would parse it and kompile the definition correctly, because the same parser used for parsing programs is also used to parse the semantics. However, we typically prefer to explicitly write the nothing values in the semantics, for clarity; the parser has been extended to accept these. Note that the first rule matches the entire k cell, because int_;_ is the top-level program construct in IMP, so there is nothing following it in the computation cell. The anonymous variable stands for the second argument of this top-level program construct, not for the rest of the computation. The second rule could have also been put in a complete k cell, but we preferred not to, for simplicity.
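
For reference, the two rules for programs, as they appear in the complete definition later in this document, are:

rule <k> int (X,Xs => Xs);_ </k> <state> Rho:Map (.Map => X|->0) </state>
  requires notBool (X in keys(Rho))
rule int .Ids; S => S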

+

Our IMP semantics is now complete, but there are a few more things that we +need to understand and do.

+

Configuration Abstraction, Part 1

+

First, let us briefly discuss the very first step of configuration abstraction. In K, all semantic rules are in fact rules between configurations. As will soon be explained in the IMP++ tutorial, the declared configuration cell structure is used to automatically complete the missing configuration parts in rules. However, many rules do not involve any cells, being rules between syntactic terms (of sort K); for example, we had only three rules involving cells in our IMP semantics. In this case, the k cell will be added automatically and the actual rewrite will happen on top of the enclosed computation. For example, the rule for the while loop is automatically translated into the following:

+
rule <k> while (B) S => if (B) {S while (B) S} else {} ...</k>
+
+

Since the first task in a computation is what needs to be done next, the intuition for this rule completion is that the syntactic transition only happens when the term to rewrite is ready for processing. This explains, for example, why the while loop unrolling does not apply indefinitely in the positive branch of the conditional: the inner while loop is not ready for evaluation yet. We call this rule completion process, as well as other similar ones, configuration abstraction. That is because the incomplete rule abstracts away the configuration structure, thus being easier to read. As we will see when we define IMP++, configuration abstraction is not only a user convenience; it actually significantly increases the modularity of our definitions. The k-cell-completion is only the very first step, though.

+

If you really want certain rewrites over syntactic terms to apply +anywhere they match, then you should tag the rule with the attribute +anywhere, which was discussed in Tutorial 1, Lesson 2.5.
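
For example (a hypothetical rule, not part of the IMP definition), an algebraic simplification tagged this way would apply wherever it matches, not only at the top of the k cell:

rule 0 + A:AExp => A  [anywhere]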

+

Kompile and then krun the programs that you only parsed in Lesson 1. They +should all execute as expected. The state cell shows the final state +of the program. The k cell shows the final code contents, which should be +empty whenever the IMP program executes correctly.

+

Kompile also with the documentation option and take a look at the generated +documentation. The assignment rule should particularly be of interest, +because it contains two local rewrites.

+

In the next lesson we comment the IMP definition and conclude this tutorial.

+

Go to Lesson 5, IMP: Completing and Documenting IMP.

+

MOVIE (out of date) [09'16"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/2_imp/lesson_5/imp/index.html b/k-distribution/pl-tutorial/1_k/2_imp/lesson_5/imp/index.html new file mode 100644 index 00000000000..89de8d60e99 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/2_imp/lesson_5/imp/index.html @@ -0,0 +1,555 @@ + + + + + + + + + + + + + + +IMP | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

IMP

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Abstract

+

This is the K semantic definition of the classic IMP language. +IMP is considered a folklore language, without an official inventor, +and has been used in many textbooks and papers, often with slight +syntactic variations and often without being called IMP. It includes +the most basic imperative language constructs, namely basic constructs +for arithmetic and Boolean expressions, and variable assignment, +conditional, while loop and sequential composition constructs for statements.

+
k
module IMP-SYNTAX
  imports DOMAINS-SYNTAX
+

Syntax

+

This module defines the syntax of IMP. +Note that <= is sequentially strict, and && is strict only in its first +argument, because we want to give it a short-circuit semantics.

+
k
  syntax AExp  ::= Int | Id
                 | "-" Int                    [format(%1%2)]
                 | AExp "/" AExp              [left, strict, color(pink)]
                 | "(" AExp ")"               [bracket]
                 > AExp "+" AExp              [left, strict, color(pink)]
  syntax BExp  ::= Bool
                 | AExp "<=" AExp             [seqstrict]
                 | "!" BExp                   [strict, color(pink)]
                 | "(" BExp ")"               [bracket]
                 > BExp "&&" BExp             [left, strict(1), color(pink)]
  syntax Block ::= "{" "}"
                 | "{" Stmt "}"               [format(%1%i%n%2%d%n%3)]
  syntax Stmt  ::= Block
                 | Id "=" AExp ";"            [strict(2), color(pink), format(%1 %2 %3%4)]
                 | "if" "(" BExp ")"
                   Block "else" Block         [strict(1), colors(yellow, white, white, yellow), format(%1 %2%3%4 %5 %6 %7)]
                 | "while" "(" BExp ")" Block [colors(yellow,white,white), format(%1 %2%3%4 %5)]
                 > Stmt Stmt                  [left, format(%1%n%2)]
+

An IMP program declares a set of variables and then executes a +statement in the state obtained after initializing all those variables +to 0. K provides builtin support for generic syntactic lists: +List{Nonterminal,terminal} stands for terminal-separated lists of Nonterminal elements.

+
k
  syntax Pgm ::= "int" Ids ";" Stmt  [format(%1 %2%3%n%4), colors(yellow,pink)]
  syntax Ids ::= List{Id,","}        [format(%1%2 %3)]
endmodule
+

We are done with the definition of IMP's syntax. Make sure +that you write and parse several interesting programs before you move to the +semantics.

+
k
module IMP
  imports IMP-SYNTAX
  imports DOMAINS
+

Semantics

+

This module defines the semantics of IMP. +Before you start adding semantic rules to a K definition, you need to +define the basic semantic infrastructure consisting of definitions for +results and the configuration.

+

Values and results

+

IMP only has two types of values, or results of computations: integers +and Booleans. We here use the K builtin variants for both of them.

+
k
syntax KResult ::= Int | Bool +
+

Configuration

+

The configuration of IMP is trivial: it only contains two cells, one +for the computation and another for the state. For good encapsulation +and clarity, we place the two cells inside another cell, the top cell +which is labeled T.

+
k
  configuration <T color="yellow">
                  <k color="green"> $PGM:Pgm </k>
                  <state color="red"> .Map </state>
                </T>
+

The configuration variable PGM tells the K tool where to +place the program. More precisely, the command +krun program parses the program and places the resulting +K abstract syntax tree in the k cell before invoking the +semantic rules described in the sequel. The . in the +state cell, written .Map in ASCII in the +imp.md file, is K's way to say nothing. Technically, it +is a constant which is the unit, or identity, of all maps in K +(similar dot units exist for other K structures, such as lists, sets, +multi-sets, etc.).

+

Arithmetic expressions

+

The K semantics of each arithmetic construct is defined below.

+

Variable lookup

+

A program variable X is looked up in the state by matching a binding +of the form X |-> I in the state cell. If such a binding does not +exist, then the rewriting process will get stuck. Thus our semantics of +IMP disallows uses of uninitialized variables. Note that the variable +to be looked up is the first task in the k cell (the cell is +closed to the left and torn to the right), while the binding can be +anywhere in the state cell (the cell is torn at both sides).

+
k
rule <k> X:Id => I ...</k> <state>... X |-> I ...</state> +
+

Arithmetic operators

+

There is nothing special about these, but recall that K's configuration +abstraction mechanism is at work here! That means that the rewrites in the +rules below all happen at the beginning of the k cell.

+
k
  rule I1 / I2 => I1 /Int I2  requires I2 =/=Int 0
  rule I1 + I2 => I1 +Int I2
  rule - I1 => 0 -Int I1
+

Boolean expressions

+

The rules below are straightforward. Note the short-circuited semantics +of &&; this is the reason we annotated the syntax of +&& with the K attribute strict(1) instead of strict.

+
k
  rule I1 <= I2 => I1 <=Int I2
  rule ! T => notBool T
  rule true && B => B
  rule false && _ => false
+

Blocks and Statements

+

There is one rule per statement construct except for the conditional, +which needs two rules.

+

Blocks

+

The empty block {} is simply dissolved. The .K below is the unit of the computation list structure K, that is, the empty task. Similarly, the non-empty blocks are dissolved and replaced by their statement contents, thus effectively giving them a bracket semantics; we can afford to do this only because we have no block-local variable declarations yet in IMP.

+
k
  rule {} => .K
  rule {S} => S
+

Assignment

+

The assigned variable is updated in the state. The variable is expected +to be declared, otherwise the semantics will get stuck. At the same time, +the assignment is dissolved.

+
k
rule <k> X = I:Int; => .K ...</k> <state>... X |-> (_ => I) ...</state> +
+

Sequential composition

+

Sequential composition is simply structurally translated into K's +builtin task sequentialization operation.

+
k
rule S1:Stmt S2:Stmt => S1 ~> S2 +
+

Conditional

+

The conditional statement has two semantic cases, corresponding to +when its condition evaluates to true or to false. +Recall that the conditional was annotated with the attribute +strict(1) in the syntax module above, so only its first +argument is allowed to be evaluated.

+
k
  rule if (true)  S else _ => S
  rule if (false) _ else S => S
+

While loop

+

We give the semantics of the while loop by unrolling.

+
k
rule while (B) S => if (B) {S while (B) S} else {} +
+

Programs

+

The semantics of an IMP program is that its body statement is executed +in a state initializing all its global variables to 0. Since K's +syntactic lists are internally interpreted as cons-lists (i.e., lists +constructed with a head element followed by a tail list), we need to +distinguish two cases, one when the list has at least one element and +another when the list is empty. In the first case we initialize the +variable to 0 in the state, but only when the variable is not already +declared (all variables are global and distinct in IMP).

+
k
  rule <k> int (X,Xs => Xs);_ </k> <state> Rho:Map (.Map => X|->0) </state>
    requires notBool (X in keys(Rho))
  rule int .Ids; S => S
endmodule
+
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/2_imp/lesson_5/index.html b/k-distribution/pl-tutorial/1_k/2_imp/lesson_5/index.html new file mode 100644 index 00000000000..01885b641c5 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/2_imp/lesson_5/index.html @@ -0,0 +1,408 @@ + + + + + + + + + + + + + + +Completing and Documenting IMP | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Completing and Documenting IMP

+

We here learn no new concepts, but it is a good moment to take a break +and contemplate what we learned so far.

+

Let us add lots of formal annotations to imp.k.

+

Once we are done with the annotations, we kompile with the documentation +option and then take a look at the produced document. We often call these +documents language posters. Depending on how much information you add to +these language posters, they can serve as standalone, formal presentations +of your languages. For example, you can print them as large posters and +post them on the wall, or in poster sessions at conferences.

+

This completes our second tutorial. The next tutorials will teach us more +features of the K framework, such as how to define languages with complex +control constructs (like callcc), languages which are concurrent, and so on.

+

MOVIE (out of date) [03'45"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/index.html new file mode 100644 index 00000000000..e4b2f4cba6d --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/index.html @@ -0,0 +1,407 @@ + + + + + + + + + + + + + + +Part 3: Defining LAMBDA++ | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Part 3: Defining LAMBDA++

+

Here you will learn how to define language constructs which abruptly change the execution control flow, and how to define language semantics following an environment/store style. Specifically, you will learn the following:

+
    +
  • How to define constructs like callcc, which allow you to take snapshots of +program executions and to go back in time at any moment.
  • +
  • How to define languages in an environment/store style.
  • +
  • Some basic notions about the use of closures and closure-like semantic +structures to save and restore execution environments.
  • +
  • Some basic intuitions about reusing existing semantics in new languages, +as well as some of the pitfalls in doing so.
  • +
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/exercises/NOTES/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/exercises/NOTES/index.html new file mode 100644 index 00000000000..74d8cfa2ac7 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/exercises/NOTES/index.html @@ -0,0 +1,386 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Maybe we should change the name of callCC, as it is not a good idea to have two constructs with different semantics but names which cannot be distinguished easily.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/exercises/callCC/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/exercises/callCC/index.html new file mode 100644 index 00000000000..9e954f2ec42 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/exercises/callCC/index.html @@ -0,0 +1,386 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Define a variant of callcc, say callCC, which never returns to the +current context unless a value is specifically passed to its argument +continuation. Follow a substitution-based style.
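
One possible sketch (our own guess at a solution, adapting the callcc rules from Lesson 1; the only change is that the remaining computation K is removed from the k cell instead of being kept after the application, so control returns only when the continuation is invoked):

syntax Exp ::= "callCC" Exp  [strict]
syntax Val ::= cc(K)
rule <k> callCC V:Val ~> K => V cc(K) </k>
rule <k> cc(K) V:Val ~> _ => V ~> K </k>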

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/exercises/from-call-CC-to-callcc/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/exercises/from-call-CC-to-callcc/index.html new file mode 100644 index 00000000000..304a2b501b8 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/exercises/from-call-CC-to-callcc/index.html @@ -0,0 +1,385 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Define callcc in terms of callCC, where callCC is explained in the +callCC exercise under LAMBDA++, Lesson 1. Follow a substitution-based style.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/exercises/from-callcc-to-call-CC/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/exercises/from-callcc-to-call-CC/index.html new file mode 100644 index 00000000000..1b089fc14ad --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/exercises/from-callcc-to-call-CC/index.html @@ -0,0 +1,389 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Define callCC in terms of callcc, where callCC is explained in the +callCC exercise under LAMBDA++, Lesson 1. Follow a substitution-based style.

+

To facilitate testing, call the main module CALLCC (see tests/config.xml). +For example, you can define a module CALLCC which imports the previous +definition of callcc and adds the definition of callCC in terms of +callcc.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/index.html new file mode 100644 index 00000000000..9dbdb863ff2 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/index.html @@ -0,0 +1,494 @@ + + + + + + + + + + + + + + +Abrupt Changes of Control | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Abrupt Changes of Control

+

Here we add call-with-current-continuation (callcc) to the definition of +LAMBDA completed in Tutorial 1, and call the resulting language LAMBDA++. +While doing so, we will learn how to define language constructs that +abruptly change the execution control flow.

+

Take over the lambda.k definition from Lesson 8 in Part 1 of this Tutorial, +which is the complete definition of the LAMBDA language, but without the +comments.

+

callcc is a good example for studying the capabilities of a framework to +support abrupt changes of control, because it is one of the most +control-intensive language constructs known. Scheme is probably the first +programming language that incorporated the callcc construct, although +similar constructs have been recently included in many other languages in +one form or another.

+

Here is a quick description: callcc e passes the remaining computation +context, packaged as a function k, to e (which is expected to be a function); +if during its evaluation e passes any value to k, then the current +execution context is discarded and replaced by the one encoded by k and +the value is passed to it; if e evaluates normally to some value v and +passes nothing to k in the process, then v is returned as a result of +callcc e and the execution continues normally. For example, we want the +program callcc-jump.lambda:

+
(callcc (lambda k . ((k 5) + 2))) + 10
+
+

to evaluate to 15, not 17! Indeed, the computation context [] + 10 is +passed to callcc's argument, which then sends it a 5, so the computation +resumes to 5 + 10. On the other hand, the program callcc-not-jump.lambda

+
(callcc (lambda k . (5 + 2))) + 10
+
+

evaluates to 17.

+

If you like playing games, you can metaphorically think of callcc e as +saving your game state in a file and passing it to your friend e. +Then e can decide at some moment to drop everything she was doing, load +your game and continue to play it from where you were.

+

The behavior of many popular control-changing constructs can be obtained +using callcc. The program callcc-return.lambda shows, for example, how to +obtain the behavior of a return statement, which exits the current execution +context inside a function and returns a value to the caller's context:

+
letrec f x = callcc (lambda return . (
+  f (if (x <= 0) then ((return 1) / 0) else 2)
+))
+in (f -3)
+
+

This should evaluate to 1, in spite of the recursive call to f +and of the division by zero! Note that return is nothing but a variable +name, but one which is bound to the current continuation at the beginning of +the function execution. As soon as 1 is passed to return, the computation +jumps back in time to where callcc was defined! Change -3 to 3 and the +program will loop forever.

+

callcc is quite a powerful and beautiful language construct, although one +which is admittedly hard to give semantics to in some frameworks. +But not in K :) Here is the entire K syntax and semantics of callcc:

+
syntax Exp ::= "callcc" Exp  [strict]
+syntax Val ::= cc(K)
+rule <k> (callcc V:Val => V cc(K)) ~> K </k>
+rule <k> cc(K) V ~> _ =>  V ~> K </k>
+
+

Let us first discuss the annotated syntax. We declared callcc strict, +because its argument may not necessarily be a function yet, so it may need +to be evaluated. As explained above, we need to encode the remaining +computation somehow and pass it to callcc's argument. More specifically, +since LAMBDA is call-by-value, we have to encode the remaining computation as +a value. We do not want to simply subsort computations to Val, because there +are computations which we do not want to be values. A simple solution to +achieve our goal here is to introduce a new value construct, say cc (from +current-continuation), which holds any computation.

+

Note that, inspired by SDF, K allows you to define the syntax of helping semantic operations, like cc, more compactly. Typically, we do not need a fancy syntax for such operators; all we need is a name, followed by an open parenthesis, followed by a comma-separated list of arguments, followed by a closed parenthesis. If this is the syntax that you want for a particular construct, then K allows you to drop all the quotes surrounding the terminals, as we did above for cc.

+

The semantic rules do exactly what the English semantics of callcc says. +Note that here, unlike in our definition of LAMBDA in Tutorial 1, we had +to mention the cell <k/> in our rules. This is because we need to make sure +that we match the entire remaining computation, not only a fragment of it! +For example, if we replace the two rules above with

+
rule (callcc V:Val => V cc(K)) ~> K
+rule cc(K) V ~> _ =>  V ~> K
+
+

then we get a callcc which is allowed to non-deterministically pick a +prefix of the remaining computation and pass it to its argument, and then +when invoked within its argument, a non-deterministic prefix of the new +computation is discarded and replaced by the saved one. Wow, that would +be quite a language! Would you like to write programs in it? :)

+

Consequently, in K we can abruptly change the execution control flow of a +program by simply changing the contents of the <k/> cell. This is one of +the advantages of having an explicit representation of the execution context, +like in K or in reduction semantics with evaluation contexts. Constructs like +callcc are very hard and non-elegant to define in frameworks such as SOS, +because those implicitly represent the execution context as proof context, +and the latter cannot be easily changed.

+

Now that we know how to handle cells in configurations and use them in rules, +in the next lesson we take a fresh look at LAMBDA and define it using +an environment-based style, which avoids the complexity of substitution +(e.g., having to deal with variable capture) and is closer in spirit to how +functional languages are implemented.

+

Go to Lesson 2, LAMBDA++: Semantic (Non-Syntactic) Computation Items.

+

MOVIE (out of date) [6'28"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_2/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_2/index.html new file mode 100644 index 00000000000..9c2fe827444 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_2/index.html @@ -0,0 +1,543 @@ + + + + + + + + + + + + + + +Semantic (Non-Syntactic) Computation Items | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Semantic (Non-Syntactic) Computation Items

+

In this lesson we start another semantic definition of LAMBDA++, which +follows a style based on environments instead of substitution. In terms of +K, we will learn how easy it is to add new items to the syntactic category +of computations K, even ones which do not have a syntactic nature.

+

An environment binds variable names of interest to locations where their +values are stored. The idea of environment-based definitions is to maintain +a global store mapping locations to values, and then have environments +available when we evaluate expressions telling where the variables are +located in the store. Since LAMBDA++ is a relatively simple language, we +only need to maintain one global environment. Following a similar style +like in IMP, we place all cells into a top cell T:

+
configuration <T>
+                <k> $PGM:Exp </k>
+                <env> .Map </env>
+                <store> .Map </store>
+              </T>
+
+

Recall that $PGM is where the program is placed by krun after parsing. So +the program execution starts with an empty environment and an empty store.

+

In environment-based definitions of lambda-calculi, lambda abstractions +evaluate to so-called closures:

+
rule <k> lambda X:Id . E => closure(Rho,X,E) ...</k>
+     <env> Rho </env>
+
+

A closure is like a lambda abstraction, but it also holds the environment +in which it was declared. This way, when invoked, a closure knows where to +find in the store the values of all the variables that its body expression +refers to. We will define the lookup rule shortly.

+

Therefore, unlike in the substitution-based definitions of LAMBDA and LAMBDA++, neither the lambda abstractions nor the identifiers are values anymore here, because they both evaluate further: lambda abstractions to closures and identifiers to their values in the store. In fact, the only values at this moment are the closures, and they are purely semantic entities, which cannot be used explicitly in programs. That's why we modified the original syntax of the language to include no Val syntactic category anymore, and that's why we need to add closures as values now; same as before, we add a Val syntactic category which is subsorted to KResult. In general, whenever you have any strictness attributes, you should also define some K results.
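
Concretely, the new value and result declarations look roughly like this (a sketch consistent with the closure rule above; the actual lambda.k may differ in details):

syntax Val ::= closure(Map,Id,Exp)
syntax KResult ::= Val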

+

Invoking a closure is a bit more involved than the substitution-based +beta-reduction: we need to switch to the closure's environment, then create a +new, or fresh, binding for the closure's parameter to the value passed to the +closure, then evaluate the closure's body, and then switch back to the +caller's environment, which needs to be stored somewhere in the meanwhile. +We can do all these with one rule:

+
rule <k> closure(Rho,X,E) V:Val => E ~> Rho' ...</k>
+     <env> Rho' => Rho[X <- !N] </env>
+     <store>... .Map => (!N:Int |-> V) ...</store>
+
+

Therefore, we atomically do all the following:

+
    +
  • switch the computation to the closure's body, E, followed by a +caller-environment-recovery task Rho' (note that Rho' is the +current environment),
  • +
  • generate a fresh location !N (the ! is important, we discuss it below), +bind X to !N in closure's environment and switch the current environment +Rho' to that one,
  • +
  • write the value passed to the closure, V, at location !N.
  • +
+

This was the most complex K rule we've seen so far in the tutorial. Note, +however, that this one rule achieves a lot. It is, in fact, quite compact +considering how much it does. Note also that everything that this K rule +mentions is needed also conceptually in order to achieve this task, so it +is minimal from that point of view. That would not be the case if we +used, instead, a conventional rewrite rule, because we would have had to +mention the remaining store, say Sigma, in both sides of the rule, to say +it stays unchanged. Here we just use ....

+

The declaration of the fresh variable above, !N, is new and needs +some explanation. First, note that !N appears only in the right-hand-side +terms in the rule, that is, it is not matched when the rule is applied. +Instead, a fresh Nat element is generated each time the rule is applied. +In K, we can define syntactic categories which have the capability to +generate fresh elements like above, using unbound variables whose name starts +with a !. The details of how to do that are beyond the scope of this +tutorial (see Tutorial 6). All we need to know here is that an arbitrary +fresh element of that syntactic category is generated each time the rule +is applied. We cannot rely on the particular name or value of the generated +element, because that can change with the next version of the K tool, or +even from execution to execution with the same version. All you can rely +on is that each newly generated element is distinct from the previously +generated elements for the same syntactic category.

+

Unlike in the substitution-based definition, we now also need a lookup rule:

+
rule <k> X => V ...</k>
+     <env>... X |-> N ...</env>
+     <store>... N |-> V ...</store>
+
+

This rule speaks for itself: replace X by the value V located in the store +at X's location N in the current environment.

+

The only thing left to define is the auxiliary environment-recovery operation:

+

rule <k> _:Val ~> (Rho => .) ...</k> <env> _ => Rho </env>

+

When the item preceding the environment recovery task Rho in the +computation becomes a value, replace the current environment with Rho +and dissolve Rho from the computation.

+

Let us kompile and ... fail:

+
kompile lambda
+
+

gives a parsing error saying that V:Val does not fit there in the closure +invocation rule. That's because Val and Exp are currently completely +disconnected, so K rightfully complains that we want to apply a value to +another one, because application was defined to work with expressions, not +values. What we forgot here was to state that Exp includes Val:

+
syntax Exp ::= Val
+
+

Now everything works, but it is a good time to reflect a bit.

+

So we added closures, which are inherently semantic entities, to the syntax of expressions. Does that mean that we can now write LAMBDA programs with closures in them? Interestingly, with our current definition of LAMBDA, which purposely did not follow the nice organization of IMP into syntax and semantics modules, and with K's default parser, kast, you can. But you are not supposed to rely on this! In fact, if you use an external parser, that parser will reject programs with explicit closures. Also, if we split the LAMBDA definition into two modules, one called LAMBDA-SYNTAX containing exclusively the desired program syntax and one called LAMBDA importing the former and defining the syntax of the auxiliary operations and the semantics, then even K's default parser will reject programs using auxiliary syntactic constructs.

+

Indeed, when you kompile a language, say lang.k, the tool will by default +attempt to find a module LANG-SYNTAX and generate the program parser from +that. If it cannot find it, then it will use the module LANG instead. There +are also ways to tell kompile precisely which syntax module you want to use +for the program parser if you don't like the default convention. +See kompile --help.
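
Schematically, the module split discussed above would look as follows (a sketch only; the program syntax productions are abbreviated and approximate):

module LAMBDA-SYNTAX
  // only the program syntax goes here; kompile generates the program parser from it
  syntax Exp ::= Id
               | "lambda" Id "." Exp
               | Exp Exp              [strict, left]
               | "(" Exp ")"          [bracket]
endmodule

module LAMBDA
  imports LAMBDA-SYNTAX
  // semantics-only (auxiliary) syntax, such as closures, and the rules go here
  syntax Val ::= closure(Map,Id,Exp)
  syntax Exp ::= Val
  syntax KResult ::= Val
endmodule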

+

Another insightful thought to reflect upon is the relationship between your language's values and other syntactic categories. It is often the case that values form a subset of the original language syntax, like in IMP (Part 2 of the tutorial), but sometimes that is not true, like in our case here. When that happens, in order for the semantics to be given smoothly and uniformly using the original syntax, you need to extend your language's original syntactic categories with the new values. The same holds true in other semantic approaches, not only in K, even in ones which are considered purely syntactic. As should be clear by now, K does not force you to use a purely syntactic style in your definitions; nevertheless, K does allow you to develop purely syntactic definitions, like LAMBDA in Part 1 of the tutorial, if you prefer those.

+

krun some programs, such as those provided in Lesson 1 of the LAMBDA +tutorial (Part 1). Note the closures, both as results in the <k/> cell, +and as values in the store. Also, since variables are not values anymore, +expressions that contain free variables may get stuck with one of those on +top of their computation. See, for example, free-variable-capture.lambda, +which gets stuck on z, because z is free, so it cannot evaluate it. +If you want, you can go ahead and manually provide a configuration with +z mapped to some location in the environment and that location mapped to +some value in the store, and then you can also execute this program. The +program omega.lambda should still loop.

+

Although we completely changed the definitional style of LAMBDA, the semantics +of the other constructs do not need to change, as seen in the next lesson.

+

Go to Lesson 3, LAMBDA++: Reusing Existing Semantics.

+

MOVIE (out of date) [8'02"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_3/NOTES/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_3/NOTES/index.html new file mode 100644 index 00000000000..5826ddc3e0d --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_3/NOTES/index.html @@ -0,0 +1,401 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+
    +
  • Cut-and-paste is a poor-man's approach to reuse.
  • +
+

Indeed, it is. A better way to reuse, which requires a bit of planning ahead, +is to put each feature in its own module. Then you can simply include the +modules containing the features you want to reuse. Our point in this lesson +was that such reuse is possible, not to teach the best way to do it in +practice. Good methodologies on how to use a technology are equally important.

+
    +
  • Do we need an env/store split? Couldn't we just work with a state?
  • +
+

Since in our language so far we never change the value of a variable, it +happens to be OK to only keep a state. That is, to collapse env/store into +state, then embed the state in closures and restore the state instead of the +environment. However, this simplistic approach breaks as soon as we add +references to our language, because functions can then modify the environment +in which they were declared, so we would have to carry over those changes when +returning from function invocations, which would be quite difficult.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_3/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_3/index.html new file mode 100644 index 00000000000..b9bdabf987f --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_3/index.html @@ -0,0 +1,416 @@ + + + + + + + + + + + + + + +Reusing Existing Semantics | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Reusing Existing Semantics

+

In this lesson we will learn that, in some cases, we can reuse existing +semantics of language features without having to make any change!

+

Although the definitional style of the basic LAMBDA language changed quite +radically in our previous lesson, compared to its original definition in +Part 1 of the tutorial, we fortunately can reuse a large portion of the +previous definition. For example, let us just cut-and-paste the rest of the +definition from Lesson 7 in Part 1 of the tutorial.

+

Let us kompile and krun all the remaining programs from Part 1 of the +tutorial. Everything should work fine, although the store contains lots of +garbage. Garbage collection is an interesting topic, but we do not do it +here. Nevertheless, much of this garbage is caused by the intricate use of +the fixed-point combinator to define recursion. In a future lesson in this +tutorial we will see that a different, environment-based definition of +fixed-points will allocate much less memory.

+

One interesting question at this stage is: how do we know when we can reuse +an existing semantics of a language feature? Well, I'm afraid the answer is: +we don't. In the next lesson we will learn how reuse can fail for quite subtle +reasons, which are impossible to detect statically (and some non-experts may +fail to even detect them at all).

+

Go to Lesson 4, LAMBDA++: Do Not Reuse Blindly!.

+

MOVIE (out of date) [3'21"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_4/NOTES/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_4/NOTES/index.html new file mode 100644 index 00000000000..d4475816ac0 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_4/NOTES/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

With the current version of the tool (as of Sept 12, 2013), the +callcc-env1.lambda program actually evaluates to 4, as expected. +But the comments in the README are still valid, because it could just as +well evaluate to 3. For example, just replace ...+x with x+..., and it +should evaluate to 3 now.

+

Also, the first "fix" suggested in the READMEm to make "+" seqstrict, only +works for that particular program. It does not fix the problem if we change +the program as indicated above. In that case "+" it would need to be +seqstrict(2,1).

+

Also, callcc-env2.lambda evaluates to 3 instead of 4, because of the +particular order in which the strictness of the application operation is +applied. If you make application seqstrict(2,1) then you get 4.

+

Dec 06, 2014: Looks like we should discuss the --search and --transition +options before this lesson, and then kompile the definition with option +--transition = computational and krun it with --search.

+

The README.md says "One is to make + seqstrict in the semantics, to enforce its evaluation from left-to-right. Do it and then run the program above again;". Then it continues and says "The problem is now the non-deterministic evaluation strategy of the function application construct". Grigore will add this as an exercise, asking the reader to fix this non-determinism. Then ask them to propose another example where they still get non-determinism; can they?

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_4/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_4/index.html new file mode 100644 index 00000000000..1d51449fc85 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_4/index.html @@ -0,0 +1,477 @@ + + + + + + + + + + + + + + +Do Not Reuse Blindly! | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Do Not Reuse Blindly!

+

It may be tempting to base your decision to reuse an existing semantics of +a language feature solely on syntactic considerations; for example, to reuse +whenever the parser does not complain. As seen in this lesson, this could +be quite risky.

+

Let's try (and fail) to reuse the definition of callcc from Lesson 1:

+
syntax Exp ::= "callcc" Exp  [strict]
+syntax Val ::= cc(K)
+rule <k> (callcc V:Val => V cc(K)) ~> K </k>
+rule <k> cc(K) V ~> _ =>  V ~> K </k>
+
+

The callcc examples that we tried in Lesson 1 work, so it may look like it works.

+

However, the problem is that cc(K) should also include an environment, +and that environment should also be restored when cc(K) is invoked. +Let's try to illustrate this bug with callcc-env1.lambda

+
let x = 1 in
+  ((callcc lambda k . (let x = 2 in (k x))) + x)
+
+

where the second argument of +, x, should be bound to the top x, which +is 1. However, since callcc does not restore the environment, that x +should be looked up in the wrong, callcc-inner environment, so we should see +the overall result 4.

+

Hm, we get the right result, 3 ... (Note: you may get 4, depending on +your version of K and platform; but both 3 and 4 are possible results, as +explained below and seen in the tests). How can we get 3? Well, recall that ++ is strict, which means that it can evaluate its arguments in any order. +It just happened that in the execution that took place above its second +argument was evaluated first, to 1, and then the callcc was evaluated, but +its cc value K had already included the 1 instead of x ... In Part 4 of +the tutorial we will see how to explore all the non-deterministic behaviors of +a program; we could use that feature of K to debug semantics, too. +For example, in this case, we could search for all behaviors of this program +and we would indeed get two possible value results: 3 and 4.

+

One may think that the problem is the non-deterministic evaluation order +of +, and thus that all we need to do is to enforce a deterministic order +in which the arguments of + are evaluated. Let us follow this path to +see what happens. There are two simple ways to make the evaluation order +of +'s arguments deterministic. One is to make + seqstrict in the +semantics, to enforce its evaluation from left-to-right. Do it and then +run the program above again; you should get only one behavior for the +program above, 4, which therefore shows that copying-and-pasting our old +definition of callcc was incorrect. However, as seen shortly, that only +fixed the problem for the particular example above, but not in general. +Another conventional approach to enforce the desired evaluation order is to +modify the program to enforce the left-to-right evaluation order using let +binders, as we do in callcc-env2.lambda:

+
let x = 1 in
+  let a = callcc lambda k . (let x = 2 in (k x)) in
+    let b = x in
+      (a + b)
+
+

With your installation of K you may get the "expected" result 4 when you +execute this program, so it may look like our non-deterministic problem is +fixed. Unfortunately, it is not. Using the K tool to search for all the +behaviors in the program above reveals that the final result 3 is still +possible. Moreover, both the 3 and the 4 behaviors are possible regardless +of whether + is declared to be seqstrict or just strict. How is that +possible? The problem is now the non-deterministic evaluation strategy of +the function application construct. Indeed, recall that the semantics of +the let-in construct is defined by desugaring to lambda application:

+
rule let X = E in E' => (lambda X . E') E
+
+

With this, the program above eventually reduces to

+
(lambda a . ((lambda b . a + b) x))
+(callcc lambda k . (let x = 2 in (k x)))
+
+

in an environment where x is 1. If the first expression evaluates first, +then it does so to a closure in which x is bound to a location holding 1, +so when applied later on to the x inside the argument of callcc (which is +2), it will correctly lookup x in its enclosed environment and thus the +program will evaluate to 3. On the other hand, if the second expression +evaluates first, then the cc value will freeze the first expression as is, +breaking the relationship between its x and the current environment in which +it is bound to 1, being inadvertently captured by the environment of the +let-in construct inside the callcc and thus making the entire expression +evaluate to 4.

+

So the morale is: Do not reuse blindly. Think!

+

In the next lesson we fix the environment-based semantics of callcc by having +cc also wrap an environment, besides a computation. We will also give a more +direct semantics to recursion, based on environments instead of fixed-point +combinators.

+

Go to Lesson 5, LAMBDA++: More Semantic Computation Items.

+

MOVIE (out of date) [3'37"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_5/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_5/index.html new file mode 100644 index 00000000000..8e9c476c929 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_5/index.html @@ -0,0 +1,452 @@ + + + + + + + + + + + + + + +More Semantic Computation Items | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

More Semantic Computation Items

+

In this lesson we see more examples of semantic (i.e., non-syntactic) +computational items, and how useful they can be. Specifically, we fix the +environment-based definition of callcc and give an environment-based +definition of the mu construct for recursion.

+

Let us first fix callcc. As discussed in Lesson 4, the problem that we +noticed there was that we only recovered the computation, but not the +environment, when a value was passed to the current continuation. This is +quite easy to fix: we modify cc to take both an environment and a +computation, and its rules to take a snapshot of the current environment with +it, and to recover it at invocation time:

+
syntax Val ::= cc(Map,K)
+rule <k> (callcc V:Val => V cc(Rho,K)) ~> K </k> <env> Rho </env>
+rule <k> cc(Rho,K) V:Val ~> _ =>  V ~> K </k> <env> _ => Rho </env>
+
+

Let us kompile and make sure it works with the callcc-env2.lambda program, +which should evaluate to 3, not to 4.

+

Note that the cc value, which can be used as a computation item in the <k/> +cell, is now quite semantic in nature, pretty much the same as the closures.

+

Let us next add one more closure-like semantic computational item, for mu. +But before that, let us reuse the semantics of letrec in terms of mu that +was defined in Lesson 8 of Part 1 of the tutorial on LAMBDA:

+
syntax Exp ::= "letrec" Id Id "=" Exp "in" Exp [macro]
+             | "mu" Id "." Exp                 [latex(\mu{#1}.{#2})]
+rule letrec F:Id X = E in E' => let F = mu F . lambda X . E in E'
+
+

We removed the binder annotation of mu, because it is no longer necessary (since we do not work with substitutions anymore).

+

To save the number of locations needed to evaluate mu X . E, let us replace +it with a special closure which already binds X to a fresh location holding +the closure itself:

+
syntax Exp ::= muclosure(Map,Exp)
+
+rule <k> mu X . E => muclosure(Rho[X <- !N], E) ...</k>
+     <env> Rho </env>
+     <store>... .Map => (!N:Int |-> muclosure(Rho[X <- !N], E)) ...</store>
+
+

Since each time mu X . E is encountered during the evaluation it needs to +evaluate E, we conclude that muclosure cannot be a value. We can declare +it as either an expression or as a computation. Let's go with the former.

+

Finally, here is the rule unrolling the muclosure:

+

rule <k> muclosure(Rho,E) => E ~> Rho' ...</k>
     <env> Rho' => Rho </env>

+

Note that the current environment Rho' needs to be saved before and +restored after E is executed, because the fixed point may be invoked +from a context with a completely different environment from the one +in which mu X . E was declared.

+

We are done. Let us now kompile and krun factorial-letrec.lambda from Lesson 7 in Part 1 of the tutorial on LAMBDA. Recall that in the previous lesson this program generated a lot of garbage in the store, due to the need to allocate space for the arguments of all the lambda abstractions needed to run the fixed-point combinator. Now we need far fewer locations, essentially only one location for the argument of the factorial function at each recursive call. Anyway, much better than before.
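As a reminder of what such a program looks like, here is a letrec-based factorial in LAMBDA++ syntax (a sketch; not necessarily identical to the distributed factorial-letrec.lambda):

letrec f x = if x <= 1 then 1 else (x * (f (x + -1)))
in (f 10)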

+

In the next lesson we wrap up the environment definition of LAMBDA++ and +generate its documentation.

+

Go to Lesson 6, LAMBDA++: Wrapping Up and Documenting LAMBDA++.

+

MOVIE (out of date) [5'19"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/exercises/NOTES/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/exercises/NOTES/index.html new file mode 100644 index 00000000000..fac976bd5e9 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/exercises/NOTES/index.html @@ -0,0 +1,386 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Maybe we should change the name of callCC, as it is not a good idea to have two constructs with different semantics but names which cannot be distinguished easily.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/exercises/callCC/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/exercises/callCC/index.html new file mode 100644 index 00000000000..7eaf9c95680 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/exercises/callCC/index.html @@ -0,0 +1,387 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

(see similar exercise in Lesson 1, with substitution instead of environments)

+

Define a variant of callcc, say callCC, which never returns to the +current context unless a value is specifically passed to its argument +continuation. Follow an environment-based style.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/exercises/from-call-CC-to-callcc/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/exercises/from-call-CC-to-callcc/index.html new file mode 100644 index 00000000000..ab20d93900d --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/exercises/from-call-CC-to-callcc/index.html @@ -0,0 +1,385 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Define callcc in terms of callCC, where callCC is explained in the +callCC exercise under LAMBDA++, Lesson 1. Follow an environment-based style.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/exercises/from-callcc-to-call-CC/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/exercises/from-callcc-to-call-CC/index.html new file mode 100644 index 00000000000..5cde05c6593 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/exercises/from-callcc-to-call-CC/index.html @@ -0,0 +1,386 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Define callCC in terms of callcc, where callCC is explained in the +callCC exercise under LAMBDA++, Lesson 1. Follow an environment-based +style.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/index.html new file mode 100644 index 00000000000..6cb7de5b169 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/index.html @@ -0,0 +1,402 @@ + + + + + + + + + + + + + + +Wrapping Up and Documenting LAMBDA++ | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Wrapping Up and Documenting LAMBDA++

+

In this lesson we wrap up and nicely document LAMBDA++. In doing so, we also take the liberty to reorganize the semantics a bit, to make it look better.

+

See the lambda.k file, which is self-explanatory.

+

Part 3 of the tutorial is now complete. Part 4 will teach you more features +of the K framework, in particular how to exhaustively explore the behaviors +of non-deterministic or concurrent programs.

+

MOVIE (out of date) [6'23"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/lambda/index.html b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/lambda/index.html new file mode 100644 index 00000000000..99a34f55b41 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/lambda/index.html @@ -0,0 +1,547 @@ + + + + + + + + + + + + + + +Tutorial 3--- LAMBDA++ | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Tutorial 3--- LAMBDA++

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Abstract

+

This file contains an environment-based K semantic definition of LAMBDA++, an +extension of the LAMBDA language (defined in Tutorial 1) with a callcc +construct. The objective here is to further disseminate some of the features +of the K framework, in particular to illustrate how popular environment-based +and closure-based semantics can be defined in K.

+

For notational/kompilation/krun simplicity and to avoid OS errors, we continue +to write LAMBDA and lambda as names for modules and program extensions, +respectively, in the sequel.

+

To restrict the default program parser invoked by krun, namely kast, +to only parse proper LAMBDA++ programs no matter what other syntactic +constructs we add to Exp later on in the semantics, we put the actual program +syntax in a module with the suffix -SYNTAX. This issue was discussed in more +detail in Lesson 2 of this tutorial. In short, the parser generated by kompile +to be used by kast will be by default built only based on the syntax in this +module. Type kompile --help to see how to tell the parser which syntax to use.

+
k
module LAMBDA-SYNTAX
  imports DOMAINS-SYNTAX
+

Syntax

+

We move all the LAMBDA++ syntax here.

+
k
  syntax Val ::= Int | Bool
  syntax Exp ::= Val
// Basic lambda-calculus syntax
               | Id
               | "lambda" Id "." Exp
               | Exp Exp                          [strict, left]
               | "(" Exp ")"                      [bracket]
// Arithmetic
               > "-" Int
               | Exp "*" Exp                      [strict, left]
               | Exp "/" Exp                      [strict]
               > Exp "+" Exp                      [strict, left]
               > Exp "<=" Exp                     [strict]
// Other functional constructs
  syntax Exp ::= "if" Exp "then" Exp "else" Exp   [strict(1)]  // Conditional
               | "let" Id "=" Exp "in" Exp        [macro]      // Let binder
               | "letrec" Id Id "=" Exp "in" Exp  [macro]      // Letrec
               | "mu" Id "." Exp                               // Mu
               | "callcc" Exp                     [strict]     // Callcc
+

One thing you may want to do, now that the entire syntax is in one +place, is to play with precedences. This way, you can make kompile +generate the parser you want for your programs, so that you won't have to +put lots of parentheses in your programs.

+
k
endmodule


module LAMBDA
  imports LAMBDA-SYNTAX
  imports DOMAINS
+

Semantics

+

The next module contains the semantics of all the LAMBDA++ constructs, +in the order in which their syntax was declared above.

+

The K Results

+

We should not forget to define the results of our computations. Here is a rule of thumb: whenever you use any strictness attributes, you should also define some K results. Or even simpler: always define your results! (Unless you are defining a purely theoretical semantics, meant for analysis and not for execution, you will need to define your results.)

+
k
syntax KResult ::= Val +
+

Configuration

+

Since LAMBDA++ is such a simple language, its configuration is minimal +for an environment-based semantics: it only contains the k cell, +an environment cell, and a store cell. An environment binds variable names +to locations, and a store binds locations to values.

+
k
  configuration <T color="yellow">
                  <k color="green"> $PGM:Exp </k>
                  <env color="blue"> .Map </env>
                  <store color="red"> .Map </store>
                </T>
+

Recall that $PGM is where the program is placed by krun after parsing.

+

Closures

+

In environment-based definitions of lambda-calculi, λ-abstractions evaluate to closures. A closure is like a λ-abstraction, but it also holds the environment in which it was declared. This way, when invoked, a closure knows where to find in the store the values of all the variables that its body expression refers to. To invoke a closure, we need to switch to the closure's environment, then create a new binding for the closure's parameter, then evaluate the closure's body, and then switch back to the caller's environment.

+
k
  syntax Val ::= closure(Map,Id,Exp)

  rule <k> lambda X:Id . E => closure(Rho,X,E) ...</k>
       <env> Rho </env>
  rule <k> closure(Rho,X,E) V:Val => E ~> Rho' ...</k>
       <env> Rho' => Rho[X <- !N] </env>
       <store>... .Map => (!N:Int |-> V) ...</store>
  rule <k> X => V ...</k>
       <env>... X |-> N ...</env>
       <store>... N |-> V ...</store>
+

Environment Recovery

+

The environment-recovery computation item defined below is useful in many semantics, as it was above. It is so useful that there have been discussions in the K team about adding it to the set of pre-defined K features.

+
k
rule <k> _:Val ~> (Rho => .K) ...</k> <env> _ => Rho </env> +
+

Arithmetic Constructs

+

Not much to say here. They have exactly the same semantics as in LAMBDA and IMP. Note that we leave it in the programmer's hands to check that the denominator of a division is different from zero. If a division by zero is issued, then a completely non-deterministic result can happen, depending upon which back-end one uses for the K tool. Currently, Maude is used and Maude gets stuck with a term of the form I /Int 0, but one should not rely on that. If you want to catch division by zero in the semantics, instead of letting the back-end do whatever it wants, you should add a side condition to the division rule.

+
k
  rule - I => 0 -Int I
  rule I1 * I2 => I1 *Int I2
  rule I1 / I2 => I1 /Int I2
  rule I1 + I2 => I1 +Int I2
  rule I1 <= I2 => I1 <=Int I2
+
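For instance, a guarded variant of the division rule above could be written as follows (just a sketch; this lesson's definition deliberately leaves division unguarded):

k
rule I1 / I2 => I1 /Int I2  requires I2 =/=Int 0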

Conditional

+
k
  rule if true then E else _ => E
  rule if false then _ else E => E
+

Let Binder

+
k
rule let X = E in E':Exp => (lambda X . E') E +
+

Letrec Binder

+

We define letrec in terms of mu, whose semantics is given below.

+
k
rule letrec F:Id X = E in E' => let F = mu F . lambda X . E in E' +
+

Mu

+

To reduce the number of locations needed to evaluate μ X . E, we replace it with a special closure which binds X to a fresh location holding the closure itself. This has the same effect as binding X to a reference that points back to the fixed point.

+
k
  syntax Exp ::= muclosure(Map,Exp)
  rule <k> mu X . E => muclosure(Rho[X <- !N], E) ...</k>
       <env> Rho </env>
       <store>... .Map => (!N:Int |-> muclosure(Rho[X <- !N], E)) ...</store>
  rule <k> muclosure(Rho,E) => E ~> Rho' ...</k>
       <env> Rho' => Rho </env>
+

Callcc

+

For callcc, we need to create a new closure-like value which +wraps both the remaining computation, and the environment in which it is +supposed to be executed. Forget the environment, and you get a wrong +callcc.

+
k
  syntax Val ::= cc(Map,K)
  rule <k> (callcc V:Val => V cc(Rho,K)) ~> K </k> <env> Rho </env>
  rule <k> cc(Rho,K) V:Val ~> _ => V ~> K </k> <env> _ => Rho </env>
endmodule
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/index.html new file mode 100644 index 00000000000..71214a85307 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/index.html @@ -0,0 +1,416 @@ + + + + + + + + + + + + + + +Part 4: Defining IMP++ | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Part 4: Defining IMP++

+

IMP++ extends IMP, which was discussed in Part 2 of this tutorial, with several +new syntactic constructs. Also, some existing syntax is generalized, which +requires non-modular changes of the existing IMP semantics. For example, +global variable declarations become local declarations and can occur +anywhere a statement can occur. In this tutorial we will learn the following:

+
    +
  • That (and how) existing syntax/semantics may change as a language evolves.
  • +
  • How to refine configurations as a language evolves.
  • +
  • How to define and use fresh elements of desired sorts.
  • +
  • How to tag syntactic constructs and rules, and how to use such tags +with the superheat/supercool options of kompile.
  • +
  • How the search option of krun works.
  • +
  • How to stream cells holding semantic lists to the standard input/output, +and thus obtain interactive interpreters for the defined languages.
  • +
  • How to delete, save and restore cell contents.
  • +
  • How to add/delete cells dynamically.
  • +
  • More details on how the configuration abstraction mechanism works.
  • +
+

Like in the previous tutorials, this folder contains several lessons, each +adding new features to IMP++. Do them in order and make sure you completed +and understood the previous tutorials.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/lesson_1/NOTES/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_1/NOTES/index.html new file mode 100644 index 00000000000..0b0a15db918 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_1/NOTES/index.html @@ -0,0 +1,389 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Add an exercise somewhere with a print which first evaluates all its arguments +and THEN prints them. The idea is to define print to be strict and to +make the AExps list construct seqstrict, so lists of arithmetic +expressions get evaluated from left-to-right whenever they reach the top of +the <k/> cell (replace seqstrict with strict if you want expressions in +a list to evaluate non-deterministically and interleaved).

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/lesson_1/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_1/index.html new file mode 100644 index 00000000000..677eebe01d5 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_1/index.html @@ -0,0 +1,473 @@ + + + + + + + + + + + + + + +Extending/Changing an Existing Language Syntax | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Extending/Changing an Existing Language Syntax

+

Here we learn how to extend the syntax of an existing language, both with +new syntactic constructs and with more general uses of existing constructs. +The latter, in particular, requires changes of the existing semantics.

+

Consider the IMP language, as defined in Lesson 4 of Part 2 of the tutorial.

+

Let us first add the new syntactic constructs, with their precedences:

+
    +
  • variable increment, ++, which increments an integer variable and +evaluates to the new value;
  • +
  • read, which reads and evaluates to a new integer from the input buffer;
  • +
  • print, which takes a comma-separated list of arithmetic expressions and +evaluates and prints each of them in order, from left to right, to the +output buffer; we therefore define a new list syntactic category, AExps, +which we pass as an argument to print; note we do not want to declare +print to be strict, because we do not want to first evaluate the +arguments and then print them (for example, if the second argument performs +an illegal operation, say division by zero, we still want to print the first +argument); we also go ahead and add strings as arithmetic expressions, +because we intend print to also take strings, in order to print nice +messages to the user;
  • +
  • halt, which abruptly terminates the program; and
  • +
  • spawn, which takes a statement and creates a new concurrent thread +executing it and sharing its environment with the parent thread.
  • +
+

Also, we want to allow local variable declarations, which can appear anywhere +a statement can appear. Their scope ranges from the place they are defined +until the end of the current block, and they can shadow previous declarations, +both inside and outside the current block. The simplest way to define the +syntax of the new variable declarations is as ordinary statements, at the same +time removing the previous Pgm syntactic category and its construct. +Programs are now just statements.

+

We are now done with adding the new syntax and modifying the old one. +Note that the old syntax was modified in a way which makes the previous IMP +programs still parse, but this time as statements. Let us then modify +the configuration variable $PGM to have the sort Stmt instead of Pgm, +and let us try to run the old IMP programs, for example sum.imp.

+

Note that they actually get stuck with the global declaration on the top +of their computations. This is because variable declarations are now treated +like any statements, in particular, the sequential composition rule applies. +This makes the old IMP rule for global variable declarations not match anymore. +We can easily fix it by replacing the anonymous variable _, which matched +the program's statement that now turned into the remaining computation in +the <k/> cell, with the cell frame variable ..., which matches the +remaining computation. Similarly, we have to change the rule for the case +where there are no variables left to declare into one that dissolves itself.

+

We can now run all the previous IMP programs, in spite of the fact that our IMP++ semantics is incomplete and, more interestingly, in spite of the fact that our current semantics of blocks is incorrect with regard to local variable declarations (note that the old IMP programs do not declare block-local variables, which is why they still run correctly).

+

Let us also write some proper IMP++ programs, which we would like to execute +once we give semantics to the new constructs.

+

div.imp is a program manifesting non-deterministic behaviors due to the +desired non-deterministic evaluation strategy of division and the fact that +expressions will have side effects once we add variable increment. We will +be able to see all the different behaviors of this program. Challenge: can +you identify the behavior where the program performs a division-by-zero?

+

If we run div.imp now, it will get stuck with the variable increment +construct on top of the computation cell. Once we give it a semantics, +div.imp will execute completely (all the other constructs in div.imp +already have their semantics defined as part of IMP).

+

Note that some people prefer to define all their semantics in a by-need style: that is, they first write and parse lots of programs, and then they add semantics to each language construct on which any of the programs gets stuck, and so on and so forth, until they can run all the programs.

+

io.imp is a program which exercises the input/output capabilities of the +language: reads two integers and prints three strings and an integer. +Note that the variable declaration is not the first statement anymore.
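If you prefer to write it yourself, a program matching the description above could look like this (a hypothetical sketch, not necessarily the exact io.imp):

print("Input two numbers: ");
int x, y;
x = read();
y = read();
print("The sum is: ", x + y, "\n");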

+

sum-io.imp is an interactive variant of the sum program.

+

spawn.imp is a program which dynamically creates two threads that interact +with the main thread via the shared variable x. Lots of behaviors will be +seen here once we give spawn the right semantics.

+

Finally, locals.imp tests whether variable shadowing/unshadowing works well.

+

In the next lesson we will prepare the configuration for the new constructs, +and will see what it takes to adapt the semantics to the new configuration. +Specifically, we will split the state cell into an environment cell and a +store cell, like in LAMBDA++ in Part 3 of the tutorial.

+

Go to Lesson 2, IMP++: Configuration Refinement; Freshness.

+

MOVIE (out of date) [07'47"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/lesson_2/NOTES/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_2/NOTES/index.html new file mode 100644 index 00000000000..1e48b52ec9b --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_2/NOTES/index.html @@ -0,0 +1,384 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

update discussion on fresh; it has already been explained in lambda++

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/lesson_2/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_2/index.html new file mode 100644 index 00000000000..a5b4fdfca78 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_2/index.html @@ -0,0 +1,455 @@ + + + + + + + + + + + + + + +Configuration Refinement; Freshness | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Configuration Refinement; Freshness

+

To prepare for the semantics of threads and local variables, in this lesson we +split the state cell into an environment and a store. The environment and +the store will be similar to those in the definition of LAMBDA++ in Part +3 of the Tutorial. This configuration refinement will require us to change +some of IMP's rules, namely those that used the state.

+

To split the state map, which binds program variables to values, into an +environment mapping program variables to locations and a store mapping +locations to values, we replace in the configuration declaration the cell

+
<state color="red"> .Map </state>
+
+

with two cells

+
<env color="LightSkyBlue"> .Map </env>
+<store color="red"> .Map </store>
+
+

Structurally speaking, this split of a cell into other cells is a major semantic change which, unfortunately, requires us to revisit the existing rules that used the state cell. One could, of course, argue that we could have avoided this problem if we had worked with an environment and a store from the very beginning, instead of a monolithic state. While that is a valid argument, highlighting the fact that modularity is not a feature of the framework alone and that one should also follow good practices to achieve it, it is also true that if all we wanted in Part 2 of the tutorial was to define IMP as is, then splitting the state into an environment and a store would have been unnecessary and hard to justify.

+

The first rule which used a state cell is the lookup rule:

+
rule <k> X:Id => I ...</k> <state>... X |-> I ...</state>
+
+

We modify it as follows:

+
rule <k> X:Id => I ...</k>
+     <env>... X |-> N ...</env>
+     <store>... N |-> I ...</store>
+
+

So we first match the location N of X in the environment, then the value I at location N in the store, and finally we rewrite X to I in the computation. This rule also shows an instance of more complex multiset matching, where two variables (X and N) are each matched twice.

+

The assignment rule is modified quite similarly.

+

The variable declaration rule is trickier, though, because we need to allocate +a fresh location in the store and bind the newly declared variable to it. +This is quite similar to the way we allocated space for variables in +the environment-based definition of LAMBDA++ in Part 3 of the tutorial.

+
rule <k> int (X,Xs => Xs); ...</k>
+     <env> Rho => Rho[X <- !N:Int] </env>
+     <store>... .Map => !N |-> 0 ...</store>
+
+

Note the use of the fresh (!N) variable notation above. Recall from the LAMBDA++ tutorial that each time a rule with fresh (!) variables is applied, fresh elements of the corresponding sorts are generated for the fresh variables, distinct from all the previously generated elements; also, we cannot and should not assume anything about the particular element that is being generated, except that it is different from the previous ones.

+

kompile and krun sum.imp to see how the fresh locations have been +generated and used. There were two fresh locations needed, for the two +variables. Note also that a cell holding the counter has been added to the +configuration.

+

In the next lesson we will add the semantics of variable increment, and see +how that yields non-deterministic behaviors in programs and how to explore +those behaviors using the K tool.

+

Go to Lesson 3, IMP++: Tagging; Superheat/Supercool Kompilation Options.

+

MOVIE (out of date) [04'06"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/lesson_3/NOTES/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_3/NOTES/index.html new file mode 100644 index 00000000000..fd974a5db97 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_3/NOTES/index.html @@ -0,0 +1,385 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

We eliminated the superheat/supercool optimization. Now we only need to use the transition option. So the video is out of sync now.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/lesson_3/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_3/index.html new file mode 100644 index 00000000000..5527ca7d135 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_3/index.html @@ -0,0 +1,421 @@ + + + + + + + + + + + + + + +Variable increment; Search | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Variable increment; Search

+

In this lesson we add the semantics of variable increment. We also learn +how to instruct the kompile tool to instrument the language model for +exhaustive analysis.

+

The variable increment rule is self-explanatory:

+
rule <k> ++X => I +Int 1 ...</k>
+     <env>... X |-> N ...</env>
+     <store>... N |-> (I => I +Int 1) ...</store>
+
+

We can now run programs like our div.imp program introduced in Lesson 1. +Do it.

+

The addition of increment makes the evaluation of expressions have side +effects. That, in combination with the non-determinism allowed by the +strictness attributes in how expression constructs evaluate their +arguments, makes expressions in particular and programs in general have +non-deterministic behaviors. One possible execution of the div.imp program +assigns 1 to y's location, for example, but this program manifests several +other behaviors, too.

+

To see all the (final-state) behaviors that a program can have, you can kompile +the semantics with --enable-search and call the krun tool with the option +--search. For example:

+
krun div.imp --search
+
+

In the next lesson we add input/output to our language and learn how to +generate a model of it which behaves like an interactive interpreter!

+

Go to Lesson 4, IMP++: Semantic Lists; Input/Output Streaming.

+

MOVIE (out of date) [06'56"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/lesson_4/NOTES/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_4/NOTES/index.html new file mode 100644 index 00000000000..e815201412e --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_4/NOTES/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Make sure cells have the same indentation, which should use normal spaces, not tabs. Tabs look different in different editors.

+

The tests here include all the imp and imp++ programs, but of course the imp ones do not display any output, so their .out files are empty. But this way we at least make sure we test that these programs do not fail/crash and that nothing is output, so it is better than what we used to have in K 3.6.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/lesson_4/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_4/index.html new file mode 100644 index 00000000000..df7b65f0f92 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_4/index.html @@ -0,0 +1,492 @@ + + + + + + + + + + + + + + +Semantic Lists; Input/Output Streaming | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Semantic Lists; Input/Output Streaming

+

In this lesson we add semantics to the read and print IMP++ constructs. +In doing so, we also learn how to use semantic lists and how to connect +cells holding semantic lists to the standard input and standard output. +This allows us to turn the K semantics into an interactive interpreter.

+

We start by adding two new cells to the configuration,

+
<in color="magenta"> .List </in>
+<out color="Orchid"> .List </out>
+
+

each holding a semantic list, initially empty. Semantic lists are +space-separated sequences of items, each item being a term of the form +ListItem(t), where t is a term of sort K. Recall that the semantic maps, +which we use for states, environments, stores, etc., are sets of pairs +t1 |-> t2, where t1 and t2 are terms of sort K. The ListItem wrapper +is currently needed, to avoid parsing ambiguities.

+

Since we want the print statement to also print strings, we need to tell +K that strings are results. To make it more interesting, let us also overload +the + symbol on arithmetic expressions to also take strings and, as a +result, to concatenate them. Since + is already strict, we only need to add +a rule reducing the IMP addition of strings to the builtin operation +String +which concatenates two strings.
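Concretely, the extra rule can be as simple as this (a sketch following the description above):

rule Str1:String + Str2:String => Str1 +String Str2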

+

The semantics of read is immediate: it reads and consumes the first integer item from the <in/> cell; note that our read only reads integer values (it gets stuck if the first item in the <in/> cell is not an integer).
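A rule in the following style achieves this (a sketch):

rule <k> read() => I ...</k>
     <in> ListItem(I:Int) => .List ...</in>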

+

The semantics of print is a bit trickier. Recall that print takes an arbitrary number of arithmetic expression arguments, and evaluates and outputs each of them in order, from left to right. For example, print("Hello", 3/0, "Bye"); outputs "Hello" and then gets stuck on the illegal division-by-zero operation. In other words, we do not want it to first evaluate all its arguments and then print them, because that would miss outputting potentially valuable information. So the first step is to evaluate the first argument of print. In some sense, what we'd like to say is that print has the evaluation strategy strict(1). However, strictness attributes only work with individual language constructs, while what we need here is an evaluation strategy that involves two constructs: print and the list (comma) construct of AExps. If we naively gave print the strict(1) evaluation strategy, then its first and unique argument, an AExps list, would be scheduled for evaluation and the execution would get stuck, because we have no rules for evaluating AExps terms. If we made the list construct of AExps strict, then we would get the wrong semantics for print, which would first evaluate all its arguments and then output them. The correct way to tell K that print should evaluate only its first argument is by using a context declaration:

+
context print(HOLE:AExp, _);
+
+

Note the HOLE of sort AExp above. Contexts allow us to define finer-grain +evaluation strategies than the strictness attributes, involving potentially +more than one language construct, like above. The HOLE indicates the +argument which is requested to be evaluated. For example, the strict +attribute of division corresponds to two contexts:

+
context HOLE / _
+context _ / HOLE
+
+

In their full generality, contexts can be any terms with precisely one +occurrence of a HOLE, and with arbitrary side conditions on any variables +occurring in the context term as well as on the HOLE. See Part 6 of the +tutorial for more examples.

+

Once evaluated, the first argument of print is expected to become either an +integer or a string. Since we want to print both integers and string values, +to avoid writing two rules, one for each type of value, we instead add a new +syntactic category, Printable, which is the union of integers and strings.
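Putting these pieces together, the print semantics can be sketched as follows (a hedged sketch; see the lesson's imp.k for the actual rules):

syntax Printable ::= Int | String
syntax AExp ::= Printable
rule <k> print(P:Printable, AEs => AEs); ...</k>
     <out>... .List => ListItem(P) </out>
rule print(.AExps); => .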

+

Let us kompile and krun the io.imp program discussed in Lesson 1. As +expected, it gets stuck with a read construct on top of the computation and +with an empty <in/> cell. To run it, we need to provide some items in the +<in/> cell, so that the rule of read can match. Let us add

+
<in> ListItem(3) ListItem(5) ListItem(7) </in>
+
+

Now, if we krun io.imp, we can see that its execution completes normally +(the <k/> cell is empty), that the first two items have been removed by the +two read constructs from the <in/> cell, and that the desired strings and +numbers have been placed into the <out/> cell.

+

Cells holding semantic lists can be connected to the standard input and standard output buffers, and krun knows how to handle these appropriately. Let us connect the <in/> cell to the standard input using the cell attribute stream="stdin" and the <out/> cell to the standard output with the attribute stream="stdout". A cell connected to the standard input will take its items from the standard input and block the rewriting process when an input is needed until an item is available in the standard input buffer. A cell connected to the standard output buffer will send all its items, in order, to the standard output.
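With these attributes, the two cells declared earlier become:

<in color="magenta" stream="stdin"> .List </in>
<out color="Orchid" stream="stdout"> .List </out>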

+

Let us kompile and krun io.imp again. It prints the message and then +waits for your input numbers. Type in two numbers, then press <Enter>. +A message with their sum is then printed, followed by the final configuration. +If you do not want to see the final configuration, and thus obtain a realistic +interpreter for our language, then call krun with the option --output none:

+
krun io.imp --output none
+
+

Let us now krun our interactive sum program, which continuously reads numbers +from the console and prints the sum of numbers up to them:

+
krun sum-io.imp
+
+

Try a few numbers, then 0. Note that the program terminated, but with junk in the <k/> cell, essentially with a halt statement on top of it. That is, of course, because halt has been reached but it has no semantics yet.

+

In the next lesson we give the semantics of halt and also fix the semantics +of blocks with local variable declarations.

+

Go to Lesson 5, IMP++: Deleting, Saving and Restoring Cell Contents.

+

MOVIE (out of date) [05'21"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/lesson_5/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_5/index.html new file mode 100644 index 00000000000..41713f17283 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_5/index.html @@ -0,0 +1,442 @@ + + + + + + + + + + + + + + +Deleting, Saving and Restoring Cell Contents | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Deleting, Saving and Restoring Cell Contents

+

In this lesson we will see how easily we can delete, save and/or restore +contents of cells in order to achieve the desired semantics of language +constructs that involve abrupt changes of control or environments. We have +seen similar or related K features in the LAMBDA++ language in Part 3 of the +tutorial.

+

Let us start by adding semantics to the halt statement. As its name says, +what we want is to abruptly terminate the execution of the program. Moreover, +we want the program configuration to look as if the program terminated +normally, with an empty computation cell. The simplest way to achieve that is +to simply empty the computation cell when halt is encountered:

+
rule <k> halt; ~> _ => . </k>
+
+

It is important to mention the entire <k/> cell here, with both its membranes closed, to make sure that its entire contents are discarded. Note the anonymous variable, which matches the rest of the computation.

+

kompile and krun sum-io.imp. Note that unlike in Lesson 4, the program +terminates with an empty computation cell now.

+

As mentioned earlier, the semantics of blocks that was inherited from IMP is wrong. The program locals.imp shows it very clearly: the environments are not correctly restored at block exits. One way to fix the problem is to take a snapshot of the current environment when a block is entered and save it somewhere, and then to restore it when the block is left. There are many ways to do this, which you can explore on your own: for example, you can add a new list cell where you push/pop the environment snapshots in a stack style; or you can use the existing environment cell for this purpose, but then you need to change the variable access rules to search through the stacked environments for the variable.

+

My preferred solution is to follow a style similar to how we saved/restored +LAMBDA++ environments in Part 3 of the Tutorial, namely to use the already +existing <k/> cell for such operations. More specifically, we place a +reminder item in the computation whenever we need to take a snapshot of +some cell contents; the item simply consists of the entire contents of the cell. +Then, when the reminder item is reached, we restore the contents of the cell:

+
rule <k> {S} => S ~> Rho ...</k> <env> Rho </env>
+
+

The only thing left now is to give the definition of environment restore:

+
rule <k> Rho => . ...</k> <env> _ => Rho </env>
+
+

Done. kompile and krun locals.imp. Everything should work correctly now. +Note that the rule above is different from the one we had for LAMBDA++ in +Part 3 of the tutorial, in that here there is no value preceding the environment +restoration item in the computation; that's because IMP++ statements, +unlike LAMBDA++'s expressions, evaluate to nothing (.).

+

In the next lesson we will give semantics to the spawn S construct, which +dynamically creates a concurrent shared-memory thread executing statement S.

+

Go to Lesson 6, IMP++: Adding/Deleting Cells Dynamically; Configuration Abstraction, Part 2.

+

MOVIE (out of date) [04'30"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/lesson_6/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_6/index.html new file mode 100644 index 00000000000..b1495cf824b --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_6/index.html @@ -0,0 +1,550 @@ + + + + + + + + + + + + + + +Adding/Deleting Cells Dynamically; Configuration Abstraction, Part 2 | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Adding/Deleting Cells Dynamically; Configuration Abstraction, Part 2

+

In this lesson we add dynamic thread creation and termination to IMP, and +while doing so we learn how to define and use configurations whose structure +can evolve dynamically.

+

Recall that the intended semantics of spawn S is to spawn a new concurrent thread that executes S. The new thread is passed at creation time its parent's environment, so it can share with its parent the memory locations that its parent had access to at creation time. No other locations can be shared, and no other memory sharing mechanism is available. The parent and the child threads can evolve unrestricted; in particular, they can change their environments by declaring new variables or shadowing existing ones, they can create other threads, and so on.

+

The above suggests that each thread should have its own computation and its +own environment. This can be elegantly achieved if we group the <k/> and +<env/> cells in a <thread/> cell in the configuration. Since at any given +moment during the execution of a program there could be zero, one or more +instances of such a <thread/> cell in the configuration, it is a good idea +to declare the <thread/> cell with multiplicity * (i.e., zero, one or more):

+
<thread multiplicity="*" color="blue">
+  <k color="green"> $PGM:Stmt </k>
+  <env color="LightSkyBlue"> .Map </env>
+</thread>
+
+

This multiplicity declaration is not necessary, but it is a good idea to do +it for several reasons:

+
    +
  1. it may help the configuration abstraction process, +which may in turn significantly increase the compactness and modularity of +your subsequent rules;
  2. +
  3. it may help various analysis and execution tools, +for example static analyzers to give you error messages when you create cells +where you should not, or K compilers to improve performance by starting +actual concurrent hardware threads or processes corresponding to each cell +instance; and
  4. +
  5. it may help you better understand and control the dynamics +of your configuration, and thus your overall semantics.
  6. +
+

For good encapsulation, I also prefer to put all thread cells into one cell, +<threads/>. This is technically unnecessary, though; to convince yourself +that this is indeed the case, you can remove this cell once we are done with +the semantics and everything will work without having to make any changes.

+

Before we continue, let us kompile and krun some programs that used to work, say sum-io.imp. In spite of the relatively radical configuration reorganization, those programs execute just fine! How is that possible? In particular, why do rules like the lookup and assignment still work, unchanged, in spite of the fact that the <k/> and <env/> cells are not at the same level with the <store/> cell in the configuration anymore?

+

Welcome to configuration abstraction, part 2. Recall that the role of configuration abstraction is to allow you to only write the relevant information in each rule, and have the compiler fill in the obvious and boring details. According to the configuration that we declared for our new language, there is only one reasonable way to complete rules like the lookup, namely to place the <k/> and <env/> cells inside a <thread/> cell, inside a <threads/> cell:

+
rule <threads>...
+       <thread>...
+         <k> X:Id => I ...</k>
+         <env>... X |-> N ...</env>
+       ...</thread>
+     ...</threads>
+     <store>... N |-> I ...</store>  [lookup]
+
+

This is the most direct, compact and local way to complete the configuration +context of the lookup rule. If for some reason you wanted here to match the +<k/> cell of one thread and the <env/> cell of another thread, then you +would need to explicitly tell K so, by mentioning the two thread cells, +for example:

+
rule <thread>...
+         <k> X:Id => I ...</k>
+     ...</thread>
+     <thread>...
+         <env>... X |-> N ...</env>
+     ...</thread>
+     <store>... N |-> I ...</store>  [lookup]
+
+

By default, K completes rules in a greedy style. Think this way: what is the +minimal number of changes to my rule to make it fit the declared +configuration? That's what the K tool will do.

+

Configuration abstraction is technically unnecessary, but once you start +using it and get a feel for how it works, it will become your best friend. +It allows you to focus on the essentials of your semantics, and at the same +time gives you flexibility in changing the configuration later on without +having to touch the rules. For example, it allows you to remove the +<threads/> cell from the configuration, if you don't like it, without +having to touch any rule.

+

We are now ready to give the semantics of spawn:

+
rule <k> spawn S => . ...</k> <env> Rho </env>
+     (. => <thread>... <k> S </k> <env> Rho </env> ...</thread>)
+
+

Note configuration abstraction at work, again. Taking into account the declared configuration, and in particular the multiplicity information * in the <thread/> cell, the only reasonable way to complete the rule above is to wrap the <k/> and <env/> cells on the first line within a <thread/> cell, and to fill in the ...s in the child thread with the default contents of the other subcells in <thread/>. In this case there are no other cells, so we can get rid of those ...s, but that would decrease the modularity of this rule: indeed, we may later on add other cells within <thread/> as the language evolves, for example a function or an exception stack, etc.

+

In theory, we should be able to write the rule above even more compactly +and modularly, namely as

+
rule <k> spawn S => . ...</k> <env> Rho </env>
+     (. => <k> S </k> <env> Rho </env>)
+
+

Unfortunately, this currently does not work in the K tool, due to some +known limitations of our current configuration abstraction algorithm. +This latter rule would be more modular, because it would not even depend +on the cell name thread. For example, we may later decide to change +thread into agent, and we would not have to touch this rule. +We hope this current limitation will be eliminated soon.

+

Once a thread terminates, its computation cell becomes empty. When that +happens, we can go ahead and remove the useless thread cell:

+
rule <thread>... <k> . </k> ...</thread> => .
+
+

Let's see what we've got. kompile and krun spawn.imp. +Note the following:

+
    +
  • The <threads/> cell is empty, so all threads terminated normally;
  • +
  • The value printed is different from the value in the store; the store value +is not even the one obtained if the threads executed sequentially.
  • +
+

Therefore, interesting behaviors may happen; we would like to see them all!

+
krun spawn.imp --search
+
+

However, the above does not work.

+

spawn.imp is an interactive program, which reads a number from the standard input. When analyzing programs exhaustively using the search option, krun has to disable the streaming capabilities (just think about it and you will realize why). The best you can do in terms of interactivity with search is to pipe some input to krun: krun will flush the standard input buffer into the cells connected to it when creating the initial configuration (it will do that whether or not you run it with the --search option). For example:

+
echo 23 | krun spawn.imp --search
+
+

puts 23 in the standard input buffer, which is then transferred in the +<in/> cell as a list item, and then the exhaustive search procedure is +invoked.

+

However, even after piping some input, the spawn.imp program outputs +an error:

+
[Error] krun: You must pass --enable-search to kompile to be able to use krun --search with the LLVM backend
+
+

As explained in Lesson 3, by default kompile optimizes the generated +language model for execution. In particular, it does not insert any +backtracking markers where transition attempts should be made, so krun +lacks the information it needs to exhaustively search the generated language +model.

+

kompile with the search feature enabled:

+
kompile imp --enable-search
+
+

Now echo 23 | krun spawn.imp --search gives us all 12 behaviors of the +spawn.imp program.

+

We currently have no mechanism for thread synchronization. In the next lesson +we add a join statement, which allows a thread to wait until another completes.

+

Go to Lesson 7, IMP++: Everything Changes: Syntax, Configuration, Semantics.

+

MOVIE (out of date) [11'40"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/lesson_7/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_7/index.html new file mode 100644 index 00000000000..2d3d3fb9f15 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_7/index.html @@ -0,0 +1,473 @@ + + + + + + + + + + + + + + +Everything Changes: Syntax, Configuration, Semantics | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Everything Changes: Syntax, Configuration, Semantics

+

In this lesson we add thread joining, one of the simplest thread +synchronization mechanisms. In doing so, we need to add unique ids +to threads in the configuration, and to modify the syntax to allow spawn +to return the id of the newly created thread. This gives us an opportunity +to make several other small syntactic and semantics changes to the language, +which make it more powerful or more compact at a rather low cost.

+

Before we start, let us first copy and modify the previous spawn.imp program +from Lesson 1 to make use of thread joining. Recall from Lesson 6 that in some +runs of this program the main thread completed before the child threads, +printing a possibly undesired value of x. What we want now is to assign +unique ids to the two spawned threads, and then to modify the main thread to +join the two child threads before printing. To avoid adding a new type to +the language, let's assume that thread ids are integer numbers. So we declare +two integers, t1 and t2, and assign them the two spawn commands. In order +for this to parse, we will have to change the syntax of spawn to be an +arithmetic expression construct instead of a statement. Once we do that, +we have a slight syntactic annoyance: we need to put two consecutive ; +after the spawn assignment, one for the assignment statement inside the spawn, +and another for the outer assignment. To avoid the two consecutive semicolons, +we can syntactically enforce spawn to take a block as argument, instead of a +statement. Now it looks better. The new spawn.imp program is still +non-deterministic, because the two threads can execute in any order and even +continue to have a data-race on the shared variable x, but we should see fewer +behaviors when we use the join statements. If we want to fully synchronize +this program, we can have the second thread start with a join(t1) statement. +Then we should only see one behavior for this program.
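A sketch of such a modified program follows (hypothetical program text, for illustration only; the thread bodies in the actual spawn.imp differ):

int x, t1, t2;
x = 0;
t1 = spawn { x = x + 1; };
t2 = spawn { x = x + 10; };
join t1;
join t2;
print("x = ", x, "\n");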

+

Let us now modify the language semantics. First, we move the spawn +construct from statements to expressions, and make it take a block. +Second, we add one more sub-cell to the thread cell in the configuration, +<id/>, to hold the unique identifier of the thread. We want the main +thread to have id 0, so we initialize this cell with 0. Third, we modify +the spawn rule to generate a fresh integer identifier, which is put in the +<id/> cell of the child thread and returned as a result of spawn in the +parent thread. Fourth, let us add the join statement to the language, +both syntactically and semantically. So in order for the join(T) statement +to execute, thread T must have its computation empty. However, in order +for this to work we have to get rid of the thread termination cleanup rule. +Indeed, we need to store somewhere the information that thread T terminated; +the simplest way to do it is to not remove the terminated threads. Feel free +to experiment with other possibilities, too, here. For example, you may add +another cell, <done/>, in which you can store all the thread ids of the +terminated and garbage-collected threads.
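In K, these changes amount to rules along the following lines (a sketch consistent with the description above; see the lesson's imp.k for the actual rules):

rule <k> spawn S => !T:Int ...</k> <env> Rho </env>
     (. => <thread>... <k> S </k> <env> Rho </env> <id> !T </id> ...</thread>)

rule <k> join(T:Int); => . ...</k>
     <thread>... <k> . </k> <id> T </id> ...</thread>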

+

Let us now kompile imp.k and convince ourselves that the new spawn.imp +with join statements indeed has fewer behaviors than its variant without +join statements. Also, let us convince ourselves that the fully synchronized +variant of it indeed has only one behavior.

+

Note that now spawn, like variable increment, makes the evaluation of expressions have side effects. Many programming languages in fact allow expressions to be evaluated only for their side effects, and not for their value. This is typically done by simply adding a ; after the expression and thus turning it into a statement, for example ++x;. Let us also allow arithmetic expressions in our language to be used as statements, by simply adding the production AExp ";" to Stmt, with evaluation strategy strict and with the expected semantics discarding the value of the AExp.

+

Another simple change in syntax and semantics which gives our language more power is to remove the ; from the syntax of variable assignments and to make them expression constructs instead of statement constructs. This change, combined with the previous one, will still allow us to parse all the programs that we could parse before, but will also allow us to parse more programs. For example, we can now write sequence assignments like in C: x = y = z = 0. The semantics of assignment now also has to return the assigned value to the computation, because we want the assignment expression to evaluate to the assigned value.
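A sketch of the new assignment syntax and rule (shown here for integer values only):

syntax AExp ::= Id "=" AExp  [strict(2)]
rule <k> X:Id = I:Int => I ...</k>
     <env>... X |-> N ...</env>
     <store>... N |-> (_ => I) ...</store>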

+

Let us also make another change, but this time one which only makes the +definition more compact. Instead of defining statement sequential +composition as a binary construct for statements, let us define a new +syntactic construct, Stmts, as whitespace-separated lists of Stmt. This +allows us to get rid of the empty blocks, because we can change the syntax of +blocks to {Stmts} and Stmts also allows the empty sequence of statements. +However, we do have to make sure that .Stmts dissolves.
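The corresponding syntax and structural rules can be sketched as:

syntax Stmts ::= List{Stmt,""}
rule .Stmts => .                 // the empty sequence of statements dissolves
rule S:Stmt Ss:Stmts => S ~> Ss  // execute the statements in order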

+

In general, unless you are defining a well-established programming language, +it is quite likely that your definitions will suffer lots of changes like the +ones seen in this lecture. You add a new construct, which suggests changes +in the existing syntax making in fact your language parse more programs, +which then requires corresponding changes in the semantics, and so on. +Also, compact definitions are desirable in general, because they are easier +to read and easier to change if needed later.

+

In the next lesson we wrap up and document the definition of IMP++.

+

Go to Lesson 8, IMP++: Wrapping up Larger Languages.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/lesson_8/imp/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_8/imp/index.html new file mode 100644 index 00000000000..55ae31da564 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_8/imp/index.html @@ -0,0 +1,922 @@ + + + + + + + + + + + + + + +IMP++ | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

IMP++

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Abstract

+

This is the K semantic definition of the IMP++ language. IMP++ extends the IMP language with the features listed below. We strongly recommend that you first familiarize yourself with the IMP language and its K definition in Tutorial 2 before proceeding.

+

Strings and concatenation of strings. Strings are useful +for the print statement, which is discussed below. For +string concatenation, we use the same + construct that we use +for addition (so we overload it).

+

Variable increment. We only add a pre-increment construct: +++x increments variable x and evaluates to the +incremented value. Variable increment makes the evaluation of +expressions have side effects, and thus makes the evaluation strategies +of the various language constructs have an influence on the set +of possible program behaviors.

+

Input and output. IMP++ adds a read() expression construct which reads an integer number and evaluates to it, and a variadic (i.e., it has an arbitrary number of arguments) statement construct print(e1,e2,...,en) which evaluates its arguments and then outputs their values. Note that the K tool allows us to connect the input and output cells to the standard input and output buffers, this way compiling the language definition into an interactive interpreter.

+

Abrupt termination. The halt statement simply halts the program. The K tool shows the resulting configuration, as if the program terminated normally. We therefore assume that an external observer does not care whether the program terminates normally or abruptly, just as with exit statements in conventional programming languages like C.

+

Dynamic threads. The expression construct spawn s +starts a new concurrent thread that executes statement s, +which is expected to be a block, and evaluates immediately to a fresh +thread identifier that is also assigned to the newly created thread. +The new thread is given at creation time the environment of its +parent, so it can access all its parent's variables. This allows for +the parent thread, and the child thread to communicate; it also allows +for races and "unexpected" behaviors, so be careful. +For thread synchronization, IMP++ provides a thread join statement +construct join t;, where t evaluates to a thread +identifier, which stalls the current thread until thread t +completes its computation. For simplicity, we here assume a +sequentially consistent shared memory model. To experiment with other +memory models, see the definition of KERNELC.

+

Blocks and local variables. IMP++ allows blocks enclosed by +curly brackets. Also, IMP's global variable declaration construct is +generalized to be used anywhere as a statement, not only at the +beginning of the program. As expected, the scope of the declared +variables is from their declaration point till the end of the most +nested enclosing block.

+

What You Will Learn Here

+
    +
  • How to define a less trivial language in K, as explained above.
  • How to use the superheat and supercool options of the K tool kompile to exhaustively explore the non-determinism due to underspecified evaluation strategies.
  • How to use the --enable-search option of the K tool to exhaustively explore the non-determinism due to concurrency.
  • How to connect certain cells in the configuration to the standard input and standard output, and thus turn the krun tool into an interactive interpreter for the defined language.
  • How to exhaustively search for the non-deterministic behaviors of a program using the search option of krun.
+
k
module IMP-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

IMP++ adds several syntactic constructs to IMP. Also, since the variable declaration construct is generalized to be used anywhere a statement can be used, not only at the beginning of the program, we need to remove the previous global variable declaration of IMP and instead add a variable declaration statement construct.

+

We do not re-discuss the constructs which are taken over from IMP, except when their syntax has been subtly modified (such as, for example, the syntax of the previous "statement" assignment, which is now obtained by composing the new assignment expression and the new expression statement constructs); go to the last lesson of Tutorial 2 if you are interested in IMP's constructs. For execution purposes, we tag the addition and division operations as members of the addition and division groups. These groups have no theoretical significance, in that they do not affect the semantics of the language in any way. They only have practical relevance, specific to our implementation of the K tool. Specifically, we can tell the K tool (using its superheat and supercool options) that we want to exhaustively explore all the non-deterministic behaviors (due to strictness) of these language constructs. For performance reasons, by default the K tool chooses an arbitrary but fixed order to evaluate the arguments of the strict language constructs, thus possibly losing behaviors due to missed interleavings. This aspect was irrelevant in IMP, because its expressions had no side effects, but it becomes relevant in IMP++.

+

The syntax of the IMP++ constructs is self-explanatory. Note that assignment +is now an expression construct. Also, print is variadic, taking a +list of expressions as argument. It is also strict, which means that the +entire list of expressions, that is, each expression in the list, will be +evaluated. Note also that we have now defined sequential composition +of statements as a whitespace-separated list of statements, aliased with +the nonterminal Stmts, and block as such a (possibly empty) sequence +of statements surrounded by curly brackets.

+
k
syntax AExp ::= Int | String | Id + | "++" Id + | "read" "(" ")" + | "-" AExp [strict] + | "(" AExp ")" [bracket] + > AExp "/" AExp [left, strict] + > AExp "+" AExp [left, strict] + > "spawn" Block + > Id "=" AExp [strict(2)] + syntax BExp ::= Bool + | AExp "<=" AExp [seqstrict] + | "!" BExp [strict] + | "(" BExp ")" [bracket] + > BExp "&&" BExp [left, strict(1)] + syntax Block ::= "{" Stmts "}" + syntax Stmt ::= Block + | AExp ";" [strict] + | "if" "(" BExp ")" + Block "else" Block [strict(1)] + | "while" "(" BExp ")" Block + | "int" Ids ";" + | "print" "(" AExps ")" ";" + | "halt" ";" + > "join" AExp ";" [strict] + + syntax Ids ::= List{Id,","} [overload(exps)] + syntax AExps ::= List{AExp,","} [overload(exps)] + syntax Stmts ::= List{Stmt,""} + syntax AExps ::= Ids +endmodule + + +module IMP + imports IMP-SYNTAX + imports DOMAINS +
+

Semantics

+

We next give the semantics of IMP++. We start by first defining its +configuration.

+

Configuration

+

The original configuration of IMP has been extended to include +all the various additional cells needed for IMP++. +To facilitate the semantics of threads, more specifically +to naturally give them access to their parent's variables, we prefer a +(rather conventional) split of the program state into an +environment and a store. An environment maps +variable names into locations, while a store maps locations +into values. Stores are also sometimes called states, or +heaps, or memory, in the literature. Like values, locations +can be anything. For simplicity, here we assume they are natural +numbers. Moreover, each thread has its own environment, so it knows +where all the variables that it has access to are located in the store +(that includes its locally declared variables as well as the variables +of its parent thread), and its own unique identifier. The store is +shared by all threads. For simplicity, we assume a sequentially consistent +memory model in IMP++. Note that the thread cell has multiplicity +*, meaning that there could be zero, one, or more instances of that cell +in the configuration at any given time. This multiplicity information +is important for K's configuration abstraction process: it tells +K how to complete rules which, in order to increase the modularity of the +definition, choose to not mention the entire configuration context. +The in and out cells hold the input and the output +buffers as lists of items.

+
k
configuration <T color="yellow"> + <threads color="orange"> + <thread multiplicity="*" color="blue" type="Map"> + <id color="black"> 0 </id> + <k color="green"> $PGM:Stmts </k> + <env color="LightSkyBlue"> .Map </env> + </thread> + </threads> +// <br/> + <store color="red"> .Map </store> +// <input color="magenta"> .List </input> +// <output color="Orchid"> .List </output> + <input color="magenta" stream="stdin"> .List </input> + <output color="Orchid" stream="stdout"> .List </output> + </T> +// Replace the <input/> and <output/> cells with the next two in order to +// initialize the input buffer through krun +// <input color="magenta"> $IN:List </input> +// <output color="Orchid"> .List </output> +// Replace the <input/> and <output/> cells with the next two to connect the +// input/output buffers to stdin/stdout through krun +// <input color="magenta" stream="stdin"> .List </input> +// <output color="Orchid" stream="stdout"> .List </output> +// Replace the <input/> and <output/> cells with the next two to connect the +// input/output buffers to stdin/stdout and also allow input through krun +// <input color="magenta" stream="stdin"> $IN:List </input> +// <output color="Orchid" stream="stdout"> .List </output> +
+

We can also use configuration variables to initialize the configuration through krun. For example, we may want to pass a few list items in the in cell when the program makes use of read(), so that the semantics does not get stuck. Recall from IMP that configuration variables start with a $ sign ($PGM, for example) and can be initialized with any string by krun; of course, the string should parse to a term of the corresponding sort, otherwise errors will be generated. Moreover, K allows you to connect list cells to the standard input or the standard output. For example, if you add the attribute stream="stdin" to the in cell, then krun will prompt the user to pass input when the in cell is empty and any semantic rule needs at least one item to be present there in order to match. Similarly but dually, if you add the attribute stream="stdout" to the out cell, then any item placed into this cell by any rule will be promptly sent to the standard output. This way, krun can be used to obtain interactive interpreters based directly on the K semantics of the language. For example:

+
shell
sh$ krun sum-io.imp --output none +Add numbers up to (<= 0 to quit)? 10 +Sum = 55 +Add numbers up to (<= 0 to quit)? 1000 +Sum = 500500 +Add numbers up to (<= 0 to quit)? 0 +sh$ +
+

The option --output none instructs krun to not display the resulting configuration after the program executes. The input/output streaming works with or without this option, although if you don't use the option then a configuration with empty in and out cells will be displayed after the program is executed. You can also initialize the configuration using configuration variables and stream the contents of the cells to standard input/output at the same time. For example, if you use a configuration variable in the in cell and pass contents to it through krun, then those contents will be consumed first, and then the user will be prompted to introduce additional input if the program's execution encounters more read() constructs.

+

The old IMP constructs

+

The semantics of the old IMP constructs is almost identical to their +semantics in the original IMP language, except for those constructs +making use of the program state and for those whose syntax has slightly +changed. Indeed, the rules for variable lookup and assignment in IMP +accessed the state cell, but that cell is not available in IMP++ +anymore. Instead, we have to use the combination of environment and store +cells. Thanks to K's implicit configuration abstraction, we do not have +to mention the thread and threads cells: these are +automatically inferred (and added by the K tool at compile time) from the +definition of the configuration above, as there is only one correct +way to complete the configuration context of these rules in order to +match the configuration declared above. In our case here, "correct way" +means that the k and env cells will be considered as +being part of the same thread cell, as opposed to each being part +of a different thread. Configuration abstraction is crucial for modularity, +because it gives us the possibility to write our definitions in a way that +may not require us to revisit existing rules when we change the configuration. +Changes in the configuration are quite frequent in practice, typically +needed in order to accommodate new language features. For example, +imagine that we initially did not have threads in IMP++. There +would be no need for the thread and threads cells in +the configuration then, the cells k and env being simply +placed at the top level in the T cell, together with the +already existing cells. Then the rules below would be exactly the +same. Thus, configuration abstraction allows you to not have to +modify your rules when you make structural changes in your language +configuration.

+
k
syntax KResult ::= Int | Bool +
+

Variable lookup

+
k
rule <k> X:Id => I ...</k> + <env>... X |-> N ...</env> + <store>... N |-> I ...</store> +
+

Arithmetic constructs

+
k
rule I1 / I2 => I1 /Int I2 requires I2 =/=Int 0 + rule I1 + I2 => I1 +Int I2 + rule - I => 0 -Int I +
+

Boolean constructs

+
k
rule I1 <= I2 => I1 <=Int I2 + rule ! T => notBool T + rule true && B => B + rule false && _ => false +
+

Variable assignment

+

Note that the old IMP assignment statement X = I; is now composed of two +constructs: an assignment expression construct X = I, followed by a +semicolon ; turning the expression into a statement. The rationale behind +this syntactic restructuring has been explained in Lesson 7. Here is the +semantics of the two constructs:

+
k
rule _:Int; => .K + rule <k> X = I:Int => I ...</k> + <env>... X |-> N ...</env> + <store>... N |-> (_ => I) ...</store> +
+

Sequential composition

+

Sequential composition has been defined as a whitespace-separated syntactic list of statements. Recall that syntactic lists are actually syntactic sugar for cons-lists. Therefore, the following two rules eventually sequentialize a syntactic list of statements s1 s2 ... sn into the corresponding computation s1 ~> s2 ~> ... ~> sn.

+
k
rule .Stmts => .K + rule S:Stmt Ss:Stmts => S ~> Ss +
+

Conditional statement

+
k
rule if (true) S else _ => S + rule if (false) _ else S => S +
+

While loop

+

The only thing to notice here is that the empty block has been replaced with the block holding the explicit empty sequence. That's because in the semantics all empty lists become their corresponding explicit dots (to avoid parsing ambiguities).

+
k
rule while (B) S => if (B) {S while (B) S} else {.Stmts} +
+

The new IMP++ constructs

+

We next discuss the semantics of the new IMP++ constructs.

+

Strings

+

First, we have to state that strings are also results. +Second, we give the semantics of IMP++ string concatenation (which +uses the already existing addition symbol + from IMP) by +reduction to the built-in string concatenation operation.

+
k
syntax KResult ::= String + rule Str1 + Str2 => Str1 +String Str2 +
+

Variable increment

+

Like variable lookup, this is also meant to be a supercool transition: we +want it to count both in the non-determinism due to strict operations above +it in the computation and in the non-determinism due to thread +interleavings. This rule also relies on K's configuration abstraction. +Without abstraction, you would have to also include the thread and +threads cells.

+
k
rule <k> ++X => I +Int 1 ...</k> + <env>... X |-> N ...</env> + <store>... N |-> (I => I +Int 1) ...</store> +
+

Read

+

The read() construct evaluates to the first integer in the +input buffer, which it consumes. Note that two or more threads can +"compete" on reading the next integer from the input buffer, and +different choices for the next transition can lead to different behaviors.

+
k
rule <k> read() => I ...</k> + <input> ListItem(I:Int) => .List ...</input> +
+

Print

+

The print statement is strict, so all its arguments are +eventually evaluated (recall that print is variadic). We +append each of its evaluated arguments, in order, to the output buffer, +and structurally discard the residual print statement with an +empty list of arguments. We only want to allow printing integers and +strings, so we define a Printable syntactic category including +only these and define the print statement to only print +Printable elements. Alternatively, we could have had two +similar rules, one for integers and one for strings. Recall that, +currently, K's lists are cons-lists, so we cannot simply rewrite the +head of a list (P) into a list (.). Note that different threads may +compete on the output buffer.

+
k
syntax Printable ::= Int | String + +/* currently it is necessary to subsort Printable to AExp, + but future K should be able to infer that automatically. */ + syntax AExp ::= Printable + + context print(HOLE:AExp, _AEs:AExps); + + rule <k> print(P:Printable,AEs => AEs); ...</k> + <output>... .List => ListItem(P) </output> + rule print(.AExps); => .K +
+

Halt

+

The halt statement empties the computation, so the rewriting process simply terminates as if the program terminated normally. Interestingly, once we add threads to the language, the halt statement as defined below will terminate the current thread only. If you want an abrupt termination statement that halts the entire program, then you need to discard the entire contents of the threads cell, so that the whole program terminates abruptly, no matter how many concurrent threads it has, because there is nothing left to rewrite.

+
k
rule <k> halt; ~> _ => .K </k> +
+

Spawn thread

+

A spawned thread is passed its parent's environment at creation time. The spawn expression in the parent thread is immediately replaced by the unique identifier of the newly created thread, so the parent thread can continue its execution. We only consider a sequentially consistent shared memory model for IMP++, but other memory models can also be defined in K; see, for example, the definition of KERNELC. Note that K's configuration abstraction is hard at work here, in two different places. First, the parent thread's k and env cells are wrapped within a thread cell. Second, the child thread's k, env and id cells are also wrapped within a thread cell. Why that way, and not putting all these five cells together within the same thread, or even creating an additional threads cell at the top holding a thread cell with the new k, env and id? Because in the original configuration we declared the multiplicity of the thread cell to be *, which effectively tells the K tool that zero, one or more such cells can co-exist in a configuration at any moment. The other cells have the default multiplicity one, so they are not allowed to multiply. Thus, the only way to complete the rule below in a way consistent with the declared configuration is to wrap the first two cells in a thread cell, and the latter three cells (those replacing the .Bag) in another thread cell. Once the rule applies, the spawning thread cell will add a new thread cell next to it, which is consistent with the declared configuration cell multiplicity. The unique identifier of the new thread is generated using the fresh variable notation (!T).

+
k
rule <k> spawn S => !T:Int +Int 1 ...</k> <env> Rho </env> + (.Bag => <thread>... <k> S </k> <env> Rho </env> <id> !T +Int 1 </id> ...</thread>) +
+

Join thread

+

A thread that wants to join another thread T has to wait until the computation of T becomes empty. When that happens, the join statement is simply dissolved. The terminated thread is not removed, because we want to allow other possible join statements to also dissolve.

+
k
rule <k> join(T); => .K ...</k> <thread>... <k>.K</k> <id>T</id> ...</thread> +
+

Blocks

+

The body statement of a block is executed normally, making sure +that the environment at the block entry point is saved in the computation, +in order to be recovered after the block body statement. This step is +necessary because blocks can declare new variables having the same +name as variables which already exist in the environment, and our +semantics of variable declarations is to update the environment map in +the declared variable with a fresh location. Thus, variables which +are shadowed lose their original binding, which is why we take a +snapshot of the environment at block entrance and place it after the +block body (see the semantics of environment recovery at the end of +this module). Note that any store updates through variables which are +not declared locally are kept at the end of the block, since the store +is not saved/restored. An alternative to this environment save/restore +approach is to actually maintain a stack of environments and to push a +new layer at block entrance and pop it at block exit. The variable +lookup/assign/increment operations then also need to change, so we do +not prefer that non-modular approach. Compilers solve this problem by +statically renaming all local variables into fresh ones, to completely +eliminate shadowing and thus environment saving/restoring.

+
k
rule <k> {Ss} => Ss ~> Rho ...</k> <env> Rho </env> +
+

Variable declaration

+

We allocate a fresh location for each newly declared variable and +initialize it with 0.

+
k
rule <k> int (X,Xs => Xs); ...</k> + <env> Rho => Rho[X <- !N:Int] </env> + <store>... .Map => !N |-> 0 ...</store> + rule int .Ids; => .K +
+

Auxiliary operations

+

We only have one auxiliary operation in IMP++, the environment +recovery. Its role is to discard the current environment in the +env cell and replace it with the environment that it holds.

+
k
rule <k> Rho => .K ...</k> <env> _ => Rho </env> +
+

If you want to avoid useless environment recovery steps and keep the size +of the computation structure smaller, then you can also add the rule

+
  rule (_:Map => .) ~> _:Map
+
+

This rule acts like a "tail recursion" optimization, but for blocks.

+
k
// verification ids + syntax Id ::= "n" [token] + | "sum" [token] + | "a" [token] + | "b" [token] + | "c" [token] +endmodule +
+

On Kompilation Options

+

We are done with the IMP++ semantics. The next step is to kompile the definition using the kompile tool, thus generating a language model. Depending on what you want to use the generated language model for, you may need to kompile the definition with various options. We discuss these options here.

+

To tell the K tool to exhaustively explore all the behaviors due to the +non-determinism of addition, division, and threads, we have to kompile +with the command:

+
shell
kompile imp.k --enable-search +
+

Theoretically, the heating/cooling rules in K are fully reversible and +unconstrained by side conditions as we showed in the semantics of IMP. +For example, the theoretical heating/cooling rules corresponding to the +strict attribute of division are the following:

+
E₁ / E₂ ⇒ E₁ ⤳ □ / E₂
+E₁ ⤳ □ / E₂ ⇒ E₁ / E₂
+E₁ / E₂ ⇒ E₂ ⤳ E₁ / □
+E₂ ⤳ E₁ / □ ⇒ E₁ / E₂
+
+

The other semantic rules apply modulo such structural rules. +For example, using heating rules we can bring a redex (a subterm which +can be reduced with semantic rules) to the front of the computation, +then reduce it, then use cooling rules to reconstruct a term over the +original syntax of the language, then heat again and +non-deterministically pick another redex, and so on and so forth +without losing any opportunities to apply semantic rules. +Nevertheless, these unrestricted heating/cooling rules may create an +immense, often unfeasibly large space of possibilities to analyze. +The --enable-search option implements an optimization which works +well with other implementation choices made in the current K tool. +Recall from the detailed description of the IMP language semantics that +(theoretical) reversible rules like above are restricted by default +to complementary conditional rules of the form

+
E₁ / E₂ ⇒ E₁ ⤳ □ / E₂
+   if E₁ not in KResult
+E₁ ⤳ □ / E₂ ⇒ E₁ / E₂
+   if E₁ in KResult
+E₁ / E₂ ⇒ E₂ ⤳ E₁ / □
+   if E₂ not in KResult
+E₂ ⤳ E₁ / □  ⇒ E₁ / E₂
+   if  E₂ in KResult
+
+

Therefore, our tool eagerly heats and lazily cools the computation. +In other words, heating rules apply until a redex gets placed on the +top of the computation, then some semantic rule applies and rewrites +that into a result, then a cooling rule is applied to plug the +obtained result back into its context, then another argument may be +chosen and completely heated, and so on. This leads to efficient +execution, but it may and typically does hide program behaviors. +Using the --enable-search option allows you to interfere with this +process and to obtain all possible non-deterministic behaviors as if +the theoretical heating/cooling rules were applied. Optimizations +of course happen under the hood, but you need not be aware of them. +Used carefully, this mechanism allows us to efficiently explore more of +the non-deterministic behaviors of a program, even all of them (like here). +For example, with the semantics of IMP++ given above, the krun +command with the --search option detects all five behaviors +of the following IMP++ program (x can be 0, 1, 2, 3, or undefined +due to division-by-zero):

+
  int x,y;
+  x = 1;
+  y = ++x / (++x / x);
+
+

Besides non-determinism due to underspecified argument evaluation +orders, which the current K tool addresses as explained above, there +is another important source of non-determinism in programming languages: +non-determinism due to concurrency/parallelism. For example, when two +or more threads are about to access the same location in the store and at +least one of these accesses is a write (i.e., an instance of the variable +assignment rule), there is a high chance that different choices for +the next transition lead to different program behaviors.

+
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/4_imp++/lesson_8/index.html b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_8/index.html new file mode 100644 index 00000000000..9a337d4a6d9 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/4_imp++/lesson_8/index.html @@ -0,0 +1,409 @@ + + + + + + + + + + + + + + +Wrapping up Larger Languages | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Wrapping up Larger Languages

+

In this lesson we wrap up IMP++'s semantics and also generate its poster. +While doing so, we also learn how to display larger configurations in order +to make them easier to read and print.

+

Note that we rearrange the semantics a bit, to group the semantics of the old IMP constructs together and separate it from the semantics of the new IMP++ constructs.

+

You can go even further and manually edit the generated LaTeX document. You typically want to do that when you want to publish your language definition, or parts of it, and you need to fine-tune it to fit the editing requirements. For example, you may want to insert some negative spaces, etc.

+

Part 4 of the tutorial is now complete. At this moment you should know most +of K framework's features and how to use the K tool. You can now define or +design your own programming languages, and then execute and analyze programs.

+

MOVIE (out of date) [06'26"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/index.html b/k-distribution/pl-tutorial/1_k/5_types/index.html new file mode 100644 index 00000000000..da881fda8bd --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/index.html @@ -0,0 +1,400 @@ + + + + + + + + + + + + + + +Part 5: Defining Type Systems | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Part 5: Defining Type Systems

+

In this part of the tutorial we will show that defining type systems for languages is essentially no different from defining semantics. The major difference is that programs and fragments of programs now rewrite to their types, instead of to concrete values. In terms of K, we will learn how to use it for a particular but important kind of application.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_1.9/NOTES/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_1.9/NOTES/index.html new file mode 100644 index 00000000000..cac85ed3e62 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_1.9/NOTES/index.html @@ -0,0 +1,391 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

This currently does NOT work, because of the rules

+
rule _:Int => int                            [anywhere]
+rule _:Bool => bool                          [anywhere]
+
+

which now rewrite ANY integer ANYWHERE to "int", including integers +that appear in the internal data-structures/functions of the builtins. +We will need to allow a strategy where "anywhere" means anywhere in one +or more computational cells.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_1/NOTES/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_1/NOTES/index.html new file mode 100644 index 00000000000..53278cd1d7c --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_1/NOTES/index.html @@ -0,0 +1,384 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Explain the lack of tenv(...)?

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_1/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_1/index.html new file mode 100644 index 00000000000..d7bafbfc6e1 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_1/index.html @@ -0,0 +1,500 @@ + + + + + + + + + + + + + + +Imperative, Environment-Based Type Systems | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Imperative, Environment-Based Type Systems

+

In this lesson you learn how to define a type system for an imperative +language (the IMP++ language defined in Part 4 of the tutorial), using a style +based on type environments.

+

Let us copy the imp.k file from Part 4 of the tutorial, Lesson 7, which holds +the semantics of IMP++, and modify it into a type system. The resulting type +system, when executed, yields a type checker.

+

We start by defining the new strictness attributes of the IMP++ syntax. +While doing so, remember that programs and fragments of programs now reduce +to their types. So types will be the new results of our new (type) semantics. +We also clean up the semantics by removing the unnecessary tags, and also +use strict instead of seqstrict wherever possible, because strict gives +implementations more freedom. Interestingly, note that spawn is strict now, +because the code of the child thread should type in the current parent's type +environment. Note that this is not always the case for threads, see for example +SIMPLE in the languages tutorial, but it works here for our simpler IMP++.

+

From a typing perspective, the && construct is strict in both its arguments; +its short-circuit (concrete) semantics is irrelevant for its (static) type +system. Similarly, both the conditional and the while loop are strict +constructs when regarded through the typing lenses.

+

Finally, the sequential composition is now sequentially strict! Indeed, +statements are now going to reduce to their type, stmt, and it is critical +for sequential composition to type its argument statements left-to-right; +for example, imagine that the second argument is a variable declaration (whose +type semantics will modify the type environment).

+

We continue by defining the new results of computations, that is, the actual +types. In this simple imperative language, we only have a few constant types: +int, bool, string, block and stmt.

+
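As a rough sketch of what this amounts to in K (an illustration of the idea, not necessarily the exact literate source; note that block and stmt get regrouped a bit later), the types can be declared as constant productions and made K results:

syntax Type ::= "int" | "bool" | "string" | "block" | "stmt"
syntax KResult ::= Type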

We next define the new configuration, which is actually quite simple. Besides +the <k/> cell, all we need is a type environment cell, <tenv/>, which will +hold a map from identifiers to their types. A type environment is therefore +like a state in the abstract domain of type values.

+
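A minimal sketch of such a configuration (the top cell name and any cell attributes are incidental here) could look as follows:

configuration <T>
                <k> $PGM:Stmts </k>
                <tenv> .Map </tenv>
              </T>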

Let us next modify the semantic rules, turning them into a type system. In +short, the idea is to reduce the basic values to their types, and then have a +rule for each language construct reducing it to its result type whenever its +arguments have the expected types.

+

We write the rules in the order given by the syntax declarations, to make +sure we do not forget any construct.

+

Integers reduce to their type, int.

+

So do the strings.

+
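For instance, assuming the Type constants sketched above, these two typing rules are simply:

rule _:Int    => int
rule _:String => string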

Variables are now looked up in the type environment and reduced to their type there. Since we only declare integer variables in IMP++, their type in tenv will always be int. Nevertheless, we write the rule generically, so that we would not have to change it later if we add other type declarations to IMP++. Note that we reject programs which look up undeclared variables. Rejection, in this case, means that the rewriting gets stuck.

+
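A sketch of the lookup rule, in the same style as the environment-based lookup rules seen elsewhere in the tutorial:

rule <k> X:Id => T ...</k> <tenv>... X |-> T ...</tenv>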

Variable increment types to int, provided the variable has type int.

+

Read types to int, because we only allow integer input.

+
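Sketches of these two rules could be:

rule <k> ++X => int ...</k> <tenv>... X |-> int ...</tenv>
rule read() => int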

Division is only allowed on integers, so it rewrites to int provided that its +arguments rewrite to int. Note, however, that in order to write int / int, +we have to explicitly add int to the syntax of arithmetic expressions. +Otherwise, the K parser rightfully complains, because / was declared on +arithmetic expressions, not on types. One simple and generic way to allow +types to appear anywhere, is to define Type as a syntactic subcategory of all +the other syntactic categories. Let's do it on a by-need basis, though.

+

Addition is overloaded, so we add two typing rules for it: one for integers +and another for strings.

+
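A sketch of the by-need subsorting and of these typing rules:

syntax AExp ::= Type          // by-need: let types appear where AExps are expected
rule int / int => int
rule int + int => int
rule string + string => string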

As discussed, spawn types to stmt provided that its argument types to +block.

+

The assignment construct was strict(2); its typing policy is that the declared +type of X should be identical to the type of the assigned value. Like for +lookup, we define this rule more generically than needed for IMP++, for any +type, not only for int.

+
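Sketches of the spawn and assignment typing rules (the spawn rule assumes, again by need, that types are also allowed where Blocks are expected):

rule spawn block => stmt      // assuming, by need, syntax Block ::= Type
rule <k> X:Id = T:Type => T ...</k> <tenv>... X |-> T ...</tenv>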

The typing rules for Boolean expression constructs are in the same spirit. +Note that we need only one rule for &&.

+
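Sketched typing rules for the Boolean constructs (assuming types are also subsorted, by need, into BExp so the rules below parse):

syntax BExp ::= Type          // by-need subsorting
rule int <= int => bool
rule ! bool => bool
rule bool && bool => bool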

The typing of blocks is a bit trickier. First, note that we still need to +recover the environment after the block is typed, because we do not want the +block-local variables to be visible in the outer type environment. We recover +the type environment only after the block-enclosed statements type; moreover, +we also opportunistically yield a block type on the computation when we +discard the type environment recovery item. To account for the fact that the +block-enclosed statement can itself be a block (e.g., {{S}}), we would need an +additional rule. Since we do not like repetition, we instead group the types +block and stmt into one syntactic category, BlockOrStmtType, and now we +can have only one rule. We also include BlockOrStmtType in Type, as a +replacement for the two basic types.

+
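The grouping described above, together with a sketch of the block rules (block and stmt are regrouped here, replacing their direct declarations sketched earlier; the exact formulation of the recovery rule may differ in the actual definition):

syntax BlockOrStmtType ::= "block" | "stmt"
syntax Type ::= BlockOrStmtType
rule <k> {Ss:Stmts} => Ss ~> Rho ...</k> <tenv> Rho </tenv>
rule <k> _:BlockOrStmtType ~> Rho:Map => block ...</k> <tenv> _ => Rho </tenv>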

The expression statement types as expected. Recall that we only allow +arithmetic expressions, which type to int, to be used as statements in IMP++.

+

The conditional was declared strict in all its arguments. Its typing policy +is that its first argument types to bool and its two branches to block. +If that is the case, then it yields a stmt type.

+

For while, its first argument should type to bool and its second to block.

+
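Sketches of these three rules (relying on the by-need subsortings of Type into AExp, BExp and Block mentioned above):

syntax Block ::= Type         // by-need, so block can appear as a branch below
rule int; => stmt
rule if (bool) block else block => stmt
rule while (bool) block => stmt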

Variable declarations add new bindings to the type environment. Recall that +we can only declare variables of integer type in IMP++.

+
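A sketch of the declaration rules, mirroring the shape of the concrete semantics:

rule <k> int (X,Xs => Xs); ...</k> <tenv> Rho => Rho[X <- int] </tenv>
rule int .Ids; => stmt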

The typing policy of print is that it can only print integer or string values, +and in that case it types to stmt. Like for BlockOrStmtType, to avoid +having two similar rules, one for int and another for string, we prefer to +introduce an additional syntactic category, PrintableType, which includes both +int and string types.

+
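A sketch, following the same list-consuming pattern as the concrete print semantics shown earlier (the PrintableType grouping and the exact rule shapes are illustrative):

syntax PrintableType ::= "int" | "string"
syntax Type ::= PrintableType
rule print(_:PrintableType, AEs => AEs);
rule print(.AExps); => stmt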

halt types to stmt; so its subsequent code is also typed.

+

join types to stmt, provided that its argument types to int.

+
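Sketches of these last two rules:

rule halt; => stmt
rule join int; => stmt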

Sequential composition was declared as a whitespace-separated sequentially +strict list. Its typing policy is that all the statements in the list should +type to stmt or block in order for the list to type to stmt. Since +lists are maintained internally as cons-lists, this is probably the simplest +way to do it:

+
rule .Stmts => stmt
+rule _:BlockOrStmtType Ss => Ss
+
+

Note that the first rule, which types the empty sequence of statements to stmt, +is needed anyway, to type empty blocks {} (together with the block rule).

+

kompile imp.k and krun all the programs in Part 4 of the tutorial. They +should all type to stmt.

+

In the next lesson we will define a substitution-based type system for LAMBDA.

+

Go to Lesson 2, Type Systems: Substitution-Based Higher-Order Type Systems.

+

MOVIE (out of date) [10'11"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_2/NOTES/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_2/NOTES/index.html new file mode 100644 index 00000000000..bd9210ea634 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_2/NOTES/index.html @@ -0,0 +1,385 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Maybe we should define simply-typed lambda calculus in some earlier +lecture in tutorial 1, and then reuse its examples here.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_2/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_2/index.html new file mode 100644 index 00000000000..9bd79983c6c --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_2/index.html @@ -0,0 +1,455 @@ + + + + + + + + + + + + + + +Substitution-Based Higher-Order Type Systems | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Substitution-Based Higher-Order Type Systems

+

In this lesson you learn how to define a substitution-based type system for +a higher-order language, namely the LAMBDA language defined in Part 1 of the +tutorial.

+

Let us copy the definition of LAMBDA from Part 1 of the tutorial, Lesson 8. We are going to modify it into a type system for LAMBDA.

+

Before we start, it is worth clarifying an important detail, namely that our type system will yield a type checker when executed, not a type inferencer. In particular, we are going to change the LAMBDA syntax to allow us to associate a type with each declared variable. The constructs which declare variables are lambda, let, letrec and mu. The syntax of all these will therefore change.

+

Since here we are not interested in a LAMBDA semantics anymore, we take the +freedom to eliminate the Val syntactic category, our previous results. +Our new results are going to be the types, because programs will now reduce +to their types.

+

As explained, the syntax of the lambda construct needs to change, to also +declare the type of the variable that it binds. We add the new syntactic +category Type, with the following constructs: int, bool, the function +type (which gives it its higher-order status), and parentheses as bracket. +Also, we make types our K results.

+
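A sketch of the changed syntax (the exact attributes and groupings may differ in the actual definition):

syntax Exp ::= "lambda" Id ":" Type "." Exp    [binder]
syntax Type ::= "int" | "bool"
              | Type "->" Type
              | "(" Type ")"                   [bracket]
syntax KResult ::= Type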

We are now ready to define the typing rules.

+

Let us start with the typing rule for lambda abstraction: lambda X : T . E +types to the function type T -> T', where T' is the type obtained by further +typing E[T/X]. This can be elegantly achieved by reducing the lambda +abstraction to T -> E[T/X], provided that we extend the function type construct +to take expressions, not only types, as arguments, and to be strict. +This can be easily achieved by redeclaring it as a strict expression construct +(strictness in the second argument would suffice in this example, but it is +more uniform to define it strict overall).

+
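A sketch of this rule, together with the redeclared (now strict) arrow construct:

syntax Exp ::= Exp "->" Exp    [strict]   // the function-type arrow, extended to expressions
rule lambda X : T . E => T -> E[T/X]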

The typing rule for application is as simple as it can get: (T1->T2) T1 => T2.

+

Let us now give the typing rules of arithmetic and Boolean expression +constructs. First, let us get rid of Val. Second, rewrite each value to its +type, similarly to the type system for IMP++ in the previous lesson. Third, +replace each semantic rule by its typing rule. Fourth, make sure you +do not forget to subsort Type to Exp, so your rules above will parse.

+

The typing policy of the conditional statement is that its first argument +should type to bool and its other two arguments should type to the same type +T, which will also be the result type of the conditional. So we make the +conditional construct strict in all its three arguments and we write the +obvious rule: if bool then T:Type else T => T. We want a runtime check that +the latter arguments are actually typed, so we write T:Type.

+

There is nothing special about let, except that we have to make sure we +change its syntax to account for the type of the variable that it binds. +This rule is a macro, so the let is desugared statically.

+

Similarly, the syntax of letrec and mu needs to change to account for the +type of the variable that they bind. The typing of letrec remains based on +its desugaring to mu; we have to make sure the types are also included now.

+

The typing policy of mu is that its body should type to the same type T of +its variable, which is also the type of the entire mu expression. This can +be elegantly achieved by rewriting it to (T -> T) E[T/X]. Recall that +application is strict, so E[T/X] will be eventually reduced to its type. +Then the application types correctly only if that type is also T, and in +that case the result type will also be T.

+
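That is, the rule is essentially:

rule mu X : T . E => (T -> T) E[T/X]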

kompile and krun some programs. You can, for example, take the LAMBDA +programs from the first tutorial, modify them by adding types to their +variable declarations, and then type check them using krun.

+

In the next lesson we will discuss an environment-based type system +for LAMBDA.

+

Go to Lesson 3, Type Systems: Environment-Based Higher-Order Type Systems.

+

MOVIE (out of date) [6'52"]

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_3/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_3/index.html new file mode 100644 index 00000000000..9e8d7feccd2 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_3/index.html @@ -0,0 +1,442 @@ + + + + + + + + + + + + + + +Environment-Based Higher-Order Type Systems | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Environment-Based Higher-Order Type Systems

+

In this lesson you learn how to define an environment-based type system for +a higher-order language, namely the LAMBDA language defined in Part 1 of the +tutorial.

+

The simplest and fastest way to proceed is to copy the substitution-based +type system of LAMBDA from the previous lesson and modify it into an +environment-based one. A large portion of the substitution-based definition +will remain unchanged. We only have to modify the rules that use +substitution.

+

We do not need the substitution anymore, so we can remove the require and +import statements. The syntax of types and expressions stays unchanged, but +we can now remove the binder tag of lambda.

+

Like in the type system of IMP++ in Lesson 1, we need a configuration that +contains, besides the <k/> cell, a <tenv/> cell that will hold the type +environment.

+

In an environment-based definition, unlike in a substitution-based one, we +need to lookup variables in the environment. So let us start with the +type lookup rule:

+
rule <k> X:Id => T ...</k> <tenv>... X |-> T ...</tenv>
+
+

The type environment is populated by the semantic rule of lambda:

+
rule <k> lambda X : T . E => (T -> E) ~> Rho ...</k>
+     <tenv> Rho => Rho[X <- T] </tenv>
+
+

So X is bound to its type T in the type environment, and then T -> E +is scheduled for processing. Recall that the arrow type construct has been +extended into a strict expression construct, so E will be eventually reduced +to its type. Like in other environment-based definitions, we need to make +sure that we recover the type environment after the computation in the scope +of the declared variable terminates.

+

The typing rule of application does not change, so it stays as elegant as it +was in the substitution-based definition:

+
rule (T1 -> T2) T1 => T2
+
+

So do the rules for arithmetic and Boolean constructs, and those for the +if, and let, and letrec.

+

The mu rule needs to change, because it was previously defined using +substitution. We modify it in the same spirit as we modified the lambda +rule: bind X to its type in the environment, schedule its body for typing +in its right context, and then recover the type environment.

+
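One plausible formulation, mirroring the lambda rule above (treat this as a sketch rather than the exact source):

rule <k> mu X : T . E => (T -> T) E ~> Rho ...</k>
     <tenv> Rho => Rho[X <- T] </tenv>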

Finally, we give the semantics of environment recovery, making sure +the environment is recovered only after the preceding computation is +reduced to a type:

+

rule <k> _:Type ~> (Rho => .) ...</k> <tenv> _ => Rho </tenv>

+

The changes that we applied to the substitution-based definition were +therefore quite systematic: each substitution invocation was replaced with +an appropriate type environment update/recovery.

+

Go to Lesson 4, Type Systems: A Naive Substitution-Based Type Inferencer.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_4/NOTES/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_4/NOTES/index.html new file mode 100644 index 00000000000..64d9681c822 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_4/NOTES/index.html @@ -0,0 +1,388 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

The README needs to be changed to reflect the fact that we now have a builtin unification procedure. We may even want to merge this lecture with the next one, and eliminate the approach where we throw equalities on the computation. This needs some more thinking, though, especially on how to smoothly glue it with Lesson 6, where we also use equalities.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_4/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_4/index.html new file mode 100644 index 00000000000..1d49cc8cf36 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_4/index.html @@ -0,0 +1,585 @@ + + + + + + + + + + + + + + +A Naive Substitution-Based Type Inferencer | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

A Naive Substitution-Based Type Inferencer

+

In this lesson you learn how to define a naive substitution-based type +inferencer for a higher-order language, namely the LAMBDA language +defined in Part 1 of the tutorial.

+

Unlike in the type checker defined in Lessons 2 and 3, where we had to +associate a type with each declared variable, a type inferencer +attempts to infer the types of all the variables from the way those +variables are used. Let us take a look at this program, say plus.lambda:

+
lambda x . lambda y . x + y
+
+

Since x and y are used in an integer addition context, we can infer +that they must have the type int and the result of the addition is +also an int, so the type of the entire expression is int -> int -> int. +Similarly, the program if.lambda

+
lambda x . lambda y . lambda z .
+  if x then y else z
+
+

can only make sense when x has type bool and y and z have the same +type, say t, in which case the type of the entire expression is +bool -> t -> t -> t. Since the type t can be anything, we say that +the type of this expression is polymorphic. That means that the code +above can be used in different contexts, where t can be an int, a +bool, a function type int -> int, and so on.

+

In the identity.lambda program

+
let f = lambda x . x
+in f 1
+
+

f has such a polymorphic type, which is then applied to an integer, +so this program is type-safe and its type is int.

+

A typical polymorphic expression is the composition

+
lambda f . lambda g . lambda x .
+  g (f x)
+
+

which has the type (t1 -> t2) -> (t2 -> t3) -> (t1 -> t3), polymorphic +in 3 types.

+

Let us now define our naive type inferencer and then we discuss more +examples. The idea is quite simple: we conceptually do the same +operations like we did within the type checker defined in Lesson 2, +with two important differences:

+
    +
  1. instead of declaring a type with each declared variable, we assume a fresh type for that variable; and
  2. instead of checking that the types of expressions satisfy the type properties of the context in which they are used, we impose those properties as type equality constraints. A general-purpose unification-based constraint solving mechanism is then used to solve the generated type constraints.
+

Let us start with the syntax, which is essentially identical to that +of the type checker in Lesson 2, except that bound variables are not +declared a type anymore. Also, to keep things more compact, we put +all the Exp syntax declarations in one syntax declaration this time.

+ +

Before we modify the rules, let us first define our machinery for +adding and solving constraints. First, we require and import the +unification procedure. We do not discuss unification here, but if you +are interested you can consult the unification.k files under +k-distribution/include/kframework/builtin, which contains our current generic +definition of unification, which is written also in K. The generic unification +provides a sort, Mgu, for most-general-unifier, an operation +updateMgu(Mgu,T1,T2) which updates Mgu with additional constraints +generated by forcing the terms T1 and T2 to be equal, and an operation +applyMgu(Mgu,T) which applies Mgu to term T. For our use +of unification here, we do not even need to know how Mgu terms are +represented internally.

+

We define a K item construct, =, which takes two Type terms and +enforces them to be equal by means of updating the current Mgu. +Once the constraints are added to the Mgu, the equality dissolves +itself. With this semantics of = in mind, we can now go ahead and +modify the rules of the type checker systematically into rules +for a type inferencer. The changes are self-explanatory and +mechanical: for example, the rule

+
rule int * int => int
+
+

changes into rule

+
rule T1:Type  * T2:Type => T1 = int ~> T2 = int ~> int
+
+

generating the constraints that the two arguments of multiplication +have the type int, and the result type is int. Recall that each type +equality on the <k/> cell updates the current Mgu appropriately and +then dissolves itself; thus, the above says that after imposing the +constraints T1=int and T2=int, multiplication yields a type int.

+
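Concretely, the equality construct described above can be sketched as a K item that updates the <mgu/> cell and then dissolves (updateMgu being the operation provided by the unification module):

syntax KItem ::= Type "=" Type
rule <k> T1:Type = T2:Type => .K ...</k>
     <mgu> Theta:Mgu => updateMgu(Theta, T1, T2) </mgu>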

As mentioned above, since types of variables are not declared anymore, +but inferred, we have to generate a fresh type for each variable at its +declaration time, and then generate appropriately constraints for it. +For example, the type semantics of lambda and mu become:

+
rule lambda X . E => T -> E[T/X]  when fresh(T:Type)
+rule mu X . E => (T -> T) E[T/X]  when fresh(T:Type)
+
+

that is, we add a condition stating that the previously declared type +is now a fresh one. This type will be further constrained by how the +variable X is being used within E.

+

Interestingly, the previous typing rule for lambda application is not +powerful enough anymore. Indeed, since types are not given anymore, +it may very well be the case that the inferred type of the first +argument of the application construct is not yet a function type +(remember, for example, the program composition.lambda above). What +we have to do is to enforce it to be a function type, by means of +fresh types and constraints. We can introduce a fresh type for the +result of the application, and then write the expected rule as +follows:

+
rule T1:Type T2:Type => T1 = (T2 -> T) ~> T  when fresh(T:Type)
+
+

The conditional requires that its first argument is a bool and its +second and third arguments have the same type, which is also the +result type.

+
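In the same constraint-generating style, a sketch of the conditional rule could be:

rule if T:Type then T1:Type else T2:Type => T = bool ~> T1 = T2 ~> T1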

The macros do not change, in particular let is desugared into lambda +application. We will next see that this is a significant restriction, +because it limits the polymorphism of our type system.

+

We are done. We have a working type inferencer for LAMBDA.

+

Let's kompile it and krun the programs above. They all work as +expected. Let us also try some additional programs, to push it to its +limits.

+

First, let us test mu by means of a letrec example:

+
letrec f x = 3
+in f
+
+

We can also try all the programs that we had in our first tutorial, on LAMBDA, for example the factorial.lambda program:

+
letrec f x = if x <= 1 then 1 else (x * (f (x + -1)))
+in (f 10)
+
+

Those programs are simple enough that they should all work as +expected with our naive type inferencer here.

+

Let us next try to type some tricky programs, which involve more +complex and indirect type constraints.

+

tricky-1.lambda:

+
lambda f . lambda x . lambda y . (
+  (f x y) + x + (let x = y in x)
+)
+
+

tricky-2.lambda:

+
lambda x .
+  let f = lambda y . if true then y else x
+  in (lambda x . f 0)
+
+

tricky-3.lambda:

+
lambda x . let f = lambda y . if true then x 7 else x y
+           in f
+
+

tricky-4.lambda:

+
lambda x . let f = lambda x . x
+           in let d = (f x) + 1
+              in x
+
+

tricky-5.lambda:

+
lambda x . let f = lambda y . x y
+           in let z = x 0 in f
+
+

It is now time to see the limitations of this naive type inferencer. +Consider the program

+
let id = lambda x . x
+in if (id true) then (id 1) else (id 2)
+
+

Our type inferencer fails gracefully with a clash in the <mgu/> cell between int and bool. Indeed, the desugaring macro of let turns it into a lambda and an application, which further enforces id to have a type of the form t -> t for some fresh type t. The first use of id in the condition of if will then constrain t to be bool, while the other uses in the two branches will enforce t to be int. Thus the clash in the <mgu/> cell.

+

Similarly, the program

+
let id = lambda x . x
+in id id
+
+

yields a different kind of conflict: if id has type t -> t, in order +to apply id to itself it must be the case that its argument, t, equals +t -> t. These two type terms cannot be unified because there is a +circular dependence on t, so we get a cycle in the <mgu/> cell.

+

Both limitations above will be solved when we change the semantics of +let later on, to account for the desired polymorphism.

+

Before we conclude this lesson, let us see one more interesting +example, where the lack of let-polymorphism leads not to a type error, +but to a less generic type:

+
let f1 = lambda x . x in
+  let f2 = f1 in
+    let f3 = f2 in
+      let f4 = f3 in
+        let f5 = f4 in
+          if (f5 true) then f2 else f3
+
+

Our current type inferencer will infer the type bool -> bool for the +program above. Nevertheless, since all functions f1, f2, f3, f4, f5 +are the identity function, which is polymorphic, we would expect the +entire program to type to the same polymorphic identity function type.

+

This limitation will be also addressed when we define our +let-polymorphic type inferencer.

+

Before that, in the next lesson we will show how easily we can turn +the naive substitution-based type inferencer discussed in this lesson +into a similarly naive, but environment-based type inferencer.

+

Go to Lesson 5, Type Systems: A Naive Environment-Based Type Inferencer.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_5/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_5/index.html new file mode 100644 index 00000000000..c558e520fce --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_5/index.html @@ -0,0 +1,441 @@ + + + + + + + + + + + + + + +A Naive Environment-Based Type Inferencer | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

A Naive Environment-Based Type Inferencer

+

In this lesson you learn how to define a naive environment-based type +inferencer for a higher-order language. Specifically, we take the +substitution-based type inferencer for LAMBDA defined in Lesson 4 and +turn it into an environment-based one.

+

Recall from Lesson 3, where we defined an environment-based type +checker for LAMBDA based on the substitution-based one in Lesson 2, +that the transition from a substitution-based definition to an +environment-based one was quite systematic and mechanical: each +substitution occurrence E[T/X] is replaced by E, but at the same time +the variable X is bound to type T in the type environment. One benefit +of using type environments instead of substitution is that we replace +a linear complexity operation (the substitution) with a constant +complexity one (the variable lookup).

+

There is not much left to say which has not been already said in +Lesson 3: we remove the unnecessary binder annotations for the +variable binding operations, then add a <tenv/> cell to the +configuration to hold the type environment, then add a new rule for +variable lookup, and finally apply the transformation of substitutions +E[T/X] into E as explained above.

+
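For instance, the variable lookup and lambda rules could now be sketched as follows (using the same fresh-type condition as in Lesson 4; treat this as an illustration of the transformation):

rule <k> X:Id => T ...</k> <tenv>... X |-> T ...</tenv>
rule <k> lambda X . E => T -> E ~> Rho ...</k>
     <tenv> Rho => Rho[X <- T] </tenv>
  when fresh(T:Type)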

The resulting type inferencer should now work exactly the same way as +the substitution-based one, except, of course, that the resulting +configurations will contain a <tenv/> cell now.

+

As a sanity check, let us consider two more LAMBDA programs that test the static scoping nature of the inferencer. We do that because faulty environment-based definitions often have this problem. The program

+
let x = 1
+in let f = lambda a . x
+   in let x = true
+      in f 3
+
+

should type to int, not to bool, and so it does. Similarly, the +program

+
let y = 0
+in letrec f x = if x <= 0
+                then y
+                else let y = true
+                     in f (x + 1)
+   in f 1
+
+

should also type to int, not bool, and so it does, too.

+

The type inferencer defined in this lesson has the same limitations, +in terms of polymorphism, as the one in Lesson 4. In the next +lesson we will see how it can be parallelized, and in further lessons +how to make it polymorphic.

+

Go to Lesson 6, Type Systems: Parallel Type Checkers/Inferencers.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_6/NOTES/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_6/NOTES/index.html new file mode 100644 index 00000000000..7badb38e728 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_6/NOTES/index.html @@ -0,0 +1,385 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

The README needs to be changed to reflect the fact that we now have a builtin +unification procedure.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_6/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_6/index.html new file mode 100644 index 00000000000..759a926a830 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_6/index.html @@ -0,0 +1,495 @@ + + + + + + + + + + + + + + +Parallel Type Checkers/Inferencers | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Parallel Type Checkers/Inferencers

+

In this lesson you learn how to define parallel type checkers or +inferencers. To make a concrete choice, we will parallelize the one in +the previous lesson, but the ideas are general. We use the same +idea to define type checkers for other languages in the K tool +distribution, such as SIMPLE and KOOL.

+

The idea is in fact quite simple. Instead of one monolithic typing +task, we generate many smaller tasks, which can be processed in +parallel. We use the same approach to define parallel semantics as we +used for threads in IMP++ in Part 4 of the tutorial, that is, we add a +cell holding all the parallel tasks, making sure we declare the cell +holding a task with multiplicity *. For the particular type +inferencer that we chose here, the one in Lesson 5, each task will +hold an expression to type together with a type environment (so it +knows where to look up its free variables). We thus have the following +configuration:

+
configuration <tasks color="yellow">
+                <task color="orange" multiplicity="*">
+                  <k color="green"> $PGM:Exp </k>
+                  <tenv color="red"> .Map </tenv>
+                </task>
+              </tasks>
+              <mgu color="blue"> .Mgu </mgu>
+
+

Now we have to take each typing rule we had before and change it to +yield parallel typing. For example, our rule for typing +multiplication was the following in Lesson 5:

+
rule T1:Type * T2:Type => T1 = int ~> T2 = int ~> int
+
+

Since * was strict, its two arguments eventually type, and once that +happens the rule above fires. Unfortunately, the strictness of +multiplication makes the typing of the two expressions sequential in +our previous definition. To avoid typing the two expressions +sequentially, and instead generate two parallel tasks, we remove the +strict attribute of multiplication and replace the rule above with the +following:

+
rule <k> E1 * E2 => int ...</k> <tenv> Rho </tenv>
+     (. => <task> <k> E1 = int </k> <tenv> Rho </tenv> </task>
+           <task> <k> E2 = int </k> <tenv> Rho </tenv> </task>)
+
+

Therefore, we generate two tasks for typing E1 and E2 in the same type +environment as the current task, and let the current task continue by +simply optimistically reducing E1*E2 to its expected result type, int. +If E1 or E2 does not type to int, then either its corresponding +task will get stuck or the <mgu/> cell will end up with a clash or cycle, +so the program will not type overall, in spite of the fact that we +allowed the task containing the multiplication to continue. This is +how we obtain maximal parallelism in this case.

+

Before we continue, note that the new tasks hold equalities whose +arguments are expressions, while previously the +equality construct was declared to take types. What we want now is +for the equality construct to take arbitrary expressions, first +type them, and then generate the type constraint like before. This can +be done very easily by just extending the equality construct to +expressions and declaring it strict:

+
syntax KItem ::= Exp "=" Exp  [strict]
+
+ +

Unlike before, where we only passed types to the equality construct, +we now need a runtime check that its arguments are indeed types before +we can generate the updateMgu command:

+
rule <k> T:Type = T':Type => . ...</k>
+     <mgu> Theta:Mgu => updateMgu(Theta,T,T') </mgu>
+
+

Like before, an equality will therefore update the <mgu/> cell and then +dissolve itself, leaving the <k/> cell of the corresponding task +empty. Such empty tasks are unnecessary, so they can be erased:

+
rule <task>... <k> . </k> ...</task> => .
+
+

We can now follow the same style as for multiplication to write the +parallel typing rules of the other arithmetic constructs, and even for +the conditional.

+
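For example, one plausible way to handle the conditional in this style (a sketch only, assuming a fresh result type T; the actual rule in the lesson may differ) is to optimistically continue with T while spawning three tasks, one constraining the condition to bool and two constraining the branches to T:

rule <k> if E then E1 else E2 => T ...</k> <tenv> Rho </tenv>
     (. => <task> <k> E = bool </k> <tenv> Rho </tenv> </task>
           <task> <k> E1 = T </k> <tenv> Rho </tenv> </task>
           <task> <k> E2 = T </k> <tenv> Rho </tenv> </task>)
  when fresh(T:Type)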

To parallelize the typing of lambda we generate two fresh types, one +for the variable and one for the body, and make sure that we generate +the correct type constraint and environment in the body task:

+
rule <k> lambda X . E => Tx -> Te ...</k> <tenv> TEnv </tenv>
+     (. => <task> <k> E = Te </k> <tenv> TEnv[Tx/X] </tenv> </task>)
+  when fresh(Tx:Type) andBool fresh(Te:Type)
+
+

Note that the above also spares us from having to change and then +recover the environment of the current cell.

+

For function application we also need to generate two fresh types:

+
rule <k> E1 E2 => T ...</k> <tenv> Rho </tenv>
+     (. => <task> <k> E1 = T2 -> T </k> <tenv> Rho </tenv> </task>
+           <task> <k> E2 = T2 </k> <tenv> Rho </tenv> </task>)
+  when fresh(T2:Type) andBool fresh(T:Type)
+
+

The only rule left is that of mu X . E. In this case we only need one +fresh type, because X, E and mu X . E all have the same type:

+
rule <k> mu X . E => T ...</k>  <tenv> TEnv </tenv>
+     (. => <task> <k> E = T </k> <tenv> TEnv[T/X] </tenv> </task>)
+  when fresh(T:Type)
+
+

We do not need the type environment recovery operation, so we delete it.

+

We can now kompile and krun all the programs that we typed in Lesson 5. +Everything should work.

+

In this lesson we only aimed at parallelizing the type inferencer in +Lesson 5, not at improving its expressiveness; it still has the same +limitations in terms of polymorphism. The next lessons are dedicated +to polymorphic type inferencers.

+

Go to Lesson 7, Type Systems: A Naive Substitution-based Polymorphic Type Inferencer.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_7/NOTES/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_7/NOTES/index.html new file mode 100644 index 00000000000..f7444e7c721 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_7/NOTES/index.html @@ -0,0 +1,386 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

This approach leads to an incorrect type checker, in that programs which +lead to a runtime error will type. See tricky-5 and/or variations of it.

+

Discuss monomorphic vs. polymorphic types and type inferencers.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_7/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_7/index.html new file mode 100644 index 00000000000..97737122beb --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_7/index.html @@ -0,0 +1,500 @@ + + + + + + + + + + + + + + +A Naive Substitution-based Polymorphic Type Inferencer | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

A Naive Substitution-based Polymorphic Type Inferencer

+

In this lesson you learn how little it takes to turn a naive monomorphic +type inferencer into a naive polymorphic one, basically only changing +a few characters. In terms of the K framework, you will learn that +you can have complex combinations of substitutions in K, both over +expressions and over types.

+

Let us start directly with the change. All we have to do is to take +the LAMBDA type inferencer in Lesson 4 and only change the macro

+
rule let X = E in E' => (lambda X . E') E  [macro]
+
+

as follows:

+
rule let X = E in E' => E'[E/X]  [macro]
+
+

In other words, we are inlining the beta-reduction rule of +lambda-calculus within the original rule. In terms of typing, +the above forces the type inferencer to type E in place for each +occurrence of X in E'. Unlike with the first rule, where X had to get +a single type satisfying the constraints of all of X's occurrences in +E', we now never associate any type with X at all.

+

Let us kompile and krun some examples. Everything that worked with +the type inferencer in Lesson 4 should still work here, although the +types of some programs can now be more general. For example, reconsider +the nested-lets.lambda program

+
let f1 = lambda x . x in
+  let f2 = f1 in
+    let f3 = f2 in
+      let f4 = f3 in
+        let f5 = f4 in
+          if (f5 true) then f2 else f3
+
+

which was previously typed to bool -> bool. With the new rule above, +the sequence of lets is iteratively eliminated and we end up with the +program

+
if (lambda x . x) true then (lambda x . x) else (lambda x . x)
+
+

which now types (with both type inferencers) to a type of the form +t -> t, for some type variable t, which is more general than the +previous bool -> bool type that the program typed to in Lesson 4.

+

We can also now type programs that were not typable before, such as

+
let id = lambda x . x
+in if (id true) then (id 1) else (id 2)
+
+

and

+
let id = lambda x . x
+in id id
+
+

Let us also test it on some trickier programs, also not typable +before, such as

+
let f = lambda x . x
+in let g = lambda y . f y
+   in g g
+
+

which gives us a type of the form t -> t for some type variable t, +as well as

+
let f = let g = lambda x . x
+        in let h = lambda x . lambda x . (g g g g)
+           in h
+in f
+
+

which types to t1 -> t2 -> t3 -> t3 for some type variables t1, t2, t3.

+

Here is another program which was not typable before, which is +trickier than the others above in that a lambda-bound variable appears +free in a let-bound expression:

+
lambda x . (
+  let y = lambda z . x
+  in if (y true) then (y 1) else (y (lambda x . x))
+)
+
+

The above presents no problem now, because once lambda z . x gets +substituted for y we get a well-typed expression which yields that x +has the type bool, so the entire expression types to bool -> bool.

+

The cheap type inferencer that we obtained above therefore works as +expected. However, it has two problems which justify a more advanced +solution. First, substitution is typically considered an elegant +mathematical instrument which is not too practical in implementations, +so an implementation of this type inferencer will likely be based on +type environments anyway. Additionally, we mix two kinds of +substitutions in this definition, one where we substitute types and +another where we substitute expressions, which can only make things +harder to implement efficiently. Second, our naive substitution of E +for X in E' can yield an exponential explosion in size of the original +program. Consider, for example, the following classic example which +is known to generate a type whose size is exponential in the size of +the program (and is thus used as an argument for why let-polymorphic +type inference is exponential in the worst-case):

+
let f00 = lambda x . lambda y . x in
+  let f01 = lambda x . f00 (f00 x) in
+    let f02 = lambda x . f01 (f01 x) in
+      let f03 = lambda x . f02 (f02 x) in
+        let f04 = lambda x . f03 (f03 x) in
+          // ... you can add more nested lets here
+          f04
+
+

The particular instance of the pattern above generates a type which +has 17 type variables! The desugaring of each let doubles the size of +the program and of its resulting type. While such programs are unlikely +to appear in practice, it is often the case that functions can +be quite complex and large while their type is quite simple in the +end, so we should simply avoid retyping each function each time it is +used.

+
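To see where the 17 comes from, note that each let-bound function is retyped from scratch at each of its two uses, so the number of distinct type variables roughly doubles at each level; up to renaming of type variables, the inferred types follow the pattern sketched below (a back-of-the-envelope illustration, not literal krun output):

f00 : t1 -> t2 -> t1                      (2 type variables)
f01 : t1 -> t2 -> t3 -> t1                (3 type variables)
f02 : t1 -> t2 -> t3 -> t4 -> t5 -> t1    (5 type variables)
f03 : t1 -> t2 -> ... -> t9 -> t1         (9 type variables)
f04 : t1 -> t2 -> ... -> t17 -> t1        (17 type variables)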

This is precisely what we will do next. Before we present the classic +let-polymorphic type inferencer in Lesson 9, which is based on +environments, we first quickly discuss in Lesson 8 an intermediate +step, namely a naive environment-based variant of the inferencer +defined here.

+

Go to Lesson 8, Type Systems: A Naive Environment-based Polymorphic Type Inferencer.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_8/NOTES/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_8/NOTES/index.html new file mode 100644 index 00000000000..22711e0fa05 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_8/NOTES/index.html @@ -0,0 +1,385 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

We also have to redeclare lambda and mu as binders. Program +tricky-2.lambda shows why.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_8/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_8/index.html new file mode 100644 index 00000000000..ca4e8217ee0 --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_8/index.html @@ -0,0 +1,433 @@ + + + + + + + + + + + + + + +A Naive Environment-based Polymorphic Type Inferencer | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

A Naive Environment-based Polymorphic Type Inferencer

+

In this short lesson we discuss how to quickly turn a naive +environment-based monomorphic type inferencer into a naive let-polymorphic +one. Like in the previous lesson, we only need to change a few +characters. In terms of the K framework, you will learn how to have +both environments and substitution in the same definition.

+

Like in the previous lesson, all we have to do is to take the LAMBDA +type inferencer in Lesson 5 and only change the rule

+
rule let X = E in E' => (lambda X . E') E
+
+

as follows:

+
rule let X = E in E' => E'[E/X]
+
+

The reasons why this works have already been explained in the previous +lesson, so we do not repeat them here.

+

Since our new let rule uses substitution, we have to require the +substitution module at the top and also import SUBSTITUTION in the +current module, besides the already existing UNIFICATION.

+
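Concretely, the top of the definition would now look roughly like this (a sketch; the exact file path of the substitution module and the module structure follow the conventions of the earlier lessons):

require "substitution.k"

module LAMBDA
  imports UNIFICATION
  imports SUBSTITUTION
  // ... the rest of the definition is unchanged from Lesson 5,
  // except for the new let rule shown above
endmodule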

Everything which worked with the type inferencer in Lesson 7 should +also work now. Let us only try the exponential type example,

+
let f00 = lambda x . lambda y . x in
+  let f01 = lambda x . f00 (f00 x) in
+    let f02 = lambda x . f01 (f01 x) in
+      let f03 = lambda x . f02 (f02 x) in
+        let f04 = lambda x . f03 (f03 x) in
+          f04
+
+

As expected, this gives us precisely the same type as in Lesson 7.

+

So the only difference between this type inferencer and the one in +Lesson 7 is that substitution is only used for LAMBDA-to-LAMBDA +transformations, but not for infusing types within LAMBDA programs. +Thus, the syntax of LAMBDA programs is preserved intact, which some +may prefer. Nevertheless, this type inferencer is still expensive and +wasteful, because the let-bound expression is typed over and over +again in each place where the let-bound variable occurs.

+

In the next lesson we will discuss a type inferencer based on the +classic Damas-Hindley-Milner type system, which maximizes the reuse of +typing work by means of parametric types.

+

Go to Lesson 9, Type Systems: Let-Polymorphic Type Inferencer (Damas-Hindley-Milner).

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_9.5/NOTES/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_9.5/NOTES/index.html new file mode 100644 index 00000000000..fccf1a57baf --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_9.5/NOTES/index.html @@ -0,0 +1,384 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

We have to test this as well; we need a test folder with a config.xml.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/5_types/lesson_9/index.html b/k-distribution/pl-tutorial/1_k/5_types/lesson_9/index.html new file mode 100644 index 00000000000..452065ce21a --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/5_types/lesson_9/index.html @@ -0,0 +1,550 @@ + + + + + + + + + + + + + + +Let-Polymorphic Type Inferencer (Damas-Hindley-Milner) | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Let-Polymorphic Type Inferencer (Damas-Hindley-Milner)

+

In this lesson we discuss a type inferencer based on what we call today +the Damas-Hindley-Milner type system, which is at the core of many +modern functional programming languages. The first variant of it was +proposed by Hindley in 1969, then, interestingly, Milner rediscovered +it in 1978 in the context of the ML language. Damas formalized it as +a type system in his PhD thesis in 1985. More specifically, our type +inferencer here, like many others as well as many implementations of +it, follows more closely the syntax-driven variant proposed by Clement +in 1987.

+

In terms of K, we will see how easily we can turn one definition which +is considered naive (our previous type inferencer in Lesson 8) into a +definition which is considered advanced. All we have to do is to +change one existing rule (the rule of the let binder) and to add a new +one. We will also learn some new predefined features of K, which make +the above possible.

+

The main idea is to replace the rule

+
rule let X = E in E' => E'[E/X]
+
+

which creates potentially many copies of E within E' with a rule +which types E once and then reuses that type in each place where X +occurs free in E'. The simplest K way to type E is to declare the +let construct strict(2). Now we cannot simply bind X to the type +of E, because we would obtain a variant of the naive type inferencer +we already discussed, together with its limitations, in Lesson 5 of this +tutorial. The trick here is to parameterize the type of E in all its +unconstrained fresh types, and then create fresh copies of those +parameters in each free occurrence of X in E'.

+
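A sketch of the corresponding change to the syntax declaration of let (the actual declaration may carry additional attributes):

syntax Exp ::= "let" Id "=" Exp "in" Exp  [strict(2)]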

Let us discuss some examples, before we go into the technical details. +Consider the first let-polymorphic example which failed to be typed +with our first naive type-inferencer:

+
let id = lambda x . x
+in if (id true) then (id 1) else (id 2)
+
+

When typing lambda x . x, we get a type of the form t -> t, for some +fresh type t. Instead of assigning this type to id as we did in the +naive type inferencers, we now first parametrize this type in its +fresh variable t, written

+
(forall t) t -> t
+
+

and then bind id to this parametric type. The intuition for the +parameter is that it can be instantiated with any other type, so this +parametric type stands, in fact, for infinitely many non-parametric +types. This is similar to what happens in formal logic proof systems, +where rule schemas stand for infinitely many concrete instances of +them. For this reason, parametric types are also called type schemas.

+

Now each time id is looked up within the let-body, we create a fresh +copy of the parameter t, which can thus be independently +constrained by each local context. Let's suppose that the three id +lookups yield the types t1 -> t1, t2 -> t2, and t3 -> t3, respectively. +Then t1 will be constrained to be bool, and t2 and t3 to be int, +so we can now safely type the program above to int.

+

Therefore, a type schema comprises a summary of all the typing work +that has been done for typing the corresponding expression, and an +instantiation of its parameters with fresh copies represents an +elegant way to reuse all that typing work.

+

There are some subtleties regarding what fresh types can be made +parameters. Let us consider another example, discussed as part of +Lesson 7 on naive let-polymorphism:

+
lambda x . (
+  let y = lambda z . x
+  in if (y true) then (y 1) else (y (lambda x . x))
+)
+
+

This program should type to bool -> bool, as explained in Lesson 7. +The lambda construct will bind x to some fresh type tx. Then the +let-bound expression lambda z . x types to tz -> tx for some +additional fresh type tz. The question now is what should the +parameters of this type be when we generate the type schema? If we +naively parameterize in all fresh variables, that is in both tz and +tx obtaining the type schema (forall tz,tx) tz -> tx, then there will +be no way to infer that the type of x, tx, must be a bool! The +inferred type of this expression would then wrongly be tx -> t for +some fresh types tx and t. That's because the parameters are replaced +with fresh copies in each occurrence of y, and thus their relationship +to the original x is completely lost. This tells us that we cannot +parameterize in all fresh types that appear in the type of the +let-bound expression. In particular, we cannot parameterize in those +which some variables are already bound to in the current type +environment (like x is bound to tx in our example above). +In our example, the correct type schema is (forall tz) tz -> tx, +which now allows us to correctly infer that tx is bool.

+

Let us now discuss another example, which should fail to type:

+
lambda x .
+  let f = lambda y . x y
+  in if (f true) then (f 1) else (f 2)
+
+

This should fail to type because lambda y . x y is equivalent to x, +so the conditional imposes the conflicting constraints that x should be +a function whose argument is either a bool or an int. Let us try to +type it using our currently informal procedure. Like in the previous +example, x will be bound to a fresh type tx. Then the let-bound +expression types to ty -> tz with ty and tz fresh types, also adding +the constraint tx = ty -> tz. What should the parameters of this type +be? If we ignore the type constraint and simply make both ty and tz +parameters because no variable is bound to them in the type +environment (indeed, the only variable x in the type environment is +bound to tx), then we can wrongly type this program to tx -> tz +following a reasoning similar to the one in the example above. +In fact, in this example, neither ty nor tz can be a parameter, because +they are constrained by tx.

+

The examples above tell us two things: first, that we have to take the +type constraints into account when deciding the parameters of the +schema; second, that after applying the most-general-unifier solution +given by the type constraints everywhere, the remaining fresh types +appearing anywhere in the type environment are consequently constrained +and cannot be turned into parameters. Since the type environment can in +fact also hold type schemas, which already bind some types, we only need +to ensure that none of the fresh types appearing free anywhere in the +type environment are turned into parameters of type schemas.

+

Thanks to generic support offered by the K tool, we can easily achieve +all the above as follows.

+

First, add syntax for type schemas:

+
syntax TypeSchema ::= "(" "forall" Set ")" Type  [binder]
+
+

The definition below will be given in such a way that the Set argument +of a type schema will always be a set of fresh types. We also declare +this construct to be a binder, so that we can make use of the generic +free variable function provided by the K tool.

+

We now replace the old rule for let

+
rule let X = E in E' => E'[E/X]
+
+

with the following rule:

+
rule <k> let X = T:Type in E => E ~> tenv(TEnv) ...</k>
+     <mgu> Theta:Mgu </mgu>
+     <tenv> TEnv
+      => TEnv[(forall freeVariables(applyMgu(Theta, T)) -Set
+                      freeVariables(applyMgu(Theta, values TEnv))
+              ) applyMgu(Theta, T) / X]
+     </tenv>
+
+

So the type T of E is being parameterized and then bound to X in the +type environment. The current mgu Theta, which comprises all the type +constraints accumulated so far, is applied to both T and the types in +the type environment. The remaining fresh types in T which do not +appear free in the type environment are then turned into type parameters. +The function freeVariables returns, as expected, the free variables of +its argument as a Set; this is why we declared the type schema to be a +binder above.

+

Now a LAMBDA variable in the type environment can be bound to either a +type or a type schema. In the first case, the previous rule we had +for variable lookup can be reused, but we have to make sure we check +that T there is of sort Type (adding a sort membership, for example). +In the second case, as explained above, we have to create fresh copies +of the parameters. This can be easily achieved with another +predefined K function, as follows:

+
rule <k> X:Id => freshVariables(Tvs,T) ...</k>
+     <tenv>... X |-> (forall Tvs) T ...</tenv>
+
+

Indeed, freshVariables takes a set of variables and a term, and returns the +same term but with each of the given variables replaced by a fresh copy.

+
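For completeness, the first case mentioned above, where the variable is bound to a plain type rather than a type schema, can keep the Lesson 5 lookup rule with a sort membership added; a sketch:

rule <k> X:Id => T ...</k>
     <tenv>... X |-> T:Type ...</tenv>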

The operations freeVariables and freshVariables are useful in many K +definitions, so they are predefined in module substitution.k.

+

Our definition of this let-polymorphic type inferencer is now +complete. To test it, kompile it and then krun all the LAMBDA +programs discussed since Lesson 4. They should all work as expected.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/1_k/index.html b/k-distribution/pl-tutorial/1_k/index.html new file mode 100644 index 00000000000..a03937ae92d --- /dev/null +++ b/k-distribution/pl-tutorial/1_k/index.html @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + +Learning K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Learning K

+

We start by introducing the basic features of K by means of a series +of very simple languages. The objective here is neither to learn those +languages nor to study their underlying paradigm, but simply to learn K.

+
    +
  • LAMBDA: Lambda calculus defined.
  • +
  • IMP: A simple imperative language.
  • +
  • LAMBDA++: LAMBDA extended with control flow.
  • +
  • IMP++: IMP extended with threads and IO.
  • +
  • TYPES: LAMBDA type system.
  • +
+
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/2_languages/1_simple/1_untyped/NOTES/index.html b/k-distribution/pl-tutorial/2_languages/1_simple/1_untyped/NOTES/index.html new file mode 100644 index 00000000000..7cab63974e7 --- /dev/null +++ b/k-distribution/pl-tutorial/2_languages/1_simple/1_untyped/NOTES/index.html @@ -0,0 +1,401 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

This is not well tested now, and it was not well tested in v3.6 either. +We should add some rules as transitions, too, and then use search on all +the examples.

+

Exercises not revised yet.

+

.Bag should be . throughout this definition #1772

+

There seems to be a problem with defining auxiliary constructs of sort +KItem when we want to use them at a particular sort in rules. We had to +declare them as constructs of that sort instead. We may want to explain +this a bit in the Latex discussion (related to #1803):

+
    +
  • syntax Exp ::= lookup(Int)
  • +
+
    +
  • syntax KItem ::= lookup(Int)
  • +
+

We currently add Vals to KResult, but we should have a better pattern for +List{Sort} and in general for any collections, where we make them hybrid +(they become KResults when their elements become KResults)

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/2_languages/1_simple/1_untyped/simple-untyped/index.html b/k-distribution/pl-tutorial/2_languages/1_simple/1_untyped/simple-untyped/index.html new file mode 100644 index 00000000000..0cab9eb4a99 --- /dev/null +++ b/k-distribution/pl-tutorial/2_languages/1_simple/1_untyped/simple-untyped/index.html @@ -0,0 +1,1709 @@ + + + + + + + + + + + + + + +SIMPLE — Untyped | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

SIMPLE — Untyped

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K semantic definition of the untyped SIMPLE language. +SIMPLE is intended to be a pedagogical and research language that captures +the essence of the imperative programming paradigm, extended with several +features often encountered in imperative programming languages. +A program consists of a set of global variable declarations and +function definitions. Like in C, function definitions cannot be +nested and each program must have one function called main, +which is invoked when the program is executed. To make it more +interesting and to highlight some of K's strengths, SIMPLE includes +the following features in addition to the conventional imperative +expression and statement constructs:

+
    +
  • +

    Multidimensional arrays and array references. An array evaluates +to an array reference, which is a special value holding a location (where +the elements of the array start) together with the size of the array; +the elements of the array can be array references themselves (particularly +when the array is multi-dimensional). Array references are ordinary values, +so they can be assigned to variables and passed/received by functions.

    +
  • +
  • +

Functions and function values. Functions can have zero or +more parameters and can return abruptly using a return statement. +SIMPLE follows a call-by-value parameter passing style, with static scoping. +Function names evaluate to function abstractions, which thereby become ordinary +values in the language, just like array references.

    +
  • +
  • +

    Blocks with locals. SIMPLE variables can be declared +anywhere, their scope being from the place where they are declared +until the end of the most nested enclosing block.

    +
  • +
  • +

    Input/Output. The expression read() evaluates to the +next value in the input buffer, and the statement write(e) +evaluates e and outputs its value to the output buffer. The +input and output buffers are lists of values.

    +
  • +
  • +

    Exceptions. SIMPLE has parametric exceptions (the value thrown as +an exception can be caught and bound).

    +
  • +
  • +

    Concurrency via dynamic thread creation/termination and +synchronization. One can spawn a thread to execute any statement. +The spawned thread shares with its parent its environment at creation time. +Threads can be synchronized via a join command which blocks the current thread +until the joined thread completes, via re-entrant locks which can be acquired +and released, as well as through rendezvous commands.

    +
  • +
+

Like in many other languages, some of SIMPLE's constructs can be +desugared into a smaller set of basic constructs. We do that at the end +of the syntax module, and then we only give semantics to the core constructs.

+

Note: This definition is commented slightly more than others, because it is +intended to be one of the first non-trivial definitions that the new +user of K sees. We recommend the beginner user to first check the +language definitions discussed in the K tutorial.

+
k
module SIMPLE-UNTYPED-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

We start by defining the SIMPLE syntax. The language constructs discussed +above have the expected syntax and evaluation strategies. Recall that in K +we annotate the syntax with appropriate strictness attributes, thus giving +each language construct the desired evaluation strategy.

+

Identifiers

+

Recall from the K tutorial that identifiers are builtin and come under the +syntactic category Id. The special identifier for the function +main belongs to all programs, and plays a special role in the semantics, +so we declare it explicitly. This would not be necessary if the identifiers +were all included automatically in semantic definitions, but that is not +possible because of parsing reasons (e.g., K variables used to match +concrete identifiers would then be ambiguously parsed as identifiers). They +are only included in the parser generated to parse programs (and used by the +kast tool). Consequently, we have to explicitly declare all the +concrete identifiers that play a special role in the semantics, like +main below.

+
k
syntax Id ::= "main" [token] +
+

Declarations

+

There are two types of declarations: for variables (including arrays) and +for functions. We are going to allow declarations of the form +var x=10, a[10,10], y=23;, which is why we allow the var +keyword to take a list of expressions. The non-terminals used in the two +productions below are defined shortly.

+
k
syntax Stmt ::= "var" Exps ";" + | "function" Id "(" Ids ")" Block +
+

Expressions

+

The expression constructs below are standard. Increment (++) takes +an expression rather than a variable because it can also increment an array +element. Recall that the syntax we define in K is what we call the syntax +of the semantics: while powerful enough to define non-trivial syntaxes +(thanks to the underlying SDF technology that we use), we typically refrain +from defining precise syntaxes, that is, ones which accept precisely the +well-formed programs (that would not be possible anyway in general). That job +is deferred to type systems, which can also be defined in K. In other words, +we are not making any effort to guarantee syntactically that only variables +or array elements are passed to the increment construct, we allow any +expression. Nevertheless, we will only give semantics to those, so expressions +of the form ++5, which parse (but which will be rejected by our type +system in the typed version of SIMPLE later), will get stuck when executed. +Arrays can be multidimensional and can hold other arrays, so their +lookup operation takes a list of expressions as argument and applies to an +expression (which can in particular be another array lookup), respectively. +The construct sizeOf gives the size of an array in number of elements +of its first dimension. Note that almost all constructs are strict. The only +constructs which are not strict are the increment (since its first argument +gets updated, so it cannot be evaluated), the input read which takes no +arguments so strictness is irrelevant for it, the logical and and or constructs +which are short-circuited, the thread spawning construct which creates a new +thread executing the argument expression and return its unique identifier to +the creating thread (so it cannot just evaluate its argument in place), and the +assignment which is only strict in its second argument (for the same reason as +the increment).

+
k
syntax Exp ::= Int | Bool | String | Id + | "(" Exp ")" [bracket] + | "++" Exp + > Exp "[" Exps "]" [strict] + > Exp "(" Exps ")" [strict] + | "-" Exp [strict] + | "sizeOf" "(" Exp ")" [strict] + | "read" "(" ")" + > left: + Exp "*" Exp [strict, left] + | Exp "/" Exp [strict, left] + | Exp "%" Exp [strict, left] + > left: + Exp "+" Exp [strict, left] + | Exp "-" Exp [strict, left] + > non-assoc: + Exp "<" Exp [strict, non-assoc] + | Exp "<=" Exp [strict, non-assoc] + | Exp ">" Exp [strict, non-assoc] + | Exp ">=" Exp [strict, non-assoc] + | Exp "==" Exp [strict, non-assoc] + | Exp "!=" Exp [strict, non-assoc] + > "!" Exp [strict] + > left: + Exp "&&" Exp [strict(1), left] + | Exp "||" Exp [strict(1), left] + > "spawn" Block + > Exp "=" Exp [strict(2), right] +
+

We also need comma-separated lists of identifiers and of expressions. +Moreover, we want them to be strict, that is, to evaluate to lists of results +whenever requested (e.g., when they appear as strict arguments of +the constructs above).

+
k
syntax Ids ::= List{Id,","} [overload(Exps)] + syntax Exps ::= List{Exp,","} [overload(Exps), strict] // automatically hybrid now + syntax Exps ::= Ids + syntax Val + syntax Vals ::= List{Val,","} [overload(Exps)] + syntax Bottom + syntax Bottoms ::= List{Bottom,","} [overload(Exps)] + syntax Ids ::= Bottoms +
+

Statements

+

Most of the statement constructs are standard for imperative languages. +We syntactically distinguish between empty and non-empty blocks, because we +chose Stmts not to be a (;-separated) list of +Stmt. Variables can be declared anywhere inside a block, their scope +ending with the block. Expressions are allowed to be used for their side +effects only (followed by a semicolon ;). Functions are allowed +to abruptly return. The exceptions are parametric, i.e., one can throw a value +which is bound to the variable declared by catch. Threads can be +dynamically created and terminated, and can synchronize with join, +acquire, release and rendezvous. Note that the +strictness attributes obey the intended evaluation strategy of the various +constructs. In particular, the if-then-else construct is strict only in its +first argument (the if-then construct will be desugared into if-then-else), +while the loop constructs are not strict in any arguments. The print +statement construct is variadic, that is, it takes an arbitrary number of +arguments.

+
k
syntax Block ::= "{" "}" + | "{" Stmt "}" + + syntax Stmt ::= Block + | Exp ";" [strict] + | "if" "(" Exp ")" Block "else" Block [avoid, strict(1)] + | "if" "(" Exp ")" Block [macro] + | "while" "(" Exp ")" Block + | "for" "(" Stmt Exp ";" Exp ")" Block [macro] + | "return" Exp ";" [strict] + | "return" ";" [macro] + | "print" "(" Exps ")" ";" [strict] +// NOTE: print strict allows non-deterministic evaluation of its arguments +// Either keep like this but document, or otherwise make Exps seqstrict. +// Of define and use a different expression list here, which is seqstrict. + | "try" Block "catch" "(" Id ")" Block + | "throw" Exp ";" [strict] + | "join" Exp ";" [strict] + | "acquire" Exp ";" [strict] + | "release" Exp ";" [strict] + | "rendezvous" Exp ";" [strict] +
+

The reason we allow Stmts as the first argument of for +instead of Stmt is that we want to allow more than one statement +to be executed when the loop is initialized. Also, as seen shortly, macros +may expand one statement into more statements; for example, an initialized +variable declaration statement var x=0; desugars into two statements, +namely var x; x=0;, so if we use Stmt instead of Stmts +in the production of for above then we risk that the macro expansion +of statement var x=0; happens before the macro expansion of for, +also shown below, in which case the latter would not apply anymore because +of a syntactic mismatch.

+
k
syntax Stmt ::= Stmt Stmt [right] + +// I wish I were able to write the following instead, but confuses the parser. +// +// syntax Stmts ::= List{Stmt,""} +// syntax Top ::= Stmt | "function" Id "(" Ids ")" Block +// syntax Pgm ::= List{Top,""} +// +// With that, I could have also eliminated the empty block +
+

Desugared Syntax

+

This part desugars some of SIMPLE's language constructs into core ones. +We only want to give semantics to core constructs, so we get rid of the +derived ones before we start the semantics. All desugaring macros below are +straightforward.

+
k
rule if (E) S => if (E) S else {} + rule for(Start Cond; Step) {S} => {Start while (Cond) {S Step;}} + rule for(Start Cond; Step) {} => {Start while (Cond) {Step;}} + rule var E1:Exp, E2:Exp, Es:Exps; => var E1; var E2, Es; + rule var X:Id = E; => var X; X = E; +
+

For the semantics, we can therefore assume from now on that each +conditional has both branches, that there are only while loops, and +that each variable is declared alone and without any initialization as part of +the declaration.

+
k
endmodule + + +module SIMPLE-UNTYPED + imports SIMPLE-UNTYPED-SYNTAX + imports DOMAINS +
+

Basic Semantic Infrastructure

+

Before one starts adding semantic rules to a K definition, one needs to +define the basic semantic infrastructure consisting of definitions for +values and configuration. As discussed in the definitions +in the K tutorial, the values are needed to know when to stop applying +the heating rules and when to start applying the cooling rules corresponding +to strictness or context declarations. The configuration serves as a backbone +for the process of configuration abstraction which allows users to only +mention the relevant cells in each semantic rule, the rest of the configuration +context being inferred automatically. Although in some cases the configuration +could be automatically inferred from the rules, we believe that it is very +useful for language designers/semanticists to actually think of and design +their configuration explicitly, so the current implementation of K requires +one to define it.

+

Values

+

We here define the values of the language that the various fragments of +programs evaluate to. First, integers and Booleans are values. As discussed, +arrays evaluate to special array reference values holding (1) a location from +where the array's elements are contiguously allocated in the store, and +(2) the size of the array. Functions evaluate to function values as +λ-abstractions (we do not need to evaluate functions to closures +because each function is executed in the fixed global environment and +function definitions cannot be nested). Like in IMP and other +languages, we finally tell the tool that values are K results.

+
k
syntax Val ::= Int | Bool | String + | array(Int,Int) + | lambda(Ids,Stmt) + syntax Exp ::= Val + syntax Exps ::= Vals + syntax Vals ::= Bottoms + syntax KResult ::= Val + | Vals // TODO: should not need this +
+

The inclusion of values in expressions follows the methodology of +syntactic definitions (like, e.g., in SOS): extend the syntax of the language +to encompass all values and additional constructs needed to give semantics. +In addition to that, it allows us to write the semantic rules using the +original syntax of the language, and to parse them with the same (now extended +with additional values) parser. If writing the semantics directly on the K +AST, using the associated labels instead of the syntactic constructs, then one +would not need to include values in expressions.

+

Configuration

+

The K configuration of SIMPLE consists of a top level cell, T, +holding a threads cell, a global environment map cell genv +mapping the global variables and function names to their locations, a shared +store map cell store mapping each location to some value, a set cell +busy holding the locks which have been acquired but not yet released +by threads, a set cell terminated holding the unique identifiers of +the threads which already terminated (needed for join), input +and output list cells, and a nextLoc cell holding a natural +number indicating the next available location. Unlike in the small languages +in the K tutorial, where we used the fresh predicate to generate fresh +locations, in larger languages, like SIMPLE, we prefer to explicitly manage +memory. The location counter in nextLoc models an actual physical +location in the store; for simplicity, we assume arbitrarily large memory and +no garbage collection. The threads cell contains one thread +cell for each existing thread in the program. Note that the thread cell has +multiplicity *, which means that at any given moment there could be zero, +one or more thread cells. Each thread cell contains a +computation cell k, a control cell holding the various +control structures needed to jump to certain points of interest in the program +execution, a local environment map cell env mapping the thread local +variables to locations in the store, and finally a holds map cell +indicating what locks have been acquired by the thread and not released so far +and how many times (SIMPLE's locks are re-entrant). The control cell +currently contains only two subcells, a function stack fstack which +is a list and an exception stack xstack which is also a list. +One can add more control structures in the control cell, such as a +stack for break/continue of loops, etc., if the language is extended with more +control-changing constructs. Note that all cells except for k are +also initialized, in that they contain a ground term of their corresponding +sort. The k cell is initialized with the program that will be passed +to the K tool, as indicated by the $PGM variable, followed by the +execute task (defined shortly).

+
k
// the syntax declarations below are required because the sorts are + // referenced directly by a production and, because of the way KIL to KORE + // is implemented, the configuration syntax is not available yet + // should simply work once KIL is removed completely + // check other definitions for this hack as well + + syntax ControlCell + syntax ControlCellFragment + + configuration <T color="red"> + <threads color="orange"> + <thread multiplicity="*" type="Map" color="yellow"> + <id color="pink"> -1 </id> + <k color="green"> $PGM:Stmt ~> execute </k> + //<br/> // TODO(KORE): support latex annotations #1799 + <control color="cyan"> + <fstack color="blue"> .List </fstack> + <xstack color="purple"> .List </xstack> + </control> + //<br/> // TODO(KORE): support latex annotations #1799 + <env color="violet"> .Map </env> + <holds color="black"> .Map </holds> + </thread> + </threads> + //<br/> // TODO(KORE): support latex annotations #1799 + <genv color="pink"> .Map </genv> + <store color="white"> .Map </store> + <busy color="cyan"> .Set </busy> + <terminated color="red"> .Set </terminated> + //<br/> // TODO(KORE): support latex annotations #1799 + <input color="magenta" stream="stdin"> .List </input> + <output color="brown" stream="stdout"> .List </output> + <nextLoc color="gray"> 0 </nextLoc> + </T> +
+

Declarations and Initialization

+

We start by defining the semantics of declarations (for variables, +arrays and functions).

+

Variable Declaration

+

The SIMPLE syntax was desugared above so that each variable is +declared alone and its initialization is done as a separate statement. +The semantic rule below matches resulting variable declarations of the +form var X; on top of the k cell +(indeed, note that the k cell is complete, or round, to the +left, and is torn, or ruptured, to the right), allocates a fresh +location L in the store which is initialized with a special value ⊥ +(indeed, the unit ., or nothing, is matched anywhere +in the map ‒note the tears at both sides‒ and replaced with the +mapping L ↦ ⊥), and binds X to L in the local +environment shadowing previous declarations of X, if any. +This possible shadowing of X therefore requires us to update the +entire environment map, which is expensive and can significantly slow +down the execution of larger programs. On the other hand, since we know +that L is not already bound in the store, we simply add the binding +L ↦ ⊥ to the store, thus avoiding a potentially complete +traversal of the store map in order to update it. We prefer the approach +used for updating the store whenever possible, because, in addition to being +faster, it offers more true concurrency than the latter; indeed, according +to the concurrent semantics of K, the store is not frozen while +L ↦ ⊥ is added to it, while the environment is frozen during the +update operation Env[L/X]. The variable declaration command is +also removed from the top of the computation cell and the fresh location +counter is incremented. The undefined symbol added in the store +is of sort KItem, instead of Val, on purpose; this way, the +store lookup rules will get stuck when one attempts to look up an +uninitialized location. All the above happen in one transactional step, +with the rule below. Note also how configuration abstraction allows us to +only mention the needed cells; indeed, as the configuration above states, +the k and env cells are actually located within a +thread cell within the threads cell, but one need +not mention these: the configuration context of the rule is +automatically transformed to match the declared configuration +structure.

+
k
syntax KItem ::= "undefined" + + rule <k> var X:Id; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> undefined ...</store> + <nextLoc> L => L +Int 1 </nextLoc> +
+

Array Declaration

+

The K semantics of the uni-dimensional array declaration is somehow similar +to the above declaration of ordinary variables. First, note the +context declaration below, which requests the evaluation of the array +dimension. Once evaluated, say to a natural number N, then +N +Int 1 locations are allocated in the store for +an array of size N, the additional location (chosen to be the first +one allocated) holding the array reference value. The array reference +value array(L,N) states that the array has size N and its +elements are located contiguously in the store starting with location +L. The operation L … L' ↦ V, defined at the end of this +file in the auxiliary operation section, initializes each location in +the list L … L' to V. Note that, since the dimensions of +array declarations can be arbitrary expressions, this virtually means +that we can dynamically allocate memory in SIMPLE by means of array +declarations.

+
k
context var _:Id[HOLE]; + + rule <k> var X:Id[N:Int]; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> array(L +Int 1, N) + (L +Int 1) ... (L +Int N) |-> undefined ...</store> + <nextLoc> L => L +Int 1 +Int N </nextLoc> + requires N >=Int 0 +
+

SIMPLE allows multi-dimensional arrays. For semantic simplicity, we +desugar them all into uni-dimensional arrays by code transformation. +This way, we only need to give semantics to uni-dimensional arrays. +First, note that the context rule above actually evaluates all the array +dimensions (that's why we defined the expression lists strict!): +Upon evaluating the array dimensions, the code generation rule below +desugars multi-dimensional array declaration to uni-dimensional declarations. +To this aim, we introduce two special unique variable identifiers, +$1 and $2. The first variable, $1, iterates +through and initializes each element of the first dimension with an array +of the remaining dimensions, declared as variable $2:

+
k
syntax Id ::= "$1" [token] | "$2" [token] + rule var X:Id[N1:Int, N2:Int, Vs:Vals]; + => var X[N1]; + { + for(var $1 = 0; $1 <= N1 - 1; ++$1) { + var $2[N2, Vs]; + X[$1] = $2; + } + } +
+

Ideally, one would like to perform syntactic desugarings like the one +above before the actual semantics. Unfortunately, that was not possible in +this case because the dimension expressions of the multi-dimensional array need +to be evaluated first. Indeed, the desugaring rule above does not work if the +dimensions of the declared array are arbitrary expressions, because they can +have side effects (e.g., a[++x,++x]) and those side effects would be +propagated each time the expression is evaluated in the desugaring code (note +that both the loop condition and the nested multi-dimensional declaration +would need to evaluate the expressions given as array dimensions).

+

Function declaration

+

Functions are evaluated to λ-abstractions and stored like any other +values in the store. A binding is added into the environment for the function +name to the location holding its body. Similarly to the C language, SIMPLE +only allows function declarations at the top level of the program. More +precisely, the subsequent semantics of SIMPLE only works well when one +respects this requirement. Indeed, the simplistic context-free parser +generated by the grammar above is more generous than we may want, in that it +allows function declarations anywhere any declaration is allowed, including +inside arbitrary blocks. However, as the rule below shows, we are not +storing the declaration environment with the λ-abstraction value as +closures do. Instead, as seen shortly, we switch to the global environment +whenever functions are invoked, which is consistent with our requirement that +functions should only be declared at the top. Thus, if one declares local +functions, then one may see unexpected behaviors (e.g., when one shadows a +global variable before declaring a local function). The type checker of +SIMPLE, also defined in K (see examples/simple/typed/static), +discards programs which do not respect this requirement.

+
k
rule <k> function F(Xs) S => .K ...</k> + <env> Env => Env[F <- L] </env> + <store>... .Map => L |-> lambda(Xs, S) ...</store> + <nextLoc> L => L +Int 1 </nextLoc> +
+

When we are done with the first pass (pre-processing), the computation +cell k contains only the token execute (see the configuration +declaration above, where the computation item execute was placed +right after the program in the k cell of the initial configuration) +and the cell genv is empty. In this case, we have to call +main() and to initialize the global environment by transferring the +contents of the local environment into it. We prefer to do it this way, as +opposed to processing all the top level declarations directly within the global +environment, because we want to avoid duplication of semantics: the syntax of +the global declarations is identical to that of their corresponding local +declarations, so the semantics of the latter suffices provided that we copy +the local environment into the global one once we are done with the +pre-processing. We want this separate pre-processing step precisely because +we want to create the global environment. All (top-level) functions end up +having their names bound in the global environment and, as seen below, they +are executed in that same global environment; all these mean, in particular, +that the functions "see" each other, allowing for mutual recursion, etc.

+
k
syntax KItem ::= "execute" + rule <k> execute => main(.Exps); </k> + <env> Env </env> + <genv> .Map => Env </genv> +
+

Expressions

+

We next define the K semantics of all the expression constructs.

+

Variable lookup

+

When a variable X is the first computational task, and X is bound to some +location L in the environment, and L is mapped to some value V in the +store, then we rewrite X into V:

+
k
rule <k> X:Id => V ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V:Val ...</store> +
+

Note that the rule above excludes reading ⊥, because ⊥ is not +a value and V is checked at runtime to be a value.

+

Variable/Array increment

+

This is tricky, because we want to allow both ++x and ++a[5]. +Therefore, we need to extract the lvalue of the expression to increment. +To do that, we state that the expression to increment should be wrapped +by the auxiliary lvalue operation and then evaluated. The semantics +of this auxiliary operation is defined at the end of this file. For now, all +we need to know is that it takes an expression and evaluates to a location +value. Location values, also defined at the end of the file, are integers +wrapped with the operation loc, to distinguish them from ordinary +integers.

+
k
context ++(HOLE => lvalue(HOLE)) + rule <k> ++loc(L) => I +Int 1 ...</k> + <store>... L |-> (I => I +Int 1) ...</store> +
+

Arithmetic operators

+

There is nothing special about the following rules. They rewrite the +language constructs to their library counterparts when their arguments +become values of expected sorts:

+
k
rule I1 + I2 => I1 +Int I2 + rule Str1 + Str2 => Str1 +String Str2 + rule I1 - I2 => I1 -Int I2 + rule I1 * I2 => I1 *Int I2 + rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0 + rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0 + rule - I => 0 -Int I + rule I1 < I2 => I1 <Int I2 + rule I1 <= I2 => I1 <=Int I2 + rule I1 > I2 => I1 >Int I2 + rule I1 >= I2 => I1 >=Int I2 +
+

The equality and inequality constructs reduce to syntactic comparison +of the two argument values (which is what the equality on K terms does).

+
k
rule V1:Val == V2:Val => V1 ==K V2 + rule V1:Val != V2:Val => V1 =/=K V2 +
+

The logical negation is clear, but the logical conjunction and disjunction +are short-circuited:

+
k
rule ! T => notBool(T) + rule true && E => E + rule false && _ => false + rule true || _ => true + rule false || E => E +
+

Array lookup

+

Untyped SIMPLE does not check array bounds (the dynamically typed version of +it, in examples/simple/typed/dynamic, does check for array out of +bounds). The first rule below desugars the multi-dimensional array access to +uni-dimensional array access; recall that the array access operation was +declared strict, so all sub-expressions involved are already values at this +stage. The second rule rewrites the array access to a lookup operation at a +precise location; we prefer to do it this way to avoid locking the store. +The semantics of the auxiliary lookup operation is straightforward, +and is defined at the end of the file.

+
k
// The [anywhere] feature is underused, because it would only be used +// at the top of the computation or inside the lvalue wrapper. So it +// may not be worth, or we may need to come up with a special notation +// allowing us to enumerate contexts for [anywhere] rules. + rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs] + [anywhere] + + rule array(L,_)[N:Int] => lookup(L +Int N) + [anywhere] +
+
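To see how the two rules above cooperate, here is a hypothetical reduction (the locations L and L' are made up): assume a is bound to the value array(L,10), and that the element stored at L +Int 1 is itself the value array(L',5). Then:

a[1, 2]
  => array(L,10)[1, 2]       // a evaluates to its array reference value
  => array(L,10)[1][2]       // first rule: peel off one dimension
  => lookup(L +Int 1)[2]     // second rule, applied to the inner access
  => array(L',5)[2]          // the lookup reads the row from the store
  => lookup(L' +Int 2)       // second rule again; this reads the actual element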

Size of an array

+

The size of the array is stored in the array reference value, and the +sizeOf construct was declared strict, so:

+
k
rule sizeOf(array(_,N)) => N +
+

Function call

+

Function application was strict in both its arguments, so we can +assume that both the function and its arguments are evaluated to +values (the former expected to be a λ-abstraction). The first +rule below matches a well-formed function application on top of the +computation and performs the following steps atomically: it switches +to the function body followed by return; (for the case in +which the function does not use an explicit return statement); it +pushes the remaining computation, the current environment, and the +current control data onto the function stack (the remaining +computation can thus also be discarded from the computation cell, +because an unavoidable subsequent return statement ‒see +above‒ will always recover it from the stack); it switches the +current environment (which is being pushed on the function stack) to +the global environment, which is where the free variables in the +function body should be looked up; it binds the formal parameters to +fresh locations in the new environment, and stores the actual +arguments to those locations in the store (this latter step is easily +done by reducing the problem to variable declarations, whose semantics +we have already defined; the auxiliary operation mkDecls is +defined at the end of the file). The second rule pops the +computation, the environment and the control data from the function +stack when a return statement is encountered as the next +computational task, passing the returned value to the popped +computation (the popped computation was the context in which the +returning function was called). Note that the pushing/popping of the +control data is crucial. Without it, one may have a function that +contains an exception block with a return statement inside, which +would put the xstack cell in an inconsistent state (since the +exception block modifies it, but that modification should be +irrelevant once the function returns). We add an artificial +nothing value to the language, which is returned by the +nulary return; statements.

+
k
syntax KItem ::= (Map,K,ControlCellFragment)

rule <k> lambda(Xs,S)(Vs:Vals) ~> K => mkDecls(Xs,Vs) S return; </k>
     <control>
       <fstack> .List => ListItem((Env,K,C)) ...</fstack>
       C
     </control>
     <env> Env => GEnv </env>
     <genv> GEnv </genv>

rule <k> return(V:Val); ~> _ => V ~> K </k>
     <control>
       <fstack> ListItem((Env,K,C)) => .List ...</fstack>
       (_ => C)
     </control>
     <env> _ => Env </env>

syntax Val ::= "nothing"
rule return; => return nothing;
+
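The following hypothetical program exercises both rules above: inc returns explicitly, while log has no return statement, so it falls through to the return; appended by the call rule and yields nothing, which is then discarded by the expression statement rule (given below, under Statements):

function inc(x) { return x + 1; }
function log(x) { print("got ", x, "\n"); }

function main() {
  print(inc(5), "\n");
  log(7);
}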

As with division by zero, it is left unspecified what happens when the nothing value is used in domain calculations. For example, from the perspective of the language semantics, 7 +Int nothing can evaluate to anything, or may not evaluate at all (be undefined). If one wants to make sure that such artificial values are never misused, then one needs to define a static checker (also using K, like the type checker in examples/simple/typed/static) and reject programs that misuse them. Note that, unlike the undefined symbol, which had the sort K instead of Val, we defined nothing to be a value. That is because, as explained above, we do not want the program to get stuck when nothing is returned by a function. Instead, we want the behavior to be unspecified; in particular, if one is careful never to use the returned value in domain computations, as happens when we call a function only for its side effects (e.g., with a statement of the form f(x);), then the program does not get stuck.

+

Read

+

The read() expression construct simply evaluates to the next +input value, at the same time discarding the input value from the +in cell.

+
k
rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input> +
+

Assignment

+

In SIMPLE, like in C, assignments are expression constructs and not statement +constructs. To make it a statement all one needs to do is to follow it by a +semi-colon ; (see the semantics for expression statements below). +Like for the increment, we want to allow assignments not only to variables but +also to array elements, e.g., e1[e2] = e3 where e1 evaluates +to an array reference, e2 to a natural number, and e3 to any +value. Thus, we first compute the lvalue of the left-hand-side expression +that appears in an assignment, and then we do the actual assignment to the +resulting location:

+
k
context (HOLE => lvalue(HOLE)) = _

rule <k> loc(L) = V:Val => V ...</k> <store>... L |-> (_ => V) ...</store>
+

Statements

+

We next define the K semantics of statements.

+

Blocks

+

Empty blocks are simply discarded, as shown in the first rule below. +For non-empty blocks, we schedule the enclosed statement but we have to +make sure the environment is recovered after the enclosed statement executes. +Recall that we allow local variable declarations, whose scope is the block +enclosing them. That is the reason for which we have to recover the +environment after the block. This allows us to have a very simple semantics +for variable declarations, as we did above. One can make the two rules below +computational if one wants them to count as computational steps.

+
k
rule {} => .K
rule <k> { S } => S ~> setEnv(Env) ...</k>  <env> Env </env>
+
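A small hypothetical example of why the environment recovery matters: the inner declaration of x below shadows the outer one only inside the block, so the program prints 2 and then 1:

function main() {
  var x = 1;
  {
    var x = 2;
    print(x, "\n");
  }
  print(x, "\n");
}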

The basic definition of environment recovery is straightforward and +given in the section on auxiliary constructs at the end of the file.

+

There are two common alternatives to the above semantics of blocks. +One is to keep track of the variables which are declared in the block and only +recover those at the end of the block. This way one does more work for +variable declarations but conceptually less work for environment recovery; we +say conceptually because it is not clear that it is indeed the case that +one does less work when AC matching is involved. The other alternative is to +work with a stack of environments instead of a flat environment, and push the +current environment when entering a block and pop it when exiting it. This +way, one does more work when accessing variables (since one has to search the +variable in the environment stack in a top-down manner), but on the other hand +uses smaller environments and the definition gets closer to an implementation. +Based on experience with dozens of language semantics and other K definitions, +we have found that our approach above is the best trade-off between elegance +and efficiency (especially since rewrite engines have built-in techniques to +lazily copy terms, by need, thus not creating unnecessary copies), +so it is the one that we follow in general.

+

Sequential composition

+

Sequential composition is desugared into K's builtin sequentialization +operation (recall that, like in C, the semi-colon ; is not a +statement separator in SIMPLE — it is either a statement terminator or a +construct for a statement from an expression). Note that K allows +to define the semantics of SIMPLE in such a way that statements eventually +dissolve from the top of the computation when they are completed; this is in +sharp contrast to (artificially) evaluating them to a special +skip statement value and then getting rid of that special value, as +it is the case in other semantic approaches (where everything must evaluate +to something). This means that once S₁ completes in the rule below, S₂ +becomes automatically the next computation item without any additional +(explicit or implicit) rules.

+
k
rule S1:Stmt S2:Stmt => S1 ~> S2 +
+

A subtle aspect of the rule above is that S1 is declared to have sort Stmts and not Stmt. That is because desugaring macros can indeed produce left-associative sequential composition of statements. For example, the code var x=0; x=1; is desugared to (var x; x=0;) x=1;, so although originally the first term of the sequential composition had sort Stmt, after desugaring it became of sort Stmts. Note that the attribute [right] associated to the sequential composition production is an attribute of the syntax, and not of the semantics: e.g., it tells the parser to parse var x; x=0; x=1; as var x; (x=0; x=1;), but it does not tell the rewrite engine to rewrite (var x; x=0;) x=1; to var x; (x=0; x=1;).

+

Expression statements

+

Expression statements are only used for their side effects, so their result +value is simply discarded. Common examples of expression statements are ones +of the form ++x;, x=e;, e1[e2]=e3;, etc.

+
k
rule _:Val; => .K +
+

Conditional

+

Since the conditional was declared with the strict(1) attribute, we +can assume that its first argument will eventually be evaluated. The rules +below cover the only two possibilities in which the conditional is allowed to +proceed (otherwise the rewriting process gets stuck).

+
k
rule if ( true) S else _ => S + rule if (false) _ else S => S +
+

While loop

+

The simplest way to give the semantics of the while loop is by unrolling. +Note, however, that its unrolling is only allowed when the while loop reaches +the top of the computation (to avoid non-termination of unrolling). The +simple while loop semantics below works because our while loops in SIMPLE are +indeed very basic. If we allowed break/continue of loops then we would need +a completely different semantics, which would also involve the control cell.

+
k
rule while (E) S => if (E) {S while(E)S} +
+

Print

+

The print statement was strict, so all its arguments are now +evaluated (recall that print is variadic). We append each of +its evaluated arguments to the output buffer, and discard the residual +print statement with an empty list of arguments.

+
k
rule <k> print(V:Val, Es => Es); ...</k> <output>... .List => ListItem(V) </output>
rule print(.Vals); => .K
+

Exceptions

+

SIMPLE allows parametric exceptions, in that one can throw and catch a +particular value. The statement try S₁ catch(X) S₂ +proceeds with the evaluation of S₁. If S₁ evaluates normally, i.e., +without any exception thrown, then S₂ is discarded and the execution +continues normally. If S₁ throws an exception with a statement of the +form throw E, then E is first evaluated to some value V +(throw was declared to be strict), then V is bound to X, then +S₂ is evaluated in the new environment while the reminder of S₁ is +discarded, then the environment is recovered and the execution continues +normally with the statement following the try S₁ catch(X) S₂ statement. +Exceptions can be nested and the statements in the +catch part (S₂ in our case) can throw exceptions to the +upper level. One should be careful with how one handles the control data +structures here, so that the abrupt changes of control due to exception +throwing and to function returns interact correctly with each other. +For example, we want to allow function calls inside the statement S₁ in +a try S₁ catch(X) S₂ block which can throw an exception +that is not caught by the function but instead is propagated to the +try S₁ catch(X) S₂ block that called the function. +Therefore, we have to make sure that the function stack as well as other +potential control structures are also properly modified when the exception +is thrown to correctly recover the execution context. This can be easily +achieved by pushing/popping the entire current control context onto the +exception stack. The three rules below modularly do precisely the above.

+
k
syntax KItem ::= (Id,Stmt,K,Map,ControlCellFragment)

syntax KItem ::= "popx"

rule <k> (try S1 catch(X) {S2} => S1 ~> popx) ~> K </k>
     <control>
       <xstack> .List => ListItem((X, S2, K, Env, C)) ...</xstack>
       C
     </control>
     <env> Env </env>

rule <k> popx => .K ...</k>
     <xstack> ListItem(_) => .List ...</xstack>

rule <k> throw V:Val; ~> _ => { var X = V; S2 } ~> K </k>
     <control>
       <xstack> ListItem((X, S2, K, Env, C)) => .List ...</xstack>
       (_ => C)
     </control>
     <env> _ => Env </env>
+
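Here is a hypothetical program in which the exception is thrown inside a function but caught by the caller. The throw rule restores the entire control context saved when the try block was entered, so the pending call to safeDiv (including its fstack frame) is simply abandoned; the program prints 5 and then the error message:

function safeDiv(x, y) {
  if (y == 0) { throw y; }
  return x / y;
}

function main() {
  try {
    print(safeDiv(10, 2), "\n");
    print(safeDiv(1, 0), "\n");
  } catch(e) {
    print("division by zero\n");
  }
}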

The catch statement S₂ needs to be executed in the original environment, +but where the thrown value V is bound to the catch variable X. We here +chose to rely on two previously defined constructs when giving semantics to +the catch part of the statement: (1) the variable declaration with +initialization, for binding X to V; and (2) the block construct for +preventing X from shadowing variables in the original environment upon the +completion of S₂.

+

Threads

+

SIMPLE's threads can be created and terminated dynamically, and can +synchronize by acquiring and releasing re-entrant locks and by rendezvous. +We discuss the seven rules giving the semantics of these operations below.

+

Thread creation

+

Threads can be created by any other threads using the spawn S +construct. The spawn expression construct evaluates to the unique identifier +of the newly created thread and, at the same time, a new thread cell is added +into the configuration, initialized with the S statement and sharing the +same environment with the parent thread. Note that the newly created +thread cell is torn. That means that the remaining cells are added +and initialized automatically as described in the definition of SIMPLE's +configuration. This is part of K's configuration abstraction mechanism.

+
k
rule <thread>...
       <k> spawn S => !T:Int ...</k>
       <env> Env </env>
     ...</thread>
     (.Bag => <thread>...
                <k> S </k>
                <env> Env </env>
                <id> !T </id>
              ...</thread>)
+
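For example (hypothetical program), the spawned thread below shares the parent's environment, so its assignment to x is visible to the parent; the join ensures the update has happened before x is printed:

function main() {
  var x = 0;
  var t = spawn { x = x + 1; };
  join t;
  print(x, "\n");
}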

Thread termination

+

Dually to the above, when a thread terminates its assigned computation (the +contents of its k cell) is empty, so the thread can be dissolved. +However, since no discipline is imposed on how locks are acquired and released, +it can be the case that a terminating thread still holds locks. Those locks +must be released, so other threads attempting to acquire them do not deadlock. +We achieve that by removing all the locks held by the terminating thread in its +holds cell from the set of busy locks in the busy cell +(keys(H) returns the domain of the map H as a set, that is, only +the locks themselves ignoring their multiplicity). As seen below, a lock is +added to the busy cell as soon as it is acquired for the first time +by a thread. The unique identifier of the terminated thread is also collected +into the terminated cell, so the join construct knows which +threads have terminated.

+
k
rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag)
     <busy> Busy => Busy -Set keys(H) </busy>
     <terminated>... .Set => SetItem(T) ...</terminated>
+

Thread joining

+

Thread joining is now straightforward: all we need to do is to check whether +the identifier of the thread to be joined is in the terminated cell. +If yes, then the join statement dissolves and the joining thread +continues normally; if not, then the joining thread gets stuck.

+
k
rule <k> join T:Int; => .K ...</k> + <terminated>... SetItem(T) ...</terminated> +
+

Acquire lock

+

There are two cases to distinguish when a thread attempts to acquire a lock +(in SIMPLE any value can be used as a lock):
(1) The thread does not currently have the lock, in which case it has to take it, provided that the lock is not already taken by another thread (see the side condition of the first rule).
(2) The thread already has the lock, in which case it just increments its counter for the lock (the locks are re-entrant). These two cases are captured by the two rules below:

+
k
rule <k> acquire V:Val; => .K ...</k>
     <holds>... .Map => V |-> 0 ...</holds>
     <busy> Busy (.Set => SetItem(V)) </busy>
  requires (notBool(V in Busy))

rule <k> acquire V; => .K ...</k>
     <holds>... V:Val |-> (N => N +Int 1) ...</holds>
+

Release lock

+

Similarly, there are two corresponding cases to distinguish when a thread +releases a lock:
(1) The thread holds the lock more than once, in which case all it needs to do is to decrement the lock counter.
(2) The thread holds the lock only once, in which case it needs to remove it from its holds cell and also from the shared busy cell, so other threads can acquire it if they need to.

+
k
rule <k> release V:Val; => .K ...</k>
     <holds>... V |-> (N => N -Int 1) ...</holds>
  requires N >Int 0

rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds>
     <busy>... SetItem(V) => .Set ...</busy>
+
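The hypothetical program below uses the string "lock" as a lock (recall that any value can serve as one) to keep the two updates of count from interleaving; since the locks are re-entrant, each thread could also acquire "lock" a second time without blocking itself:

function main() {
  var count = 0;
  var t1 = spawn { acquire "lock"; count = count + 1; release "lock"; };
  var t2 = spawn { acquire "lock"; count = count + 1; release "lock"; };
  join t1; join t2;
  print(count, "\n");
}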

Rendezvous synchronization

+

In addition to synchronization through acquire and release of locks, SIMPLE also provides a construct for rendezvous synchronization. A thread whose next statement to execute is rendezvous(V) gets stuck until another thread reaches an identical statement; when that happens, the two threads drop their rendezvous statements and continue their executions. If three threads happen to have an identical rendezvous statement as their next statement, then precisely two of them will synchronize and the other will remain blocked until another thread reaches a similar rendezvous statement. The rule below is as simple as it can be. Note, however, that, again, it is K's mechanism for configuration abstraction that makes it work as desired: since the only cell that can occur multiple times and contains a k cell is the thread cell, the only way to concretize the rule below to the actual configuration of SIMPLE is to place each k cell inside its own thread cell.

+
k
rule <k> rendezvous V:Val; => .K ...</k> + <k> rendezvous V; => .K ...</k> +
+
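A minimal hypothetical use of rendezvous: the parent cannot print done before the child prints ready, because both threads block on rendezvous 1; until the other one reaches it:

function main() {
  spawn { print("ready\n"); rendezvous 1; };
  rendezvous 1;
  print("done\n");
}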

Auxiliary declarations and operations

+

In this section we define all the auxiliary constructs used in the +above semantics.

+

Making declarations

+

The mkDecls auxiliary construct turns a list of identifiers and a list of values into a sequence of corresponding variable declarations.

+
k
syntax Stmt ::= mkDecls(Ids,Vals)  [function]
rule mkDecls((X:Id, Xs:Ids), (V:Val, Vs:Vals)) => var X=V; mkDecls(Xs,Vs)
rule mkDecls(.Ids,.Vals) => {}
+

Location lookup

+

The operation below is straightforward.

+
k
syntax Exp ::= lookup(Int) + rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store> +
+

Environment recovery

+

We have already discussed the environment recovery auxiliary operation in the +IMP++ tutorial:

+
k
// TODO: eliminate the env wrapper, like we did in IMP++ + + syntax KItem ::= setEnv(Map) + rule <k> setEnv(Env) => .K ...</k> <env> _ => Env </env> +
+

While theoretically sufficient, the basic definition for environment +recovery alone is suboptimal. Consider a loop while (E)S, +whose semantics (see above) was given by unrolling. S +is a block. Then the semantics of blocks above, together with the +unrolling semantics of the while loop, will yield a computation +structure in the k cell that increasingly grows, adding a new +environment recovery task right in front of the already existing sequence of +similar environment recovery tasks (this phenomenon is similar to the ``tail +recursion'' problem). Of course, when we have a sequence of environment +recovery tasks, we only need to keep the last one. The elegant rule below +does precisely that, thus avoiding the unnecessary computation explosion +problem:

+
k
rule (setEnv(_) => .K) ~> setEnv(_) +
+

In fact, the above follows a common convention in K for recovery +operations of cell contents: the meaning of a computation task of the form +cell(C) that reaches the top of the computation is that the current +contents of cell cell is discarded and gets replaced with C. We +did not add support for these special computation tasks in our current +implementation of K, so we need to define them as above.

+

lvalue and loc

+

For convenience in giving the semantics of constructs like the increment and +the assignment, that we want to operate the same way on variables and on +array elements, we used an auxiliary lvalue(E) construct which was +expected to evaluate to the lvalue of the expression E. This is only +defined when E has an lvalue, that is, when E is either a variable or +evaluates to an array element. lvalue(E) evaluates to a value of +the form loc(L), where L is the location where the value of E +can be found; for clarity, we use loc to structurally distinguish +natural numbers from location values. In giving semantics to lvalue +there are two cases to consider. (1) If E is a variable, then all we need +to do is to grab its location from the environment. (2) If E is an array +element, then we first evaluate the array and its index in order to identify +the exact location of the element of concern, and then return that location; +the last rule below works because its preceding context declarations ensure +that the array and its index are evaluated, and then the rule for array lookup +(defined above) rewrites the evaluated array access construct to its +corresponding store lookup operation.

+
k
// For parsing reasons, we prefer to allow lvalue to take a K

syntax Exp ::= lvalue(K)
syntax Val ::= loc(Int)

// Local variable

rule <k> lvalue(X:Id => loc(L)) ...</k> <env>... X |-> L:Int ...</env>

// Array element: evaluate the array and its index;
// then the array lookup rule above applies.

context lvalue(_::Exp[HOLE::Exps])
context lvalue(HOLE::Exp[_::Exps])

// Finally, return the address of the desired object member

rule lvalue(lookup(L:Int) => loc(L))
+

Initializing multiple locations

+

The following operation initializes a sequence of locations with the same +value:

+
k
syntax Map ::= Int "..." Int "|->" K  [function]
rule N...M |-> _ => .Map  requires N >Int M
rule N...M |-> K => N |-> K (N +Int 1)...M |-> K  requires N <=Int M
+
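As a hypothetical illustration (the concrete numbers are made up), initializing locations 3 through 5 with undefined unrolls one location per step and stops with the empty map once the lower bound exceeds the upper bound:

3...5 |-> undefined
  => 3 |-> undefined  4...5 |-> undefined
  => 3 |-> undefined  4 |-> undefined  5...5 |-> undefined
  => 3 |-> undefined  4 |-> undefined  5 |-> undefined  6...5 |-> undefined
  => 3 |-> undefined  4 |-> undefined  5 |-> undefined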

The semantics of SIMPLE is now complete. Make sure you kompile the definition with the right options in order to generate the desired model. No kompile options are needed if you only want to execute the definition (and thus get an interpreter), but if you want to search for different program behaviors then you need to kompile with the --enable-search option.

+
k
endmodule +
+

Go to Lesson 2, SIMPLE typed static


While we disallow global variables with the same name, and that includes +vector variables, we currently do not check that function names are distinct +from each other and from other global variables. Since we can pass functions +around through their names, this can be problematic. May want to make this +into an exercise in the future.


The programs in this folder are typed variants of the SIMPLE untyped programs. +These programs will be executed both with the dynamic and with the static +semantics of the typed SIMPLE language. Each of the semantics contains its +own results folder showing the expected results of executing these programs.


SIMPLE — Typed — Static

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K definition of the static semantics of the typed SIMPLE +language, or in other words, a type system for the typed SIMPLE +language in K. We do not re-discuss the various features of the +SIMPLE language here. The reader is referred to the untyped version of +the language for such discussions. We here only focus on the new and +interesting problems raised by the addition of type declarations, and +what it takes to devise a type system/checker for the language.

+

When designing a type system for a language, no matter in what +paradigm, we have to decide upon the intended typing policy. Note +that we can have multiple type systems for the same language, one for +each typing policy. For example, should we accept programs which +don't have a main function? Or should we allow functions that do not +return explicitly? Or should we allow functions whose type expects +them to return a value (say an int) to use a plain +return; statement, which returns no value, like in C? +And so on and so forth. Typically, there are two opposite tensions +when designing a type system. On the one hand, you want your type +system to be as permissive as possible, that is, to accept as many +programs that do not get stuck when executed with the untyped +semantics as possible; this will keep the programmers using your +language happy. On the other hand, you want your type system to have +a reasonable performance when implemented; this will keep both the +programmers and the implementers of your language happy. For example, +a type system for rejecting programs that could perform +division-by-zero is not expected to be feasible in general. A simple +guideline when designing typing policies is to imagine how the +semantics of the untyped language may get stuck and try to prevent +those situations from happening.

+

Before we give the K type system of SIMPLE formally, we discuss, +informally, the intended typing policy:

+
    +
  • +

    Each program should contain a main() function. Indeed, +the untyped SIMPLE semantics will get stuck on any program which does +not have a main function.

    +
  • +
  • +

Each primitive value has its own type, which can be int, bool, or string. There is also a type void for nonexistent values, for example for the result of a function meant to return no value (but only be used for its side effects, like a procedure).

    +
  • +
  • +

    The syntax of untyped SIMPLE is extended to allow type +declarations for all the variables, including array variables. This is +done in a C/Java-style. For example, int x; or +int x=7, y=x+3;, or int[][][] a[10,20]; +(the latter defines a 10 × 20 matrix of arrays of integers). +Recall from untyped SIMPLE that, unlike in C/Java, our multi-dimensional +arrays use comma-separated arguments, although they have the array-of-array +semantics.

    +
  • +
  • +

    Functions are also typed in a C/Java style. However, since in SIMPLE +we allow functions to be passed to and returned by other functions, we also +need function types. We will use the conventional higher-order arrow-notation +for function types, but will separate the argument types with commas. For +example, a function returning an array of bool elements and +taking as argument an array x of two-integer-argument functions +returning an integer, is declared using a syntax of the form +bool[] f(((int,int)->int)[] x) { ... } +and has the type ((int,int)->int)[] -> bool[].

    +
  • +
  • +

    We allow any variable declarations at the top level. Functions +can only be declared at the top level. Each function can only access the +other functions and variables declared at the top level, or its own locally +declared variables. SIMPLE has static scoping.

    +
  • +
  • +

    The various expression and statement constructs take only elements of +the expected types.

    +
  • +
  • +

    Increment and assignment can operate both on variables and on array +elements. For example, if f has type int->int[][] and +function g has the type int->int, then the +increment expression ++f(7)[g(2),g(3)] is valid.

    +
  • +
  • +

    Functions should only return values of their declared result +type. To give the programmers more flexibility, we allow functions to +use return; statements to terminate without returning an +actual value, or to not explicitly use any return statement, +regardless of their declared return type. This flexibility can be +handy when writing programs using certain functions only for their +side effects. Nevertheless, as the dynamic semantics shows, a return +value is automatically generated when an explicit return +statement is not encountered.

    +
  • +
  • +

For simplicity, we here limit exceptions to only throw and catch integer values. We leave it as an exercise to the reader to extend the semantics to allow throwing and catching values of arbitrary types. As in languages like Java, one can go even further and define a semantics where thrown exceptions are propagated through try-catch statements until one of the corresponding type is found. We will do this when we define the KOOL language, not here. To keep the definition of SIMPLE simple, here we do not attempt to reject programs which throw uncaught exceptions.

    +
  • +
+

Like in untyped SIMPLE, some constructs can be desugared into a +smaller set of basic constructs. In general, it should be clear why a +program does not type by looking at the top of the k cells in +its stuck configuration.

+
k
module SIMPLE-TYPED-STATIC-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

The syntax of typed SIMPLE extends that of untyped SIMPLE with support +for declaring types to variables and functions.

+
k
syntax Id ::= "main" [token] +
+

Types

+

Primitive, array and function types, as well as lists (or tuples) of types. +The lists of types are useful for function arguments.

+
k
syntax Type ::= "void" | "int" | "bool" | "string" + | Type "[" "]" + | "(" Type ")" [bracket] + > Types "->" Type + + syntax Types ::= List{Type,","} [overload(exps)] +
+

Declarations

+

Variable and function declarations have the expected syntax. For variables, +we basically just replaced the var keyword of untyped SIMPLE with a +type. For functions, besides replacing the function keyword with a +type, we also introduce a new syntactic category for typed variables, +Param, and lists over it.

+
k
syntax Param ::= Type Id + syntax Params ::= List{Param,","} + + syntax Stmt ::= Type Exps ";" + | Type Id "(" Params ")" Block +
+

Expressions

+

The syntax of expressions is identical to that in untyped SIMPLE, +except for the logical conjunction and disjunction which have +different strictness attributes, because they now have different +evaluation strategies.

+
k
syntax Exp ::= Int | Bool | String | Id + | "(" Exp ")" [bracket] + | "++" Exp + > Exp "[" Exps "]" [strict] + > Exp "(" Exps ")" [strict] + | "-" Exp [strict] + | "sizeOf" "(" Exp ")" [strict] + | "read" "(" ")" + > left: + Exp "*" Exp [strict, left] + | Exp "/" Exp [strict, left] + | Exp "%" Exp [strict, left] + > left: + Exp "+" Exp [strict, left] + | Exp "-" Exp [strict, left] + > non-assoc: + Exp "<" Exp [strict, non-assoc] + | Exp "<=" Exp [strict, non-assoc] + | Exp ">" Exp [strict, non-assoc] + | Exp ">=" Exp [strict, non-assoc] + | Exp "==" Exp [strict, non-assoc] + | Exp "!=" Exp [strict, non-assoc] + > "!" Exp [strict] + > left: + Exp "&&" Exp [strict, left] + | Exp "||" Exp [strict, left] + > "spawn" Block + > Exp "=" Exp [strict(2), right] +
+

Note that spawn has not been declared strict. This may +seem unexpected, because the child thread shares the same environment +with the parent thread, so from a typing perspective the spawned +statement makes the same sense in a child thread as it makes in the +parent thread. The reason for not declaring it strict is because we +want to disallow programs where the spawned thread calls the +return statement, because those programs would get stuck in +the dynamic semantics. The type semantics of spawn below will reject +such programs.

+

We still need lists of expressions, defined below, but note that we do +not need lists of identifiers anymore. They have been replaced by the lists +of parameters.

+
k
syntax Exps ::= List{Exp,","} [strict, overload(exps)] +
+

Statements

+

The statements have the same syntax as in untyped SIMPLE, except for +the exceptions, which now type their parameter. Note that, unlike in untyped +SIMPLE, all statement constructs which have arguments and are not desugared +are strict, including the conditional and the while. Indeed, from a +typing perspective, they are all strict: first type their arguments and then +type the actual construct.

+
k
syntax Block ::= "{" "}" + | "{" Stmt "}" + + syntax Stmt ::= Block + | Exp ";" [strict] + | "if" "(" Exp ")" Block "else" Block [avoid, strict] + | "if" "(" Exp ")" Block [macro] + | "while" "(" Exp ")" Block [strict] + | "for" "(" Stmt Exp ";" Exp ")" Block [macro] + | "return" Exp ";" [strict] + | "return" ";" + | "print" "(" Exps ")" ";" [strict] + | "try" Block "catch" "(" Param ")" Block [strict(1)] + | "throw" Exp ";" [strict] + | "join" Exp ";" [strict] + | "acquire" Exp ";" [strict] + | "release" Exp ";" [strict] + | "rendezvous" Exp ";" [strict] +
+

Note that the sequential composition is now sequentially strict, +because, unlike in the dynamic semantics where statements dissolved, +they now reduce to the stmt type, which is a result.

+
k
syntax Stmt ::= Stmt Stmt [seqstrict, right] +
+

Desugaring macros

+

We use the same desugaring macros like in untyped SIMPLE, but, of +course, including the types of the involved variables.

+
k
rule if (E) S => if (E) S else {} + rule for(Start Cond; Step) {S:Stmt} => {Start while(Cond){S Step;}} + rule for(Start Cond; Step) {} => {Start while(Cond){Step;}} + rule T:Type E1:Exp, E2:Exp, Es:Exps; => T E1; T E2, Es; [anywhere] + rule T:Type X:Id = E; => T X; X = E; [anywhere] + +endmodule + + +module SIMPLE-TYPED-STATIC + imports SIMPLE-TYPED-STATIC-SYNTAX + imports DOMAINS +
+

Static semantics

+

Here we define the type system of SIMPLE. Like concrete semantics, +type systems defined in K are also executable. However, K type +systems turn into type checkers instead of interpreters when executed.

+

The typing process is done in two (overlapping) phases. In the first +phase the global environment is built, which contains type bindings +for all the globally declared variables and functions. For functions, +the declared types will be ``trusted'' during the first phase and +simply bound to their corresponding function names and placed in the +global type environment. At the same time, type-checking tasks that +the function bodies indeed respect their claimed types are generated. +All these tasks are (concurrently) verified during the second phase. +This way, all the global variable and function declarations are +available in the global type environment and can be used in order to +type-check each function code. This is consistent with the semantics +of untyped SIMPLE, where functions can access all the global variables +and can call any other function declared in the same program. The +two phases may overlap because of the K concurrent semantics. For +example, a function task can be started while the first phase is still +running; moreover, it may even complete before the first phase does, +namely when all the global variables and functions that it needs have +already been processed and made available in the global environment by +the first phase task.

+

Extended syntax and results

+

The idea is to start with a configuration holding the program to type +in one of its cells, then apply rewrite rules on it mixing types and +language syntax, and eventually obtain a type instead of the original +program. In other words, the program reduces to its type using +the K rules giving the type system of the language. In doing so, +additional typing tasks for function bodies are generated and solved +the same way. If this rewriting process gets stuck, then we say that +the program is not well-typed. Otherwise the program is well-typed +(by definition). We did not need types for statements and for blocks +as part of the typed SIMPLE syntax, because programmers are not allowed +to use such types explicitly. However, we are going to need them in the +type system, because blocks and statements reduce to them.

+

We start by allowing types to be used inside expressions and statements in +our language. This way, types can be used together with language syntax in +subsequent K rules without any parsing errors. Like in the type system of +IMP++ in the K tutorial, we prefer to group the block and statement types +under one syntactic sub-category of types, because this allows us to more +compactly state that certain terms can be either blocks or statements. Also, +since programs and fragments of program will reduce to their types, in order +for the strictness and context declarations to be executable we state that +types are results (same like we did in the IMP++ tutorial).

+
k
syntax Exp ::= Type + syntax Exps ::= Types + syntax BlockOrStmtType ::= "block" | "stmt" + syntax Type ::= BlockOrStmtType + syntax Block ::= BlockOrStmtType + syntax KResult ::= Type + | Types //TODO: remove this, eventually +
+

Configuration

+

The configuration of our type system consists of a tasks cell +holding various typing task cells, and a global type environment. +Each task includes a k cell holding the code to type, a tenv +cell holding the local type environment, and a return cell holding +the return type of the currently checked function. The latter is needed in +order to check whether return statements return values of the expected type. +Initially, the program is placed in a k cell inside a +task cell. Since the cells with multiplicity ? are not +included in the initial configuration, the task cell holding +the original program in its k cell will contain no other +subcells.

+
k
configuration <T color="yellow"> + <tasks color="orange"> + <task multiplicity="*" color="yellow" type="Set"> + <k color="green"> $PGM:Stmt </k> + <tenv multiplicity="?" color="cyan"> .Map </tenv> + <returnType multiplicity="?" color="black"> void </returnType> + </task> + </tasks> +// <br/> + <gtenv color="blue"> .Map </gtenv> + </T> +
+

Variable declarations

+

Variable declarations type as statements, that is, they reduce to the +type stmt. There are only two cases that need to be +considered: when a simple variable is declared and when an array +variable is declared. The macros at the end of the syntax module +above take care of reducing other variable declarations, including +ones where the declared variables are initialized, to only these two +cases. The first case has two subcases: when the variable declaration +is global (i.e., the task cell contains only the k +cell), in which case it is added to the global type environment +checking at the same time that the variable has not been already +declared; and when the variable declaration is local (i.e., a +tenv cell is available), in which case it is simply added to +the local type environment, possibly shadowing previous homonymous +variables. The third case reduces to the second, incrementally moving +the array dimension into the type until the array becomes a simple +variable.

+
k
rule <task> <k> T:Type X:Id; => stmt ...</k> </task>
     <gtenv> Rho (.Map => X |-> T) </gtenv>
  requires notBool(X in keys(Rho))
rule <k> T:Type X:Id; => stmt ...</k> <tenv> Rho => Rho[X <- T] </tenv>

context _:Type _::Exp[HOLE::Exps];
// The rule below may need to sort E to Exp in the future, if the
// parser gets stricter; without that information, it may not be able
// to complete the LHS into T E[int,Ts],.Exps; (and similarly for the RHS)
rule T:Type E:Exp[int,Ts:Types]; => T[] E[Ts];
// I want to write the rule below as _:Type (E:Exp[.Types] => E),
// but the list completion seems to not work well with that.
rule T:Type E:Exp[.Types]; => T E;
+

Function declarations

+

Functions are allowed to be declared only at the top level (the +task cell holds only its k subcell). Each function +declaration reduces to a variable declaration (a binding of its name +to its declared function type), but also adds a task into the +tasks cell. The task consists of a typing of the statement +declaring all the function parameters followed by the function body, +together with the expected return type of the function. The +getTypes and mkDecls functions, defined at the end of +the file in the section on auxiliary operations, extracts the list of +types and makes a sequence of variable declarations from a list of +function parameters, respectively. Note that, although in the dynamic +semantics we include a terminating return statement at the +end of the function body to eliminate from the analysis the case when +the function does not provide an explicit return, we do not need to +include such a similar return statement here. That's because +the return statements type to stmt anyway, and the +entire code of the function body needs to type anyway.

+
k
rule <task> <k> T:Type F:Id(Ps:Params) S => getTypes(Ps)->T F; ...</k> </task>
     (.Bag => <task>
                <k> mkDecls(Ps) S </k> <tenv> .Map </tenv> <returnType> T </returnType>
              </task>)
+
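Concretely, for a hypothetical declaration such as the one below, the rule above replaces the declaration in the main task with the binding of max to the function type int,int -> int (handled by the variable declaration rule, which places it in the global type environment), and it creates a new task whose k cell holds, roughly, the parameter declarations int x; int y; followed by the function body, with the returnType cell set to int:

int max(int x, int y) {
  if (x > y) { return x; } else { return y; }
}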

Checking if main() exists

+

Once the entire program is processed (generating appropriate tasks +to type check its function bodies), we can dissolve the main +task cell (the one holding only a k subcell). Since +we want to enforce that programs include a main function, we also +generate a function task executing main() to ensure that it +types (remove this task creation if you do not want your type system +to reject programs without a main function).

+
k
rule <task> <k> stmt => main(.Exps); </k> (.Bag => <tenv> .Map </tenv>) </task> +
+

Collecting the terminated tasks

+

Similarly, once a non-main task (i.e., one which contains a tenv subcell) is completed using the subsequent rules (i.e., its k cell holds only the block or stmt type), we can dissolve its corresponding cell. Note that it is important to ensure that we only dissolve tasks containing a tenv cell with the rule below, because the main task should not dissolve this way! It should do what the above rule says. In the end, there should be no task cell left in the configuration when the program type checks correctly.

+
k
rule <task>... <k> _:BlockOrStmtType </k> <tenv> _ </tenv> ...</task> => .Bag +
+

Basic values

+

The first three rewrite rules below reduce the primitive values to +their types, as we typically do when we define type systems in K.

+
k
rule _:Int => int + rule _:Bool => bool + rule _:String => string +
+

Variable lookup

+

There are three cases to distinguish for variable lookup: (1) if the +variable is bound in the local type environment, then look its type up +there; (2) if a local environment exists and the variable is not bound +in it, then look its type up in the global environment; (3) finally, +if there is no local environment, meaning that we are executing the +top-level pass, then look the variable's type up in the global +environment, too.

+
k
rule <k> X:Id => T ...</k> <tenv>... X |-> T ...</tenv> + + rule <k> X:Id => T ...</k> <tenv> Rho </tenv> <gtenv>... X |-> T ...</gtenv> + requires notBool(X in keys(Rho)) + + rule <task> <k> X:Id => T ...</k> </task> <gtenv>... X |-> T ...</gtenv> +
+

Increment

+

We want the increment operation to apply to any lvalue, including +array elements, not only to variables. For that reason, we define a +special context extracting the type of the argument of the increment +operation only if that argument is an lvalue. Otherwise the rewriting +process gets stuck. The operation ltype is defined at the +end of this file, in the auxiliary operation section. It essentially +acts as a filter, getting stuck if its argument is not an lvalue and +letting it reduce otherwise. The type of the lvalue is expected to be +an integer in order to be allowed to be incremented, as seen in the +rule ++ int => int below.

+
k
context ++(HOLE => ltype(HOLE)) + rule ++ int => int +
+

Common expression constructs

+

The rules below are straightforward and self-explanatory:

+
k
rule int + int => int + rule string + string => string + rule int - int => int + rule int * int => int + rule int / int => int + rule int % int => int + rule - int => int + rule int < int => bool + rule int <= int => bool + rule int > int => bool + rule int >= int => bool + rule T:Type == T => bool + rule T:Type != T => bool + rule bool && bool => bool + rule bool || bool => bool + rule ! bool => bool +
+

Array access and size

+

Array access requires each index to type to an integer, and the +array type to be at least as deep as the number of indexes:

+
k
// NOTE: +// We used to need parentheses in the RHS, to avoid capturing Ts as an attribute +// Let's hope that is not a problem anymore. + + rule (T[])[int, Ts:Types] => T[Ts] + rule T:Type[.Types] => T +
+

sizeOf only needs to check that its argument is an array:

+
k
rule sizeOf(_T[]) => int +
+

Input/Output

+

The read expression construct types to an integer, while print types +to a statement provided that all its arguments type to integers or +strings.

+
k
rule read() => int + + rule print(T:Type, Ts => Ts); requires T ==K int orBool T ==K string + rule print(.Types); => stmt +
+

Assignment

+

The special context and the rule for assignment below are similar +to those for increment: the LHS of the assignment must be an lvalue +and, in that case, it must have the same type as the RHS, which then +becomes the type of the assignment.

+
k
context (HOLE => ltype(HOLE)) = _ + rule T:Type = T => T +
+

Function application and return

+

Function application requires the type of the function and the +types of the passed values to be compatible. Note that a special case +is needed to handle the no-argument case:

+
k
rule (Ts:Types -> T)(Ts) => T requires Ts =/=K .Types + rule (void -> T)(.Types) => T +
+

The returned value must have the same type as the declared function return type. If an empty return is encountered, then we should check that we are in a function (and not a thread) context, that is, a return cell must be available:

+
k
rule <k> return T:Type; => stmt ...</k> <returnType> T </returnType> + rule <k> return; => stmt ...</k> <returnType> _ </returnType> +
+

Blocks

+

To avoid having to recover type environments after blocks, we prefer +to start a new task for block body, making sure that the new task +is passed the same type environment and return cells. The value +returned by return statements must have the same type as +stated in the return cell. The print variadic +function is allowed to only print integers and strings. The thrown +exceptions can only have integer type.

+
k
rule {} => block

rule <task> <k> {S} => block ...</k> <tenv> Rho </tenv> R </task>
     (.Bag => <task> <k> S </k> <tenv> Rho </tenv> R </task>)
+

Expression statement

+
k
rule _:Type; => stmt +
+

Conditional and while loop

+
k
rule if (bool) block else block => stmt + rule while (bool) block => stmt +
+

Exceptions

+

We currently force the parameters of exceptions to only be integers. +Moreover, for simplicity, we assume that integer exceptions can be +thrown from anywhere, including from functions which do not define +any try-catch block (with the currently unchecked ‒also for +simplicity‒ expectation that the caller functions would catch those +exceptions).

+
k
rule try block catch(int X:Id) {S} => {int X; S} + rule try block catch(int X:Id) {} => {int X;} + rule throw int; => stmt +
+
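For example, the following hypothetical fragment type checks: throw 7; types to stmt because 7 types to int, so the try block types to block, and the catch part is then re-typed as the block { int x; print(x, "\n"); }, which in turn types to block:

void main() {
  try {
    throw 7;
  } catch(int x) {
    print(x, "\n");
  }
}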

Concurrency

+

Nothing special about typing the concurrency constructs, except that +we do not want the spawned thread to return, so we do not include any +return cell in the new task cell for the thread statement. +Same like with the functions above, we do not check for thrown +exceptions which are not caught.

+
k
rule <k> spawn S => int ...</k> <tenv> Rho </tenv> + (.Bag => <task> <k> S </k> <tenv> Rho </tenv> </task>) + rule join int; => stmt + rule acquire _:Type; => stmt + rule release _:Type; => stmt + rule rendezvous _:Type; => stmt + + rule _:BlockOrStmtType _:BlockOrStmtType => stmt +
+

Auxiliary constructs

+

The function mkDecls turns a list of parameters into a +list of variable declarations.

+
k
syntax Stmt ::= mkDecls(Params) [function] + rule mkDecls(T:Type X:Id, Ps:Params) => T X; mkDecls(Ps) + rule mkDecls(.Params) => {} +
+

The ltype context allows only expressions which have an +lvalue to evaluate.

+
k
syntax LValue ::= Id + rule isLValue(_:Exp[_:Exps]) => true + syntax Exp ::= LValue // K should be able to infer this + // if not added, then it gets stuck with an Id on k cell + +// Instead of the second LValue production above you can use a rule: +// rule isLValue(_:Exp[_:Exps]) => true + + syntax Exp ::= ltype(Exp) +// context ltype(HOLE:LValue) +// The above context does not work due to some error, so we write instead + context ltype(HOLE) requires isLValue(HOLE) +
+

The function getTypes is the same as in SIMPLE typed dynamic.

+
k
syntax Types ::= getTypes(Params) [function] + rule getTypes(T:Type _:Id) => T, .Types // I would like to not use .Types + rule getTypes(T:Type _:Id, P, Ps) => T, getTypes(P,Ps) + rule getTypes(.Params) => void, .Types + +endmodule +
+

Go to Lesson 3, SIMPLE typed dynamic


SIMPLE — Typed — Dynamic

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K dynamic semantics of the typed SIMPLE language. +It is very similar to the semantics of the untyped SIMPLE, the +difference being that we now dynamically check the typing policy +described in the static semantics of typed SIMPLE. Because of the +dynamic nature of the semantics, we can also perform some additional +checks which were not possible in the static semantics, such as +memory leaks due to accessing an array out of its bounds. We will +highlight the differences between the dynamically typed and the +untyped SIMPLE as we proceed with the semantics. We recommend the +reader to consult the typing policy and the syntax of types discussed +in the static semantics of the typed SIMPLE language.

+
k
module SIMPLE-TYPED-DYNAMIC-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

The syntax of typed SIMPLE extends that of untyped SIMPLE with support +for declaring types to variables and functions.

+

The syntax below is identical to that of the static semantics of typed +SIMPLE. However, the K strictness attributes are like those of the untyped +SIMPLE, to capture the desired evaluation strategies of the various language +constructs.

+
k
syntax Id ::= "main" [token] +
+

Types

+
k
syntax Type ::= "void" | "int" | "bool" | "string" + | Type "[" "]" + | "(" Type ")" [bracket] + > Types "->" Type + syntax Types ::= List{Type,","} [overload(exps)] +
+

Declarations

+
k
syntax Param ::= Type Id + syntax Params ::= List{Param,","} + + syntax Stmt ::= Type Exps ";" + | Type Id "(" Params ")" Block +
+

Expressions

+
k
syntax Exp ::= Int | Bool | String | Id + | "(" Exp ")" [bracket] + | "++" Exp + > Exp "[" Exps "]" [strict] + > Exp "(" Exps ")" [strict] + | "-" Exp [strict] + | "sizeOf" "(" Exp ")" [strict] + | "read" "(" ")" + > left: + Exp "*" Exp [strict, left] + | Exp "/" Exp [strict, left] + | Exp "%" Exp [strict, left] + > left: + Exp "+" Exp [strict, left] + | Exp "-" Exp [strict, left] + > non-assoc: + Exp "<" Exp [strict, non-assoc] + | Exp "<=" Exp [strict, non-assoc] + | Exp ">" Exp [strict, non-assoc] + | Exp ">=" Exp [strict, non-assoc] + | Exp "==" Exp [strict, non-assoc] + | Exp "!=" Exp [strict, non-assoc] + > "!" Exp [strict] + > left: + Exp "&&" Exp [strict(1), left] + | Exp "||" Exp [strict(1), left] + > "spawn" Block + > Exp "=" Exp [strict(2), right] +
+

Like in the static semantics, there is no need for lists of identifiers +(because we now have lists of parameters).

+
k
syntax Exps ::= List{Exp,","} [strict, overload(exps)] + syntax Val + syntax Vals ::= List{Val,","} [overload(exps)] +
+

Statements

+
k
syntax Block ::= "{" "}" + | "{" Stmt "}" + + syntax Stmt ::= Block + | Exp ";" [strict] + | "if" "(" Exp ")" Block "else" Block [avoid, strict(1)] + | "if" "(" Exp ")" Block [macro] + | "while" "(" Exp ")" Block + | "for" "(" Stmt Exp ";" Exp ")" Block [macro] + | "print" "(" Exps ")" ";" [strict] + | "return" Exp ";" [strict] + | "return" ";" + | "try" Block "catch" "(" Param ")" Block + | "throw" Exp ";" [strict] + | "join" Exp ";" [strict] + | "acquire" Exp ";" [strict] + | "release" Exp ";" [strict] + | "rendezvous" Exp ";" [strict] + + syntax Stmt ::= Stmt Stmt [right] +
+

The same desugaring macros like in the statically typed SIMPLE.

+
k
rule if (E) S => if (E) S else {} + rule for(Start Cond; Step) {S:Stmt} => {Start while(Cond){S Step;}} + rule for(Start Cond; Step) {} => {Start while(Cond){Step;}} + rule T:Type E1:Exp, E2:Exp, Es:Exps; => T E1; T E2, Es; [anywhere] + rule T:Type X:Id = E; => T X; X = E; [anywhere] + +endmodule + + +module SIMPLE-TYPED-DYNAMIC + imports SIMPLE-TYPED-DYNAMIC-SYNTAX + imports DOMAINS +
+

Semantics

+

Values and results

+

These are similar to those of untyped SIMPLE, except that the array +references and the function abstrations now also hold their types. +These types are needed in order to easily compute the type of any +value in the language (see the auxiliary typeOf operation at +the end of this module).

+
k
syntax Val ::= Int | Bool | String + | array(Type,Int,Int) + | lambda(Type,Params,Stmt) + syntax Exp ::= Val + syntax Exps ::= Vals + syntax KResult ::= Val + | Vals // TODO: should not need this +
+

Configuration

+

The configuration is almost identical to that of untyped SIMPLE, +except for a return cell inside the control cell. +This return cell will hold, like in the static semantics of +typed SIMPLE, the expected type of the value returned by the function +being executed. The contents of this cell will be set whenever a +function is invoked and will be checked whenever the evaluation of the +function body encounters an explicit return statement.

+
k
// the syntax declarations below are required because the sorts are
// referenced directly by a production and, because of the way KIL to KORE
// is implemented, the configuration syntax is not available yet
// should simply work once KIL is removed completely
// check other definitions for this hack as well

syntax ControlCell
syntax ControlCellFragment

configuration <T color="red">
                <threads color="orange">
                  <thread multiplicity="*" color="yellow" type="Map">
                    <id color="pink"> 0 </id>
                    <k color="green"> ($PGM:Stmt ~> execute) </k>
//                  <br/>
                    <control color="cyan">
                      <fstack color="blue"> .List </fstack>
                      <xstack color="purple"> .List </xstack>
                      <returnType color="LimeGreen"> void </returnType>
                    </control>
//                  <br/>
                    <env color="violet"> .Map </env>
                    <holds color="black"> .Map </holds>
                  </thread>
                </threads>
//              <br/>
                <genv color="pink"> .Map </genv>
                <store color="white"> .Map </store>
                <busy color="cyan"> .Set </busy>
                <terminated color="red"> .Set </terminated>
                <input color="magenta" stream="stdin"> .List </input>
                <output color="brown" stream="stdout"> .List </output>
                <nextLoc color="gray"> 0 </nextLoc>
              </T>
+

Declarations and Initialization

+

Variable Declaration

+

The undefined construct is now parameterized by a type. +A main difference between untyped SIMPLE and dynamically typed SIMPLE +is that the latter assigns a type to each of its locations and that +type cannot be changed during the execution of the program. We do not +do any memory management in our semantic definitions here, so +locations cannot be reclaimed, garbage collected and/or reused. Each +location corresponds precisely to an allocated variable or array +element, whose type was explicitly or implicitly declared in the +program and does not change. It is therefore safe to type each +location and then never allow that type to change. The typed +undefined values effectively assign both a type and an undefined value +to a location.

+
k
syntax KItem ::= undefined(Type) + + rule <k> T:Type X:Id; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> undefined(T) ...</store> + <nextLoc> L:Int => L +Int 1 </nextLoc> +
+
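For instance, in the following hypothetical program (not one of the distributed tests, but written against the syntax above), the location of x initially holds undefined(int); using x before it receives a proper value makes the execution get stuck, because the variable lookup rule only applies to locations holding values:

    void main() {
      int x;          // the location allocated for x holds undefined(int)
      print(x + 1);   // stuck: undefined(int) is not a Val, so x cannot be looked up
    }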

Array Declaration

+

The dynamic semantics of typed array declarations is similar to that +in untyped SIMPLE, but we have to make sure that we associate the +right type to the allocated locations.

+
k
rule <k> T:Type X:Id[N:Int]; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> array(T, L +Int 1, N) + (L +Int 1)...(L +Int N) |-> undefined(T) ...</store> + <nextLoc> L:Int => L +Int 1 +Int N </nextLoc> + requires N >=Int 0 + + context _:Type _::Exp[HOLE::Exps]; +
+
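For example, assuming a hypothetical test program like the one below, the declaration allocates one location holding array(int, L+1, 3) and three consecutive locations holding undefined(int):

    void main() {
      int a[3];
      a[0] = 10;
      print(sizeOf(a), " ", a[0], "\n");   // prints 3 10
    }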

The desugaring of multi-dimensional arrays into unidimensional ones is also similar to that in untyped SIMPLE, although we have to make sure that all the declared variables have the right types. The auxiliary operation T<Vs>, defined at the end of the file, adds one array dimension to the type T for each element of Vs.

+
k
// TODO: Check the desugaring below to be consistent with the one for untyped simple + + syntax Id ::= "$1" [token] | "$2" [token] + rule T:Type X:Id[N1:Int, N2:Int, Vs:Vals]; + => T[]<Vs> X[N1]; + { + T[][]<Vs> $1=X; + for(int $2=0; $2 <= N1 - 1; ++$2) { + T X[N2,Vs]; + $1[$2] = X; + } + } +
+
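For instance, a two-dimensional declaration int m[2, 3]; unfolds, after one application of the rule above and after the T<Vs> annotations are evaluated away, roughly into the following (with $1 and $2 the fresh identifiers declared above):

    int[] m[2];
    {
      int[][] $1 = m;
      for(int $2 = 0; $2 <= 2 - 1; ++$2) {
        int m[3];
        $1[$2] = m;
      }
    }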

Function declaration

+

Store all function parameters, as well as the return type, as part +of the lambda abstraction. In the spirit of dynamic typing, we will +make sure that parameters are well typed when the function is invoked.

+
k
rule <k> T:Type F:Id(Ps:Params) S => .K ...</k> + <env> Env => Env[F <- L] </env> + <store>... .Map => L |-> lambda(T, Ps, S) ...</store> + <nextLoc> L => L +Int 1 </nextLoc> +
+

Calling main()

+

When done with the first pass, call main().

+
k
syntax KItem ::= "execute" + rule <k> execute => main(.Exps); </k> + <env> Env </env> + <genv> .Map => Env </genv> +
+

Expressions

+

Variable lookup

+
k
rule <k> X:Id => V ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V:Val ...</store> +
+

Variable/Array increment

+
k
context ++(HOLE => lvalue(HOLE)) + rule <k> ++loc(L) => I +Int 1 ...</k> + <store>... L |-> (I:Int => I +Int 1) ...</store> +
+

Arithmetic operators

+
k
rule I1 + I2 => I1 +Int I2 + rule Str1 + Str2 => Str1 +String Str2 + rule I1 - I2 => I1 -Int I2 + rule I1 * I2 => I1 *Int I2 + rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0 + rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0 + rule - I => 0 -Int I + rule I1 < I2 => I1 <Int I2 + rule I1 <= I2 => I1 <=Int I2 + rule I1 > I2 => I1 >Int I2 + rule I1 >= I2 => I1 >=Int I2 + rule V1:Val == V2:Val => V1 ==K V2 + rule V1:Val != V2:Val => V1 =/=K V2 + rule ! T => notBool(T) + rule true && E => E + rule false && _ => false + rule true || _ => true + rule false || E => E +
+

Array lookup

+

Check array bounds, as part of the dynamic typing policy.

+
k
// Same comment as for simple untyped regarding [anywhere] + rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs] + [anywhere] + +// Same comment as for simple untyped regarding [anywhere] + rule array(_:Type, L:Int, M:Int)[N:Int] => lookup(L +Int N) + requires N >=Int 0 andBool N <Int M [anywhere] +
+
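For example, in the hypothetical program below the access a[3] is out of bounds, so neither lookup rule applies and the execution gets stuck at that point, which is how this dynamic semantics signals the violation:

    void main() {
      int a[3];
      a[0] = 1; a[1] = 2; a[2] = 3;
      print(a[3]);   // stuck: the side condition 3 <Int 3 fails
    }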

Size of an array

+
k
rule sizeOf(array(_,_,N)) => N +
+

Function call

+

Define function call and return together, to see their relationship. Note that the operation mkDecls now declares properly typed instantiated variables, and that the semantics of return also checks that the type of the returned value is the expected one.

+
k
syntax KItem ::= (Type,Map,K,ControlCellFragment) + + rule <k> lambda(T,Ps,S)(Vs:Vals) ~> K => mkDecls(Ps,Vs) S return; </k> + <control> + <fstack> .List => ListItem((T',Env,K,C)) ...</fstack> + <returnType> T' => T </returnType> + C + </control> + <env> Env => GEnv </env> + <genv> GEnv </genv> + + rule <k> return V:Val; ~> _ => V ~> K </k> + <control> + <fstack> ListItem((T',Env,K,C)) => .List ...</fstack> + <returnType> T => T' </returnType> + (_ => C) + </control> + <env> _ => Env </env> + requires typeOf(V) ==K T // check the type of the returned value +
+
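For example, in the hypothetical program below the call wrong() gets stuck at its return statement, because typeOf(true) is bool while the returnType cell holds int:

    int wrong() { return true; }   // declared to return an int

    void main() {
      wrong();                     // stuck when return true; is reached
    }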

Like the undefined above, nothing also gets +tagged with a type now. The empty return statement is +completed to return the nothing value tagged as expected.

+
k
syntax Val ::= nothing(Type) + rule <k> return; => return nothing(T); ...</k> <returnType> T </returnType> +
+

Read

+
k
rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input> +
+

Assignment

+

The assignment now checks that the type of the assigned location is +preserved:

+
k
context (HOLE => lvalue(HOLE)) = _ + + rule <k> loc(L) = V:Val => V ...</k> <store>... L |-> (V' => V) ...</store> + requires typeOf(V) ==K typeOf(V') +
+
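For instance, in the hypothetical program below the first assignment succeeds, while the second one gets stuck because it would change the type of the location of s from string to int:

    void main() {
      string s;
      s = "abc";   // fine: typeOf("abc") equals typeOf(undefined(string))
      s = 42;      // stuck: an int value cannot be stored at a string location
    }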

Statements

+

Blocks

+
k
rule {} => .K + rule <k> { S } => S ~> setEnv(Env) ...</k> <env> Env </env> +
+

Sequential composition

+
k
rule S1:Stmt S2:Stmt => S1 ~> S2 +
+

Expression statements

+
k
rule _:Val; => .K +
+

Conditional

+
k
rule if ( true) S else _ => S + rule if (false) _ else S => S +
+

While loop

+
k
rule while (E) S => if (E) {S while(E)S} +
+

Print

+

We only allow printing integers and strings:

+
k
rule <k> print(V:Val, Es => Es); ...</k> <output>... .List => ListItem(V) </output> + requires typeOf(V) ==K int orBool typeOf(V) ==K string + rule print(.Vals); => .K +
+
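For example, in the hypothetical program below the first print statement succeeds, while the second one gets stuck because booleans are not printable:

    void main() {
      print("result: ", 42, "\n");   // fine: strings and integers can be printed
      print(true);                   // stuck: typeOf(true) is bool
    }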

Exceptions

+

Exception parameters are now typed, but note that the semantics below works correctly only when the thrown exception has the same type as the innermost try-catch parameter. To keep things simple, for the time being we can assume that SIMPLE only throws and catches integer values, in which case our semantics below works fine:

+
k
syntax KItem ::= (Param,Stmt,K,Map,ControlCellFragment) // Param instead of Id + + syntax KItem ::= "popx" + + rule <k> (try S1 catch(P) S2 => S1 ~> popx) ~> K </k> + <control> + <xstack> .List => ListItem((P, S2, K, Env, C)) ...</xstack> + C + </control> + <env> Env </env> + + rule <k> popx => .K ...</k> + <xstack> ListItem(_) => .List ...</xstack> + + rule <k> throw V:Val; ~> _ => { T X = V; S2 } ~> K </k> + <control> + <xstack> ListItem((T:Type X:Id, S2, K, Env, C)) => .List ...</xstack> + (_ => C) + </control> + <env> _ => Env </env> +
+
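For example, the hypothetical program below throws and catches an integer value, so it falls within the fragment for which the semantics above behaves as intended (it prints 7):

    void main() {
      try {
        throw 7;
      } catch(int x) {
        print(x, "\n");
      }
    }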

Threads

+

Thread creation

+
k
rule <thread>... + <k> spawn S => !T:Int +Int 1 ...</k> + <env> Env </env> + ...</thread> + (.Bag => <thread>... + <k> S </k> + <env> Env </env> + <id> !T +Int 1 </id> + ...</thread>) +
+

Thread termination

+
k
rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag) + <busy> Busy => Busy -Set keys(H) </busy> + <terminated>... .Set => SetItem(T) ...</terminated> +
+

Thread joining

+
k
rule <k> join T:Int; => .K ...</k> + <terminated>... SetItem(T) ...</terminated> +
+

Acquire lock

+
k
rule <k> acquire V:Val; => .K ...</k> + <holds>... .Map => V |-> 0 ...</holds> + <busy> Busy (.Set => SetItem(V)) </busy> + requires (notBool(V in Busy:Set)) + + rule <k> acquire V; => .K ...</k> + <holds>... V:Val |-> (N:Int => N +Int 1) ...</holds> +
+

Release lock

+
k
rule <k> release V:Val; => .K ...</k> + <holds>... V |-> (N => N:Int -Int 1) ...</holds> + requires N >Int 0 + + rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds> + <busy>... SetItem(V) => .Set ...</busy> +
+

Rendezvous synchronization

+
k
rule <k> rendezvous V:Val; => .K ...</k> + <k> rendezvous V; => .K ...</k> +
+

Auxiliary declarations and operations

+

Turns a list of parameters and a list of instance values for them +into a list of variable declarations.

+
k
syntax Stmt ::= mkDecls(Params,Vals) [function] + rule mkDecls((T:Type X:Id, Ps:Params), (V:Val, Vs:Vals)) + => T X=V; mkDecls(Ps,Vs) + rule mkDecls(.Params,.Vals) => {} +
+
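For instance, a call binding the parameters (int a, bool b) to the values (1, true) unfolds mkDecls as follows (each step is one application of the rules above):

    mkDecls((int a, bool b), (1, true))
      => int a = 1; mkDecls((bool b), (true))
      => int a = 1; bool b = true; mkDecls(.Params, .Vals)
      => int a = 1; bool b = true; {}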

Location lookup.

+
k
syntax Exp ::= lookup(Int) // see NOTES.md for why Exp instead of KItem + rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store> +
+

Environment recovery.

+
k
// TODO: same comment regarding setEnv(...) as for simple untyped + + syntax KItem ::= setEnv(Map) + rule <k> setEnv(Env) => .K ...</k> <env> _ => Env </env> + rule (setEnv(_) => .K) ~> setEnv(_) +
+

lvalue and loc

+
k
syntax Exp ::= lvalue(K) + syntax Val ::= loc(Int) + + rule <k> lvalue(X:Id => loc(L)) ...</k> <env>... X |-> L:Int ...</env> + + //context lvalue(_[HOLE]) + //context lvalue(HOLE[_]) + context lvalue(_::Exp[HOLE::Exps]) + context lvalue(HOLE::Exp[_::Exps]) + + rule lvalue(lookup(L:Int) => loc(L)) +
+

Adds the corresponding depth to an array type

+
k
syntax Type ::= Type "<" Vals ">" [function] + rule T:Type<_,Vs:Vals> => T[]<Vs> + rule T:Type<.Vals> => T +
+
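For instance, int<10, 20, .Vals> evaluates as follows, adding one array dimension per element of the value list:

    int<10, 20, .Vals>  =>  int[]<20, .Vals>  =>  int[][]<.Vals>  =>  int[][]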

Sequences of locations.

+
k
syntax Map ::= Int "..." Int "|->" K [function] + rule N...M |-> _ => .Map requires N >Int M + rule N...M |-> K => N |-> K (N +Int 1)...M |-> K requires N <=Int M + +// Type of a value. + syntax Type ::= typeOf(K) [function] + rule typeOf(_:Int) => int + rule typeOf(_:Bool) => bool + rule typeOf(_:String) => string + rule typeOf(array(T,_,_)) => (T[]) // () needed! K parses [] as "no tags" + rule typeOf(lambda(T,Ps,_)) => getTypes(Ps) -> T + rule typeOf(undefined(T)) => T + rule typeOf(nothing(T)) => T +
+

List of the types of the parameters.

+
k
syntax Types ::= getTypes(Params) [function] + rule getTypes(T:Type _:Id) => T, .Types // I would like to not use .Types + rule getTypes(T:Type _:Id, P, Ps) => T, getTypes(P,Ps) + rule getTypes(.Params) => void, .Types +endmodule +
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/2_languages/1_simple/2_typed/programs/index.html b/k-distribution/pl-tutorial/2_languages/1_simple/2_typed/programs/index.html new file mode 100644 index 00000000000..daa976b7fde --- /dev/null +++ b/k-distribution/pl-tutorial/2_languages/1_simple/2_typed/programs/index.html @@ -0,0 +1,387 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

The programs in this folder are typed variants of the SIMPLE untyped programs. +These programs will be executed both with the dynamic and with the static +semantics of the typed SIMPLE language. Each of the semantics contains its +own results folder showing the expected results of executing these programs.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/2_languages/2_kool/1_untyped/kool-untyped/index.html b/k-distribution/pl-tutorial/2_languages/2_kool/1_untyped/kool-untyped/index.html new file mode 100644 index 00000000000..8b712fa422f --- /dev/null +++ b/k-distribution/pl-tutorial/2_languages/2_kool/1_untyped/kool-untyped/index.html @@ -0,0 +1,1526 @@ + + + + + + + + + + + + + + +KOOL — Untyped | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

KOOL — Untyped

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K semantic definition of the untyped KOOL language. KOOL +is aimed at being a pedagogical and research language that captures +the essence of the object-oriented programming paradigm. Its untyped +variant discussed here is simpler than the typed one, ignoring several +intricate aspects of types in the presence of objects. A program +consists of a set of class declarations. Each class can extend at +most one other class (KOOL is single-inheritance). A class can +declare a set of fields and a set of methods, all public and called +the class' members. Specifically, KOOL includes the +following features:

+
    +
  • +

    Class declarations, where a class may or may not explicitly extend another class. In case a class does not explicitly extend another class, it is assumed to extend the default top-most and empty (i.e., no members) class called Object. Each class is required to declare precisely one homonymous method, called its constructor. Each valid program should contain one class named Main, whose constructor, Main(), takes no arguments. The execution of a program consists of creating an object instance of class Main and invoking the constructor Main() on it, that is, of executing new Main(); (a minimal such program is sketched right after this list).

    +
  • +
  • +

    All features of SIMPLE (see examples/simple/untyped), +i.e., multidimensional arrays, function (here called "method") +abstractions with call-by-value parameter passing style and static +scoping, blocks with locals, input/output, parametric exceptions, and +concurrency via dynamic thread creation/termination and synchronization. +The only change in the syntax of SIMPLE when imported in KOOL is the +function declaration keyword, function, which is changed into +method. The exact same desugaring macros from SIMPLE are +also included in KOOL. We can think of KOOL's classes as embedding +SIMPLE programs (extended with OO constructs, as discussed next).

    +
  • +
  • +

    Object creation using the new C(e1,...,en) +expression construct. An object instance of class C is first +created and then the constructor C(e1,...,en) is implicitly +called on that object. KOOL only allows (and requires) one +constructor per class. The class constructor can be called either +implicitly during a new object creation for the class, or explicitly. +The superclass constructor is not implicitly invoked when a +class constructor is invoked; if you want to invoke the superclass +constructor from a subclass constructor then you have to do it +explicitly.

    +
  • +
  • +

    An expression construct this, which evaluates to the +current object.

    +
  • +
  • +

    An expression construct super, which is used (only) in +combination with member lookup (see next) to refer to a superclass +field or method.

    +
  • +
  • +

    A member lookup expression construct e.x, where e +is an expression (either an expression expected to evaluate to an object +or the super construct) and x is a class member name, +that is, a field or a method name.

    +
  • +
  • +

    Expression constructs e instanceOf C and +(C) e, where e is an expression expected +to evaluate to an object and C a class name. The former +tells whether the class of e is a subclass of C, +that is, whether e can be used as an instance of C, +and the latter changes the class of e to C. These +operations always succeed: the former returns a Boolean value, while +the latter changes the current class of e to C +regardless of whether it is safe to do so or not. The typed version +of KOOL will check the safety of casting by ensuring that the instance +class of the object is a subclass of C. In untyped KOOL we +do not want to perform this check because we want to allow the +programmer maximum of flexibility: if one always accesses only +available members, then the program can execute successfully despite +the potentially unsafe cast.

    +
  • +
+
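For example, a smallest complete KOOL program (a hypothetical hello-world, not one of the distributed examples) consists of a single Main class whose constructor prints a message:

    class Main {
      method Main() {
        print("Hello, KOOL!\n");
      }
    }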

There are some specific aspects of KOOL that need to be discussed.

+

First, KOOL is higher-order, allowing function abstractions to be +treated like any other values in the language. For example, if +m is a method of object e then e.m +evaluates to the corresponding function abstraction. The function +abstraction is in fact a closure, because in addition to the method +parameters and body it also encapsulates the object value (i.e., the +environment of the object together with its current class—see below) +that e evaluates to. This way, function abstractions can be +invoked anywhere and have the capability to change the state of their +object. For example, if m is a method of object e +which increments a field c of e when invoked, and if +getm is another method of e which simply returns +m when invoked, then the double application +(e.getm())() has the same effect as e.m(), that is, +increments the counter c of e. Note that the +higher-order nature of KOOL was not originally planned; it came as a +natural consequence of evaluating methods to closures and we decided +to keep it. If you do not like it then do not use it.

+
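The counter example mentioned above can be sketched as the following hypothetical program, written against the KOOL syntax defined below (the class and member names are chosen only for illustration):

    class Counter {
      var c;
      method Counter() { c = 0; }
      method m() { c = c + 1; }
      method getm() { return m; }
    }

    class Main {
      method Main() {
        var o = new Counter();
        (o.getm())();          // same effect as o.m(): increments o's field c
        o.m();
        print(o.c, "\n");      // prints 2
      }
    }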

Second, since all the fields and methods are public in KOOL and since +they can be redeclared in subclasses, it is not immediately clear how +to lookup the member x when we write e.x and +e is different from super. We distinguish two cases, +depending on whether e.x occurs in a method invocation +context (i.e., e.x(...)) or in a field context. KOOL has +dynamic method dispatch, so if e.x is invoked as a method +then x will be searched for starting with the instance class of +the object value to which e evaluates. If e.x +occurs in a non-method-invocation context then x will be +treated as a field (although it may hold a method closure due to the +higher-order nature of KOOL) and thus will be searched starting with +the current class of the object value of e (which, because of +this and casting, may be different from its instance class). +In order to achieve the above, each object value will consist of a +pair holding the current class of the object and an environment stack +with one layer for each class in the object's instance class hierarchy.

+

Third, although KOOL uses dynamic method dispatch, its capabilities described above are powerful enough to allow us to mimic static method dispatch. For example, suppose that you want to invoke method m() statically. Then all you need to do is to declare a local variable and bind it to m, for example var staticm = m;, and then call staticm(). This works because staticm is first bound to the method closure that m evaluates to, and then looked up as any local variable when invoked. We only enable dynamic method dispatch when we have an object member in an application position, e.g., m().

+

In what follows, we limit our comments to the new, KOOL-specific aspects of the language. We refer the reader to the untyped SIMPLE language for documentation on the remaining features, because those were all borrowed from SIMPLE.

+
k
module KOOL-UNTYPED-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

The syntax of KOOL extends that of SIMPLE with object-oriented constructs. We removed from the K annotated syntax of SIMPLE two constructs, namely the one for function declarations (because we want to call them methods now) and the one for function application (because application is not strict in the first argument anymore; it needs to initiate dynamic method dispatch). The additional syntax includes:

+
    +
  • First, we need a new dedicated identifier, Object, for +the default top-most class.
  • +
  • Second, we rename the function keyword of SIMPLE into method.
  • +
  • Third, we add syntax for class declarations together with a +macro making classes which extend nothing to extend Object.
  • +
  • Fourth, we change the strictness attribute of application +into strict(2).
  • +
  • Finally, we add syntax and corresponding strictness +for the KOOL object-oriented constructs.
  • +
+
k
syntax Id ::= "Object" [token] | "Main" [token] + + syntax Stmt ::= "var" Exps ";" + | "method" Id "(" Ids ")" Block // called "function" in SIMPLE + | "class" Id Block // KOOL + | "class" Id "extends" Id Block // KOOL + + syntax Exp ::= Int | Bool | String | Id + | "this" // KOOL + | "super" // KOOL + | "(" Exp ")" [bracket] + | "++" Exp + | Exp "instanceOf" Id [strict(1)] // KOOL + | "(" Id ")" Exp [strict(2)] // KOOL cast + | "new" Id "(" Exps ")" [strict(2)] // KOOL + | Exp "." Id // KOOL + > Exp "[" Exps "]" [strict] + > Exp "(" Exps ")" [strict(2)] // was strict in SIMPLE + | "-" Exp [strict] + | "sizeOf" "(" Exp ")" [strict] + | "read" "(" ")" + > left: + Exp "*" Exp [strict, left] + | Exp "/" Exp [strict, left] + | Exp "%" Exp [strict, left] + > left: + Exp "+" Exp [strict, left] + | Exp "-" Exp [strict, left] + > non-assoc: + Exp "<" Exp [strict, non-assoc] + | Exp "<=" Exp [strict, non-assoc] + | Exp ">" Exp [strict, non-assoc] + | Exp ">=" Exp [strict, non-assoc] + | Exp "==" Exp [strict, non-assoc] + | Exp "!=" Exp [strict, non-assoc] + > "!" Exp [strict] + > left: + Exp "&&" Exp [strict(1), left] + | Exp "||" Exp [strict(1), left] + > "spawn" Block + > Exp "=" Exp [strict(2), right] + + syntax Ids ::= List{Id,","} + + syntax Exps ::= List{Exp,","} [strict, overload(exps)] + syntax Val + syntax Vals ::= List{Val,","} [overload(exps)] + + syntax Block ::= "{" "}" + | "{" Stmt "}" + + syntax Stmt ::= Block + | Exp ";" [strict] + | "if" "(" Exp ")" Block "else" Block [avoid, strict(1)] + | "if" "(" Exp ")" Block [macro] + | "while" "(" Exp ")" Block + | "for" "(" Stmt Exp ";" Exp ")" Block [macro] + | "return" Exp ";" [strict] + | "return" ";" [macro] + | "print" "(" Exps ")" ";" [strict] + | "try" Block "catch" "(" Id ")" Block + | "throw" Exp ";" [strict] + | "join" Exp ";" [strict] + | "acquire" Exp ";" [strict] + | "release" Exp ";" [strict] + | "rendezvous" Exp ";" [strict] + + syntax Stmt ::= Stmt Stmt [right] +
+

Old desugaring rules, from SIMPLE

+
k
rule if (E) S => if (E) S else {} + rule for(Start Cond; Step) {S} => {Start while (Cond) {S Step;}} + rule var E1::Exp, E2::Exp, Es::Exps; => var E1; var E2, Es; [anywhere] + rule var X::Id = E; => var X; X = E; [anywhere] +
+

New desugaring rule

+
k
rule class C:Id S => class C extends Object S // KOOL + +endmodule +
+

Semantics

+

We first discuss the new configuration of KOOL, which extends that of +SIMPLE. Then we include the semantics of the constructs borrowed from +SIMPLE unchanged; we refrain from discussing those, because they were +already discussed in the K definition of SIMPLE. Then we discuss +changes to SIMPLE's semantics needed for the more general meaning of +the previous SIMPLE constructs (for example for thread spawning, +assignment, etc.). Finally, we discuss in detail the +semantics of the additional KOOL constructs.

+
k
module KOOL-UNTYPED + imports KOOL-UNTYPED-SYNTAX + imports DOMAINS +
+

Configuration

+

KOOL removes one cell and adds two nested cells to the configuration +of SIMPLE. The cell which is removed is the one holding the global +environment, because a KOOL program consists of a set of classes only, +with no global declarations. In fact, since informally speaking each +KOOL class now includes a SIMPLE program, it is safe to say that the +global variables in SIMPLE became class fields in KOOL. Let us now +discuss the new cells that are added to the configuration of SIMPLE.

+
    +
  • +

    The cell crntObj holds data pertaining to the current object, that is, the object environment in which the code in cell k executes: crntClass holds the current class (which can change as methods of the current object are invoked); envStack holds the stack of environments as a list, each layer corresponding to one class in the object's instance class hierarchy; location, which is optional, holds the location in the store where the current object is or has to be located (this is useful both for method closures and for the semantics of object creation).

    +
  • +
  • +

    The cell classes holds all the declared classes, each +class being held in its own class cell which contains a name +(className), a parent (extends), and the actual +member declarations (declarations).

    +
  • +
+
k
// the syntax declarations below are required because the sorts are + // referenced directly by a production and, because of the way KIL to KORE + // is implemented, the configuration syntax is not available yet + // should simply work once KIL is removed completely + // check other definitions for this hack as well + syntax EnvCell + syntax ControlCell + syntax EnvStackCell + syntax CrntObjCellFragment + + configuration <T color="red"> + <threads color="orange"> + <thread multiplicity="*" type="Set" color="yellow"> + <k color="green"> $PGM:Stmt ~> execute </k> + //<br/> // TODO(KORE): support latex annotations #1799 + <control color="cyan"> + <fstack color="blue"> .List </fstack> + <xstack color="purple"> .List </xstack> + //<br/> // TODO(KORE): support latex annotations #1799 + <crntObj color="Fuchsia"> // KOOL + <crntClass> Object </crntClass> + <envStack> .List </envStack> + <location multiplicity="?"> .K </location> + </crntObj> + </control> + //<br/> // TODO(KORE): support latex annotations #1799 + <env color="violet"> .Map </env> + <holds color="black"> .Map </holds> + <id color="pink"> 0 </id> + </thread> + </threads> + //<br/> // TODO(KORE): support latex annotations #1799 + <store color="white"> .Map </store> + <busy color="cyan">.Set </busy> + <terminated color="red"> .Set </terminated> + <input color="magenta" stream="stdin"> .List </input> + <output color="brown" stream="stdout"> .List </output> + <nextLoc color="gray"> 0 </nextLoc> + //<br/> // TODO(KORE): support latex annotations #1799 + <classes color="Fuchsia"> // KOOL + <classData multiplicity="*" type="Map" color="Fuchsia"> + // the Map has as its key the first child of the cell, + // in this case the className cell. + <className color="Fuchsia"> Main </className> + <baseClass color="Fuchsia"> Object </baseClass> + <declarations color="Fuchsia"> .K </declarations> + </classData> + </classes> + </T> +
+

Unchanged Semantics from untyped SIMPLE

+

The semantics below is taken over from SIMPLE unchanged. The semantics of function declaration and invocation, including the use of the special lambda abstraction value, needs to change in order to account for the fact that methods are now invoked in their object's environment. The semantics of function return actually stays unchanged. Also, the semantics of program initialization is different: now we have to create an instance of the Main class, which also calls the constructor Main(), while in SIMPLE we only had to invoke the function main(). Finally, the semantics of thread spawning needs to change, too: the parent thread needs to also share its object environment with the spawned thread (in addition to its local environment, like in SIMPLE). This is needed in order to be able to spawn method invocations under dynamic method dispatch; for example, spawn { run(); } will need to look up the method run() in the newly created thread, an operation which will most likely fail unless the child thread sees the object environment of the parent thread. Note that the spawn statement of KOOL is more permissive than the threads of Java. In fact, the latter can be implemented in terms of our spawn; see the program threads.kool for a sketch.

+

Below is a subset of the values of SIMPLE, which are also values +of KOOL. We will add other values later in the semantics, such as +object and method closures.

+
k
syntax Val ::= Int | Bool | String + | array(Int,Int) + syntax Exp ::= Val + syntax Exps ::= Vals + syntax KResult ::= Val + syntax KResult ::= Vals +
+

The semantics below are taken verbatim from the untyped SIMPLE +definition.

+
k
syntax KItem ::= "undefined" + + rule <k> var X:Id; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> undefined ...</store> + <nextLoc> L:Int => L +Int 1 </nextLoc> + + + context var _:Id[HOLE]; + + rule <k> var X:Id[N:Int]; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> array(L +Int 1, N) + (L +Int 1) ... (L +Int N) |-> undefined ...</store> + <nextLoc> L:Int => L +Int 1 +Int N </nextLoc> + requires N >=Int 0 + + + syntax Id ::= "$1" [token] | "$2" [token] + rule var X:Id[N1:Int, N2:Int, Vs:Vals]; + => var X[N1]; + { + var $1=X; + for(var $2=0; $2 <= N1 - 1; ++$2) { + var X[N2,Vs]; + $1[$2] = X; + } + } + + + rule <k> X:Id => V ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V:Val ...</store> + + + context ++(HOLE => lvalue(HOLE)) + rule <k> ++loc(L) => I +Int 1 ...</k> + <store>... L |-> (I:Int => I +Int 1) ...</store> + + + rule I1 + I2 => I1 +Int I2 + rule Str1 + Str2 => Str1 +String Str2 + rule I1 - I2 => I1 -Int I2 + rule I1 * I2 => I1 *Int I2 + rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0 + rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0 + rule - I => 0 -Int I + rule I1 < I2 => I1 <Int I2 + rule I1 <= I2 => I1 <=Int I2 + rule I1 > I2 => I1 >Int I2 + rule I1 >= I2 => I1 >=Int I2 + + rule V1:Val == V2:Val => V1 ==K V2 + rule V1:Val != V2:Val => V1 =/=K V2 + rule ! T => notBool(T) + rule true && E => E + rule false && _ => false + rule true || _ => true + rule false || E => E + + + rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs] + [anywhere] + + rule array(L,_)[N:Int] => lookup(L +Int N) + [anywhere] + + + rule sizeOf(array(_,N)) => N +
+

The semantics of function application needs to change into dynamic +method dispatch invocation, which is defined shortly. However, +interestingly, the semantics of return stays unchanged.

+
k
rule <k> return(V:Val); ~> _ => V ~> K </k> + <control> + <fstack> ListItem(fstackFrame(Env,K,XS,<crntObj> CO </crntObj>)) => .List ...</fstack> + <xstack> _ => XS </xstack> + <crntObj> _ => CO </crntObj> + </control> + <env> _ => Env </env> + + syntax Val ::= "nothing" + rule return; => return nothing; + + + rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input> + + + context (HOLE => lvalue(HOLE)) = _ + + rule <k> loc(L) = V:Val => V ...</k> <store>... L |-> (_ => V) ...</store> + + rule {} => .K + rule <k> { S } => S ~> setEnv(Env) ...</k> <env> Env </env> + + + rule S1::Stmt S2::Stmt => S1 ~> S2 + + rule _:Val; => .K + + rule if ( true) S else _ => S + rule if (false) _ else S => S + + rule while (E) S => if (E) {S while(E)S} + + rule <k> print(V:Val, Es => Es); ...</k> <output>... .List => ListItem(V) </output> + rule print(.Vals); => .K + + + syntax KItem ::= xstackFrame(Id,Stmt,K,Map,K) + // TODO(KORE): drop the additional production once parsing issue #1842 is fixed + | (Id,Stmt,K,Map,K) + + syntax KItem ::= "popx" + + rule <k> (try S1 catch(X) {S2} => S1 ~> popx) ~> K </k> + <control> + <xstack> .List => ListItem(xstackFrame(X, S2, K, Env, C)) ...</xstack> + C + </control> + <env> Env </env> + + rule <k> popx => .K ...</k> + <xstack> ListItem(_) => .List ...</xstack> + + rule <k> throw V:Val; ~> _ => { var X = V; S2 } ~> K </k> + <control> + <xstack> ListItem(xstackFrame(X, S2, K, Env, C)) => .List ...</xstack> + (_ => C) + </control> + <env> _ => Env </env> +
+

Thread spawning needs a new semantics, because we want the child +thread to also share the object environment with its parent. The new +semantics of thread spawning will be defined shortly. However, +interestingly, the other concurrency constructs keep their semantics +from SIMPLE unchanged.

+
k
// TODO(KORE): ..Bag should be . throughout this definition #1772 + rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag) + /* + rule (<thread>... <k>.</k> <holds>H</holds> <id>T</id> ...</thread> => .) + */ + <busy> Busy => Busy -Set keys(H) </busy> + <terminated>... .Set => SetItem(T) ...</terminated> + + rule <k> join T:Int; => .K ...</k> + <terminated>... SetItem(T) ...</terminated> + + rule <k> acquire V:Val; => .K ...</k> + <holds>... .Map => V |-> 0 ...</holds> + <busy> Busy (.Set => SetItem(V)) </busy> + requires (notBool(V in Busy:Set)) + + rule <k> acquire V; => .K ...</k> + <holds>... V:Val |-> (N:Int => N +Int 1) ...</holds> + + rule <k> release V:Val; => .K ...</k> + <holds>... V |-> (N => N:Int -Int 1) ...</holds> + requires N >Int 0 + + rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds> + <busy>... SetItem(V) => .Set ...</busy> + + rule <k> rendezvous V:Val; => .K ...</k> + <k> rendezvous V; => .K ...</k> +
+

Unchanged auxiliary operations from untyped SIMPLE

+
k
syntax Stmt ::= mkDecls(Ids,Vals) [function] + rule mkDecls((X:Id, Xs:Ids), (V:Val, Vs:Vals)) => var X=V; mkDecls(Xs,Vs) + rule mkDecls(.Ids,.Vals) => {} + + // TODO(KORE): clarify sort inferences #1803 + syntax Exp ::= lookup(Int) + /* + syntax KItem ::= lookup(Int) + */ + rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store> + + syntax KItem ::= setEnv(Map) + rule <k> setEnv(Env) => .K ...</k> <env> _ => Env </env> + rule (setEnv(_) => .K) ~> setEnv(_) + // TODO: How can we make sure that the second rule above applies before the first one? + // Probably we'll deal with this using strategies, eventually. + + syntax Exp ::= lvalue(K) + syntax Val ::= loc(Int) + + rule <k> lvalue(X:Id => loc(L)) ...</k> <env>... X |-> L:Int ...</env> + + context lvalue(_::Exp[HOLE::Exps]) + context lvalue(HOLE::Exp[_::Exps]) + + rule lvalue(lookup(L:Int) => loc(L)) + + + syntax Map ::= Int "..." Int "|->" K + [function] + rule N...M |-> _ => .Map requires N >Int M + rule N...M |-> K => N |-> K (N +Int 1)...M |-> K requires N <=Int M +
+

Changes to the existing untyped SIMPLE semantics

+

When we extend a language, sometimes we need to do more than just add +new language constructs and semantics for them. Sometimes we want to +also extend the semantics of existing language constructs, in order to +get more from them.

+

Program initialization

+

In SIMPLE, once all the global declarations were processed, the function main() was invoked. In KOOL, the global declarations are classes, and their specific semantics is given shortly; essentially, they are pre-processed one by one and added into the class cell structure in the configuration. Once all the classes are processed, the computation item execute, which was placed right after the program in the initial configuration, is reached. In SIMPLE, the program was initialized by calling the function main(). In KOOL, the program is initialized by creating an object instance of class Main. This will also implicitly call the method Main() (the Main class constructor). The emptiness of the env cell below is just a sanity check, to make sure that the user has not declared anything but classes at the top level of the program.

+
k
syntax KItem ::= "execute" + rule <k> execute => new Main(.Exps); </k> <env> .Map </env> +
+

The semantics of new (defined below) requires the +execution of all the class' declarations (and also of its +superclasses').

+

Object and method closures

+

Before we can define the semantics of method application (previously +called function application in SIMPLE), we need to add two more values +to the language, namely object and method closures:

+
k
syntax Val ::= objectClosure(Id, List) + | methodClosure(Id,Int,Ids,Stmt) +
+

An object value consists of an objectClosure-wrapped bag +containing the current class of the object and the environment stack +of the object. The current class of an object will always be one of +the classes mapped to an environment in the environment stack of the +object. A method closure encapsulates the method's parameters and +code (last two arguments), as well as the object context in which the +method code should execute. This object context includes the current +class of the object (the first argument of methodClosure) and +the object environment stack (located in the object stored at the +location specified as the second argument of methodClosure).

+

Method application

+

KOOL has a complex mechanism to invoke methods, because it allows both dynamic method dispatch and methods as first-class-citizen values (the latter making it a higher-order language). The invocation mechanism will be defined later. What is sufficient to know for now is that the two arguments of the application construct eventually reduce to values, the first being a method closure and the second a list of values. The semantics of the method closure application is then as expected: the local environment and control are stacked, then we switch to the method closure's class and object environment and execute the method body. The mkDecls construct is the one that came with the unchanged semantics of SIMPLE above.

+
k
syntax KItem ::= fstackFrame(Map,K,List,K) + // TODO(KORE): drop the additional production once parsing issue #1842 is fixed + | (Map,K,K) + + rule <k> methodClosure(Class,OL,Xs,S)(Vs:Vals) ~> K + => mkDecls(Xs,Vs) S return; </k> + <env> Env => .Map </env> + <store>... OL |-> objectClosure(_, EnvStack)...</store> + //<br/> // TODO(KORE): support latex annotations #1799 + <control> + <xstack> XS </xstack> + <fstack> .List => ListItem(fstackFrame(Env, K, XS, <crntObj> Obj' </crntObj>)) + ...</fstack> + <crntObj> Obj' => <crntClass> Class </crntClass> <envStack> EnvStack </envStack> </crntObj> + </control> +
+

Spawn

+

We want to extend the semantics of spawn to also share the +current object environment with the child thread, in addition to the +current environment. This extension will allow us to also use method +invocations in the spawned statements, which will be thus looked up as +expected, using dynamic method dispatch. This lookup operation would +fail if the child thread did not have access to its parent's object +environment.

+
k
rule <thread>... + <k> spawn S => !T:Int ...</k> + <env> Env </env> + <crntObj> Obj </crntObj> + ...</thread> + (.Bag => <thread>... + <k> S </k> + <env> Env </env> + <id> !T </id> + <crntObj> Obj </crntObj> + ...</thread>) +
+

Semantics of the new KOOL constructs

+

Class declaration

+

Initially, the classes forming the program are moved into their +corresponding cells:

+
k
rule <k> class Class1 extends Class2 { S } => .K ...</k> + <classes>... (.Bag => <classData> + <className> Class1 </className> + <baseClass> Class2 </baseClass> + <declarations> S </declarations> + </classData>) + ...</classes> +
+

Method declaration

+

Like in SIMPLE, method names are added to the environment and bound to their code. However, unlike in SIMPLE, where each function was executed in the same environment, namely the program's global environment, a method in KOOL needs to be executed in its object's environment. Thus, methods evaluate to closures, which encapsulate their object's context (i.e., the current class and environment stack of the object) in addition to the method's parameters and body. This approach of binding method names to method closures in the environment also allows objects to pass their methods to other objects, to dynamically change their methods by assigning them other method closures, and even to allow all these to be done from other objects. This gives the KOOL programmer a lot of power; one should use this power wisely, though, because programs can easily become hard to understand and reason about if one overuses these features.

+
k
rule <k> method F:Id(Xs:Ids) S => .K ...</k> + <crntClass> Class:Id </crntClass> + <location> OL:Int </location> + <env> Env => Env[F <- L] </env> + <store>... .Map => L |-> methodClosure(Class,OL,Xs,S) ...</store> + <nextLoc> L => L +Int 1 </nextLoc> +
+

New

+

The semantics of new consists of two actions: memory +allocation for the new object and execution of the corresponding +constructor. Then the created object is returned as the result of the +new operation; the value returned by the constructor, if any, +is discarded. The current environment and object are stored onto the +stack and recovered after new (according to the semantics of +return borrowed from SIMPLE, when the statement +return this; in the rule below is reached and evaluated), +because the object creation part of new will destroy them. +The rule below also initializes the object creation process by +emptying the local environment and the current object, and allocating +a location in the store where the created object will be eventually +stored (this is what the storeObj task after the object +creation task in the rule below will do—its rule is defined +shortly). The location where the object will be stored is also made +available in the crntObj cell, so that method closures can +refer to it (see rule above).

+
k
syntax KItem ::= "envStackFrame" "(" Id "," Map ")" + + rule <k> new Class:Id(Vs:Vals) ~> K + => create(Class) ~> storeObj ~> Class(Vs); return this; </k> + <env> Env => .Map </env> + <nextLoc> L:Int => L +Int 1 </nextLoc> + //<br/> // TODO(KORE): support latex annotations #1799 + <control> <xstack> XS </xstack> + <crntObj> Obj + => <crntClass> Object </crntClass> + <envStack> ListItem(envStackFrame(Object, .Map)) </envStack> + <location> L </location> + </crntObj> + <fstack> .List => ListItem(fstackFrame(Env, K, XS, <crntObj> Obj </crntObj>)) ...</fstack> + </control> +
+

The creation of a new object (the memory allocation part only) is +a recursive process, requiring to first create an object for the +superclass. A memory object representation is a layered structure: +for each class on the path from the instance class to the root of the +hierarchy there is a layer including the memory allocated for the +members (both fields and methods) of that class.

+
k
syntax KItem ::= create(Id) + + rule <k> create(Class:Id) + => create(Class1) ~> setCrntClass(Class) ~> S ~> addEnvLayer ...</k> + <className> Class </className> + <baseClass> Class1:Id </baseClass> + <declarations> S </declarations> + + rule <k> create(Object) => .K ...</k> +
+

The next operation sets the current class of the current object. +This is necessary to be done at each layer, because the current class +of the object is enclosed as part of the method closures (see the +semantics of method declarations above).

+
k
syntax KItem ::= setCrntClass(Id) + + rule <k> setCrntClass(C) => .K ...</k> + <crntClass> _ => C </crntClass> +
+

The next operation adds a new tagged environment layer to the +current object and gets ready for the next layer by clearing the +environment (note that create expects the environment to be +empty).

+
k
syntax KItem ::= "addEnvLayer" + + rule <k> addEnvLayer => .K ...</k> + <env> Env => .Map </env> + <crntClass> Class:Id </crntClass> + <envStack> .List => ListItem(envStackFrame(Class, Env)) ...</envStack> +
+

The following operation stores the created object at the location +reserved by new. Note that the location reserved by +new was temporarily stored in the crntObj cell +precisely for this purpose. Now that the newly created object is +stored at its location and that all method closures are aware of it, +the location is unnecessary and thus we delete it from the +crntObj cell.

+
k
syntax KItem ::= "storeObj" + + rule <k> storeObj => .K ...</k> + <crntObj> <crntClass> CC </crntClass> <envStack> ES </envStack> (<location> L:Int </location> => .Bag) </crntObj> + <store>... .Map => L |-> objectClosure(CC, ES) ...</store> +
+

Self reference

+

The semantics of this is straightforward: evaluate to the +current object.

+
k
rule <k> this => objectClosure(CC, ES) ...</k> + <crntObj> <crntClass> CC </crntClass> <envStack> ES </envStack> </crntObj> +
+

Object member access

+

We can access an object member (field or method) either explicitly, +using the construct e.x, or implicitly, using only the member +name x directly. The borrowed semantics of SIMPLE will +already lookup a sole name in the local environment. The first rule +below reduces implicit member access to explicit access when the name +cannot be found in the local environment. There are two cases to +analyze for explicit object member access, depending upon whether the +object is a proper object or it is just a redirection to the parent +class via the construct super. In the first case, we +evaluate the object expression and lookup the member starting with the +current class (static scoping). Note the use of the conditional +evaluation context. In the second case, we just lookup the member +starting with the superclass of the current class. In both cases, +the lookupMember task eventually yields a lookup(L) +task for some appropriate location L, which will be further +solved with the corresponding rule borrowed from SIMPLE. Note that the +current object is not altered by super, so future method +invocations see the entire object, as needed for dynamic method dispatch.

+
k
rule <k> X:Id => this . X ...</k> <env> Env:Map </env> + requires notBool(X in keys(Env)) + + context HOLE._::Id requires (HOLE =/=K super) + +// TODO: explain how Assoc matching has been replaced with two rules here. +// Maybe also improve it a bit. + +/* rule objectClosure(<crntClass> Class:Id </crntClass> + <envStack>... envStackFrame(Class,EnvC) EStack </envStack>) + . X:Id + => lookupMember(envStackFrame(Class,EnvC) EStack, X) */ + + rule objectClosure(Class:Id, ListItem(envStackFrame(Class,Env)) EStack) + . X:Id + => lookupMember(ListItem(envStackFrame(Class,Env)) EStack, X) + rule objectClosure(Class:Id, (ListItem(envStackFrame(Class':Id,_)) => .List) _) + . _X:Id + requires Class =/=K Class' + +/* rule <k> super . X => lookupMember(EStack, X) ...</k> + <crntClass> Class </crntClass> + <envStack>... envStackFrame(Class,EnvC) EStack </envStack> */ + rule <k> super . X => lookupMember(EStack, X) ...</k> + <crntClass> Class:Id </crntClass> + <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack> + rule <k> super . _X ...</k> + <crntClass> Class </crntClass> + <envStack> ListItem(envStackFrame(Class':Id,_)) => .List ...</envStack> + requires Class =/=K Class' +
+

Method invocation

+

Unlike in SIMPLE, in KOOL application was declared strict only in its second argument. That is because we want to ensure dynamic method dispatch when the first argument is a method access. As a consequence, we need to consider all the cases of interest for the first argument and to explicitly say what to do in each case. In all cases except for method access in a proper object (i.e., not super), we want the same behavior for the first argument as if it were not in a method invocation position. When it is a member access (the third rule below), we look it up starting with the instance class of the corresponding object. This ensures dynamic dispatch for methods; it actually dynamically dispatches field accesses, too, which is correct in KOOL, because one can assign method closures to fields and the field may appear in a method invocation context. The last context declaration below says that method applications or array accesses are also allowed as the first argument of applications; that is because methods are allowed to return methods and arrays are allowed to hold methods in KOOL, since it is higher-order. If that is the case, then we want to evaluate the method call or the array access.

+
k
rule <k> (X:Id => V)(_:Exps) ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V:Val ...</store> + + rule <k> (X:Id => this . X)(_:Exps) ...</k> + <env> Env </env> + requires notBool(X in keys(Env)) + + context HOLE._::Id(_) requires HOLE =/=K super + + rule (objectClosure(_, EStack) . X + => lookupMember(EStack, X:Id))(_:Exps) + +/* rule <k> (super . X + => lookupMember(EStack,X))(_:Exps)...</k> + <crntClass> Class </crntClass> + <envStack>... envStackFrame(Class,_) EStack </envStack> */ + rule <k> (super . X + => lookupMember(EStack,X))(_:Exps)...</k> + <crntClass> Class </crntClass> + <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack> + rule <k> (super . _X)(_:Exps) ...</k> + <crntClass> Class </crntClass> + <envStack> ListItem(envStackFrame(Class':Id,_)) => .List ...</envStack> + requires Class =/=K Class' + + // TODO(KORE): fix getKLabel #1801 + rule (A:Exp(B:Exps))(C:Exps) => A(B) ~> #freezerFunCall(C) + rule (A:Exp[B:Exps])(C:Exps) => A[B] ~> #freezerFunCall(C) + rule V:Val ~> #freezerFunCall(C:Exps) => V(C) + syntax KItem ::= "#freezerFunCall" "(" K ")" + /* + context HOLE(_:Exps) + when getKLabel(HOLE) ==K #klabel(`_(_)`) orBool getKLabel(HOLE) ==K #klabel(`_[_]`) + */ +
+

Eventually, each of the rules above produces a lookup(L) +task as a replacement for the method. When that happens, we just +lookup the value at location L:

+
k
rule <k> (lookup(L) => V)(_:Exps) ...</k> <store>... L |-> V:Val ...</store> +
+

The value V looked up above is expected to be a method closure, +in which case the semantics of method application given above will +apply. Otherwise, the execution will get stuck.

+
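The following hypothetical program exercises this mechanism: the call b.name() finds name in class A, but the nested call whoami() is dispatched dynamically and therefore reaches the redefinition in class B:

    class A {
      method A() { }
      method whoami() { return "A"; }
      method name() { return whoami(); }
    }

    class B extends A {
      method B() { super.A(); }
      method whoami() { return "B"; }
    }

    class Main {
      method Main() {
        var b = new B();
        print(b.name(), "\n");   // prints B
      }
    }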

Instance Of

+

It searches the object environment for a layer corresponding to the +desired class. It returns true iff it can find the class, +otherwise it returns false; it only gets stuck when its first +argument does not evaluate to an object.

+
k
rule objectClosure(_, ListItem(envStackFrame(C,_)) _) + instanceOf C => true + + rule objectClosure(_, (ListItem(envStackFrame(C,_)) => .List) _) + instanceOf C' requires C =/=K C' +//TODO: remove the sort cast ::Id of C above, when sort inference bug fixed + + rule objectClosure(_, .List) instanceOf _ => false +
+

Cast

+

In untyped KOOL, we prefer not to check the validity of casting. In other words, any cast is allowed on any object, simply changing the current class of the object to the desired class. The execution will get stuck later if one attempts to access a field which is not available. Moreover, the execution may complete successfully even in the presence of invalid casts, provided that each accessed member during the current execution is, or happens to be, available.

+
k
rule (C) objectClosure(_ , EnvStack) => objectClosure(C ,EnvStack) +
+
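Both instanceOf and the unchecked cast can be exercised with a small hypothetical program like the one below; note that the cast to A does not prevent the subsequent invocation from finding hello, because member lookup in invocation position starts from the object's instance class:

    class A {
      method A() { }
    }

    class B extends A {
      method B() { super.A(); }
      method hello() { print("hello from B\n"); }
    }

    class Main {
      method Main() {
        var b = new B();
        print(b instanceOf A, " ", b instanceOf Main, "\n");   // prints true false
        ((A) b).hello();   // unchecked cast; hello is still found and invoked
      }
    }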

KOOL-specific auxiliary declarations and operations

+

Here we define all the auxiliary constructs used in the above +KOOL-specific semantics (those used in the SIMPLE fragment +have already been defined in a corresponding section above).

+

Objects as lvalues

+

The current machinery borrowed with the semantics of SIMPLE allows us +to enrich the set of lvalues, this way allowing new means to assign +values to locations. In KOOL, we want object member names to be +lvalues, so that we can assign values to them using the already +existing machinery. The first rule below ensures that the object is +always explicit, the evaluation context enforces the object to be +evaluated, and finally the second rule initiates the lookup for the +member's location based on the current class of the object.

+
k
rule <k> lvalue(X:Id => this . X) ...</k> <env> Env </env> + requires notBool(X in keys(Env)) + + context lvalue((HOLE . _)::Exp) + +/* rule lvalue(objectClosure(<crntClass> C </crntClass> + <envStack>... envStackFrame(C,EnvC) EStack </envStack>) + . X + => lookupMember(<envStack> envStackFrame(C,EnvC) EStack </envStack>, + X)) */ + rule lvalue(objectClosure(Class, ListItem(envStackFrame(Class,Env)) EStack) + . X + => lookupMember(ListItem(envStackFrame(Class,Env)) EStack, + X)) + rule lvalue(objectClosure(Class, (ListItem(envStackFrame(Class':Id,_)) => .List) _) + . _X) + requires Class =/=K Class' +
+

Lookup member

+

It searches for the given member in the given environment stack, +starting with the most concrete class and going up in the hierarchy.

+
k
// TODO(KORE): clarify sort inferences #1803 + syntax Exp ::= lookupMember(List, Id) [function] + /* + syntax KItem ::= lookupMember(EnvStackCell,Id) [function] + */ + +// rule lookupMember(<envStack> envStackFrame(_, <env>... X|->L ...</env>) ...</envStack>, X) +// => lookup(L) + rule lookupMember(ListItem(envStackFrame(_, X|->L _)) _, X) + => lookup(L) + +// rule lookupMember(<envStack> envStackFrame(_, <env> Env </env>) => .List ...</envStack>, X) +// when notBool(X in keys(Env)) + rule lookupMember(ListItem(envStackFrame(_, Env)) Rest, X) => + lookupMember(Rest, X) + requires notBool(X in keys(Env)) +//TODO: beautify the above + +endmodule +
+

Go to Lesson 2, KOOL typed dynamic.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/2_languages/2_kool/2_typed/1_dynamic/kool-typed-dynamic/index.html b/k-distribution/pl-tutorial/2_languages/2_kool/2_typed/1_dynamic/kool-typed-dynamic/index.html new file mode 100644 index 00000000000..5223cb2491c --- /dev/null +++ b/k-distribution/pl-tutorial/2_languages/2_kool/2_typed/1_dynamic/kool-typed-dynamic/index.html @@ -0,0 +1,1390 @@ + + + + + + + + + + + + + + +KOOL — Typed — Dynamic | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

KOOL — Typed — Dynamic

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K dynamic semantics of the typed KOOL language. It is very similar to the semantics of untyped KOOL, the difference being that we now check the typing policy dynamically. Since we now have to declare the types of variables and methods, we adopt a syntax for these which is close to that of Java. Like in the semantics of untyped KOOL, where we borrowed almost all the semantics of untyped SIMPLE, we are going to also borrow much of the semantics of dynamically typed SIMPLE here. We will highlight the differences between the dynamically typed and the untyped KOOL as we proceed with the semantics. In general, the type policy of the typed KOOL language is similar to that of Java. You may find it useful to also read the discussion in the preamble of the static semantics of typed KOOL before proceeding.

+
k
module KOOL-TYPED-DYNAMIC-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

Like for the untyped KOOL language, the syntax of typed KOOL extends +that of typed SIMPLE with object-oriented constructs. +The syntax below was produced by copying and modifying/extending the +syntax of dynamically typed SIMPLE. In fact, the only change we made +to the existing syntax of dynamically typed SIMPLE was to change the +strictness of the application construct like in untyped KOOL, from +strict to strict(2) (because application is not +strict in the first argument anymore due to dynamic method dispatch). +The KOOL-specific syntactic extensions are identical to those in +untyped KOOL.

+
k
syntax Id ::= "Object" [token] | "Main" [token] +
+

Types

+
k
syntax Type ::= "void" | "int" | "bool" | "string" + | Id // KOOL class + | Type "[" "]" + | "(" Type ")" [bracket] + > Types "->" Type + // TODO(KORE): drop klabel once issues #1913 are fixed + syntax Types ::= List{Type,","} [symbol(_,_::Types)] + /* + syntax Types ::= List{Type,","} + */ +
+

Declarations

+
k
syntax Param ::= Type Id + syntax Params ::= List{Param,","} + + syntax Stmt ::= Type Exps ";" [avoid] + | Type Id "(" Params ")" Block // stays like in typed SIMPLE + | "class" Id Block // KOOL + | "class" Id "extends" Id Block // KOOL +
+

Expressions

+
k
syntax Exp ::= Int | Bool | String | Id + | "this" // KOOL + | "super" // KOOL + | "(" Exp ")" [bracket] + | "++" Exp + | Exp "instanceOf" Id [strict(1)] // KOOL + | "(" Id ")" Exp [strict(2)] // KOOL cast + | "new" Id "(" Exps ")" [strict(2)] // KOOL + | Exp "." Id // KOOL + > Exp "[" Exps "]" [strict] + > Exp "(" Exps ")" [strict(2)] // was strict in SIMPLE + | "-" Exp [strict] + | "sizeOf" "(" Exp ")" [strict] + | "read" "(" ")" + > left: + Exp "*" Exp [strict, left] + | Exp "/" Exp [strict, left] + | Exp "%" Exp [strict, left] + > left: + Exp "+" Exp [strict, left] + | Exp "-" Exp [strict, left] + > non-assoc: + Exp "<" Exp [strict, non-assoc] + | Exp "<=" Exp [strict, non-assoc] + | Exp ">" Exp [strict, non-assoc] + | Exp ">=" Exp [strict, non-assoc] + | Exp "==" Exp [strict, non-assoc] + | Exp "!=" Exp [strict, non-assoc] + > "!" Exp [strict] + > left: + Exp "&&" Exp [strict(1), left] + | Exp "||" Exp [strict(1), left] + > "spawn" Block + > Exp "=" Exp [strict(2), right] + + syntax Exps ::= List{Exp,","} [strict, overload(exps)] + syntax Val + syntax Vals ::= List{Val,","} [overload(exps)] +
+

Statements

+
k
syntax Block ::= "{" "}" + | "{" Stmt "}" + + syntax Stmt ::= Block + | Exp ";" [strict] + | "if" "(" Exp ")" Block "else" Block [avoid, strict(1)] + | "if" "(" Exp ")" Block [macro] + | "while" "(" Exp ")" Block + | "for" "(" Stmt Exp ";" Exp ")" Block [macro] + | "print" "(" Exps ")" ";" [strict] + | "return" Exp ";" [strict] + | "return" ";" + | "try" Block "catch" "(" Param ")" Block + | "throw" Exp ";" [strict] + | "join" Exp ";" [strict] + | "acquire" Exp ";" [strict] + | "release" Exp ";" [strict] + | "rendezvous" Exp ";" [strict] + + syntax Stmt ::= Stmt Stmt [right] +
+

Desugaring macros

+
k
rule if (E) S => if (E) S else {} + rule for(Start Cond; Step) {S::Stmt} => {Start while(Cond){S Step;}} + rule T::Type E1::Exp, E2::Exp, Es::Exps; => T E1; T E2, Es; [anywhere] + rule T::Type X::Id = E; => T X; X = E; [anywhere] + + rule class C:Id S => class C extends Object S // KOOL + +endmodule +
+

Semantics

+

We first discuss the new configuration, then we include the semantics of +the constructs borrowed from SIMPLE which stay unchanged, then those +whose semantics had to change, and finally the semantics of the +KOOL-specific constructs.

+
k
module KOOL-TYPED-DYNAMIC + imports KOOL-TYPED-DYNAMIC-SYNTAX + imports DOMAINS +
+

Configuration

+

The configuration of dynamically typed KOOL is almost identical to that of its untyped variant. The only difference is the returnType cell, inside the control cell, whose role is to hold the expected return type of the invoked method. That is because we want to dynamically check that the value that a method returns has the expected type.

+
k
// the syntax declarations below are required because the sorts are + // referenced directly by a production and, because of the way KIL to KORE + // is implemented, the configuration syntax is not available yet + // should simply work once KIL is removed completely + // check other definitions for this hack as well + syntax EnvCell + syntax ControlCellFragment + syntax EnvStackCell + syntax CrntObjCellFragment + + configuration <T color="red"> + <threads color="orange"> + <thread multiplicity="*" type="Set" color="yellow"> + <k color="green"> ($PGM:Stmt ~> execute) </k> + //<br/> // TODO(KORE): support latex annotations #1799 + <control color="cyan"> + <fstack color="blue"> .List </fstack> + <xstack color="purple"> .List </xstack> + <returnType color="LimeGreen"> void </returnType> // KOOL + //<br/> // TODO(KORE): support latex annotations #1799 + <crntObj color="Fuchsia"> // KOOL + <crntClass> Object </crntClass> + <envStack> .List </envStack> + <location multiplicity="?"> .K </location> + </crntObj> + </control> + //<br/> // TODO(KORE): support latex annotations #1799 + <env color="violet"> .Map </env> + <holds color="black"> .Map </holds> + <id color="pink"> 0 </id> + </thread> + </threads> + //<br/> // TODO(KORE): support latex annotations #1799 + <store color="white"> .Map </store> + <busy color="cyan">.Set </busy> + <terminated color="red"> .Set </terminated> + <input color="magenta" stream="stdin"> .List </input> + <output color="brown" stream="stdout"> .List </output> + <nextLoc color="gray"> 0 </nextLoc> + //<br/> // TODO(KORE): support latex annotations #1799 + <classes color="Fuchsia"> // KOOL + <classData multiplicity="*" type="Map" color="Fuchsia"> + <className color="Fuchsia"> Main </className> + <baseClass color="Fuchsia"> Object </baseClass> + <declarations color="Fuchsia"> .K </declarations> + </classData> + </classes> + </T> +
+

Unchanged semantics from dynamically typed SIMPLE

+

The semantics below is taken over unchanged from dynamically typed SIMPLE. Like for untyped KOOL, the semantics of function/method declaration and invocation, and of program initialization, needs to change. Moreover, due to subtyping, the semantics of several imported SIMPLE constructs can be made more general, such as that of the return statement, that of assignment, and that of exceptions. We removed all of these from the imported semantics of SIMPLE below and give their modified semantics right after, together with the extended semantics of thread spawning (which is identical to that of untyped KOOL).

+
k
syntax Val ::= Int | Bool | String + | array(Type,Int,Int) + syntax Exp ::= Val + syntax Exps ::= Vals + syntax KResult ::= Val + syntax KResult ::= Vals + + + syntax KItem ::= undefined(Type) + + rule <k> T:Type X:Id; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> undefined(T) ...</store> + <nextLoc> L:Int => L +Int 1 </nextLoc> + + + rule <k> T:Type X:Id[N:Int]; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> array(T, L +Int 1, N) + (L +Int 1)...(L +Int N) |-> undefined(T) ...</store> + <nextLoc> L:Int => L +Int 1 +Int N </nextLoc> + requires N >=Int 0 + + context _:Type _::Exp[HOLE::Exps]; + + + syntax Id ::= "$1" [token] | "$2" [token] + rule T:Type X:Id[N1:Int, N2:Int, Vs:Vals]; + => T[]<Vs> X[N1]; + { + T[][]<Vs> $1=X; + for(int $2=0; $2 <= N1 - 1; ++$2) { + T X[N2,Vs]; + $1[$2] = X; + } + } + + + rule <k> X:Id => V ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V:Val ...</store> + + + context ++(HOLE => lvalue(HOLE)) + rule <k> ++loc(L) => I +Int 1 ...</k> + <store>... L |-> (I:Int => I +Int 1) ...</store> + + + rule I1 + I2 => I1 +Int I2 + rule Str1 + Str2 => Str1 +String Str2 + rule I1 - I2 => I1 -Int I2 + rule I1 * I2 => I1 *Int I2 + rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0 + rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0 + rule - I => 0 -Int I + rule I1 < I2 => I1 <Int I2 + rule I1 <= I2 => I1 <=Int I2 + rule I1 > I2 => I1 >Int I2 + rule I1 >= I2 => I1 >=Int I2 + rule V1:Val == V2:Val => V1 ==K V2 + rule V1:Val != V2:Val => V1 =/=K V2 + rule ! T => notBool(T) + rule true && E => E + rule false && _ => false + rule true || _ => true + rule false || E => E + + + rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs] + [anywhere] + + rule array(_:Type, L:Int, M:Int)[N:Int] => lookup(L +Int N) + requires N >=Int 0 andBool N <Int M [anywhere] + + rule sizeOf(array(_,_,N)) => N + + + syntax Val ::= nothing(Type) + rule <k> return; => return nothing(T); ...</k> <returnType> T </returnType> + + + rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input> + + + context (HOLE => lvalue(HOLE)) = _ + + + rule {} => .K + rule <k> { S } => S ~> setEnv(Env) ...</k> <env> Env </env> + + + rule S1:Stmt S2:Stmt => S1 ~> S2 + + + rule _:Val; => .K + + + rule if ( true) S else _ => S + rule if (false) _ else S => S + + + rule while (E) S => if (E) {S while(E)S} + + + rule <k> print(V:Val, Es => Es); ...</k> <output>... .List => ListItem(V) </output> + requires typeOf(V) ==K int orBool typeOf(V) ==K string + rule print(.Vals); => .K + + + rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag) + <busy> Busy => Busy -Set keys(H) </busy> + <terminated>... .Set => SetItem(T) ...</terminated> + + rule <k> join T:Int; => .K ...</k> + <terminated>... SetItem(T) ...</terminated> + + rule <k> acquire V:Val; => .K ...</k> + <holds>... .Map => V |-> 0 ...</holds> + <busy> Busy (.Set => SetItem(V)) </busy> + requires (notBool(V in Busy:Set)) + + rule <k> acquire V; => .K ...</k> + <holds>... V:Val |-> (N:Int => N +Int 1) ...</holds> + + rule <k> release V:Val; => .K ...</k> + <holds>... V |-> (N => N:Int -Int 1) ...</holds> + requires N >Int 0 + + rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds> + <busy>... SetItem(V) => .Set ...</busy> + + rule <k> rendezvous V:Val; => .K ...</k> + <k> rendezvous V; => .K ...</k> +
+

Unchanged auxiliary operations from dynamically typed SIMPLE

+
k
syntax Stmt ::= mkDecls(Params,Vals) [function] + rule mkDecls((T:Type X:Id, Ps:Params), (V:Val, Vs:Vals)) + => T X=V; mkDecls(Ps,Vs) + rule mkDecls(.Params,.Vals) => {} + + syntax Exp ::= lookup(Int) + rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store> + + syntax KItem ::= setEnv(Map) + rule <k> setEnv(Env) => .K ...</k> <env> _ => Env </env> + rule (setEnv(_) => .K) ~> setEnv(_) + + syntax Exp ::= lvalue(K) + syntax Val ::= loc(Int) + rule <k> lvalue(X:Id => loc(L)) ...</k> <env>... X |-> L:Int ...</env> + + context lvalue(_::Exp[HOLE::Exps]) + context lvalue(HOLE::Exp[_::Exps]) + + rule lvalue(lookup(L:Int) => loc(L)) + + syntax Type ::= Type "<" Vals ">" [function] + rule T:Type<_,Vs:Vals> => T[]<Vs> + rule T:Type<.Vals> => T + + syntax Map ::= Int "..." Int "|->" K [function] + rule N...M |-> _ => .Map requires N >Int M + rule N...M |-> K => N |-> K (N +Int 1)...M |-> K requires N <=Int M + + syntax Type ::= typeOf(K) [function] + rule typeOf(_:Int) => int + rule typeOf(_:Bool) => bool + rule typeOf(_:String) => string + rule typeOf(array(T,_,_)) => (T[]) + rule typeOf(undefined(T)) => T + rule typeOf(nothing(T)) => T + + syntax Types ::= getTypes(Params) [function] + rule getTypes(T:Type _:Id) => T, .Types + rule getTypes(T:Type _:Id, P, Ps) => T, getTypes(P,Ps) + rule getTypes(.Params) => void, .Types +
+

Changes to the existing dynamically typed SIMPLE semantics

+

We extend/change the semantics of several SIMPLE constructs in order +to take advantage of the richer KOOL semantic infrastructure and thus +get more from the existing SIMPLE constructs.

+

Program initialization

+

Like in untyped KOOL.

+
k
syntax KItem ::= "execute" + rule <k> execute => new Main(.Exps); </k> <env> .Map </env> +
+

Method application

+

The only change to untyped KOOL's values is that method closures are +now typed (their first argument holds their type):

+
k
syntax Val ::= objectClosure(Id,List) + | methodClosure(Type,Id,Int,Params,Stmt) +
+

The type held by a method closure will be the entire type of the method, not only its result type as in the lambda-closures of typed SIMPLE. The reason for this change is the need to dynamically upcast values when they are passed to contexts where values of superclass types are expected; since we want method closures to be first-class values in our language, we have to be able to dynamically upcast them, and in order to do that elegantly it is convenient to store the entire ``current type'' of the method closure instead of just its result type. Note that this was unnecessary in the semantics of the dynamically typed SIMPLE language.

+

Method closure application also needs to set a new return type in the returnType cell, like in dynamically typed SIMPLE, so that the values returned by its body can be checked against the return type of the method. To do this correctly, we also need to stack the current contents of the returnType cell and then pop it when the method returns. We have to do the same with the current object environment, so we group them together in the stack frame.

+
k
syntax KItem ::= fstackFrame(Map, K, List, Type, K) + + rule <k> methodClosure(_->T,Class,OL,Ps,S)(Vs:Vals) ~> K + => mkDecls(Ps,Vs) S return; </k> + <env> Env => .Map </env> + <store>... OL |-> objectClosure(_, EStack)...</store> + //<br/> // TODO(KORE): support latex annotations #1799 + <control> + <fstack> .List => ListItem(fstackFrame(Env, K, XS, T', <crntObj> Obj' </crntObj>)) ...</fstack> + <xstack> XS </xstack> + <returnType> T' => T </returnType> + <crntObj> Obj' => <crntClass> Class </crntClass> <envStack> EStack </envStack> </crntObj> + </control> +
+

At method return, we have to check that the type of the returned value is a subtype of the expected return type. Moreover, if that is the case, then we also upcast the returned value to the expected type. The computation item unsafeCast(V,T) changes the type of V to T without any additional checks; however, it only does so when V is an object or a method, otherwise it returns V unchanged.

+
k
rule <k> return V:Val; ~> _ + => subtype(typeOf(V), T) ~> true? ~> unsafeCast(V, T) ~> K + </k> + <control> + <fstack> ListItem(fstackFrame(Env, K, XS, RT, <crntObj> CO </crntObj>)) => .List ...</fstack> + <xstack> _ => XS </xstack> + <returnType> T:Type => RT </returnType> + <crntObj> _ => CO </crntObj> + </control> + <env> _ => Env </env> +
+
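For instance, assuming hypothetical classes Shape and Circle, with Circle extending Shape and both having appropriate constructors (none of this is part of the definition itself), the method below is accepted by the rule above: the returned object has type Circle, which passes the subtype check against Shape and is then upcast to Shape before being handed to the caller.

    Shape make() {
    +  return new Circle();   // typeOf of the returned object is Circle, a subtype of Shape
    +}
    +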

Assignment

+

Typed KOOL allows values of a subtype to be assigned to lvalues of a supertype. The semantics of assignment below is similar in spirit to dynamically typed SIMPLE's, but a check is performed that the assigned value's type is a subtype of the location's type. If that is the case, then the assigned value is stored and also returned as a result, but it is first upcast appropriately, so the context will continue to see a value of the type expected at that location. Note that the type of a location is implicit in the type of its contents and it never changes during the execution of a program; the type is assigned when the location is allocated and initialized, and after that only type-preserving values are allowed to be stored in each location.

+
k
rule <k> loc(L) = V:Val + => subtype(typeOf(V),typeOf(V')) ~> true? + ~> unsafeCast(V, typeOf(V')) ...</k> + <store>... L |-> (V' => unsafeCast(V, typeOf(V'))) ...</store> +
+
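To illustrate (again with hypothetical classes where Circle extends Shape), the assignment below passes the subtype check and stores the new object upcast to Shape, because the location bound to s was allocated with type Shape:

    Shape s;
    +s = new Circle();   // subtype(Circle, Shape) holds; the stored value is retyped as Shape
    +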

Typed exceptions

+

Exceptions are now propagated until a catch that can handle them is encountered.

+
k
syntax KItem ::= xstackFrame(Param, Stmt, K, Map, K) + syntax KItem ::= "popx" + + rule <k> (try S1 catch(P) S2 => S1 ~> popx) ~> K </k> + <control> + <xstack> .List => ListItem(xstackFrame(P, S2, K, Env, C)) ...</xstack> + C + </control> + <env> Env </env> + + rule <k> popx => .K ...</k> + <xstack> ListItem(_) => .List ...</xstack> + + rule <k> throw V:Val; ~> _ + => if (subtype(typeOf(V),T)) { T X = V; S2 } else { throw V; } ~> K + </k> + <control> + <xstack> ListItem(xstackFrame(T:Type X:Id, S2, K, Env, C)) => .List ...</xstack> + (_ => C) + </control> + <env> _ => Env </env> +
+
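For example, in the hypothetical fragment below (assuming classes Circle and Square that both extend Shape, with appropriate constructors), the thrown Circle object does not match the inner catch parameter of type Square, so the rule above re-throws it and it is eventually handled by the outer catch:

    try {
    +  try { throw new Circle(); }
    +  catch(Square s) { print("unreachable\n"); }
    +} catch(Shape s) { print("caught a Shape\n"); }
    +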

Spawn

+

Like in untyped KOOL.

+
k
rule <thread>... + <k> spawn S => !T:Int ...</k> + <env> Env </env> + <crntObj> Obj </crntObj> + ...</thread> + (.Bag => <thread>... + <k> S </k> + <env> Env </env> + <id> !T </id> + <crntObj> Obj </crntObj> + ...</thread>) +
+

Semantics of the new KOOL constructs

+

Class declaration

+

Like in untyped KOOL.

+
k
rule <k> class Class1 extends Class2 { S } => .K ...</k> + <classes>... (.Bag => <classData> + <className> Class1 </className> + <baseClass> Class2 </baseClass> + <declarations> S </declarations> + </classData>) + ...</classes> +
+

Method declaration

+

Methods are now typed and we need to store their types in their +closures, so that their type contract can be checked at invocation +time. The rule below is conceptually similar to that of untyped KOOL; +the only difference is the addition of the types.

+
k
rule <k> T:Type F:Id(Ps:Params) S => .K ...</k> + <crntClass> C </crntClass> + <location> OL </location> + <env> Env => Env[F <- L] </env> + <store>... .Map => L|->methodClosure(getTypes(Ps)->T,C,OL,Ps,S) ...</store> + <nextLoc> L => L +Int 1 </nextLoc> +
+

New

+

The semantics of new in dynamically typed KOOL is also similar to that in untyped KOOL, the main difference being the management of the return types. Indeed, when a new object is created we also have to stack the current contents of the returnType cell, so that it can be recovered after the creation of the new object. Only the first rule below needs to change; the others are identical to those in untyped KOOL.

+
k
syntax KItem ::= envStackFrame(Id, Map) + + rule <k> new Class:Id(Vs:Vals) ~> K + => create(Class) ~> (storeObj ~> ((Class(Vs)); return this;)) </k> + <env> Env => .Map </env> + <nextLoc> L:Int => L +Int 1 </nextLoc> + //<br/> // TODO(KORE): support latex annotations #1799 + <control> + <xstack> XS </xstack> + <crntObj> Obj + => <crntClass> Object </crntClass> + <envStack> ListItem(envStackFrame(Object, .Map)) </envStack> + <location> L </location> + </crntObj> + <returnType> T => Class </returnType> + <fstack> .List => ListItem(fstackFrame(Env, K, XS, T, <crntObj>Obj</crntObj>)) ...</fstack> + </control> + + syntax KItem ::= create(Id) + + rule <k> create(Class:Id) + => create(Class1) ~> setCrntClass(Class) ~> S ~> addEnvLayer ...</k> + <className> Class </className> + <baseClass> Class1:Id </baseClass> + <declarations> S </declarations> + + rule <k> create(Object) => .K ...</k> + + syntax KItem ::= setCrntClass(Id) + + rule <k> setCrntClass(C) => .K ...</k> + <crntClass> _ => C </crntClass> + + syntax KItem ::= "addEnvLayer" + + rule <k> addEnvLayer => .K ...</k> + <env> Env => .Map </env> + <crntClass> Class:Id </crntClass> + <envStack> .List => ListItem(envStackFrame(Class, Env)) ...</envStack> + + syntax KItem ::= "storeObj" + + rule <k> storeObj => .K ...</k> + <crntObj> + <crntClass> Class </crntClass> + <envStack> EStack </envStack> + (<location> L:Int </location> => .Bag) + </crntObj> + <store>... .Map => L |-> objectClosure(Class, EStack) ...</store> +
+

Self reference

+

Like in untyped KOOL.

+
k
rule <k> this => objectClosure(Class, EStack) ...</k> + <crntObj> + <crntClass> Class </crntClass> + <envStack> EStack </envStack> + ... + </crntObj> +
+

Object member access

+

Like in untyped KOOL.

+
k
rule <k> X:Id => this . X ...</k> <env> Env:Map </env> + requires notBool(X in keys(Env)) + + context HOLE . _::Id requires (HOLE =/=K super) + +/* rule objectClosure(<crntObj> <crntClass> Class:Id </crntClass> + <envStack>... ListItem((Class,EnvC:EnvCell)) EStack </envStack> </crntObj>) + . X:Id + => lookupMember(<envStack> ListItem((Class,EnvC)) EStack </envStack>, X) */ + rule objectClosure(Class:Id, + ListItem(envStackFrame(Class,Env)) EStack) + . X:Id + => lookupMember(ListItem(envStackFrame(Class,Env)) EStack, X) + rule objectClosure(Class:Id, + (ListItem(envStackFrame(Class':Id,_)) => .List) _EStack) + . _X:Id + requires Class =/=K Class' + +/* rule <k> super . X => lookupMember(<envStack>EStack</envStack>, X) ...</k> + <crntClass> Class </crntClass> + <envStack>... ListItem((Class,EnvC:EnvCell)) EStack </envStack> */ + rule <k> super . X => lookupMember(EStack, X) ...</k> + <crntClass> Class:Id </crntClass> + <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack> + rule <k> super . _X ...</k> + <crntClass> Class:Id </crntClass> + <envStack> (ListItem(envStackFrame(Class':Id,_)) => .List) _EStack </envStack> + requires Class =/=K Class' +
+

Method invocation

+

The method lookup is the same as in untyped KOOL.

+
k
rule <k> (X:Id => V)(_:Exps) ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V:Val ...</store> + + rule <k> (X:Id => this . X)(_:Exps) ...</k> + <env> Env </env> + requires notBool(X in keys(Env)) + + context HOLE._::Id(_) requires HOLE =/=K super + + rule (objectClosure(_, EStack) . X + => lookupMember(EStack, X:Id))(_:Exps) + +/* rule <k> (super . X + => lookupMember(<envStack>EStack</envStack>,X))(_:Exps)...</k> + <crntClass> Class </crntClass> + <envStack>... ListItem((Class,_)) EStack </envStack> */ + rule <k> (super . X + => lookupMember(EStack,X))(_:Exps)...</k> + <crntClass> Class:Id </crntClass> + <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack> + rule <k> (super . _X)(_:Exps)...</k> + <crntClass> Class:Id </crntClass> + <envStack> (ListItem(envStackFrame(Class':Id,_)) => .List) _EStack </envStack> + requires Class =/=K Class' + + // TODO(KORE): fix getKLabel #1801 + rule (A:Exp(B:Exps))(C:Exps) => A(B) ~> #freezerFunCall(C) + rule (A:Exp[B:Exps])(C:Exps) => A[B] ~> #freezerFunCall(C) + rule V:Val ~> #freezerFunCall(C:Exps) => V(C) + syntax KItem ::= "#freezerFunCall" "(" K ")" + /* + context HOLE(_:Exps) + requires getKLabel HOLE ==KLabel '_`(_`) orBool getKLabel HOLE ==KLabel '_`[_`] + */ + + rule <k> (lookup(L) => V)(_:Exps) ...</k> <store>... L |-> V:Val ...</store> +
+

Instance of

+

Like in untyped KOOL.

+
k
rule objectClosure(_, ListItem(envStackFrame(C,_)) _) + instanceOf C => true + + rule objectClosure(_, (ListItem(envStackFrame(C::Id,_)) => .List) _) + instanceOf C' requires C =/=K C' + + rule objectClosure(_, .List) instanceOf _ => false +
+
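For example, with hypothetical classes where Circle extends Shape, the environment stack of a Circle object contains layers for Circle, Shape, and Object, so the rules above yield (shown purely as an illustration):

    new Circle() instanceOf Shape    // true:  a Shape layer is found in the object's envStack
    +new Shape() instanceOf Circle    // false: the envStack is exhausted without finding Circle
    +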

Cast

+

Unlike in untyped KOOL, in typed KOOL we actually check that the object +can indeed be cast to the claimed type.

+
k
rule (C:Id) objectClosure(Irrelevant, EStack) + => objectClosure(Irrelevant, EStack) instanceOf C ~> true? + ~> objectClosure(C, EStack) +
+
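For instance (hypothetical fragment, with Circle extending Shape and both classes having appropriate constructors), a downcast back to the object's actual class succeeds, while an impossible downcast fails the instanceOf check, so true? never dissolves and the computation gets stuck:

    Shape s;
    +s = new Circle();
    +Circle c;
    +c = (Circle) s;             // instanceOf check passes; the object is retyped as Circle
    +c = (Circle) new Shape();   // instanceOf yields false, so the cast (and the program) gets stuck
    +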

KOOL-specific auxiliary declarations and operations

+

Objects as lvalues

+

Like in untyped KOOL.

+
k
rule <k> lvalue(X:Id => this . X) ...</k> <env> Env </env> + requires notBool(X in keys(Env)) + + context lvalue((HOLE . _)::Exp) + +/* rule lvalue(objectClosure(<crntObj> <crntClass> C </crntClass> + <envStack>... ListItem((C,EnvC:EnvCell)) EStack </envStack> </crntObj>) + . X + => lookupMember(<envStack> ListItem((C,EnvC)) EStack </envStack>, + X)) */ + rule lvalue(objectClosure(C:Id, + ListItem(envStackFrame(C,Env)) EStack) + . X + => lookupMember(ListItem(envStackFrame(C,Env)) EStack, + X)) + rule lvalue(objectClosure(C, + (ListItem(envStackFrame(C',_)) => .List) _EStack) + . _X) + requires C =/=K C' +
+

Lookup member

+

Like in untyped KOOL.

+
k
syntax Exp ::= lookupMember(List,Id) [function] + + rule lookupMember(ListItem(envStackFrame(_, X |-> L _)) _, X) => lookup(L) + + // TODO: fix rule below as shown once we support functions with deep rewrites + // rule lookupMember(<envStack> ListItem((_, <env> Env </env>)) => .List + // ...</envStack>, X) + // requires notBool(X in keys(Env)) + rule lookupMember(ListItem(envStackFrame(_, Env)) L, X) + => lookupMember(L, X) + requires notBool(X in keys(Env)) +
+

typeOf for the additional values

+
k
rule typeOf(objectClosure(C,_)) => C + rule typeOf(methodClosure(T:Type,_,_,_Ps:Params,_)) => T +
+

Subtype checking

+

The subclass relation induces a subtyping relation.

+
k
syntax Exp ::= subtype(Types,Types) + + rule subtype(T:Type, T) => true + + rule <k> subtype(C1:Id, C:Id) => subtype(C2, C) ...</k> + <className> C1 </className> + <baseClass> C2:Id </baseClass> + requires C1 =/=K C + + rule subtype(Object,Class:Id) => false + requires Class =/=K Object + + rule subtype(Ts1->T2,Ts1'->T2') => subtype(((T2)::Type,Ts1'),((T2')::Type,Ts1)) + +// Note that the following rule would be wrong! +// rule subtype(T[],T'[]) => subtype(T,T') + + rule subtype((T:Type,Ts),(T':Type,Ts')) => subtype(T,T') && subtype(Ts,Ts') + requires Ts =/=K .Types + rule subtype(.Types,.Types) => true +
+
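To illustrate the co-variance in the result and contra-variance in the arguments, assume hypothetical classes where Circle extends Shape; then, conceptually, the rules above give:

    subtype(Circle, Shape)                       // true:  Circle extends Shape
    +subtype(Shape -> Circle, Circle -> Shape)    // true:  co-variant result, contra-variant argument
    +subtype(Circle -> Circle, Shape -> Shape)    // false: would require subtype(Shape, Circle)
    +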

Unsafe Casting

+

Performs unsafe casting. One should only use it in combination with +the subtype relation above.

+
k
syntax Val ::= unsafeCast(Val,Type) [function] + + rule unsafeCast(objectClosure(_,EStack), C:Id) + => objectClosure(C,EStack) + + rule unsafeCast(methodClosure(_T',C,OL,Ps,S), T) => methodClosure(T,C,OL,Ps,S) + + rule unsafeCast(V:Val, T:Type) => V requires typeOf(V) ==K T +
+

Generic guard

+

A generic computational guard: it allows the computation to continue +only if a prefix guard evaluates to true.

+
k
syntax KItem ::= "true?" + rule true ~> true? => .K + +endmodule +
+

Go to Lesson 3, KOOL typed static.

+
+
+ + + +
+ +
+
diff --git a/k-distribution/pl-tutorial/2_languages/2_kool/2_typed/2_static/NOTES/index.html b/k-distribution/pl-tutorial/2_languages/2_kool/2_typed/2_static/NOTES/index.html new file mode 100644 index 00000000000..d65737bb2b5 --- /dev/null +++ b/k-distribution/pl-tutorial/2_languages/2_kool/2_typed/2_static/NOTES/index.html @@ -0,0 +1,388 @@
+
+
+ + +
+ +
+

Why is the following happening at line 347? It should infer the sort Stmts for S:

+

rule <task> <k> {S} => block ...</k> <tenv> Rho </tenv> R </task>
     (.Bag => <task> <k> S </k> <tenv> Rho </tenv> R </task>)

+

[Error] Critical: Could not infer a sort for variable 'S' to match every location.

+

Similarly at line 517.

+
+
+ + +
+ +
+
+ +
+
diff --git a/k-distribution/pl-tutorial/2_languages/2_kool/2_typed/2_static/kool-typed-static/index.html b/k-distribution/pl-tutorial/2_languages/2_kool/2_typed/2_static/kool-typed-static/index.html new file mode 100644 index 00000000000..219f502a659 --- /dev/null +++ b/k-distribution/pl-tutorial/2_languages/2_kool/2_typed/2_static/kool-typed-static/index.html @@ -0,0 +1,1432 @@
+
+
+ + +
+ +
+

KOOL — Typed — Static

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K static semantics of the typed KOOL language. It extends the static semantics of typed SIMPLE with static semantics for the object-oriented constructs. Also, the static semantics of some of the existing SIMPLE constructs needs to change, in order to become more generous with regard to the set of accepted programs, mostly due to subtyping. For example, the assignment construct x = e required that both the variable x and the expression e had the same type in SIMPLE. In KOOL, the type of e can be a subtype of the type of x. Specifically, we define the following typing policy for KOOL, with everything not mentioned below borrowing its semantics from SIMPLE:

+
    +
  • +

    Each class C yields a homonymous type, which can be +explicitly used in programs to type variables and methods, possibly in +combination with other types.

    +
  • +
  • +

    Since now we have user-defined types, we check that each type +used in a KOOL program is well-formed, that is, it is constructed only +from primitive and class types corresponding to declared classes.

    +
  • +
  • +

    Class members and their types form a class type +environment. Each class will have such a type environment. +Each member in a class is allowed to be declared only once. Since in +KOOL we allow methods to be assigned to fields, we make no distinction +between field and method members; in other words, we reject programs +declaring both a field and a method with the same name.

    +
  • +
  • +

    If an identifier is not found in the local type environment, it +will be searched for in the current class type environment. If not +there, then it will be searched for in its superclass' type +environment. And so on and so forth. If not found until the +Object class is reached, a typing error is reported.

    +
  • +
  • +

    The assignment allows variables to be assigned values of +more concrete types. The result type of the assignment expression +construct will be the (more abstract) type of the assigned variable, +and not the (more concrete) type of the expression, like in Java.

    +
  • +
  • +

    Exceptions are changed (from SIMPLE) to allow throwing and +catching only objects, like in Java. Also, unlike in SIMPLE, we do +not check whether the type of the thrown exception matches the type of +the caught variable, because exceptions can be caught by other +try/catch blocks, even by ones in other methods. To avoid +having to annotate each method with what exceptions it can throw, we +prefer to not check the type safety of exceptions (although this is an +excellent homework!). We only check that the try block +type-checks and that the catch block type-checks after we bind +the caught variable to its claimed type.

    +
  • +
  • +

    Class declarations are not allowed to have any cycles in their +extends relation. Such cycles would lead to non-termination of +new, as it actually does in the dynamic semantics of KOOL +where no such circularity checks are performed.

    +
  • +
  • +

    Methods overriding other methods should be in the right subtyping +relationship with the overridden methods: co-variant in the codomain +and contra-variant in the domain.

    +
  • +
+
k
module KOOL-TYPED-STATIC-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

The syntax of statically typed KOOL is identical to that of dynamically typed KOOL; both take the same programs as input. What differs is the K strictness attributes. Like in statically typed SIMPLE, almost all language constructs are now strict, since we want each construct to type its arguments almost all the time. Like in the other two KOOL definitions, we prefer to copy and then modify/extend the syntax of statically typed SIMPLE.

+

Note: This paragraph is old, now we can do things better. We keep +it here only for historical reasons, to see how much we used to suffer :)

+

Annoying K-tool technical problem:
Currently, the K tool treats the "non-terminal" productions (i.e., productions consisting of just one non-terminal), also called "subsorting" productions, differently from the other productions. Specifically, it does not insert a node in the AST for them. This may look desirable at first, but it has a big problem: it does not allow us to treat the subsort differently in different contexts. For example, since we want Id to be both a type (a class name) and a program variable, and since we want expressions to reduce to their types, we are in an impossible situation in which we do not know how to treat an identifier in the semantics: as a type, i.e., a result of computations, or as a program variable, i.e., a non-result. Ideally, we would like to tag the identifiers at parse-time with their local interpretation, but that, unfortunately, is not possible with the current parsing capabilities of the K tool, because it requires inserting additional information in the AST for the subsort productions. This will be fixed soon. Until then, unfortunately, we have to do the job of the parser manually. Instead of subsorting Id directly to Type, we "wrap" it first, say with a wrapper called class(...), exactly how the parser should have done. The major drawback of this is that all the typed KOOL programs in kool/typed/programs need to also be modified to always declare class types accordingly. The modified programs can be found in kool/typed/static/programs. So make sure you execute the static semantics of KOOL using the modified programs. To avoid seeing the wrapper in the generated documentation, we associate an "invisibility" latex attribute with it below.

+
k
syntax Id ::= "Object" [token] | "Main" [token] +
+

Types

+
k
syntax Type ::= "void" | "int" | "bool" | "string" + | Id [klabel("class"), symbol, avoid] // see next + | Type "[" "]" + | "(" Type ")" [bracket] + > Types "->" Type + + syntax Types ::= List{Type,","} [overload(exps)] +
+

Declarations

+
k
syntax Param ::= Type Id + syntax Params ::= List{Param,","} + + syntax Stmt ::= Type Exps ";" [avoid] + | Type Id "(" Params ")" Block + | "class" Id Block + | "class" Id "extends" Id Block +
+

Expressions

+
k
syntax FieldReference ::= Exp "." Id [strict(1)] + syntax ArrayReference ::= Exp "[" Exps "]" [strict] + + syntax Exp ::= Int | Bool | String | Id + | "this" + | "super" + | "(" Exp ")" [bracket] + | "++" Exp + | Exp "instanceOf" Id [strict(1)] + | "(" Id ")" Exp [strict(2)] + | "new" Id "(" Exps ")" [strict(2)] + > Exp "(" Exps ")" [strict] + | "-" Exp [strict] + | "sizeOf" "(" Exp ")" [strict] + | "read" "(" ")" + > left: + Exp "*" Exp [strict, left] + | Exp "/" Exp [strict, left] + | Exp "%" Exp [strict, left] + > left: + Exp "+" Exp [strict, left] + | Exp "-" Exp [strict, left] + > non-assoc: + Exp "<" Exp [strict, non-assoc] + | Exp "<=" Exp [strict, non-assoc] + | Exp ">" Exp [strict, non-assoc] + | Exp ">=" Exp [strict, non-assoc] + | Exp "==" Exp [strict, non-assoc] + | Exp "!=" Exp [strict, non-assoc] + > "!" Exp [strict] + > left: + Exp "&&" Exp [strict, left] + | Exp "||" Exp [strict, left] + > "spawn" Block // not strict: to check return and exceptions + > Exp "=" Exp [strict(2), right] + + syntax Exp ::= FieldReference | ArrayReference + syntax priority _.__KOOL-TYPED-STATIC-SYNTAX > _[_]_KOOL-TYPED-STATIC-SYNTAX > _(_)_KOOL-TYPED-STATIC-SYNTAX + + syntax Exps ::= List{Exp,","} [strict, overload(exps)] +
+

Statements

+
k
syntax Block ::= "{" "}" + | "{" Stmt "}" + + syntax Stmt ::= Block + | Exp ";" [strict] + | "if" "(" Exp ")" Block "else" Block [avoid, strict] + | "if" "(" Exp ")" Block [macro] + | "while" "(" Exp ")" Block [strict] + | "for" "(" Stmt Exp ";" Exp ")" Block [macro] + | "return" Exp ";" [strict] + | "return" ";" + | "print" "(" Exps ")" ";" [strict] + | "try" Block "catch" "(" Param ")" Block [strict(1)] + | "throw" Exp ";" [strict] + | "join" Exp ";" [strict] + | "acquire" Exp ";" [strict] + | "release" Exp ";" [strict] + | "rendezvous" Exp ";" [strict] + + syntax Stmt ::= Stmt Stmt [seqstrict, right] +
+

Desugaring macros

+
k
rule if (E) S => if (E) S else {} + rule for(Start Cond; Step) {S:Stmt} => {Start while(Cond){S Step;}} + rule T:Type E1:Exp, E2:Exp, Es:Exps; => T E1; T E2, Es; [anywhere] + rule T:Type X:Id = E; => T X; X = E; [anywhere] + + rule class C:Id S => class C extends Object S + +endmodule +
+

Static semantics

+

We first discuss the configuration, then give the static semantics taken over unchanged from SIMPLE, then discuss the static semantics of the SIMPLE syntactic constructs that need to change, and in the end we discuss the static semantics and additional checks specifically related to KOOL's own syntax.

+
k
module KOOL-TYPED-STATIC + imports KOOL-TYPED-STATIC-SYNTAX + imports DOMAINS +
+

Configuration

+

The configuration of our type system consists of a tasks cell with the same meaning as in statically typed SIMPLE, of an output cell streamed to the standard output that will be used to display typing error messages, and of a cell classes holding data about each class in a separate class cell. The task cells now have two additional optional subcells, namely ctenvT and inClass. The former holds a temporary class type environment; its contents will be transferred into the ctenv cell of the corresponding class as soon as all the fields and methods in the task are processed. In fact, there will be three types of tasks in the subsequent semantics, each determined by the subset of cells that it holds:

+
    +
  1. +

    Main task, holding only a k cell holding the +original program as a set of classes. The role of this task is to +process each class, generating a class task (see next) for each.

    +
  2. +
  3. +

    Class task, holding k, ctenvT, and +inClass subcells. The role of this task type is to process +a class' contents, generating a class type environment in the +ctenvT cell and a method task (see next) for each method in +the class. To avoid interference with object member lookup rules +below, it is important to add the class type environment to a class +atomically; this is the reason for which we use ctenvT +temporary cells within class tasks (instead of adding each member +incrementally to the class' type environment).

    +
  4. +
  5. +

    Method task, holding k, tenv and returnType cells. These tasks are similar to SIMPLE's function tasks, so we do not discuss them here any further.

    +
  6. +
+

Each class cell holds its name (in the className cell) and the name of the class it extends (in the baseClass cell), as well as its type environment (in the ctenv cell) and the set of all its superclasses (in the baseClasses cell). The latter is useful, for example, for checking whether there are cycles in the class extends relation.

+
k
configuration <T multiplicity="?" color="yellow"> + <tasks color="orange" multiplicity="?"> + <task multiplicity="*" color="yellow" type="Set"> + <k color="green"> $PGM:Stmt </k> + <tenv multiplicity="?" color="cyan"> .Map </tenv> + <ctenvT multiplicity="?" color="blue"> .Map </ctenvT> + <returnType multiplicity="?" color="black"> void </returnType> + <inClass multiplicity="?" color="Fuchsia"> .K </inClass> + </task> + </tasks> +// <br/> + <classes color="Fuchsia"> + <classData multiplicity="*" type="Map"> + <className color="Fuchsia"> Object </className> + <baseClass color="Fuchsia"> .K </baseClass> + <baseClasses color="Fuchsia"> .Set </baseClasses> + <ctenv multiplicity="?" color="blue"> .Map </ctenv> + </classData> + </classes> + </T> + <output color="brown" stream="stdout"> .List </output> +
+

Unchanged semantics from statically typed SIMPLE

+

The syntax and rules below are borrowed unchanged from statically +typed SIMPLE, so we do not discuss them much here.

+
k
syntax Exp ::= Type + syntax Exps ::= Types + syntax BlockOrStmtType ::= "block" | "stmt" + syntax Type ::= BlockOrStmtType + syntax Block ::= BlockOrStmtType + syntax KResult ::= Type + | Types // TODO: should not be needed + + + context _:Type _::Exp[HOLE::Exps]; + + rule T:Type E:Exp[int,Ts:Types]; => T[] E[Ts]; + rule T:Type E:Exp[.Types]; => T E; + + + rule <task>... <k> _:BlockOrStmtType </k> <tenv> _ </tenv> ...</task> => .Bag + + + rule _:Int => int + rule _:Bool => bool + rule _:String => string + + + rule <k> X:Id => T ...</k> <tenv>... X |-> T ...</tenv> + + + context ++(HOLE => ltype(HOLE)) + rule ++ int => int + rule int + int => int + rule string + string => string + rule int - int => int + rule int * int => int + rule int / int => int + rule int % int => int + rule - int => int + rule int < int => bool + rule int <= int => bool + rule int > int => bool + rule int >= int => bool + rule T:Type == T => bool + rule T:Type != T => bool + rule bool && bool => bool + rule bool || bool => bool + rule ! bool => bool + + + rule (T[])[int, Ts:Types] => T[Ts] + rule T:Type[.Types] => T + + rule sizeOf(_T[]) => int + + + rule read() => int + + rule print(T:Type, Ts => Ts); requires T ==K int orBool T ==K string + rule print(.Types); => stmt + + + context (HOLE => ltype(HOLE)) = _ + + + rule <k> return; => stmt ...</k> <returnType> _ </returnType> + + + rule {} => block + + rule <task> <k> {S:Stmt} => block ...</k> <tenv> Rho </tenv> R </task> + (.Bag => <task> <k> S </k> <tenv> Rho </tenv> R </task>) + + rule _:Type; => stmt + rule if (bool) block else block => stmt + rule while (bool) block => stmt + + rule join int; => stmt + rule acquire _:Type; => stmt + rule release _:Type; => stmt + rule rendezvous _:Type; => stmt + + syntax Stmt ::= BlockOrStmtType + rule _:BlockOrStmtType _:BlockOrStmtType => stmt +
+

Unchanged auxiliary operations from dynamically typed SIMPLE

+
k
syntax Stmt ::= mkDecls(Params) [function] + rule mkDecls(T:Type X:Id, Ps:Params) => T X; mkDecls(Ps) + rule mkDecls(.Params) => {} + + syntax LValue ::= Id + | FieldReference + | ArrayReference + syntax Exp ::= LValue + + syntax Exp ::= ltype(Exp) +// We would like to say: +// context ltype(HOLE:LValue) +// but we currently cannot type the HOLE + context ltype(HOLE) requires isLValue(HOLE) + +// OLD approach: +// syntax Exp ::= ltype(Exp) [function] +// rule ltype(X:Id) => X +// rule ltype(E:Exp [Es:Exps]) => E[Es] + + syntax Types ::= getTypes(Params) [function] + rule getTypes(T:Type _:Id) => T, .Types + rule getTypes(T:Type _:Id, P, Ps) => T, getTypes(P,Ps) + rule getTypes(.Params) => void, .Types +
+

Changes to the existing statically typed SIMPLE semantics

+

Below we give the new static semantics for language constructs that +come from SIMPLE, but whose SIMPLE static semantics was too +restrictive or too permissive and thus had to change.

+

Local variable declaration

+

Since we can define new types in KOOL (corresponding to classes), the +variable declaration needs to now check that the claimed types exist. +The operation checkType, defined at the end of this module, +checks whether the argument type is correct (it actually works with +lists of types as well).

+
k
rule <k> T:Type X:Id; => checkType(T) ~> stmt ...</k> + <tenv> Rho => Rho[X <- T] </tenv> +
+

Class member declaration

+

In class tasks, variable declarations mean class member declarations. +Since we reduce method declarations to variable declarations (see +below), a variable declaration in a class task can mean either a field +or a method declaration. Unlike local variable declarations, which +can shadow previous homonymous local or member declarations, member +declarations are regarded as a set, so we disallow multiple +declarations for the same member (one could improve upon this, like in +Java, by treating members with different types or number of arguments +as different, etc., but we do not do it here). We also issue an error +message if one attempts to redeclare the same class member. The +framed variable declaration in the second rule below should be read +"stuck". In fact, it is nothing but a unary operation called +stuck, which takes a K-term as argument and does nothing +with it; this stuck operation is displayed as a frame in this +PDF document because of its latex attribute (see the ASCII .k file, +at the end of this module).

+
k
rule <k> T:Type X:Id; => checkType(T) ~> stmt ...</k> + <ctenvT> Rho (.Map => X |-> T) </ctenvT> + requires notBool(X in keys(Rho)) + + rule <k> T:Type X:Id; => stuck(T X;) ...</k> + <ctenvT>... X |-> _ ...</ctenvT> + <inClass> C:Id </inClass> +// <br/> + <output>... .List => ListItem("Member \"" +String Id2String(X) + +String "\" declared twice in class \"" + +String Id2String(C) +String "\"!\n") </output> +
+
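For example, a hypothetical class like the one below is rejected by the second rule, because (methods being reduced to member declarations, as explained next) the field f and the method f count as the same member:

    class C {
    +  int f;
    +  bool f() { return true; }   // rejected: member f declared twice in class C
    +}
    +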

Method declaration

+

A method declaration requires two conceptual checks to be performed: +first, that the method's type is consistent with the type of the +homonymous method that it overrides, if any; and second, that its body +types correctly. At the same time, it should also be added to the +type environment of its class. The first conceptual task is performed +using the checkMethod operation defined below, and the second +by generating a corresponding method task. To add it to the class +type environment, we take advantage of the fact that KOOL is higher +order and reduce the problem to a field declaration problem, which we +have already defined. The role of the ctenvT cell in the +rule below is to structurally ensure that the method declaration takes +place in a class task (we do not want to allow methods to be declared, +for example, inside other methods).

+
k
rule <k> T:Type F:Id(Ps:Params) S + => checkMethod(F, getTypes(Ps)->T, C') + ~> getTypes(Ps)->T F; ...</k> +// <br/> + <inClass> C </inClass> + <ctenvT> _ </ctenvT> // to ensure we are in a class pass + <className> C </className> + <baseClass> C' </baseClass> +// <br/> + (.Bag => <task> + <k> mkDecls(Ps) S </k> + <inClass> C </inClass> + <tenv> .Map </tenv> + <returnType> T </returnType> + </task>) +
+

Assignment

+

A more concrete value is allowed to be assigned to a more abstract +variable. The operation checkSubtype is defined at the end +of the module and it also works with pairs of lists of types.

+
k
rule T:Type = T':Type => checkSubtype(T', T) ~> T +
+
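For instance, assuming a hypothetical class Circle that extends a class Shape, and recalling that a class type C is represented internally as class(C), an assignment whose left-hand side types to class(Shape) and whose right-hand side types to class(Circle) reduces as follows:

    class(Shape) = class(Circle)
    +  => checkSubtype(class(Circle), class(Shape)) ~> class(Shape)
    +  => class(Shape)   // the whole assignment has the (more abstract) type of the variable
    +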

Method invocation and return

+

Methods can be applied to values of more concrete types than their declared argument types:

+
k
rule (Ts:Types -> T:Type) (Ts':Types) => checkSubtype(Ts',Ts) ~> T +
+

Similarly, we allow values of more concrete types to be returned by +methods:

+
k
rule <k> return T:Type; => checkSubtype(T,T') ~> stmt ...</k> + <returnType> T':Type </returnType> +
+
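For example (same hypothetical Circle/Shape classes), a method of type class(Shape) -> int applied to an argument that types to class(Circle) reduces to int once the subtype check succeeds; likewise, a return e; inside a method declared to return class(Shape) type-checks when e types to class(Circle):

    (class(Shape) -> int) (class(Circle))   // => checkSubtype(class(Circle), class(Shape)) ~> int
    +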

Exceptions

+

Exceptions can throw and catch values of any type. Since, unlike in Java, KOOL's methods do not declare the exception types that they can throw, we cannot test the full type safety of exceptions. Instead, we only check that the try and the catch statements type correctly.

+
k
rule try block catch(T:Type X:Id) S => {T X; S} + rule throw _T:Type ; => stmt +
+

Spawn

+

The spawned task cell also needs to be passed the parent's class.

+
k
// explain why + + rule <k> spawn S:Block => int ...</k> + <tenv> Rho </tenv> + <inClass> C </inClass> + (.Bag => <task> + <k> S </k> + <tenv> Rho </tenv> + <inClass> C </inClass> + </task>) +
+

Semantics of the new KOOL constructs

+

Class declaration

+

We process each class in the main task, adding the corresponding data +into its class cell and also adding a class task for it. We +also perform some well-formedness checks on the class hierarchy.

+

Initiate class processing
+We create a class cell and a class task for each task. Also, we start +the class task with a check that the class it extends is declared +(this delays the task until that class is processed using another +instance of this rule).

+
k
// There seems to be some error with the configuration concretization, +// as the rule below does not work when rewriting . to both the task +// and the class cells; I had to include two separate . rewrites + +// TODO: the following fails krun; see #2117 + rule <task> <k> class C:Id extends C':Id { S:Stmt } => stmt ...</k> </task> + (.Bag => <classData>... + <className> C </className> + <baseClass> C' </baseClass> + ...</classData>) +// <br/> + (.Bag => <task> + <k> checkType(`class`(C')) ~> S </k> + <inClass> C </inClass> + <ctenvT> .Map </ctenvT> + </task>) + +// You may want to try the thing below, but that failed, too +/* +syntax Type ::= "stmtStop" + + rule <tasks>... + <task> <k> class C:Id extends C':Id { S:Stmt } => stmtStop ...</k> </task> + (.Bag => <task> + <k> checkType(`class`(C')) ~> S </k> + <inClass> C </inClass> + <ctenvT> .Map </ctenvT> + </task>) + ...</tasks> + <classes>... + .Bag => <classData>... + <className> C </className> + <baseClass> C' </baseClass> + ...</classData> + ...</classes> +// <br/> +*/ +
+

Check for unique class names

+
k
rule (<T>... + <className> C </className> + <className> C </className> + ...</T> => .Bag) + <output>... .List => ListItem("Class \"" +String Id2String(C) + +String "\" declared twice!\n") </output> +
+

Check for cycles in class hierarchy
+We check for cycles in the class hierarchy by transitively closing the +class extends relation using the extendsAll cells, and +checking that a class will never appear in its own extendsAll +cell. The first rule below initiates the transitive closure of the +superclass relation, the second transitively closes it, and the third +checks for cycles.

+
k
rule <baseClass> C </baseClass> + <baseClasses> .Set => SetItem(C) </baseClasses> [priority(25)] + + rule <classData>... + <baseClasses> SetItem(C) Cs:Set (.Set => SetItem(C')) </baseClasses> + ...</classData> + <classData>... <className>C</className> <baseClass>C'</baseClass> ...</classData> + requires notBool(C' in (SetItem(C) Cs)) [priority(25)] + + rule (<T>... + <className> C </className> + <baseClasses>... SetItem(C) ...</baseClasses> + ...</T> => .Bag) + <output>... .List => ListItem("Class \"" +String Id2String(C) + +String "\" is in a cycle!\n") </output> + [priority(25)] +
+
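For example, a hypothetical program containing the two declarations below makes each class end up in the other's baseClasses cell, so the third rule fires and reports the cycle (for A or for B, depending on which is detected first):

    class A extends B { int a; }
    +class B extends A { int b; }   // rejected: the cycle A -> B -> A is reported
    +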

New

+

To type new we only need to check that the class constructor +can be called with arguments of the given types, so we initiate a call +to the constructor method in the corresponding class. If that +succeeds, meaning that it types to stmt, then we discard the +stmt type and produce instead the corresponding class type of +the new object. The auxiliary discard operation is defined +also at the end of this module.

+
k
rule new C:Id(Ts:Types) => `class`(C) . C (Ts) ~> discard ~> `class`(C) +
+

Self reference

+

The typing rule for this is straightforward: reduce to the +current class type.

+
k
rule <k> this => `class`(C) ...</k> + <inClass> C:Id </inClass> +
+

Super

+

Similarly, super types to the parent class type. +Note that for typing concerns, super can be considered as an object +(recall that this was not the case in the dynamic semantics).

+
k
rule <k> super => `class`(C') ...</k> + <inClass> C:Id </inClass> + <className> C </className> + <baseClass> C':Id </baseClass> +
+

Object member access

+

There are several cases to consider here. First, if we are in a class task, we should look up the member in the temporary class type environment in cell ctenvT. That is because we want to allow initialized field declarations in classes, such as int x=10;. This is desugared to a declaration of x, which is added to ctenvT during the class task processing, followed by an assignment of x to 10. In order for the assignment to type check, we need to know that x has been declared with type int; this information can only be found in the ctenvT cell. Second, we should redirect non-local variable lookups in method tasks to corresponding member accesses (the local variables are handled by the rule borrowed from SIMPLE). This is what the second rule below does. Third, we should allow object member accesses as lvalues, which is done by the third rule below. These last two rules therefore ensure that each necessary object member access is explicitly allowed for evaluation. Recall from the annotated syntax module above that the member access operation is strict in the object. That means that the object is expected to evaluate to a class type. The next two rules below define the actual member lookup operation, moving the search to the superclass when the member is not found in the current class. Note that this works because we create the class type environments atomically; thus, a class either has its complete type environment available, in which case these rules can safely apply, or its ctenv cell is not yet available, in which case these rules have to wait. Finally, the last rule below reports an error when the Object class is reached.

+
k
rule <k> X:Id => T ...</k> + <ctenvT>... X |-> T ...</ctenvT> + + rule <k> X:Id => this . X ...</k> + <tenv> Rho </tenv> + requires notBool(X in keys(Rho)) + +// OLD approach: +// rule ltype(E:Exp . X:Id) => E . X + + rule <k> `class`(C:Id) . X:Id => T ...</k> + <className> C </className> + <ctenv>... X |-> T:Type ...</ctenv> + + rule <k> `class`(C1:Id => C2) . X:Id ...</k> + <className> C1 </className> + <baseClass> C2:Id </baseClass> + <ctenv> Rho </ctenv> + requires notBool(X in keys(Rho)) + + rule <k> `class`(Object) . X:Id => stuck(`class`(Object) . X) ...</k> + <inClass> C:Id </inClass> +// <br/> + <output>... .List => ListItem("Member \"" +String Id2String(X) + +String "\" not declared! (see class \"" + +String Id2String(C) +String "\")\n") </output> +
+

Instance of and casting

+

As it is hard to check statically whether casting is always safe, +the programmer is simply trusted from a typing perspective. We only +do some basic upcasting and downcasting checks, to reject casts which +will absolutely fail. However, dynamic semantics or implementations +of the language need to insert runtime checks for downcasting to be safe.

+
k
rule `class`(_C1:Id) instanceOf _C2:Id => bool + rule (C:Id) `class`(C) => `class`(C) + rule <k> (C2:Id) `class`(C1:Id) => `class`(C2) ...</k> + <className> C1 </className> + <baseClasses>...SetItem(C2)...</baseClasses> // upcast + rule <k> (C2:Id) `class`(C1:Id) => `class`(C2) ...</k> + <className> C2 </className> + <baseClasses>...SetItem(C1)...</baseClasses> // downcast + rule <k> (C2) `class`(C1:Id) => stuck((C2) `class`(C1)) ...</k> + <classData>... + <className> C1 </className> + <baseClasses> S1 </baseClasses> + ...</classData> + <classData>... + <className> C2 </className> + <baseClasses> S2 </baseClasses> + ...</classData> + <output>... .List => ListItem("Classes \"" +String Id2String(C1) + +String "\" and \"" +String Id2String(C2) + +String "\" are incompatible!\n") </output> + requires notBool(C1 in S2) andBool notBool(C2 in S1) +
+

Cleanup tasks

+

Finally, we need to clean up the terminated tasks. Each of the three types of tasks is handled differently. The main task is replaced by a method task holding new Main();, which will ensure that a Main class with a Main() method actually exists (first rule below). A class task moves its temporary class type environment into its class' cell, and then it dissolves itself (second rule). A method task simply dissolves when terminated (third rule); the presence of the tenv cell in that rule ensures that that task is a method task. Finally, when all the tasks are cleaned up, we can also remove the tasks cell, issuing a corresponding message. Note that checking for cycles or duplicate methods can still be performed after the tasks cell has been removed.

+
k
// discard main task when done, issuing a "new main();" command to +// make sure that the class main and the method main() are declared. + + rule <task> <k> stmt => new Main(.Exps); </k> + (.Bag => <tenv> .Map </tenv> + <returnType> void </returnType> + <inClass> Main </inClass>) + </task> + +// discard class task when done, adding a ctenv in class + + rule (<task> + <k> stmt </k> + <ctenvT> Rho </ctenvT> + <inClass> C:Id </inClass> + </task> => .Bag) + <className> C </className> + (.Bag => <ctenv> Rho </ctenv>) + +// discard method task when done + + rule <task>... + <k> stmt </k> + <tenv> _ </tenv> // only to ensure that this is a method task + ...</task> => .Bag + +// cleanup tasks and output a success message when done + + rule (<T>... <tasks> .Bag </tasks> ...</T> => .Bag) + <output>... .List => ListItem("Type checked!\n") </output> +
+

KOOL-specific auxiliary declarations and operations

+

Subtype checking

+

The subclass relation introduces a subtyping relation.

+
k
syntax KItem ::= checkSubtype(Types,Types) + + rule checkSubtype(T:Type, T) => .K + + rule <k> checkSubtype(`class`(C:Id), `class`(C':Id)) => .K ...</k> + <className> C </className> + <baseClasses>... SetItem(C') ...</baseClasses> + + rule checkSubtype(Ts1->T2,Ts1'->T2') + => checkSubtype(((T2)::Type,Ts1'),((T2')::Type,Ts1)) + +// note that the following rule would be wrong! +// rule checkSubtype(T[],T'[]) => checkSubtype(T,T') + + rule checkSubtype((T:Type,Ts),(T':Type,Ts')) + => checkSubtype(T,T') ~> checkSubtype(Ts,Ts') + requires Ts =/=K .Types + + rule checkSubtype(.Types,.Types) => .K + rule checkSubtype(.Types,void) => .K +
+

Checking well-formedness of types

+

Since any Id can now be used as a class type, we need to check that the types used in the program actually exist.

+
k
syntax KItem ::= checkType(Types) + + rule checkType(T:Type,Ts:Types) => checkType(T) ~> checkType(Ts) + requires Ts =/=K .Types + rule checkType(.Types) => .K + rule checkType(int) => .K + rule checkType(bool) => .K + rule checkType(string) => .K + rule checkType(void) => .K + rule <k> checkType(`class`(C:Id)) => .K ...</k> <className> C </className> + rule checkType(`class`(Object)) => .K + rule checkType(Ts:Types -> T:Type) => checkType(T,Ts) + rule checkType(T:Type[]) => checkType(T) +
+
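For instance, assuming a class Shape has been declared, a compound type is decomposed by the rules above roughly as follows (shown only as an illustration of how checkType recurses):

    checkType(class(Shape)[] -> void)
    +  => checkType(void, class(Shape)[])
    +  => checkType(void) ~> checkType(class(Shape)[])
    +  => checkType(class(Shape))   // dissolves once the class cell for Shape is found
    +  => .K
    +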

Checking correct overriding of methods

+

The checkMethod operation below searches to see whether +the current method overrides some other method in some superclass. +If yes, then it issues an additional check that the new method's type +is more concrete than the overridden method's. The types T and T' +below can only be function types. See the definition of +checkSubtype on function types at the end of this module (it +is co-variant in the codomain and contra-variant in the domain).

+
k
syntax KItem ::= checkMethod(Id,Type,Id) + + rule <k> checkMethod(F:Id, T:Type, C:Id) => checkSubtype(T, T') ...</k> + <className> C </className> + <ctenv>... F |-> T':Type ...</ctenv> + + rule <k> checkMethod(F:Id, _T:Type, (C:Id => C')) ...</k> + <className> C </className> + <baseClass> C':Id </baseClass> + <ctenv> Rho </ctenv> + requires notBool(F in keys(Rho)) + + rule checkMethod(_:Id,_,Object) => .K +
+
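As an illustration, assume hypothetical classes where Circle extends Shape (declared elsewhere). The override below is accepted: the overriding method's type class(Shape) -> class(Circle) is a subtype of the overridden class(Circle) -> class(Shape), being co-variant in the result and contra-variant in the argument; swapping the two signatures would make checkMethod fail.

    class A {
    +  Shape m(Circle c) { return c; }
    +}
    +class B extends A {
    +  Circle m(Shape s) { return (Circle) s; }   // OK: more concrete result, more abstract argument
    +}
    +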

Generic operations which could be part of the K framework

+
k
syntax KItem ::= stuck(K) + + syntax KItem ::= "discard" + rule _:KResult ~> discard => .K + +endmodule +
+
+
+ + + +
+ +
+
diff --git a/k-distribution/pl-tutorial/2_languages/3_fun/1_untyped/1_environment/fun-untyped/index.html b/k-distribution/pl-tutorial/2_languages/3_fun/1_untyped/1_environment/fun-untyped/index.html new file mode 100644 index 00000000000..00663237a22 --- /dev/null +++ b/k-distribution/pl-tutorial/2_languages/3_fun/1_untyped/1_environment/fun-untyped/index.html @@ -0,0 +1,1233 @@
+
+
+ + +
+ +
+

FUN — Untyped — Environment

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K semantic definition of the untyped FUN language. +FUN is a pedagogical and research language that captures the essence +of the functional programming paradigm, extended with several features +often encountered in functional programming languages. +Like many functional languages, FUN is an expression language, that +is, everything, including the main program, is an expression. +Functions can be declared anywhere and are first class values in the +language. +FUN is call-by-value here, but it has been extended (as student +homework assignments) with other parameter-passing styles. +To make it more interesting and to highlight some of K's strengths, +FUN includes the following features:

+
    +
  • +

    The basic builtin data-types of integers, booleans and strings.

    +
  • +
  • +

    Builtin lists, which can hold any elements, including other lists. +Lists are enclosed in square brackets and their elements are +comma-separated; e.g., [1,2,3].

    +
  • +
  • +

    User-defined data-types, by means of constructor terms. +Constructor names start with a capital letter (while any other +identifier in the language starts with a lowercase letter), and they +can be followed by an arbitrary number of comma-separated arguments +enclosed in parentheses; parentheses are not needed when the +constructor takes no arguments. +For example, Pair(5,7) is a constructor term holding two +numbers, Cons(1,Cons(2,Cons(3,Nil))) is a list-like +constructor term holding 3 elements, and +Tree(Tree(Leaf(1), Leaf(2)), Leaf(3)) is a tree-like +constructor term holding 3 elements. +In the untyped version of the FUN language, no type checking or +inference is performed to ensure that the data constructors are used +correctly. +The execution will simply get stuck when they are misused. +Moreover, since no type checking is performed, the data-types are not +even declared in the untyped version of FUN.

    +
  • +
  • +

    Functions and let/letrec binders can take +multiple space-separated arguments, but these are desugared to +ones that only take one argument, by currying. For example, the +expressions

    +
    fun x y -> x y
    +let x y = y in x
    +
    +

    are desugared, respectively, into the following expressions:

    +
    fun x -> fun y -> x y
    +let x = fun y -> y in x
    +
    +
  • +
  • +

    Functions can be defined using pattern matching over the +available data-types. For example, the program

    +
    letrec max = fun [h] -> h
    +             |   [h|t] -> let x = max t
    +                          in  if h > x then h else x
    +in max [1, 3, 5, 2, 4, 0, -1, -5]
    +
    +

    defines a function max that calculates the maximum element of +a non-empty list, and the function

    +
    letrec ack = fun Pair(0,n) -> n + 1
    +             |   Pair(m,0) -> ack Pair(m - 1, 1)
    +             |   Pair(m,n) -> ack Pair(m - 1, ack Pair(m, n - 1))
    +in ack Pair(2,3)
    +
    +

    calculates the Ackermann function applied to a particular pair of numbers. +Patterns can be nested. Patterns can currently only be used in function +definitions, and not directly in let/letrec binders. +For example, this is not allowed:

    +
    letrec Pair(x,y) = Pair(1,2) in x+y
    +
    +

    But this is allowed:

    +
    let f Pair(x,y) = x+y in f Pair(1,2)
    +
    +

    because it is first reduced to

    +
    let f = fun Pair(x,y) -> x+y in f Pair(1,2)
    +
    +

    by uncurrying of the let binder, and pattern matching is +allowed in function arguments.

    +
  • +
  • +

    We include a callcc construct, for two reasons: first, +several functional languages support this construct; second, some +semantic frameworks have difficulties defining it. Not K.

    +
  • +
  • +

    Finally, we include mutables by means of referencing an +expression, getting the reference of a variable, dereferencing and +assignment. We include these for the same reasons as above: there are +languages which have them, and they are not easy to define in some +semantic frameworks.

    +
  • +
+

Like in many other languages, some of FUN's constructs can be +desugared into a smaller set of basic constructs. We do that as usual, +using macros, and then we only give semantics to the core constructs.

+

Note:
+We recommend the reader to first consult the dynamic semantics of the +LAMBDA++ language in the first part of the K Tutorial. +To keep the comments below small and focused, we will not re-explain +functional or K features that have already been explained in there.

+

Syntax

+
k
//require "modules/pattern-matching.k" + +module FUN-UNTYPED-COMMON + imports DOMAINS-SYNTAX +
+

FUN is an expression language. The constructs below fall into +several categories: names, arithmetic constructs, conventional +functional constructs, patterns and pattern matching, data constructs, +lists, references, and call-with-current-continuation (callcc). +The arithmetic constructs are standard; they are present in almost all +our K language definitions. The meaning of FUN's constructs is +discussed in more depth when we define their semantics in the next +module.

+

The Syntactic Constructs

+

We start with the syntactic definition of FUN names. +We have several categories of names: ones to be used for functions and +variables, others to be used for data constructors, others for types and +others for type variables. We will introduce them as needed, starting +with the former category. We prefer the names of variables and functions +to start with lower case letters. We take the freedom to tacitly introduce +syntactic lists/sequences for each nonterminal for which we need them:

+
k
syntax Name [token] + syntax Names ::= List{Name,","} [overload(exps)] +
+

Expression constructs will be defined throughout the syntax module. +Below are the very basic ones, namely the builtins, the names, and the +parentheses used as brackets for grouping. Lists of expressions are +declared strict, so all expressions in the list get evaluated whenever +the list is in a position which can be evaluated:

+
k
syntax Exp ::= Int | Bool | String | Name + | "(" Exp ")" [bracket] + syntax Exps ::= List{Exp,","} [strict, overload(exps)] + syntax Val + syntax Exp ::= Val + syntax Exps ::= Vals + syntax Vals ::= List{Val,","} [overload(exps)] + syntax Bottom + syntax Bottoms ::= List{Bottom,","} [overload(exps)] +
+

We next define the syntax of arithmetic constructs, together with +their relative priorities and left-/non-associativities. We also +tag all these rules as members of a new group, "arith", so we can more easily +define global syntax priorities later (at the end of the syntax module).

+
k
syntax Exp ::= left: + Exp "*" Exp [strict, group(arith)] + | Exp "/" Exp [strict, group(arith)] + | Exp "%" Exp [strict, group(arith)] + > left: + Exp "+" Exp [strict, left, group(arith)] + | Exp "^" Exp [strict, left, group(arith)] +// left attribute should not be necessary; currently a parsing bug + | Exp "-" Exp [strict, prefer, group(arith)] +// the "prefer" attribute above is to not parse x-1 as x(-1) +// Due to some parsing problems, we currently cannot add unary minus: + | "-" Exp [strict, group(arith)] + > non-assoc: + Exp "<" Exp [strict, group(arith)] + | Exp "<=" Exp [strict, group(arith)] + | Exp ">" Exp [strict, group(arith)] + | Exp ">=" Exp [strict, group(arith)] + | Exp "==" Exp [strict, group(arith)] + | Exp "!=" Exp [strict, group(arith)] + > "!" Exp [strict, group(arith)] + > Exp "&&" Exp [strict(1), left, group(arith)] + > Exp "||" Exp [strict(1), left, group(arith)] +
+

The conditional construct has the expected evaluation strategy, +stating that only the first argument is evaluated:

+
k
syntax Exp ::= "if" Exp "then" Exp "else" Exp [strict(1)] +
+

FUN's builtin lists are formed by enclosing comma-separated +sequences of expressions (i.e., terms of sort Exps) in square +brackets. The list constructor cons adds a new element to the +top of the list, head and tail get the first element +and the tail sublist of a list if they exist, respectively, and get +stuck otherwise, and null? tests whether a list is empty or +not; syntactically, these are just expression constants. +In function patterns, we are also going to allow patterns following the +usual head/tail notation; for example, the pattern [x_1,...,x_n|t] +binds x_1, ..., x_n to the first elements of the matched list, +and t to the list formed with the remaining elements. We define list +patterns as ordinary expression constructs, although we will make sure that +we do not give them semantics if they appear in any place other than in a +function case pattern.

+
k
syntax Exp ::= "[" Exps "]" [strict, klabel(list)] + | "head" [macro] | "tail" [macro] | "null?" [macro] + | "[" Exps "|" Exp "]" + syntax Val ::= "[" Vals "]" [klabel(list)] + syntax Cons ::= "cons" + syntax Val ::= Cons + syntax Val ::= Cons Val [klabel(apply)] +
+
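For instance, following the max example from the abstract, a hypothetical
program that sums the elements of a non-empty list by matching on the
head/tail pattern would be (illustration only, not part of the definition):

    letrec sum = fun [h]   -> h
                 |   [h|t] -> h + sum t
    in sum [1, 2, 3, 4]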

Data constructors start with capital letters and they may or may +not have arguments. We need to use the attribute "prefer" to make +sure that, e.g., Cons(a) parses as constructor Cons with +argument a, and not as the expression Cons (because +constructor names are also expressions) regarded as a function applied +to the expression a. Also, note that the constructor is strict +in its second argument, because we want to evaluate its arguments but +not the constructor name itself.

+
k
syntax ConstructorName [token] + syntax Exp ::= ConstructorName + | ConstructorName "(" Exps ")" [prefer, strict(2), klabel(constructor)] + syntax Val ::= ConstructorName "(" Vals ")" [klabel(constructor)] +
+

A function is essentially a |-separated ordered +sequence of cases, each case of the form pattern -> expression, +preceded by the language construct fun. Patterns will be defined +shortly, both for the builtin lists and for user-defined constructors. +Recall that the syntax we define in K is not meant to serve as an +ultimate parser for the defined language, but rather as a convenient +notation for K abstract syntax trees, which we prefer when we write +the semantic rules. It is therefore often the case that we define a +more ``generous'' syntax than we want to allow programs to use. +We do it here, too. Specifically, the syntax of Cases +below allows any expressions to appear as patterns. This syntactic +relaxation permits many wrong programs to be parsed, but that is not a +problem because we are not going to give semantics to wrong combinations, +so those programs will get stuck; moreover, our type inferencer will reject +those programs anyway. Function application is just concatenation of +expressions, without worrying about type correctness. Again, the type +system will reject type-incorrect programs.

+
k
syntax Exp ::= "fun" Cases + | Exp Exp [strict, left, klabel(apply)] +// NOTE: We would like eventually to also have Exp "(" Exps ") + syntax Case ::= Exp "->" Exp + syntax Cases ::= List{Case, "|"} +
+

The let and letrec binders have the usual syntax +and functional meaning. We allow multiple and-separated bindings. +Like for the function cases above, we allow a more generous syntax for +the left-hand sides of bindings, noting that the semantics will get stuck +on incorrect bindings and that the type system will reject those programs.

+
k
syntax Exp ::= "let" Bindings "in" Exp + | "letrec" Bindings "in" Exp [prefer] +// The "prefer" attribute for letrec currently needed due to tool bug, +// to make sure that "letrec" is not parsed as "let rec". + syntax Binding ::= Exp "=" Exp + syntax Bindings ::= List{Binding,"and"} +
+
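As a quick illustration of the and-separated bindings, the hypothetical
program below binds x and y in the same let and evaluates to 3
(illustration only):

    let x = 1 and y = 2
    in  x + y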

References are first class values in FUN. The construct ref +takes an expression, evaluates it, and then it stores the resulting value +at a fresh location in the store and returns that reference. Syntactically, +ref is just an expression constant. The construct & +takes a name as argument and evaluates to a reference, namely the store +reference where the variable passed as argument stores its value; this +construct is a bit controversial and is further discussed in the +environment-based semantics of the FUN language, where we desugar +ref to it. The construct @ takes a reference +and evaluates to the value stored there. The construct := takes +two expressions, the first expected to evaluate to a reference; the value +of its second argument will be stored at the location to which the first +points (the old value is thus lost). Finally, since expression evaluation +now has side effects, it makes sense to also add a sequential composition +construct, which is sequentially strict. This evaluates to the value of +its second argument; the value of the first argument is lost (it has +therefore been evaluated only for its side effects).

+
k
syntax Exp ::= "ref" [macro] + | "&" Name + | "@" Exp [strict] + | Exp ":=" Exp [strict] + | Exp ";" Exp [strict(1), right] +
+
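For example, the following hypothetical program exercises ref,
dereferencing, assignment and sequential composition: it allocates a fresh
location holding 3, overwrites it with 7, and then evaluates to 7
(illustration only; the inner parentheses are only for clarity):

    let r = ref 3
    in  (r := @r + 4 ; @r)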

Call-with-current-continuation, named callcc in FUN, is a +powerful control operator that originated in the Scheme programming +language, but it now exists in many other functional languages. It works +by evaluating its argument, expected to evaluate to a function, and by +passing the current continuation, or evaluation context (or computation, +in K terminology), as a special value to it. When/If this special value +is invoked, the current context is discarded and replaced with the one +held by the special value and the computation continues from there. +It is like taking a snapshot of the execution context at some moment +in time and then, when desired, being able to get back in time to that +point. If you like games, it is like saving the game now (so you can +work on your homework!) and then continuing the game tomorrow or whenever +you wish. To illustrate the strength of callcc, we also +allow exceptions in FUN by means of a conventional try-catch +construct, which will desugar to callcc. We also need to +introduce the special expression constant throw, but we need to +use it as a function argument name in the desugaring macro, so we define +it as a name instead of as an expression constant:

+
k
syntax Exp ::= "try" Exp "catch" "(" Name ")" Exp [macro] + syntax Val ::= "callcc" + syntax Name ::= "throw" [token] +
+
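To give a feel for these constructs, here is a hypothetical program using
the try-catch sugar (which the macros below turn into callcc); the
parentheses are only for clarity:

    try (1 + throw 3) catch(x) (x * 10)

Since throw 3 aborts the pending addition and passes 3 to the handler, the
whole expression evaluates to 30.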

Finally, FUN also allows polymorphic datatype declarations. These +will be useful when we define the type system later on.

+
k
syntax Exp ::= "datatype" Type "=" TypeCases Exp [macro] +// NOTE: In a future version of K, we want the datatype declaration +// to be a construct by itself, but that is not possible currently +// because K's parser wrongly identifies the __ operation allowing +// a declaration to appear in front of an expression with the function +// application construct, giving ambiguous parsing errors. +
+

We next need to define the syntax of types and type cases that appear +in datatype declarations.

+

Like in many functional languages, type parameters/variables in +user-defined types are quoted identifiers.

+
k
syntax TypeVar [token] + syntax TypeVars ::= List{TypeVar,","} [overload(types)] +
+

Types can be basic types, function types, or user-defined +parametric types. In the dynamic semantics we are going to simply ignore +all the type declarations, so the syntax of types below is only useful +for generating the desired parser. To avoid syntactic ambiguities with +the arrow construct for function cases, we use the symbol --> as +a constructor for function types:

+
k
syntax TypeName [token] + syntax Type ::= "int" | "bool" | "string" + | Type "-->" Type [right] + | "(" Type ")" [bracket] + | TypeVar + | TypeName [klabel(TypeName), avoid] + | Type TypeName [klabel(Type-TypeName), symbol, macro] + | "(" Types ")" TypeName [prefer] + syntax Types ::= List{Type,","} [overload(types)] + syntax Types ::= TypeVars + + syntax TypeCase ::= ConstructorName + | ConstructorName "(" Types ")" + syntax TypeCases ::= List{TypeCase,"|"} [symbol(_|TypeCase_)] +
+

Additional Priorities

+
k
syntax priority @__FUN-UNTYPED-COMMON + > apply + > arith + > _:=__FUN-UNTYPED-COMMON + > let_in__FUN-UNTYPED-COMMON + letrec_in__FUN-UNTYPED-COMMON + if_then_else__FUN-UNTYPED-COMMON + > _;__FUN-UNTYPED-COMMON + > fun__FUN-UNTYPED-COMMON + > datatype_=___FUN-UNTYPED-COMMON +endmodule + +module FUN-UNTYPED-MACROS + imports FUN-UNTYPED-COMMON +
+

Desugaring macros

+

We desugar the list non-constructor operations to functions matching +over list patterns. In order to do that we need some new variables; for +those, we follow the same convention as in the K tutorial, where we +added them as new identifier constructs starting with the character $, +so we can easily recognize them when we debug or trace the semantics.

+
k
syntax Name ::= "$h" [token] | "$t" [token] + rule head => fun [$h|$t] -> $h + rule tail => fun [$h|$t] -> $t + rule null? => fun [.Exps] -> true | [$h|$t] -> false +
+

Multiple-head list patterns desugar into successive one-head patterns:

+
k
rule [E1,E2,Es:Exps|T] => [E1|[E2,Es|T]] [anywhere] +
+
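For instance, under this rule the (hypothetical) pattern below is rewritten
so that only one-head list patterns remain for the matching rules to handle:

    [x, y | t]  =>  [x | [y | t]]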

Uncurrying of multiple arguments in functions and binders:

+
k
rule P1 P2 -> E => P1 -> fun P2 -> E [anywhere] + rule F P = E => F = fun P -> E [anywhere] +
+

We desugar the try-catch construct into callcc:

+
k
syntax Name ::= "$k" [token] | "$v" [token] + rule try E catch(X) E' + => callcc (fun $k -> (fun throw -> E)(fun X -> $k E')) +
+
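As a concrete instance of this macro, a hypothetical expression
try E catch(x) 0 desugars to

    callcc (fun $k -> (fun throw -> E)(fun x -> $k 0))

so invoking throw inside E passes the thrown value to the handler, whose
result is sent directly to the saved continuation $k.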

For uniformity, we reduce all types to their general form:

+
k
rule `Type-TypeName`(T:Type, Tn:TypeName) => (T) Tn +
+

The dynamic semantics ignores all the type declarations:

+
k
rule datatype _T = _TCs E => E + +endmodule + + +module FUN-UNTYPED-SYNTAX + imports FUN-UNTYPED-COMMON + imports BUILTIN-ID-TOKENS + + syntax Name ::= r"[a-z][_a-zA-Z0-9]*" [token, prec(2)] + | #LowerId [token] + syntax ConstructorName ::= #UpperId [token] + syntax TypeVar ::= r"['][a-z][_a-zA-Z0-9]*" [token] + syntax TypeName ::= Name [token] +endmodule +
+

Semantics

+

The semantics below is environment-based. A substitution-based +definition of FUN is also available, but that drops the & +construct as explained above.

+
k
module FUN-UNTYPED + imports FUN-UNTYPED-COMMON + imports FUN-UNTYPED-MACROS + imports DOMAINS + //imports PATTERN-MATCHING +
+

Configuration

+

The k, env, and store cells are standard +(see, for example, the definition of LAMBDA++ or IMP++ in the first +part of the K tutorial).

+
k
configuration <T color="yellow"> + <k color="green"> $PGM:Exp </k> + <env color="violet"> .Map </env> + <store color="white"> .Map </store> + </T> +
+

Values and results

+

We only define integers, Booleans and strings as values here, but will +add more values later.

+
k
syntax Val ::= Int | Bool | String + syntax Vals ::= Bottoms + syntax KResult ::= Val +
+

Lookup

+
k
rule <k> X:Name => V ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V ...</store> +
+

Arithmetic expressions

+
k
rule I1 * I2 => I1 *Int I2 + rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0 + rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0 + rule I1 + I2 => I1 +Int I2 + rule S1 ^ S2 => S1 +String S2 + rule I1 - I2 => I1 -Int I2 + rule - I => 0 -Int I + rule I1 < I2 => I1 <Int I2 + rule I1 <= I2 => I1 <=Int I2 + rule I1 > I2 => I1 >Int I2 + rule I1 >= I2 => I1 >=Int I2 + rule V1:Val == V2:Val => V1 ==K V2 + rule V1:Val != V2:Val => V1 =/=K V2 + rule ! T => notBool(T) + rule true && E => E + rule false && _ => false + rule true || _ => true + rule false || E => E +
+

Conditional

+
k
rule if true then E else _ => E + rule if false then _ else E => E +
+

Lists

+

We have already declared the syntactic list of expressions strict, so +we can assume that all the elements that appear in a FUN list are +evaluated. The only thing left to do is to state that a list of +values is a value itself, that is, that the list square-bracket +construct is indeed a constructor, and to give the semantics of +cons. Since cons is a builtin function and is +expected to take two arguments, we have to also state that +cons itself is a value (specifically, a function/closure +value, but we do not need that level of detail here), and also that +cons applied to a value is a value (specifically, it would be +a function/closure value that expects the second, list argument):

+
k
rule cons V:Val [Vs:Vals] => [V,Vs] +
+
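For example, assuming its arguments are already evaluated, the application
below rewrites by the rule above as follows:

    cons 1 [2, 3]  =>  [1, 2, 3]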

Data Constructors

+

Constructors take values as arguments and produce other values:

+
k
syntax Val ::= ConstructorName +
+

Functions and Closures

+

Like in the environment-based semantics of LAMBDA++ in the first part +of the K tutorial, functions evaluate to closures. A closure includes +the current environment besides the function contents; the environment +will be used at execution time to lookup all the variables that appear +free in the function body (we want static scoping in FUN).

+
k
syntax Val ::= closure(Map,Cases) + rule <k> fun Cases => closure(Rho,Cases) ...</k> <env> Rho </env> +
+

Note: The reader may want to get familiar with +how the pre-defined pattern matching works before proceeding. +The best way to do that is to consult +k/include/modules/pattern-matching.k.

+ +

We distinguish two cases when the closure is applied. +If the first pattern matches, then we pick the first case: switch to +the closure's environment, get the matching map and bind all its +variables, and finally evaluate the function body of the first case, +making sure that the environment is properly recovered afterwards. +If the first pattern does not match, then we drop it and thus move on +to the next one.

+
k
rule (.K => getMatching(P, V)) ~> closure(_, P->_ | _) V:Val + rule <k> matchResult(M:Map) ~> closure(Rho, _->E | _) _ + => bindMap(M) ~> E ~> setEnv(Rho') ...</k> + <env> Rho' => Rho </env> + rule (matchFailure => .K) ~> closure(_, (_->_ | Cs:Cases => Cs)) _ +// rule <k> closure(Rho, P->E | _) V:Val +// => bindMap(getMatching(P,V)) ~> E ~> setEnv(Rho') ...</k> +// <env> Rho' => Rho </env> when isMatching(P,V) +// rule closure(_, (P->_ | Cs:Cases => Cs)) V:Val when notBool isMatching(P,V) +
+

Let and Letrec

+

To highlight the similarities and differences between let and +letrec, we prefer to give them direct semantics instead of +desugaring them as in LAMBDA. See the formal definitions of +bindTo, bind, and assignTo at the end of +this module. Informally, bindTo(Xs, Es) first +evaluates the expressions Es in Exps in the current +environment (i.e., it is strict in its second argument), then it binds +the variables in Xs in Names to new locations and adds +those bindings to the environment, and finally writes the values +previously obtained after evaluating the expressions Es to those +new locations; bind(Xs) does only the bindings of +Xs to new locations and adds those bindings to the environment; +and assignTo(Xs,Es) evaluates the expressions +Es in the current environment and then it writes the resulting +values to the locations to which the variables Xs are already +bound in the environment.

+

Therefore, let Xs = Es in E first +evaluates Es in the current environment, then adds new +bindings for Xs to fresh locations in the environment, then +writes the values of Es to those locations, and finally +evaluates E in the new environment, making sure that the +environment is properly recovered after the evaluation of E. +On the other hand, letrec does the same things but in a +different order: it first adds new bindings for Xs to fresh +locations in the environment, then it evaluates Es in the new +environment, then it writes the resulting values to their +corresponding locations, and finally it evaluates E and +recovers the environment. The crucial difference is that the +expressions Es now see the locations of the variables Xs +in the environment, so if they are functions, which is typically the +case with letrec, their closures will encapsulate in their +environments the bindings of all the bound variables, including +themselves (thus, we may have a closure value stored at location +L, whose environment contains a binding of the form +F ↦ L; this way, the closure can invoke +itself).

+
k
rule <k> let Bs in E + => bindTo(names(Bs),exps(Bs)) ~> E ~> setEnv(Rho) ...</k> + <env> Rho </env> + + rule <k> letrec Bs in E + => bind(names(Bs))~>assignTo(names(Bs),exps(Bs))~>E~>setEnv(Rho)...</k> + <env> Rho </env> +
+
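To see the letrec rule at work, consider a hypothetical factorial program
(illustration only):

    letrec f = fun 0 -> 1
             |   n -> n * f (n - 1)
    in f 5

Because f is bound to a fresh location before its body is evaluated, the
closure stored at that location captures an environment containing f itself,
so the recursive calls resolve correctly and the program evaluates to 120.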

Recall that our syntax allows let and letrec to +take any expression in place of its binding. This allows us to use +the already existing function application construct to bind names to +functions, such as let x y = y in .... +The desugaring macro in the syntax module uncurries such declarations, +and then the semantic rules above only work when the remaining +bindings are identifiers, so the semantics will get stuck on programs +that misuse the let and letrec binders.

+

References

+

The semantics of references is self-explanatory, except maybe for the +desugaring rule of ref, which is discussed further below. Note +that &X grabs the location of X from the environment. +Sequential composition, which is needed only to accumulate the +side effects due to assignments, is strict only in its first argument. +Once evaluated, its first argument is simply discarded:

+
k
syntax Name ::= "$x" [token] + rule ref => fun $x -> & $x + rule <k> & X => L ...</k> <env>... X |-> L ...</env> + rule <k> @ L:Int => V:Val ...</k> <store>... L |-> V ...</store> + rule <k> L:Int := V:Val => V ...</k> <store>... L |-> (_=>V) ...</store> + rule _V:Val; E => E +
+

The desugaring rule of ref (first rule above) works +because & takes a variable and returns its location (like in C). +Note that some ``pure'' functional programming researchers strongly dislike +the & construct, but favor ref. We refrain from having +a personal opinion on this issue here, but support & in the +environment-based definition of FUN because it is, technically speaking, +more powerful than ref. From a language design perspective, it +would be equally easy to drop & and instead give a direct +semantics to ref. In fact, this is precisely what we do in the +substitution-based definition of FUN, because there appears to be no way +to give a substitution-based definition to the & construct.

+

Callcc

+

As we know from the LAMBDA++ tutorial, call-with-current-continuation +is quite easy to define in K. We first need to define a special +value wrapping an execution context, that is, an environment saying +where the variables should be looked up, and a computation structure +saying what is left to execute (in a substitution-based definition, +this special value would be even simpler, as it would only need to +wrap the computation structure---see, for example, the +substitution-based semantics of LAMBDA++ in the first part of the +K tutorial, or the substitution-based definition of FUN). Then +callcc creates such a value containing the current +environment and the current remaining computation, and passes it to +its argument function. When/If invoked, the special value replaces +the current execution context with its own and continues the execution +normally.

+
k
syntax Val ::= cc(Map,K) + rule <k> (callcc V:Val => V cc(Rho,K)) ~> K </k> <env> Rho </env> + rule <k> cc(Rho,K) V:Val ~> _ => V ~> K </k> <env> _ => Rho </env> +
+
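For instance, in the hypothetical expression below, the captured value
cc(Rho,K) wraps the pending computation that adds 1 to the result; invoking
k 10 discards the pending 2 + _ context and resumes with 10, so the whole
expression evaluates to 11:

    1 + callcc (fun k -> 2 + k 10)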

Auxiliary operations

+

Environment recovery

+

The environment recovery operation is the same as for the LAMBDA++ +language in the K tutorial and many other languages provided with the +K distribution. The first ``anywhere'' rule below shows an elegant +way to achieve the benefits of tail recursion in K.

+
k
syntax KItem ::= setEnv(Map) // TODO: get rid of env + //rule (setEnv(_) => .) ~> setEnv(_) [anywhere] + rule <k> _:Val ~> (setEnv(Rho) => .K) ...</k> <env> _ => Rho </env> +
+

bindTo, bind and assignTo

+

The meaning of these operations has already been explained when we +discussed the let and letrec language constructs +above.

+
k
syntax KItem ::= bindTo(Names,Exps) [strict(2)] + | bindMap(Map) + | bind(Names) + + rule (.K => getMatchingAux(Xs,Vs)) ~> bindTo(Xs:Names,Vs:Vals) + rule matchResult(M:Map) ~> bindTo(_:Names, _:Vals) => bindMap(M) + + rule bindMap(.Map) => .K + rule <k> bindMap((X:Name |-> V:Val => .Map) _:Map) ...</k> + <env> Rho => Rho[X <- !L:Int] </env> + <store>... .Map => !L |-> V ...</store> + + rule bind(.Names) => .K + rule <k> bind(X:Name,Xs => Xs) ...</k> + <env> Rho => Rho[X <- !_L:Int] </env> + + syntax KItem ::= assignTo(Names,Exps) [strict(2)] + + rule <k> assignTo(.Names,.Vals) => .K ...</k> + rule <k> assignTo((X:Name,Xs => Xs),(V:Val,Vs:Vals => Vs)) ...</k> + <env>... X |-> L ...</env> + <store>... .Map => L |-> V ...</store> +
+

Getters

+

The following auxiliary operations extract the list of identifiers +and of expressions in a binding, respectively.

+
k
syntax Names ::= names(Bindings) [function] + rule names(.Bindings) => .Names + rule names(X:Name=_ and Bs) => (X,names(Bs))::Names + + syntax Exps ::= exps(Bindings) [function] + rule exps(.Bindings) => .Exps + rule exps(_:Name=E and Bs) => E,exps(Bs) + + /* Extra kore stuff */ + syntax KResult ::= Vals + syntax Exps ::= Names + syntax Names ::= Bottoms + + /* Matching */ + syntax MatchResult ::= getMatching(Exp, Val) [function] + | getMatchingAux(Exps, Vals) [function] + | mergeMatching(MatchResult, MatchResult) [function] + | matchResult(Map) + | "matchFailure" + + rule getMatching(C:ConstructorName(Es:Exps), C(Vs:Vals)) => getMatchingAux(Es, Vs) + rule getMatching([Es:Exps], [Vs:Vals]) => getMatchingAux(Es, Vs) + rule getMatching(C:ConstructorName, C) => matchResult(.Map) + rule getMatching(B:Bool, B) => matchResult(.Map) + rule getMatching(I:Int, I) => matchResult(.Map) + rule getMatching(S:String, S) => matchResult(.Map) + rule getMatching(N:Name, V:Val) => matchResult(N |-> V) + rule getMatching(_, _) => matchFailure [owise] + + rule getMatchingAux((E:Exp, Es:Exps), (V:Val, Vs:Vals)) => mergeMatching(getMatching(E, V), getMatchingAux(Es, Vs)) + rule getMatchingAux(.Exps, .Vals) => matchResult(.Map) + rule getMatchingAux(_, _) => matchFailure [owise] + + rule mergeMatching(matchResult(M1:Map), matchResult(M2:Map)) => matchResult(M1 M2) + requires intersectSet(keys(M1), keys(M2)) ==K .Set + //rule mergeMatching(_, _) => matchFailure [owsie] + rule mergeMatching(matchResult(_:Map), matchFailure) => matchFailure + rule mergeMatching(matchFailure, matchResult(_:Map)) => matchFailure + rule mergeMatching(matchFailure, matchFailure) => matchFailure +
+

Besides the generic decomposition rules for patterns and values, +we also want to allow [head|tail] matching for lists, so we add +the following custom pattern decomposition rule:

+
k
rule getMatching([H:Exp | T:Exp], [V:Val, Vs:Vals]) + => getMatchingAux((H, T), (V, [Vs])) +endmodule +
+

Go to Lesson 2, FUN untyped, Substitution-Based.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/2_languages/3_fun/1_untyped/2_substitution/fun-untyped/index.html b/k-distribution/pl-tutorial/2_languages/3_fun/1_untyped/2_substitution/fun-untyped/index.html new file mode 100644 index 00000000000..5e5829e3267 --- /dev/null +++ b/k-distribution/pl-tutorial/2_languages/3_fun/1_untyped/2_substitution/fun-untyped/index.html @@ -0,0 +1,731 @@ + + + + + + + + + + + + + + +FUN — Untyped — Substitution | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

// NOTE: this definition is not up to date with the latest version of K, as it +// uses both substitution and symbolic reasoning. +// It is intended for documentation and academic purposes only.

+

FUN — Untyped — Substitution

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the substitution-based definition of FUN. For additional +explanations regarding the semantics of the various FUN constructs, +the reader should consult the environment-based definition of FUN.

+

Syntax

+
k
requires "substitution.md" +//requires "modules/pattern-matching.k" + +module FUN-UNTYPED-COMMON + imports DOMAINS-SYNTAX +
+

The Syntactic Constructs

+
k
syntax Name + syntax Names ::= List{Name,","} + + syntax Exp ::= Int | Bool | String | Name + | "(" Exp ")" [bracket] + syntax Exps ::= List{Exp,","} [strict] + syntax Val + syntax Vals ::= List{Val,","} + + syntax Exp ::= left: + Exp "*" Exp [strict, arith] + | Exp "/" Exp [strict, arith] + | Exp "%" Exp [strict, arith] + > left: + Exp "+" Exp [strict, left, arith] + | Exp "^" Exp [strict, left, arith] + | Exp "-" Exp [strict, prefer, arith] + | "-" Exp [strict, arith] + > non-assoc: + Exp "<" Exp [strict, arith] + | Exp "<=" Exp [strict, arith] + | Exp ">" Exp [strict, arith] + | Exp ">=" Exp [strict, arith] + | Exp "==" Exp [strict, arith] + | Exp "!=" Exp [strict, arith] + > "!" Exp [strict, arith] + > Exp "&&" Exp [strict(1), left, arith] + > Exp "||" Exp [strict(1), left, arith] + + syntax Exp ::= "if" Exp "then" Exp "else" Exp [strict(1)] + + syntax Exp ::= "[" Exps "]" [strict] + | "cons" | "head" | "tail" | "null?" + | "[" Exps "|" Exp "]" + syntax Val ::= "[" Vals "]" + + syntax ConstructorName + syntax Exp ::= ConstructorName + | ConstructorName "(" Exps ")" [prefer, strict(2)] + syntax Val ::= ConstructorName "(" Vals ")" + + syntax Exp ::= "fun" Cases + | Exp Exp [strict, left] + syntax Case ::= Exp "->" Exp [binder] +// NOTE: The binder attribute above is the only difference between this +// module and the syntax module of environment-based FUN. We need +// to fix a bug in order to import modules and override the attributes +// of operations. + syntax Cases ::= List{Case, "|"} + + syntax Exp ::= "let" Bindings "in" Exp + | "letrec" Bindings "in" Exp [prefer] + syntax Binding ::= Exp "=" Exp + syntax Bindings ::= List{Binding,"and"} + + syntax Exp ::= "ref" + | "&" Name + | "@" Exp [strict] + | Exp ":=" Exp [strict] + | Exp ";" Exp [strict(1), right] + + syntax Exp ::= "callcc" + | "try" Exp "catch" "(" Name ")" Exp + syntax Name ::= "throw" [token] + + syntax Exp ::= "datatype" Type "=" TypeCases Exp + + syntax TypeVar + syntax TypeVars ::= List{TypeVar,","} + + syntax TypeName + syntax Type ::= "int" | "bool" | "string" + | Type "-->" Type [right] + | "(" Type ")" [bracket] + | TypeVar + | TypeName [klabel(TypeName), avoid] + | Type TypeName [klabel(Type-TypeName), onlyLabel] + | "(" Types ")" TypeName [prefer] + syntax Types ::= List{Type,","} + syntax Types ::= TypeVars + + syntax TypeCase ::= ConstructorName + | ConstructorName "(" Types ")" + syntax TypeCases ::= List{TypeCase,"|"} [klabel(_|TypeCase_)] +
+

Additional Priorities

+
k
syntax priority @__FUN-UNTYPED-COMMON + > ___FUN-UNTYPED-COMMON + > arith + > _:=__FUN-UNTYPED-COMMON + > let_in__FUN-UNTYPED-COMMON + letrec_in__FUN-UNTYPED-COMMON + if_then_else__FUN-UNTYPED-COMMON + > _;__FUN-UNTYPED-COMMON + > fun__FUN-UNTYPED-COMMON + > datatype_=___FUN-UNTYPED-COMMON +endmodule + +module FUN-UNTYPED-MACROS + imports FUN-UNTYPED-COMMON +
+

Desugaring macros

+
k
rule P1 P2 -> E => P1 -> fun P2 -> E [macro-rec] + rule F P = E => F = fun P -> E [macro-rec] + + rule [E1,E2,Es:Exps|T] => [E1|[E2,Es|T]] [macro-rec] + +// rule 'TypeName(Tn:TypeName) => (.TypeVars) Tn [macro] + rule `Type-TypeName`(T:Type, Tn:TypeName) => (T) Tn [macro] + + syntax Name ::= "$h" | "$t" + rule head => fun [$h|$t] -> $h [macro] + rule tail => fun [$h|$t] -> $t [macro] + rule null? => fun [.Exps] -> true | [$h|$t] -> false [macro] + + syntax Name ::= "$k" | "$v" + rule try E catch(X) E' + => callcc (fun $k -> (fun throw -> E)(fun X -> $k E')) [macro] + + rule datatype _T = _TCs E => E [macro] +
+

mu is needed for letrec, but we put it here so we can also write +programs with mu in them, which is particularly useful for testing.

+
k
syntax Exp ::= "mu" Case + +endmodule + + +module FUN-UNTYPED-SYNTAX + imports FUN-UNTYPED-COMMON + imports BUILTIN-ID-TOKENS + + syntax Name ::= r"[a-z][_a-zA-Z0-9]*" [token, prec(2)] + | #LowerId [token] + syntax ConstructorName ::= #UpperId [token] + syntax TypeVar ::= r"['][a-z][_a-zA-Z0-9]*" [token] + syntax TypeName ::= Name [token] +endmodule +
+

Semantics

+
k
module FUN-UNTYPED + imports FUN-UNTYPED-COMMON + imports FUN-UNTYPED-MACROS + imports DOMAINS + imports SUBSTITUTION + //imports PATTERN-MATCHING + + configuration <T color="yellow"> + <k color="green"> $PGM:Exp </k> + <store color="white"> .Map </store> + </T> +
+

Both names and functions are values now:

+
k
syntax Val ::= Int | Bool | String | Name + syntax Exp ::= Val + syntax Exps ::= Vals + syntax KResult ::= Val + syntax Exps ::= Names + syntax Vals ::= Names + + rule I1 * I2 => I1 *Int I2 + rule I1 / I2 => I1 /Int I2 when I2 =/=K 0 + rule I1 % I2 => I1 %Int I2 when I2 =/=K 0 + rule I1 + I2 => I1 +Int I2 + rule S1 ^ S2 => S1 +String S2 + rule I1 - I2 => I1 -Int I2 + rule - I => 0 -Int I + rule I1 < I2 => I1 <Int I2 + rule I1 <= I2 => I1 <=Int I2 + rule I1 > I2 => I1 >Int I2 + rule I1 >= I2 => I1 >=Int I2 + rule V1:Val == V2:Val => V1 ==K V2 + rule V1:Val != V2:Val => V1 =/=K V2 + rule ! T => notBool(T) + rule true && E => E + rule false && _ => false + rule true || _ => true + rule false || E => E + + rule if true then E else _ => E + rule if false then _ else E => E + + rule isVal(cons) => true + rule isVal(cons _V:Val) => true + rule cons V:Val [Vs:Vals] => [V,Vs] + + syntax Val ::= ConstructorName + + rule isVal(fun _) => true + syntax KVar ::= Name + syntax Name ::= freshName(Int) [freshGenerator, function] + rule freshName(I:Int) => {#parseToken("Name", "#" +String Int2String(I))}:>Name + + rule (. => getMatching(P, V)) ~> (fun P->_ | _) V:Val + rule matchResult(M:Map) ~> (fun _->E | _) _ => E[M] + rule (matchFailure => .) ~> (fun (_->_ | Cs:Cases => Cs)) _ +// rule (fun P->E | _) V:Val => E[getMatching(P,V)] when isMatching(P,V) +// rule (fun (P->_ | Cs:Cases => Cs)) V:Val when notBool isMatching(P,V) +
+

We can reduce multiple bindings to one list binding, and then +apply the usual desugaring of let into function application. +It is important that the rule below is a macro, so let is eliminated +immediately, otherwise it may interfere in ugly ways with substitution.

+
k
rule let Bs in E => ((fun [names(Bs)] -> E) [exps(Bs)]) [macro] +
+

We only give the semantics of one-binding letrec. +Multiple bindings are left as an exercise.

+
k
// changed because of parsing error + //rule mu X:Name -> E => E[(mu X -> E) / X] + rule mu X:Name -> E => E[X |-> (mu X -> E)] + rule letrec F:Name = E in E' => let F = (mu F -> E) in E' [macro] +
+
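As a hypothetical instance of this desugaring, the factorial program

    letrec f = fun 0 -> 1 | n -> n * f (n - 1) in f 3

becomes

    let f = (mu f -> (fun 0 -> 1 | n -> n * f (n - 1))) in f 3

and the mu rule then unfolds the recursive definition by substituting the
whole mu term for f whenever it is needed.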

We cannot have & anymore, but we can give direct +semantics to ref. We also have to declare ref to +be a value, so that we will never heat on it.

+
k
// rule <k> & X => L ...</k> <env>... X |-> L </env> + rule isVal(ref) => true + rule <k> ref V:Val => !L:Int ...</k> <store>... .Map => !L |-> V ...</store> + rule <k> @ L:Int => V:Val ...</k> <store>... L |-> V ...</store> + rule <k> L:Int := V:Val => V ...</k> <store>... L |-> (_=>V) ...</store> + rule _V:Val; E => E + + syntax Val ::= cc(K) + rule isVal(callcc) => true + rule <k> (callcc V:Val => V cc(K)) ~> K </k> + rule <k> cc(K) V:Val ~> _ => V ~> K </k> +
+

Auxiliary getters

+
k
syntax Names ::= names(Bindings) [function] + rule names(.Bindings) => .Names + rule names(X:Name=_ and Bs) => X,names(Bs) + + syntax Exps ::= exps(Bindings) [function] + rule exps(.Bindings) => .Exps + rule exps(_:Name=E and Bs) => E,exps(Bs) + + /* Extra kore stuff */ + syntax KResult ::= Vals + syntax Exps ::= Names + + /* Matching */ + syntax MatchResult ::= getMatching(Exp, Val) [function] + | getMatchingAux(Exps, Vals) [function] + | mergeMatching(MatchResult, MatchResult) [function] + | matchResult(Map) + | "matchFailure" + + rule getMatching(C:ConstructorName(Es:Exps), C(Vs:Vals)) => getMatchingAux(Es, Vs) + rule getMatching([Es:Exps], [Vs:Vals]) => getMatchingAux(Es, Vs) + rule getMatching(C:ConstructorName, C) => matchResult(.Map) + rule getMatching(B:Bool, B) => matchResult(.Map) + rule getMatching(I:Int, I) => matchResult(.Map) + rule getMatching(S:String, S) => matchResult(.Map) + rule getMatching(N:Name, V:Val) => matchResult(N |-> V) + rule getMatching(_, _) => matchFailure [owise] + + rule getMatchingAux((E:Exp, Es:Exps), (V:Val, Vs:Vals)) => mergeMatching(getMatching(E, V), getMatchingAux(Es, Vs)) + rule getMatchingAux(.Exps, .Vals) => matchResult(.Map) + rule getMatchingAux(_, _) => matchFailure [owise] + + rule mergeMatching(matchResult(M1:Map), matchResult(M2:Map)) => matchResult(M1 M2) + requires intersectSet(keys(M1), keys(M2)) ==K .Set + //rule mergeMatching(_, _) => matchFailure [owsie] + rule mergeMatching(matchResult(_:Map), matchFailure) => matchFailure + rule mergeMatching(matchFailure, matchResult(_:Map)) => matchFailure + rule mergeMatching(matchFailure, matchFailure) => matchFailure +
+

Besides the generic decomposition rules for patterns and values, +we also want to allow [head|tail] matching for lists, so we add +the following custom pattern decomposition rule:

+
k
rule getMatching([H:Exp | T:Exp], [V:Val, Vs:Vals]) + => getMatchingAux((H, T), (V, [Vs])) +endmodule +
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/2_languages/4_logik/basic/logik/index.html b/k-distribution/pl-tutorial/2_languages/4_logik/basic/logik/index.html new file mode 100644 index 00000000000..1639ac6d96f --- /dev/null +++ b/k-distribution/pl-tutorial/2_languages/4_logik/basic/logik/index.html @@ -0,0 +1,720 @@ + + + + + + + + + + + + + + +LOGIK | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

// NOTE: this definition is not runnable as is. +// It is intended for documentation and academic purposes only.

+

LOGIK

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K semantic definition of LOGIK, a trivial language +capturing the essence of the logic programming paradigm. In this +definition, we explicitly focus on simplicity and mathematical +clarity, not on advanced logic programming features or performance. +Those are covered in the LOGIK++ extension under examples/logik++.

+

Specifically, a LOGIK program consists of a sequence of Horn clauses +of the form

+
P :- P1, P2, ..., Pn .
+
+

followed by a query of the form

+
?- Q1, Q2, ..., Qm .
+
+

where P, P1, P2, ..., Pn, Q1, Q2, +..., Qm are literals. The +symbol :- is read "if". A literal has the form +p(T1,T2,...,Tk), where p is a predicate symbol +and where T1,T2,...,Tk are terms. Terms are built as +usual, with operation symbols and variables. A common +convention in logic programming languages, also adopted here, is that +variables are capitalized and operation symbols are not. Operations +with zero arguments are called constants and are written without +parentheses, that is, c instead of c(). Horn +clauses without conditions, called facts, are written +without :-, that is, P. instead of P :- ..

+

For example, the LOGIK program below gives a few facts about a +parent predicate, then several clauses defining some useful +predicates including an ancestor predicate, and finally a +query asking for those who both have ancestors and are ancestors +themselves in the parent relation:

+
parent(david,john).
+parent(jim,david).
+parent(steve,jim).
+parent(nathan,steve).
+
+grandparent(A,B):-
+  parent(A,X),
+  parent(X,B).
+
+ancestor(A,B):-
+  parent(A,X),
+  parents(X,B).
+
+parents(X,X).
+parents(A,B):-
+  ancestor(A,B).
+
+both(X) :- ancestor(A,X), ancestor(X,B).
+
+?- both(X).
+
+

Above, we only have constant operation symbols, so these and variables +are the only terms that can be used in predicates. As expected, the +LOGIK program above will give us three solutions for X: +david, steve, and jim. If we inline the +both(X) predicate in the query, that is, if we replace the +query with ?- ancestor(A,X), ancestor(X,B). then we get +10 solutions, one for each triple A, X, and +B satisfying both predicates ancestor(A,X) and +ancestor(X,B).

+

As another example, the program below defines an append +predicate followed by a simple goal:

+
append(nil,L,L).
+append(cons(H,T),L,cons(H,Z)) :- append(T,L,Z).
+
+?- append(cons(a,nil), cons(b,nil), V).
+
+

Besides the predicate symbol append, the program above also +includes a constant symbol nil and a binary operation symbol +cons. Additionally, the query also includes two more +constants, a and b. The capitalized identifiers are +all variables. As expected, the LOGIK program above yields only one +solution, namely V = cons(a,cons(b,nil)). On the other hand, +if we change the query to:

+
?- append(L1, cons(a,L2), cons(a,cons(b,cons(a,nil)))).
+
+

then LOGIK yields two solutions: one where L1 is +cons(a,cons(b,nil)) and L2 is nil, +and another where L1 is nil and L2 is +cons(a,cons(b,nil)).

+

The programs above all generated ground solutions, that is, +solutions where the query variables are mapped to ground terms (i.e., +terms without variables). Let us now consider the following query:

+
?- append(cons(a,nil), Y, Z).
+
+

There are obviously infinitely many ground solutions for the query +above, e.g.,
+Y = nil and Z = cons(a,nil),
+Y = cons(a,nil) and Z = cons(a,cons(a,nil)),
+Y = cons(b,nil) and Z = cons(a,cons(b,nil)),
+Y = cons(c,cons(b,nil)) and Z = cons(a,cons(c,cons(b,nil))),
+etc. However, all the ground solutions for the query above can be +elegantly characterized by the property that Z is bound to a list +starting with a and followed by the list that Y is +bound to. This property can in fact be described as a symbolic solution +to the query: Z = cons(a,Y) or, equivalently, +Y = Symb and Z = cons(a,Symb). It is possible to +define a ``more general than'' relation on such symbolic solutions, +in the sense that the more particular solution can be obtained as a +specialization/substitution of the more general one, and then it can +be shown that the above is the most general solution to the +stated query. Logic programming languages, including our LOGIK, +attempt to always compute such most general solutions.

+

Logic programming languages are highly non-deterministic, in that +several Horn clauses may be used at the same time, each possibly +resulting in a different solution. Implementations of logic +programming languages consist of complex, optimized search and +indexing algorithms, which we are not concerned with here. Instead, +we here take advantage of K's builtin support for search. +Specifically, to find all the solutions of a LOGIK program, we have to +use krun with the option --search. However, note +that some programs have infinitely many solutions which cannot relate +to each other by the "more general" relation. For example, the query

+
?- append(L1, cons(a,L2), L3) .
+
+

To address such cases and terminate, logic programming languages allow +the user to choose how many solutions should be computed and displayed. +In LOGIK, we can use the --bound option of krun for +this purpose.

+

Finally, note that some queries have no solution. In some cases that +is easy to detect by exhaustive analysis, such as for the following +query:

+
?- append(cons(a,L1), L2, cons(b,L3)).
+
+

Logic programming languages, including LOGIK, terminate in such cases +and report a no solution answer. However, there are cases where +exhaustive analysis is not sufficient, such as for the query:

+
?- append(cons(a,L), nil, L).
+
+

In such cases, logic programming languages do not terminate. While +one may devise techniques to detect non-termination in some cases, +one cannot do it in general (just as for any Turing-complete +language).

+
k
requires "unification.k" + +module LOGIK-COMMON + imports DOMAINS-SYNTAX +
+

Syntax

+

The syntax of LOGIK is straightforward: a program is a sequence of +Horn clauses followed by a query:

+
k
syntax Literal + syntax Term ::= Literal | Literal "(" Terms ")" + syntax Terms ::= List{Term,","} + syntax Clause ::= Term ":-" Terms "." | Term "." + syntax Query ::= "?-" Terms "." + syntax Pgm ::= Query | Clause Pgm +endmodule + +module LOGIK-SYNTAX + imports LOGIK-COMMON + imports BUILTIN-ID-TOKENS +
+

Variables and literals are defined as tokens following the conventions +used in Prolog (variables start with _ or a capital letter, while literals +start with a lowercase letter):

+
k
syntax #KVariable ::= r"[A-Z_][A-Za-z0-9_]*" [token, prec(2)] + | #UpperId [token] + syntax Term ::= #KVariable [klabel(#SemanticCastToTerm)] + syntax Literal ::= r"[a-z][a-zA-Z0-9_]*" [token] + | #LowerId [token] +endmodule + +module LOGIK + imports LOGIK-COMMON + imports DOMAINS + imports UNIFICATION +
+

Unification is at the core of logic programming. Here we are +going to use the predefined unification procedure (the same one we +used in the type inferencers in Tutorial 5).

+

Configuration

+

The configuration stores each clause in its own cell for easy access, +and the most general unifier in a cell named mgu, just like in +the type inferencers. The k cell holds the query and the +fresh cell holds a fresh clause instance to be attempted on +the next query item. To more easily read the solutions, we add a +second top-level cell, solution. Both top cells are +optional. Indeed, we start with the main top cell and, when a +solution is found, we move it into the solution cell and +discard the main cell.

+
k
configuration <T color="yellow" multiplicity="?"> + <k color="green"> $PGM:Pgm </k> + <fresh color="orange"> .K </fresh> + <clauses color="red"> + <clause color="pink" multiplicity="*"> .K </clause> + </clauses> + <mgu> .K </mgu> + </T> + <solution multiplicity="?"> .K </solution> +
+

Pre- and post-processing

+

Before we launch the semantics, we first scan the given program and +place each clause in its own cell, and then place the query in the +k cell and initialize the mgu with the variables from the query.

+

Note that we put a fresh instance of the clause to avoid interference with +the query variables. By a "fresh instance" of a clause we mean one whose +variables are renamed with fresh names; we need that in order to avoid +undesired unification conflicts due to particular names chosen for +variables in the original program, as well as conflicts due to +subsequent uses of the same clause. It is safe to rename the +variables in a clause, because clauses are universally quantified in +their variables. This process of creating a fresh instance of a +clause is similar to how we created fresh instances of type schemas in +the higher-order type inferencer discussed in Tutorial 5. Indeed, we +can safely regard clauses as "clause schemas" comprising infinitely +many instances, one for each context.

+
k
rule <k> C:Clause Pgm => Pgm </k> + (.Bag => <clause> #renameVariables(C) </clause>) + + rule <k> ?- Ls:Terms. => Ls ...</k> + <mgu> _ => #variablesMap(#variables(Ls)) </mgu> +
+

We also sequentialize the goals for easier processing:

+
k
rule L:Term, Ls:Terms => L ~> Ls + rule .Terms => . +
+

When all the goals are solved, indicated by the empty k +cell, the calculated most general unifier (mgu) is in the mgu +cell. In that case, to ease reading of the final solution we move the +mgu in the solution cell and delete the rest of the +configuration:

+
k
rule <T>... <k> . </k> <mgu> Theta </mgu> ...</T> + => <solution> Theta </solution> +
+

Since we are not interested in seeing the failed attempts to solve +the query, we collapse all the error configurations into an empty +configuration (recall that both top-level cells in the configuration +were declared optional). This way, if we see an empty configuration +when we search for all solutions, we know that some attempts failed +(but we do not know which ones).

+
k
// this would be nice, but we need feedback from the external unifier +// for this. +// rule <T>... <mgu> _:MguError </mgu> ...</T> => . +
+

Semantics

+

Once all the infrastructure is in place, the actual semantics of LOGIK +is quite simple. All we have to do is to pick some (fresh instance of +a) clause, then unify its conclusion with the first query literal, and +then replace that literal with the condition of the clause. The intuition +here is the following: to satisfy the first literal in the query, we +need to find some instance of some clause that matches it, and then to +similarly show that we can satisfy the conditions of that clause. +Mathematically, this is an instance of the proof principle called +resolution: if p ∨ q and ¬ p ∨ r hold, then so does +q ∨ r. We leave it as an exercise to the reader to see how the two +relate (hint: assume the negation of the goal together with all the +clauses, and then derive false).

+
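As a small worked example, consider the append program from the overview and
the goal append(cons(a,nil), cons(b,nil), V). Picking a fresh instance of the
second clause, say

    append(cons(H1,T1), L1, cons(H1,Z1)) :- append(T1, L1, Z1).

and unifying its head with the goal binds H1 = a, T1 = nil, L1 = cons(b,nil)
and V = cons(a,Z1), and the goal is replaced by the clause's condition
append(nil, cons(b,nil), Z1). A fresh instance of the first clause then solves
this new goal with Z1 = cons(b,nil), yielding the expected solution
V = cons(a,cons(b,nil)).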

The following two rules are tightly connected and they together +perform the following core task: pick a fresh instance of a clause +which unifies with the first goal item, then add its conditions as new +goals.

+

Pick a clause and generate a fresh instance of it when the +fresh cell is empty:

+
k
rule <fresh> . => #renameVariables(C) </fresh> <clause> C </clause> + <k> T:Term ...</k> + requires #unifiable(T,head(C)) + + syntax Term ::= head(Clause) [function] + rule head(L.) => L + rule head(L:-_.) => L +
+

If the goal is unifiable with the fresh clause's head, replace the goal +with the clause body, and empty the fresh cell (so that +another clause can be chosen using the rule above):

+
k
rule <k> L:Term => . ...</k> + <fresh> L:Term . => . </fresh> + + rule <k> L:Term :KItem => Ls ...</k> + <fresh> L:Term :- Ls:Terms. => . </fresh> +
+

Note that there is no problem if a clause is chosen whose +conclusion literal does not unify with the first goal literal. +The --search +option of krun will systematically try all clauses, so no +solution is missed. Of course, the above is not the most efficient +way to implement a logic programming language, but recall that our +objective here was to present a simple and mathematically clean +solution. We encourage the interested reader to consult the LOGIK++ +language definition for a more efficient definition of a richer logic +programming language.

+
k
endmodule +
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/2_languages/index.html b/k-distribution/pl-tutorial/2_languages/index.html new file mode 100644 index 00000000000..13458098aae --- /dev/null +++ b/k-distribution/pl-tutorial/2_languages/index.html @@ -0,0 +1,404 @@ + + + + + + + + + + + + + + +K Languages | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

K Languages

+

Here we present several "real-world" language examples. These languages +demonstrate many of the features you would expect to find in a full-fledged +programming language.

+
    +
  • SIMPLE: Imperative programming language with threads.
  • +
  • KOOL: SIMPLE extended with object-oriented features.
  • +
  • FUN: A functional language with algebraic data-types and pattern-matching.
  • +
  • LOGIK: A logical programming language based on clause unification.
  • +
+
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/LICENSE/index.html b/k-distribution/pl-tutorial/LICENSE/index.html new file mode 100644 index 00000000000..12c44eda061 --- /dev/null +++ b/k-distribution/pl-tutorial/LICENSE/index.html @@ -0,0 +1,414 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

BSD 3-Clause License

+

Copyright (c) 2010-2024, K Team +All rights reserved.

+

Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met:

+
    +
  1. +

    Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer.

    +
  2. +
  3. +

    Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution.

    +
  4. +
  5. +

    Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission.

    +
  6. +
+

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/NOTES/index.html b/k-distribution/pl-tutorial/NOTES/index.html new file mode 100644 index 00000000000..f4288c5dfb7 --- /dev/null +++ b/k-distribution/pl-tutorial/NOTES/index.html @@ -0,0 +1,463 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Dear reader: these NOTES.md documents are mostly for myself (Grigore), to +keep track of changes and updates that need to be made as things evolve +in the K framework. You can safely ignore them. However, if you are curious +how things will change and decide to read them, I would be grateful if you +let me know whenever you find inconsistencies or things that I forgot to +mention here. Or even better, feel free to make pull requests with suggested +changes or updates.

+

Global changes that need to be made:

+
    +
  • Replace when in rules with requires
  • +
+

Things to revise each time the structure of the tutorial changes:

+
    +
  • 1_k\2_imp\lesson_1\README.md refers to Tutorial 1
  • +
  • 1_k\2_imp\lesson_4\README.md refers to Lesson 1
  • +
  • 1_k\2_imp\lesson_4\README.md refers to Tutorial 1, Lesson 2.5
  • +
  • 1_k\3_lambda++\lesson_1\README.md refers to Lesson 8, Tutorial 1
  • +
  • `1_k\3_lambda++\lesson_1\exercises refers to Lesson 1, tests/config.xml
  • +
  • 1_k\3_lambda++\lesson_2\README.md Lesson 1, Tutorial 1; Tutorial 6; Part 1, 2
  • +
  • 1_k\3_lambda++\lesson_3\README.md refers to Lesson 7, Tutorial 1
  • +
  • 1_k\3_lambda++\lesson_4\README.md refers to Lesson 1
  • +
  • 1_k\3_lambda++\lesson_5\README.md refers to Lesson 4, Lesson 8 and Lesson 7 of Tutorial 1
  • +
  • 1_k\3_lambda++\lesson_6\README.md refers to Parts 3 and 4 of the tutorial
  • +
  • 1_k\4_imp++\lesson_1\README.md refers to Lesson 4, Tutorial 2; also Tutorial 3 (at the end)
  • +
  • 1_k\4_imp++\lesson_2\README.md refers to Tutorial 3; Tutorial 2
  • +
  • 1_k\4_imp++\lesson_3\README.md refers to Lesson 1, Lesson 6
  • +
  • 1_k\4_imp++\lesson_4\README.md refers to Tutorial 6
  • +
  • 1_k\4_imp++\lesson_5\README.md refers to Lesson 4; Tutorial 3
  • +
  • 1_k\4_imp++\lesson_6\README.md refers to Lesson 3
  • +
  • 1_k\4_imp++\lesson_7\README.md refers to Lesson 1, 6
  • +
  • 1_k\5_types\lesson_1\README.md refers to Part 4; SIMPLE
  • +
  • 1_k\5_types\lesson_2\NOTES/README.md refer to Tutorial 1
  • +
  • 1_k\5_types\lesson_3\README.md refers to Part 1; Lesson 1,2
  • +
  • 1_k\5_types\lesson_4\README.md refers to Part 1, and to Lessons 2 and 3
  • +
  • 1_k\5_types\lesson_5\README.md refers to Lessons 4, 3, 2
  • +
  • 1_k\5_types\lesson_6\README.md refers to Lesson 5; SIMPLE, KOOL, IMP++
  • +
  • 1_k\5_types\lesson_7\README.md refers to Lesson 4, 8, 9
  • +
  • 1_k\5_types\lesson_8\README.md refers to Lessons 5, 7
  • +
  • 1_k\5_types\lesson_9\README.md refers to Lessons 8, 5, 7, 4
  • +
  • +
+

1_k\4_imp++\lesson_2\README.md states "generates a term of the form +symNat(n) of sort Nat", but the representation of symbolic numbers may +have changed

+

Describe/use/explain/justify the terminology "the <k/> cell" as opposed to "the k cell".

+

Would it be a good idea to make the README files self-contained, that is, to include the entire lang.k code in them, spread over the entire README, as things are discussed? In case we decide not to, make sure that the code snippets mentioned in the READMEs are in perfect correspondence with the code in the actual .k definitions. Maybe add a tag before each code snippet saying which file and which lines in that file it comes from; then we can use a script to check that they are identical.

+

1_k\5_types\lesson_4\README.md refers to polymorphism, but some may say that is not precisely +polymorphism, because the types are not universally quantified. Explain that better.

+

Modify the entire tutorial to use . or, if needed, .::Map, etc., instead of .Map, etc. Check each instance specifically, because the surrounding text may also need to be modified.

+

We sometimes use "Kompile", or "kompile", as a verb instead of "Compile", +or "compile", to indicate that we mean compilation with K. Similarly for +"Krun", or "krun", instead of "Run" or "run".

+

Add citations to:

+
    +
  • chemical abstract machine
  • +
  • logics, where the distinction between side condition and premise is explained
  • +
  • reduction semantics with evaluation contexts
  • +
+

Replace I1 +Int I2, notBool B, etc., with (I1 + I2)@INT, (not B)@Bool, +etc., when we have module qualification in place and working.

+

Explain isSort(T) for all sorts Sort, in one place, when it is first used. +Explain also that T:Sort yields a side condition isSort(T).

+
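A minimal sketch of the kind of explanation intended here (a hypothetical module, not part of the tutorial sources), showing that a sort cast on a variable behaves like the corresponding sort-membership side condition:

  module ISSORT-EXAMPLE
    imports INT
    syntax Foo ::= foo(K) | bar(Int)
    // The cast T:Int restricts T to sort Int; it behaves like adding
    // the side condition isInt(T) to an otherwise unsorted variable.
    rule foo(T:Int) => bar(T)
    // essentially the same as:  rule foo(T) => bar(T) requires isInt(T)
  endmodule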

Currently all the K collections are "untyped", that is, over the sort K. +In the future we want to have parametric collections. Make sure the tutorial +is systematically changed when this happens.

+

See issue #2023 and modify pl-tutorial/1_k/2_imp/lesson_4 accordingly, if needed.

+

All definitions, and corresponding READMEs, should be changed to take advantage +of modules and module operations. Ideally, we'd like to have no code repetition +in any examples, except for demonstration purposes.

+

In the PL semantics book, define print(AExps) to have the same semantics as in IMP++: it evaluates and prints each of its arguments in order (as opposed to first evaluating all of them and then printing them; for example, if the second argument performs a division by zero, I still want the first argument to be printed).

+
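A tiny example of the intended behavior, written in IMP++-style syntax just for illustration: the statement below should output 1 and then get stuck on the division by zero, rather than printing nothing.

  print(1, 100/0);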
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/pl-tutorial/index.html b/k-distribution/pl-tutorial/index.html new file mode 100644 index 00000000000..c1e2a00473a --- /dev/null +++ b/k-distribution/pl-tutorial/index.html @@ -0,0 +1,586 @@ + + + + + + + + + + + + + + +K PL Tutorial | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

K PL Tutorial

+

Here you will learn how to use the K tool to define languages by means of a series of screencast movies. It is recommended to do these in the indicated order, because K features already discussed in a previous language definition will likely not be rediscussed in later definitions. The screencasts follow quite closely the structure of the files under the tutorial folder in the K tool distribution. If you'd rather follow the instructions there and do the tutorial exercises yourself, then go back to https://kframework.org and download the K tool, if you have not done so already. Or, you can first watch the screencasts below and then do the exercises, or do them in parallel.

+

K Overview

+

Make sure you watch the K overview video before you do the K tutorial:

+ +

Learning K

+

[34'46"] Part 1: Defining LAMBDA

+

Here you will learn how to define a very simple functional language in K and the basics of how to use the K tool. The language is a call-by-value variant of lambda calculus with builtins and mu, and its definition is based on substitution.

+ +

[37'07"] Part 2: Defining IMP

+

Here you will learn how to define a very simple, prototypical textbook C-like imperative language, called IMP, and several new features of the K tool.

+ +

[33'10"] Part 3: Defining LAMBDA++

+

Here you will learn how to define constructs which abruptly change the execution control, as well as how to define functional languages using environments and closures. LAMBDA++ extends the LAMBDA language above with a callcc construct.

+ +

[46'46"] Part 4: Defining IMP++

+

Here you will learn how to refine configurations, how to generate fresh elements, how to tag syntactic constructs and rules, how to exhaustively search the space of non-deterministic or concurrent program executions, etc. IMP++ extends the IMP language above with increment, blocks and locals, dynamic threads, input/output, and abrupt termination.

+ +

[17'03"] Part 5: Defining Type Systems

+

Here you will learn how to define various kinds of type systems following various approaches or styles using K.

+ +

[??'??"] Part 6: Miscellaneous Other K Features

+

Here you will learn a few other K features, and better understand how features that you have already seen work.

+
    +
  • [??'??"] ...
  • +
+

Learning Language Design and Semantics using K

+

[??'??"] Part 7: SIMPLE: Designing Imperative Programming Languages

+

Here you will learn how to design imperative programming languages using K. SIMPLE is an imperative language with functions, threads, pointers, exceptions, multi-dimensional arrays, etc. We first define an untyped version of SIMPLE, then a typed version. For the typed version, we define both a static and a dynamic semantics.

+ +

[??'??"] Part 8: KOOL: Designing Object-Oriented Programming Languages

+

Here you will learn how to design object-oriented programming languages using K. KOOL is an object-oriented language that extends SIMPLE with classes and objects. We first define an untyped version of KOOL, then a typed version, with both a dynamic and a static semantics.

+ +

[??'??"] Part 9: FUN: Designing Functional Programming Languages

+

Here you will learn how to design functional programming languages using K. FUN is a higher-order functional language with general let, letrec, pattern matching, references, lists, callcc, etc. We first define an untyped version of FUN, then a let-polymorphic type inferencer.

+ +

[??'??"] Part 10: LOGIK: Designing Logic Programming Languages

+

Here you will learn how to design a logic programming language using K.

+ +
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/tests/regression-new/checks/markdownErrorLocation/index.html b/k-distribution/tests/regression-new/checks/markdownErrorLocation/index.html new file mode 100644 index 00000000000..ca6ced7bfac --- /dev/null +++ b/k-distribution/tests/regression-new/checks/markdownErrorLocation/index.html @@ -0,0 +1,401 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

1

+
k
// keep indentation +module MARKDOWNERRORLOCATION-SYNTAX +endmodule +
+

7

+
.a .b
9 +
+

11

+
.k .x
module MARKDOWNERRORLOCATION + imports INT +
+
{
} +
+
k
rule 21 // pandoc would think this is line 20, column 7 +
+
.y .k
endmodule // pandoc would miss this last unfinished block +
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/tests/regression-new/imp++-llvm/imp/index.html b/k-distribution/tests/regression-new/imp++-llvm/imp/index.html new file mode 100644 index 00000000000..7f882724828 --- /dev/null +++ b/k-distribution/tests/regression-new/imp++-llvm/imp/index.html @@ -0,0 +1,969 @@ + + + + + + + + + + + + + + +IMP++ | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

IMP++

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Abstract

+

This is the K semantic definition of the IMP++ language. IMP++ extends the IMP language with the features listed below. We strongly recommend that you first familiarize yourself with the IMP language and its K definition in Tutorial 2 before proceeding.

+

Strings and concatenation of strings. Strings are useful +for the print statement, which is discussed below. For +string concatenation, we use the same + construct that we use +for addition (so we overload it).

+

Variable increment. We only add a pre-increment construct: +++x increments variable x and evaluates to the +incremented value. Variable increment makes the evaluation of +expressions have side effects, and thus makes the evaluation strategies +of the various language constructs have an influence on the set +of possible program behaviors.

+

Input and output. IMP++ adds a read() expression construct which reads an integer number and evaluates to it, and a variadic (i.e., it has an arbitrary number of arguments) statement construct print(e1,e2,...,en) which evaluates its arguments and then outputs their values. Note that the K tool allows you to connect the input and output cells to the standard input and output buffers, this way compiling the language definition into an interactive interpreter.

+
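For instance, a small IMP++ fragment (a hypothetical program, not one of the distributed tests) that reads an integer and echoes it back could look like this:

  int n;
  n = read();
  print("You typed: ", n, "\n");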

Abrupt termination. The halt statement simply halts the program. The K tool shows the resulting configuration, as if the program terminated normally. We therefore assume that an external observer does not care whether the program terminates normally or abruptly, just as with exit statements in conventional programming languages like C.

+

Dynamic threads. The expression construct spawn s starts a new concurrent thread that executes statement s, which is expected to be a block, and evaluates immediately to a fresh thread identifier that is also assigned to the newly created thread. The new thread is given at creation time the environment of its parent, so it can access all its parent's variables. This allows the parent thread and the child thread to communicate; it also allows for races and "unexpected" behaviors, so be careful. For thread synchronization, IMP++ provides a thread join statement construct join t;, where t evaluates to a thread identifier, which stalls the current thread until thread t completes its computation. For simplicity, we here assume a sequentially consistent shared memory model. To experiment with other memory models, see the definition of KERNELC.

+
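For example, the following hypothetical fragment spawns a thread that increments x, waits for it with join, and then prints the result (variables are initialized to 0, so this prints 1):

  int x, t;
  t = spawn { x = x + 1; };
  join t;
  print("x = ", x, "\n");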

Blocks and local variables. IMP++ allows blocks enclosed by +curly brackets. Also, IMP's global variable declaration construct is +generalized to be used anywhere as a statement, not only at the +beginning of the program. As expected, the scope of the declared +variables is from their declaration point till the end of the most +nested enclosing block.

+

What You Will Learn Here

+
    +
  • How to define a less trivial language in K, as explained above.
  • +
  • How to use the superheat and supercool +options of the K tool kompile to exhaustively explore the +non-determinism due to underspecified evaluation strategies.
  • +
  • How to use the transition option of the K tool to +exhaustively explore the non-determinism due to concurrency.
  • +
  • How to connect certain cells in the configuration to the +standard input and standard output, and thus turn the krun +tool into an interactive interpreter for the defined language.
  • +
  • How to exhaustively search for the non-deterministic behaviors +of a program using the search option of krun.
  • +
+
k
module IMP-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

IMP++ adds several syntactic constructs to IMP. Also, since the variable declaration construct is generalized to be used anywhere a statement can be used, not only at the beginning of the program, we need to remove the previous global variable declaration of IMP and instead add a variable declaration statement construct.

+

We do not re-discuss the constructs which are taken over from IMP, except when their syntax has been subtly modified (such as, for example, the syntax of the previous "statement" assignment, which is now obtained by composing the new assignment expression construct and the new expression statement construct); go to the last lesson of Tutorial 2 if you are interested in IMP's constructs. For execution purposes, we tag the addition and division operations with the addition and division tags. These attributes have no theoretical significance, in that they do not affect the semantics of the language in any way. They only have practical relevance, specific to our implementation of the K tool. Specifically, we can tell the K tool (using its superheat and supercool options) that we want to exhaustively explore all the non-deterministic behaviors (due to strictness) of these language constructs. For performance reasons, by default the K tool chooses an arbitrary but fixed order to evaluate the arguments of the strict language constructs, thus possibly losing behaviors due to missed interleavings. This aspect was irrelevant in IMP, because its expressions had no side effects, but it becomes relevant in IMP++.

+

The syntax of the IMP++ constructs is self-explanatory. Note that assignment +is now an expression construct. Also, print is variadic, taking a +list of expressions as argument. It is also strict, which means that the +entire list of expressions, that is, each expression in the list, will be +evaluated. Note also that we have now defined sequential composition +of statements as a whitespace-separated list of statements, aliased with +the nonterminal Stmts, and block as such a (possibly empty) sequence +of statements surrounded by curly brackets.

+
k
syntax AExp ::= Int | String | Id + | "++" Id + | "read" "(" ")" + | "-" AExp [strict] + | "(" AExp ")" [bracket] + > AExp "/" AExp [left, strict, group(division)] + > AExp "+" AExp [left, strict] + > "spawn" Block + > Id "=" AExp [strict(2)] + syntax BExp ::= Bool + | AExp "<=" AExp [seqstrict] + | "!" BExp [strict] + | "(" BExp ")" [bracket] + > BExp "&&" BExp [left, strict(1)] + syntax Block ::= "{" Stmts "}" + syntax Stmt ::= Block + | AExp ";" [strict] + | "if" "(" BExp ")" + Block "else" Block [strict(1)] + | "while" "(" BExp ")" Block + | "int" Ids ";" + | "print" "(" AExps ")" ";" + | "halt" ";" + > "join" AExp ";" [strict] + + syntax Ids ::= List{Id,","} + syntax AExps ::= List{AExp,","} + syntax Stmts ::= List{Stmt,""} + syntax AExps ::= Ids +endmodule + + +module IMP + imports IMP-SYNTAX + imports DOMAINS +
+

Semantics

+

We next give the semantics of IMP++. We start by first defining its +configuration.

+

Configuration

+

The original configuration of IMP has been extended to include +all the various additional cells needed for IMP++. +To facilitate the semantics of threads, more specifically +to naturally give them access to their parent's variables, we prefer a +(rather conventional) split of the program state into an +environment and a store. An environment maps +variable names into locations, while a store maps locations +into values. Stores are also sometimes called states, or +heaps, or memory, in the literature. Like values, locations +can be anything. For simplicity, here we assume they are natural +numbers. Moreover, each thread has its own environment, so it knows +where all the variables that it has access to are located in the store +(that includes its locally declared variables as well as the variables +of its parent thread), and its own unique identifier. The store is +shared by all threads. For simplicity, we assume a sequentially consistent +memory model in IMP++. Note that the thread cell has multiplicity +*, meaning that there could be zero, one, or more instances of that cell +in the configuration at any given time. This multiplicity information +is important for K's configuration abstraction process: it tells +K how to complete rules which, in order to increase the modularity of the +definition, choose to not mention the entire configuration context. +The in and out cells hold the input and the output +buffers as lists of items.

+
k
configuration <T color="yellow"> + <threads color="orange"> + <thread multiplicity="*" type="Set" color="blue"> + <k color="green"> $PGM:Stmts </k> + <env color="LightSkyBlue"> .Map </env> + <id color="black"> 0 </id> + </thread> + </threads> +// <br/> + <store color="red"> .Map </store> +// <input color="magenta"> .List </input> +// <output color="Orchid"> .List </output> + <input color="magenta" stream="stdin"> .List </input> + <output color="Orchid" stream="stdout"> .List </output> + </T> +// Replace the <input/> and <output/> cells with the next two in order to +// initialize the input buffer through krun +// <input color="magenta"> $IN:List </input> +// <output color="Orchid"> .List </output> +// Replace the <input/> and <output/> cells with the next two to connect the +// input/output buffers to stdin/stdout through krun +// <input color="magenta" stream="stdin"> .List </input> +// <output color="Orchid" stream="stdout"> .List </output> +// Replace the <input/> and <output/> cells with the next two to connect the +// input/output buffers to stdin/stdout and also allow input through krun +// <input color="magenta" stream="stdin"> $IN:List </input> +// <output color="Orchid" stream="stdout"> .List </output> +
+

We can also use configuration variables to initialize the configuration through krun. For example, we may want to pass a few list items in the in cell when the program makes use of read(), so that the semantics does not get stuck. Recall from IMP that configuration variables start with a $ character (for example, $PGM) and can be initialized with any string by krun; of course, the string should parse to a term of the corresponding sort, otherwise errors will be generated. Moreover, K allows you to connect list cells to the standard input or the standard output. For example, if you add the attribute stream="stdin" to the in cell, then krun will prompt the user to pass input when the in cell is empty and any semantic rule needs at least one item to be present there in order to match. Similarly but dually, if you add the attribute stream="stdout" to the out cell, then any item placed into this cell by any rule will be promptly sent to the standard output. This way, krun can be used to obtain interactive interpreters based directly on the K semantics of the language. For example:

+
shell
sh$ krun sum-io.imp --output none +Add numbers up to (<= 0 to quit)? 10 +Sum = 55 +Add numbers up to (<= 0 to quit)? 1000 +Sum = 500500 +Add numbers up to (<= 0 to quit)? 0 +sh$ +
+

The option --output none instructs krun to not display the resulting configuration after the program executes. The input/output streaming works with or without this option, although if you don't use the option then a configuration with empty in and out cells will be displayed after the program is executed. You can also initialize the configuration using configuration variables and stream the contents of the cells to standard input/output at the same time. For example, if you use a configuration variable in the in cell and pass contents to it through krun, then those contents will be consumed first and then the user will be prompted to introduce additional input if the program's execution encounters more read() constructs.

+

The old IMP constructs

+

The semantics of the old IMP constructs is almost identical to their +semantics in the original IMP language, except for those constructs +making use of the program state and for those whose syntax has slightly +changed. Indeed, the rules for variable lookup and assignment in IMP +accessed the state cell, but that cell is not available in IMP++ +anymore. Instead, we have to use the combination of environment and store +cells. Thanks to K's implicit configuration abstraction, we do not have +to mention the thread and threads cells: these are +automatically inferred (and added by the K tool at compile time) from the +definition of the configuration above, as there is only one correct +way to complete the configuration context of these rules in order to +match the configuration declared above. In our case here, "correct way" +means that the k and env cells will be considered as +being part of the same thread cell, as opposed to each being part +of a different thread. Configuration abstraction is crucial for modularity, +because it gives us the possibility to write our definitions in a way that +may not require us to revisit existing rules when we change the configuration. +Changes in the configuration are quite frequent in practice, typically +needed in order to accommodate new language features. For example, +imagine that we initially did not have threads in IMP++. There +would be no need for the thread and threads cells in +the configuration then, the cells k and env being simply +placed at the top level in the T cell, together with the +already existing cells. Then the rules below would be exactly the +same. Thus, configuration abstraction allows you to not have to +modify your rules when you make structural changes in your language +configuration.

+

Below we list the semantics of the old IMP constructs, referring the +reader to the K semantics of IMP for their meaning. Like we tagged the +addition and the division rules above in the syntax, we also tag the lookup +and the assignment rules below (with tags lookup and +assignment), because we want to refer to them when we generate the +language model (with the kompile tool), basically to allow them to +generate (possibly non-deterministic) transitions. Indeed, these two rules, +unlike the other rules corresponding to old IMP constructs, can yield +non-deterministic behaviors when more threads are executed concurrently. +In terms of rewriting, these two rules can "compete" with each other on +some program configurations, in the sense that they can both match at the +same time and different behaviors may be obtained depending upon which of +them is chosen first.

+
k
syntax KResult ::= Int | Bool +
+

Variable lookup

+
k
rule <k> X:Id => I ...</k> + <env>... X |-> N ...</env> + <store>... N |-> I ...</store> [group(lookup)] +
+

Arithmetic constructs

+
k
rule I1 / I2 => I1 /Int I2 requires I2 =/=Int 0 + rule I1 + I2 => I1 +Int I2 + rule - I => 0 -Int I +
+

Boolean constructs

+
k
rule I1 <= I2 => I1 <=Int I2 + rule ! T => notBool T + rule true && B => B + rule false && _ => false +
+

Variable assignment

+

Note that the old IMP assignment statement X = I; is now composed of two +constructs: an assignment expression construct X = I, followed by a +semicolon ; turning the expression into a statement. The rationale behind +this syntactic restructuring has been explained in Lesson 7. Here is the +semantics of the two constructs:

+
k
rule _:Int; => .K + rule <k> X = I:Int => I ...</k> + <env>... X |-> N ...</env> + <store>... N |-> (_ => I) ...</store> [group(assignment)] +
+

Sequential composition

+

Sequential composition has been defined as a whitespace-separated syntactic list of statements. Recall that syntactic lists are actually syntactic sugar for cons-lists. Therefore, the following two rules eventually sequentialize a syntactic list of statements s1 s2 ... sn into the corresponding computation s1 ~> s2 ~> ... ~> sn.

+
k
rule .Stmts => .K + rule S:Stmt Ss:Stmts => S ~> Ss +
+

Conditional statement

+
k
rule if (true) S else _ => S + rule if (false) _ else S => S +
+

While loop

+

The only thing to notice here is that the empty block has been replaced with the block holding the explicit empty sequence. That's because in the semantics all empty lists become explicit corresponding dots (to avoid parsing ambiguities).

+
k
rule while (B) S => if (B) {S while (B) S} else {.Stmts} +
+

The new IMP++ constructs

+

We next discuss the semantics of the new IMP++ constructs.

+

Strings

+

First, we have to state that strings are also results. +Second, we give the semantics of IMP++ string concatenation (which +uses the already existing addition symbol + from IMP) by +reduction to the built-in string concatenation operation.

+
k
syntax KResult ::= String + rule Str1 + Str2 => Str1 +String Str2 +
+

Variable increment

+

Like variable lookup, this is also meant to be a supercool transition: we +want it to count both in the non-determinism due to strict operations above +it in the computation and in the non-determinism due to thread +interleavings. This rule also relies on K's configuration abstraction. +Without abstraction, you would have to also include the thread and +threads cells.

+
k
rule <k> ++X => I +Int 1 ...</k> + <env>... X |-> N ...</env> + <store>... N |-> (I => I +Int 1) ...</store> [group(increment)] +
+

Read

+

The read() construct evaluates to the first integer in the input buffer, which it consumes. Note that this rule is tagged read. This is because we will include it in the set of potentially non-deterministic transitions when we kompile the definition; we want to do that because two or more threads can "compete" on reading the next integer from the input buffer, and different choices for the next transition can lead to different behaviors.

+
k
rule <k> read() => I ...</k> + <input> ListItem(I:Int) => .List ...</input> [group(read)] +
+

Print

+

The print statement is strict, so all its arguments are +eventually evaluated (recall that print is variadic). We +append each of its evaluated arguments, in order, to the output buffer, +and structurally discard the residual print statement with an +empty list of arguments. We only want to allow printing integers and +strings, so we define a Printable syntactic category including +only these and define the print statement to only print +Printable elements. Alternatively, we could have had two +similar rules, one for integers and one for strings. Recall that, +currently, K's lists are cons-lists, so we cannot simply rewrite the +head of a list (P) into a list (.). The first rule below is tagged, +because we want to include it in the list of transitions when we kompile; +different threads may compete on the output buffer and we want to capture +all behaviors.

+
k
syntax Printable ::= Int | String + +/* currently it is necessary to subsort Printable to AExp, + but future K should be able to infer that automatically. */ + syntax AExp ::= Printable + + context print(HOLE:AExp, _AEs:AExps); + + rule <k> print(P:Printable,AEs => AEs); ...</k> + <output>... .List => ListItem(P) </output> [group(print)] + rule print(.AExps); => .K +
+

Halt

+

The halt statement empties the computation, so the rewriting process +simply terminates as if the program terminated normally. Interestingly, once +we add threads to the language, the halt statement as defined below +will terminate the current thread only. If you want an abrupt termination +statement that halts the entire program, then you need to discard the entire +contents of the threads cell, so the entire computation abruptly +terminates the entire program, no matter how many concurrent threads it has, +because there is nothing else to rewrite.

+
k
rule <k> halt; ~> _ => .K </k> +
+

Spawn thread

+

A spawned thread is passed its parent's environment at creation time. The spawn expression in the parent thread is immediately replaced by the unique identifier of the newly created thread, so the parent thread can continue its execution. We only consider a sequentially consistent shared memory model for IMP++, but other memory models can also be defined in K; see, for example, the definition of KERNELC. Note that the rule below does not need to be tagged in order to make it a transition when we kompile, because the creation of the thread itself does not interfere with the execution of other threads. Also, note that K's configuration abstraction is hard at work here, in two different places. First, the parent thread's k and env cells are wrapped within a thread cell. Second, the child thread's k, env and id cells are also wrapped within a thread cell. Why that way and not putting all these four cells together within the same thread, or even creating an additional threads cell at the top holding a thread cell with the new k, env and id? Because in the original configuration we declared the multiplicity of the thread cell to be *, which effectively tells the K tool that zero, one or more such cells can co-exist in a configuration at any moment. The other cells have the default multiplicity one, so they are not allowed to multiply. Thus, the only way to complete the rule below in a way consistent with the declared configuration is to wrap the first two cells in a thread cell, and the latter two cells under the . also in a thread cell. Once the rule applies, the spawning thread cell will add a new thread cell next to it, which is consistent with the declared configuration cell multiplicity. The unique identifier of the new thread is generated using the fresh side condition.

+
k
rule <k> spawn S => !T:Int ...</k> <env> Rho </env> + (.Bag => <thread>... <k> S </k> <env> Rho </env> <id> !T </id> ...</thread>) +
+

Join thread

+

A thread who wants to join another thread T has to wait until +the computation of T becomes empty. When that happens, the +join statement is simply dissolved. The terminated thread is not removed, +because we want to allow possible other join statements to also dissolve.

+
k
rule <k> join(T); => .K ...</k> <thread>... <k> .K </k> <id>T</id> ...</thread> +
+

Blocks

+

The body statement of a block is executed normally, making sure +that the environment at the block entry point is saved in the computation, +in order to be recovered after the block body statement. This step is +necessary because blocks can declare new variables having the same +name as variables which already exist in the environment, and our +semantics of variable declarations is to update the environment map in +the declared variable with a fresh location. Thus, variables which +are shadowed lose their original binding, which is why we take a +snapshot of the environment at block entrance and place it after the +block body (see the semantics of environment recovery at the end of +this module). Note that any store updates through variables which are +not declared locally are kept at the end of the block, since the store +is not saved/restored. An alternative to this environment save/restore +approach is to actually maintain a stack of environments and to push a +new layer at block entrance and pop it at block exit. The variable +lookup/assign/increment operations then also need to change, so we do +not prefer that non-modular approach. Compilers solve this problem by +statically renaming all local variables into fresh ones, to completely +eliminate shadowing and thus environment saving/restoring. The rule +below can be structural, because what it effectively does is to take a +snapshot of the current environment; this operation is arguably not a +computational step.

+
k
rule <k> {Ss} => Ss ~> Rho ...</k> <env> Rho </env> +
+
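For instance, in the following hypothetical program the inner declaration of x shadows the outer one only inside the block, so the final print outputs 1; the outer binding is recovered when the saved environment is restored at block exit:

  int x;
  x = 1;
  { int x; x = 2; }
  print(x, "\n");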

Variable declaration

+

We allocate a fresh location for each newly declared variable and +initialize it with 0.

+
k
rule <k> int (X,Xs => Xs); ...</k> + <env> Rho => Rho[X <- !N:Int] </env> + <store>... .Map => !N |-> 0 ...</store> + rule int .Ids; => .K +
+

Auxiliary operations

+

We only have one auxiliary operation in IMP++, the environment recovery. Its role is to discard the current environment in the env cell and replace it with the environment that it holds. This rule is structural: we do not want it to count as a computational step in the transition system of a program.

+
k
rule <k> Rho => .K ...</k> <env> _ => Rho </env> +
+

If you want to avoid useless environment recovery steps and keep the size +of the computation structure smaller, then you can also add the rule

+
  rule (_:Map => .) ~> _:Map
+
+

This rule acts like a "tail recursion" optimization, but for blocks.

+
k
// verification ids + syntax Id ::= "n" [token] + | "sum" [token] + | "a" [token] + | "b" [token] + | "c" [token] +endmodule +
+

On Kompilation Options

+

We are done with the IMP++ semantics. The next step is to kompile the definition using the kompile tool, this way generating a language model. Depending on what you want to use the generated language model for, you may need to kompile the definition using various options. We discuss these options here.

+

To tell the K tool to exhaustively explore all the behaviors due to the +non-determinism of addition, division, and threads, we have to kompile +with the command:

+
shell
kompile imp.k --transition="addition division lookup assignment increment read print" +
+

As already mentioned, the syntax and rule tags play no theoretical or +foundational role in K. They are only a means to allow kompile to +refer to them in its options, like we did above. By default, kompile's +transition option is empty, because this yields the fastest language model when +executed. Transitions may slow down the execution, but they instrument +the language model to allow for formal analysis of program behaviors, even for +exhaustive analysis.

+

Theoretically, the heating/cooling rules in K are fully reversible and +unconstrained by side conditions as we showed in the semantics of IMP. +For example, the theoretical heating/cooling rules corresponding to the +strict attribute of division are the following:

+
E₁ / E₂ ⇒ E₁ ⤳ □ / E₂
+E₁ ⤳ □ / E₂ ⇒ E₁ / E₂
+E₁ / E₂ ⇒ E₂ ⤳ E₁ / □
+E₂ ⤳ E₁ / □ ⇒ E₁ / E₂
+
+

The other semantic rules apply modulo such structural rules. +For example, using heating rules we can bring a redex (a subterm which +can be reduced with semantic rules) to the front of the computation, +then reduce it, then use cooling rules to reconstruct a term over the +original syntax of the language, then heat again and +non-deterministically pick another redex, and so on and so forth +without losing any opportunities to apply semantic rules. +Nevertheless, these unrestricted heating/cooling rules may create an +immense, often unfeasibly large space of possibilities to analyze. +The --transition option implements an optimization which works +well with other implementation choices made in the current K tool. +Recall from the detailed description of the IMP language semantics that +(theoretical) reversible rules like above are restricted by default +to complementary conditional rules of the form

+
E₁ / E₂ ⇒ E₁ ⤳ □ / E₂
+   if E₁ not in KResult
+E₁ ⤳ □ / E₂ ⇒ E₁ / E₂
+   if E₁ in KResult
+E₁ / E₂ ⇒ E₂ ⤳ E₁ / □
+   if E₂ not in KResult
+E₂ ⤳ E₁ / □  ⇒ E₁ / E₂
+   if  E₂ in KResult
+
+

Therefore, our tool eagerly heats and lazily cools the computation. +In other words, heating rules apply until a redex gets placed on the +top of the computation, then some semantic rule applies and rewrites +that into a result, then a cooling rule is applied to plug the +obtained result back into its context, then another argument may be +chosen and completely heated, and so on. This leads to efficient +execution, but it may and typically does hide program behaviors. +Using the --transition option allows you to interfere with this +process and to obtain all possible non-deterministic behaviors as if +the theoretical heating/cooling rules were applied. Optimizations +of course happen under the hood, but you need not be aware of them. +Used carefully, this mechanism allows us to efficiently explore more of +the non-deterministic behaviors of a program, even all of them (like here). +For example, with the semantics of IMP++ given above, the krun +command with the --search option detects all five behaviors +of the following IMP++ program (x can be 0, 1, 2, 3, or undefined +due to division-by-zero):

+
  int x,y;
+  x = 1;
+  y = ++x / (++x / x);
+
+
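For instance, assuming the program above is saved in a file called div.imp (a name chosen just for illustration), all of its behaviors can be explored with:

shell
krun div.imp --search
+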

Besides non-determinism due to underspecified argument evaluation orders, which the current K tool addresses as explained above, there is another important source of non-determinism in programming languages: non-determinism due to concurrency/parallelism. For example, when two or more threads are about to access the same location in the store and at least one of these accesses is a write (i.e., an instance of the variable assignment rule), there is a high chance that different choices for the next transition lead to different program behaviors. While in the theory of K all the rules count as computational steps and thereby as transitions in the transition system associated to the program, in practice that may yield a tremendous number of step interleavings to consider. Most of these interleavings are behaviorally equivalent for most purposes. For example, the fact that a thread computes a step 8+3 ⇒ 11 is likely irrelevant for the other threads, so one may not want to consider it as an observable transition in the space of interleavings. Since the K tool cannot know without help which transitions need to be explored and which do not, our approach is to let the user say so explicitly using the transition option of kompile.

+
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/tests/regression-new/kprove-markdown/set-balance-spec/index.html b/k-distribution/tests/regression-new/kprove-markdown/set-balance-spec/index.html new file mode 100644 index 00000000000..b607ad09251 --- /dev/null +++ b/k-distribution/tests/regression-new/kprove-markdown/set-balance-spec/index.html @@ -0,0 +1,441 @@ + + + + + + + + + + + + + + +Balances Module Specifications | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Balances Module Specifications

+
keep
module SET-BALANCE-SPEC + imports VERIFICATION +
+
k
ignore this code block! +
+

total_balance tests

+
keep
claim <k> totalBalance(AID) => 50 </k> + <account> + <accountID> AID </accountID> + <freeBalance> 30 </freeBalance> + <reservedBalance> 20 </reservedBalance> + ... + </account> +
+

No Zero-Balance Accounts Exist

+

This property shows that set_balance will not result in a zero-balance attack. +TODO: Generalize to any EntryAction. +TODO: Assertions about log events.

+
discard
rule <k> set_balance(Root, WHO, FREE_BALANCE', RESERVED_BALANCE') => . ... </k> + <totalIssuance> TOTAL_ISSUANCE => TOTAL_ISSUANCE +Int ( FREE_BALANCE' -Int FREE_BALANCE ) +Int ( RESERVED_BALANCE' -Int RESERVED_BALANCE ) </totalIssuance> + <existentialDeposit> EXISTENTIAL_DEPOSIT </existentialDeposit> + <account> + <accountID> WHO </accountID> + <freeBalance> FREE_BALANCE => FREE_BALANCE' </freeBalance> + <reservedBalance> RESERVED_BALANCE => RESERVED_BALANCE' </reservedBalance> + ... + </account> + requires #inWidth(96, TOTAL_ISSUANCE +Int (FREE_BALANCE' -Int FREE_BALANCE)) + andBool #inWidth(96, TOTAL_ISSUANCE +Int (FREE_BALANCE' -Int FREE_BALANCE) +Int (RESERVED_BALANCE' -Int RESERVED_BALANCE)) + andBool EXISTENTIAL_DEPOSIT <=Int FREE_BALANCE' + andBool EXISTENTIAL_DEPOSIT <=Int RESERVED_BALANCE' +
+
keep
claim <k> set_balance_reserved ( WHO , RESERVED_BALANCE' ) => .K ... </k> + <existentialDeposit> EXISTENTIAL_DEPOSIT </existentialDeposit> + <totalIssuance> TOTAL_ISSUANCE +Int ( FREE_BALANCE' -Int FREE_BALANCE ) => TOTAL_ISSUANCE +Int ( FREE_BALANCE' -Int FREE_BALANCE ) +Int ( RESERVED_BALANCE' -Int RESERVED_BALANCE ) </totalIssuance> + <account> + <accountID> WHO </accountID> + <freeBalance> FREE_BALANCE' </freeBalance> + <reservedBalance> RESERVED_BALANCE => RESERVED_BALANCE' </reservedBalance> + ... + </account> + requires #inWidth(96, TOTAL_ISSUANCE +Int (FREE_BALANCE' -Int FREE_BALANCE) +Int (RESERVED_BALANCE' -Int RESERVED_BALANCE)) + andBool EXISTENTIAL_DEPOSIT <=Int RESERVED_BALANCE' +
+
keep
endmodule +
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/tests/regression-new/kprove-markdown/set-balance/index.html b/k-distribution/tests/regression-new/kprove-markdown/set-balance/index.html new file mode 100644 index 00000000000..6c3cbf75b48 --- /dev/null +++ b/k-distribution/tests/regression-new/kprove-markdown/set-balance/index.html @@ -0,0 +1,1235 @@ + + + + + + + + + + + + + + +set_balance spec | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

set_balance spec

+

State Model

+
k
+module SET-BALANCE + imports INT + imports DOMAINS + imports COLLECTIONS + + configuration + <set-balance> + <k> $ACTION:Action </k> + <now> 0 </now> + <events> .List </events> + <return-value> .Result </return-value> + <call-stack> .List </call-stack> + <existentialDeposit> 0 </existentialDeposit> + <creationFee> 0 </creationFee> + <transferFee> 0 </transferFee> + <totalIssuance> 0 </totalIssuance> + <accounts> + <account multiplicity="*" type="Map"> + <accountID> .AccountId:AccountId </accountID> + <freeBalance> 0 </freeBalance> + <reservedBalance> 0 </reservedBalance> + <vestingBalance> 0 </vestingBalance> + <startingBlock> 0 </startingBlock> + <perBlock> 0 </perBlock> + <nonce> .Nonce </nonce> + <locks> .Set </locks> + </account> + </accounts> + </set-balance> +
+

Data

+
    +
  • An AccountId is an Int.
  • +
  • An Origin is an AccountId, Root, or None.
  • +
  • A Nonce is an optional Int.
  • +
  • An Event records some happening.
  • +
+
k
syntax AccountId ::= ".AccountId" | Int + // --------------------------------------- + + syntax Origin ::= AccountId | ".Root" | ".None" + // ----------------------------------------------- + + syntax Nonce ::= ".Nonce" | Int + // ------------------------------- + + syntax Event ::= DustEvent ( Int ) + // ---------------------------------- +
+

Some predicates which help specify behavior:

+
    +
  • #inWidth: Specify that a given number is in some bitwidth.
  • +
+
k
syntax Bool ::= #inWidth(Int, Int) [function, total] + // --------------------------------------------------------- + rule #inWidth(N, M) => 0 <=Int M andBool M <Int (2 ^Int N) +
+
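For example, #inWidth(8, 200) is true because 0 <=Int 200 and 200 <Int 2 ^Int 8 = 256, whereas #inWidth(8, 300) is false.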

Results

+

A Result is the return value of an execution step.

+
    +
  • AccountKilled indicates that the free balance goes below the existential threshold.
  • +
  • Updated indicates that an account was updated successfully.
  • +
+
k
syntax Result ::= ".Result" | "AccountKilled" | "Updated" + // --------------------------------------------------------- +
+

Public Getters

+

total_balance

+

Retrieves the total balance of an account. This includes both the free and +reserved balances.

+
k
syntax Int ::= "total_balance" "(" AccountId ")" [function, total] + // ----------------------------------------------------------------------- + rule total_balance(WHO) => free_balance(WHO) +Int reserved_balance(WHO) +
+

free_balance

+

Gets the free balance of an account.

+

Other than when this module is executing, this will never be strictly between +EXISTENTIAL_DEPOSIT and zero.

+
k
syntax Int ::= "free_balance" "(" AccountId ")" [function, total] + // ---------------------------------------------------------------------- + rule free_balance(_) => 0 [owise] + rule [[ free_balance(WHO) => FREE_BALANCE ]] + <account> + <accountID> WHO </accountID> + <freeBalance> FREE_BALANCE </freeBalance> + ... + </account> +
+

reserved_balance

+

Gets the reserved balance of an account.

+

Other than when this module is executing, this will never be strictly between +EXISTENTIAL_DEPOSIT and zero.

+
k
syntax Int ::= "reserved_balance" "(" AccountId ")" [function, total] + // -------------------------------------------------------------------------- + rule reserved_balance(_) => 0 [owise] + rule [[ reserved_balance(WHO) => FREE_BALANCE ]] + <account> + <accountID> WHO </accountID> + <reservedBalance> FREE_BALANCE </reservedBalance> + ... + </account> +
+

can_slash

+

Determines if an account’s free balance is over the value provided. This is +often used to determine if an account has enough balance to cover a potential +slash, hence the name.

+
k
syntax Bool ::= "can_slash" "(" AccountId "," Int ")" [function, total] + // ---------------------------------------------------------------------------- + rule can_slash(_, _) => false + rule [[ can_slash(WHO, AMOUNT) => FREE_BALANCE >=Int AMOUNT ]] + <account> + <accountID> WHO </accountID> + <freeBalance> FREE_BALANCE </freeBalance> + ... + </account> +
+

total_issuance

+

Retrieves the total amount of currency outstanding. This will always be equal to the sum of all free and reserved balances in all active accounts, except when the balances module is executing.

+
k
syntax Int ::= "total_issuance" [function, total] + // ------------------------------------------------------ + rule [[ total_issuance => TOTAL_ISSUANCE ]] + <totalIssuance> TOTAL_ISSUANCE </totalIssuance> +
+

issue

+

Issues currency, creating an imbalance.

+

This is not specified, since these semantics do not include the concept of an +imbalance. Without the concept of destructors and move semantics, it would be +almost impossible to use correctly.

+

burn

+

Burns currency.

+

This is not part of the semantics for the same reason issue is not.

+

Actions and Results

+

An Action is an execution step (or the result of an execution step). +An EntryAction is an Action that can be invoked externally. +A Result is considered an Action, as is an EntryAction.

+
k
syntax Action ::= Result | EntryAction + // -------------------------------------- +
+

account_exists

+
k
syntax Bool ::= "account_exists" "(" AccountId ")" [function, total] + // ------------------------------------------------------------------------- + rule account_exists(_) => false [owise] + rule [[ account_exists(WHO) => true ]] + <account> <accountID> WHO </accountID> ... </account> +
+

create_account

+
k
syntax Action ::= "create_account" "(" AccountId ")" + // ---------------------------------------------------- + rule <k> create_account(WHO) => .K ... </k> + <accounts> ( .Bag => <account> <accountID> WHO </accountID> ... </account> ) ... </accounts> +
+

set_free_balance

+
    +
  • Updates an account's balance if the new balance is above the existential threshold.
  • +
  • Kills the account if the balance goes below the existential threshold and the reserved balance is non-zero.
  • +
  • Reaps the account if the balance goes below the existential threshold and the reserved balance is zero.
  • +
+
k
syntax Action ::= "set_free_balance" "(" AccountId "," Int ")" + // -------------------------------------------------------------- + rule <k> (.K => create_account(WHO)) ~> set_free_balance(WHO, _) ... </k> + requires notBool account_exists(WHO) + + rule [free-account-updated]: + <k> set_free_balance(WHO, BALANCE) => .K ... </k> + <existentialDeposit> EXISTENTIAL_DEPOSIT </existentialDeposit> + <account> + <accountID> WHO </accountID> + <freeBalance> _ => BALANCE </freeBalance> + ... + </account> + requires EXISTENTIAL_DEPOSIT <=Int BALANCE + + rule [free-account-killed]: + <k> set_free_balance(WHO, BALANCE) => .K ... </k> + <events> ... (.List => ListItem(DustEvent(FREE_BALANCE))) </events> + <existentialDeposit> EXISTENTIAL_DEPOSIT </existentialDeposit> + <totalIssuance> TOTAL_ISSUANCE => TOTAL_ISSUANCE -Int BALANCE </totalIssuance> + <account> + <accountID> WHO </accountID> + <nonce> _ => .Nonce </nonce> + <freeBalance> FREE_BALANCE => 0 </freeBalance> + <reservedBalance> RESERVED_BALANCE </reservedBalance> + ... + </account> + requires BALANCE <Int EXISTENTIAL_DEPOSIT + andBool 0 <Int RESERVED_BALANCE + + rule [free-account-reaped]: + <k> set_free_balance(WHO, BALANCE) => .K ... </k> + <events> ... (.List => ListItem(DustEvent(FREE_BALANCE))) </events> + <existentialDeposit> EXISTENTIAL_DEPOSIT </existentialDeposit> + <totalIssuance> TOTAL_ISSUANCE => TOTAL_ISSUANCE -Int BALANCE </totalIssuance> + <accounts> + ( <account> + <accountID> WHO </accountID> + <freeBalance> FREE_BALANCE </freeBalance> + <reservedBalance> 0 </reservedBalance> + ... + </account> + => .Bag + ) + ... + </accounts> + requires BALANCE <Int EXISTENTIAL_DEPOSIT +
+

set_reserved_balance

+
    +
  • Updates an account's balance if the new balance is above the existential threshold.
  • +
  • Kills the account if the balance goes below the existential threshold and the free balance is non-zero.
  • +
  • Reaps the account if the balance goes below the existential threshold and the free balance is zero.
  • +
+
k
syntax Action ::= "set_reserved_balance" "(" AccountId "," Int ")" + // ------------------------------------------------------------------ + rule <k> (.K => create_account(WHO)) ~> set_reserved_balance(WHO, _) ... </k> + requires notBool account_exists(WHO) + + rule [reserved-account-updated]: + <k> set_reserved_balance(WHO, BALANCE) => .K ... </k> + <existentialDeposit> EXISTENTIAL_DEPOSIT </existentialDeposit> + <account> + <accountID> WHO </accountID> + <reservedBalance> _ => BALANCE </reservedBalance> + ... + </account> + requires EXISTENTIAL_DEPOSIT <=Int BALANCE + + rule [reserved-account-killed]: + <k> set_reserved_balance(WHO, BALANCE) => .K ... </k> + <events> ... (.List => ListItem(DustEvent(RESERVED_BALANCE))) </events> + <existentialDeposit> EXISTENTIAL_DEPOSIT </existentialDeposit> + <totalIssuance> TOTAL_ISSUANCE => TOTAL_ISSUANCE -Int BALANCE </totalIssuance> + <account> + <accountID> WHO </accountID> + <nonce> _ => .Nonce </nonce> + <freeBalance> FREE_BALANCE </freeBalance> + <reservedBalance> RESERVED_BALANCE => 0 </reservedBalance> + ... + </account> + requires BALANCE <Int EXISTENTIAL_DEPOSIT + andBool 0 <Int FREE_BALANCE + + rule [reserved-account-reaped]: + <k> set_reserved_balance(WHO, BALANCE) => .K ... </k> + <events> ... (.List => ListItem(DustEvent(RESERVED_BALANCE))) </events> + <existentialDeposit> EXISTENTIAL_DEPOSIT </existentialDeposit> + <totalIssuance> TOTAL_ISSUANCE => TOTAL_ISSUANCE -Int BALANCE </totalIssuance> + <accounts> + ( <account> + <accountID> WHO </accountID> + <freeBalance> 0 </freeBalance> + <reservedBalance> RESERVED_BALANCE </reservedBalance> + ... + </account> + => .Bag + ) + ... + </accounts> + requires BALANCE <Int EXISTENTIAL_DEPOSIT +
+

set_balance

+
    +
  • Sets the new free balance
  • +
  • Creates suitable imbalances (both positive and negative).
  • +
  • Calls set_free_balance with the new free balance.
  • +
  • Calls set_reserved_balance with the new reserved balance.
  • +
+
k
syntax EntryAction ::= "set_balance" "(" AccountId "," AccountId "," Int "," Int ")" + // ------------------------------------------------------------------------------------ + rule [balance-set]: + <k> set_balance(_, WHO, FREE_BALANCE, RESERVED_BALANCE) + => set_balance_free(WHO, FREE_BALANCE) + ~> set_balance_reserved(WHO, RESERVED_BALANCE) + ... + </k> +
+

Helpers for calling set_free_balance and set_reserved_balance.

+
    +
  • Sets the new free balance
  • +
  • Emits an imbalance event
  • +
  • Helper function for set_balance
  • +
+
k
syntax Action ::= "set_balance_free" "(" AccountId "," Int ")" + syntax Action ::= "set_balance_reserved" "(" AccountId "," Int ")" + // ------------------------------------------------------------------ + rule [balance-set-free]: + <k> set_balance_free(WHO, FREE_BALANCE') => set_free_balance(WHO, FREE_BALANCE') ... </k> + <totalIssuance> ISSUANCE => ISSUANCE +Int (FREE_BALANCE' -Int free_balance(WHO)) </totalIssuance> + requires #inWidth(96, ISSUANCE +Int (FREE_BALANCE' -Int free_balance(WHO))) + + rule [balance-set-reserved]: + <k> set_balance_reserved(WHO, RESERVED_BALANCE') => set_reserved_balance(WHO, RESERVED_BALANCE') ... </k> + <totalIssuance> ISSUANCE => ISSUANCE +Int (RESERVED_BALANCE' -Int reserved_balance(WHO)) </totalIssuance> + requires #inWidth(96, ISSUANCE +Int (RESERVED_BALANCE' -Int reserved_balance(WHO))) +
+

transfer_raw

+

Transfer some liquid free balance to another account.

+

transfer will set the FreeBalance of the sender and receiver. +It will decrease the total issuance of the system by the TransferFee. +If the sender's account is below the existential deposit as a result +of the transfer, the account will be reaped.

+

The dispatch origin for this call must be Signed by the transactor.

+
k
syntax ExistenceRequirement ::= "AllowDeath" + | "KeepAlive" + + syntax EntryAction ::= transfer(Origin, AccountId, Int) + | "transfer_keep_alive" "(" Origin "," AccountId "," Int ")" + // --------------------------------------------------------------------------------- + + syntax Action ::= rawTransfer(AccountId, AccountId, Int, ExistenceRequirement) + // ------------------------------------------------------------------------------ + rule [transfer-to-raw]: + <k> transfer(ORIGIN:AccountId, DESTINATION, AMOUNT) + => rawTransfer(ORIGIN, DESTINATION, AMOUNT, AllowDeath) + ... + </k> + + rule [transfer-keep-alive]: + <k> transfer_keep_alive(ORIGIN:AccountId, DESTINATION, AMOUNT) + => rawTransfer(ORIGIN, DESTINATION, AMOUNT, KeepAlive) + ... + </k> + + rule <k> (.K => create_account(DESTINATION)) ~> rawTransfer(ORIGIN, DESTINATION, _, _) ... </k> + requires account_exists(ORIGIN) + andBool notBool account_exists(DESTINATION) + + rule [transfer-self]: + <k> rawTransfer(ORIGIN:AccountId, ORIGIN, _, _) => .K ... </k> + requires account_exists(ORIGIN) + + rule [transfer-existing-account]: + <k> rawTransfer(ORIGIN, DESTINATION, AMOUNT, EXISTENCE_REQUIREMENT) + => set_free_balance(ORIGIN, SOURCE_BALANCE -Int AMOUNT -Int FEE) + ~> set_free_balance(DESTINATION, DESTINATION_BALANCE +Int AMOUNT) + ... + </k> + <totalIssuance> ISSUANCE => ISSUANCE -Int FEE </totalIssuance> + <existentialDeposit> EXISTENTIAL_DEPOSIT </existentialDeposit> + <transferFee> FEE </transferFee> + <accounts> + <account> + <accountID> ORIGIN </accountID> + <freeBalance> SOURCE_BALANCE </freeBalance> + ... + </account> + <account> + <accountID> DESTINATION </accountID> + <freeBalance> DESTINATION_BALANCE </freeBalance> + ... + </account> + </accounts> + requires ORIGIN =/=K DESTINATION + andBool DESTINATION_BALANCE >Int 0 + andBool SOURCE_BALANCE >=Int (AMOUNT +Int FEE) + andBool ensure_can_withdraw(ORIGIN, Transfer, SOURCE_BALANCE -Int AMOUNT -Int FEE) + andBool (EXISTENCE_REQUIREMENT ==K AllowDeath orBool SOURCE_BALANCE -Int AMOUNT -Int FEE >Int EXISTENTIAL_DEPOSIT) + + rule [transfer-create-account]: + <k> rawTransfer(ORIGIN:AccountId, DESTINATION, AMOUNT, EXISTENCE_REQUIREMENT) + => set_free_balance(ORIGIN, SOURCE_BALANCE -Int AMOUNT -Int CREATION_FEE) + ~> set_free_balance(DESTINATION, AMOUNT) + ... + </k> + <totalIssuance> ISSUANCE => ISSUANCE -Int CREATION_FEE </totalIssuance> + <existentialDeposit> EXISTENTIAL_DEPOSIT </existentialDeposit> + <creationFee> CREATION_FEE </creationFee> + <accounts> + <account> + <accountID> ORIGIN </accountID> + <freeBalance> SOURCE_BALANCE </freeBalance> + ... + </account> + <account> + <accountID> DESTINATION </accountID> + <freeBalance> 0 </freeBalance> + <reservedBalance> 0 </reservedBalance> + ... + </account> + ... + </accounts> + requires ORIGIN =/=K DESTINATION + andBool SOURCE_BALANCE >=Int (AMOUNT +Int CREATION_FEE) + andBool EXISTENTIAL_DEPOSIT <=Int AMOUNT + andBool ensure_can_withdraw(ORIGIN, Transfer, SOURCE_BALANCE -Int AMOUNT -Int CREATION_FEE) + andBool (EXISTENCE_REQUIREMENT ==K AllowDeath orBool SOURCE_BALANCE -Int AMOUNT -Int CREATION_FEE >=Int EXISTENTIAL_DEPOSIT) +
+

force_transfer

+

Force a transfer from any account to any other account. This can only be done by root.

+
k
syntax EntryAction ::= "force_transfer" "(" Origin "," AccountId "," AccountId "," Int ")" + // ------------------------------------------------------------------------------------------ + rule [force-transfer]: + <k> force_transfer(.Root, SOURCE, DESTINATION, AMOUNT) => transfer(SOURCE, DESTINATION, AMOUNT) ... </k> +
+

withdraw

+

Withdraw funds from an account.

+
k
syntax EntryAction ::= withdraw(AccountId, Int, WithdrawReason, ExistenceRequirement) + // ------------------------------------------------------------------------------------- + rule [withdraw]: // K really needs where clauses + <k> withdraw(WHO, AMOUNT, REASON, EXISTENCE_REQUIREMENT) + => withdrawInner(WHO, AMOUNT, free_balance(WHO) -Int AMOUNT, REASON, EXISTENCE_REQUIREMENT) + ... + </k> + + syntax Action ::= withdrawInner(AccountId, Int, Int, WithdrawReason, ExistenceRequirement) + // ------------------------------------------------------------------------------------------ + rule [withdrawInner]: + <k> withdrawInner(WHO, AMOUNT, NEW_BALANCE, REASON, EXISTENCE_REQUIREMENT) + => set_free_balance(WHO, NEW_BALANCE) + ... + </k> + <totalIssuance> ISSUANCE => ISSUANCE -Int AMOUNT </totalIssuance> + <existentialDeposit> EXISTENTIAL_DEPOSIT </existentialDeposit> + requires NEW_BALANCE >=Int 0 + andBool ensure_can_withdraw(WHO, REASON, NEW_BALANCE) + andBool (EXISTENCE_REQUIREMENT ==K AllowDeath orBool NEW_BALANCE >=Int EXISTENTIAL_DEPOSIT) +
+

Call Frames

+

Function call and return.

+
k
syntax CallFrame ::= frame(continuation: K) + syntax Action ::= call ( Action ) + | return ( Result ) + // ----------------------------------- + rule [call]: + <k> call(Action) ~> CONT => Action </k> + <call-stack> .List => ListItem(frame(CONT)) ... </call-stack> + + rule [return]: + <k> return(R) ~> _ => CONT </k> + <return-value> _ => R </return-value> + <call-stack> ListItem(frame(CONT)) => .List ... </call-stack> + + rule [return-unit]: + <k> .K => CONT </k> + <return-value> _ => .Result </return-value> + <call-stack> ListItem(frame(CONT)) => .List ... </call-stack> +
+

Ensure that a given amount can be withdrawn from an account.

+

FIXME: we do not account for multiple withdrawal reasons, due to K’s +lacking polymorphism.

+
k
syntax WithdrawReason ::= "TransactionPayment" + | "Transfer" + | "Reserve" + | "Fee" + | "Tip" + // ------------------------------- + + + syntax Bool ::= "ensure_can_withdraw" "(" AccountId "," WithdrawReason "," Int ")" [function, total] + // --------------------------------------------------------------------------------------------------------- + rule ensure_can_withdraw(_, _, _) => true [owise] + + rule [[ ensure_can_withdraw(WHO, Transfer #Or Reserve, BALANCE) => false ]] + <account> + <accountID> WHO </accountID> + <vestingBalance> VESTING_BALANCE </vestingBalance> + ... + </account> + requires VESTING_BALANCE <Int BALANCE + + rule [[ ensure_can_withdraw(WHO, REASON, BALANCE) => false ]] + <now> NOW </now> + <account> + <accountID> WHO </accountID> + <locks> ACCOUNT_LOCKS </locks> + ... + </account> + requires activeLocks(ACCOUNT_LOCKS, NOW, REASON, BALANCE) + + syntax LockID ::= "Election" + | "Staking" + | "Democracy" + | "Phragmen" + // ---------------------------- + + syntax AccountLock ::= lock ( id: LockID, until: Int, amount: Int, reasons: Set ) + // --------------------------------------------------------------------------------- + + syntax Bool ::= activeLock (AccountLock, Int, WithdrawReason, Int ) [function] + | activeLocks(Set, Int, WithdrawReason, Int ) [function] + | activeLocks(List, Int, WithdrawReason, Int, Bool) [function, klabel(activeLocksAux)] + // ----------------------------------------------------------------------------------------------------------- + rule activeLock(AL, NOW, REASON, BALANCE) => NOW <Int until(AL) andBool BALANCE <Int amount(AL) andBool REASON in reasons(AL) + + rule activeLocks(ALS, NOW, REASON, BALANCE) => activeLocks(Set2List(ALS), NOW, REASON, BALANCE, false) + + rule activeLocks(.List, _, _, _, RESULT) => RESULT + rule activeLocks((ListItem(AL) => .List) _, NOW, REASON, BALANCE, RESULT => RESULT orBool activeLock(AL, NOW, REASON, BALANCE)) +
+
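For intuition (a hypothetical scenario, not part of the specification): suppose an account carries lock(Staking, 100, 500, SetItem(Transfer)) and the current block is 50. A Transfer that would leave the account with a balance of 400 is rejected, because activeLock evaluates 50 <Int 100 andBool 400 <Int 500 andBool Transfer in reasons to true, so ensure_can_withdraw returns false. Once the block number reaches 100, or if the remaining balance would stay at or above 500, the lock no longer blocks the withdrawal.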

Slashing and repatriation of reserved balances

+

The first of these is also used by slash.

+
    +
  • slash_reserved
  • +
  • repatriate_reserved
  • +
+
k
syntax Action ::= "slash_reserved" "(" AccountId "," Int ")" + // ------------------------------------------------------------ + rule [slash-reserved]: + <k> slash_reserved(ACCOUNT, AMOUNT) + => set_reserved_balance(ACCOUNT, maxInt(0, RESERVED_BALANCE -Int AMOUNT)) + ... + </k> + <accounts> + <account> + <accountID> ACCOUNT </accountID> + <reservedBalance> RESERVED_BALANCE </reservedBalance> + ... + </account> + </accounts> + <totalIssuance> TOTAL_ISSUANCE => TOTAL_ISSUANCE -Int minInt(RESERVED_BALANCE, AMOUNT) </totalIssuance> + + syntax Action ::= "repatriate_reserved" "(" AccountId "," AccountId "," Int ")" + // ------------------------------------------------------------------------------- + rule [repatriate-reserved]: + <k> repatriate_reserved(SLASHED, BENEFICIARY, AMOUNT) + => set_free_balance(BENEFICIARY, BENEFICIARY_FREE_BALANCE +Int minInt(SLASHED_RESERVED_BALANCE, AMOUNT)) + ~> set_reserved_balance(SLASHED, SLASHED_RESERVED_BALANCE -Int minInt(SLASHED_RESERVED_BALANCE, AMOUNT)) + ... + </k> + <accounts> + <account> + <accountID> SLASHED </accountID> + <reservedBalance> SLASHED_RESERVED_BALANCE </reservedBalance> + ... + </account> + <account> + <accountID> BENEFICIARY </accountID> + <reservedBalance> BENEFICIARY_RESERVED_BALANCE </reservedBalance> + <freeBalance> BENEFICIARY_FREE_BALANCE </freeBalance> + ... + </account> + </accounts> + requires BENEFICIARY_FREE_BALANCE +Int BENEFICIARY_RESERVED_BALANCE >Int 0 + andBool SLASHED =/=K BENEFICIARY + + rule [repatriate-reserved-same-account]: + <k> repatriate_reserved(SLASHED, SLASHED, AMOUNT) => unreserve(SLASHED, AMOUNT) ... </k> +
+
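For example (hypothetical numbers): if the slashed account holds 100 in reserve and repatriate_reserved is called with an amount of 150, then minInt(100, 150) = 100, so the beneficiary's free balance grows by 100 and the slashed account's reserved balance drops to 0. Nothing is burned here: unlike slashing, repatriation merely moves funds between accounts, so totalIssuance is untouched.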

Slashing

+

Used to punish a node for violating the protocol.

+
k
syntax EntryAction ::= slash ( AccountId , Int ) + // ------------------------------------------------ + rule [slash]: + <k> slash(ACCOUNT, AMOUNT) => set_free_balance(ACCOUNT, FREE_BALANCE -Int AMOUNT) ... </k> + <accounts> + <account> + <accountID> ACCOUNT </accountID> + <freeBalance> FREE_BALANCE </freeBalance> + ... + </account> + </accounts> + <totalIssuance> TOTAL_ISSUANCE => TOTAL_ISSUANCE -Int AMOUNT </totalIssuance> + requires FREE_BALANCE >=Int AMOUNT + + rule [slash-empty-free]: + <k> slash(ACCOUNT, AMOUNT) + => set_free_balance(ACCOUNT, 0) + ~> slash_reserved(ACCOUNT, AMOUNT -Int FREE_BALANCE) + ... + </k> + <accounts> + <account> + <accountID> ACCOUNT </accountID> + <freeBalance> FREE_BALANCE </freeBalance> + ... + </account> + </accounts> + <totalIssuance> TOTAL_ISSUANCE => TOTAL_ISSUANCE -Int FREE_BALANCE </totalIssuance> + requires FREE_BALANCE <Int AMOUNT +
+
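For example (hypothetical numbers): slashing 50 from an account with a free balance of 30 triggers the second rule, which zeroes the free balance, reduces totalIssuance by 30, and then defers the remaining 20 to slash_reserved, which in turn removes at most the account's reserved balance and reduces issuance by whatever it actually takes.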

Reservation and unreservation of balances

+

Used to move balance from free to reserved and vice versa.

+
k
syntax Action ::= reserve ( AccountId , Int ) + // --------------------------------------------- + rule [reserve]: + <k> reserve(ACCOUNT, AMOUNT) + => set_reserved_balance(ACCOUNT, RESERVED_BALANCE +Int AMOUNT) + ~> set_free_balance(ACCOUNT, FREE_BALANCE -Int AMOUNT) + ... + </k> + <accounts> + <account> + <accountID> ACCOUNT </accountID> + <freeBalance> FREE_BALANCE </freeBalance> + <reservedBalance> RESERVED_BALANCE </reservedBalance> + ... + </account> + </accounts> + requires FREE_BALANCE >=Int AMOUNT + andBool ensure_can_withdraw(ACCOUNT, Reserve, FREE_BALANCE -Int AMOUNT) + + syntax Action ::= unreserve ( AccountId , Int ) + // ----------------------------------------------- + rule [unreserve]: + <k> unreserve(ACCOUNT, AMOUNT) + => set_free_balance(ACCOUNT, FREE_BALANCE +Int minInt(AMOUNT, RESERVED_BALANCE)) + ~> set_reserved_balance(ACCOUNT, RESERVED_BALANCE -Int minInt(AMOUNT, RESERVED_BALANCE)) + ... + </k> + <accounts> + <account> + <accountID> ACCOUNT </accountID> + <freeBalance> FREE_BALANCE </freeBalance> + <reservedBalance> RESERVED_BALANCE </reservedBalance> + ... + </account> + </accounts> +
+

Vesting

+
    +
  • locked_at ― amount currently locked
  • +
  • vesting_balance ― get the balance that cannot currently be withdrawn.
  • +
+
k
syntax Int ::= "locked_at" "(" AccountId ")" [function, total] + // ------------------------------------------------------------------- + rule [[ locked_at(WHO) => maxInt(0, VESTING_BALANCE -Int (PER_BLOCK *Int maxInt(0, NOW -Int STARTING_BLOCK))) ]] + <now> NOW </now> + <account> + <accountID> WHO </accountID> + <vestingBalance> VESTING_BALANCE </vestingBalance> + <startingBlock> STARTING_BLOCK </startingBlock> + <perBlock> PER_BLOCK </perBlock> + ... + </account> + + syntax Int ::= "vesting_balance" "(" AccountId ")" [function, total] + // ------------------------------------------------------------------------- + rule [[ vesting_balance(WHO) => minInt(FREE_BALANCE, locked_at(WHO)) ]] + <account> + <accountID> WHO </accountID> + <freeBalance> FREE_BALANCE </freeBalance> + ... + </account> +
+
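For intuition (hypothetical numbers): with vestingBalance = 1000, perBlock = 10, startingBlock = 5, and now = 50, locked_at gives maxInt(0, 1000 -Int (10 *Int maxInt(0, 50 -Int 5))) = maxInt(0, 1000 -Int 450) = 550; vesting_balance then caps this at the account's free balance, so an account with only 300 free would report a vesting balance of 300.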

Deposits

+

Deposit into an existing account.

+
k
syntax EntryAction ::= "deposit_into_existing" "(" AccountId "," Int ")" + // ------------------------------------------------------------------------ + rule [deposit-into-existing]: + <k> deposit_into_existing(WHO, AMOUNT) => .K ... </k> + <totalIssuance> TOTAL_ISSUANCE => TOTAL_ISSUANCE +Int AMOUNT </totalIssuance> + <account> + <accountID> WHO </accountID> + <freeBalance> FREE_BALANCE => FREE_BALANCE +Int AMOUNT </freeBalance> + ... + </account> + requires FREE_BALANCE >Int 0 +
+

End of module

+
k
endmodule +
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/tests/regression-new/markdownSelectors/a-spec/index.html b/k-distribution/tests/regression-new/markdownSelectors/a-spec/index.html new file mode 100644 index 00000000000..29fa727b151 --- /dev/null +++ b/k-distribution/tests/regression-new/markdownSelectors/a-spec/index.html @@ -0,0 +1,389 @@ + + + + + + + + + + + + + + +K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+
k
requires "test.md" + +module A-SPEC + +endmodule +
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/tests/regression-new/markdownSelectors/test/index.html b/k-distribution/tests/regression-new/markdownSelectors/test/index.html new file mode 100644 index 00000000000..6fa53b45e61 --- /dev/null +++ b/k-distribution/tests/regression-new/markdownSelectors/test/index.html @@ -0,0 +1,415 @@ + + + + + + + + + + + + + + +Test | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Test

+
k
module TEST-SYNTAX + imports INT +endmodule +module TEST + imports INT +
+
.k
configuration <k> $PGM:K </k> <r> 0 </r> +
+
.discard
rule <r> 0 => 1 </r> +
+
.keep
rule <k> 0 => 1 </k> +
+
.k .keep
rule <k> 1 => 2 </k> +
+
.k .discard .numberLines
rule <k> 2 => 3 </k> +
+
.keep .discard
rule <r> 0 => 1 </r> +
+
k
endmodule +
+
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/tests/regression-new/pl-tutorial/2_languages/1_simple/1_untyped/simple-untyped/index.html b/k-distribution/tests/regression-new/pl-tutorial/2_languages/1_simple/1_untyped/simple-untyped/index.html new file mode 100644 index 00000000000..9201c03c458 --- /dev/null +++ b/k-distribution/tests/regression-new/pl-tutorial/2_languages/1_simple/1_untyped/simple-untyped/index.html @@ -0,0 +1,1717 @@ + + + + + + + + + + + + + + +SIMPLE — Untyped | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

SIMPLE — Untyped

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K semantic definition of the untyped SIMPLE language. +SIMPLE is intended to be a pedagogical and research language that captures +the essence of the imperative programming paradigm, extended with several +features often encountered in imperative programming languages. +A program consists of a set of global variable declarations and +function definitions. Like in C, function definitions cannot be +nested and each program must have one function called main, +which is invoked when the program is executed. To make it more +interesting and to highlight some of K's strengths, SIMPLE includes +the following features in addition to the conventional imperative +expression and statement constructs:

+
    +
  • +

    Multidimensional arrays and array references. An array evaluates +to an array reference, which is a special value holding a location (where +the elements of the array start) together with the size of the array; +the elements of the array can be array references themselves (particularly +when the array is multi-dimensional). Array references are ordinary values, +so they can be assigned to variables and passed/received by functions.

    +
  • +
  • +

    Functions and function values. Functions can have zero or +more parameters and can return abruptly using a return statement. +SIMPLE follows a call-by-value parameter passing style, with static scoping. +Function names evaluate to function abstractions, which hereby become ordinary +values in the language, just like the array references.

    +
  • +
  • +

    Blocks with locals. SIMPLE variables can be declared +anywhere, their scope being from the place where they are declared +until the end of the most nested enclosing block.

    +
  • +
  • +

    Input/Output. The expression read() evaluates to the +next value in the input buffer, and the statement write(e) +evaluates e and outputs its value to the output buffer. The +input and output buffers are lists of values.

    +
  • +
  • +

    Exceptions. SIMPLE has parametric exceptions (the value thrown as +an exception can be caught and bound).

    +
  • +
  • +

    Concurrency via dynamic thread creation/termination and +synchronization. One can spawn a thread to execute any statement. +The spawned thread shares with its parent its environment at creation time. +Threads can be synchronized via a join command which blocks the current thread +until the joined thread completes, via re-entrant locks which can be acquired +and released, as well as through rendezvous commands.

    +
  • +
+
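To fix intuitions before diving into the definition, here is a small SIMPLE program (a hypothetical example written against the syntax defined below, not taken from the distribution's test suite) that exercises global functions, local variables, a while loop, and output:

function sum(n) {
  var s = 0, i = 1;
  while (i <= n) {
    s = s + i;
    i = i + 1;
  }
  return s;
}

function main() {
  print("sum = ", sum(10), "\n");
}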

Like in many other languages, some of SIMPLE's constructs can be +desugared into a smaller set of basic constructs. We do that at the end +of the syntax module, and then we only give semantics to the core constructs.

+

Note: This definition is commented slightly more than others, because it is +intended to be one of the first non-trivial definitions that the new +user of K sees. We recommend the beginner user to first check the +language definitions discussed in the K tutorial.

+
k
module SIMPLE-UNTYPED-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

We start by defining the SIMPLE syntax. The language constructs discussed +above have the expected syntax and evaluation strategies. Recall that in K +we annotate the syntax with appropriate strictness attributes, thus giving +each language construct the desired evaluation strategy.

+

Identifiers

+

Recall from the K tutorial that identifiers are builtin and come under the +syntactic category Id. The special identifier for the function +main belongs to all programs, and plays a special role in the semantics, +so we declare it explicitly. This would not be necessary if the identifiers +were all included automatically in semantic definitions, but that is not +possible because of parsing reasons (e.g., K variables used to match +concrete identifiers would then be ambiguously parsed as identifiers). They +are only included in the parser generated to parse programs (and used by the +kast tool). Consequently, we have to explicitly declare all the +concrete identifiers that play a special role in the semantics, like +main below.

+
k
syntax Id ::= "main" [token] +
+

Declarations

+

There are two types of declarations: for variables (including arrays) and +for functions. We are going to allow declarations of the form +var x=10, a[10,10], y=23;, which is why we allow the var +keyword to take a list of expressions. The non-terminals used in the two +productions below are defined shortly.

+
k
syntax Stmt ::= "var" Exps ";" + | "function" Id "(" Ids ")" Block +
+

Expressions

+

The expression constructs below are standard. Increment (++) takes +an expression rather than a variable because it can also increment an array +element. Recall that the syntax we define in K is what we call the syntax +of the semantics: while powerful enough to define non-trivial syntaxes +(thanks to the underlying SDF technology that we use), we typically refrain +from defining precise syntaxes, that is, ones which accept precisely the +well-formed programs (that would not be possible anyway in general). That job +is deferred to type systems, which can also be defined in K. In other words, +we are not making any effort to guarantee syntactically that only variables +or array elements are passed to the increment construct; we allow any +expression. Nevertheless, we will only give semantics to those, so expressions +of the form ++5, which parse (but which will be rejected by our type +system in the typed version of SIMPLE later), will get stuck when executed. +Arrays can be multidimensional and can hold other arrays, so their +lookup operation takes a list of expressions as argument and applies to an +expression (which can in particular be another array lookup), respectively. +The construct sizeOf gives the size of an array in number of elements +of its first dimension. Note that almost all constructs are strict. The only +constructs which are not strict are the increment (since its first argument +gets updated, so it cannot be evaluated), the input read which takes no +arguments so strictness is irrelevant for it, the logical and and or constructs +which are short-circuited, the thread spawning construct which creates a new +thread executing the argument expression and returns its unique identifier to +the creating thread (so it cannot just evaluate its argument in place), and the +assignment which is only strict in its second argument (for the same reason as +the increment).

+
k
syntax Exp ::= Int | Bool | String | Id + | "(" Exp ")" [bracket] + | "++" Exp + > Exp "[" Exps "]" [strict] + > Exp "(" Exps ")" [strict] + | "-" Exp [strict] + | "sizeOf" "(" Exp ")" [strict] + | "read" "(" ")" + > left: + Exp "*" Exp [strict, left] + | Exp "/" Exp [strict, left] + | Exp "%" Exp [strict, left] + > left: + Exp "+" Exp [strict, left] + | Exp "-" Exp [strict, left] + > non-assoc: + Exp "<" Exp [strict, non-assoc] + | Exp "<=" Exp [strict, non-assoc] + | Exp ">" Exp [strict, non-assoc] + | Exp ">=" Exp [strict, non-assoc] + | Exp "==" Exp [strict, non-assoc] + | Exp "!=" Exp [strict, non-assoc] + > "!" Exp [strict] + > left: + Exp "&&" Exp [strict(1), left] + | Exp "||" Exp [strict(1), left] + > "spawn" Block + > Exp "=" Exp [strict(2), right] +
+

We also need comma-separated lists of identifiers and of expressions. +Moreover, we want them to be strict, that is, to evaluate to lists of results +whenever requested (e.g., when they appear as strict arguments of +the constructs above).

+
k
syntax Ids ::= List{Id,","} [overload(Exps)] + syntax Exps ::= List{Exp,","} [overload(Exps), strict] // automatically hybrid now + syntax Exps ::= Ids + syntax Val + syntax Vals ::= List{Val,","} [overload(Exps)] + syntax Bottom + syntax Bottoms ::= List{Bottom,","} [overload(Exps)] + syntax Ids ::= Bottoms +
+

Statements

+

Most of the statement constructs are standard for imperative languages. +We syntactically distinguish between empty and non-empty blocks, because we +chose Stmts not to be a (;-separated) list of +Stmt. Variables can be declared anywhere inside a block, their scope +ending with the block. Expressions are allowed to be used for their side +effects only (followed by a semicolon ;). Functions are allowed +to abruptly return. The exceptions are parametric, i.e., one can throw a value +which is bound to the variable declared by catch. Threads can be +dynamically created and terminated, and can synchronize with join, +acquire, release and rendezvous. Note that the +strictness attributes obey the intended evaluation strategy of the various +constructs. In particular, the if-then-else construct is strict only in its +first argument (the if-then construct will be desugared into if-then-else), +while the loop constructs are not strict in any arguments. The print +statement construct is variadic, that is, it takes an arbitrary number of +arguments.

+
k
syntax Block ::= "{" "}" + | "{" Stmt "}" + + syntax Stmt ::= Block + | Exp ";" [strict] + | "if" "(" Exp ")" Block "else" Block [avoid, strict(1)] + | "if" "(" Exp ")" Block [macro] + | "while" "(" Exp ")" Block + | "for" "(" Stmt Exp ";" Exp ")" Block [macro] + | "return" Exp ";" [strict] + | "return" ";" [macro] + | "print" "(" Exps ")" ";" [strict] +// NOTE: print strict allows non-deterministic evaluation of its arguments +// Either keep like this but document, or otherwise make Exps seqstrict. +// Of define and use a different expression list here, which is seqstrict. + | "try" Block "catch" "(" Id ")" Block + | "throw" Exp ";" [strict] + | "join" Exp ";" [strict] + | "acquire" Exp ";" [strict] + | "release" Exp ";" [strict] + | "rendezvous" Exp ";" [strict] +
+

The reason we allow Stmts as the first argument of for +instead of Stmt is because we want to allow more than one statement +to be executed when the loop is initialized. Also, as seen shortly, macros +may expand one statement into more statements; for example, an initialized +variable declaration statement var x=0; desugars into two statements, +namely var x; x=0;, so if we use Stmt instead of Stmts +in the production of for above then we risk that the macro expansion +of statement var x=0; happens before the macro expansion of for, +also shown below, in which case the latter would not apply anymore because +of syntactic mismatch.

+
k
syntax Stmt ::= Stmt Stmt [right] + +// I wish I were able to write the following instead, but confuses the parser. +// +// syntax Stmts ::= List{Stmt,""} +// syntax Top ::= Stmt | "function" Id "(" Ids ")" Block +// syntax Pgm ::= List{Top,""} +// +// With that, I could have also eliminated the empty block +
+

Desugared Syntax

+

This part desugars some of SIMPLE's language constructs into core ones. +We only want to give semantics to core constructs, so we get rid of the +derived ones before we start the semantics. All desugaring macros below are +straightforward.

+
k
rule if (E) S => if (E) S else {} + rule for(Start Cond; Step) {S} => {Start while (Cond) {S Step;}} + rule for(Start Cond; Step) {} => {Start while (Cond) {Step;}} + rule var E1:Exp, E2:Exp, Es:Exps; => var E1; var E2, Es; + rule var X:Id = E; => var X; X = E; +
+
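As a concrete illustration of the macros above (using the declaration mentioned earlier as an example), var x=10, a[10,10], y=23; expands step by step into

var x; x = 10;
var a[10,10];
var y; y = 23;

with the multi-dimensional declaration var a[10,10]; left untouched at this stage; it is handled at runtime by the array declaration rules given below.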

For the semantics, we can therefore assume from now on that each +conditional has both branches, that there are only while loops, and +that each variable is declared alone and without any initialization as part of +the declaration.

+
k
endmodule + + +module SIMPLE-UNTYPED + imports SIMPLE-UNTYPED-SYNTAX + imports DOMAINS +
+

Basic Semantic Infrastructure

+

Before one starts adding semantic rules to a K definition, one needs to +define the basic semantic infrastructure consisting of definitions for +values and configuration. As discussed in the definitions +in the K tutorial, the values are needed to know when to stop applying +the heating rules and when to start applying the cooling rules corresponding +to strictness or context declarations. The configuration serves as a backbone +for the process of configuration abstraction which allows users to only +mention the relevant cells in each semantic rule, the rest of the configuration +context being inferred automatically. Although in some cases the configuration +could be automatically inferred from the rules, we believe that it is very +useful for language designers/semanticists to actually think of and design +their configuration explicitly, so the current implementation of K requires +one to define it.

+

Values

+

We here define the values of the language that the various fragments of +programs evaluate to. First, integers and Booleans are values. As discussed, +arrays evaluate to special array reference values holding (1) a location from +where the array's elements are contiguously allocated in the store, and +(2) the size of the array. Functions evaluate to function values as +λ-abstractions (we do not need to evaluate functions to closures +because each function is executed in the fixed global environment and +function definitions cannot be nested). Like in IMP and other +languages, we finally tell the tool that values are K results.

+
k
syntax Val ::= Int | Bool | String + | array(Int,Int) + | lambda(Ids,Stmt) + syntax Exp ::= Val + syntax Exps ::= Vals + syntax Val ::= Bottom + syntax Vals ::= Bottoms + syntax KResult ::= Val + | Vals // TODO: should not need this +
+

The inclusion of values in expressions follows the methodology of +syntactic definitions (like, e.g., in SOS): extend the syntax of the language +to encompass all values and additional constructs needed to give semantics. +In addition to that, it allows us to write the semantic rules using the +original syntax of the language, and to parse them with the same (now extended +with additional values) parser. If writing the semantics directly on the K +AST, using the associated labels instead of the syntactic constructs, then one +would not need to include values in expressions.

+

Configuration

+

The K configuration of SIMPLE consists of a top level cell, T, +holding a threads cell, a global environment map cell genv +mapping the global variables and function names to their locations, a shared +store map cell store mapping each location to some value, a set cell +busy holding the locks which have been acquired but not yet released +by threads, a set cell terminated holding the unique identifiers of +the threads which already terminated (needed for join), input +and output list cells, and a nextLoc cell holding a natural +number indicating the next available location. Unlike in the small languages +in the K tutorial, where we used the fresh predicate to generate fresh +locations, in larger languages, like SIMPLE, we prefer to explicitly manage +memory. The location counter in nextLoc models an actual physical +location in the store; for simplicity, we assume arbitrarily large memory and +no garbage collection. The threads cell contains one thread +cell for each existing thread in the program. Note that the thread cell has +multiplicity *, which means that at any given moment there could be zero, +one or more thread cells. Each thread cell contains a +computation cell k, a control cell holding the various +control structures needed to jump to certain points of interest in the program +execution, a local environment map cell env mapping the thread local +variables to locations in the store, and finally a holds map cell +indicating what locks have been acquired by the thread and not released so far +and how many times (SIMPLE's locks are re-entrant). The control cell +currently contains only two subcells, a function stack fstack which +is a list and an exception stack xstack which is also a list. +One can add more control structures in the control cell, such as a +stack for break/continue of loops, etc., if the language is extended with more +control-changing constructs. Note that all cells except for k are +also initialized, in that they contain a ground term of their corresponding +sort. The k cell is initialized with the program that will be passed +to the K tool, as indicated by the $PGM variable, followed by the +execute task (defined shortly).

+
k
// the syntax declarations below are required because the sorts are + // referenced directly by a production and, because of the way KIL to KORE + // is implemented, the configuration syntax is not available yet + // should simply work once KIL is removed completely + // check other definitions for this hack as well + + syntax ControlCell + syntax ControlCellFragment + + configuration <T color="red"> + <threads color="orange"> + <thread multiplicity="*" type="Map" color="yellow"> + <id color="pink"> -1 </id> + <k color="green"> $PGM:Stmt ~> execute </k> + //<br/> // TODO(KORE): support latex annotations #1799 + <control color="cyan"> + <fstack color="blue"> .List </fstack> + <xstack color="purple"> .List </xstack> + </control> + //<br/> // TODO(KORE): support latex annotations #1799 + <env color="violet"> .Map </env> + <holds color="black"> .Map </holds> + </thread> + </threads> + //<br/> // TODO(KORE): support latex annotations #1799 + <genv color="pink"> .Map </genv> + <store color="white"> .Map </store> + <busy color="cyan"> .Set </busy> + <terminated color="red"> .Set </terminated> + //<br/> // TODO(KORE): support latex annotations #1799 + <input color="magenta" stream="stdin"> .List </input> + <output color="brown" stream="stdout"> .List </output> + <nextLoc color="gray"> 0 </nextLoc> + </T> +
+

Declarations and Initialization

+

We start by defining the semantics of declarations (for variables, +arrays and functions).

+

Variable Declaration

+

The SIMPLE syntax was desugared above so that each variable is +declared alone and its initialization is done as a separate statement. +The semantic rule below matches resulting variable declarations of the +form var X; on top of the k cell +(indeed, note that the k cell is complete, or round, to the +left, and is torn, or ruptured, to the right), allocates a fresh +location L in the store which is initialized with a special value ⊥ +(indeed, the unit ., or nothing, is matched anywhere +in the map ‒note the tears at both sides‒ and replaced with the +mapping L ↦ ⊥), and binds X to L in the local +environment shadowing previous declarations of X, if any. +This possible shadowing of X requires us to therefore update the +entire environment map, which is expensive and can significantly slow +down the execution of larger programs. On the other hand, since we know +that L is not already bound in the store, we simply add the binding +L ↦ ⊥ to the store, thus avoiding a potentially complete +traversal of the store map in order to update it. We prefer the approach +used for updating the store whenever possible, because, in addition to being +faster, it offers more true concurrency than the latter; indeed, according +to the concurrent semantics of K, the store is not frozen while +L ↦ ⊥ is added to it, while the environment is frozen during the +update operation Env[L/X]. The variable declaration command is +also removed from the top of the computation cell and the fresh location +counter is incremented. The undefined symbol added in the store +is of sort KItem, instead of Val, on purpose; this way, the +store lookup rules will get stuck when one attempts to look up an +uninitialized location. All the above happen in one transactional step, +with the rule below. Note also how configuration abstraction allows us to +only mention the needed cells; indeed, as the configuration above states, +the k and env cells are actually located within a +thread cell within the threads cell, but one need +not mention these: the configuration context of the rule is +automatically transformed to match the declared configuration +structure.

+
k
syntax KItem ::= "undefined" + + rule <k> var X:Id; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> undefined ...</store> + <nextLoc> L => L +Int 1 </nextLoc> +
+

Array Declaration

+

The K semantics of the uni-dimensional array declaration is somewhat similar +to the above declaration of ordinary variables. First, note the +context declaration below, which requests the evaluation of the array +dimension. Once evaluated, say to a natural number N, then +N +Int 1 locations are allocated in the store for +an array of size N, the additional location (chosen to be the first +one allocated) holding the array reference value. The array reference +value array(L,N) states that the array has size N and its +elements are located contiguously in the store starting with location +L. The operation L … L' ↦ V, defined at the end of this +file in the auxiliary operation section, initializes each location in +the list L … L' to V. Note that, since the dimensions of +array declarations can be arbitrary expressions, this virtually means +that we can dynamically allocate memory in SIMPLE by means of array +declarations.

+
k
context var _:Id[HOLE]; + + rule <k> var X:Id[N:Int]; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> array(L +Int 1, N) + (L +Int 1) ... (L +Int N) |-> undefined ...</store> + <nextLoc> L => L +Int 1 +Int N </nextLoc> + requires N >=Int 0 +
+

SIMPLE allows multi-dimensional arrays. For semantic simplicity, we +desugar them all into uni-dimensional arrays by code transformation. +This way, we only need to give semantics to uni-dimensional arrays. +First, note that the context rule above actually evaluates all the array +dimensions (that's why we defined the expression lists strict!): +Upon evaluating the array dimensions, the code generation rule below +desugars multi-dimensional array declaration to uni-dimensional declarations. +To this aim, we introduce two special unique variable identifiers, +$1 and $2. The first variable, $1, iterates +through and initializes each element of the first dimension with an array +of the remaining dimensions, declared as variable $2:

+
k
syntax Id ::= "$1" [token] | "$2" [token] + rule var X:Id[N1:Int, N2:Int, Vs:Vals]; + => var X[N1]; + { + for(var $1 = 0; $1 <= N1 - 1; ++$1) { + var $2[N2, Vs]; + X[$1] = $2; + } + } +
+
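For instance (a worked instantiation of the rule above), once its dimensions are evaluated, var a[2,3]; becomes

var a[2];
{
  for(var $1 = 0; $1 <= 2 - 1; ++$1) {
    var $2[3];
    a[$1] = $2;
  }
}

so the outer array ends up holding two references to freshly allocated arrays of size 3.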

Ideally, one would like to perform syntactic desugarings like the one +above before the actual semantics. Unfortunately, that was not possible in +this case because the dimension expressions of the multi-dimensional array need +to be evaluated first. Indeed, the desugaring rule above does not work if the +dimensions of the declared array are arbitrary expressions, because they can +have side effects (e.g., a[++x,++x]) and those side effects would be +propagated each time the expression is evaluated in the desugaring code (note +that both the loop condition and the nested multi-dimensional declaration +would need to evaluate the expressions given as array dimensions).

+

Function declaration

+

Functions are evaluated to λ-abstractions and stored like any other +values in the store. A binding is added into the environment for the function +name to the location holding its body. Similarly to the C language, SIMPLE +only allows function declarations at the top level of the program. More +precisely, the subsequent semantics of SIMPLE only works well when one +respects this requirement. Indeed, the simplistic context-free parser +generated by the grammar above is more generous than we may want, in that it +allows function declarations anywhere any declaration is allowed, including +inside arbitrary blocks. However, as the rule below shows, we are not +storing the declaration environment with the λ-abstraction value as +closures do. Instead, as seen shortly, we switch to the global environment +whenever functions are invoked, which is consistent with our requirement that +functions should only be declared at the top. Thus, if one declares local +functions, then one may see unexpected behaviors (e.g., when one shadows a +global variable before declaring a local function). The type checker of +SIMPLE, also defined in K (see examples/simple/typed/static), +discards programs which do not respect this requirement.

+
k
rule <k> function F(Xs) S => .K ...</k> + <env> Env => Env[F <- L] </env> + <store>... .Map => L |-> lambda(Xs, S) ...</store> + <nextLoc> L => L +Int 1 </nextLoc> +
+

When we are done with the first pass (pre-processing), the computation +cell k contains only the token execute (see the configuration +declaration above, where the computation item execute was placed +right after the program in the k cell of the initial configuration) +and the cell genv is empty. In this case, we have to call +main() and to initialize the global environment by transferring the +contents of the local environment into it. We prefer to do it this way, as +opposed to processing all the top level declarations directly within the global +environment, because we want to avoid duplication of semantics: the syntax of +the global declarations is identical to that of their corresponding local +declarations, so the semantics of the latter suffices provided that we copy +the local environment into the global one once we are done with the +pre-processing. We want this separate pre-processing step precisely because +we want to create the global environment. All (top-level) functions end up +having their names bound in the global environment and, as seen below, they +are executed in that same global environment; all these mean, in particular, +that the functions "see" each other, allowing for mutual recursion, etc.

+
k
syntax KItem ::= "execute" + rule <k> execute => main(.Exps); </k> + <env> Env </env> + <genv> .Map => Env </genv> +
+

Expressions

+

We next define the K semantics of all the expression constructs.

+

Variable lookup

+

When a variable X is the first computational task, and X is bound to some +location L in the environment, and L is mapped to some value V in the +store, then we rewrite X into V:

+
k
rule <k> X:Id => V ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V:Val ...</store> [group(lookup)] +
+

Note that the rule above excludes reading ⊥, because ⊥ is not +a value and V is checked at runtime to be a value.

+

Variable/Array increment

+

This is tricky, because we want to allow both ++x and ++a[5]. +Therefore, we need to extract the lvalue of the expression to increment. +To do that, we state that the expression to increment should be wrapped +by the auxiliary lvalue operation and then evaluated. The semantics +of this auxiliary operation is defined at the end of this file. For now, all +we need to know is that it takes an expression and evaluates to a location +value. Location values, also defined at the end of the file, are integers +wrapped with the operation loc, to distinguish them from ordinary +integers.

+
k
context ++(HOLE => lvalue(HOLE)) + rule <k> ++loc(L) => I +Int 1 ...</k> + <store>... L |-> (I => I +Int 1) ...</store> [group(increment)] +
+

Arithmetic operators

+

There is nothing special about the following rules. They rewrite the +language constructs to their library counterparts when their arguments +become values of expected sorts:

+
k
rule I1 + I2 => I1 +Int I2 + rule Str1 + Str2 => Str1 +String Str2 + rule I1 - I2 => I1 -Int I2 + rule I1 * I2 => I1 *Int I2 + rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0 + rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0 + rule - I => 0 -Int I + rule I1 < I2 => I1 <Int I2 + rule I1 <= I2 => I1 <=Int I2 + rule I1 > I2 => I1 >Int I2 + rule I1 >= I2 => I1 >=Int I2 +
+

The equality and inequality constructs reduce to syntactic comparison +of the two argument values (which is what the equality on K terms does).

+
k
rule V1:Val == V2:Val => V1 ==K V2 + rule V1:Val != V2:Val => V1 =/=K V2 +
+

The logical negation is clear, but the logical conjunction and disjunction +are short-circuited:

+
k
rule ! T => notBool(T) + rule true && E => E + rule false && _ => false + rule true || _ => true + rule false || E => E +
+

Array lookup

+

Untyped SIMPLE does not check array bounds (the dynamically typed version of +it, in examples/simple/typed/dynamic, does check for array out of +bounds). The first rule below desugars the multi-dimensional array access to +uni-dimensional array access; recall that the array access operation was +declared strict, so all sub-expressions involved are already values at this +stage. The second rule rewrites the array access to a lookup operation at a +precise location; we prefer to do it this way to avoid locking the store. +The semantics of the auxiliary lookup operation is straightforward, +and is defined at the end of the file.

+
k
// The [anywhere] feature is underused, because it would only be used +// at the top of the computation or inside the lvalue wrapper. So it +// may not be worth, or we may need to come up with a special notation +// allowing us to enumerate contexts for [anywhere] rules. + rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs] + [anywhere] + + rule array(L,_)[N:Int] => lookup(L +Int N) + [anywhere] +
+

Size of an array

+

The size of the array is stored in the array reference value, and the +sizeOf construct was declared strict, so:

+
k
rule sizeOf(array(_,N)) => N +
+

Function call

+

Function application was strict in both its arguments, so we can +assume that both the function and its arguments are evaluated to +values (the former expected to be a λ-abstraction). The first +rule below matches a well-formed function application on top of the +computation and performs the following steps atomically: it switches +to the function body followed by return; (for the case in +which the function does not use an explicit return statement); it +pushes the remaining computation, the current environment, and the +current control data onto the function stack (the remaining +computation can thus also be discarded from the computation cell, +because an unavoidable subsequent return statement ‒see +above‒ will always recover it from the stack); it switches the +current environment (which is being pushed on the function stack) to +the global environment, which is where the free variables in the +function body should be looked up; it binds the formal parameters to +fresh locations in the new environment, and stores the actual +arguments to those locations in the store (this latter step is easily +done by reducing the problem to variable declarations, whose semantics +we have already defined; the auxiliary operation mkDecls is +defined at the end of the file). The second rule pops the +computation, the environment and the control data from the function +stack when a return statement is encountered as the next +computational task, passing the returned value to the popped +computation (the popped computation was the context in which the +returning function was called). Note that the pushing/popping of the +control data is crucial. Without it, one may have a function that +contains an exception block with a return statement inside, which +would put the xstack cell in an inconsistent state (since the +exception block modifies it, but that modification should be +irrelevant once the function returns). We add an artificial +nothing value to the language, which is returned by the +nullary return; statements.

+
k
syntax KItem ::= (Map,K,ControlCellFragment) + + rule <k> lambda(Xs,S)(Vs:Vals) ~> K => mkDecls(Xs,Vs) S return; </k> + <control> + <fstack> .List => ListItem((Env,K,C)) ...</fstack> + C + </control> + <env> Env => GEnv </env> + <genv> GEnv </genv> + + rule <k> return(V:Val); ~> _ => V ~> K </k> + <control> + <fstack> ListItem((Env,K,C)) => .List ...</fstack> + (_ => C) + </control> + <env> _ => Env </env> + + syntax Val ::= "nothing" + rule return; => return nothing; +
+

Like for division-by-zero, it is left unspecified what happens +when the nothing value is used in domain calculations. For +example, from the perspective of the language semantics, +7 +Int nothing can evaluate to anything, or +may not evaluate at all (be undefined). If one wants to make sure that +such artificial values are never misused, then one needs to define a static +checker (also using K, like our type checker in +examples/simple/typed/static) and reject programs that do. +Note that, unlike the undefined symbol which had the sort KItem +instead of Val, we defined nothing to be a value. That +is because, as explained above, we do not want the program to get +stuck when nothing is returned by a function. Instead, we want the +behavior to be unspecified; in particular, if one is careful to never +use the returned value in domain computation, like it happens when we +call a function for its side effects (e.g., with a statement of the +form f(x);), then the program does not get stuck.

+

Read

+

The read() expression construct simply evaluates to the next +input value, at the same time discarding the input value from the +input cell.

+
k
rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input> [group(read)] +
+

Assignment

+

In SIMPLE, like in C, assignments are expression constructs and not statement +constructs. To make it a statement all one needs to do is to follow it by a +semi-colon ; (see the semantics for expression statements below). +Like for the increment, we want to allow assignments not only to variables but +also to array elements, e.g., e1[e2] = e3 where e1 evaluates +to an array reference, e2 to a natural number, and e3 to any +value. Thus, we first compute the lvalue of the left-hand-side expression +that appears in an assignment, and then we do the actual assignment to the +resulting location:

+
k
context (HOLE => lvalue(HOLE)) = _ + + rule <k> loc(L) = V:Val => V ...</k> <store>... L |-> (_ => V) ...</store> + [group(assignment)] +
+

Statements

+

We next define the K semantics of statements.

+

Blocks

+

Empty blocks are simply discarded, as shown in the first rule below. +For non-empty blocks, we schedule the enclosed statement but we have to +make sure the environment is recovered after the enclosed statement executes. +Recall that we allow local variable declarations, whose scope is the block +enclosing them. That is the reason for which we have to recover the +environment after the block. This allows us to have a very simple semantics +for variable declarations, as we did above. One can make the two rules below +computational if one wants them to count as computational steps.

+
k
rule {} => .K + rule <k> { S } => S ~> setEnv(Env) ...</k> <env> Env </env> +
+

The basic definition of environment recovery is straightforward and +given in the section on auxiliary constructs at the end of the file.

+

There are two common alternatives to the above semantics of blocks. +One is to keep track of the variables which are declared in the block and only +recover those at the end of the block. This way one does more work for +variable declarations but conceptually less work for environment recovery; we +say conceptually because it is not clear that it is indeed the case that +one does less work when AC matching is involved. The other alternative is to +work with a stack of environments instead of a flat environment, and push the +current environment when entering a block and pop it when exiting it. This +way, one does more work when accessing variables (since one has to search the +variable in the environment stack in a top-down manner), but on the other hand +uses smaller environments and the definition gets closer to an implementation. +Based on experience with dozens of language semantics and other K definitions, +we have found that our approach above is the best trade-off between elegance +and efficiency (especially since rewrite engines have built-in techniques to +lazily copy terms, by need, thus not creating unnecessary copies), +so it is the one that we follow in general.

+

Sequential composition

+

Sequential composition is desugared into K's builtin sequentialization +operation (recall that, like in C, the semi-colon ; is not a +statement separator in SIMPLE — it is either a statement terminator or a +construct for a statement from an expression). Note that K allows us +to define the semantics of SIMPLE in such a way that statements eventually +dissolve from the top of the computation when they are completed; this is in +sharp contrast to (artificially) evaluating them to a special +skip statement value and then getting rid of that special value, as +is the case in other semantic approaches (where everything must evaluate +to something). This means that once S₁ completes in the rule below, S₂ +becomes automatically the next computation item without any additional +(explicit or implicit) rules.

+
k
rule S1:Stmt S2:Stmt => S1 ~> S2 +
+

A subtle aspect of the rule above is that S₁ is declared to have sort +Stmts and not Stmt. That is because desugaring macros can indeed +produce left associative sequential composition of statements. For example, +the code var x=0; x=1; is desugared to +(var x; x=0;) x=1;, so although originally the first term of +the sequential composition had sort Stmt, after desugaring it became +of sort Stmts. Note that the attribute [right] associated +to the sequential composition production is an attribute of the syntax, and not +of the semantics: e.g., it tells the parser to parse +var x; x=0; x=1; as var x; (x=0; x=1;), but it +does not tell the rewrite engine to rewrite (var x; x=0;) x=1; to +var x; (x=0; x=1;).

+

Expression statements

+

Expression statements are only used for their side effects, so their result +value is simply discarded. Common examples of expression statements are ones +of the form ++x;, x=e;, e1[e2]=e3;, etc.

+
k
rule _:Val; => .K +
+

Conditional

+

Since the conditional was declared with the strict(1) attribute, we +can assume that its first argument will eventually be evaluated. The rules +below cover the only two possibilities in which the conditional is allowed to +proceed (otherwise the rewriting process gets stuck).

+
k
rule if ( true) S else _ => S + rule if (false) _ else S => S +
+

While loop

+

The simplest way to give the semantics of the while loop is by unrolling. +Note, however, that its unrolling is only allowed when the while loop reaches +the top of the computation (to avoid non-termination of unrolling). The +simple while loop semantics below works because our while loops in SIMPLE are +indeed very basic. If we allowed break/continue of loops then we would need +a completely different semantics, which would also involve the control cell.

+
k
rule while (E) S => if (E) {S while(E)S} +
+
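For example, while (n > 0) { n = n - 1; } unrolls into if (n > 0) { { n = n - 1; } while (n > 0) { n = n - 1; } }, and the conditional rules above then either enter the unrolled body or dissolve the loop, depending on the value of n.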

Print

+

The print statement was strict, so all its arguments are now +evaluated (recall that print is variadic). We append each of +its evaluated arguments to the output buffer, and discard the residual +print statement with an empty list of arguments.

+
k
rule <k> print(V:Val, Es:Vals => Es); ...</k> <output>... .List => ListItem(V) </output> + [group(print)] + rule print(.Vals); => .K +
+

Exceptions

+

SIMPLE allows parametric exceptions, in that one can throw and catch a +particular value. The statement try S₁ catch(X) S₂ +proceeds with the evaluation of S₁. If S₁ evaluates normally, i.e., +without any exception thrown, then S₂ is discarded and the execution +continues normally. If S₁ throws an exception with a statement of the +form throw E, then E is first evaluated to some value V +(throw was declared to be strict), then V is bound to X, then +S₂ is evaluated in the new environment while the remainder of S₁ is +discarded, then the environment is recovered and the execution continues +normally with the statement following the try S₁ catch(X) S₂ statement. +Exceptions can be nested and the statements in the +catch part (S₂ in our case) can throw exceptions to the +upper level. One should be careful with how one handles the control data +structures here, so that the abrupt changes of control due to exception +throwing and to function returns interact correctly with each other. +For example, we want to allow function calls inside the statement S₁ in +a try S₁ catch(X) S₂ block which can throw an exception +that is not caught by the function but instead is propagated to the +try S₁ catch(X) S₂ block that called the function. +Therefore, we have to make sure that the function stack as well as other +potential control structures are also properly modified when the exception +is thrown to correctly recover the execution context. This can be easily +achieved by pushing/popping the entire current control context onto the +exception stack. The three rules below modularly do precisely the above.

+
k
syntax KItem ::= (Id,Stmt,K,Map,ControlCellFragment) + + syntax KItem ::= "popx" + + rule <k> (try S1 catch(X) {S2} => S1 ~> popx) ~> K </k> + <control> + <xstack> .List => ListItem((X, S2, K, Env, C)) ...</xstack> + C + </control> + <env> Env </env> + + rule <k> popx => .K ...</k> + <xstack> ListItem(_) => .List ...</xstack> + + rule <k> throw V:Val; ~> _ => { var X = V; S2 } ~> K </k> + <control> + <xstack> ListItem((X, S2, K, Env, C)) => .List ...</xstack> + (_ => C) + </control> + <env> _ => Env </env> +
+

The catch statement S₂ needs to be executed in the original environment, +but where the thrown value V is bound to the catch variable X. We here +chose to rely on two previously defined constructs when giving semantics to +the catch part of the statement: (1) the variable declaration with +initialization, for binding X to V; and (2) the block construct for +preventing X from shadowing variables in the original environment upon the +completion of S₂.

+

Threads

+

SIMPLE's threads can be created and terminated dynamically, and can +synchronize by acquiring and releasing re-entrant locks and by rendezvous. +We discuss the seven rules giving the semantics of these operations below.

+

Thread creation

+

Threads can be created by any other threads using the spawn S +construct. The spawn expression construct evaluates to the unique identifier +of the newly created thread and, at the same time, a new thread cell is added +into the configuration, initialized with the S statement and sharing the +same environment with the parent thread. Note that the newly created +thread cell is torn. That means that the remaining cells are added +and initialized automatically as described in the definition of SIMPLE's +configuration. This is part of K's configuration abstraction mechanism.

+
k
rule <thread>... + <k> spawn S => !T:Int ...</k> + <env> Env </env> + ...</thread> + (.Bag => <thread>... + <k> S </k> + <env> Env </env> + <id> !T </id> + ...</thread>) +
+

Thread termination

+

Dually to the above, when a thread terminates, its assigned computation (the +contents of its k cell) is empty, so the thread can be dissolved. +However, since no discipline is imposed on how locks are acquired and released, +it can be the case that a terminating thread still holds locks. Those locks +must be released, so other threads attempting to acquire them do not deadlock. +We achieve that by removing all the locks held by the terminating thread in its +holds cell from the set of busy locks in the busy cell +(keys(H) returns the domain of the map H as a set, that is, only +the locks themselves ignoring their multiplicity). As seen below, a lock is +added to the busy cell as soon as it is acquired for the first time +by a thread. The unique identifier of the terminated thread is also collected +into the terminated cell, so the join construct knows which +threads have terminated.

+
k
rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag) + <busy> Busy => Busy -Set keys(H) </busy> + <terminated>... .Set => SetItem(T) ...</terminated> +
+

Thread joining

+

Thread joining is now straightforward: all we need to do is to check whether +the identifier of the thread to be joined is in the terminated cell. +If yes, then the join statement dissolves and the joining thread +continues normally; if not, then the joining thread gets stuck.

+
k
rule <k> join T:Int; => .K ...</k> + <terminated>... SetItem(T) ...</terminated> +
+

Acquire lock

+

There are two cases to distinguish when a thread attempts to acquire a lock +(in SIMPLE any value can be used as a lock):
+(1) The thread does not currently have the lock, in which case it has to +take it provided that the lock is not already taken by another thread (see +the side condition of the first rule).
+(2) The thread already has the lock, in which case it just increments its +counter for the lock (the locks are re-entrant). These two cases are captured +by the two rules below:

+
k
rule <k> acquire V:Val; => .K ...</k> + <holds>... .Map => V |-> 0 ...</holds> + <busy> Busy (.Set => SetItem(V)) </busy> + requires (notBool(V in Busy)) [group(acquire)] + + rule <k> acquire V; => .K ...</k> + <holds>... V:Val |-> (N => N +Int 1) ...</holds> +
+

Release lock

+

Similarly, there are two corresponding cases to distinguish when a thread +releases a lock:
+(1) The thread holds the lock more than once, in which case all it needs to do +is to decrement the lock counter.
+(2) The thread holds the lock only once, in which case it needs to remove it +from its holds cell and also from the shared busy cell, +so other threads can acquire it if they need to.

+
k
rule <k> release V:Val; => .K ...</k> + <holds>... V |-> (N => N -Int 1) ...</holds> + requires N >Int 0 + + rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds> + <busy>... SetItem(V) => .Set ...</busy> +
+
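
To see how these rules fit together, here is a small SIMPLE program we add for illustration (it is not part of the definition): two threads increment a shared counter guarded by the same lock value, and main joins the spawned thread before printing. Any value can serve as a lock, so both threads simply acquire the string "lock".

var c = 0;

function main() {
  var t = spawn {
    acquire "lock";
    c = c + 1;
    release "lock";
  };
  acquire "lock";
  c = c + 1;
  release "lock";
  join t;
  print(c, "\n");
}

The join statement only dissolves once the spawned thread's identifier has been collected in the terminated cell, so the program always prints 2.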

Rendezvous synchronization

+

In addition to synchronization through acquire and release of locks, SIMPLE also provides a construct for rendezvous synchronization. A thread whose next statement to execute is rendezvous(V) gets stuck until another thread reaches an identical statement; when that happens, the two threads drop their rendezvous statements and continue their executions. If three threads happen to have an identical rendezvous statement as their next statement, then precisely two of them will synchronize and the other will remain blocked until another thread reaches a similar rendezvous statement. The rule below is as simple as it can be. Note, however, that, again, it is K's mechanism for configuration abstraction that makes it work as desired: since the only cell that can multiply and contains a k cell inside is the thread cell, the only way to concretize the rule below to the actual configuration of SIMPLE is to include each k cell in a thread cell.

+
k
rule <k> rendezvous V:Val; => .K ...</k> + <k> rendezvous V; => .K ...</k> [group(rendezvous)] +
+

Auxiliary declarations and operations

+

In this section we define all the auxiliary constructs used in the +above semantics.

+

Making declarations

+

The mkDecls auxiliary construct turns a list of identifiers and a list of values into a sequence of corresponding variable declarations.

+
k
syntax Stmt ::= mkDecls(Ids,Vals) [function] + rule mkDecls((X:Id, Xs:Ids), (V:Val, Vs:Vals)) => var X=V; mkDecls(Xs,Vs) + rule mkDecls(.Ids,.Vals) => {} +
+

Location lookup

+

The operation below is straightforward. Note that we place it in the same +lookup group as the variable lookup rule defined above. This way, +both rules will be considered transitions when we include the lookup +tag in the transition option of kompile.

+
k
syntax Exp ::= lookup(Int) + rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store> [group(lookup)] +
+

Environment recovery

+

We have already discussed the environment recovery auxiliary operation in the +IMP++ tutorial:

+
k
// TODO: eliminate the env wrapper, like we did in IMP++ + + syntax KItem ::= setEnv(Map) + rule <k> setEnv(Env) => .K ...</k> <env> _ => Env </env> +
+

While theoretically sufficient, the basic definition of environment recovery alone is suboptimal. Consider a loop while (E) S, whose semantics (see above) was given by unrolling, where S is a block. Then the semantics of blocks above, together with the unrolling semantics of the while loop, will yield a computation structure in the k cell that grows unboundedly, adding a new environment recovery task right in front of the already existing sequence of similar environment recovery tasks (this phenomenon is similar to the "tail recursion" problem). Of course, when we have a sequence of environment recovery tasks, we only need to keep the last one. The elegant rule below does precisely that, thus avoiding the unnecessary computation explosion problem:

+
k
rule (setEnv(_) => .K) ~> setEnv(_) +
+

In fact, the above follows a common convention in K for recovery operations of cell contents: the meaning of a computation task of the form cell(C) that reaches the top of the computation is that the current contents of the cell cell are discarded and replaced with C. We did not add support for these special computation tasks in our current implementation of K, so we need to define them as above.

+
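
For concreteness, the same convention could be instantiated for any other cell. The hypothetical sketch below (the name setHolds is ours, purely for illustration, and is not part of the SIMPLE definition) shows what a save-and-restore item for the holds cell would look like, including the same collapsing rule for consecutive recovery tasks:

+
k
  syntax KItem ::= setHolds(Map)
  rule <k> setHolds(H) => .K ...</k> <holds> _ => H </holds>
  rule (setHolds(_) => .K) ~> setHolds(_)
+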

lvalue and loc

+

For convenience in giving the semantics of constructs like the increment and +the assignment, that we want to operate the same way on variables and on +array elements, we used an auxiliary lvalue(E) construct which was +expected to evaluate to the lvalue of the expression E. This is only +defined when E has an lvalue, that is, when E is either a variable or +evaluates to an array element. lvalue(E) evaluates to a value of +the form loc(L), where L is the location where the value of E +can be found; for clarity, we use loc to structurally distinguish +natural numbers from location values. In giving semantics to lvalue +there are two cases to consider. (1) If E is a variable, then all we need +to do is to grab its location from the environment. (2) If E is an array +element, then we first evaluate the array and its index in order to identify +the exact location of the element of concern, and then return that location; +the last rule below works because its preceding context declarations ensure +that the array and its index are evaluated, and then the rule for array lookup +(defined above) rewrites the evaluated array access construct to its +corresponding store lookup operation.

+
k
// For parsing reasons, we prefer to allow lvalue to take a K + + syntax Exp ::= lvalue(K) + syntax Val ::= loc(Int) + +// Local variable + + rule <k> lvalue(X:Id => loc(L)) ...</k> <env>... X |-> L:Int ...</env> + +// Array element: evaluate the array and its index; +// then the array lookup rule above applies. + + context lvalue(_::Exp[HOLE::Exps]) + context lvalue(HOLE::Exp[_::Exps]) + +// Finally, return the address of the desired object member + + rule lvalue(lookup(L:Int) => loc(L)) +
+

Initializing multiple locations

+

The following operation initializes a sequence of locations with the same +value:

+
k
syntax Map ::= Int "..." Int "|->" K [function] + rule N...M |-> _ => .Map requires N >Int M + rule N...M |-> K => N |-> K (N +Int 1)...M |-> K requires N <=Int M +
+
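
As a small worked example (added for illustration; undefined stands for the uninitialized-value placeholder used when allocating arrays), the term 3...5 |-> undefined unfolds as follows:

+
k
// 3...5 |-> undefined
//   => 3 |-> undefined (4...5 |-> undefined)
//   => 3 |-> undefined 4 |-> undefined (5...5 |-> undefined)
//   => 3 |-> undefined 4 |-> undefined 5 |-> undefined (6...5 |-> undefined)
//   => 3 |-> undefined 4 |-> undefined 5 |-> undefined
+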

The semantics of SIMPLE is now complete. Make sure you kompile the definition with the right options in order to generate the desired model. No kompile options are needed if you only want to execute the definition (and thus get an interpreter), but if you want to search for different program behaviors then you need to kompile with the transition option including rule groups such as lookup, increment, acquire, etc. See the IMP++ tutorial for what the transition option means and how to use it.

+
k
endmodule +
+

Go to Lesson 2, SIMPLE typed static

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/tests/regression-new/pl-tutorial/2_languages/1_simple/2_typed/1_static/simple-typed-static/index.html b/k-distribution/tests/regression-new/pl-tutorial/2_languages/1_simple/2_typed/1_static/simple-typed-static/index.html new file mode 100644 index 00000000000..78acdcdb237 --- /dev/null +++ b/k-distribution/tests/regression-new/pl-tutorial/2_languages/1_simple/2_typed/1_static/simple-typed-static/index.html @@ -0,0 +1,1158 @@ + + + + + + + + + + + + + + +SIMPLE — Typed — Static | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

SIMPLE — Typed — Static

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K definition of the static semantics of the typed SIMPLE +language, or in other words, a type system for the typed SIMPLE +language in K. We do not re-discuss the various features of the +SIMPLE language here. The reader is referred to the untyped version of +the language for such discussions. We here only focus on the new and +interesting problems raised by the addition of type declarations, and +what it takes to devise a type system/checker for the language.

+

When designing a type system for a language, no matter in what +paradigm, we have to decide upon the intended typing policy. Note +that we can have multiple type systems for the same language, one for +each typing policy. For example, should we accept programs which +don't have a main function? Or should we allow functions that do not +return explicitly? Or should we allow functions whose type expects +them to return a value (say an int) to use a plain +return; statement, which returns no value, like in C? +And so on and so forth. Typically, there are two opposite tensions +when designing a type system. On the one hand, you want your type +system to be as permissive as possible, that is, to accept as many +programs that do not get stuck when executed with the untyped +semantics as possible; this will keep the programmers using your +language happy. On the other hand, you want your type system to have +a reasonable performance when implemented; this will keep both the +programmers and the implementers of your language happy. For example, +a type system for rejecting programs that could perform +division-by-zero is not expected to be feasible in general. A simple +guideline when designing typing policies is to imagine how the +semantics of the untyped language may get stuck and try to prevent +those situations from happening.

+

Before we give the K type system of SIMPLE formally, we discuss, +informally, the intended typing policy:

+
    +
  • +

    Each program should contain a main() function. Indeed, +the untyped SIMPLE semantics will get stuck on any program which does +not have a main function.

    +
  • +
  • +

    Each primitive value has its own type, which can be int, bool, or string. There is also a type void for nonexistent values, for example for the result of a function meant to return no value (but only be used for its side effects, like a procedure).

    +
  • +
  • +

    The syntax of untyped SIMPLE is extended to allow type +declarations for all the variables, including array variables. This is +done in a C/Java-style. For example, int x; or +int x=7, y=x+3;, or int[][][] a[10,20]; +(the latter defines a 10 × 20 matrix of arrays of integers). +Recall from untyped SIMPLE that, unlike in C/Java, our multi-dimensional +arrays use comma-separated arguments, although they have the array-of-array +semantics.

    +
  • +
  • +

    Functions are also typed in a C/Java style. However, since in SIMPLE +we allow functions to be passed to and returned by other functions, we also +need function types. We will use the conventional higher-order arrow-notation +for function types, but will separate the argument types with commas. For +example, a function returning an array of bool elements and +taking as argument an array x of two-integer-argument functions +returning an integer, is declared using a syntax of the form +bool[] f(((int,int)->int)[] x) { ... } +and has the type ((int,int)->int)[] -> bool[].

    +
  • +
  • +

    We allow any variable declarations at the top level. Functions +can only be declared at the top level. Each function can only access the +other functions and variables declared at the top level, or its own locally +declared variables. SIMPLE has static scoping.

    +
  • +
  • +

    The various expression and statement constructs take only elements of +the expected types.

    +
  • +
  • +

    Increment and assignment can operate both on variables and on array +elements. For example, if f has type int->int[][] and +function g has the type int->int, then the +increment expression ++f(7)[g(2),g(3)] is valid.

    +
  • +
  • +

    Functions should only return values of their declared result +type. To give the programmers more flexibility, we allow functions to +use return; statements to terminate without returning an +actual value, or to not explicitly use any return statement, +regardless of their declared return type. This flexibility can be +handy when writing programs using certain functions only for their +side effects. Nevertheless, as the dynamic semantics shows, a return +value is automatically generated when an explicit return +statement is not encountered.

    +
  • +
  • +

    For simplicity, we here limit exceptions to only throw and catch integer values. We leave it as an exercise to the reader to extend the semantics to allow throwing and catching exceptions of arbitrary types. Like in programming languages such as Java, one can go even further and define a semantics where thrown exceptions are propagated through try-catch statements until one of the corresponding type is found. We will do this when we define the KOOL language, not here. To keep the definition of SIMPLE simple, here we do not attempt to reject programs which throw uncaught exceptions.

    +
  • +
+

Like in untyped SIMPLE, some constructs can be desugared into a smaller set of basic constructs. In general, it should be clear why a program does not type by looking at the top of the k cells in its stuck configuration.
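
As a quick illustration of the policy above, the following hypothetical program fragment (ours, not part of the definition) is accepted by the type system defined below: it declares typed global variables, a typed function, and the required main function.

int x = 7, y = x + 3;

int add(int a, int b) {
  return a + b;
}

void main() {
  print(add(x, y), "\n");
}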

+
k
module SIMPLE-TYPED-STATIC-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

The syntax of typed SIMPLE extends that of untyped SIMPLE with support +for declaring types to variables and functions.

+
k
syntax Id ::= "main" [token] +
+

Types

+

Primitive, array and function types, as well as lists (or tuples) of types. +The lists of types are useful for function arguments.

+
k
syntax Type ::= "void" | "int" | "bool" | "string" + | Type "[" "]" + | "(" Type ")" [bracket] + > Types "->" Type + + syntax Types ::= List{Type,","} [overload(exps)] +
+

Declarations

+

Variable and function declarations have the expected syntax. For variables, +we basically just replaced the var keyword of untyped SIMPLE with a +type. For functions, besides replacing the function keyword with a +type, we also introduce a new syntactic category for typed variables, +Param, and lists over it.

+
k
syntax Param ::= Type Id + syntax Params ::= List{Param,","} + + syntax Stmt ::= Type Exps ";" + | Type Id "(" Params ")" Block +
+

Expressions

+

The syntax of expressions is identical to that in untyped SIMPLE, +except for the logical conjunction and disjunction which have +different strictness attributes, because they now have different +evaluation strategies.

+
k
syntax Exp ::= Int | Bool | String | Id + | "(" Exp ")" [bracket] + | "++" Exp + > Exp "[" Exps "]" [strict] + > Exp "(" Exps ")" [strict] + | "-" Exp [strict] + | "sizeOf" "(" Exp ")" [strict] + | "read" "(" ")" + > left: + Exp "*" Exp [strict, left] + | Exp "/" Exp [strict, left] + | Exp "%" Exp [strict, left] + > left: + Exp "+" Exp [strict, left] + | Exp "-" Exp [strict, left] + > non-assoc: + Exp "<" Exp [strict, non-assoc] + | Exp "<=" Exp [strict, non-assoc] + | Exp ">" Exp [strict, non-assoc] + | Exp ">=" Exp [strict, non-assoc] + | Exp "==" Exp [strict, non-assoc] + | Exp "!=" Exp [strict, non-assoc] + > "!" Exp [strict] + > left: + Exp "&&" Exp [strict, left] + | Exp "||" Exp [strict, left] + > "spawn" Block + > Exp "=" Exp [strict(2), right] +
+

Note that spawn has not been declared strict. This may seem unexpected, because the child thread shares the same environment with the parent thread, so from a typing perspective the spawned statement makes the same sense in a child thread as it makes in the parent thread. The reason for not declaring it strict is that we want to disallow programs where the spawned thread calls the return statement, because those programs would get stuck in the dynamic semantics. The type semantics of spawn below will reject such programs.

+
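
For example, a fragment like the hypothetical one below (ours, for illustration) is rejected: the task created for the spawned block carries no returnType cell, so the return statement inside it cannot type and the rewriting gets stuck.

int f() {
  spawn { return 1; };
  return 0;
}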

We still need lists of expressions, defined below, but note that we do +not need lists of identifiers anymore. They have been replaced by the lists +of parameters.

+
k
syntax Exps ::= List{Exp,","} [strict, overload(exps)] +
+

Statements

+

The statements have the same syntax as in untyped SIMPLE, except for +the exceptions, which now type their parameter. Note that, unlike in untyped +SIMPLE, all statement constructs which have arguments and are not desugared +are strict, including the conditional and the while. Indeed, from a +typing perspective, they are all strict: first type their arguments and then +type the actual construct.

+
k
syntax Block ::= "{" "}" + | "{" Stmt "}" + + syntax Stmt ::= Block + | Exp ";" [strict] + | "if" "(" Exp ")" Block "else" Block [avoid, strict] + | "if" "(" Exp ")" Block [macro] + | "while" "(" Exp ")" Block [strict] + | "for" "(" Stmt Exp ";" Exp ")" Block [macro] + | "return" Exp ";" [strict] + | "return" ";" + | "print" "(" Exps ")" ";" [strict] + | "try" Block "catch" "(" Param ")" Block [strict(1)] + | "throw" Exp ";" [strict] + | "join" Exp ";" [strict] + | "acquire" Exp ";" [strict] + | "release" Exp ";" [strict] + | "rendezvous" Exp ";" [strict] +
+

Note that the sequential composition is now sequentially strict, +because, unlike in the dynamic semantics where statements dissolved, +they now reduce to the stmt type, which is a result.

+
k
syntax Stmt ::= Stmt Stmt [seqstrict, right] +
+

Desugaring macros

+

We use the same desugaring macros as in untyped SIMPLE, but, of course, including the types of the involved variables.

+
k
rule if (E) S => if (E) S else {} + rule for(Start Cond; Step) {S:Stmt} => {Start while(Cond){S Step;}} + rule for(Start Cond; Step) {} => {Start while(Cond){Step;}} + rule T:Type E1:Exp, E2:Exp, Es:Exps; => T E1; T E2, Es; [anywhere] + rule T:Type X:Id = E; => T X; X = E; [anywhere] + +endmodule + + +module SIMPLE-TYPED-STATIC + imports SIMPLE-TYPED-STATIC-SYNTAX + imports DOMAINS +
+

Static semantics

+

Here we define the type system of SIMPLE. Like concrete semantics, +type systems defined in K are also executable. However, K type +systems turn into type checkers instead of interpreters when executed.

+

The typing process is done in two (overlapping) phases. In the first phase the global environment is built, which contains type bindings for all the globally declared variables and functions. For functions, the declared types will be "trusted" during the first phase and simply bound to their corresponding function names and placed in the global type environment. At the same time, type-checking tasks verifying that the function bodies indeed respect their claimed types are generated. All these tasks are (concurrently) verified during the second phase. This way, all the global variable and function declarations are available in the global type environment and can be used in order to type-check each function's code. This is consistent with the semantics of untyped SIMPLE, where functions can access all the global variables and can call any other function declared in the same program. The two phases may overlap because of the K concurrent semantics. For example, a function task can be started while the first phase is still running; moreover, it may even complete before the first phase does, namely when all the global variables and functions that it needs have already been processed and made available in the global environment by the first phase task.

+

Extended syntax and results

+

The idea is to start with a configuration holding the program to type +in one of its cells, then apply rewrite rules on it mixing types and +language syntax, and eventually obtain a type instead of the original +program. In other words, the program reduces to its type using +the K rules giving the type system of the language. In doing so, +additional typing tasks for function bodies are generated and solved +the same way. If this rewriting process gets stuck, then we say that +the program is not well-typed. Otherwise the program is well-typed +(by definition). We did not need types for statements and for blocks +as part of the typed SIMPLE syntax, because programmers are not allowed +to use such types explicitly. However, we are going to need them in the +type system, because blocks and statements reduce to them.

+

We start by allowing types to be used inside expressions and statements in our language. This way, types can be used together with language syntax in subsequent K rules without any parsing errors. Like in the type system of IMP++ in the K tutorial, we prefer to group the block and statement types under one syntactic sub-category of types, because this allows us to more compactly state that certain terms can be either blocks or statements. Also, since programs and fragments of programs will reduce to their types, in order for the strictness and context declarations to be executable we state that types are results (same as we did in the IMP++ tutorial).

+
k
syntax Exp ::= Type + syntax Exps ::= Types + syntax BlockOrStmtType ::= "block" | "stmt" + syntax Type ::= BlockOrStmtType + syntax Block ::= BlockOrStmtType + syntax KResult ::= Type + | Types //TODO: remove this, eventually +
+

Configuration

+

The configuration of our type system consists of a tasks cell +holding various typing task cells, and a global type environment. +Each task includes a k cell holding the code to type, a tenv +cell holding the local type environment, and a return cell holding +the return type of the currently checked function. The latter is needed in +order to check whether return statements return values of the expected type. +Initially, the program is placed in a k cell inside a +task cell. Since the cells with multiplicity ? are not +included in the initial configuration, the task cell holding +the original program in its k cell will contain no other +subcells.

+
k
configuration <T color="yellow"> + <tasks color="orange"> + <task multiplicity="*" color="yellow" type="Set"> + <k color="green"> $PGM:Stmt </k> + <tenv multiplicity="?" color="cyan"> .Map </tenv> + <returnType multiplicity="?" color="black"> void </returnType> + </task> + </tasks> +// <br/> + <gtenv color="blue"> .Map </gtenv> + </T> +
+

Variable declarations

+

Variable declarations type as statements, that is, they reduce to the type stmt. There are only two cases that need to be considered: when a simple variable is declared and when an array variable is declared. The macros at the end of the syntax module above take care of reducing other variable declarations, including ones where the declared variables are initialized, to only these two cases. The first case has two subcases: when the variable declaration is global (i.e., the task cell contains only the k cell), in which case it is added to the global type environment, checking at the same time that the variable has not been already declared; and when the variable declaration is local (i.e., a tenv cell is available), in which case it is simply added to the local type environment, possibly shadowing previous homonymous variables. The second case (array declarations) reduces to the first, incrementally moving the array dimension into the type until the array becomes a simple variable.

+
k
rule <task> <k> T:Type X:Id; => stmt ...</k> </task> + <gtenv> Rho (.Map => X |-> T) </gtenv> + requires notBool(X in keys(Rho)) + rule <k> T:Type X:Id; => stmt ...</k> <tenv> Rho => Rho[X <- T] </tenv> + + context _:Type _::Exp[HOLE::Exps]; +// The rule below may need to sort E to Exp in the future, if the +// parser gets stricter; without that information, it may not be able +// to complete the LHS into T E[int,Ts],.Exps; (and similarly for the RHS) + rule T:Type E:Exp[int,Ts:Types]; => T[] E[Ts]; +// I want to write the rule below as _:Type (E:Exp[.Types] => E), +// but the list completion seems to not work well with that. + rule T:Type E:Exp[.Types]; => T E; +
+

Function declarations

+

Functions are allowed to be declared only at the top level (the task cell holds only its k subcell). Each function declaration reduces to a variable declaration (a binding of its name to its declared function type), but also adds a task into the tasks cell. The task consists of a typing of the statement declaring all the function parameters followed by the function body, together with the expected return type of the function. The getTypes and mkDecls functions, defined at the end of the file in the section on auxiliary operations, extract the list of types and make a sequence of variable declarations from a list of function parameters, respectively. Note that, although in the dynamic semantics we include a terminating return statement at the end of the function body to eliminate from the analysis the case when the function does not provide an explicit return, we do not need to include such a return statement here. That's because the return statements type to stmt anyway, and the entire code of the function body needs to type anyway.

+
k
rule <task> <k> T:Type F:Id(Ps:Params) S => getTypes(Ps)->T F; ...</k> </task> + (.Bag => <task> + <k> mkDecls(Ps) S </k> <tenv> .Map </tenv> <returnType> T </returnType> + </task>) +
+

Checking if main() exists

+

Once the entire program is processed (generating appropriate tasks +to type check its function bodies), we can dissolve the main +task cell (the one holding only a k subcell). Since +we want to enforce that programs include a main function, we also +generate a function task executing main() to ensure that it +types (remove this task creation if you do not want your type system +to reject programs without a main function).

+
k
rule <task> <k> stmt => main(.Exps); </k> (.Bag => <tenv> .Map </tenv>) </task> +
+

Collecting the terminated tasks

+

Similarly, once a non-main task (i.e., one which contains a tenv subcell) is completed using the subsequent rules (i.e., its k cell holds only the block or stmt type), we can dissolve its corresponding cell. Note that it is important to ensure that we only dissolve tasks containing a tenv cell with the rule below, because the main task should not dissolve this way! It should do what the above rule says. In the end, there should be no task cell left in the configuration when the program correctly type checks.

+
k
rule <task>... <k> _:BlockOrStmtType </k> <tenv> _ </tenv> ...</task> => .Bag +
+

Basic values

+

The first three rewrite rules below reduce the primitive values to +their types, as we typically do when we define type systems in K.

+
k
rule _:Int => int + rule _:Bool => bool + rule _:String => string +
+

Variable lookup

+

There are three cases to distinguish for variable lookup: (1) if the +variable is bound in the local type environment, then look its type up +there; (2) if a local environment exists and the variable is not bound +in it, then look its type up in the global environment; (3) finally, +if there is no local environment, meaning that we are executing the +top-level pass, then look the variable's type up in the global +environment, too.

+
k
rule <k> X:Id => T ...</k> <tenv>... X |-> T ...</tenv> + + rule <k> X:Id => T ...</k> <tenv> Rho </tenv> <gtenv>... X |-> T ...</gtenv> + requires notBool(X in keys(Rho)) + + rule <task> <k> X:Id => T ...</k> </task> <gtenv>... X |-> T ...</gtenv> +
+

Increment

+

We want the increment operation to apply to any lvalue, including +array elements, not only to variables. For that reason, we define a +special context extracting the type of the argument of the increment +operation only if that argument is an lvalue. Otherwise the rewriting +process gets stuck. The operation ltype is defined at the +end of this file, in the auxiliary operation section. It essentially +acts as a filter, getting stuck if its argument is not an lvalue and +letting it reduce otherwise. The type of the lvalue is expected to be +an integer in order to be allowed to be incremented, as seen in the +rule ++ int => int below.

+
k
context ++(HOLE => ltype(HOLE)) + rule ++ int => int +
+

Common expression constructs

+

The rules below are straightforward and self-explanatory:

+
k
rule int + int => int + rule string + string => string + rule int - int => int + rule int * int => int + rule int / int => int + rule int % int => int + rule - int => int + rule int < int => bool + rule int <= int => bool + rule int > int => bool + rule int >= int => bool + rule T:Type == T => bool + rule T:Type != T => bool + rule bool && bool => bool + rule bool || bool => bool + rule ! bool => bool +
+

Array access and size

+

Array access requires each index to type to an integer, and the +array type to be at least as deep as the number of indexes:

+
k
// NOTE: +// We used to need parentheses in the RHS, to avoid capturing Ts as an attribute +// Let's hope that is not a problem anymore. + + rule (T[])[int, Ts:Types] => T[Ts] + rule T:Type[.Types] => T +
+

sizeOf only needs to check that its argument is an array:

+
k
rule sizeOf(_T[]) => int +
+

Input/Output

+

The read expression construct types to an integer, while print types +to a statement provided that all its arguments type to integers or +strings.

+
k
rule read() => int + + rule print(T:Type, Ts => Ts); requires T ==K int orBool T ==K string + rule print(.Types); => stmt +
+

Assignment

+

The special context and the rule for assignment below are similar +to those for increment: the LHS of the assignment must be an lvalue +and, in that case, it must have the same type as the RHS, which then +becomes the type of the assignment.

+
k
context (HOLE => ltype(HOLE)) = _ + rule T:Type = T => T +
+

Function application and return

+

Function application requires the type of the function and the +types of the passed values to be compatible. Note that a special case +is needed to handle the no-argument case:

+
k
rule (Ts:Types -> T)(Ts) => T requires Ts =/=K .Types + rule (void -> T)(.Types) => T +
+

The returned value must have the same type as the declared function return type. If an empty return is encountered, then we should check that we are in a function (and not a thread) context, that is, a return cell must be available:

+
k
rule <k> return T:Type; => stmt ...</k> <returnType> T </returnType> + rule <k> return; => stmt ...</k> <returnType> _ </returnType> +
+

Blocks

+

To avoid having to recover type environments after blocks, we prefer to start a new task for the block body, making sure that the new task is passed the same type environment and return cells. The value returned by return statements must have the same type as stated in the return cell. The print variadic function is allowed to only print integers and strings. The thrown exceptions can only have integer type.

+
k
rule {} => block + + rule <task> <k> {S} => block ...</k> <tenv> Rho </tenv> R </task> + (.Bag => <task> <k> S </k> <tenv> Rho </tenv> R </task>) +
+

Expression statement

+
k
rule _:Type; => stmt +
+

Conditional and while loop

+
k
rule if (bool) block else block => stmt + rule while (bool) block => stmt +
+

Exceptions

+

We currently force the parameters of exceptions to only be integers. +Moreover, for simplicity, we assume that integer exceptions can be +thrown from anywhere, including from functions which do not define +any try-catch block (with the currently unchecked ‒also for +simplicity‒ expectation that the caller functions would catch those +exceptions).

+
k
rule try block catch(int X:Id) {S} => {int X; S} + rule try block catch(int X:Id) {} => {int X;} + rule throw int; => stmt +
+

Concurrency

+

Nothing special about typing the concurrency constructs, except that we do not want the spawned thread to return, so we do not include any return cell in the new task cell for the thread statement. As with the functions above, we do not check for thrown exceptions which are not caught.

+
k
rule <k> spawn S => int ...</k> <tenv> Rho </tenv> + (.Bag => <task> <k> S </k> <tenv> Rho </tenv> </task>) + rule join int; => stmt + rule acquire _:Type; => stmt + rule release _:Type; => stmt + rule rendezvous _:Type; => stmt + + rule _:BlockOrStmtType _:BlockOrStmtType => stmt +
+

Auxiliary constructs

+

The function mkDecls turns a list of parameters into a +list of variable declarations.

+
k
syntax Stmt ::= mkDecls(Params) [function] + rule mkDecls(T:Type X:Id, Ps:Params) => T X; mkDecls(Ps) + rule mkDecls(.Params) => {} +
+

The ltype context allows only expressions which have an +lvalue to evaluate.

+
k
syntax LValue ::= Id + rule isLValue(_:Exp[_:Exps]) => true + syntax Exp ::= LValue // K should be able to infer this + // if not added, then it gets stuck with an Id on k cell + +// Instead of the second LValue production above you can use a rule: +// rule isLValue(_:Exp[_:Exps]) => true + + syntax Exp ::= ltype(Exp) +// context ltype(HOLE:LValue) +// The above context does not work due to some error, so we write instead + context ltype(HOLE) requires isLValue(HOLE) +
+

The function getTypes is the same as in SIMPLE typed dynamic.

+
k
syntax Types ::= getTypes(Params) [function] + rule getTypes(T:Type _:Id) => T, .Types // I would like to not use .Types + rule getTypes(T:Type _:Id, P, Ps) => T, getTypes(P,Ps) + rule getTypes(.Params) => void, .Types + +endmodule +
+

Go to Lesson 3, SIMPLE typed dynamic

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/tests/regression-new/pl-tutorial/2_languages/1_simple/2_typed/2_dynamic/simple-typed-dynamic/index.html b/k-distribution/tests/regression-new/pl-tutorial/2_languages/1_simple/2_typed/2_dynamic/simple-typed-dynamic/index.html new file mode 100644 index 00000000000..cf8f736ff14 --- /dev/null +++ b/k-distribution/tests/regression-new/pl-tutorial/2_languages/1_simple/2_typed/2_dynamic/simple-typed-dynamic/index.html @@ -0,0 +1,1149 @@ + + + + + + + + + + + + + + +SIMPLE — Typed — Dynamic | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

SIMPLE — Typed — Dynamic

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K dynamic semantics of the typed SIMPLE language. It is very similar to the semantics of untyped SIMPLE, the difference being that we now dynamically check the typing policy described in the static semantics of typed SIMPLE. Because of the dynamic nature of the semantics, we can also perform some additional checks which were not possible in the static semantics, such as detecting accesses to an array outside its bounds. We will highlight the differences between the dynamically typed and the untyped SIMPLE as we proceed with the semantics. We recommend that the reader consult the typing policy and the syntax of types discussed in the static semantics of the typed SIMPLE language.

+
k
module SIMPLE-TYPED-DYNAMIC-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

The syntax of typed SIMPLE extends that of untyped SIMPLE with support +for declaring types to variables and functions.

+

The syntax below is identical to that of the static semantics of typed +SIMPLE. However, the K strictness attributes are like those of the untyped +SIMPLE, to capture the desired evaluation strategies of the various language +constructs.

+
k
syntax Id ::= "main" [token] +
+

Types

+
k
syntax Type ::= "void" | "int" | "bool" | "string" + | Type "[" "]" + | "(" Type ")" [bracket] + > Types "->" Type + syntax Types ::= List{Type,","} +
+

Declarations

+
k
syntax Param ::= Type Id + syntax Params ::= List{Param,","} + + syntax Stmt ::= Type Exps ";" + | Type Id "(" Params ")" Block +
+

Expressions

+
k
syntax Exp ::= Int | Bool | String | Id + | "(" Exp ")" [bracket] + | "++" Exp + > Exp "[" Exps "]" [strict] + > Exp "(" Exps ")" [strict] + | "-" Exp [strict] + | "sizeOf" "(" Exp ")" [strict] + | "read" "(" ")" + > left: + Exp "*" Exp [strict, left] + | Exp "/" Exp [strict, left] + | Exp "%" Exp [strict, left] + > left: + Exp "+" Exp [strict, left] + | Exp "-" Exp [strict, left] + > non-assoc: + Exp "<" Exp [strict, non-assoc] + | Exp "<=" Exp [strict, non-assoc] + | Exp ">" Exp [strict, non-assoc] + | Exp ">=" Exp [strict, non-assoc] + | Exp "==" Exp [strict, non-assoc] + | Exp "!=" Exp [strict, non-assoc] + > "!" Exp [strict] + > left: + Exp "&&" Exp [strict(1), left] + | Exp "||" Exp [strict(1), left] + > "spawn" Block + > Exp "=" Exp [strict(2), right] +
+

Like in the static semantics, there is no need for lists of identifiers +(because we now have lists of parameters).

+
k
syntax Exps ::= List{Exp,","} [strict, overload(exps)] + syntax Val + syntax Vals ::= List{Val,","} [overload(exps)] +
+

Statements

+
k
syntax Block ::= "{" "}" + | "{" Stmt "}" + + syntax Stmt ::= Block + | Exp ";" [strict] + | "if" "(" Exp ")" Block "else" Block [avoid, strict(1)] + | "if" "(" Exp ")" Block [macro] + | "while" "(" Exp ")" Block + | "for" "(" Stmt Exp ";" Exp ")" Block [macro] + | "print" "(" Exps ")" ";" [strict] + | "return" Exp ";" [strict] + | "return" ";" + | "try" Block "catch" "(" Param ")" Block + | "throw" Exp ";" [strict] + | "join" Exp ";" [strict] + | "acquire" Exp ";" [strict] + | "release" Exp ";" [strict] + | "rendezvous" Exp ";" [strict] + + syntax Stmt ::= Stmt Stmt [right] +
+

The same desugaring macros as in the statically typed SIMPLE.

+
k
rule if (E) S => if (E) S else {} + rule for(Start Cond; Step) {S:Stmt} => {Start while(Cond){S Step;}} + rule for(Start Cond; Step) {} => {Start while(Cond){Step;}} + rule T:Type E1:Exp, E2:Exp, Es:Exps; => T E1; T E2, Es; [anywhere] + rule T:Type X:Id = E; => T X; X = E; [anywhere] + +endmodule + + +module SIMPLE-TYPED-DYNAMIC + imports SIMPLE-TYPED-DYNAMIC-SYNTAX + imports DOMAINS +
+

Semantics

+

Values and results

+

These are similar to those of untyped SIMPLE, except that the array references and the function abstractions now also hold their types. These types are needed in order to easily compute the type of any value in the language (see the auxiliary typeOf operation at the end of this module).

+
k
syntax Val ::= Int | Bool | String + | array(Type,Int,Int) + | lambda(Type,Params,Stmt) + syntax Exp ::= Val + syntax Exps ::= Vals + syntax KResult ::= Val + | Vals // TODO: should not need this +
+

Configuration

+

The configuration is almost identical to that of untyped SIMPLE, +except for a return cell inside the control cell. +This return cell will hold, like in the static semantics of +typed SIMPLE, the expected type of the value returned by the function +being executed. The contents of this cell will be set whenever a +function is invoked and will be checked whenever the evaluation of the +function body encounters an explicit return statement.

+
k
// the syntax declarations below are required because the sorts are + // referenced directly by a production and, because of the way KIL to KORE + // is implemented, the configuration syntax is not available yet + // should simply work once KIL is removed completely + // check other definitions for this hack as well + + syntax ControlCell + syntax ControlCellFragment + + configuration <T color="red"> + <threads color="orange"> + <thread multiplicity="*" color="yellow" type="Map"> + <id color="pink"> 0 </id> + <k color="green"> ($PGM:Stmt ~> execute) </k> +// <br/> + <control color="cyan"> + <fstack color="blue"> .List </fstack> + <xstack color="purple"> .List </xstack> + <returnType color="LimeGreen"> void </returnType> + </control> +// <br/> + <env color="violet"> .Map </env> + <holds color="black"> .Map </holds> + </thread> + </threads> +// <br/> + <genv color="pink"> .Map </genv> + <store color="white"> .Map </store> + <busy color="cyan">.Set</busy> + <terminated color="red"> .Set </terminated> + <input color="magenta" stream="stdin"> .List </input> + <output color="brown" stream="stdout"> .List </output> + <nextLoc color="gray"> 0 </nextLoc> + </T> +
+

Declarations and Initialization

+

Variable Declaration

+

The undefined construct is now parameterized by a type. +A main difference between untyped SIMPLE and dynamically typed SIMPLE +is that the latter assigns a type to each of its locations and that +type cannot be changed during the execution of the program. We do not +do any memory management in our semantic definitions here, so +locations cannot be reclaimed, garbage collected and/or reused. Each +location corresponds precisely to an allocated variable or array +element, whose type was explicitly or implicitly declared in the +program and does not change. It is therefore safe to type each +location and then never allow that type to change. The typed +undefined values effectively assign both a type and an undefined value +to a location.

+
k
syntax KItem ::= undefined(Type) + + rule <k> T:Type X:Id; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> undefined(T) ...</store> + <nextLoc> L:Int => L +Int 1 </nextLoc> +
+

Array Declaration

+

The dynamic semantics of typed array declarations is similar to that +in untyped SIMPLE, but we have to make sure that we associate the +right type to the allocated locations.

+
k
rule <k> T:Type X:Id[N:Int]; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> array(T, L +Int 1, N) + (L +Int 1)...(L +Int N) |-> undefined(T) ...</store> + <nextLoc> L:Int => L +Int 1 +Int N </nextLoc> + requires N >=Int 0 + + context _:Type _::Exp[HOLE::Exps]; +
+

The desugaring of multi-dimensional arrays into unidimensional ones is also similar to that in untyped SIMPLE, although we have to make sure that all the declared variables have the right types. The auxiliary operation T<Vs>, defined at the end of the file, adds one array dimension to the type T for each element of Vs.

+
k
// TODO: Check the desugaring below to be consistent with the one for untyped simple + + syntax Id ::= "$1" [token] | "$2" [token] + rule T:Type X:Id[N1:Int, N2:Int, Vs:Vals]; + => T[]<Vs> X[N1]; + { + T[][]<Vs> $1=X; + for(int $2=0; $2 <= N1 - 1; ++$2) { + T X[N2,Vs]; + $1[$2] = X; + } + } +
+

Function declaration

+

Store all function parameters, as well as the return type, as part +of the lambda abstraction. In the spirit of dynamic typing, we will +make sure that parameters are well typed when the function is invoked.

+
k
rule <k> T:Type F:Id(Ps:Params) S => .K ...</k> + <env> Env => Env[F <- L] </env> + <store>... .Map => L |-> lambda(T, Ps, S) ...</store> + <nextLoc> L => L +Int 1 </nextLoc> +
+

Calling main()

+

When done with the first pass, call main().

+
k
syntax KItem ::= "execute" + rule <k> execute => main(.Exps); </k> + <env> Env </env> + <genv> .Map => Env </genv> +
+

Expressions

+

Variable lookup

+
k
rule <k> X:Id => V ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V:Val ...</store> [group(lookup)] +
+

Variable/Array increment

+
k
context ++(HOLE => lvalue(HOLE)) + rule <k> ++loc(L) => I +Int 1 ...</k> + <store>... L |-> (I:Int => I +Int 1) ...</store> [group(increment)] +
+

Arithmetic operators

+
k
rule I1 + I2 => I1 +Int I2 + rule Str1 + Str2 => Str1 +String Str2 + rule I1 - I2 => I1 -Int I2 + rule I1 * I2 => I1 *Int I2 + rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0 + rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0 + rule - I => 0 -Int I + rule I1 < I2 => I1 <Int I2 + rule I1 <= I2 => I1 <=Int I2 + rule I1 > I2 => I1 >Int I2 + rule I1 >= I2 => I1 >=Int I2 + rule V1:Val == V2:Val => V1 ==K V2 + rule V1:Val != V2:Val => V1 =/=K V2 + rule ! T => notBool(T) + rule true && E => E + rule false && _ => false + rule true || _ => true + rule false || E => E +
+

Array lookup

+

Check array bounds, as part of the dynamic typing policy.

+
k
// Same comment as for simple untyped regarding [anywhere] + rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs] + [anywhere] + +// Same comment as for simple untyped regarding [anywhere] + rule array(_:Type, L:Int, M:Int)[N:Int] => lookup(L +Int N) + requires N >=Int 0 andBool N <Int M [anywhere] +
+

Size of an array

+
k
rule sizeOf(array(_,_,N)) => N +
+

Function call

+

Define function call and return together, to see their relationship. Note that the operation mkDecls now declares properly typed instantiated variables, and that the semantics of return also checks that the type of the returned value is the expected one.

+
k
syntax KItem ::= (Type,Map,K,ControlCellFragment) + + rule <k> lambda(T,Ps,S)(Vs:Vals) ~> K => mkDecls(Ps,Vs) S return; </k> + <control> + <fstack> .List => ListItem((T',Env,K,C)) ...</fstack> + <returnType> T' => T </returnType> + C + </control> + <env> Env => GEnv </env> + <genv> GEnv </genv> + + rule <k> return V:Val; ~> _ => V ~> K </k> + <control> + <fstack> ListItem((T',Env,K,C)) => .List ...</fstack> + <returnType> T => T' </returnType> + (_ => C) + </control> + <env> _ => Env </env> + requires typeOf(V) ==K T // check the type of the returned value +
+

Like the undefined above, nothing also gets +tagged with a type now. The empty return statement is +completed to return the nothing value tagged as expected.

+
k
syntax Val ::= nothing(Type) + rule <k> return; => return nothing(T); ...</k> <returnType> T </returnType> +
+

Read

+
k
rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input> [group(read)] +
+

Assignment

+

The assignment now checks that the type of the assigned location is +preserved:

+
k
context (HOLE => lvalue(HOLE)) = _ + + rule <k> loc(L) = V:Val => V ...</k> <store>... L |-> (V' => V) ...</store> + requires typeOf(V) ==K typeOf(V') [group(assignment)] +
+
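
For instance, the hypothetical program below (ours, for illustration) runs fine under the untyped semantics but gets stuck here under the dynamic typing policy, because the location allocated for x was typed int and true has type bool:

void main() {
  int x;
  x = true;
}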

Statements

+

Blocks

+
k
rule {} => .K + rule <k> { S } => S ~> setEnv(Env) ...</k> <env> Env </env> +
+

Sequential composition

+
k
rule S1:Stmt S2:Stmt => S1 ~> S2 +
+

Expression statements

+
k
rule _:Val; => .K +
+

Conditional

+
k
rule if ( true) S else _ => S + rule if (false) _ else S => S +
+

While loop

+
k
rule while (E) S => if (E) {S while(E)S} +
+

Print

+

We only allow printing integers and strings:

+
k
rule <k> print(V:Val, Es => Es); ...</k> <output>... .List => ListItem(V) </output> + requires typeOf(V) ==K int orBool typeOf(V) ==K string [group(print)] + rule print(.Vals); => .K +
+

Exceptions

+

Exception parameters are now typed, but note that the semantics below works correctly only when the thrown exception has the same type as the innermost try-catch parameter. To keep things simple, for the time being we can assume that SIMPLE only throws and catches integer values, in which case our semantics below works fine:

+
k
syntax KItem ::= (Param,Stmt,K,Map,ControlCellFragment) // Param instead of Id + + syntax KItem ::= "popx" + + rule <k> (try S1 catch(P) S2 => S1 ~> popx) ~> K </k> + <control> + <xstack> .List => ListItem((P, S2, K, Env, C)) ...</xstack> + C + </control> + <env> Env </env> + + rule <k> popx => .K ...</k> + <xstack> ListItem(_) => .List ...</xstack> + + rule <k> throw V:Val; ~> _ => { T X = V; S2 } ~> K </k> + <control> + <xstack> ListItem((T:Type X:Id, S2, K, Env, C)) => .List ...</xstack> + (_ => C) + </control> + <env> _ => Env </env> +
+

Threads

+

Thread creation

+
k
rule <thread>... + <k> spawn S => !T:Int +Int 1 ...</k> + <env> Env </env> + ...</thread> + (.Bag => <thread>... + <k> S </k> + <env> Env </env> + <id> !T +Int 1 </id> + ...</thread>) +
+

Thread termination

+
k
rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag) + <busy> Busy => Busy -Set keys(H) </busy> + <terminated>... .Set => SetItem(T) ...</terminated> +
+

Thread joining

+
k
rule <k> join T:Int; => .K ...</k> + <terminated>... SetItem(T) ...</terminated> +
+

Acquire lock

+
k
rule <k> acquire V:Val; => .K ...</k> + <holds>... .Map => V |-> 0 ...</holds> + <busy> Busy (.Set => SetItem(V)) </busy> + requires (notBool(V in Busy:Set)) [group(acquire)] + + rule <k> acquire V; => .K ...</k> + <holds>... V:Val |-> (N:Int => N +Int 1) ...</holds> +
+

Release lock

+
k
rule <k> release V:Val; => .K ...</k> + <holds>... V |-> (N => N:Int -Int 1) ...</holds> + requires N >Int 0 + + rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds> + <busy>... SetItem(V) => .Set ...</busy> +
+

Rendezvous synchronization

+
k
rule <k> rendezvous V:Val; => .K ...</k> + <k> rendezvous V; => .K ...</k> [group(rendezvous)] +
+

Auxiliary declarations and operations

+

Turns a list of parameters and a list of instance values for them +into a list of variable declarations.

+
k
syntax Stmt ::= mkDecls(Params,Vals) [function] + rule mkDecls((T:Type X:Id, Ps:Params), (V:Val, Vs:Vals)) + => T X=V; mkDecls(Ps,Vs) + rule mkDecls(.Params,.Vals) => {} +
+

Location lookup.

+
k
syntax Exp ::= lookup(Int) // see NOTES.md for why Exp instead of KItem + rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store> [group(lookup)] +
+

Environment recovery.

+
k
// TODO: same comment regarding setEnv(...) as for simple untyped + + syntax KItem ::= setEnv(Map) + rule <k> setEnv(Env) => .K ...</k> <env> _ => Env </env> + rule (setEnv(_) => .K) ~> setEnv(_) +
+

lvalue and loc

+
k
syntax Exp ::= lvalue(K) + syntax Val ::= loc(Int) + + rule <k> lvalue(X:Id => loc(L)) ...</k> <env>... X |-> L:Int ...</env> + + //context lvalue(_[HOLE]) + //context lvalue(HOLE[_]) + context lvalue(_::Exp[HOLE::Exps]) + context lvalue(HOLE::Exp[_::Exps]) + + rule lvalue(lookup(L:Int) => loc(L)) +
+

Adds the corresponding depth to an array type

+
k
syntax Type ::= Type "<" Vals ">" [function] + rule T:Type<_,Vs:Vals> => T[]<Vs> + rule T:Type<.Vals> => T +
+
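
As a small worked example (added for illustration), int<10, 20, .Vals> unfolds as follows:

+
k
// int < 10, 20, .Vals >
//   => int[] < 20, .Vals >
//   => int[][] < .Vals >
//   => int[][]
+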

Sequences of locations.

+
k
syntax Map ::= Int "..." Int "|->" K [function] + rule N...M |-> _ => .Map requires N >Int M + rule N...M |-> K => N |-> K (N +Int 1)...M |-> K requires N <=Int M + +// Type of a value. + syntax Type ::= typeOf(K) [function] + rule typeOf(_:Int) => int + rule typeOf(_:Bool) => bool + rule typeOf(_:String) => string + rule typeOf(array(T,_,_)) => (T[]) // () needed! K parses [] as "no tags" + rule typeOf(lambda(T,Ps,_)) => getTypes(Ps) -> T + rule typeOf(undefined(T)) => T + rule typeOf(nothing(T)) => T +
+

List of types of a parameter.

+
k
syntax Types ::= getTypes(Params) [function] + rule getTypes(T:Type _:Id) => T, .Types // I would like to not use .Types + rule getTypes(T:Type _:Id, P, Ps) => T, getTypes(P,Ps) + rule getTypes(.Params) => void, .Types +endmodule +
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/tests/regression-new/pl-tutorial/2_languages/2_kool/1_untyped/kool-untyped/index.html b/k-distribution/tests/regression-new/pl-tutorial/2_languages/2_kool/1_untyped/kool-untyped/index.html new file mode 100644 index 00000000000..35e0afc2c71 --- /dev/null +++ b/k-distribution/tests/regression-new/pl-tutorial/2_languages/2_kool/1_untyped/kool-untyped/index.html @@ -0,0 +1,1529 @@ + + + + + + + + + + + + + + +KOOL — Untyped | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

KOOL — Untyped

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K semantic definition of the untyped KOOL language. KOOL +is aimed at being a pedagogical and research language that captures +the essence of the object-oriented programming paradigm. Its untyped +variant discussed here is simpler than the typed one, ignoring several +intricate aspects of types in the presence of objects. A program +consists of a set of class declarations. Each class can extend at +most one other class (KOOL is single-inheritance). A class can +declare a set of fields and a set of methods, all public and called +the class' members. Specifically, KOOL includes the +following features:

+
    +
  • +

    Class declarations, where a class may or may not explicitly +extend another class. In case a class does not explicitly extend +another class, then it is assumed that it extends the default top-most +and empty (i.e., no members) class called Object. Each class +is required to declare precisely one homonymous method, called its +constructor. Each valid program should contain one class +named Main, whose constructor, Main(), takes no +arguments. The execution of a program consists of creating an object +instance of class Main and invoking the constructor +Main() on it, that is, of executing new Main();.

    +
  • +
  • +

    All features of SIMPLE (see examples/simple/untyped), +i.e., multidimensional arrays, function (here called "method") +abstractions with call-by-value parameter passing style and static +scoping, blocks with locals, input/output, parametric exceptions, and +concurrency via dynamic thread creation/termination and synchronization. +The only change in the syntax of SIMPLE when imported in KOOL is the +function declaration keyword, function, which is changed into +method. The exact same desugaring macros from SIMPLE are +also included in KOOL. We can think of KOOL's classes as embedding +SIMPLE programs (extended with OO constructs, as discussed next).

    +
  • +
  • +

    Object creation using the new C(e1,...,en) +expression construct. An object instance of class C is first +created and then the constructor C(e1,...,en) is implicitly +called on that object. KOOL only allows (and requires) one +constructor per class. The class constructor can be called either +implicitly during a new object creation for the class, or explicitly. +The superclass constructor is not implicitly invoked when a +class constructor is invoked; if you want to invoke the superclass +constructor from a subclass constructor then you have to do it +explicitly.

    +
  • +
  • +

    An expression construct this, which evaluates to the +current object.

    +
  • +
  • +

    An expression construct super, which is used (only) in +combination with member lookup (see next) to refer to a superclass +field or method.

    +
  • +
  • +

    A member lookup expression construct e.x, where e +is an expression (either an expression expected to evaluate to an object +or the super construct) and x is a class member name, +that is, a field or a method name.

    +
  • +
  • +

    Expression constructs e instanceOf C and (C) e, where e is an expression expected to evaluate to an object and C a class name. The former tells whether the class of e is a subclass of C, that is, whether e can be used as an instance of C, and the latter changes the class of e to C. These operations always succeed: the former returns a Boolean value, while the latter changes the current class of e to C regardless of whether it is safe to do so or not. The typed version of KOOL will check the safety of casting by ensuring that the instance class of the object is a subclass of C. In untyped KOOL we do not want to perform this check because we want to allow the programmer a maximum of flexibility: if one always accesses only available members, then the program can execute successfully despite the potentially unsafe cast.

    +
  • +
+

There are some specific aspects of KOOL that need to be discussed.

+

First, KOOL is higher-order, allowing function abstractions to be +treated like any other values in the language. For example, if +m is a method of object e then e.m +evaluates to the corresponding function abstraction. The function +abstraction is in fact a closure, because in addition to the method +parameters and body it also encapsulates the object value (i.e., the +environment of the object together with its current class—see below) +that e evaluates to. This way, function abstractions can be +invoked anywhere and have the capability to change the state of their +object. For example, if m is a method of object e +which increments a field c of e when invoked, and if +getm is another method of e which simply returns +m when invoked, then the double application +(e.getm())() has the same effect as e.m(), that is, +increments the counter c of e. Note that the +higher-order nature of KOOL was not originally planned; it came as a +natural consequence of evaluating methods to closures and we decided +to keep it. If you do not like it then do not use it.

+
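
The counter scenario described above can be written as the following hypothetical KOOL program (class and member names are ours, for illustration only):

class Counter {
  var c;
  method Counter() { c = 0; }
  method m() { c = c + 1; }
  method getm() { return m; }
}

class Main {
  method Main() {
    var e = new Counter();
    e.m();
    (e.getm())();
    print(e.c, "\n");
  }
}

Both e.m() and (e.getm())() increment the field c of the same object, because getm returns the closure of m, which still encapsulates e's object value.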

Second, since all the fields and methods are public in KOOL and since they can be redeclared in subclasses, it is not immediately clear how to look up the member x when we write e.x and e is different from super. We distinguish two cases, depending on whether e.x occurs in a method invocation context (i.e., e.x(...)) or in a field context. KOOL has dynamic method dispatch, so if e.x is invoked as a method then x will be searched for starting with the instance class of the object value to which e evaluates. If e.x occurs in a non-method-invocation context then x will be treated as a field (although it may hold a method closure due to the higher-order nature of KOOL) and thus will be searched starting with the current class of the object value of e (which, because of this and casting, may be different from its instance class). In order to achieve the above, each object value will consist of a pair holding the current class of the object and an environment stack with one layer for each class in the object's instance class hierarchy.

+

Third, although KOOL has dynamic method dispatch, its capabilities described above are powerful enough to allow us to mimic static method dispatch. For example, suppose that you want to invoke method m() statically. Then all you need to do is to declare a local variable and bind it to m, for example var staticm = m;, and then call staticm(). This works because staticm is first bound to the method closure that m evaluates to, and then looked up as any local variable when invoked. We only enable dynamic method dispatch when we have an object member in an application position, e.g., m().

+

In what follows, we limit our comments to the new, KOOL-specific aspects of the language. We refer the reader to the untyped SIMPLE language for documentation on the remaining features, because those were all borrowed from SIMPLE.

+
k
module KOOL-UNTYPED-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

The syntax of KOOL extends that of SIMPLE with object-oriented constructs. We removed from the K annotated syntax of SIMPLE two constructs, namely the one for function declarations (because we want to call them methods now) and the one for function application (because application is no longer strict in the first argument—it needs to initiate dynamic method dispatch). The additional syntax includes:

+
    +
  • First, we need a new dedicated identifier, Object, for +the default top-most class.
  • +
  • Second, we rename the function keyword of SIMPLE into method.
  • +
  • Third, we add syntax for class declarations together with a +macro making classes which extend nothing to extend Object.
  • +
  • Fourth, we change the strictness attribute of application +into strict(2).
  • +
  • Finally, we add syntax and corresponding strictness +for the KOOL object-oriented constructs.
  • +
+
k
syntax Id ::= "Object" [token] | "Main" [token] + + syntax Stmt ::= "var" Exps ";" + | "method" Id "(" Ids ")" Block // called "function" in SIMPLE + | "class" Id Block // KOOL + | "class" Id "extends" Id Block // KOOL + + syntax Exp ::= Int | Bool | String | Id + | "this" // KOOL + | "super" // KOOL + | "(" Exp ")" [bracket] + | "++" Exp + | Exp "instanceOf" Id [strict(1)] // KOOL + | "(" Id ")" Exp [strict(2)] // KOOL cast + | "new" Id "(" Exps ")" [strict(2)] // KOOL + | Exp "." Id // KOOL + > Exp "[" Exps "]" [strict] + > Exp "(" Exps ")" [strict(2)] // was strict in SIMPLE + | "-" Exp [strict] + | "sizeOf" "(" Exp ")" [strict] + | "read" "(" ")" + > left: + Exp "*" Exp [strict, left] + | Exp "/" Exp [strict, left] + | Exp "%" Exp [strict, left] + > left: + Exp "+" Exp [strict, left] + | Exp "-" Exp [strict, left] + > non-assoc: + Exp "<" Exp [strict, non-assoc] + | Exp "<=" Exp [strict, non-assoc] + | Exp ">" Exp [strict, non-assoc] + | Exp ">=" Exp [strict, non-assoc] + | Exp "==" Exp [strict, non-assoc] + | Exp "!=" Exp [strict, non-assoc] + > "!" Exp [strict] + > left: + Exp "&&" Exp [strict(1), left] + | Exp "||" Exp [strict(1), left] + > "spawn" Block + > Exp "=" Exp [strict(2), right] + + syntax Ids ::= List{Id,","} + + syntax Exps ::= List{Exp,","} [strict, overload(exps)] + syntax Val + syntax Vals ::= List{Val,","} [overload(exps)] + + syntax Block ::= "{" "}" + | "{" Stmt "}" + + syntax Stmt ::= Block + | Exp ";" [strict] + | "if" "(" Exp ")" Block "else" Block [avoid, strict(1)] + | "if" "(" Exp ")" Block [macro] + | "while" "(" Exp ")" Block + | "for" "(" Stmt Exp ";" Exp ")" Block [macro] + | "return" Exp ";" [strict] + | "return" ";" [macro] + | "print" "(" Exps ")" ";" [strict] + | "try" Block "catch" "(" Id ")" Block + | "throw" Exp ";" [strict] + | "join" Exp ";" [strict] + | "acquire" Exp ";" [strict] + | "release" Exp ";" [strict] + | "rendezvous" Exp ";" [strict] + + syntax Stmt ::= Stmt Stmt [right] +
+

Old desugaring rules, from SIMPLE

+
k
rule if (E) S => if (E) S else {} + rule for(Start Cond; Step) {S} => {Start while (Cond) {S Step;}} + rule var E1::Exp, E2::Exp, Es::Exps; => var E1; var E2, Es; [anywhere] + rule var X::Id = E; => var X; X = E; [anywhere] +
+

New desugaring rule

+
k
rule class C:Id S => class C extends Object S // KOOL + +endmodule +
+

Semantics

+

We first discuss the new configuration of KOOL, which extends that of +SIMPLE. Then we include the semantics of the constructs borrowed from +SIMPLE unchanged; we refrain from discussing those, because they were +already discussed in the K definition of SIMPLE. Then we discuss +changes to SIMPLE's semantics needed for the more general meaning of +the previous SIMPLE constructs (for example for thread spawning, +assignment, etc.). Finally, we discuss in detail the +semantics of the additional KOOL constructs.

+
k
module KOOL-UNTYPED + imports KOOL-UNTYPED-SYNTAX + imports DOMAINS +
+

Configuration

+

KOOL removes one cell and adds two nested cells to the configuration +of SIMPLE. The cell which is removed is the one holding the global +environment, because a KOOL program consists of a set of classes only, +with no global declarations. In fact, since informally speaking each +KOOL class now includes a SIMPLE program, it is safe to say that the +global variables in SIMPLE became class fields in KOOL. Let us now +discuss the new cells that are added to the configuration of SIMPLE.

+
    +
  • +

    The cell crntObj holds data pertaining to the current object, that is, the object environment in which the code in cell k executes: crntClass holds the current class (which can change as methods of the current object are invoked); envStack holds the stack of environments as a list, each layer corresponding to one class in the object's instance class hierarchy; location, which is optional, holds the location in the store where the current object is or has to be located (this is useful both for method closures and for the semantics of object creation).

    +
  • +
  • +

    The cell classes holds all the declared classes, each class being held in its own classData cell, which contains a name (className), a parent class (baseClass), and the actual member declarations (declarations).

    +
  • +
+
k
// the syntax declarations below are required because the sorts are + // referenced directly by a production and, because of the way KIL to KORE + // is implemented, the configuration syntax is not available yet + // should simply work once KIL is removed completely + // check other definitions for this hack as well + syntax EnvCell + syntax ControlCell + syntax EnvStackCell + syntax CrntObjCellFragment + + configuration <T color="red"> + <threads color="orange"> + <thread multiplicity="*" type="Set" color="yellow"> + <k color="green"> $PGM:Stmt ~> execute </k> + //<br/> // TODO(KORE): support latex annotations #1799 + <control color="cyan"> + <fstack color="blue"> .List </fstack> + <xstack color="purple"> .List </xstack> + //<br/> // TODO(KORE): support latex annotations #1799 + <crntObj color="Fuchsia"> // KOOL + <crntClass> Object </crntClass> + <envStack> .List </envStack> + <location multiplicity="?"> .K </location> + </crntObj> + </control> + //<br/> // TODO(KORE): support latex annotations #1799 + <env color="violet"> .Map </env> + <holds color="black"> .Map </holds> + <id color="pink"> 0 </id> + </thread> + </threads> + //<br/> // TODO(KORE): support latex annotations #1799 + <store color="white"> .Map </store> + <busy color="cyan">.Set </busy> + <terminated color="red"> .Set </terminated> + <input color="magenta" stream="stdin"> .List </input> + <output color="brown" stream="stdout"> .List </output> + <nextLoc color="gray"> 0 </nextLoc> + //<br/> // TODO(KORE): support latex annotations #1799 + <classes color="Fuchsia"> // KOOL + <classData multiplicity="*" type="Map" color="Fuchsia"> + // the Map has as its key the first child of the cell, + // in this case the className cell. + <className color="Fuchsia"> Main </className> + <baseClass color="Fuchsia"> Object </baseClass> + <declarations color="Fuchsia"> .K </declarations> + </classData> + </classes> + </T> +
+

Unchanged Semantics from untyped SIMPLE

+

The semantics below is taken over from SIMPLE unchanged. The semantics of function declaration and invocation, including the use of the special lambda abstraction value, needs to change in order to account for the fact that methods are now invoked in their object's environment. The semantics of function return actually stays unchanged. Also, the semantics of program initialization is different: now we have to create an instance of the Main class, which also calls the constructor Main(), while in SIMPLE we only had to invoke the function main(). Finally, the semantics of thread spawning needs to change, too: the parent thread needs to also share its object environment with the spawned thread (in addition to its local environment, like in SIMPLE). This is needed in order to be able to spawn method invocations under dynamic method dispatch; for example, spawn { run(); } will need to look up the method run() in the newly created thread, an operation which would most likely fail unless the child thread sees the object environment of the parent thread. Note that the spawn statement of KOOL is more permissive than the threads of Java. In fact, the latter can be implemented in terms of our spawn—see the program threads.kool for a sketch.

+

Below is a subset of the values of SIMPLE, which are also values +of KOOL. We will add other values later in the semantics, such as +object and method closures.

+
k
syntax Val ::= Int | Bool | String + | array(Int,Int) + syntax Exp ::= Val + syntax Exps ::= Vals + syntax KResult ::= Val + syntax KResult ::= Vals +
+

The semantics below are taken verbatim from the untyped SIMPLE +definition.

+
k
syntax KItem ::= "undefined" + + rule <k> var X:Id; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> undefined ...</store> + <nextLoc> L:Int => L +Int 1 </nextLoc> + + + context var _:Id[HOLE]; + + rule <k> var X:Id[N:Int]; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> array(L +Int 1, N) + (L +Int 1) ... (L +Int N) |-> undefined ...</store> + <nextLoc> L:Int => L +Int 1 +Int N </nextLoc> + requires N >=Int 0 + + + syntax Id ::= "$1" [token] | "$2" [token] + rule var X:Id[N1:Int, N2:Int, Vs:Vals]; + => var X[N1]; + { + var $1=X; + for(var $2=0; $2 <= N1 - 1; ++$2) { + var X[N2,Vs]; + $1[$2] = X; + } + } + + + rule <k> X:Id => V ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V:Val ...</store> [group(lookup)] + + + context ++(HOLE => lvalue(HOLE)) + rule <k> ++loc(L) => I +Int 1 ...</k> + <store>... L |-> (I:Int => I +Int 1) ...</store> [group(increment)] + + + rule I1 + I2 => I1 +Int I2 + rule Str1 + Str2 => Str1 +String Str2 + rule I1 - I2 => I1 -Int I2 + rule I1 * I2 => I1 *Int I2 + rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0 + rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0 + rule - I => 0 -Int I + rule I1 < I2 => I1 <Int I2 + rule I1 <= I2 => I1 <=Int I2 + rule I1 > I2 => I1 >Int I2 + rule I1 >= I2 => I1 >=Int I2 + + rule V1:Val == V2:Val => V1 ==K V2 + rule V1:Val != V2:Val => V1 =/=K V2 + rule ! T => notBool(T) + rule true && E => E + rule false && _ => false + rule true || _ => true + rule false || E => E + + + rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs] + [anywhere] + + rule array(L,_)[N:Int] => lookup(L +Int N) + [anywhere] + + + rule sizeOf(array(_,N)) => N +
+

The semantics of function application needs to change into dynamic +method dispatch invocation, which is defined shortly. However, +interestingly, the semantics of return stays unchanged.

+
k
rule <k> return(V:Val); ~> _ => V ~> K </k> + <control> + <fstack> ListItem(fstackFrame(Env,K,XS,<crntObj> CO </crntObj>)) => .List ...</fstack> + <xstack> _ => XS </xstack> + <crntObj> _ => CO </crntObj> + </control> + <env> _ => Env </env> + + syntax Val ::= "nothing" + rule return; => return nothing; + + + rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input> [group(read)] + + + context (HOLE => lvalue(HOLE)) = _ + + rule <k> loc(L) = V:Val => V ...</k> <store>... L |-> (_ => V) ...</store> + [group(assignment)] + + + rule {} => .K + rule <k> { S } => S ~> setEnv(Env) ...</k> <env> Env </env> + + + rule S1::Stmt S2::Stmt => S1 ~> S2 + + rule _:Val; => .K + + rule if ( true) S else _ => S + rule if (false) _ else S => S + + rule while (E) S => if (E) {S while(E)S} + + rule <k> print(V:Val, Es => Es); ...</k> <output>... .List => ListItem(V) </output> + [group(print)] + rule print(.Vals); => .K + + + syntax KItem ::= xstackFrame(Id,Stmt,K,Map,K) + // TODO(KORE): drop the additional production once parsing issue #1842 is fixed + | (Id,Stmt,K,Map,K) + + syntax KItem ::= "popx" + + rule <k> (try S1 catch(X) {S2} => S1 ~> popx) ~> K </k> + <control> + <xstack> .List => ListItem(xstackFrame(X, S2, K, Env, C)) ...</xstack> + C + </control> + <env> Env </env> + + rule <k> popx => .K ...</k> + <xstack> ListItem(_) => .List ...</xstack> + + rule <k> throw V:Val; ~> _ => { var X = V; S2 } ~> K </k> + <control> + <xstack> ListItem(xstackFrame(X, S2, K, Env, C)) => .List ...</xstack> + (_ => C) + </control> + <env> _ => Env </env> +
+

Thread spawning needs a new semantics, because we want the child +thread to also share the object environment with its parent. The new +semantics of thread spawning will be defined shortly. However, +interestingly, the other concurrency constructs keep their semantics +from SIMPLE unchanged.

+
k
// TODO(KORE): ..Bag should be . throughout this definition #1772 + rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag) + /* + rule (<thread>... <k>.</k> <holds>H</holds> <id>T</id> ...</thread> => .) + */ + <busy> Busy => Busy -Set keys(H) </busy> + <terminated>... .Set => SetItem(T) ...</terminated> + + rule <k> join T:Int; => .K ...</k> + <terminated>... SetItem(T) ...</terminated> + + rule <k> acquire V:Val; => .K ...</k> + <holds>... .Map => V |-> 0 ...</holds> + <busy> Busy (.Set => SetItem(V)) </busy> + requires (notBool(V in Busy:Set)) [group(acquire)] + + rule <k> acquire V; => .K ...</k> + <holds>... V:Val |-> (N:Int => N +Int 1) ...</holds> + + rule <k> release V:Val; => .K ...</k> + <holds>... V |-> (N => N:Int -Int 1) ...</holds> + requires N >Int 0 + + rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds> + <busy>... SetItem(V) => .Set ...</busy> + + rule <k> rendezvous V:Val; => .K ...</k> + <k> rendezvous V; => .K ...</k> [group(rendezvous)] +
+

Unchanged auxiliary operations from untyped SIMPLE

+
k
syntax Stmt ::= mkDecls(Ids,Vals) [function] + rule mkDecls((X:Id, Xs:Ids), (V:Val, Vs:Vals)) => var X=V; mkDecls(Xs,Vs) + rule mkDecls(.Ids,.Vals) => {} + + // TODO(KORE): clarify sort inferences #1803 + syntax Exp ::= lookup(Int) + /* + syntax KItem ::= lookup(Int) + */ + rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store> [group(lookup)] + + syntax KItem ::= setEnv(Map) + rule <k> setEnv(Env) => .K ...</k> <env> _ => Env </env> + rule (setEnv(_) => .K) ~> setEnv(_) + // TODO: How can we make sure that the second rule above applies before the first one? + // Probably we'll deal with this using strategies, eventually. + + syntax Exp ::= lvalue(K) + syntax Val ::= loc(Int) + + rule <k> lvalue(X:Id => loc(L)) ...</k> <env>... X |-> L:Int ...</env> + + context lvalue(_::Exp[HOLE::Exps]) + context lvalue(HOLE::Exp[_::Exps]) + + rule lvalue(lookup(L:Int) => loc(L)) + + + syntax Map ::= Int "..." Int "|->" K [function] + rule N...M |-> _ => .Map requires N >Int M + rule N...M |-> K => N |-> K (N +Int 1)...M |-> K requires N <=Int M +
+

Changes to the existing untyped SIMPLE semantics

+

When we extend a language, sometimes we need to do more than just add +new language constructs and semantics for them. Sometimes we want to +also extend the semantics of existing language constructs, in order to +get more from them.

+

Program initialization

+

In SIMPLE, once all the global declarations were processed, the +function main() was invoked. In KOOL, the global +declarations are classes, and their specific semantics is given +shortly; essentially, they are pre-processed one by one and added +into the class cell structure in the configuration. +Once all the classes are processed, the computation item +execute, which was placed right after the program in the +initial configuration, is reached. In SIMPLE, the program was +initialized by calling the method main(). In KOOL, the +program is initialized by creating an object instance of class +Main. This will also implicitly call the method +Main() (the Main class constructor). The emptiness +of the env cell below is just a sanity check, to make sure +that the user has not declared anything but classes at the top level +of the program.

+
k
syntax KItem ::= "execute" + rule <k> execute => new Main(.Exps); </k> <env> .Map </env> +
+
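Thus, the smallest complete untyped KOOL program has the shape below (a minimal sketch): the only top-level declaration is the Main class, and its constructor plays the role that main() played in SIMPLE:

class Main {
  method Main() {
    print("Hello KOOL!\n");
  }
}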

The semantics of new (defined below) requires the execution of all the class's declarations (and also those of its superclasses).

+

Object and method closures

+

Before we can define the semantics of method application (previously +called function application in SIMPLE), we need to add two more values +to the language, namely object and method closures:

+
k
syntax Val ::= objectClosure(Id, List) + | methodClosure(Id,Int,Ids,Stmt) +
+

An object value consists of an objectClosure-wrapped bag +containing the current class of the object and the environment stack +of the object. The current class of an object will always be one of +the classes mapped to an environment in the environment stack of the +object. A method closure encapsulates the method's parameters and +code (last two arguments), as well as the object context in which the +method code should execute. This object context includes the current +class of the object (the first argument of methodClosure) and +the object environment stack (located in the object stored at the +location specified as the second argument of methodClosure).
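For instance, assuming a hypothetical class Point that extends Object and declares a single field x, and assuming that location 3 was allocated for x and location 5 for the object itself, an instance of Point would be represented by a value of the following shape (envStackFrame is the wrapper for environment-stack layers, introduced with the semantics of new below):

objectClosure(Point,
    ListItem(envStackFrame(Point, x |-> 3))
    ListItem(envStackFrame(Object, .Map)))

A method of Point would correspondingly be stored as a value of the form methodClosure(Point, 5, Xs, S), where 5 is the location holding the object above.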

+

Method application

+

KOOL has a complex mechanism to invoke methods, because it allows both dynamic method dispatch and methods as first-class-citizen values (the latter making it a higher-order language). The invocation mechanism will be defined later. What is sufficient to know for now is that the two arguments of the application construct eventually reduce to values, the former being a method closure and the latter a list of values. The semantics of the method closure application is then as expected: the local environment and control are stacked, then we switch to the method closure's class and object environment and execute the method body. The mkDecls construct is the one that came with the unchanged semantics of SIMPLE above.

+
k
syntax KItem ::= fstackFrame(Map,K,List,K) + // TODO(KORE): drop the additional production once parsing issue #1842 is fixed + | (Map,K,K) + + rule <k> methodClosure(Class,OL,Xs,S)(Vs:Vals) ~> K + => mkDecls(Xs,Vs) S return; </k> + <env> Env => .Map </env> + <store>... OL |-> objectClosure(_, EnvStack)...</store> + //<br/> // TODO(KORE): support latex annotations #1799 + <control> + <xstack> XS </xstack> + <fstack> .List => ListItem(fstackFrame(Env, K, XS, <crntObj> Obj' </crntObj>)) + ...</fstack> + <crntObj> Obj' => <crntClass> Class </crntClass> <envStack> EnvStack </envStack> </crntObj> + </control> +
+

Spawn

+

We want to extend the semantics of spawn to also share the +current object environment with the child thread, in addition to the +current environment. This extension will allow us to also use method +invocations in the spawned statements, which will be thus looked up as +expected, using dynamic method dispatch. This lookup operation would +fail if the child thread did not have access to its parent's object +environment.

+
k
rule <thread>... + <k> spawn S => !T:Int ...</k> + <env> Env </env> + <crntObj> Obj </crntObj> + ...</thread> + (.Bag => <thread>... + <k> S </k> + <env> Env </env> + <id> !T </id> + <crntObj> Obj </crntObj> + ...</thread>) +
+
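The scenario mentioned earlier can be sketched as the following untyped KOOL program (hypothetical names, not from the distribution): the spawned statement invokes run(), which is found through the object environment shared with the parent thread:

class Worker {
  var done;
  method Worker() { done = false; }
  method run() { done = true; }
  method start() { return spawn { run(); }; }   // run() is resolved via the shared object environment
}
class Main {
  method Main() {
    var w = new Worker();
    join w.start();        // wait for the spawned thread to terminate
    print(w.done, "\n");   // prints true
  }
}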

Semantics of the new KOOL constructs

+

Class declaration

+

Initially, the classes forming the program are moved into their +corresponding cells:

+
k
rule <k> class Class1 extends Class2 { S } => .K ...</k> + <classes>... (.Bag => <classData> + <className> Class1 </className> + <baseClass> Class2 </baseClass> + <declarations> S </declarations> + </classData>) + ...</classes> +
+

Method declaration

+

Like in SIMPLE, method names are added to the environment and bound to their code. However, unlike in SIMPLE, where each function was executed in the same environment, namely the program's global environment, a method in KOOL needs to be executed in its object's environment. Thus, methods evaluate to closures, which encapsulate their object's context (i.e., the current class and environment stack of the object) in addition to the method's parameters and body. This approach of binding method names to method closures in the environment also allows objects to pass their methods to other objects, to dynamically change their methods by assigning them other method closures, and even to allow all these to be done from other objects. This gives the KOOL programmer a lot of power; one should use this power wisely, though, because programs can easily become hard to understand and reason about if one overuses these features. The small program after the rule below illustrates this.

+
k
rule <k> method F:Id(Xs:Ids) S => .K ...</k> + <crntClass> Class:Id </crntClass> + <location> OL:Int </location> + <env> Env => Env[F <- L] </env> + <store>... .Map => L |-> methodClosure(Class,OL,Xs,S) ...</store> + <nextLoc> L => L +Int 1 </nextLoc> +
+
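The following sketch (made-up names, not from the distribution) shows a field holding a method closure being invoked and then dynamically replaced with another method closure:

class Actions {
  var f;
  method one() { return 1; }
  method two() { return 2; }
  method Actions() { f = one; }   // store the closure of method one in field f
}
class Main {
  method Main() {
    var o = new Actions();
    print((o.f)(), "\n");         // prints 1
    o.f = o.two;                  // dynamically replace the stored method
    print((o.f)(), "\n");         // prints 2
  }
}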

New

+

The semantics of new consists of two actions: memory +allocation for the new object and execution of the corresponding +constructor. Then the created object is returned as the result of the +new operation; the value returned by the constructor, if any, +is discarded. The current environment and object are stored onto the +stack and recovered after new (according to the semantics of +return borrowed from SIMPLE, when the statement +return this; in the rule below is reached and evaluated), +because the object creation part of new will destroy them. +The rule below also initializes the object creation process by +emptying the local environment and the current object, and allocating +a location in the store where the created object will be eventually +stored (this is what the storeObj task after the object +creation task in the rule below will do—its rule is defined +shortly). The location where the object will be stored is also made +available in the crntObj cell, so that method closures can +refer to it (see rule above).

+
k
syntax KItem ::= "envStackFrame" "(" Id "," Map ")" + + rule <k> new Class:Id(Vs:Vals) ~> K + => create(Class) ~> storeObj ~> Class(Vs); return this; </k> + <env> Env => .Map </env> + <nextLoc> L:Int => L +Int 1 </nextLoc> + //<br/> // TODO(KORE): support latex annotations #1799 + <control> <xstack> XS </xstack> + <crntObj> Obj + => <crntClass> Object </crntClass> + <envStack> ListItem(envStackFrame(Object, .Map)) </envStack> + <location> L </location> + </crntObj> + <fstack> .List => ListItem(fstackFrame(Env, K, XS, <crntObj> Obj </crntObj>)) ...</fstack> + </control> +
+

The creation of a new object (the memory allocation part only) is a recursive process, which requires first creating an object for the superclass. A memory object representation is a layered structure: for each class on the path from the instance class to the root of the hierarchy there is a layer including the memory allocated for the members (both fields and methods) of that class.

+
k
syntax KItem ::= create(Id) + + rule <k> create(Class:Id) + => create(Class1) ~> setCrntClass(Class) ~> S ~> addEnvLayer ...</k> + <className> Class </className> + <baseClass> Class1:Id </baseClass> + <declarations> S </declarations> + + rule <k> create(Object) => .K ...</k> +
+

The next operation sets the current class of the current object. This must be done at each layer, because the current class of the object is captured as part of the method closures (see the semantics of method declarations above).

+
k
syntax KItem ::= setCrntClass(Id) + + rule <k> setCrntClass(C) => .K ...</k> + <crntClass> _ => C </crntClass> +
+

The next operation adds a new tagged environment layer to the +current object and gets ready for the next layer by clearing the +environment (note that create expects the environment to be +empty).

+
k
syntax KItem ::= "addEnvLayer" + + rule <k> addEnvLayer => .K ...</k> + <env> Env => .Map </env> + <crntClass> Class:Id </crntClass> + <envStack> .List => ListItem(envStackFrame(Class, Env)) ...</envStack> +
+

The following operation stores the created object at the location +reserved by new. Note that the location reserved by +new was temporarily stored in the crntObj cell +precisely for this purpose. Now that the newly created object is +stored at its location and that all method closures are aware of it, +the location is unnecessary and thus we delete it from the +crntObj cell.

+
k
syntax KItem ::= "storeObj" + + rule <k> storeObj => .K ...</k> + <crntObj> <crntClass> CC </crntClass> <envStack> ES </envStack> (<location> L:Int </location> => .Bag) </crntObj> + <store>... .Map => L |-> objectClosure(CC, ES) ...</store> +
+

Self reference

+

The semantics of this is straightforward: evaluate to the +current object.

+
k
rule <k> this => objectClosure(CC, ES) ...</k> + <crntObj> <crntClass> CC </crntClass> <envStack> ES </envStack> </crntObj> +
+

Object member access

+

We can access an object member (field or method) either explicitly, using the construct e.x, or implicitly, using only the member name x directly. The borrowed semantics of SIMPLE will already look up a sole name in the local environment. The first rule below reduces implicit member access to explicit access when the name cannot be found in the local environment. There are two cases to analyze for explicit object member access, depending upon whether the object is a proper object or just a redirection to the parent class via the construct super. In the first case, we evaluate the object expression and look up the member starting with the current class (static scoping). Note the use of the conditional evaluation context. In the second case, we just look up the member starting with the superclass of the current class. In both cases, the lookupMember task eventually yields a lookup(L) task for some appropriate location L, which will be further solved with the corresponding rule borrowed from SIMPLE. Note that the current object is not altered by super, so future method invocations see the entire object, as needed for dynamic method dispatch.

+
k
rule <k> X:Id => this . X ...</k> <env> Env:Map </env> + requires notBool(X in keys(Env)) + + context HOLE._::Id requires (HOLE =/=K super) + +// TODO: explain how Assoc matching has been replaced with two rules here. +// Maybe also improve it a bit. + +/* rule objectClosure(<crntClass> Class:Id </crntClass> + <envStack>... envStackFrame(Class,EnvC) EStack </envStack>) + . X:Id + => lookupMember(envStackFrame(Class,EnvC) EStack, X) */ + + rule objectClosure(Class:Id, ListItem(envStackFrame(Class,Env)) EStack) + . X:Id + => lookupMember(ListItem(envStackFrame(Class,Env)) EStack, X) + rule objectClosure(Class:Id, (ListItem(envStackFrame(Class':Id,_)) => .List) _) + . _X:Id + requires Class =/=K Class' + +/* rule <k> super . X => lookupMember(EStack, X) ...</k> + <crntClass> Class </crntClass> + <envStack>... envStackFrame(Class,EnvC) EStack </envStack> */ + rule <k> super . X => lookupMember(EStack, X) ...</k> + <crntClass> Class:Id </crntClass> + <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack> + rule <k> super . _X ...</k> + <crntClass> Class </crntClass> + <envStack> ListItem(envStackFrame(Class':Id,_)) => .List ...</envStack> + requires Class =/=K Class' +
+

Method invocation

+

Unlike in SIMPLE, in KOOL application was declared strict only in its second argument. That is because we want to ensure dynamic method dispatch when the first argument is an object member access. As a consequence, we need to consider all the cases of interest for the first argument and to explicitly say what to do in each case. In all cases except for member access in a proper object (i.e., not super), we want the same behavior for the first argument as if it were not in a method invocation position. When it is a member access (the third rule below), we look it up starting with the instance class of the corresponding object. This ensures dynamic dispatch for methods; it actually dynamically dispatches field accesses, too, which is correct in KOOL, because one can assign method closures to fields, and such a field may appear in a method invocation context. The last context declaration below says that method applications or array accesses are also allowed as first argument to applications; that is because methods are allowed to return methods and arrays are allowed to hold methods in KOOL, since it is higher-order. If that is the case, then we want to evaluate the method call or the array access.

+
k
rule <k> (X:Id => V)(_:Exps) ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V:Val ...</store> [group(lookup)] + + rule <k> (X:Id => this . X)(_:Exps) ...</k> + <env> Env </env> + requires notBool(X in keys(Env)) + + context HOLE._::Id(_) requires HOLE =/=K super + + rule (objectClosure(_, EStack) . X + => lookupMember(EStack, X:Id))(_:Exps) + +/* rule <k> (super . X + => lookupMember(EStack,X))(_:Exps)...</k> + <crntClass> Class </crntClass> + <envStack>... envStackFrame(Class,_) EStack </envStack> */ + rule <k> (super . X + => lookupMember(EStack,X))(_:Exps)...</k> + <crntClass> Class </crntClass> + <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack> + rule <k> (super . _X)(_:Exps) ...</k> + <crntClass> Class </crntClass> + <envStack> ListItem(envStackFrame(Class':Id,_)) => .List ...</envStack> + requires Class =/=K Class' + + // TODO(KORE): fix getKLabel #1801 + rule (A:Exp(B:Exps))(C:Exps) => A(B) ~> #freezerFunCall(C) + rule (A:Exp[B:Exps])(C:Exps) => A[B] ~> #freezerFunCall(C) + rule V:Val ~> #freezerFunCall(C:Exps) => V(C) + syntax KItem ::= "#freezerFunCall" "(" K ")" + /* + context HOLE(_:Exps) + requires getKLabel(HOLE) ==K #klabel(`_(_)`) orBool getKLabel(HOLE) ==K #klabel(`_[_]`) + */ +
+

Eventually, each of the rules above produces a lookup(L) +task as a replacement for the method. When that happens, we just +lookup the value at location L:

+
k
rule <k> (lookup(L) => V)(_:Exps) ...</k> <store>... L |-> V:Val ...</store> + [group(lookup)] +
+

The value V looked up above is expected to be a method closure, +in which case the semantics of method application given above will +apply. Otherwise, the execution will get stuck.
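Putting the rules above together, dynamic method dispatch behaves as in the following sketch (hypothetical classes, not from the distribution): describe() is inherited from Shape, but the call to name() inside it is dispatched starting from the instance class of the receiver:

class Shape {
  method Shape() { }
  method name() { return "shape"; }
  method describe() { return name(); }   // name() is looked up starting from the instance class
}
class Circle extends Shape {
  method Circle() { }
  method name() { return "circle"; }
}
class Main {
  method Main() {
    print(new Shape().describe(), " ", new Circle().describe(), "\n");   // prints: shape circle
  }
}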

+

Instance Of

+

The instanceOf construct searches the object's environment stack for a layer corresponding to the desired class. It returns true iff it can find the class; otherwise it returns false. It only gets stuck when its first argument does not evaluate to an object.

+
k
rule objectClosure(_, ListItem(envStackFrame(C,_)) _) + instanceOf C => true + + rule objectClosure(_, (ListItem(envStackFrame(C,_)) => .List) _) + instanceOf C' requires C =/=K C' +//TODO: remove the sort cast ::Id of C above, when sort inference bug fixed + + rule objectClosure(_, .List) instanceOf _ => false +
+

Cast

+

In untyped KOOL, we prefer not to check the validity of casting. In other words, any cast is allowed on any object, simply changing the current class of the object to the desired class. The execution will get stuck later if one attempts to access a field which is not available. Moreover, the execution may complete successfully even in the presence of invalid casts, provided that each member accessed during the current execution is, or happens to be, available.

+
k
rule (C) objectClosure(_ , EnvStack) => objectClosure(C ,EnvStack) +
+

KOOL-specific auxiliary declarations and operations

+

Here we define all the auxiliary constructs used in the above +KOOL-specific semantics (those used in the SIMPLE fragment +have already been defined in a corresponding section above).

+

Objects as lvalues

+

The current machinery borrowed with the semantics of SIMPLE allows us to enrich the set of lvalues, thereby allowing new means to assign values to locations. In KOOL, we want object member names to be lvalues, so that we can assign values to them using the already existing machinery. The first rule below ensures that the object is always explicit, the evaluation context forces the object to be evaluated, and finally the second rule initiates the lookup for the member's location based on the current class of the object. A small program exercising this follows the rules.

+
k
rule <k> lvalue(X:Id => this . X) ...</k> <env> Env </env> + requires notBool(X in keys(Env)) + + context lvalue((HOLE . _)::Exp) + +/* rule lvalue(objectClosure(<crntClass> C </crntClass> + <envStack>... envStackFrame(C,EnvC) EStack </envStack>) + . X + => lookupMember(<envStack> envStackFrame(C,EnvC) EStack </envStack>, + X)) */ + rule lvalue(objectClosure(Class, ListItem(envStackFrame(Class,Env)) EStack) + . X + => lookupMember(ListItem(envStackFrame(Class,Env)) EStack, + X)) + rule lvalue(objectClosure(Class, (ListItem(envStackFrame(Class':Id,_)) => .List) _) + . _X) + requires Class =/=K Class' +
+
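A small sketch of a member name used as an lvalue (made-up names, not from the distribution):

class Cell {
  var val;
  method Cell(v) { val = v; }
}
class Main {
  method Main() {
    var c = new Cell(1);
    c.val = c.val + 1;     // the member name is used as an lvalue
    print(c.val, "\n");    // prints 2
  }
}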

Lookup member

+

It searches for the given member in the given environment stack, +starting with the most concrete class and going up in the hierarchy.

+
k
// TODO(KORE): clarify sort inferences #1803 + syntax Exp ::= lookupMember(List, Id) [function] + /* + syntax KItem ::= lookupMember(EnvStackCell,Id) [function] + */ + +// rule lookupMember(<envStack> envStackFrame(_, <env>... X|->L ...</env>) ...</envStack>, X) +// => lookup(L) + rule lookupMember(ListItem(envStackFrame(_, X|->L _)) _, X) + => lookup(L) + +// rule lookupMember(<envStack> envStackFrame(_, <env> Env </env>) => .List ...</envStack>, X) +// requires notBool(X in keys(Env)) + rule lookupMember(ListItem(envStackFrame(_, Env)) Rest, X) => + lookupMember(Rest, X) + requires notBool(X in keys(Env)) +//TODO: beautify the above + +endmodule +
+

Go to Lesson 2, KOOL typed dynamic.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/tests/regression-new/pl-tutorial/2_languages/2_kool/2_typed/1_dynamic/kool-typed-dynamic/index.html b/k-distribution/tests/regression-new/pl-tutorial/2_languages/2_kool/2_typed/1_dynamic/kool-typed-dynamic/index.html new file mode 100644 index 00000000000..a6d67261233 --- /dev/null +++ b/k-distribution/tests/regression-new/pl-tutorial/2_languages/2_kool/2_typed/1_dynamic/kool-typed-dynamic/index.html @@ -0,0 +1,1388 @@ + + + + + + + + + + + + + + +KOOL — Typed — Dynamic | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

KOOL — Typed — Dynamic

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K dynamic semantics of the typed KOOL language. It is very similar to the semantics of untyped KOOL, the difference being that we now check the typing policy dynamically. Since we now have to declare the types of variables and methods, we adopt a syntax for those which is close to Java. Like in the semantics of untyped KOOL, where we borrowed almost all the semantics of untyped SIMPLE, we are also going to borrow much of the semantics of dynamically typed SIMPLE here. We will highlight the differences between the dynamically typed and the untyped KOOL as we proceed with the semantics. In general, the type policy of the typed KOOL language is similar to that of Java. You may find it useful to also read the discussion in the preamble of the static semantics of typed KOOL before proceeding.

+
k
module KOOL-TYPED-DYNAMIC-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

Like for the untyped KOOL language, the syntax of typed KOOL extends +that of typed SIMPLE with object-oriented constructs. +The syntax below was produced by copying and modifying/extending the +syntax of dynamically typed SIMPLE. In fact, the only change we made +to the existing syntax of dynamically typed SIMPLE was to change the +strictness of the application construct like in untyped KOOL, from +strict to strict(2) (because application is not +strict in the first argument anymore due to dynamic method dispatch). +The KOOL-specific syntactic extensions are identical to those in +untyped KOOL.

+
k
syntax Id ::= "Object" [token] | "Main" [token] +
+

Types

+
k
syntax Type ::= "void" | "int" | "bool" | "string" + | Id // KOOL class + | Type "[" "]" + | "(" Type ")" [bracket] + > Types "->" Type + syntax Types ::= List{Type,","} +
+

Declarations

+
k
syntax Param ::= Type Id + syntax Params ::= List{Param,","} + + syntax Stmt ::= Type Exps ";" [avoid] + | Type Id "(" Params ")" Block // stays like in typed SIMPLE + | "class" Id Block // KOOL + | "class" Id "extends" Id Block // KOOL +
+

Expressions

+
k
syntax Exp ::= Int | Bool | String | Id + | "this" // KOOL + | "super" // KOOL + | "(" Exp ")" [bracket] + | "++" Exp + | Exp "instanceOf" Id [strict(1)] // KOOL + | "(" Id ")" Exp [strict(2)] // KOOL cast + | "new" Id "(" Exps ")" [strict(2)] // KOOL + | Exp "." Id // KOOL + > Exp "[" Exps "]" [strict] + > Exp "(" Exps ")" [strict(2)] // was strict in SIMPLE + | "-" Exp [strict] + | "sizeOf" "(" Exp ")" [strict] + | "read" "(" ")" + > left: + Exp "*" Exp [strict, left] + | Exp "/" Exp [strict, left] + | Exp "%" Exp [strict, left] + > left: + Exp "+" Exp [strict, left] + | Exp "-" Exp [strict, left] + > non-assoc: + Exp "<" Exp [strict, non-assoc] + | Exp "<=" Exp [strict, non-assoc] + | Exp ">" Exp [strict, non-assoc] + | Exp ">=" Exp [strict, non-assoc] + | Exp "==" Exp [strict, non-assoc] + | Exp "!=" Exp [strict, non-assoc] + > "!" Exp [strict] + > left: + Exp "&&" Exp [strict(1), left] + | Exp "||" Exp [strict(1), left] + > "spawn" Block + > Exp "=" Exp [strict(2), right] + + syntax Exps ::= List{Exp,","} [strict, overload(exps)] + syntax Val + syntax Vals ::= List{Val,","} [overload(exps)] +
+

Statements

+
k
syntax Block ::= "{" "}" + | "{" Stmt "}" + + syntax Stmt ::= Block + | Exp ";" [strict] + | "if" "(" Exp ")" Block "else" Block [avoid, strict(1)] + | "if" "(" Exp ")" Block [macro] + | "while" "(" Exp ")" Block + | "for" "(" Stmt Exp ";" Exp ")" Block [macro] + | "print" "(" Exps ")" ";" [strict] + | "return" Exp ";" [strict] + | "return" ";" + | "try" Block "catch" "(" Param ")" Block + | "throw" Exp ";" [strict] + | "join" Exp ";" [strict] + | "acquire" Exp ";" [strict] + | "release" Exp ";" [strict] + | "rendezvous" Exp ";" [strict] + + syntax Stmt ::= Stmt Stmt [right] +
+

Desugaring macros

+
k
rule if (E) S => if (E) S else {} + rule for(Start Cond; Step) {S::Stmt} => {Start while(Cond){S Step;}} + rule T::Type E1::Exp, E2::Exp, Es::Exps; => T E1; T E2, Es; [anywhere] + rule T::Type X::Id = E; => T X; X = E; [anywhere] + + rule class C:Id S => class C extends Object S // KOOL + +endmodule +
+

Semantics

+

We first discuss the new configuration, then we include the semantics of +the constructs borrowed from SIMPLE which stay unchanged, then those +whose semantics had to change, and finally the semantics of the +KOOL-specific constructs.

+
k
module KOOL-TYPED-DYNAMIC + imports KOOL-TYPED-DYNAMIC-SYNTAX + imports DOMAINS +
+

Configuration

+

The configuration of dynamically typed KOOL is almost identical to that of its untyped variant. The only difference is the cell returnType, inside the control cell, whose role is to hold the expected return type of the invoked method. That is because we want to dynamically check that the value that a method returns has the expected type.

+
k
// the syntax declarations below are required because the sorts are + // referenced directly by a production and, because of the way KIL to KORE + // is implemented, the configuration syntax is not available yet + // should simply work once KIL is removed completely + // check other definitions for this hack as well + syntax EnvCell + syntax ControlCellFragment + syntax EnvStackCell + syntax CrntObjCellFragment + + configuration <T color="red"> + <threads color="orange"> + <thread multiplicity="*" type="Set" color="yellow"> + <k color="green"> ($PGM:Stmt ~> execute) </k> + //<br/> // TODO(KORE): support latex annotations #1799 + <control color="cyan"> + <fstack color="blue"> .List </fstack> + <xstack color="purple"> .List </xstack> + <returnType color="LimeGreen"> void </returnType> // KOOL + //<br/> // TODO(KORE): support latex annotations #1799 + <crntObj color="Fuchsia"> // KOOL + <crntClass> Object </crntClass> + <envStack> .List </envStack> + <location multiplicity="?"> .K </location> + </crntObj> + </control> + //<br/> // TODO(KORE): support latex annotations #1799 + <env color="violet"> .Map </env> + <holds color="black"> .Map </holds> + <id color="pink"> 0 </id> + </thread> + </threads> + //<br/> // TODO(KORE): support latex annotations #1799 + <store color="white"> .Map </store> + <busy color="cyan">.Set </busy> + <terminated color="red"> .Set </terminated> + <input color="magenta" stream="stdin"> .List </input> + <output color="brown" stream="stdout"> .List </output> + <nextLoc color="gray"> 0 </nextLoc> + //<br/> // TODO(KORE): support latex annotations #1799 + <classes color="Fuchsia"> // KOOL + <classData multiplicity="*" type="Map" color="Fuchsia"> + <className color="Fuchsia"> Main </className> + <baseClass color="Fuchsia"> Object </baseClass> + <declarations color="Fuchsia"> .K </declarations> + </classData> + </classes> + </T> +
+

Unchanged semantics from dynamically typed SIMPLE

+

The semantics below is taken over from dynamically typed SIMPLE +unchanged. Like for untyped KOOL, the semantics of function/method +declaration and invocation, and of program initialization needs to +change. Moreover, due to subtyping, the semantics of several imported +SIMPLE constructs can be made more general, such as that of the +return statement, that of the assignment, and that of the exceptions. +We removed all these from the imported semantics of SIMPLE below and +gave their modified semantics right after, together with the extended +semantics of thread spawning (which is identical to that of untyped +KOOL).

+
k
syntax Val ::= Int | Bool | String + | array(Type,Int,Int) + syntax Exp ::= Val + syntax Exps ::= Vals + syntax KResult ::= Val + syntax KResult ::= Vals + + + syntax KItem ::= undefined(Type) + + rule <k> T:Type X:Id; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> undefined(T) ...</store> + <nextLoc> L:Int => L +Int 1 </nextLoc> + + + rule <k> T:Type X:Id[N:Int]; => .K ...</k> + <env> Env => Env[X <- L] </env> + <store>... .Map => L |-> array(T, L +Int 1, N) + (L +Int 1)...(L +Int N) |-> undefined(T) ...</store> + <nextLoc> L:Int => L +Int 1 +Int N </nextLoc> + requires N >=Int 0 + + context _:Type _::Exp[HOLE::Exps]; + + + syntax Id ::= "$1" [token] | "$2" [token] + rule T:Type X:Id[N1:Int, N2:Int, Vs:Vals]; + => T[]<Vs> X[N1]; + { + T[][]<Vs> $1=X; + for(int $2=0; $2 <= N1 - 1; ++$2) { + T X[N2,Vs]; + $1[$2] = X; + } + } + + + rule <k> X:Id => V ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V:Val ...</store> [group(lookup)] + + + context ++(HOLE => lvalue(HOLE)) + rule <k> ++loc(L) => I +Int 1 ...</k> + <store>... L |-> (I:Int => I +Int 1) ...</store> [group(increment)] + + + rule I1 + I2 => I1 +Int I2 + rule Str1 + Str2 => Str1 +String Str2 + rule I1 - I2 => I1 -Int I2 + rule I1 * I2 => I1 *Int I2 + rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0 + rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0 + rule - I => 0 -Int I + rule I1 < I2 => I1 <Int I2 + rule I1 <= I2 => I1 <=Int I2 + rule I1 > I2 => I1 >Int I2 + rule I1 >= I2 => I1 >=Int I2 + rule V1:Val == V2:Val => V1 ==K V2 + rule V1:Val != V2:Val => V1 =/=K V2 + rule ! T => notBool(T) + rule true && E => E + rule false && _ => false + rule true || _ => true + rule false || E => E + + + rule V:Val[N1:Int, N2:Int, Vs:Vals] => V[N1][N2, Vs] + [anywhere] + + rule array(_:Type, L:Int, M:Int)[N:Int] => lookup(L +Int N) + requires N >=Int 0 andBool N <Int M [anywhere] + + rule sizeOf(array(_,_,N)) => N + + + syntax Val ::= nothing(Type) + rule <k> return; => return nothing(T); ...</k> <returnType> T </returnType> + + + rule <k> read() => I ...</k> <input> ListItem(I:Int) => .List ...</input> [group(read)] + + + context (HOLE => lvalue(HOLE)) = _ + + + rule {} => .K + rule <k> { S } => S ~> setEnv(Env) ...</k> <env> Env </env> + + + rule S1:Stmt S2:Stmt => S1 ~> S2 + + + rule _:Val; => .K + + + rule if ( true) S else _ => S + rule if (false) _ else S => S + + + rule while (E) S => if (E) {S while(E)S} + + + rule <k> print(V:Val, Es => Es); ...</k> <output>... .List => ListItem(V) </output> + requires typeOf(V) ==K int orBool typeOf(V) ==K string [group(print)] + rule print(.Vals); => .K + + + rule (<thread>... <k>.K</k> <holds>H</holds> <id>T</id> ...</thread> => .Bag) + <busy> Busy => Busy -Set keys(H) </busy> + <terminated>... .Set => SetItem(T) ...</terminated> + + rule <k> join T:Int; => .K ...</k> + <terminated>... SetItem(T) ...</terminated> + + rule <k> acquire V:Val; => .K ...</k> + <holds>... .Map => V |-> 0 ...</holds> + <busy> Busy (.Set => SetItem(V)) </busy> + requires (notBool(V in Busy:Set)) [group(acquire)] + + rule <k> acquire V; => .K ...</k> + <holds>... V:Val |-> (N:Int => N +Int 1) ...</holds> + + rule <k> release V:Val; => .K ...</k> + <holds>... V |-> (N => N:Int -Int 1) ...</holds> + requires N >Int 0 + + rule <k> release V; => .K ...</k> <holds>... V:Val |-> 0 => .Map ...</holds> + <busy>... SetItem(V) => .Set ...</busy> + + rule <k> rendezvous V:Val; => .K ...</k> + <k> rendezvous V; => .K ...</k> [group(rendezvous)] +
+

Unchanged auxiliary operations from dynamically typed SIMPLE

+
k
syntax Stmt ::= mkDecls(Params,Vals) [function] + rule mkDecls((T:Type X:Id, Ps:Params), (V:Val, Vs:Vals)) + => T X=V; mkDecls(Ps,Vs) + rule mkDecls(.Params,.Vals) => {} + + syntax Exp ::= lookup(Int) + rule <k> lookup(L) => V ...</k> <store>... L |-> V:Val ...</store> [group(lookup)] + + syntax KItem ::= setEnv(Map) + rule <k> setEnv(Env) => .K ...</k> <env> _ => Env </env> + rule (setEnv(_) => .K) ~> setEnv(_) + + syntax Exp ::= lvalue(K) + syntax Val ::= loc(Int) + rule <k> lvalue(X:Id => loc(L)) ...</k> <env>... X |-> L:Int ...</env> + + context lvalue(_::Exp[HOLE::Exps]) + context lvalue(HOLE::Exp[_::Exps]) + + rule lvalue(lookup(L:Int) => loc(L)) + + syntax Type ::= Type "<" Vals ">" [function] + rule T:Type<_,Vs:Vals> => T[]<Vs> + rule T:Type<.Vals> => T + + syntax Map ::= Int "..." Int "|->" K [function] + rule N...M |-> _ => .Map requires N >Int M + rule N...M |-> K => N |-> K (N +Int 1)...M |-> K requires N <=Int M + + syntax Type ::= typeOf(K) [function] + rule typeOf(_:Int) => int + rule typeOf(_:Bool) => bool + rule typeOf(_:String) => string + rule typeOf(array(T,_,_)) => (T[]) + rule typeOf(undefined(T)) => T + rule typeOf(nothing(T)) => T + + syntax Types ::= getTypes(Params) [function] + rule getTypes(T:Type _:Id) => T, .Types + rule getTypes(T:Type _:Id, P, Ps) => T, getTypes(P,Ps) + rule getTypes(.Params) => void, .Types +
+

Changes to the existing dynamically typed SIMPLE semantics

+

We extend/change the semantics of several SIMPLE constructs in order +to take advantage of the richer KOOL semantic infrastructure and thus +get more from the existing SIMPLE constructs.

+

Program initialization

+

Like in untyped KOOL.

+
k
syntax KItem ::= "execute" + rule <k> execute => new Main(.Exps); </k> <env> .Map </env> +
+

Method application

+

The only change to untyped KOOL's values is that method closures are +now typed (their first argument holds their type):

+
k
syntax Val ::= objectClosure(Id,List) + | methodClosure(Type,Id,Int,Params,Stmt) +
+

The type held by a method closure will be the entire type of the method, not only its result type like the lambda-closure of typed SIMPLE. The reason for this change comes from the need to dynamically upcast values when passed to contexts where values of superclass types are expected; since we want method closures to be first-class-citizen values in our language, we have to be able to dynamically upcast them, and in order to do that elegantly it is convenient to store the entire "current type" of the method closure instead of just its result type. Note that this was unnecessary in the semantics of the dynamically typed SIMPLE language.

+

Method closure application needs to also set a new return type in the returnType cell, like in dynamically typed SIMPLE, in order for the values returned by its body to be checked against the return type of the method. To do this correctly, we also need to stack the current contents of the returnType cell and then pop it when the method returns. We have to do the same with the current object environment, so we group them together in the stack frame.

+
k
syntax KItem ::= fstackFrame(Map, K, List, Type, K) + + rule <k> methodClosure(_->T,Class,OL,Ps,S)(Vs:Vals) ~> K + => mkDecls(Ps,Vs) S return; </k> + <env> Env => .Map </env> + <store>... OL |-> objectClosure(_, EStack)...</store> + //<br/> // TODO(KORE): support latex annotations #1799 + <control> + <fstack> .List => ListItem(fstackFrame(Env, K, XS, T', <crntObj> Obj' </crntObj>)) ...</fstack> + <xstack> XS </xstack> + <returnType> T' => T </returnType> + <crntObj> Obj' => <crntClass> Class </crntClass> <envStack> EStack </envStack> </crntObj> + </control> +
+

At method return, we have to check that the type of the returned value is a subtype of the expected return type. Moreover, if that is the case, then we also upcast the returned value to one of the expected type. The computation item unsafeCast(V,T) changes the type of V to T without any additional checks; however, it only does so when V is an object or a method, otherwise it returns V unchanged.

+
k
rule <k> return V:Val; ~> _ + => subtype(typeOf(V), T) ~> true? ~> unsafeCast(V, T) ~> K + </k> + <control> + <fstack> ListItem(fstackFrame(Env, K, XS, RT, <crntObj> CO </crntObj>)) => .List ...</fstack> + <xstack> _ => XS </xstack> + <returnType> T:Type => RT </returnType> + <crntObj> _ => CO </crntObj> + </control> + <env> _ => Env </env> +
+

Assignment

+

Typed KOOL allows assigning subtype instance values to supertype lvalues. The semantics of assignment below is similar in spirit to dynamically typed SIMPLE's, but a check is performed that the assigned value's type is a subtype of the location's type. If that is the case, then the assigned value is returned as a result and stored, but it is appropriately upcast first, so the context will continue to see a value of the expected type of the location. Note that the type of a location is implicit in the type of its contents and it never changes during the execution of a program; its type is assigned when the location is allocated and initialized, and afterwards only type-preserving values are allowed to be stored in each location.

+
k
rule <k> loc(L) = V:Val + => subtype(typeOf(V),typeOf(V')) ~> true? + ~> unsafeCast(V, typeOf(V')) ...</k> + <store>... L |-> (V' => unsafeCast(V, typeOf(V'))) ...</store> + [group(assignment)] +
+
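The following sketch of a typed KOOL program (hypothetical classes; constructors are written here with a void return type, an assumption of this sketch rather than something imposed by the definition) shows both checks in action: make() returns a B where an A is expected, so the returned object is upcast to A by the return rule, and the assignment to a then succeeds; the B layer of the object is nevertheless preserved:

class A {
  int tag;
  void A() { tag = 1; }
}
class B extends A {
  void B() { super.A(); tag = 2; }
}
class Main {
  A make() { return new B(); }       // a B object is returned where an A is expected
  void Main() {
    A a = make();                     // the result was upcast to A
    print(a.tag, "\n");               // prints 2
    if (a instanceOf B) { print("still a B\n"); } else { print("just an A\n"); }
  }
}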

Typed exceptions

+

Exceptions are now propagated until a catch that can handle them is encountered.

+
k
syntax KItem ::= xstackFrame(Param, Stmt, K, Map, K) + syntax KItem ::= "popx" + + rule <k> (try S1 catch(P) S2 => S1 ~> popx) ~> K </k> + <control> + <xstack> .List => ListItem(xstackFrame(P, S2, K, Env, C)) ...</xstack> + C + </control> + <env> Env </env> + + rule <k> popx => .K ...</k> + <xstack> ListItem(_) => .List ...</xstack> + + rule <k> throw V:Val; ~> _ + => if (subtype(typeOf(V),T)) { T X = V; S2 } else { throw V; } ~> K + </k> + <control> + <xstack> ListItem(xstackFrame(T:Type X:Id, S2, K, Env, C)) => .List ...</xstack> + (_ => C) + </control> + <env> _ => Env </env> +
+
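A small sketch of a typed exception being caught (hypothetical class Err; the constructor again uses a void return type as an assumption of this sketch):

class Err {
  int code;
  void Err(int c) { code = c; }
}
class Main {
  void Main() {
    try { throw new Err(42); }
    catch(Err e) { print(e.code, "\n"); }   // the thrown value's type is a subtype of Err, so it is caught; prints 42
  }
}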

Spawn

+

Like in untyped KOOL.

+
k
rule <thread>... + <k> spawn S => !T:Int ...</k> + <env> Env </env> + <crntObj> Obj </crntObj> + ...</thread> + (.Bag => <thread>... + <k> S </k> + <env> Env </env> + <id> !T </id> + <crntObj> Obj </crntObj> + ...</thread>) +
+

Semantics of the new KOOL constructs

+

Class declaration

+

Like in untyped KOOL.

+
k
rule <k> class Class1 extends Class2 { S } => .K ...</k> + <classes>... (.Bag => <classData> + <className> Class1 </className> + <baseClass> Class2 </baseClass> + <declarations> S </declarations> + </classData>) + ...</classes> +
+

Method declaration

+

Methods are now typed and we need to store their types in their +closures, so that their type contract can be checked at invocation +time. The rule below is conceptually similar to that of untyped KOOL; +the only difference is the addition of the types.

+
k
rule <k> T:Type F:Id(Ps:Params) S => .K ...</k> + <crntClass> C </crntClass> + <location> OL </location> + <env> Env => Env[F <- L] </env> + <store>... .Map => L|->methodClosure(getTypes(Ps)->T,C,OL,Ps,S) ...</store> + <nextLoc> L => L +Int 1 </nextLoc> +
+

New

+

The semantics of new in dynamically typed KOOL is also similar to that in untyped KOOL, the main difference being the management of the return types. Indeed, when a new object is created we also have to stack the current type in the returnType cell in order for it to be recovered after the creation of the new object. Only the first rule below needs to be changed; the others are identical to those in untyped KOOL.

+
k
syntax KItem ::= envStackFrame(Id, Map) + + rule <k> new Class:Id(Vs:Vals) ~> K + => create(Class) ~> (storeObj ~> ((Class(Vs)); return this;)) </k> + <env> Env => .Map </env> + <nextLoc> L:Int => L +Int 1 </nextLoc> + //<br/> // TODO(KORE): support latex annotations #1799 + <control> + <xstack> XS </xstack> + <crntObj> Obj + => <crntClass> Object </crntClass> + <envStack> ListItem(envStackFrame(Object, .Map)) </envStack> + <location> L </location> + </crntObj> + <returnType> T => Class </returnType> + <fstack> .List => ListItem(fstackFrame(Env, K, XS, T, <crntObj>Obj</crntObj>)) ...</fstack> + </control> + + syntax KItem ::= create(Id) + + rule <k> create(Class:Id) + => create(Class1) ~> setCrntClass(Class) ~> S ~> addEnvLayer ...</k> + <className> Class </className> + <baseClass> Class1:Id </baseClass> + <declarations> S </declarations> + + rule <k> create(Object) => .K ...</k> + + syntax KItem ::= setCrntClass(Id) + + rule <k> setCrntClass(C) => .K ...</k> + <crntClass> _ => C </crntClass> + + syntax KItem ::= "addEnvLayer" + + rule <k> addEnvLayer => .K ...</k> + <env> Env => .Map </env> + <crntClass> Class:Id </crntClass> + <envStack> .List => ListItem(envStackFrame(Class, Env)) ...</envStack> + + syntax KItem ::= "storeObj" + + rule <k> storeObj => .K ...</k> + <crntObj> + <crntClass> Class </crntClass> + <envStack> EStack </envStack> + (<location> L:Int </location> => .Bag) + </crntObj> + <store>... .Map => L |-> objectClosure(Class, EStack) ...</store> +
+

Self reference

+

Like in untyped KOOL.

+
k
rule <k> this => objectClosure(Class, EStack) ...</k> + <crntObj> + <crntClass> Class </crntClass> + <envStack> EStack </envStack> + ... + </crntObj> +
+

Object member access

+

Like in untyped KOOL.

+
k
rule <k> X:Id => this . X ...</k> <env> Env:Map </env> + requires notBool(X in keys(Env)) + + context HOLE . _::Id requires (HOLE =/=K super) + +/* rule objectClosure(<crntObj> <crntClass> Class:Id </crntClass> + <envStack>... ListItem((Class,EnvC:EnvCell)) EStack </envStack> </crntObj>) + . X:Id + => lookupMember(<envStack> ListItem((Class,EnvC)) EStack </envStack>, X) */ + rule objectClosure(Class:Id, + ListItem(envStackFrame(Class,Env)) EStack) + . X:Id + => lookupMember(ListItem(envStackFrame(Class,Env)) EStack, X) + rule objectClosure(Class:Id, + (ListItem(envStackFrame(Class':Id,_)) => .List) _EStack) + . _X:Id + requires Class =/=K Class' + +/* rule <k> super . X => lookupMember(<envStack>EStack</envStack>, X) ...</k> + <crntClass> Class </crntClass> + <envStack>... ListItem((Class,EnvC:EnvCell)) EStack </envStack> */ + rule <k> super . X => lookupMember(EStack, X) ...</k> + <crntClass> Class:Id </crntClass> + <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack> + rule <k> super . _X ...</k> + <crntClass> Class:Id </crntClass> + <envStack> (ListItem(envStackFrame(Class':Id,_)) => .List) _EStack </envStack> + requires Class =/=K Class' +
+

Method invocation

+

The method lookup is the same as in untyped KOOL.

+
k
rule <k> (X:Id => V)(_:Exps) ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V:Val ...</store> [group(lookup)] + + rule <k> (X:Id => this . X)(_:Exps) ...</k> + <env> Env </env> + requires notBool(X in keys(Env)) + + context HOLE._::Id(_) requires HOLE =/=K super + + rule (objectClosure(_, EStack) . X + => lookupMember(EStack, X:Id))(_:Exps) + +/* rule <k> (super . X + => lookupMember(<envStack>EStack</envStack>,X))(_:Exps)...</k> + <crntClass> Class </crntClass> + <envStack>... ListItem((Class,_)) EStack </envStack> */ + rule <k> (super . X + => lookupMember(EStack,X))(_:Exps)...</k> + <crntClass> Class:Id </crntClass> + <envStack> ListItem(envStackFrame(Class,_)) EStack </envStack> + rule <k> (super . _X)(_:Exps)...</k> + <crntClass> Class:Id </crntClass> + <envStack> (ListItem(envStackFrame(Class':Id,_)) => .List) _EStack </envStack> + requires Class =/=K Class' + + // TODO(KORE): fix getKLabel #1801 + rule (A:Exp(B:Exps))(C:Exps) => A(B) ~> #freezerFunCall(C) + rule (A:Exp[B:Exps])(C:Exps) => A[B] ~> #freezerFunCall(C) + rule V:Val ~> #freezerFunCall(C:Exps) => V(C) + syntax KItem ::= "#freezerFunCall" "(" K ")" + /* + context HOLE(_:Exps) + requires getKLabel HOLE ==KLabel '_`(_`) orBool getKLabel HOLE ==KLabel '_`[_`] + */ + + rule <k> (lookup(L) => V)(_:Exps) ...</k> <store>... L |-> V:Val ...</store> + [group(lookup)] +
+

Instance of

+

Like in untyped KOOL.

+
k
rule objectClosure(_, ListItem(envStackFrame(C,_)) _) + instanceOf C => true + + rule objectClosure(_, (ListItem(envStackFrame(C::Id,_)) => .List) _) + instanceOf C' requires C =/=K C' + + rule objectClosure(_, .List) instanceOf _ => false +
+

Cast

+

Unlike in untyped KOOL, in typed KOOL we actually check that the object +can indeed be cast to the claimed type.

+
k
rule (C:Id) objectClosure(Irrelevant, EStack) + => objectClosure(Irrelevant, EStack) instanceOf C ~> true? + ~> objectClosure(C, EStack) +
+
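For instance, in a hypothetical program declaring class A and class B extends A (with the usual trivial constructors), this rule makes casts succeed or get stuck depending on the actual object:

    A a = new B();
    B b = (B) a;    // a instanceOf B holds, so true? dissolves and the cast yields a B view of the object
    A c = new A();
    B d = (B) c;    // c instanceOf B is false, so the computation gets stuck at true?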

KOOL-specific auxiliary declarations and operations

+

Objects as lvalues

+

Like in untyped KOOL.

+
k
rule <k> lvalue(X:Id => this . X) ...</k> <env> Env </env> + requires notBool(X in keys(Env)) + + context lvalue((HOLE . _)::Exp) + +/* rule lvalue(objectClosure(<crntObj> <crntClass> C </crntClass> + <envStack>... ListItem((C,EnvC:EnvCell)) EStack </envStack> </crntObj>) + . X + => lookupMember(<envStack> ListItem((C,EnvC)) EStack </envStack>, + X)) */ + rule lvalue(objectClosure(C:Id, + ListItem(envStackFrame(C,Env)) EStack) + . X + => lookupMember(ListItem(envStackFrame(C,Env)) EStack, + X)) + rule lvalue(objectClosure(C, + (ListItem(envStackFrame(C',_)) => .List) _EStack) + . _X) + requires C =/=K C' +
+

Lookup member

+

Like in untyped KOOL.

+
k
syntax Exp ::= lookupMember(List,Id) [function] + + rule lookupMember(ListItem(envStackFrame(_, X |-> L _)) _, X) => lookup(L) + + // TODO: fix rule below as shown once we support functions with deep rewrites + // rule lookupMember(<envStack> ListItem((_, <env> Env </env>)) => .List + // ...</envStack>, X) + // requires notBool(X in keys(Env)) + rule lookupMember(ListItem(envStackFrame(_, Env)) L, X) + => lookupMember(L, X) + requires notBool(X in keys(Env)) +
+

typeOf for the additional values

+
k
rule typeOf(objectClosure(C,_)) => C + rule typeOf(methodClosure(T:Type,_,_,_Ps:Params,_)) => T +
+

Subtype checking

+

The subclass relation induces a subtyping relation.

+
k
syntax Exp ::= subtype(Types,Types) + + rule subtype(T:Type, T) => true + + rule <k> subtype(C1:Id, C:Id) => subtype(C2, C) ...</k> + <className> C1 </className> + <baseClass> C2:Id </baseClass> + requires C1 =/=K C + + rule subtype(Object,Class:Id) => false + requires Class =/=K Object + + rule subtype(Ts1->T2,Ts1'->T2') => subtype(((T2)::Type,Ts1'),((T2')::Type,Ts1)) + +// Note that the following rule would be wrong! +// rule subtype(T[],T'[]) => subtype(T,T') + + rule subtype((T:Type,Ts),(T':Type,Ts')) => subtype(T,T') && subtype(Ts,Ts') + requires Ts =/=K .Types + rule subtype(.Types,.Types) => true +
+
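As a small sketch of the co-/contra-variance rule at work, assume a hypothetical program containing class B extends A; then the rules above give:

    subtype(B, A)            // true, by the class hierarchy rules
    subtype(A -> B, B -> A)  // true: co-variant in the codomain and contra-variant in the domain
    subtype(B -> A, A -> B)  // false, since it requires subtype(A, B), and A is not a subclass of B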

Unsafe Casting

+

Performs unsafe casting. One should only use it in combination with +the subtype relation above.

+
k
syntax Val ::= unsafeCast(Val,Type) [function] + + rule unsafeCast(objectClosure(_,EStack), C:Id) + => objectClosure(C,EStack) + + rule unsafeCast(methodClosure(_T',C,OL,Ps,S), T) => methodClosure(T,C,OL,Ps,S) + + rule unsafeCast(V:Val, T:Type) => V requires typeOf(V) ==K T +
+

Generic guard

+

A generic computational guard: it allows the computation to continue +only if a prefix guard evaluates to true.

+
k
syntax KItem ::= "true?" + rule true ~> true? => .K + +endmodule +
+

Go to Lesson 3, KOOL typed static.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/tests/regression-new/pl-tutorial/2_languages/2_kool/2_typed/2_static/kool-typed-static/index.html b/k-distribution/tests/regression-new/pl-tutorial/2_languages/2_kool/2_typed/2_static/kool-typed-static/index.html new file mode 100644 index 00000000000..03963b6c3f7 --- /dev/null +++ b/k-distribution/tests/regression-new/pl-tutorial/2_languages/2_kool/2_typed/2_static/kool-typed-static/index.html @@ -0,0 +1,1432 @@ + + + + + + + + + + + + + + +KOOL — Typed — Static | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

KOOL — Typed — Static

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K static semantics of the typed KOOL language. +It extends the static semantics of typed SIMPLE with static semantics +for the object-oriented constructs. Also, the static semantics of +some of the existing SIMPLE constructs need to change, in order to +become more generous with regards to the set of accepted programs, +mostly due to subtyping. For example, the assignment construct +x = e required that both the variable x and the +expression e had the same type in SIMPLE. In KOOL, the type +of e can be a subtype of the type of x. +Specifically, we define the following typing policy for KOOL, +everything else not mentioned below borrowing its semantics from +SIMPLE:

+
    +
  • +

    Each class C yields a homonymous type, which can be +explicitly used in programs to type variables and methods, possibly in +combination with other types.

    +
  • +
  • +

    Since now we have user-defined types, we check that each type +used in a KOOL program is well-formed, that is, it is constructed only +from primitive and class types corresponding to declared classes.

    +
  • +
  • +

    Class members and their types form a class type +environment. Each class will have such a type environment. +Each member in a class is allowed to be declared only once. Since in +KOOL we allow methods to be assigned to fields, we make no distinction +between field and method members; in other words, we reject programs +declaring both a field and a method with the same name.

    +
  • +
  • +

If an identifier is not found in the local type environment, it +will be searched for in the current class type environment. If not +there, then it will be searched for in its superclass' type +environment, and so on and so forth. If it is still not found by the +time the Object class is reached, a typing error is reported.

    +
  • +
  • +

    The assignment allows variables to be assigned values of +more concrete types. The result type of the assignment expression +construct will be the (more abstract) type of the assigned variable, +and not the (more concrete) type of the expression, like in Java.

    +
  • +
  • +

    Exceptions are changed (from SIMPLE) to allow throwing and +catching only objects, like in Java. Also, unlike in SIMPLE, we do +not check whether the type of the thrown exception matches the type of +the caught variable, because exceptions can be caught by other +try/catch blocks, even by ones in other methods. To avoid +having to annotate each method with what exceptions it can throw, we +prefer to not check the type safety of exceptions (although this is an +excellent homework!). We only check that the try block +type-checks and that the catch block type-checks after we bind +the caught variable to its claimed type.

    +
  • +
  • +

    Class declarations are not allowed to have any cycles in their +extends relation. Such cycles would lead to non-termination of +new, as it actually does in the dynamic semantics of KOOL +where no such circularity checks are performed.

    +
  • +
  • +

    Methods overriding other methods should be in the right subtyping +relationship with the overridden methods: co-variant in the codomain +and contra-variant in the domain.

    +
  • +
+
k
module KOOL-TYPED-STATIC-SYNTAX + imports DOMAINS-SYNTAX +
+

Syntax

+

The syntax of statically typed KOOL is identical to that of +dynamically typed KOOL, both taking the same programs as input. +What differs is the K strictness attributes. Like in statically +typed SIMPLE, almost all language constructs are strict now, since we +want each of them to type its arguments almost all the time. Like in the +other two KOOL definitions, we prefer to copy and then modify/extend +the syntax of statically typed SIMPLE.

+

Note: This paragraph is old, now we can do things better. We keep +it here only for historical reasons, to see how much we used to suffer :)

+

Annoying K-tool technical problem: +Currently, the K tool treats the "non-terminal" productions (i.e., +productions consisting of just one non-terminal), also called +"subsorting" productions, differently from the other productions. +Specifically, it does not insert a node in the AST for them. This may +look desirable at first, but it has a big problem: it does not allow +us to treat the subsort differently in different contexts. For +example, since we want Id to be both a type (a class name) and a +program variable, and since we want expressions to reduce to their +types, we are in an impossible situation in which we do not know how +to treat an identifier in the semantics: as a type, i.e., a result of +computations, or as a program variable, i.e., a non-result. Ideally, +we would like to tag the identifiers at parse-time with their local +interpretation, but that, unfortunately, is not possible with the +current parsing capabilities of the K tool, because it requires inserting +additional information in the AST for the subsort productions. +This will be fixed soon. Until then, unfortunately, we have to do the +job of the parser manually. Instead of subsorting Id directly +to Type, we "wrap" it first, say with a wrapper called +class(...), exactly how the parser should have done. +The major drawback of this is that all the typed KOOL programs +in kool/typed/programs need to also be modified to always +declare class types accordingly. The modified programs can be found +in kool/typed/static/programs. So make sure you execute the +static semantics of KOOL using the modified programs. To avoid seeing +the wrapper in the generated documentation, we associate an +"invisibility" latex attribute with it below.

+
k
syntax Id ::= "Object" [token] | "Main" [token] +
+

Types

+
k
syntax Type ::= "void" | "int" | "bool" | "string" + | Id [symbol("class"), avoid] // see next + | Type "[" "]" + | "(" Type ")" [bracket] + > Types "->" Type + + syntax Types ::= List{Type,","} [overload(exps)] +
+

Declarations

+
k
syntax Param ::= Type Id + syntax Params ::= List{Param,","} + + syntax Stmt ::= Type Exps ";" [avoid] + | Type Id "(" Params ")" Block + | "class" Id Block + | "class" Id "extends" Id Block +
+

Expressions

+
k
syntax FieldReference ::= Exp "." Id [strict(1)] + syntax ArrayReference ::= Exp "[" Exps "]" [strict] + + syntax Exp ::= Int | Bool | String | Id + | "this" + | "super" + | "(" Exp ")" [bracket] + | "++" Exp + | Exp "instanceOf" Id [strict(1)] + | "(" Id ")" Exp [strict(2)] + | "new" Id "(" Exps ")" [strict(2)] + > Exp "(" Exps ")" [strict] + | "-" Exp [strict] + | "sizeOf" "(" Exp ")" [strict] + | "read" "(" ")" + > left: + Exp "*" Exp [strict, left] + | Exp "/" Exp [strict, left] + | Exp "%" Exp [strict, left] + > left: + Exp "+" Exp [strict, left] + | Exp "-" Exp [strict, left] + > non-assoc: + Exp "<" Exp [strict, non-assoc] + | Exp "<=" Exp [strict, non-assoc] + | Exp ">" Exp [strict, non-assoc] + | Exp ">=" Exp [strict, non-assoc] + | Exp "==" Exp [strict, non-assoc] + | Exp "!=" Exp [strict, non-assoc] + > "!" Exp [strict] + > left: + Exp "&&" Exp [strict, left] + | Exp "||" Exp [strict, left] + > "spawn" Block // not strict: to check return and exceptions + > Exp "=" Exp [strict(2), right] + + syntax Exp ::= FieldReference | ArrayReference + syntax priority _.__KOOL-TYPED-STATIC-SYNTAX > _[_]_KOOL-TYPED-STATIC-SYNTAX > _(_)_KOOL-TYPED-STATIC-SYNTAX + + syntax Exps ::= List{Exp,","} [strict, overload(exps)] +
+

Statements

+
k
syntax Block ::= "{" "}" + | "{" Stmt "}" + + syntax Stmt ::= Block + | Exp ";" [strict] + | "if" "(" Exp ")" Block "else" Block [avoid, strict] + | "if" "(" Exp ")" Block [macro] + | "while" "(" Exp ")" Block [strict] + | "for" "(" Stmt Exp ";" Exp ")" Block [macro] + | "return" Exp ";" [strict] + | "return" ";" + | "print" "(" Exps ")" ";" [strict] + | "try" Block "catch" "(" Param ")" Block [strict(1)] + | "throw" Exp ";" [strict] + | "join" Exp ";" [strict] + | "acquire" Exp ";" [strict] + | "release" Exp ";" [strict] + | "rendezvous" Exp ";" [strict] + + syntax Stmt ::= Stmt Stmt [seqstrict, right] +
+

Desugaring macros

+
k
rule if (E) S => if (E) S else {} + rule for(Start Cond; Step) {S:Stmt} => {Start while(Cond){S Step;}} + rule T:Type E1:Exp, E2:Exp, Es:Exps; => T E1; T E2, Es; [anywhere] + rule T:Type X:Id = E; => T X; X = E; [anywhere] + + rule class C:Id S => class C extends Object S + +endmodule +
+
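For illustration, a hypothetical fragment like the one below is rewritten by these macros before anything else happens:

    int x = 7, y;        // becomes:  int x; x = 7; int y;
    for(int i = 0; i < y; ++i) { x = x + i; }
                         // becomes:  { int i; i = 0; while(i < y) { x = x + i; ++i; } }
    class C { int x; }   // becomes:  class C extends Object { int x; }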

Static semantics

+

We first discuss the configuration, then give the static semantics +taken over unchanged from SIMPLE, then discuss the static semantics of +SIMPLE syntactic constructs that need to change, and in the end we +discuss the static semantics and additional checks specifically +related to the KOOL proper syntax.

+
k
module KOOL-TYPED-STATIC + imports KOOL-TYPED-STATIC-SYNTAX + imports DOMAINS +
+

Configuration

+

The configuration of our type system consists of a tasks +cell with the same meaning as in statically typed SIMPLE, of an +out cell streamed to the standard output that will be used to +display typing error messages, and of a cell classes holding +data about each class in a separate class cell. The +task cells now have two additional optional subcells, namely +ctenvT and inClass. The former holds a temporary +class type environment; its contents will be transferred into the +ctenv cell of the corresponding class as soon as all the +fields and methods in the task are processed. In fact, there will be +three types of tasks in the subsequent semantics, each determined by +the subset of cells that it holds:

+
    +
  1. +

    Main task, holding only a k cell holding the +original program as a set of classes. The role of this task is to +process each class, generating a class task (see next) for each.

    +
  2. +
  3. +

    Class task, holding k, ctenvT, and +inClass subcells. The role of this task type is to process +a class' contents, generating a class type environment in the +ctenvT cell and a method task (see next) for each method in +the class. To avoid interference with object member lookup rules +below, it is important to add the class type environment to a class +atomically; this is the reason for which we use ctenvT +temporary cells within class tasks (instead of adding each member +incrementally to the class' type environment).

    +
  4. +
  5. +

    Method task, holding k, tenv and +return cells. These tasks are similar to SIMPLE's function +tasks, so we do not discuss them here any further.

    +
  6. +
+

Each class cell holds its name (in the className +cell) and the name of the class it extends (in the baseClass +cell), as well as its type environment (in the ctenv cell) +and the set of all its superclasses (in the baseClasses cell). +The latter is useful, for example, for checking whether there are cycles +in the class extends relation.

+
k
configuration <T multiplicity="?" color="yellow"> + <tasks color="orange" multiplicity="?"> + <task multiplicity="*" color="yellow" type="Set"> + <k color="green"> $PGM:Stmt </k> + <tenv multiplicity="?" color="cyan"> .Map </tenv> + <ctenvT multiplicity="?" color="blue"> .Map </ctenvT> + <returnType multiplicity="?" color="black"> void </returnType> + <inClass multiplicity="?" color="Fuchsia"> .K </inClass> + </task> + </tasks> +// <br/> + <classes color="Fuchsia"> + <classData multiplicity="*" type="Map"> + <className color="Fuchsia"> Object </className> + <baseClass color="Fuchsia"> .K </baseClass> + <baseClasses color="Fuchsia"> .Set </baseClasses> + <ctenv multiplicity="?" color="blue"> .Map </ctenv> + </classData> + </classes> + </T> + <output color="brown" stream="stdout"> .List </output> +
+

Unchanged semantics from statically typed SIMPLE

+

The syntax and rules below are borrowed unchanged from statically +typed SIMPLE, so we do not discuss them much here.

+
k
syntax Exp ::= Type + syntax Exps ::= Types + syntax BlockOrStmtType ::= "block" | "stmt" + syntax Type ::= BlockOrStmtType + syntax Block ::= BlockOrStmtType + syntax KResult ::= Type + | Types // TODO: should not be needed + + + context _:Type _::Exp[HOLE::Exps]; + + rule T:Type E:Exp[int,Ts:Types]; => T[] E[Ts]; + rule T:Type E:Exp[.Types]; => T E; + + + rule <task>... <k> _:BlockOrStmtType </k> <tenv> _ </tenv> ...</task> => .Bag + + + rule _:Int => int + rule _:Bool => bool + rule _:String => string + + + rule <k> X:Id => T ...</k> <tenv>... X |-> T ...</tenv> + + + context ++(HOLE => ltype(HOLE)) + rule ++ int => int + rule int + int => int + rule string + string => string + rule int - int => int + rule int * int => int + rule int / int => int + rule int % int => int + rule - int => int + rule int < int => bool + rule int <= int => bool + rule int > int => bool + rule int >= int => bool + rule T:Type == T => bool + rule T:Type != T => bool + rule bool && bool => bool + rule bool || bool => bool + rule ! bool => bool + + + rule (T[])[int, Ts:Types] => T[Ts] + rule T:Type[.Types] => T + + rule sizeOf(_T[]) => int + + + rule read() => int + + rule print(T:Type, Ts => Ts); requires T ==K int orBool T ==K string + rule print(.Types); => stmt + + + context (HOLE => ltype(HOLE)) = _ + + + rule <k> return; => stmt ...</k> <returnType> _ </returnType> + + + rule {} => block + + rule <task> <k> {S:Stmt} => block ...</k> <tenv> Rho </tenv> R </task> + (.Bag => <task> <k> S </k> <tenv> Rho </tenv> R </task>) + + rule _:Type; => stmt + rule if (bool) block else block => stmt + rule while (bool) block => stmt + + rule join int; => stmt + rule acquire _:Type; => stmt + rule release _:Type; => stmt + rule rendezvous _:Type; => stmt + + syntax Stmt ::= BlockOrStmtType + rule _:BlockOrStmtType _:BlockOrStmtType => stmt +
+

Unchanged auxiliary operations from dynamically typed SIMPLE

+
k
syntax Stmt ::= mkDecls(Params) [function] + rule mkDecls(T:Type X:Id, Ps:Params) => T X; mkDecls(Ps) + rule mkDecls(.Params) => {} + + syntax LValue ::= Id + | FieldReference + | ArrayReference + syntax Exp ::= LValue + + syntax Exp ::= ltype(Exp) +// We would like to say: +// context ltype(HOLE:LValue) +// but we currently cannot type the HOLE + context ltype(HOLE) requires isLValue(HOLE) + +// OLD approach: +// syntax Exp ::= ltype(Exp) [function] +// rule ltype(X:Id) => X +// rule ltype(E:Exp [Es:Exps]) => E[Es] + + syntax Types ::= getTypes(Params) [function] + rule getTypes(T:Type _:Id) => T, .Types + rule getTypes(T:Type _:Id, P, Ps) => T, getTypes(P,Ps) + rule getTypes(.Params) => void, .Types +
+

Changes to the existing statically typed SIMPLE semantics

+

Below we give the new static semantics for language constructs that +come from SIMPLE, but whose SIMPLE static semantics was too +restrictive or too permissive and thus had to change.

+

Local variable declaration

+

Since we can define new types in KOOL (corresponding to classes), the +variable declaration needs to now check that the claimed types exist. +The operation checkType, defined at the end of this module, +checks whether the argument type is correct (it actually works with +lists of types as well).

+
k
rule <k> T:Type X:Id; => checkType(T) ~> stmt ...</k> + <tenv> Rho => Rho[X <- T] </tenv> +
+
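For example, assuming a hypothetical class Account is declared somewhere in the program, the declarations below are handled by this rule as follows:

    int n;        // checkType(int) dissolves; n gets type int in the type environment
    Account a;    // checkType succeeds because a class named Account is declared
    Acount b;     // misspelled class name: checkType gets stuck, so type checking never completes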

Class member declaration

+

In class tasks, variable declarations mean class member declarations. +Since we reduce method declarations to variable declarations (see +below), a variable declaration in a class task can mean either a field +or a method declaration. Unlike local variable declarations, which +can shadow previous homonymous local or member declarations, member +declarations are regarded as a set, so we disallow multiple +declarations for the same member (one could improve upon this, like in +Java, by treating members with different types or number of arguments +as different, etc., but we do not do it here). We also issue an error +message if one attempts to redeclare the same class member. The +framed variable declaration in the second rule below should be read +"stuck". In fact, it is nothing but a unary operation called +stuck, which takes a K-term as argument and does nothing +with it; this stuck operation is displayed as a frame in this +PDF document because of its latex attribute (see the ASCII .k file, +at the end of this module).

+
k
rule <k> T:Type X:Id; => checkType(T) ~> stmt ...</k> + <ctenvT> Rho (.Map => X |-> T) </ctenvT> + requires notBool(X in keys(Rho)) + + rule <k> T:Type X:Id; => stuck(T X;) ...</k> + <ctenvT>... X |-> _ ...</ctenvT> + <inClass> C:Id </inClass> +// <br/> + <output>... .List => ListItem("Member \"" +String Id2String(X) + +String "\" declared twice in class \"" + +String Id2String(C) +String "\"!\n") </output> +
+
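For instance, a hypothetical class declaring two members with the same name triggers the second rule:

    class C {
      int x;
      bool x;    // reported:  Member "x" declared twice in class "C"!
    }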

Method declaration

+

A method declaration requires two conceptual checks to be performed: +first, that the method's type is consistent with the type of the +homonymous method that it overrides, if any; and second, that its body +types correctly. At the same time, it should also be added to the +type environment of its class. The first conceptual task is performed +using the checkMethod operation defined below, and the second +by generating a corresponding method task. To add it to the class +type environment, we take advantage of the fact that KOOL is higher +order and reduce the problem to a field declaration problem, which we +have already defined. The role of the ctenvT cell in the +rule below is to structurally ensure that the method declaration takes +place in a class task (we do not want to allow methods to be declared, +for example, inside other methods).

+
k
rule <k> T:Type F:Id(Ps:Params) S + => checkMethod(F, getTypes(Ps)->T, C') + ~> getTypes(Ps)->T F; ...</k> +// <br/> + <inClass> C </inClass> + <ctenvT> _ </ctenvT> // to ensure we are in a class pass + <className> C </className> + <baseClass> C' </baseClass> +// <br/> + (.Bag => <task> + <k> mkDecls(Ps) S </k> + <inClass> C </inClass> + <tenv> .Map </tenv> + <returnType> T </returnType> + </task>) +
+
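As a sketch, consider a hypothetical class C extends D declaring one method:

    class C extends D {
      int f(bool b, string s) { return 0; }
    }

The rule above issues checkMethod(f, (bool,string) -> int, D), declares f with type (bool,string) -> int in C's class type environment (reusing the member declaration rule), and creates a separate method task that declares the parameters and type-checks the body.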

Assignment

+

A more concrete value is allowed to be assigned to a more abstract +variable. The operation checkSubtype is defined at the end +of the module and it also works with pairs of lists of types.

+
k
rule T:Type = T':Type => checkSubtype(T', T) ~> T +
+

Method invocation and return

+

Methods can be applied to values of more concrete types than their +declared parameter types:

+
k
rule (Ts:Types -> T:Type) (Ts':Types) => checkSubtype(Ts',Ts) ~> T +
+
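For example, assuming a hypothetical class B extends A (with a no-argument constructor) and a method declared as int m(A a), a call m(new B()) types as sketched below:

    m(new B())                                    // m looks up to class(A) -> int, new B() types to class(B)
      => (class(A) -> int)(class(B))
      => checkSubtype(class(B), class(A)) ~> int  // the check dissolves, since B extends A

so the whole call types to int.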

Similarly, we allow values of more concrete types to be returned by +methods:

+
k
rule <k> return T:Type; => checkSubtype(T,T') ~> stmt ...</k> + <returnType> T':Type </returnType> +
+

Exceptions

+

Exceptions can throw and catch values of any types. Since unlike in Java +KOOL's methods do not declare the exception types that they can throw, +we cannot test the full type safety of exceptions. Instead, we +only check that the try and the catch statements +type correctly.

+
k
rule try block catch(T:Type X:Id) S => {T X; S} + rule throw _T:Type ; => stmt +
+
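For instance, a hypothetical fragment

    try { risky(); } catch(MyError e) { print(0); }

has its try block checked in its own task, like any block; once it yields block, the statement rewrites to { MyError e; { print(0); } }, so the catch body is checked with e bound to the (hypothetical) class type MyError. A statement such as throw new MyError(); simply types to stmt, whatever the type of the thrown expression.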

Spawn

+

The spawned cell needs to also be passed the parent's class.

+
k
// explain why + + rule <k> spawn S:Block => int ...</k> + <tenv> Rho </tenv> + <inClass> C </inClass> + (.Bag => <task> + <k> S </k> + <tenv> Rho </tenv> + <inClass> C </inClass> + </task>) +
+

Semantics of the new KOOL constructs

+

Class declaration

+

We process each class in the main task, adding the corresponding data +into its class cell and also adding a class task for it. We +also perform some well-formedness checks on the class hierarchy.

+

Initiate class processing
+We create a class cell and a class task for each class. Also, we start +the class task with a check that the class it extends is declared +(this delays the task until that class is processed using another +instance of this rule).

+
k
// There seems to be some error with the configuration concretization, +// as the rule below does not work when rewriting . to both the task +// and the class cells; I had to include two separate . rewrites + +// TODO: the following fails krun; see #2117 + rule <task> <k> class C:Id extends C':Id { S:Stmt } => stmt ...</k> </task> + (.Bag => <classData>... + <className> C </className> + <baseClass> C' </baseClass> + ...</classData>) +// <br/> + (.Bag => <task> + <k> checkType(`class`(C')) ~> S </k> + <inClass> C </inClass> + <ctenvT> .Map </ctenvT> + </task>) + +// You may want to try the thing below, but that failed, too +/* +syntax Type ::= "stmtStop" + + rule <tasks>... + <task> <k> class C:Id extends C':Id { S:Stmt } => stmtStop ...</k> </task> + (.Bag => <task> + <k> checkType(`class`(C')) ~> S </k> + <inClass> C </inClass> + <ctenvT> .Map </ctenvT> + </task>) + ...</tasks> + <classes>... + .Bag => <classData>... + <className> C </className> + <baseClass> C' </baseClass> + ...</classData> + ...</classes> +// <br/> +*/ +
+

Check for unique class names

+
k
rule (<T>... + <className> C </className> + <className> C </className> + ...</T> => .Bag) + <output>... .List => ListItem("Class \"" +String Id2String(C) + +String "\" declared twice!\n") </output> +
+

Check for cycles in class hierarchy
+We check for cycles in the class hierarchy by transitively closing the +class extends relation using the baseClasses cells, and +checking that a class will never appear in its own baseClasses +cell. The first rule below initiates the transitive closure of the +superclass relation, the second transitively closes it, and the third +checks for cycles.

+
k
rule <baseClass> C </baseClass> + <baseClasses> .Set => SetItem(C) </baseClasses> [priority(25)] + + rule <classData>... + <baseClasses> SetItem(C) Cs:Set (.Set => SetItem(C')) </baseClasses> + ...</classData> + <classData>... <className>C</className> <baseClass>C'</baseClass> ...</classData> + requires notBool(C' in (SetItem(C) Cs)) [priority(25)] + + rule (<T>... + <className> C </className> + <baseClasses>... SetItem(C) ...</baseClasses> + ...</T> => .Bag) + <output>... .List => ListItem("Class \"" +String Id2String(C) + +String "\" is in a cycle!\n") </output> + [group(inheritance-cycle), priority(25)] +
+

New

+

To type new we only need to check that the class constructor +can be called with arguments of the given types, so we initiate a call +to the constructor method in the corresponding class. If that +succeeds, meaning that it types to stmt, then we discard the +stmt type and produce instead the corresponding class type of +the new object. The auxiliary discard operation is defined +also at the end of this module.

+
k
rule new C:Id(Ts:Types) => `class`(C) . C (Ts) ~> discard ~> `class`(C) +
+

Self reference

+

The typing rule for this is straightforward: reduce to the +current class type.

+
k
rule <k> this => `class`(C) ...</k> + <inClass> C:Id </inClass> +
+

Super

+

Similarly, super types to the parent class type. +Note that for typing concerns, super can be considered as an object +(recall that this was not the case in the dynamic semantics).

+
k
rule <k> super => `class`(C') ...</k> + <inClass> C:Id </inClass> + <className> C </className> + <baseClass> C':Id </baseClass> +
+

Object member access

+

There are several cases to consider here. First, if we are in a class +task, we should look up the member in the temporary class type +environment in the cell ctenvT. That is because we want to allow +initialized field declarations in classes, such as int x=10;. +This is desugared to a declaration of x, which is added to +ctenvT during the class task processing, followed by an +assignment of x to 10. In order for the assignment to type +check, we need to know that x has been declared with type +int; this information can only be found in the +ctenvT cell. Second, we should redirect non-local variable +lookups in method tasks to corresponding member accesses (the +local variables are handled by the rule borrowed from SIMPLE). +This is what the second rule below does. Third, we should allow +object member accesses as lvalues, which is done by the third rule +below. These last two rules therefore ensure that each necessary +object member access is explicitly allowed for evaluation. Recall +from the annotated syntax module above that the member access +operation is strict in the object. That means that the object is +expected to evaluate to a class type. The next two rules below define +the actual member lookup operation, moving the search to the +superclass when the member is not found in the current class. Note +that this works because we create the class type environments +atomically; thus, a class either has its complete type environment +available, in which case these rules can safely apply, or its cell +ctenv is not yet available, in which case these rules have to +wait. Finally, the sixth rule below reports an error when the +Object class is reached.

+
k
rule <k> X:Id => T ...</k> + <ctenvT>... X |-> T ...</ctenvT> + + rule <k> X:Id => this . X ...</k> + <tenv> Rho </tenv> + requires notBool(X in keys(Rho)) + +// OLD approach: +// rule ltype(E:Exp . X:Id) => E . X + + rule <k> `class`(C:Id) . X:Id => T ...</k> + <className> C </className> + <ctenv>... X |-> T:Type ...</ctenv> + + rule <k> `class`(C1:Id => C2) . X:Id ...</k> + <className> C1 </className> + <baseClass> C2:Id </baseClass> + <ctenv> Rho </ctenv> + requires notBool(X in keys(Rho)) + + rule <k> `class`(Object) . X:Id => stuck(`class`(Object) . X) ...</k> + <inClass> C:Id </inClass> +// <br/> + <output>... .List => ListItem("Member \"" +String Id2String(X) + +String "\" not declared! (see class \"" + +String Id2String(C) +String "\")\n") </output> +
+

Instance of and casting

+

As it is hard to check statically whether casting is always safe, +the programmer is simply trusted from a typing perspective. We only +do some basic upcasting and downcasting checks, to reject casts which +will absolutely fail. However, dynamic semantics or implementations +of the language need to insert runtime checks for downcasting to be safe.

+
k
rule `class`(_C1:Id) instanceOf _C2:Id => bool + rule (C:Id) `class`(C) => `class`(C) + rule <k> (C2:Id) `class`(C1:Id) => `class`(C2) ...</k> + <className> C1 </className> + <baseClasses>...SetItem(C2)...</baseClasses> // upcast + rule <k> (C2:Id) `class`(C1:Id) => `class`(C2) ...</k> + <className> C2 </className> + <baseClasses>...SetItem(C1)...</baseClasses> // downcast + rule <k> (C2) `class`(C1:Id) => stuck((C2) `class`(C1)) ...</k> + <classData>... + <className> C1 </className> + <baseClasses> S1 </baseClasses> + ...</classData> + <classData>... + <className> C2 </className> + <baseClasses> S2 </baseClasses> + ...</classData> + <output>... .List => ListItem("Classes \"" +String Id2String(C1) + +String "\" and \"" +String Id2String(C2) + +String "\" are incompatible!\n") </output> + requires notBool(C1 in S2) andBool notBool(C2 in S1) +
+
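Concretely, assuming hypothetical classes class B extends A and an unrelated class D, and expressions eA and eB of types class(A) and class(B):

    (A) eB    // upcast: accepted, types to class(A)
    (B) eA    // downcast: accepted statically, types to class(B)
    (D) eB    // neither class is in the other's baseClasses:
              // reported as  Classes "B" and "D" are incompatible!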

Cleanup tasks

+

Finally, we need to clean up the terminated tasks. Each of the three +types of tasks is handled differently. The main task is replaced by a +method task holding new main();, which will ensure that a +main class with a main() method actually exists +(first rule below). A class task moves its temporary class type +environment into its class' cell, and then it dissolves itself (second +rule). A method task simply dissolves when terminated (third rule); +the presence of the tenv cell in that rule ensures that that +task is a method task. +Finally, when all the tasks are cleaned up, we can also remove the +tasks cell, issuing a corresponding message. Note that +checking for cycles or duplicate methods can still be performed after +the tasks cell has been removed.

+
k
// discard main task when done, issuing a "new main();" command to +// make sure that the class main and the method main() are declared. + + rule <task> <k> stmt => new Main(.Exps); </k> + (.Bag => <tenv> .Map </tenv> + <returnType> void </returnType> + <inClass> Main </inClass>) + </task> + +// discard class task when done, adding a ctenv in class + + rule (<task> + <k> stmt </k> + <ctenvT> Rho </ctenvT> + <inClass> C:Id </inClass> + </task> => .Bag) + <className> C </className> + (.Bag => <ctenv> Rho </ctenv>) + +// discard method task when done + + rule <task>... + <k> stmt </k> + <tenv> _ </tenv> // only to ensure that this is a method task + ...</task> => .Bag + +// cleanup tasks and output a success message when done + + rule (<T>... <tasks> .Bag </tasks> ...</T> => .Bag) + <output>... .List => ListItem("Type checked!\n") </output> +
+

KOOL-specific auxiliary declarations and operations

+

Subtype checking

+

The subclass relation introduces a subtyping relation.

+
k
syntax KItem ::= checkSubtype(Types,Types) + + rule checkSubtype(T:Type, T) => .K + + rule <k> checkSubtype(`class`(C:Id), `class`(C':Id)) => .K ...</k> + <className> C </className> + <baseClasses>... SetItem(C') ...</baseClasses> + + rule checkSubtype(Ts1->T2,Ts1'->T2') + => checkSubtype(((T2)::Type,Ts1'),((T2')::Type,Ts1)) + +// note that the following rule would be wrong! +// rule checkSubtype(T[],T'[]) => checkSubtype(T,T') + + rule checkSubtype((T:Type,Ts),(T':Type,Ts')) + => checkSubtype(T,T') ~> checkSubtype(Ts,Ts') + requires Ts =/=K .Types + + rule checkSubtype(.Types,.Types) => .K + rule checkSubtype(.Types,void) => .K +
+

Checking well-formedness of types

+

Since now any Id can be used as the type of a class, we need to +check that the types used in the program actually exist.

+
k
syntax KItem ::= checkType(Types) + + rule checkType(T:Type,Ts:Types) => checkType(T) ~> checkType(Ts) + requires Ts =/=K .Types + rule checkType(.Types) => .K + rule checkType(int) => .K + rule checkType(bool) => .K + rule checkType(string) => .K + rule checkType(void) => .K + rule <k> checkType(`class`(C:Id)) => .K ...</k> <className> C </className> + rule checkType(`class`(Object)) => .K + rule checkType(Ts:Types -> T:Type) => checkType(T,Ts) + rule checkType(T:Type[]) => checkType(T) +
+

Checking correct overriding of methods

+

The checkMethod operation below searches to see whether +the current method overrides some other method in some superclass. +If yes, then it issues an additional check that the new method's type +is more concrete than the overridden method's. The types T and T' +below can only be function types. See the definition of +checkSubtype on function types at the end of this module (it +is co-variant in the codomain and contra-variant in the domain).

+
k
syntax KItem ::= checkMethod(Id,Type,Id) + + rule <k> checkMethod(F:Id, T:Type, C:Id) => checkSubtype(T, T') ...</k> + <className> C </className> + <ctenv>... F |-> T':Type ...</ctenv> + + rule <k> checkMethod(F:Id, _T:Type, (C:Id => C')) ...</k> + <className> C </className> + <baseClass> C':Id </baseClass> + <ctenv> Rho </ctenv> + requires notBool(F in keys(Rho)) + + rule checkMethod(_:Id,_,Object) => .K +
+
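For instance, assume a hypothetical class B extends A, where A declares a method m of type class(B) -> class(A) (that is, A m(B x)), and a subclass overrides it as B m(A x):

    checkMethod(m, class(A) -> class(B), A)
      => checkSubtype(class(A) -> class(B), class(B) -> class(A))
      => checkSubtype(class(B), class(A)) ~> checkSubtype(class(B), class(A))   // both dissolve

Overriding it instead as, say, Object m(B x) would get stuck, since class(Object) is not a subtype of class(A).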

Generic operations which could be part of the K framework

+
k
syntax KItem ::= stuck(K) + + syntax KItem ::= "discard" + rule _:KResult ~> discard => .K + +endmodule +
+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/k-distribution/tests/regression-new/pl-tutorial/2_languages/3_fun/1_untyped/1_environment/fun-untyped/index.html b/k-distribution/tests/regression-new/pl-tutorial/2_languages/3_fun/1_untyped/1_environment/fun-untyped/index.html new file mode 100644 index 00000000000..7aa47c62735 --- /dev/null +++ b/k-distribution/tests/regression-new/pl-tutorial/2_languages/3_fun/1_untyped/1_environment/fun-untyped/index.html @@ -0,0 +1,1234 @@ + + + + + + + + + + + + + + +FUN — Untyped — Environment | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

FUN — Untyped — Environment

+

Author: Grigore Roșu (grosu@illinois.edu)
+Organization: University of Illinois at Urbana-Champaign

+

Author: Traian Florin Șerbănuță (traian.serbanuta@unibuc.ro)
+Organization: University of Bucharest

+

Abstract

+

This is the K semantic definition of the untyped FUN language. +FUN is a pedagogical and research language that captures the essence +of the functional programming paradigm, extended with several features +often encountered in functional programming languages. +Like many functional languages, FUN is an expression language, that +is, everything, including the main program, is an expression. +Functions can be declared anywhere and are first class values in the +language. +FUN is call-by-value here, but it has been extended (as student +homework assignments) with other parameter-passing styles. +To make it more interesting and to highlight some of K's strengths, +FUN includes the following features:

+
    +
  • +

    The basic builtin data-types of integers, booleans and strings.

    +
  • +
  • +

    Builtin lists, which can hold any elements, including other lists. +Lists are enclosed in square brackets and their elements are +comma-separated; e.g., [1,2,3].

    +
  • +
  • +

    User-defined data-types, by means of constructor terms. +Constructor names start with a capital letter (while any other +identifier in the language starts with a lowercase letter), and they +can be followed by an arbitrary number of comma-separated arguments +enclosed in parentheses; parentheses are not needed when the +constructor takes no arguments. +For example, Pair(5,7) is a constructor term holding two +numbers, Cons(1,Cons(2,Cons(3,Nil))) is a list-like +constructor term holding 3 elements, and +Tree(Tree(Leaf(1), Leaf(2)), Leaf(3)) is a tree-like +constructor term holding 3 elements. +In the untyped version of the FUN language, no type checking or +inference is performed to ensure that the data constructors are used +correctly. +The execution will simply get stuck when they are misused. +Moreover, since no type checking is performed, the data-types are not +even declared in the untyped version of FUN.

    +
  • +
  • +

    Functions and let/letrec binders can take +multiple space-separated arguments, but these are desugared to +ones that only take one argument, by currying. For example, the +expressions

    +
    fun x y -> x y
    +let x y = y in x
    +
    +

    are desugared, respectively, into the following expressions:

    +
    fun x -> fun y -> x y
    +let x = fun y -> y in x
    +
    +
  • +
  • +

    Functions can be defined using pattern matching over the +available data-types. For example, the program

    +
    letrec max = fun [h] -> h
    +             |   [h|t] -> let x = max t
    +                          in  if h > x then h else x
    +in max [1, 3, 5, 2, 4, 0, -1, -5]
    +
    +

    defines a function max that calculates the maximum element of +a non-empty list, and the function

    +
    letrec ack = fun Pair(0,n) -> n + 1
    +             |   Pair(m,0) -> ack Pair(m - 1, 1)
    +             |   Pair(m,n) -> ack Pair(m - 1, ack Pair(m, n - 1))
    +in ack Pair(2,3)
    +
    +

    calculates the Ackermann function applied to a particular pair of numbers. +Patterns can be nested. Patterns can currently only be used in function +definitions, and not directly in let/letrec binders. +For example, this is not allowed:

    +
    letrec Pai(x,y) = Pair(1,2) in x+y
    +
    +

    But this is allowed:

    +
    let f Pair(x,y) = x+y in f Pair(1,2)
    +
    +

    because it is first reduced to

    +
    let f = fun Pair(x,y) -> x+y in f Pair(1,2)
    +
    +

    by uncurrying of the let binder, and pattern matching is +allowed in function arguments.

    +
  • +
  • +

    We include a callcc construct, for two reasons: first, +several functional languages support this construct; second, some +semantic frameworks have difficulties defining it. Not K.

    +
  • +
  • +

    Finally, we include mutables by means of referencing an +expression, getting the reference of a variable, dereferencing and +assignment. We include these for the same reasons as above: there are +languages which have them, and they are not easy to define in some +semantic frameworks.

    +
  • +
+

Like in many other languages, some of FUN's constructs can be +desugared into a smaller set of basic constructs. We do that as usual, +using macros, and then we only give semantics to the core constructs.

+

Note:
+We recommend the reader to first consult the dynamic semantics of the +LAMBDA++ language in the first part of the K Tutorial. +To keep the comments below small and focused, we will not re-explain +functional or K features that have already been explained in there.

+

Syntax

+
k
//require "modules/pattern-matching.k" + +module FUN-UNTYPED-COMMON + imports DOMAINS-SYNTAX +
+

FUN is an expression language. The constructs below fall into +several categories: names, arithmetic constructs, conventional +functional constructs, patterns and pattern matching, data constructs, +lists, references, and call-with-current-continuation (callcc). +The arithmetic constructs are standard; they are present in almost all +our K language definitions. The meaning of FUN's constructs is +discussed in more depth when we define their semantics in the next +module.

+

The Syntactic Constructs

+

We start with the syntactic definition of FUN names. +We have several categories of names: ones to be used for functions and +variables, others to be used for data constructors, others for types and +others for type variables. We will introduce them as needed, starting +with the former category. We prefer the names of variables and functions +to start with lower case letters. We take the liberty of tacitly introducing +syntactic lists/sequences for each nonterminal for which we need them:

+
k
syntax Name [token] + syntax Names ::= List{Name,","} [overload(exps)] +
+

Expression constructs will be defined throughout the syntax module. +Below are the very basic ones, namely the builtins, the names, and the +parentheses used as brackets for grouping. Lists of expressions are +declared strict, so all expressions in the list get evaluated whenever +the list is in a position which can be evaluated:

+
k
syntax Exp ::= Int | Bool | String | Name + | "(" Exp ")" [bracket] + syntax Exps ::= List{Exp,","} [strict, overload(exps)] + syntax Val + syntax Exp ::= Val + syntax Exps ::= Vals + syntax Vals ::= List{Val,","} [overload(exps)] + syntax Bottom + syntax Bottoms ::= List{Bottom,","} [overload(exps)] +
+

We next define the syntax of arithmetic constructs, together with +their relative priorities and left-/non-associativities. We also +tag all these rules as members of a new group, "arith", so we can more easily +define global syntax priorities later (at the end of the syntax module).

+
k
syntax Exp ::= left: + Exp "*" Exp [strict, group(arith)] + | Exp "/" Exp [strict, group(arith)] + | Exp "%" Exp [strict, group(arith)] + > left: + Exp "+" Exp [strict, left, group(arith)] + | Exp "^" Exp [strict, left, group(arith)] +// left attribute should not be necessary; currently a parsing bug + | Exp "-" Exp [strict, prefer, group(arith)] +// the "prefer" attribute above is to not parse x-1 as x(-1) +// Due to some parsing problems, we currently cannot add unary minus: + | "-" Exp [strict, group(arith)] + > non-assoc: + Exp "<" Exp [strict, group(arith)] + | Exp "<=" Exp [strict, group(arith)] + | Exp ">" Exp [strict, group(arith)] + | Exp ">=" Exp [strict, group(arith)] + | Exp "==" Exp [strict, group(arith)] + | Exp "!=" Exp [strict, group(arith)] + > "!" Exp [strict, group(arith)] + > Exp "&&" Exp [strict(1), left, group(arith)] + > Exp "||" Exp [strict(1), left, group(arith)] +
+

The conditional construct has the expected evaluation strategy, +stating that only the first argument is evaluated:

+
k
syntax Exp ::= "if" Exp "then" Exp "else" Exp [strict(1)] +
+

FUN's builtin lists are formed by enclosing comma-separated +sequences of expressions (i.e., terms of sort Exps) in square +brackets. The list constructor cons adds a new element to the +top of the list, head and tail get the first element +and the tail sublist of a list if they exist, respectively, and get +stuck otherwise, and null? tests whether a list is empty or +not; syntactically, these are just expression constants. +In function patterns, we are also going to allow patterns following the +usual head/tail notation; for example, the pattern [x_1,...,x_n|t] +binds x_1, ..., x_n to the first elements of the matched list, +and t to the list formed with the remaining elements. We define list +patterns as ordinary expression constructs, although we will make sure that +we do not give them semantics if they appear in any other place than in a +function case pattern.

+
k
syntax Exp ::= "[" Exps "]" [strict, klabel(list)] + | "head" [macro] | "tail" [macro] | "null?" [macro] + | "[" Exps "|" Exp "]" + syntax Val ::= "[" Vals "]" [klabel(list)] + syntax Cons ::= "cons" + syntax Val ::= Cons + syntax Val ::= Cons Val [klabel(apply)] +
+
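For instance, under the desugarings of head, tail, and null? and the list rules given later in this definition, a small hypothetical expression such as

    let l = [1, 2, 3]
    in  [head l, head (tail l), null? l, null? []]

evaluates to [1, 2, false, true].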

Data constructors start with capital letters and they may or may +not have arguments. We need to use the attribute "prefer" to make +sure that, e.g., Cons(a) parses as constructor Cons with +argument a, and not as the expression Cons (because +constructor names are also expressions) regarded as a function applied +to the expression a. Also, note that the constructor is strict +in its second argument, because we want to evaluate its arguments but +not the constructor name itself.

+
k
syntax ConstructorName [token] + syntax Exp ::= ConstructorName + | ConstructorName "(" Exps ")" [prefer, strict(2), klabel(constructor)] + syntax Val ::= ConstructorName "(" Vals ")" [klabel(constructor)] +
+

A function is essentially a |-separated ordered +sequence of cases, each case of the form pattern -> expression, +preceded by the language construct fun. Patterns will be defined +shortly, both for the builtin lists and for user-defined constructors. +Recall that the syntax we define in K is not meant to serve as an +ultimate parser for the defined language, but rather as a convenient +notation for K abstract syntax trees, which we prefer when we write +the semantic rules. It is therefore often the case that we define a +more ``generous'' syntax than we want to allow programs to use. +We do it here, too. Specifically, the syntax of Cases +below allows any expression to appear as a pattern. This syntactic +relaxation permits many wrong programs to be parsed, but that is not a +problem because we are not going to give semantics to wrong combinations, +so those programs will get stuck; moreover, our type inferencer will reject +those programs anyway. Function application is just concatenation of +expressions, without worrying about type correctness. Again, the type +system will reject type-incorrect programs.

+
k
syntax Exp ::= "fun" Cases + | Exp Exp [strict, left, klabel(apply)] +// NOTE: We would like eventually to also have Exp "(" Exps ") + syntax Case ::= Exp "->" Exp + syntax Cases ::= List{Case, "|"} +
+

The let and letrec binders have the usual syntax +and functional meaning. We allow multiple and-separated bindings. +Like for the function cases above, we allow a more generous syntax for +the left-hand sides of bindings, noting that the semantics will get stuck +on incorrect bindings and that the type system will reject those programs.

+
k
syntax Exp ::= "let" Bindings "in" Exp + | "letrec" Bindings "in" Exp [prefer] +// The "prefer" attribute for letrec currently needed due to tool bug, +// to make sure that "letrec" is not parsed as "let rec". + syntax Binding ::= Exp "=" Exp + syntax Bindings ::= List{Binding,"and"} +
+

References are first class values in FUN. The construct ref +takes an expression, evaluates it, and then it stores the resulting value +at a fresh location in the store and returns that reference. Syntactically, +ref is just an expression constant. The construct & +takes a name as argument and evaluates to a reference, namely the store +reference where the variable passed as argument stores its value; this +construct is a bit controversial and is further discussed in the +environment-based semantics of the FUN language, where we desugar +ref to it. The construct @ takes a reference +and evaluates to the value stored there. The construct := takes +two expressions, the first expected to evaluate to a reference; the value +of its second argument will be stored at the location to which the first +points (the old value is thus lost). Finally, since expression evaluation +now has side effects, it makes sense to also add a sequential composition +construct, which is sequentially strict. This evaluates to the value of +its second argument; the value of the first argument is lost (which has +therefore been evaluated only for its side effects).

+
k
syntax Exp ::= "ref" [macro] + | "&" Name + | "@" Exp [strict] + | Exp ":=" Exp [strict] + | Exp ";" Exp [strict(1), right] +
+
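As a small sketch of how these constructs are meant to be used (relying on the semantics of @, :=, and ; given later in this definition), the hypothetical expression

    let r = ref 5
    in  (r := @r + 1 ; @r)

allocates a fresh location holding 5, binds r to that reference, increments the referenced value, and evaluates to 6.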

Call-with-current-continuation, named callcc in FUN, is a +powerful control operator that originated in the Scheme programming +language, but it now exists in many other functional languages. It works +by evaluating its argument, expected to evaluate to a function, and by +passing the current continuation, or evaluation context (or computation, +in K terminology), as a special value to it. When/If this special value +is invoked, the current context is discarded and replaced with the one +held by the special value and the computation continues from there. +It is like taking a snapshot of the execution context at some moment +in time and then, when desired, being able to get back in time to that +point. If you like games, it is like saving the game now (so you can +work on your homework!) and then continuing the game tomorrow or whenever +you wish. To illustrate the strength of callcc, we also +allow exceptions in FUN by means of a conventional try-catch +construct, which will desugar to callcc. We also need to +introduce the special expression constant throw, but we need to +use it as a function argument name in the desugaring macro, so we define +it as a name instead of as an expression constant:

+
k
syntax Exp ::= "try" Exp "catch" "(" Name ")" Exp [macro] + syntax Val ::= "callcc" + syntax Name ::= "throw" [token] +
+
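To get a feel for it, under this desugaring and the callcc semantics given later, a hypothetical expression such as

    try 10 + throw 3 catch(x) x + 1

evaluates to 4: throw 3 invokes the handler, which passes x + 1, that is 4, to the saved continuation, discarding the pending addition of 10.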

Finally, FUN also allows polymorphic datatype declarations. These +will be useful when we define the type system later on.

+
k
syntax Exp ::= "datatype" Type "=" TypeCases Exp [macro] +// NOTE: In a future version of K, we want the datatype declaration +// to be a construct by itself, but that is not possible currently +// because K's parser wronly identifies the __ operation allowing +// a declaration to appear in front of an expression with the function +// application construct, giving ambiguous parsing errors. +
+

We next need to define the syntax of types and type cases that appear +in datatype declarations.

+

Like in many functional languages, type parameters/variables in +user-defined types are quoted identifiers.

+
k
syntax TypeVar [token] + syntax TypeVars ::= List{TypeVar,","} [overload(types)] +
+

Types can be basic types, function types, or user-defined +parametric types. In the dynamic semantics we are going to simply ignore +all the type declarations, so the syntax of types below is only useful +for generating the desired parser. To avoid syntactic ambiguities with +the arrow construct for function cases, we use the symbol --> as +a constructor for function types:

+
k
syntax TypeName [token] + syntax Type ::= "int" | "bool" | "string" + | Type "-->" Type [right] + | "(" Type ")" [bracket] + | TypeVar + | TypeName [symbol(TypeName), avoid] + | Type TypeName [symbol(Type-TypeName), macro] + | "(" Types ")" TypeName [prefer] + syntax Types ::= List{Type,","} [overload(types)] + syntax Types ::= TypeVars + + syntax TypeCase ::= ConstructorName + | ConstructorName "(" Types ")" + syntax TypeCases ::= List{TypeCase,"|"} [symbol(_|TypeCase_)] +
+

Additional Priorities

+
k
syntax priority @__FUN-UNTYPED-COMMON + > apply + > arith + > _:=__FUN-UNTYPED-COMMON + > let_in__FUN-UNTYPED-COMMON + letrec_in__FUN-UNTYPED-COMMON + if_then_else__FUN-UNTYPED-COMMON + > _;__FUN-UNTYPED-COMMON + > fun__FUN-UNTYPED-COMMON + > datatype_=___FUN-UNTYPED-COMMON +endmodule + +module FUN-UNTYPED-MACROS + imports FUN-UNTYPED-COMMON +
+

Desugaring macros

+

We desugar the list non-constructor operations to functions matching +over list patterns. In order to do that we need some new variables; for +those, we follow the same convention as in the K tutorial, where we +added them as new identifier constructs starting with the character $, +so we can easily recognize them when we debug or trace the semantics.

+
k
syntax Name ::= "$h" [token] | "$t" [token] + rule head => fun [$h|$t] -> $h + rule tail => fun [$h|$t] -> $t + rule null? => fun [.Exps] -> true | [$h|$t] -> false +
+

Multiple-head list patterns desugar into successive one-head patterns:

+
k
rule [E1,E2,Es:Exps|T] => [E1|[E2,Es|T]] [anywhere] +
+

Uncurrying of multiple arguments in functions and binders:

+
k
rule P1 P2 -> E => P1 -> fun P2 -> E [anywhere] + rule F P = E => F = fun P -> E [anywhere] +
+

We desugar the try-catch construct into callcc:

+
k
syntax Name ::= "$k" [token] | "$v" [token] + rule try E catch(X) E' + => callcc (fun $k -> (fun throw -> E)(fun X -> $k E')) +
+

For uniformity, we reduce all types to their general form:

+
k
rule `Type-TypeName`(T:Type, Tn:TypeName) => (T) Tn +
+

The dynamic semantics ignores all the type declarations:

+
k
rule datatype _T = _TCs E => E + +endmodule + + +module FUN-UNTYPED-SYNTAX + imports FUN-UNTYPED-COMMON + imports BUILTIN-ID-TOKENS + + syntax Name ::= r"[a-z][_a-zA-Z0-9]*" [token, prec(2)] + | #LowerId [token] + syntax ConstructorName ::= #UpperId [token] + syntax TypeVar ::= r"['][a-z][_a-zA-Z0-9]*" [token] + syntax TypeName ::= Name [token] +endmodule +
+

Semantics

+

The semantics below is environment-based. A substitution-based +definition of FUN is also available, but that drops the & +construct as explained above.

+
k
module FUN-UNTYPED + imports FUN-UNTYPED-COMMON + imports FUN-UNTYPED-MACROS + imports DOMAINS + //imports PATTERN-MATCHING +
+

Configuration

+

The k, env, and store cells are standard +(see, for example, the definition of LAMBDA++ or IMP++ in the first +part of the K tutorial).

+
k
configuration <T color="yellow"> + <k color="green"> $PGM:Exp </k> + <env color="violet"> .Map </env> + <store color="white"> .Map </store> + </T> +
+

Values and results

+

We only define integers, Booleans and strings as values here, but will +add more values later.

+
k
syntax Val ::= Int | Bool | String + syntax Val ::= Bottom + syntax Vals ::= Bottoms + syntax KResult ::= Val +
+

Lookup

+
k
rule <k> X:Name => V ...</k> + <env>... X |-> L ...</env> + <store>... L |-> V ...</store> +
+

Arithmetic expressions

+
k
rule I1 * I2 => I1 *Int I2 + rule I1 / I2 => I1 /Int I2 requires I2 =/=K 0 + rule I1 % I2 => I1 %Int I2 requires I2 =/=K 0 + rule I1 + I2 => I1 +Int I2 + rule S1 ^ S2 => S1 +String S2 + rule I1 - I2 => I1 -Int I2 + rule - I => 0 -Int I + rule I1 < I2 => I1 <Int I2 + rule I1 <= I2 => I1 <=Int I2 + rule I1 > I2 => I1 >Int I2 + rule I1 >= I2 => I1 >=Int I2 + rule V1:Val == V2:Val => V1 ==K V2 + rule V1:Val != V2:Val => V1 =/=K V2 + rule ! T => notBool(T) + rule true && E => E + rule false && _ => false + rule true || _ => true + rule false || E => E +
+

Conditional

+
k
rule if true then E else _ => E + rule if false then _ else E => E +
+

Lists

+

We have already declared the syntactic list of expressions strict, so +we can assume that all the elements that appear in a FUN list are +evaluated. The only thing left to do is to state that a list of +values is a value itself, that is, that the list square-bracket +construct is indeed a constructor, and to give the semantics of +cons. Since cons is a builtin function and is +expected to take two arguments, we have to also state that +cons itself is a value (specifically, a function/closure +value, but we do not need that level of detail here), and also that +cons applied to a value is a value (specifically, it would be +a function/closure value that expects the second, list argument):

+
k
rule cons V:Val [Vs:Vals] => [V,Vs] +
+

Data Constructors

+

Constructors take values as arguments and produce other values:

+
k
syntax Val ::= ConstructorName +
+

Functions and Closures

+

Like in the environment-based semantics of LAMBDA++ in the first part +of the K tutorial, functions evaluate to closures. A closure includes +the current environment besides the function contents; the environment +will be used at execution time to lookup all the variables that appear +free in the function body (we want static scoping in FUN).

+
k
syntax Val ::= closure(Map,Cases) + rule <k> fun Cases => closure(Rho,Cases) ...</k> <env> Rho </env> +
+
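For example, in the hypothetical program below, the closure bound to f captures the environment in which x is bound to 1, so the program evaluates to 101 rather than 110 (static scoping):

    let x = 1 in
    let f = fun y -> x + y in
    let x = 10 in
      f 100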

Note: The reader may want to get familiar with +how the pre-defined pattern matching works before proceeding. +The best way to do that is to consult +k/include/modules/pattern-matching.k.

+ +

We distinguish two cases when the closure is applied. +If the first pattern matches, then we pick the first case: switch to +the closed environment, get the matching map and bind all its +variables, and finally evaluate the function body of the first case, +making sure that the environment is properly recovered afterwards. +If the first pattern does not match, then we drop it and thus move on +to the next one.

+
k
rule (.K => getMatching(P, V)) ~> closure(_, P->_ | _) V:Val + rule <k> matchResult(M:Map) ~> closure(Rho, _->E | _) _ + => bindMap(M) ~> E ~> setEnv(Rho') ...</k> + <env> Rho' => Rho </env> + rule (matchFailure => .K) ~> closure(_, (_->_ | Cs:Cases => Cs)) _ +// rule <k> closure(Rho, P->E | _) V:Val +// => bindMap(getMatching(P,V)) ~> E ~> setEnv(Rho') ...</k> +// <env> Rho' => Rho </env> requires isMatching(P,V) +// rule closure(_, (P->_ | Cs:Cases => Cs)) V:Val requires notBool isMatching(P,V) +
+
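
To see the case-trying protocol above in action, consider (a hedged, made-up example) applying the closure closure(Rho, 0 -> 10 | X -> X) to the value 5.

+
k
// Illustrative trace (not tool output), using the getMatching rules defined later:
//   getMatching(0, 5)  =>  matchFailure           // first case is dropped
//   getMatching(X, 5)  =>  matchResult(X |-> 5)   // second case matches
// so bindMap(X |-> 5) binds X to a fresh location holding 5, the body X
// evaluates to 5, and setEnv restores the caller's environment afterwards.
+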

Let and Letrec

+

To highlight the similarities and differences between let and +letrec, we prefer to give them direct semantics instead of +desugaring them as in LAMBDA. See the formal definitions of +bindTo, bind, and assignTo at the end of +this module. Informally, bindTo(Xs, Es) first +evaluates the expressions Es in Exps in the current +environment (i.e., it is strict in its second argument), then it binds +the variables in Xs in Names to new locations and adds +those bindings to the environment, and finally writes the values +previously obtained after evaluating the expressions Es to those +new locations; bind(Xs) does only the bindings of +Xs to new locations and adds those bindings to the environment; +and assignTo(Xs,Es) evaluates the expressions +Es in the current environment and then writes the resulting +values to the locations to which the variables Xs are already +bound in the environment.

+

Therefore, let Xs = Es in E first +evaluates Es in the current environment, then adds new +bindings for Xs to fresh locations in the environment, then +writes the values of Es to those locations, and finally +evaluates E in the new environment, making sure that the +environment is properly recovered after the evaluation of E. +On the other hand, letrec does the same things but in a +different order: it first adds new bindings for Xs to fresh +locations in the environment, then it evaluates Es in the new +environment, then it writes the resulting values to their +corresponding locations, and finally it evaluates E and +recovers the environment. The crucial difference is that the +expressions Es now see the locations of the variables Xs +in the environment, so if they are functions, which is typically the +case with letrec, their closures will encapsulate in their +environments the bindings of all the bound variables, including +themselves (thus, we may have a closure value stored at location +L, whose environment contains a binding of the form +F ↦ L; this way, the closure can invoke +itself).

+
k
rule <k> let Bs in E + => bindTo(names(Bs),exps(Bs)) ~> E ~> setEnv(Rho) ...</k> + <env> Rho </env> + + rule <k> letrec Bs in E + => bind(names(Bs))~>assignTo(names(Bs),exps(Bs))~>E~>setEnv(Rho)...</k> + <env> Rho </env> +
+
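
The difference in the order of the two bindings matters precisely for recursive definitions. As a hedged illustration (the factorial program below is only an example, not part of the lesson):

+
k
// letrec f = fun n -> if n <= 0 then 1 else n * (f (n - 1)) in f 3
//
// bind(names(Bs)) first allocates a fresh location L for f and adds f |-> L
// to the environment, so the closure created for fun n -> ... captures an
// environment containing f |-> L and the recursive call f (n - 1) works.
// With plain let, the closure would be created before f is bound, so the
// recursive call would get stuck on an unbound name.
+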

Recall that our syntax allows let and letrec to +take any expression in place of their bindings. This allows us to use +the already existing function application construct to bind names to +functions, e.g., let x y = y in .... +The desugaring macro in the syntax module uncurries such declarations, +and the semantic rules above then only work when the remaining +bindings are identifiers, so the semantics will get stuck on programs +that misuse the let and letrec binders.

+

References

+

The semantics of references is self-explanatory, except maybe for the +desugaring rule of ref, which is discussed further below. Note +that &X grabs the location of X from the environment. +Sequential composition, which is needed only to accumulate the +side effects due to assignments, was declared strict in its first argument. +Once evaluated, its first argument is simply discarded:

+
k
syntax Name ::= "$x" [token] + rule ref => fun $x -> & $x + rule <k> & X => L ...</k> <env>... X |-> L ...</env> + rule <k> @ L:Int => V:Val ...</k> <store>... L |-> V ...</store> + rule <k> L:Int := V:Val => V ...</k> <store>... L |-> (_=>V) ...</store> + rule _V:Val; E => E +
+
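
To see why this desugaring works, here is a hedged trace of ref 5 (illustration only; L stands for whatever fresh location gets allocated):

+
k
// ref 5
//   => (fun $x -> & $x) 5    // by the desugaring rule for ref above
//   => & $x                  // applying the closure binds $x to a fresh
//                            // location L whose store entry is 5
//   => L                     // & returns the location of $x
// The resulting L can then be read with `@ L` and updated with `L := V`.
+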

The desugaring rule of ref (first rule above) works +because & takes a variable and returns its location (like in C). +Note that some "pure" functional programming researchers strongly dislike +the & construct, but favor ref. We refrain from having +a personal opinion on this issue here, but support & in the +environment-based definition of FUN because it is, technically speaking, +more powerful than ref. From a language design perspective, it +would be equally easy to drop & and instead give a direct +semantics to ref. In fact, this is precisely what we do in the +substitution-based definition of FUN, because there appears to be no way +to give a substitution-based definition to the & construct.

+

Callcc

+

As we know from the LAMBDA++ tutorial, call-with-current-continuation +is quite easy to define in K. We first need to define a special +value wrapping an execution context, that is, an environment saying +where the variables should be looked up, and a computation structure +saying what is left to execute (in a substitution-based definition, +this special value would be even simpler, as it would only need to +wrap the computation structure---see, for example, the +substitution-based semantics of LAMBDA++ in the first part of the +K tutorial, or the substitution-based definition of FUN). Then +callcc creates such a value containing the current +environment and the current remaining computation, and passes it to +its argument function. When/If invoked, the special value replaces +the current execution context with its own and continues the execution +normally.

+
k
syntax Val ::= cc(Map,K) + rule <k> (callcc V:Val => V cc(Rho,K)) ~> K </k> <env> Rho </env> + rule <k> cc(Rho,K) V:Val ~> _ => V ~> K </k> <env> _ => Rho </env> +
+
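
As a hedged illustration of the two rules above (the numbers are made up), consider evaluating callcc (fun k -> 1 + (k 2)) when the remaining computation in the k cell is K.

+
k
// callcc (fun k -> 1 + (k 2)) ~> K
//   => (fun k -> 1 + (k 2)) cc(Rho, K) ~> K    // first rule: capture env and K
//   => 1 + (cc(Rho, K) 2) ~> ... ~> K          // k is bound to cc(Rho, K)
//   => 2 ~> K                                  // second rule: the pending `1 + _`
//                                              // context is discarded and the
//                                              // environment is reset to Rho
+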

Auxiliary operations

+

Environment recovery

+

The environment recovery operation is the same as for the LAMBDA++ +language in the K tutorial and many other languages provided with the +K distribution. The first "anywhere" rule below shows an elegant way +to achieve the benefits of tail recursion in K.

+
k
syntax KItem ::= setEnv(Map) // TODO: get rid of env + //rule (setEnv(_) => .) ~> setEnv(_) [anywhere] + rule <k> _:Val ~> (setEnv(Rho) => .K) ...</k> <env> _ => Rho </env> +
+

bindTo, bind and assignTo

+

The meaning of these operations has already been explained when we +discussed the let and letrec language constructs +above.

+
k
syntax KItem ::= bindTo(Names,Exps) [strict(2)] + | bindMap(Map) + | bind(Names) + + rule (.K => getMatchingAux(Xs,Vs)) ~> bindTo(Xs:Names,Vs:Vals) + rule matchResult(M:Map) ~> bindTo(_:Names, _:Vals) => bindMap(M) + + rule bindMap(.Map) => .K + rule <k> bindMap((X:Name |-> V:Val => .Map) _:Map) ...</k> + <env> Rho => Rho[X <- !L:Int] </env> + <store>... .Map => !L |-> V ...</store> + + rule bind(.Names) => .K + rule <k> bind(X:Name,Xs => Xs) ...</k> + <env> Rho => Rho[X <- !_L:Int] </env> + + syntax KItem ::= assignTo(Names,Exps) [strict(2)] + + rule <k> assignTo(.Names,.Vals) => .K ...</k> + rule <k> assignTo((X:Name,Xs => Xs),(V:Val,Vs:Vals => Vs)) ...</k> + <env>... X |-> L ...</env> + <store>... .Map => L |-> V ...</store> +
+

Getters

+

The following auxiliary operations extract the list of identifiers +and the list of expressions in a binding, respectively.

+
k
syntax Names ::= names(Bindings) [function] + rule names(.Bindings) => .Names + rule names(X:Name=_ and Bs) => (X,names(Bs))::Names + + syntax Exps ::= exps(Bindings) [function] + rule exps(.Bindings) => .Exps + rule exps(_:Name=E and Bs) => E,exps(Bs) + + /* Extra kore stuff */ + syntax KResult ::= Vals + syntax Exps ::= Names + syntax Names ::= Bottoms + + /* Matching */ + syntax MatchResult ::= getMatching(Exp, Val) [function] + | getMatchingAux(Exps, Vals) [function] + | mergeMatching(MatchResult, MatchResult) [function] + | matchResult(Map) + | "matchFailure" + + rule getMatching(C:ConstructorName(Es:Exps), C(Vs:Vals)) => getMatchingAux(Es, Vs) + rule getMatching([Es:Exps], [Vs:Vals]) => getMatchingAux(Es, Vs) + rule getMatching(C:ConstructorName, C) => matchResult(.Map) + rule getMatching(B:Bool, B) => matchResult(.Map) + rule getMatching(I:Int, I) => matchResult(.Map) + rule getMatching(S:String, S) => matchResult(.Map) + rule getMatching(N:Name, V:Val) => matchResult(N |-> V) + rule getMatching(_, _) => matchFailure [owise] + + rule getMatchingAux((E:Exp, Es:Exps), (V:Val, Vs:Vals)) => mergeMatching(getMatching(E, V), getMatchingAux(Es, Vs)) + rule getMatchingAux(.Exps, .Vals) => matchResult(.Map) + rule getMatchingAux(_, _) => matchFailure [owise] + + rule mergeMatching(matchResult(M1:Map), matchResult(M2:Map)) => matchResult(M1 M2) + requires intersectSet(keys(M1), keys(M2)) ==K .Set + //rule mergeMatching(_, _) => matchFailure [owsie] + rule mergeMatching(matchResult(_:Map), matchFailure) => matchFailure + rule mergeMatching(matchFailure, matchResult(_:Map)) => matchFailure + rule mergeMatching(matchFailure, matchFailure) => matchFailure +
+
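
For example (hypothetical, assuming the executed program declares a binary constructor Pair), matching the pattern Pair(X, Y) against the value Pair(1, 2) decomposes roughly as sketched below; note that mergeMatching only combines submatches that bind disjoint sets of names, which is what its side condition checks.

+
k
// getMatching(Pair(X, Y), Pair(1, 2))
//   => getMatchingAux((X, Y), (1, 2))
//   => mergeMatching(matchResult(X |-> 1), matchResult(Y |-> 2))   (roughly)
//   => matchResult(X |-> 1  Y |-> 2)
+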

Besides the generic decomposition rules for patterns and values, +we also want to allow [head|tail] matching for lists, so we add +the following custom pattern decomposition rule:

+
k
rule getMatching([H:Exp | T:Exp], [V:Val, Vs:Vals]) + => getMatchingAux((H, T), (V, [Vs])) +endmodule +
+
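
As a hedged example of the rule above, matching the pattern [H | T] against the list value [1, 2, 3] binds H to the head and T to the list of remaining elements:

+
k
// getMatching([H | T], [1, 2, 3])
//   => getMatchingAux((H, T), (1, [2, 3]))
//   => matchResult(H |-> 1  T |-> [2, 3])   (roughly)
+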

Go to Lesson 2, FUN untyped, Substitution-Based.

+
+
+ + + +
+ +
+
+ + + + + + + + + + + + + diff --git a/news/k-framework-demo/index.html b/news/k-framework-demo/index.html new file mode 100644 index 00000000000..d29863d80d2 --- /dev/null +++ b/news/k-framework-demo/index.html @@ -0,0 +1,362 @@ + + + + + + + + + + + + + + +The K Framework Demo | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

The K Framework Demo

+ +
+
+ + + +
+ +
+
+ + + + + + + + + + + + diff --git a/overview/index.html b/overview/index.html new file mode 100644 index 00000000000..fa1c579e65b --- /dev/null +++ b/overview/index.html @@ -0,0 +1,364 @@ + + + + + + + + + + + + + + +K overview | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

K overview

+ +

Go to the YouTube mirror if the above does not work.

+

Go back to https://kframework.org for further links, the K tool and contact information.

+
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + diff --git a/projects/index.html b/projects/index.html new file mode 100644 index 00000000000..5fca0faa00b --- /dev/null +++ b/projects/index.html @@ -0,0 +1,455 @@ + + + + + + + + + + + + + + +Projects using K | Runtime Verification Inc + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Projects using K

+

A list of projects using the K framework. If you are working on something interesting and want to share it with the community, +let us know on our socials and we will feature your project on this list.

+
+ +
+
    +
  • +

    KAVM (Feb 2022 - Present)

    +

    The Algorand Virtual Machine and TEAL Semantics in K
    +KAVM leverages the K Framework to empower Algorand smart contract developers +with property-based testing and formal verification.

    +
  • +
  • +

    KPlutus (2016 - Present)

    +

    The K Semantics of Plutus-Core

    +
  • +
  • +

    Dedukti (Mar 2021 - Present)

    +

    This project aims to translate real K semantics into Dedukti.

    +
  • +
  • +

    KWasm (Aug 2015 - Present)

    +

    KWasm is the K semantics of WebAssembly. +WebAssembly is a low-level (but simple and streamlined) assembly language that was originally developed to provide a fast execution engine for browser-based tools. +More recently, it has been used in several blockchain smart-contract platforms as the underlying language for executing financial agreements. +KWasm has been used for measuring coverage of test suites over Wasm code and verifying programs that are compiled to Wasm.

    +
  • +
  • +

    KEVM (Sep 2017 - Present)

    +

    KEVM is the K semantics of the Ethereum Virtual Machine. +It passes all of the Ethereum Test Suite and is used for verifying EVM programs.

    +
  • +
  • +

    IELE (Oct 2016 - Present)

    +

    IELE is the underlying VM integrated into the Cardano blockchain. +It is a register-based VM (inspired by LLVM) that attempts to avoid many of the design missteps present in the EVM.

    +
  • +
  • +

    K-Michelson (Oct 2019 - Present)

    +

    K-Michelson is the K semantics of the Michelson blockchain programming language, which powers the Tezos blockchain. +K-Michelson provides additional testing tools for developers, including a unit-testing framework which is extendable to symbolic property testing.

    +
  • +
  • +

    C (Jul 2010 - Present)

    +

    The K semantics of the C programming language specifies the translation, linking, and execution semantics of the C language according to the official C standard. +It has been used to build tools like RV-Match, which detects undefined behaviors in users' programs by running their test suites through the C semantics.

    +
  • +
+

Archived

+ +
+
+ + +
+ + + +
+
+ +
+
+ + + + + + + + + + + + diff --git a/pyk/_modules/index.html b/pyk/_modules/index.html new file mode 100644 index 00000000000..45c95c31323 --- /dev/null +++ b/pyk/_modules/index.html @@ -0,0 +1,179 @@ + + + + + + + + Overview: module code — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + +
  • +
  • +
+
+
+ +
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/coverage.html b/pyk/_modules/pyk/coverage.html new file mode 100644 index 00000000000..427ed14db9d --- /dev/null +++ b/pyk/_modules/pyk/coverage.html @@ -0,0 +1,246 @@ + + + + + + + + pyk.coverage — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.coverage

+  1from __future__ import annotations
+  2
+  3from typing import TYPE_CHECKING
+  4
+  5from .kast import Atts
+  6from .kast.inner import KApply, KRewrite, KSequence
+  7from .kast.outer import KRule, read_kast_definition
+  8
+  9if TYPE_CHECKING:
+ 10    from collections.abc import Iterable
+ 11    from os import PathLike
+ 12
+ 13    from .kast.outer import KDefinition
+ 14
+ 15
+
+[docs] + 16def get_rule_by_id(definition: KDefinition, rule_id: str) -> KRule: + 17 """Get a rule from the definition by coverage rule id. + 18 + 19 Args: + 20 definition: JSON-encoded definition. + 21 rule_id: String of unique rule identifier generated by `kompile --coverage`. + 22 + 23 Returns: + 24 JSON encoded rule which has identifier `rule_id`. + 25 """ + 26 for module in definition.modules: + 27 for sentence in module.sentences: + 28 if type(sentence) is KRule: + 29 if Atts.UNIQUE_ID in sentence.att and sentence.att[Atts.UNIQUE_ID] == rule_id: + 30 return sentence + 31 raise ValueError(f'Could not find rule with ID: {rule_id}')
+ + 32 + 33 +
+[docs] + 34def strip_coverage_logger(rule: KRule) -> KRule: + 35 body = rule.body + 36 if type(body) is KRewrite: + 37 lhs = body.lhs + 38 rhs = body.rhs + 39 if type(rhs) is KApply and rhs.label.name.startswith('project:'): + 40 rhs_seq = rhs.args[0] + 41 if type(rhs_seq) is KSequence and rhs_seq.arity == 2: + 42 body = KRewrite(lhs, rhs_seq.items[1]) + 43 return rule.let(body=body)
+ + 44 + 45 +
+[docs] + 46def translate_coverage( + 47 src_all_rules: Iterable[str], + 48 dst_all_rules: Iterable[str], + 49 dst_definition: KDefinition, + 50 src_rules_list: Iterable[str], + 51) -> list[str]: + 52 """Translate the coverage data from one kompiled definition to another. + 53 + 54 Args: + 55 src_all_rules: Contents of allRules.txt for definition which coverage was generated for. + 56 dst_all_rules: Contents of allRules.txt for definition which you desire coverage for. + 57 dst_definition: JSON encoded definition of dst kompiled definition. + 58 src_rules_list: Actual coverage data produced. + 59 + 60 Returns: + 61 List of non-functional rules applied in dst definition translated from src definition. + 62 """ + 63 # Load the src_rule_id -> src_source_location rule map from the src kompiled directory + 64 src_rule_map = {} + 65 for line in src_all_rules: + 66 src_rule_hash, src_rule_loc = line.split(' ') + 67 src_rule_loc = src_rule_loc.split('/')[-1] + 68 src_rule_map[src_rule_hash.strip()] = src_rule_loc.strip() + 69 + 70 # Load the dst_rule_id -> dst_source_location rule map (and inverts it) from the dst kompiled directory + 71 dst_rule_map = {} + 72 for line in dst_all_rules: + 73 dst_rule_hash, dst_rule_loc = line.split(' ') + 74 dst_rule_loc = dst_rule_loc.split('/')[-1] + 75 dst_rule_map[dst_rule_loc.strip()] = dst_rule_hash.strip() + 76 + 77 src_rule_list = [rule_hash.strip() for rule_hash in src_rules_list] + 78 + 79 # Filter out non-functional rules from rule map (determining if they are functional via the top symbol in the rule being `<generatedTop>`) + 80 dst_non_function_rules = [] + 81 for module in dst_definition.modules: + 82 for sentence in module.sentences: + 83 if type(sentence) is KRule: + 84 body = sentence.body + 85 if (type(body) is KApply and body.label.name == '<generatedTop>') or ( + 86 type(body) is KRewrite and type(body.lhs) is KApply and body.lhs.label.name == '<generatedTop>' + 87 ): + 88 if Atts.UNIQUE_ID in sentence.att: + 89 dst_non_function_rules.append(sentence.att[Atts.UNIQUE_ID]) + 90 + 91 # Convert the src_coverage rules to dst_no_coverage rules via the maps generated above + 92 dst_rule_list = [] + 93 for src_rule in src_rule_list: + 94 if src_rule not in src_rule_map: + 95 raise ValueError(f'Could not find rule in src_rule_map: {src_rule}') + 96 src_rule_loc = src_rule_map[src_rule] + 97 + 98 if src_rule_loc not in dst_rule_map: + 99 raise ValueError(f'Could not find rule location in dst_rule_map: {src_rule_loc}') +100 dst_rule = dst_rule_map[src_rule_loc] +101 +102 if dst_rule in dst_non_function_rules: +103 dst_rule_list.append(dst_rule) +104 +105 return dst_rule_list
+ +106 +107 +
+[docs] +108def translate_coverage_from_paths(src_kompiled_dir: str, dst_kompiled_dir: str, src_rules_file: PathLike) -> list[str]: +109 """Translate coverage information given paths to needed files. +110 +111 Args: +112 src_kompiled_dir: Path to kompiled directory of source. +113 dst_kompiled_dir: Path to kompiled directory of destination. +114 src_rules_file: Path to generated rules coverage file. +115 +116 Returns: +117 Translated list of rules with non-semantic rules stripped out. +118 """ +119 src_all_rules = [] +120 with open(src_kompiled_dir + '/allRules.txt') as src_all_rules_file: +121 src_all_rules = [line.strip() for line in src_all_rules_file] +122 +123 dst_all_rules = [] +124 with open(dst_kompiled_dir + '/allRules.txt') as dst_all_rules_file: +125 dst_all_rules = [line.strip() for line in dst_all_rules_file] +126 +127 dst_definition = read_kast_definition(dst_kompiled_dir + '/compiled.json') +128 +129 src_rules_list = [] +130 with open(src_rules_file) as src_rules: +131 src_rules_list = [line.strip() for line in src_rules] +132 +133 return translate_coverage(src_all_rules, dst_all_rules, dst_definition, src_rules_list)
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/cterm/cterm.html b/pyk/_modules/pyk/cterm/cterm.html new file mode 100644 index 00000000000..c2416727419 --- /dev/null +++ b/pyk/_modules/pyk/cterm/cterm.html @@ -0,0 +1,635 @@ + + + + + + + + pyk.cterm.cterm — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.cterm.cterm

+  1from __future__ import annotations
+  2
+  3from dataclasses import dataclass
+  4from functools import cached_property
+  5from itertools import chain
+  6from typing import TYPE_CHECKING
+  7
+  8from ..kast import KInner
+  9from ..kast.inner import KApply, KRewrite, KToken, KVariable, Subst, bottom_up
+ 10from ..kast.manip import (
+ 11    abstract_term_safely,
+ 12    build_claim,
+ 13    build_rule,
+ 14    extract_subst,
+ 15    flatten_label,
+ 16    free_vars,
+ 17    ml_pred_to_bool,
+ 18    normalize_constraints,
+ 19    push_down_rewrites,
+ 20    remove_useless_constraints,
+ 21    split_config_and_constraints,
+ 22    split_config_from,
+ 23)
+ 24from ..prelude.k import GENERATED_TOP_CELL, K
+ 25from ..prelude.kbool import andBool, orBool
+ 26from ..prelude.ml import is_bottom, is_top, mlAnd, mlBottom, mlEquals, mlEqualsTrue, mlImplies, mlTop
+ 27from ..utils import not_none, unique
+ 28
+ 29if TYPE_CHECKING:
+ 30    from collections.abc import Iterable, Iterator
+ 31    from typing import Any
+ 32
+ 33    from ..kast.outer import KClaim, KDefinition, KRule
+ 34
+ 35
+
+[docs] + 36@dataclass(frozen=True, order=True) + 37class CTerm: + 38 """Represent a symbolic program state, obtained and manipulated using symbolic execution. + 39 + 40 Contains the data: + 41 - `config`: the _configuration_ (structural component of the state, potentially containing free variables) + 42 - `constraints`: conditions which limit/constrain the free variables from the `config` + 43 """ + 44 + 45 config: KInner # TODO Optional? + 46 constraints: tuple[KInner, ...] + 47 +
+[docs] + 48 def __init__(self, config: KInner, constraints: Iterable[KInner] = ()) -> None: + 49 """Instantiate a given `CTerm`, performing basic sanity checks on the `config` and `constraints`.""" + 50 if is_top(config, weak=True): + 51 config = mlTop() + 52 constraints = () + 53 elif is_bottom(config, weak=True): + 54 config = mlBottom() + 55 constraints = () + 56 else: + 57 self._check_config(config) + 58 constraints = self._normalize_constraints(constraints) + 59 object.__setattr__(self, 'config', config) + 60 object.__setattr__(self, 'constraints', constraints)
+ + 61 +
+[docs] + 62 @staticmethod + 63 def from_kast(kast: KInner) -> CTerm: + 64 """Interpret a given `KInner` as a `CTerm` by splitting the `config` and `constraints` (see `CTerm.kast`).""" + 65 if is_top(kast, weak=True): + 66 return CTerm.top() + 67 elif is_bottom(kast, weak=True): + 68 return CTerm.bottom() + 69 else: + 70 config, constraint = split_config_and_constraints(kast) + 71 constraints = flatten_label('#And', constraint) + 72 return CTerm(config, constraints)
+ + 73 +
+[docs] + 74 @staticmethod + 75 def from_dict(dct: dict[str, Any]) -> CTerm: + 76 """Deserialize a `CTerm` from its dictionary representation.""" + 77 config = KInner.from_dict(dct['config']) + 78 constraints = [KInner.from_dict(c) for c in dct['constraints']] + 79 return CTerm(config, constraints)
+ + 80 +
+[docs] + 81 @staticmethod + 82 def top() -> CTerm: + 83 """Construct a `CTerm` representing all possible states.""" + 84 return CTerm(mlTop(), ())
+ + 85 +
+[docs] + 86 @staticmethod + 87 def bottom() -> CTerm: + 88 """Construct a `CTerm` representing no possible states.""" + 89 return CTerm(mlBottom(), ())
+ + 90 + 91 @staticmethod + 92 def _check_config(config: KInner) -> None: + 93 if not isinstance(config, KApply) or not config.is_cell: + 94 raise ValueError(f'Expected cell label, found: {config}') + 95 + 96 @staticmethod + 97 def _normalize_constraints(constraints: Iterable[KInner]) -> tuple[KInner, ...]: + 98 constraints = sorted(normalize_constraints(constraints), key=CTerm._constraint_sort_key) + 99 return tuple(constraints) +100 +101 @property +102 def is_bottom(self) -> bool: +103 """Check if a given `CTerm` is trivially empty.""" +104 return is_bottom(self.config, weak=True) or any(is_bottom(cterm, weak=True) for cterm in self.constraints) +105 +106 @staticmethod +107 def _constraint_sort_key(term: KInner) -> tuple[int, str]: +108 term_str = str(term) +109 return (len(term_str), term_str) +110 +
+[docs] +111 def __iter__(self) -> Iterator[KInner]: +112 """Return an iterator with the head being the `config` and the tail being the `constraints`.""" +113 return chain([self.config], self.constraints)
+ +114 +
+[docs] +115 def to_dict(self) -> dict[str, Any]: +116 """Serialize a `CTerm` to dictionary representation.""" +117 return { +118 'config': self.config.to_dict(), +119 'constraints': [c.to_dict() for c in self.constraints], +120 }
+ +121 +122 @cached_property +123 def kast(self) -> KInner: +124 """Return the unstructured bare `KInner` representation of a `CTerm` (see `CTerm.from_kast`).""" +125 return mlAnd(self, GENERATED_TOP_CELL) +126 +127 @cached_property +128 def free_vars(self) -> frozenset[str]: +129 """Return the set of free variable names contained in this `CTerm`.""" +130 return frozenset(free_vars(self.kast)) +131 +132 @property +133 def hash(self) -> str: +134 """Unique hash representing the contents of this `CTerm`.""" +135 return self.kast.hash +136 +137 @cached_property +138 def cells(self) -> Subst: +139 """Return key-value store of the contents of each cell in the `config`.""" +140 _, subst = split_config_from(self.config) +141 return Subst(subst) +142 +
+[docs] +143 def cell(self, cell: str) -> KInner: +144 """Access the contents of a named cell in the `config`, die on failure.""" +145 return self.cells[cell]
+ +146 +
+[docs] +147 def try_cell(self, cell: str) -> KInner | None: +148 """Access the contents of a named cell in the `config`, return `None` on failure.""" +149 return self.cells.get(cell)
+ +150 +
+[docs] +151 def match(self, cterm: CTerm) -> Subst | None: +152 """Find `Subst` instantiating this `CTerm` to the other, return `None` if no such `Subst` exists.""" +153 csubst = self.match_with_constraint(cterm) +154 +155 if not csubst: +156 return None +157 +158 if csubst.constraint != mlTop(GENERATED_TOP_CELL): +159 return None +160 +161 return csubst.subst
+ +162 +
+[docs] +163 def match_with_constraint(self, cterm: CTerm) -> CSubst | None: +164 """Find `CSubst` instantiating this `CTerm` to the other, return `None` if no such `CSubst` exists.""" +165 subst = self.config.match(cterm.config) +166 +167 if subst is None: +168 return None +169 +170 source_constraints = [subst(c) for c in self.constraints] +171 constraints = [c for c in cterm.constraints if c not in source_constraints] +172 +173 return CSubst(subst, constraints)
+ +174 +175 @staticmethod +176 def _ml_impl(antecedents: Iterable[KInner], consequents: Iterable[KInner]) -> KInner: +177 antecedent = mlAnd(unique(antecedents), GENERATED_TOP_CELL) +178 consequent = mlAnd(unique(term for term in consequents if term not in set(antecedents)), GENERATED_TOP_CELL) +179 +180 if mlTop(GENERATED_TOP_CELL) in {antecedent, consequent}: +181 return consequent +182 +183 return mlImplies(antecedent, consequent, GENERATED_TOP_CELL) +184 +
+[docs] +185 def add_constraint(self, new_constraint: KInner) -> CTerm: +186 """Return a new `CTerm` with the additional constraints.""" +187 return CTerm(self.config, [new_constraint] + list(self.constraints))
+ +188 +
+[docs] +189 def anti_unify( +190 self, other: CTerm, keep_values: bool = False, kdef: KDefinition | None = None +191 ) -> tuple[CTerm, CSubst, CSubst]: +192 """Given two `CTerm` instances, find a more general `CTerm` which can instantiate to both. +193 +194 Args: +195 other: other `CTerm` to consider for finding a more general `CTerm` with this one. +196 keep_values: do not discard information about abstracted variables in returned result. +197 kdef (optional): `KDefinition` to make analysis more precise. +198 +199 Returns: +200 A tuple ``(cterm, csubst1, csubst2)`` where +201 +202 - ``cterm``: More general `CTerm` than either `self` or `other`. +203 - ``csubst1``: Constrained substitution to apply to `cterm` to obtain `self`. +204 - ``csubst2``: Constrained substitution to apply to `cterm` to obtain `other`. +205 """ +206 new_config, self_subst, other_subst = anti_unify(self.config, other.config, kdef=kdef) +207 # todo: It's not able to distinguish between constraints in different cterms, +208 # because variable names may be used inconsistently in different cterms. +209 common_constraints = [constraint for constraint in self.constraints if constraint in other.constraints] +210 self_unique_constraints = [ +211 ml_pred_to_bool(constraint) for constraint in self.constraints if constraint not in other.constraints +212 ] +213 other_unique_constraints = [ +214 ml_pred_to_bool(constraint) for constraint in other.constraints if constraint not in self.constraints +215 ] +216 +217 new_cterm = CTerm(config=new_config, constraints=()) +218 if keep_values: +219 disjunct_lhs = andBool([self_subst.pred] + self_unique_constraints) +220 disjunct_rhs = andBool([other_subst.pred] + other_unique_constraints) +221 if KToken('true', 'Bool') not in [disjunct_lhs, disjunct_rhs]: +222 new_cterm = new_cterm.add_constraint(mlEqualsTrue(orBool([disjunct_lhs, disjunct_rhs]))) +223 +224 new_constraints = remove_useless_constraints(common_constraints, new_cterm.free_vars) +225 +226 for constraint in new_constraints: +227 new_cterm = new_cterm.add_constraint(constraint) +228 self_csubst = new_cterm.match_with_constraint(self) +229 other_csubst = new_cterm.match_with_constraint(other) +230 if self_csubst is None or other_csubst is None: +231 raise ValueError( +232 f'Anti-unification failed to produce a more general state: {(new_cterm, (self, self_csubst), (other, other_csubst))}' +233 ) +234 return (new_cterm, self_csubst, other_csubst)
+ +235 +
+[docs] +236 def remove_useless_constraints(self, keep_vars: Iterable[str] = ()) -> CTerm: +237 """Return a new `CTerm` with constraints over unbound variables removed. +238 +239 Args: +240 keep_vars: List of variables to keep constraints for even if unbound in the `CTerm`. +241 +242 Returns: +243 A `CTerm` with the constraints over unbound variables removed. +244 """ +245 initial_vars = free_vars(self.config) | set(keep_vars) +246 new_constraints = remove_useless_constraints(self.constraints, initial_vars) +247 return CTerm(self.config, new_constraints)
+
+ +248 +249 +
+[docs] +250def anti_unify(state1: KInner, state2: KInner, kdef: KDefinition | None = None) -> tuple[KInner, Subst, Subst]: +251 """Return a generalized state over the two input states. +252 +253 Args: +254 state1: State to generalize over, represented as bare `KInner`. +255 state2: State to generalize over, represented as bare `KInner`. +256 kdef (optional): `KDefinition` to make the analysis more precise. +257 +258 Note: +259 Both `state1` and `state2` are expected to be bare configurations with no constraints attached. +260 +261 Returns: +262 A tuple ``(state, subst1, subst2)`` such that +263 +264 - ``state``: A symbolic state represented as `KInner` which is more general than `state1` or `state2`. +265 - ``subst1``: A `Subst` which, when applied to `state`, recovers `state1`. +266 - ``subst2``: A `Subst` which, when applied to `state`, recovers `state2`. +267 """ +268 +269 def _rewrites_to_abstractions(_kast: KInner) -> KInner: +270 if type(_kast) is KRewrite: +271 sort = kdef.sort(_kast) if kdef else None +272 return abstract_term_safely(_kast, sort=sort) +273 return _kast +274 +275 minimized_rewrite = push_down_rewrites(KRewrite(state1, state2)) +276 abstracted_state = bottom_up(_rewrites_to_abstractions, minimized_rewrite) +277 subst1 = abstracted_state.match(state1) +278 subst2 = abstracted_state.match(state2) +279 if subst1 is None or subst2 is None: +280 raise ValueError('Anti-unification failed to produce a more general state!') +281 return (abstracted_state, subst1, subst2)
+ +282 +283 +
+[docs] +284@dataclass(frozen=True, order=True) +285class CSubst: +286 """Store information about instantiation of a symbolic state (`CTerm`) to a more specific one. +287 +288 Contains the data: +289 - `subst`: assignment to apply to free variables in the state to achieve more specific one +290 - `constraints`: additional constraints over the free variables of the original state and the `subst` to add to the new state +291 """ +292 +293 subst: Subst +294 constraints: tuple[KInner, ...] +295 +
+[docs] +296 def __init__(self, subst: Subst | None = None, constraints: Iterable[KInner] = ()) -> None: +297 """Construct a new `CSubst` given a `Subst` and set of constraints as `KInner`, performing basic sanity checks.""" +298 object.__setattr__(self, 'subst', subst if subst is not None else Subst({})) +299 object.__setattr__(self, 'constraints', normalize_constraints(constraints))
+ +300 +
+[docs] +301 def __iter__(self) -> Iterator[Subst | KInner]: +302 """Return an iterator with the head being the `subst` and the tail being the `constraints`.""" +303 return chain([self.subst], self.constraints)
+ +304 +
+[docs] +305 def to_dict(self) -> dict[str, Any]: +306 """Serialize `CSubst` to dictionary representation.""" +307 return { +308 'subst': self.subst.to_dict(), +309 'constraints': [c.to_dict() for c in self.constraints], +310 }
+ +311 +
+[docs] +312 @staticmethod +313 def from_dict(dct: dict[str, Any]) -> CSubst: +314 """Deserialize `CSubst` from a dictionary representation.""" +315 subst = Subst.from_dict(dct['subst']) +316 constraints = (KInner.from_dict(c) for c in dct['constraints']) +317 return CSubst(subst=subst, constraints=constraints)
+ +318 +
+[docs] +319 @staticmethod +320 def from_pred(pred: KInner) -> CSubst: +321 """Extract from a boolean predicate a CSubst.""" +322 subst, pred = extract_subst(pred) +323 return CSubst(subst=subst, constraints=flatten_label('#And', pred))
+ +324 +
+[docs] +325 def pred(self, sort_with: KDefinition | None = None, subst: bool = True, constraints: bool = True) -> KInner: +326 """Return an ML predicate representing this substitution.""" +327 _preds: list[KInner] = [] +328 if subst: +329 for k, v in self.subst.minimize().items(): +330 sort = K +331 if sort_with is not None: +332 _sort = sort_with.sort(v) +333 sort = _sort if _sort is not None else sort +334 _preds.append(mlEquals(KVariable(k, sort=sort), v, arg_sort=sort)) +335 if constraints: +336 _preds.extend(self.constraints) +337 return mlAnd(_preds)
+ +338 +339 @property +340 def constraint(self) -> KInner: +341 """Return the set of constraints as a single flattened constraint using `mlAnd`.""" +342 return mlAnd(self.constraints) +343 +
+[docs] +344 def add_constraint(self, constraint: KInner) -> CSubst: +345 """Return this `CSubst` with an additional constraint added.""" +346 return CSubst(self.subst, list(self.constraints) + [constraint])
+ +347 +
+[docs] +348 def apply(self, cterm: CTerm) -> CTerm: +349 """Apply this `CSubst` to the given `CTerm` (instantiating the free variables, and adding the constraints).""" +350 config = self.subst(cterm.config) +351 constraints = [self.subst(constraint) for constraint in cterm.constraints] + list(self.constraints) +352 return CTerm(config, constraints)
+ +353 +
+[docs] +354 def __call__(self, cterm: CTerm) -> CTerm: +355 """Overload for `CSubst.apply`.""" +356 return self.apply(cterm)
+
+ +357 +358 +
+[docs] +359def cterm_build_claim( +360 claim_id: str, init_cterm: CTerm, final_cterm: CTerm, keep_vars: Iterable[str] = () +361) -> tuple[KClaim, Subst]: +362 """Return a `KClaim` between the supplied initial and final states. +363 +364 Args: +365 claim_id: Label to give the claim. +366 init_cterm: State to put on LHS of the rule (constraints interpreted as `requires` clause). +367 final_cterm: State to put on RHS of the rule (constraints interpreted as `ensures` clause). +368 keep_vars: Variables to leave in the side-conditions even if not bound in the configuration. +369 +370 Returns: +371 A tuple ``(claim, var_map)`` where +372 +373 - ``claim``: A `KClaim` with variable naming conventions applied +374 so that it should be parseable by the K Frontend. +375 - ``var_map``: The variable renamings applied to make the claim parseable by the K Frontend +376 (which can be undone to recover original variables). +377 """ +378 init_config, *init_constraints = init_cterm +379 final_config, *final_constraints = final_cterm +380 return build_claim(claim_id, init_config, final_config, init_constraints, final_constraints, keep_vars=keep_vars)
+ +381 +382 +
+[docs] +383def cterm_build_rule( +384 rule_id: str, +385 init_cterm: CTerm, +386 final_cterm: CTerm, +387 priority: int | None = None, +388 keep_vars: Iterable[str] = (), +389 defunc_with: KDefinition | None = None, +390) -> tuple[KRule, Subst]: +391 """Return a `KRule` between the supplied initial and final states. +392 +393 Args: +394 rule_id: Label to give the rule. +395 init_cterm: State to put on LHS of the rule (constraints interpreted as `requires` clause). +396 final_cterm: State to put on RHS of the rule (constraints interpreted as `ensures` clause). +397 keep_vars: Variables to leave in the side-conditions even if not bound in the configuration. +398 priority: Priority index to use for generated rules. +399 defunc_with (optional): KDefinition to be able to defunctionalize LHS appropriately. +400 +401 Returns: +402 A tuple ``(rule, var_map)`` where +403 +404 - ``rule``: A `KRule` with variable naming conventions applied +405 so that it should be parseable by the K Frontend. +406 - ``var_map``: The variable renamings applied to make the rule parseable by the K Frontend +407 (which can be undone to recover original variables). +408 """ +409 init_config, *init_constraints = init_cterm +410 final_config, *final_constraints = final_cterm +411 return build_rule( +412 rule_id, +413 init_config, +414 final_config, +415 init_constraints, +416 final_constraints, +417 priority, +418 keep_vars, +419 defunc_with=defunc_with, +420 )
+ +421 +422 +
+[docs] +423def cterms_anti_unify( +424 cterms: Iterable[CTerm], keep_values: bool = False, kdef: KDefinition | None = None +425) -> tuple[CTerm, list[CSubst]]: +426 """Given many `CTerm` instances, find a more general `CTerm` which can instantiate to all. +427 +428 Args: +429 cterms: `CTerm`s to consider for finding a more general `CTerm` with this one. +430 keep_values: do not discard information about abstracted variables in returned result. +431 kdef (optional): `KDefinition` to make analysis more precise. +432 +433 Returns: +434 A tuple ``(cterm, csubsts)`` where +435 +436 - ``cterm``: More general `CTerm` than any of the input `CTerm`s. +437 - ``csubsts``: List of `CSubst` which, when applied to `cterm`, yield the input `CTerm`s. +438 """ +439 # TODO: optimize this function, reduce useless auto-generated variables. +440 cterms = list(cterms) +441 if not cterms: +442 raise ValueError('Anti-unification failed, no CTerms provided') +443 merged_cterm = cterms[0] +444 for cterm in cterms[1:]: +445 merged_cterm = merged_cterm.anti_unify(cterm, keep_values, kdef)[0] +446 csubsts = [not_none(merged_cterm.match_with_constraint(cterm)) for cterm in cterms] +447 return merged_cterm, csubsts
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/cterm/symbolic.html b/pyk/_modules/pyk/cterm/symbolic.html new file mode 100644 index 00000000000..b4066ed9f8d --- /dev/null +++ b/pyk/_modules/pyk/cterm/symbolic.html @@ -0,0 +1,495 @@ + + + + + + + + pyk.cterm.symbolic — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.cterm.symbolic

+  1from __future__ import annotations
+  2
+  3import logging
+  4from contextlib import contextmanager
+  5from dataclasses import dataclass
+  6from typing import TYPE_CHECKING, NamedTuple, final
+  7
+  8from pyk.utils import not_none
+  9
+ 10from ..cterm import CSubst, CTerm
+ 11from ..kast.inner import KApply, KLabel, KRewrite, KToken, KVariable, Subst
+ 12from ..kast.manip import flatten_label, is_spurious_constraint, sort_ac_collections
+ 13from ..kast.pretty import PrettyPrinter
+ 14from ..konvert import kast_to_kore, kore_to_kast
+ 15from ..kore.rpc import (
+ 16    AbortedResult,
+ 17    KoreClient,
+ 18    KoreExecLogFormat,
+ 19    SatResult,
+ 20    SmtSolverError,
+ 21    StopReason,
+ 22    TransportType,
+ 23    UnknownResult,
+ 24    UnsatResult,
+ 25    kore_server,
+ 26)
+ 27from ..prelude.k import GENERATED_TOP_CELL, K_ITEM
+ 28from ..prelude.ml import mlAnd
+ 29
+ 30if TYPE_CHECKING:
+ 31    from collections.abc import Iterable, Iterator
+ 32    from pathlib import Path
+ 33    from typing import Final
+ 34
+ 35    from ..kast import KInner
+ 36    from ..kast.outer import KDefinition
+ 37    from ..kore.rpc import FallbackReason, LogEntry
+ 38    from ..kore.syntax import Pattern
+ 39    from ..utils import BugReport
+ 40
+ 41
+ 42_LOGGER: Final = logging.getLogger(__name__)
+ 43
+ 44
+
+[docs] + 45class NextState(NamedTuple): + 46 state: CTerm + 47 condition: KInner | None
+ + 48 + 49 +
+[docs] + 50class CTermExecute(NamedTuple): + 51 state: CTerm + 52 next_states: tuple[NextState, ...] + 53 depth: int + 54 vacuous: bool + 55 logs: tuple[LogEntry, ...]
+ + 56 + 57 +
+[docs] + 58class CTermImplies(NamedTuple): + 59 csubst: CSubst | None + 60 failing_cells: tuple[tuple[str, KInner], ...] + 61 remaining_implication: KInner | None + 62 logs: tuple[LogEntry, ...]
+ + 63 + 64 +
+[docs] + 65@final + 66@dataclass + 67class CTermSMTError(Exception): + 68 def __init__(self, message: str): + 69 super().__init__(message) + 70 self.message = message
+ + 71 + 72 +
+[docs] + 73class CTermSymbolic: + 74 _kore_client: KoreClient + 75 _definition: KDefinition + 76 _log_succ_rewrites: bool + 77 _log_fail_rewrites: bool + 78 + 79 def __init__( + 80 self, + 81 kore_client: KoreClient, + 82 definition: KDefinition, + 83 *, + 84 log_succ_rewrites: bool = True, + 85 log_fail_rewrites: bool = False, + 86 ): + 87 self._kore_client = kore_client + 88 self._definition = definition + 89 self._log_succ_rewrites = log_succ_rewrites + 90 self._log_fail_rewrites = log_fail_rewrites + 91 +
+[docs] + 92 def kast_to_kore(self, kinner: KInner) -> Pattern: + 93 return kast_to_kore(self._definition, kinner, sort=GENERATED_TOP_CELL)
+ + 94 +
+[docs] + 95 def kore_to_kast(self, pattern: Pattern) -> KInner: + 96 return kore_to_kast(self._definition, pattern)
+ + 97 +
+[docs] + 98 def execute( + 99 self, +100 cterm: CTerm, +101 depth: int | None = None, +102 cut_point_rules: Iterable[str] | None = None, +103 terminal_rules: Iterable[str] | None = None, +104 module_name: str | None = None, +105 ) -> CTermExecute: +106 +107 _LOGGER.debug(f'Executing: {cterm}') +108 kore = self.kast_to_kore(cterm.kast) +109 try: +110 response = self._kore_client.execute( +111 kore, +112 max_depth=depth, +113 cut_point_rules=cut_point_rules, +114 terminal_rules=terminal_rules, +115 module_name=module_name, +116 log_successful_rewrites=self._log_succ_rewrites, +117 log_failed_rewrites=self._log_succ_rewrites and self._log_fail_rewrites, +118 ) +119 except SmtSolverError as err: +120 raise self._smt_solver_error(err) from err +121 +122 if isinstance(response, AbortedResult): +123 unknown_predicate = response.unknown_predicate.text if response.unknown_predicate else None +124 raise ValueError(f'Backend responded with aborted state. Unknown predicate: {unknown_predicate}') +125 +126 state = CTerm.from_kast(self.kore_to_kast(response.state.kore)) +127 resp_next_states = response.next_states or () +128 branching_constraints = tuple( +129 self.kore_to_kast(not_none(s.rule_predicate)) if s.rule_predicate is not None else None +130 for s in resp_next_states +131 ) +132 next_states = tuple( +133 NextState(CTerm.from_kast(self.kore_to_kast(ns.kore)), c) +134 for ns, c in zip(resp_next_states, branching_constraints, strict=True) +135 ) +136 +137 assert all(not cterm.is_bottom for cterm, _ in next_states) +138 assert len(next_states) != 1 or response.reason is StopReason.CUT_POINT_RULE +139 +140 return CTermExecute( +141 state=state, +142 next_states=next_states, +143 depth=response.depth, +144 vacuous=response.reason is StopReason.VACUOUS, +145 logs=response.logs, +146 )
+ +147 +
+[docs] +148 def simplify(self, cterm: CTerm, module_name: str | None = None) -> tuple[CTerm, tuple[LogEntry, ...]]: +149 _LOGGER.debug(f'Simplifying: {cterm}') +150 kast_simplified, logs = self.kast_simplify(cterm.kast, module_name=module_name) +151 return CTerm.from_kast(kast_simplified), logs
+ +152 +
+[docs] +153 def kast_simplify(self, kast: KInner, module_name: str | None = None) -> tuple[KInner, tuple[LogEntry, ...]]: +154 _LOGGER.debug(f'Simplifying: {kast}') +155 kore = self.kast_to_kore(kast) +156 try: +157 kore_simplified, logs = self._kore_client.simplify(kore, module_name=module_name) +158 except SmtSolverError as err: +159 raise self._smt_solver_error(err) from err +160 +161 kast_simplified = self.kore_to_kast(kore_simplified) +162 return kast_simplified, logs
+ +163 +
+[docs] +164 def get_model(self, cterm: CTerm, module_name: str | None = None) -> Subst | None: +165 _LOGGER.debug(f'Getting model: {cterm}') +166 kore = self.kast_to_kore(cterm.kast) +167 try: +168 result = self._kore_client.get_model(kore, module_name=module_name) +169 except SmtSolverError as err: +170 raise self._smt_solver_error(err) from err +171 +172 if type(result) is UnknownResult: +173 _LOGGER.debug('Result is Unknown') +174 return None +175 elif type(result) is UnsatResult: +176 _LOGGER.debug('Result is UNSAT') +177 return None +178 elif type(result) is SatResult: +179 _LOGGER.debug('Result is SAT') +180 if not result.model: +181 return Subst({}) +182 model_subst = self.kore_to_kast(result.model) +183 try: +184 return Subst.from_pred(model_subst) +185 except ValueError as err: +186 raise AssertionError(f'Received a non-substitution from get-model endpoint: {model_subst}') from err +187 +188 else: +189 raise AssertionError('Received an invalid response from get-model endpoint')
+ +190 +
+[docs] +191 def implies( +192 self, +193 antecedent: CTerm, +194 consequent: CTerm, +195 bind_universally: bool = False, +196 failure_reason: bool = False, +197 module_name: str | None = None, +198 assume_defined: bool = False, +199 ) -> CTermImplies: +200 _LOGGER.debug(f'Checking implication: {antecedent} #Implies {consequent}') +201 _consequent = consequent.kast +202 unbound_consequent = [v for v in consequent.free_vars if v not in antecedent.free_vars] +203 if len(unbound_consequent) > 0: +204 bind_text, bind_label = ('existentially', '#Exists') +205 if bind_universally: +206 bind_text, bind_label = ('universally', '#Forall') +207 _LOGGER.debug(f'Binding variables in consequent {bind_text}: {unbound_consequent}') +208 for uc in unbound_consequent: +209 # Setting Sort1 to KItem in #Exists to avoid inferring the type of each uc. +210 # This should not have any effect on the resulting KORE pattern (\exists only has Sort2 as sort variable). +211 _consequent = KApply(KLabel(bind_label, [K_ITEM, GENERATED_TOP_CELL]), [KVariable(uc), _consequent]) +212 antecedent_kore = self.kast_to_kore(antecedent.kast) +213 consequent_kore = self.kast_to_kore(_consequent) +214 try: +215 result = self._kore_client.implies( +216 antecedent_kore, consequent_kore, module_name=module_name, assume_defined=assume_defined +217 ) +218 except SmtSolverError as err: +219 raise self._smt_solver_error(err) from err +220 +221 if not result.valid: +222 if result.substitution is not None: +223 _LOGGER.debug(f'Received a non-empty substitution for falsifiable implication: {result.substitution}') +224 if result.predicate is not None: +225 _LOGGER.debug(f'Received a non-empty predicate for falsifiable implication: {result.predicate}') +226 failing_cells: list[tuple[str, KInner]] = [] +227 remaining_implication: KInner | None = None +228 if failure_reason: +229 _config_match = self.implies( +230 CTerm.from_kast(antecedent.config), +231 CTerm.from_kast(consequent.config), +232 bind_universally=bind_universally, +233 failure_reason=False, +234 module_name=module_name, +235 assume_defined=assume_defined, +236 ) +237 config_match = _config_match.csubst +238 if config_match is None: +239 curr_cell_match = Subst({}) +240 for cell in antecedent.cells: +241 antecedent_cell = sort_ac_collections(antecedent.cell(cell)) +242 +243 if cell not in consequent.cells: +244 failing_cells.append((cell, KRewrite(antecedent_cell, KToken('.K', sort='KItem')))) +245 else: +246 consequent_cell = sort_ac_collections(consequent.cell(cell)) +247 cell_match = consequent_cell.match(antecedent_cell) +248 if cell_match is not None: +249 _curr_cell_match = curr_cell_match.union(cell_match) +250 if _curr_cell_match is not None: +251 curr_cell_match = _curr_cell_match +252 continue +253 failing_cells.append((cell, KRewrite(antecedent_cell, consequent_cell))) +254 else: +255 consequent_constraints = list( +256 filter( +257 lambda x: not is_spurious_constraint(x), +258 map(config_match.subst, consequent.constraints), +259 ) +260 ) +261 remaining_implication = CTerm._ml_impl(antecedent.constraints, consequent_constraints) +262 return CTermImplies(None, tuple(failing_cells), remaining_implication, result.logs) +263 +264 if result.substitution is None: +265 raise ValueError('Received empty substutition for valid implication.') +266 if result.predicate is None: +267 raise ValueError('Received empty predicate for valid implication.') +268 ml_subst = self.kore_to_kast(result.substitution) +269 ml_pred = self.kore_to_kast(result.predicate) +270 ml_subst_pred = 
mlAnd(flatten_label('#And', ml_subst) + flatten_label('#And', ml_pred)) +271 csubst = CSubst.from_pred(ml_subst_pred) +272 return CTermImplies(csubst, (), None, result.logs)
+ +273 +
+[docs] +274 def assume_defined(self, cterm: CTerm, module_name: str | None = None) -> CTerm: +275 _LOGGER.debug(f'Computing definedness condition for: {cterm}') +276 cterm_simplified, logs = self.simplify(cterm, module_name=module_name) +277 kast = KApply(KLabel('#Ceil', [GENERATED_TOP_CELL, GENERATED_TOP_CELL]), [cterm_simplified.config]) +278 kast_simplified, logs = self.kast_simplify(kast, module_name=module_name) +279 _LOGGER.debug(f'Definedness condition computed: {kast_simplified}') +280 return cterm.add_constraint(kast_simplified)
+ +281 +282 def _smt_solver_error(self, err: SmtSolverError) -> CTermSMTError: +283 kast = self.kore_to_kast(err.pattern) +284 pretty_pattern = PrettyPrinter(self._definition).print(kast) +285 return CTermSMTError(pretty_pattern)
+ +286 +287 +
+[docs] +288@contextmanager +289def cterm_symbolic( +290 definition: KDefinition, +291 definition_dir: Path, +292 *, +293 id: str | None = None, +294 port: int | None = None, +295 kore_rpc_command: str | Iterable[str] | None = None, +296 llvm_definition_dir: Path | None = None, +297 smt_timeout: int | None = None, +298 smt_retry_limit: int | None = None, +299 smt_tactic: str | None = None, +300 bug_report: BugReport | None = None, +301 haskell_log_format: KoreExecLogFormat = KoreExecLogFormat.ONELINE, +302 haskell_log_entries: Iterable[str] = (), +303 log_axioms_file: Path | None = None, +304 log_succ_rewrites: bool = True, +305 log_fail_rewrites: bool = False, +306 start_server: bool = True, +307 maude_port: int | None = None, +308 fallback_on: Iterable[FallbackReason] | None = None, +309 interim_simplification: int | None = None, +310 no_post_exec_simplify: bool = False, +311) -> Iterator[CTermSymbolic]: +312 if start_server: +313 # Old way of handling KoreServer, to be removed +314 with kore_server( +315 definition_dir=definition_dir, +316 llvm_definition_dir=llvm_definition_dir, +317 module_name=definition.main_module_name, +318 port=port, +319 command=kore_rpc_command, +320 bug_report=bug_report, +321 smt_timeout=smt_timeout, +322 smt_retry_limit=smt_retry_limit, +323 smt_tactic=smt_tactic, +324 haskell_log_format=haskell_log_format, +325 haskell_log_entries=haskell_log_entries, +326 log_axioms_file=log_axioms_file, +327 fallback_on=fallback_on, +328 interim_simplification=interim_simplification, +329 no_post_exec_simplify=no_post_exec_simplify, +330 ) as server: +331 with KoreClient('localhost', server.port, bug_report=bug_report, bug_report_id=id) as client: +332 yield CTermSymbolic( +333 client, definition, log_succ_rewrites=log_succ_rewrites, log_fail_rewrites=log_fail_rewrites +334 ) +335 else: +336 if port is None: +337 raise ValueError('Missing port with start_server=False') +338 if maude_port is None: +339 dispatch = None +340 else: +341 dispatch = { +342 'execute': [('localhost', maude_port, TransportType.HTTP)], +343 'simplify': [('localhost', maude_port, TransportType.HTTP)], +344 'add-module': [ +345 ('localhost', maude_port, TransportType.HTTP), +346 ('localhost', port, TransportType.SINGLE_SOCKET), +347 ], +348 } +349 with KoreClient('localhost', port, bug_report=bug_report, bug_report_id=id, dispatch=dispatch) as client: +350 yield CTermSymbolic( +351 client, definition, log_succ_rewrites=log_succ_rewrites, log_fail_rewrites=log_fail_rewrites +352 )
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/dequote.html b/pyk/_modules/pyk/dequote.html new file mode 100644 index 00000000000..df1590a7f55 --- /dev/null +++ b/pyk/_modules/pyk/dequote.html @@ -0,0 +1,248 @@ + + + + + + + + pyk.dequote — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.dequote

+  1from __future__ import annotations
+  2
+  3from typing import TYPE_CHECKING
+  4
+  5if TYPE_CHECKING:
+  6    from collections.abc import Iterable, Iterator
+  7    from typing import Final
+  8
+  9
+
+[docs] + 10def enquote_string(s: str) -> str: + 11 return ''.join(enquoted(s))
+ + 12 + 13 +
+[docs] + 14def dequote_string(s: str) -> str: + 15 return ''.join(dequoted(s))
+ + 16 + 17 +
+[docs] + 18def enquote_bytes(s: str) -> str: + 19 return ''.join(enquoted(s, allow_unicode=False))
+ + 20 + 21 +
+[docs] + 22def dequote_bytes(s: str) -> str: + 23 return ''.join(dequoted(s, allow_unicode=False))
+ + 24 + 25 +
+[docs] + 26def bytes_encode(s: str) -> bytes: + 27 return s.encode('latin-1')
+ + 28 + 29 +
+[docs] + 30def bytes_decode(b: bytes) -> str: + 31 return b.decode('latin-1')
+ + 32 + 33 + 34NORMAL = 1 + 35ESCAPE = 2 + 36CPOINT = 3 + 37 + 38ESCAPE_TABLE: Final = { + 39 '"': '"', + 40 '\\': '\\', + 41 'n': '\n', + 42 't': '\t', + 43 'r': '\r', + 44 'f': '\f', + 45} + 46 + 47CPOINT_TABLE: Final = { + 48 'x': 2, + 49 'u': 4, + 50 'U': 8, + 51} + 52 + 53HEX_TABLE = {c: int(c, 16) for c in '0123456789abcdefABCDEF'} + 54 + 55 +
+[docs] + 56def dequoted(it: Iterable[str], *, allow_unicode: bool = True) -> Iterator[str]: + 57 acc = 0 + 58 cnt = 0 + 59 state = NORMAL + 60 for c in it: + 61 if state == CPOINT: + 62 if c not in HEX_TABLE: + 63 raise ValueError(f'Expected hex digit, got: {c}') + 64 + 65 acc *= 16 + 66 acc += HEX_TABLE[c] + 67 cnt -= 1 + 68 if cnt == 0: + 69 yield chr(acc) + 70 acc = 0 + 71 state = NORMAL + 72 + 73 elif state == ESCAPE: + 74 if c in CPOINT_TABLE: + 75 if not allow_unicode and c != 'x': + 76 raise ValueError(fr'Unicode escape sequence not allowed: \{c}') + 77 cnt = CPOINT_TABLE[c] + 78 state = CPOINT + 79 elif c in ESCAPE_TABLE: + 80 yield ESCAPE_TABLE[c] + 81 state = NORMAL + 82 else: + 83 raise ValueError(fr'Unexpected escape sequence: \{c}') + 84 + 85 elif c == '\\': + 86 state = ESCAPE + 87 + 88 else: + 89 yield c + 90 + 91 if state == CPOINT: + 92 raise ValueError('Incomplete Unicode code point') + 93 elif state == ESCAPE: + 94 raise ValueError('Incomplete escape sequence')
+ + 95 + 96 + 97ENQUOTE_TABLE: Final = { + 98 ord('\t'): r'\t', # 9 + 99 ord('\n'): r'\n', # 10 +100 ord('\f'): r'\f', # 12 +101 ord('\r'): r'\r', # 13 +102 ord('"'): r'\"', # 34 +103 ord('\\'): r'\\', # 92 +104} +105 +106 +
+[docs] +107def enquoted(it: Iterable[str], *, allow_unicode: bool = True) -> Iterator[str]: +108 for c in it: +109 code = ord(c) +110 if code in ENQUOTE_TABLE: +111 yield ENQUOTE_TABLE[code] +112 elif 32 <= code < 127: +113 yield c +114 elif code <= 0xFF: +115 yield fr'\x{code:02x}' +116 elif not allow_unicode: +117 raise ValueError(f"Unicode character not allowed: '{c}' ({code})") +118 elif code <= 0xFFFF: +119 yield fr'\u{code:04x}' +120 elif code <= 0xFFFFFFFF: +121 yield fr'\U{code:08x}' +122 else: +123 raise ValueError(f"Unsupported character: '{c}' ({code})")
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/att.html b/pyk/_modules/pyk/kast/att.html new file mode 100644 index 00000000000..c0964dcc5a8 --- /dev/null +++ b/pyk/_modules/pyk/kast/att.html @@ -0,0 +1,789 @@ + + + + + + + + pyk.kast.att — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.att

+  1from __future__ import annotations
+  2
+  3import logging
+  4import re
+  5from abc import ABC, abstractmethod
+  6from collections.abc import Mapping
+  7from dataclasses import dataclass, field
+  8from functools import cache
+  9from itertools import chain
+ 10from pathlib import Path
+ 11from typing import ClassVar  # noqa: TC003
+ 12from typing import TYPE_CHECKING, Any, Generic, TypeVar, final, overload
+ 13
+ 14from ..utils import FrozenDict
+ 15from .color import Color
+ 16from .kast import KAst
+ 17
+ 18if TYPE_CHECKING:
+ 19    from collections.abc import Callable, Container, Iterable, Iterator
+ 20    from typing import Final
+ 21
+ 22    U = TypeVar('U')
+ 23    W = TypeVar('W', bound='WithKAtt')
+ 24
+ 25
+ 26T = TypeVar('T')
+ 27_LOGGER: Final = logging.getLogger(__name__)
+ 28
+ 29
+
+[docs] + 30class AttType(Generic[T], ABC): +
+[docs] + 31 @abstractmethod + 32 def from_dict(self, obj: Any) -> T: ...
+ + 33 +
+[docs] + 34 @abstractmethod + 35 def to_dict(self, value: T) -> Any: ...
+ + 36 +
+[docs] + 37 @abstractmethod + 38 def unparse(self, value: T) -> str | None: ...
+ + 39 +
+[docs] + 40 @abstractmethod + 41 def parse(self, text: str) -> T: ...
+
+ + 42 + 43 +
+[docs] + 44class NoneType(AttType[None]): +
+[docs] + 45 def from_dict(self, obj: Any) -> None: + 46 assert obj == '' + 47 return None
+ + 48 +
+[docs] + 49 def to_dict(self, value: None) -> Any: + 50 assert value is None + 51 return ''
+ + 52 +
+[docs] + 53 def unparse(self, value: None) -> None: + 54 return None
+ + 55 +
+[docs] + 56 def parse(self, text: str) -> None: + 57 assert text == '' + 58 return None
+
+ + 59 + 60 +
+[docs] + 61class OptionalType(Generic[T], AttType[T | None]): + 62 _value_type: AttType[T] + 63 + 64 def __init__(self, value_type: AttType[T]): + 65 self._value_type = value_type + 66 +
+[docs] + 67 def from_dict(self, obj: Any) -> T | None: + 68 if obj == '': + 69 return None + 70 return self._value_type.from_dict(obj)
+ + 71 +
+[docs] + 72 def to_dict(self, value: T | None) -> Any: + 73 if value is None: + 74 return '' + 75 return self._value_type.to_dict(value)
+ + 76 +
+[docs] + 77 def unparse(self, value: T | None) -> str | None: + 78 if value is None: + 79 return None + 80 return self._value_type.unparse(value)
+ + 81 +
+[docs] + 82 def parse(self, text: str) -> T | None: + 83 if text == '': + 84 return None + 85 return self._value_type.parse(text)
+
+ + 86 + 87 +
+[docs] + 88class AnyType(AttType[Any]): +
+[docs] + 89 def from_dict(self, obj: Any) -> Any: + 90 return self._freeze(obj)
+ + 91 +
+[docs] + 92 def to_dict(self, value: Any) -> Any: + 93 return self._unfreeze(value)
+ + 94 +
+[docs] + 95 def unparse(self, value: Any) -> str: + 96 return str(value)
+ + 97 +
+[docs] + 98 def parse(self, text: str) -> Any: + 99 raise ValueError(f'Parsing a string into an Any attribute type is not supported. Attempted to parse: {text!r}')
+ +100 +101 @staticmethod +102 def _freeze(obj: Any) -> Any: +103 if isinstance(obj, list): +104 return tuple(AnyType._freeze(v) for v in obj) +105 if isinstance(obj, dict): +106 return FrozenDict((k, AnyType._freeze(v)) for (k, v) in obj.items()) +107 return obj +108 +109 @staticmethod +110 def _unfreeze(value: Any) -> Any: +111 if isinstance(value, tuple): +112 return [AnyType._unfreeze(v) for v in value] +113 if isinstance(value, FrozenDict): +114 return {k: AnyType._unfreeze(v) for (k, v) in value.items()} +115 return value
+ +116 +117 +
+[docs] +118class IntType(AttType[int]): +
+[docs] +119 def from_dict(self, obj: Any) -> int: +120 assert isinstance(obj, str) +121 return int(obj)
+ +122 +
+[docs] +123 def to_dict(self, value: int) -> str: +124 return str(value)
+ +125 +
+[docs] +126 def unparse(self, value: int) -> str: +127 return str(value)
+ +128 +
+[docs] +129 def parse(self, text: str) -> int: +130 return int(text)
+
+ +131 +132 +
+[docs] +133class StrType(AttType[str]): +
+[docs] +134 def from_dict(self, obj: Any) -> str: +135 assert isinstance(obj, str) +136 return obj
+ +137 +
+[docs] +138 def to_dict(self, value: str) -> Any: +139 return value
+ +140 +
+[docs] +141 def unparse(self, value: str) -> str: +142 return f'"{value}"'
+ +143 +
+[docs] +144 def parse(self, text: str) -> str: +145 return text
+
+ +146 +147 +
+[docs] +148class LocationType(AttType[tuple[int, int, int, int]]): +149 _PARSE_REGEX: Final = re.compile('(\\d+),(\\d+),(\\d+),(\\d+)') +150 +
+[docs] +151 def from_dict(self, obj: Any) -> tuple[int, int, int, int]: +152 assert isinstance(obj, list) +153 a, b, c, d = obj +154 assert isinstance(a, int) +155 assert isinstance(b, int) +156 assert isinstance(c, int) +157 assert isinstance(d, int) +158 return a, b, c, d
+ +159 +
+[docs] +160 def to_dict(self, value: tuple[int, int, int, int]) -> Any: +161 return list(value)
+ +162 +
+[docs] +163 def unparse(self, value: tuple[int, int, int, int]) -> str: +164 return ','.join(str(e) for e in value)
+ +165 +
+[docs] +166 def parse(self, text: str) -> tuple[int, int, int, int]: +167 m = self._PARSE_REGEX.fullmatch(text) +168 assert m is not None +169 a, b, c, d = (int(x) for x in m.groups()) +170 return a, b, c, d
+
+ +171 +172 +
+[docs] +173class PathType(AttType[Path]): +
+[docs] +174 def from_dict(self, obj: Any) -> Path: +175 assert isinstance(obj, str) +176 return Path(obj)
+ +177 +
+[docs] +178 def to_dict(self, value: Path) -> Any: +179 return str(value)
+ +180 +
+[docs] +181 def unparse(self, value: Path) -> str: +182 return f'"{value}"'
+ +183 +
+[docs] +184 def parse(self, text: str) -> Path: +185 return Path(text)
+
+ +186 +187 +
+[docs] +188@final +189@dataclass(frozen=True) +190class Format: +191 tokens: tuple[str, ...] +192 +193 _pattern: ClassVar[re.Pattern] = re.compile(r'%\D|%\d+|[^%]+') +194 +195 def __init__(self, tokens: Iterable[str] = ()): +196 object.__setattr__(self, 'tokens', tuple(tokens)) +197 +
+[docs] +198 @classmethod +199 def parse(cls, s: str) -> Format: +200 matches = list(cls._pattern.finditer(s)) +201 +202 matched_len: int +203 if not matches: +204 matched_len = 0 +205 else: +206 _, matched_len = matches[-1].span() +207 +208 if matched_len != len(s): +209 assert s and s[-1] == '%' +210 raise ValueError(f'Incomplete escape sequence at the end of format string: {s}') +211 +212 return Format(m[0] for m in matches)
+ +213 +
+[docs] +214 def unparse(self) -> str: +215 return ''.join(self.tokens)
+
+ +216 +217 +
+[docs] +218class FormatType(AttType[Format]): +
+[docs] +219 def from_dict(self, obj: Any) -> Format: +220 assert isinstance(obj, str) +221 return Format.parse(obj)
+ +222 +
+[docs] +223 def to_dict(self, value: Format) -> Any: +224 return value.unparse()
+ +225 +
+
[docs] +226 def unparse(self, value: Format) -> str: +227 return f'"{value.unparse()}"'
+ +228 +
+[docs] +229 def parse(self, text: str) -> Format: +230 return Format.parse(text)
+
+ +231 +232 +
+[docs] +233class ColorType(AttType[Color]): +
+[docs] +234 def from_dict(self, obj: Any) -> Color: +235 assert isinstance(obj, str) +236 return Color(obj)
+ +237 +
+[docs] +238 def to_dict(self, value: Color) -> str: +239 return value.value
+ +240 +
+[docs] +241 def unparse(self, value: Color) -> str: +242 return value.value
+ +243 +
+[docs] +244 def parse(self, text: str) -> Color: +245 return Color(text)
+
+ +246 +247 +
+[docs] +248class ColorsType(AttType[tuple[Color, ...]]): +
+[docs] +249 def from_dict(self, obj: Any) -> tuple[Color, ...]: +250 assert isinstance(obj, str) +251 return self.parse(obj)
+ +252 +
+[docs] +253 def to_dict(self, value: tuple[Color, ...]) -> str: +254 return self.unparse(value)
+ +255 +
+[docs] +256 def unparse(self, value: tuple[Color, ...]) -> str: +257 return ','.join(v.value for v in value)
+ +258 +
+[docs] +259 def parse(self, text: str) -> tuple[Color, ...]: +260 return tuple(Color(color) for color in text.replace(' ', '').split(','))
+
+ +261 +262 +263_NONE: Final = NoneType() +264_ANY: Final = AnyType() +265_INT: Final = IntType() +266_STR: Final = StrType() +267_LOCATION: Final = LocationType() +268_PATH: Final = PathType() +269 +270 +
+[docs] +271@final +272@dataclass(frozen=True) +273class AttKey(Generic[T]): +274 name: str +275 type: AttType[T] = field(compare=False, repr=False, kw_only=True) +276 +277 def __call__(self, value: T) -> AttEntry[T]: +278 return AttEntry(self, value)
+ +279 +280 +
+[docs] +281@final +282@dataclass(frozen=True) +283class AttEntry(Generic[T]): +284 key: AttKey[T] +285 value: T
+ +286 +287 +
+[docs] +288class Atts: +289 ALIAS: Final = AttKey('alias', type=_NONE) +290 ALIAS_REC: Final = AttKey('alias-rec', type=_NONE) +291 ANYWHERE: Final = AttKey('anywhere', type=_NONE) +292 ASSOC: Final = AttKey('assoc', type=_NONE) +293 AVOID: Final = AttKey('avoid', type=_NONE) +294 BRACKET: Final = AttKey('bracket', type=_NONE) +295 BRACKET_LABEL: Final = AttKey('bracketLabel', type=_ANY) +296 CIRCULARITY: Final = AttKey('circularity', type=_NONE) +297 CELL: Final = AttKey('cell', type=_NONE) +298 CELL_COLLECTION: Final = AttKey('cellCollection', type=_NONE) +299 CELL_FRAGMENT: Final = AttKey('cellFragment', type=_ANY) +300 CELL_NAME: Final = AttKey('cellName', type=_STR) +301 CELL_OPT_ABSENT: Final = AttKey('cellOptAbsent', type=_ANY) +302 COLOR: Final = AttKey('color', type=ColorType()) +303 COLORS: Final = AttKey('colors', type=ColorsType()) +304 COMM: Final = AttKey('comm', type=_NONE) +305 CONCAT: Final = AttKey('concat', type=_ANY) +306 CONCRETE: Final = AttKey('concrete', type=OptionalType(_STR)) +307 CONSTRUCTOR: Final = AttKey('constructor', type=_NONE) +308 DEPENDS: Final = AttKey('depends', type=_ANY) +309 DIGEST: Final = AttKey('digest', type=_ANY) +310 ELEMENT: Final = AttKey('element', type=_ANY) +311 EXIT: Final = AttKey('exit', type=_ANY) +312 FORMAT: Final = AttKey('format', type=FormatType()) +313 FRESH_GENERATOR: Final = AttKey('freshGenerator', type=_NONE) +314 FUNCTION: Final = AttKey('function', type=_NONE) +315 FUNCTIONAL: Final = AttKey('functional', type=_NONE) +316 GROUP: Final = AttKey('group', type=_STR) +317 HAS_DOMAIN_VALUES: Final = AttKey('hasDomainValues', type=_NONE) +318 HOOK: Final = AttKey('hook', type=_ANY) +319 IDEM: Final = AttKey('idem', type=_NONE) +320 IMPURE: Final = AttKey('impure', type=_NONE) +321 INDEX: Final = AttKey('index', type=_INT) +322 INITIALIZER: Final = AttKey('initializer', type=_NONE) +323 INJECTIVE: Final = AttKey('injective', type=_NONE) +324 LABEL: Final = AttKey('label', type=_ANY) +325 LEFT: Final = AttKey('left', type=_ANY) # LEFT and LEFT_INTERNAL on the Frontend +326 LOCATION: Final = AttKey('org.kframework.attributes.Location', type=_LOCATION) +327 MACRO: Final = AttKey('macro', type=_NONE) +328 MACRO_REC: Final = AttKey('macro-rec', type=_NONE) +329 MAINCELL: Final = AttKey('maincell', type=_NONE) +330 MULTIPLICITY: Final = AttKey('multiplicity', type=_ANY) +331 NO_EVALUATORS: Final = AttKey('no-evaluators', type=_NONE) +332 OVERLOAD: Final = AttKey('overload', type=_STR) +333 OWISE: Final = AttKey('owise', type=_NONE) +334 PREDICATE: Final = AttKey('predicate', type=_ANY) +335 PREFER: Final = AttKey('prefer', type=_NONE) +336 PRIORITY: Final = AttKey('priority', type=_ANY) +337 PRIORITIES: Final = AttKey('priorities', type=_ANY) # only in KORE output +338 PRIVATE: Final = AttKey('private', type=_NONE) +339 PRODUCTION: Final = AttKey('org.kframework.definition.Production', type=_ANY) +340 PROJECTION: Final = AttKey('projection', type=_NONE) +341 RIGHT: Final = AttKey('right', type=_ANY) # RIGHT and RIGHT_INTERNAL on the Frontend +342 RETURNS_UNIT: Final = AttKey('returnsUnit', type=_NONE) +343 SIMPLIFICATION: Final = AttKey('simplification', type=_ANY) +344 SEQSTRICT: Final = AttKey('seqstrict', type=_ANY) +345 SORT: Final = AttKey('org.kframework.kore.Sort', type=_ANY) +346 SOURCE: Final = AttKey('org.kframework.attributes.Source', type=_PATH) +347 SMTLEMMA: Final = AttKey('smt-lemma', type=_NONE) +348 STRICT: Final = AttKey('strict', type=_ANY) +349 SYMBOL: Final = AttKey('symbol', type=_STR) +350 SYNTAX_MODULE: Final 
= AttKey('syntaxModule', type=_STR) +351 SYMBOLIC: Final = AttKey('symbolic', type=OptionalType(_STR)) +352 TERMINALS: Final = AttKey('terminals', type=_STR) +353 TERMINATOR_SYMBOL: Final = AttKey('terminator-symbol', type=_ANY) +354 TOKEN: Final = AttKey('token', type=_NONE) +355 TOTAL: Final = AttKey('total', type=_NONE) +356 TRUSTED: Final = AttKey('trusted', type=_NONE) +357 TYPE: Final = AttKey('type', type=_ANY) +358 UNIT: Final = AttKey('unit', type=_STR) +359 UNIQUE_ID: Final = AttKey('UNIQUE_ID', type=_ANY) +360 UNPARSE_AVOID: Final = AttKey('unparseAvoid', type=_NONE) +361 UPDATE: Final = AttKey('update', type=_ANY) +362 USER_LIST: Final = AttKey('userList', type=_ANY) +363 WRAP_ELEMENT: Final = AttKey('wrapElement', type=_ANY) +364 +
+[docs] +365 @classmethod +366 @cache +367 def keys(cls) -> FrozenDict[str, AttKey]: +368 keys = [value for value in vars(cls).values() if isinstance(value, AttKey)] +369 res: FrozenDict[str, AttKey] = FrozenDict({key.name: key for key in keys}) +370 assert len(res) == len(keys) # Fails on duplicate key name +371 return res
+
+ +372 +373 +
+[docs] +374@final +375@dataclass(frozen=True) +376class KAtt(KAst, Mapping[AttKey, Any]): +377 atts: FrozenDict[AttKey, Any] +378 +379 def __init__(self, entries: Iterable[AttEntry] = ()): +380 atts: FrozenDict[AttKey, Any] = FrozenDict((e.key, e.value) for e in entries) +381 object.__setattr__(self, 'atts', atts) +382 +383 def __iter__(self) -> Iterator[AttKey]: +384 return iter(self.atts) +385 +386 def __len__(self) -> int: +387 return len(self.atts) +388 +389 def __getitem__(self, key: AttKey[T]) -> T: +390 return self.atts[key] +391 +392 @overload +393 def get(self, key: AttKey[T], /) -> T | None: ... +394 +395 @overload +396 def get(self, key: AttKey[T], /, default: U) -> T | U: ... +397 +
+[docs] +398 def get(self, *args: Any, **kwargs: Any) -> Any: +399 return self.atts.get(*args, **kwargs)
+ +400 +
+[docs] +401 def entries(self) -> Iterator[AttEntry]: +402 return (key(value) for key, value in self.atts.items())
+ +403 +
+[docs] +404 @classmethod +405 def from_dict(cls: type[KAtt], d: Mapping[str, Any]) -> KAtt: +406 entries: list[AttEntry] = [] +407 for k, v in d['att'].items(): +408 key = Atts.keys().get(k, AttKey(k, type=_ANY)) +409 value = key.type.from_dict(v) +410 entries.append(key(value)) +411 return KAtt(entries=entries)
+ +412 +
+[docs] +413 def to_dict(self) -> dict[str, Any]: +414 return {'node': 'KAtt', 'att': {key.name: key.type.to_dict(value) for key, value in self.atts.items()}}
+ +415 +
+[docs] +416 @classmethod +417 def parse(cls: type[KAtt], d: Mapping[str, str]) -> KAtt: +418 entries: list[AttEntry] = [] +419 for k, v in d.items(): +420 key = Atts.keys().get(k, AttKey(k, type=_ANY)) +421 value = key.type.parse(v) +422 entries.append(key(value)) +423 return KAtt(entries=entries)
+ +424 +425 @property +426 def pretty(self) -> str: +427 if not self: +428 return '' +429 att_strs: list[str] = [] +430 for key, value in self.items(): +431 value_str = key.type.unparse(value) +432 if value_str is None: +433 att_strs.append(key.name) +434 else: +435 att_strs.append(f'{key.name}({value_str})') +436 return f'[{", ".join(att_strs)}]' +437 +
+[docs] +438 def update(self, entries: Iterable[AttEntry]) -> KAtt: +439 entries = chain((AttEntry(key, value) for key, value in self.atts.items()), entries) +440 return KAtt(entries=entries)
+ +441 +
+[docs] +442 def discard(self, keys: Container[AttKey]) -> KAtt: +443 entries = (AttEntry(key, value) for key, value in self.atts.items() if key not in keys) +444 return KAtt(entries=entries)
+ +445 +
+[docs] +446 def drop_source(self) -> KAtt: +447 return self.discard([Atts.SOURCE, Atts.LOCATION])
+
+ +448 +449 +450EMPTY_ATT: Final = KAtt() +451 +452 +
+[docs] +453class WithKAtt(ABC): +454 att: KAtt +455 +
+[docs] +456 @abstractmethod +457 def let_att(self: W, att: KAtt) -> W: ...
+ +458 +
+[docs] +459 def map_att(self: W, f: Callable[[KAtt], KAtt]) -> W: +460 return self.let_att(att=f(self.att))
+ +461 +
+[docs] +462 def update_atts(self: W, entries: Iterable[AttEntry]) -> W: +463 return self.let_att(att=self.att.update(entries))
+
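As a quick orientation, here is a small, hypothetical usage sketch of the attribute API above (assuming `pyk` is importable; the attribute values are made up):

from pyk.kast.att import Atts, KAtt

# An AttKey is callable and produces an AttEntry; KAtt collects entries.
att = KAtt(entries=[Atts.LABEL('foo'), Atts.FUNCTION(None), Atts.UNIT('nil')])

assert att[Atts.UNIT] == 'nil'
assert Atts.FUNCTION in att

# pretty unparses each entry with its AttType; None-typed keys print bare.
print(att.pretty)  # e.g. [label(foo), function, unit("nil")]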
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/color.html b/pyk/_modules/pyk/kast/color.html new file mode 100644 index 00000000000..0fa9021aee2 --- /dev/null +++ b/pyk/_modules/pyk/kast/color.html @@ -0,0 +1,505 @@ + + + + + + + + pyk.kast.color — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.color

+  1from __future__ import annotations
+  2
+  3import sys
+  4from enum import Enum
+  5from typing import TYPE_CHECKING
+  6
+  7if TYPE_CHECKING:
+  8    from typing import IO, Final
+  9
+ 10
+
+[docs] + 11class Color(Enum): + 12 ALICE_BLUE = 'AliceBlue' + 13 ANTIQUE_WHITE = 'AntiqueWhite' + 14 APRICOT = 'Apricot' + 15 AQUA = 'Aqua' + 16 AQUAMARINE = 'Aquamarine' + 17 AZURE = 'Azure' + 18 BEIGE = 'Beige' + 19 BISQUE = 'Bisque' + 20 BITTERSWEET = 'Bittersweet' + 21 BLACK = 'black' + 22 BLANCHED_ALMOND = 'BlanchedAlmond' + 23 BLUE = 'blue' + 24 BLUE_GREEN = 'BlueGreen' + 25 BLUE_VIOLET = 'BlueViolet' + 26 BRICK_RED = 'BrickRed' + 27 BROWN = 'brown' + 28 BURLY_WOOD = 'BurlyWood' + 29 BURNT_ORANGE = 'BurntOrange' + 30 CADET_BLUE = 'CadetBlue' + 31 CARNATION_PINK = 'CarnationPink' + 32 CERULEAN = 'Cerulean' + 33 CHARTREUSE = 'Chartreuse' + 34 CHOCOLATE = 'Chocolate' + 35 CORAL = 'Coral' + 36 CORNFLOWER_BLUE = 'CornflowerBlue' + 37 CORNSILK = 'Cornsilk' + 38 CRIMSON = 'Crimson' + 39 CYAN = 'cyan' + 40 DANDELION = 'Dandelion' + 41 DARKGRAY = 'darkgray' + 42 DARK_BLUE = 'DarkBlue' + 43 DARK_CYAN = 'DarkCyan' + 44 DARK_GOLDENROD = 'DarkGoldenrod' + 45 DARK_GRAY = 'DarkGray' + 46 DARK_GREEN = 'DarkGreen' + 47 DARK_GREY = 'DarkGrey' + 48 DARK_KHAKI = 'DarkKhaki' + 49 DARK_MAGENTA = 'DarkMagenta' + 50 DARK_OLIVE_GREEN = 'DarkOliveGreen' + 51 DARK_ORANGE = 'DarkOrange' + 52 DARK_ORCHID = 'DarkOrchid' + 53 DARK_RED = 'DarkRed' + 54 DARK_SALMON = 'DarkSalmon' + 55 DARK_SEA_GREEN = 'DarkSeaGreen' + 56 DARK_SLATE_BLUE = 'DarkSlateBlue' + 57 DARK_SLATE_GRAY = 'DarkSlateGray' + 58 DARK_SLATE_GREY = 'DarkSlateGrey' + 59 DARK_TURQUOISE = 'DarkTurquoise' + 60 DARK_VIOLET = 'DarkViolet' + 61 DEEP_PINK = 'DeepPink' + 62 DEEP_SKY_BLUE = 'DeepSkyBlue' + 63 DIM_GRAY = 'DimGray' + 64 DIM_GREY = 'DimGrey' + 65 DODGER_BLUE = 'DodgerBlue' + 66 EMERALD = 'Emerald' + 67 FIRE_BRICK = 'FireBrick' + 68 FLORAL_WHITE = 'FloralWhite' + 69 FOREST_GREEN = 'ForestGreen' + 70 FUCHSIA = 'Fuchsia' + 71 GAINSBORO = 'Gainsboro' + 72 GHOST_WHITE = 'GhostWhite' + 73 GOLD = 'Gold' + 74 GOLDENROD = 'Goldenrod' + 75 GRAY = 'gray' + 76 GREEN = 'green' + 77 GREEN_YELLOW = 'GreenYellow' + 78 GREY = 'Grey' + 79 HONEYDEW = 'Honeydew' + 80 HOT_PINK = 'HotPink' + 81 INDIAN_RED = 'IndianRed' + 82 INDIGO = 'Indigo' + 83 IVORY = 'Ivory' + 84 JUNGLE_GREEN = 'JungleGreen' + 85 KHAKI = 'Khaki' + 86 LAVENDER = 'Lavender' + 87 LAVENDER_BLUSH = 'LavenderBlush' + 88 LAWN_GREEN = 'LawnGreen' + 89 LEMON_CHIFFON = 'LemonChiffon' + 90 LIGHTGRAY = 'lightgray' + 91 LIGHT_BLUE = 'LightBlue' + 92 LIGHT_CORAL = 'LightCoral' + 93 LIGHT_CYAN = 'LightCyan' + 94 LIGHT_GOLDENROD = 'LightGoldenrod' + 95 LIGHT_GOLDENROD_YELLOW = 'LightGoldenrodYellow' + 96 LIGHT_GRAY = 'LightGray' + 97 LIGHT_GREEN = 'LightGreen' + 98 LIGHT_GREY = 'LightGrey' + 99 LIGHT_PINK = 'LightPink' +100 LIGHT_SALMON = 'LightSalmon' +101 LIGHT_SEA_GREEN = 'LightSeaGreen' +102 LIGHT_SKY_BLUE = 'LightSkyBlue' +103 LIGHT_SLATE_BLUE = 'LightSlateBlue' +104 LIGHT_SLATE_GRAY = 'LightSlateGray' +105 LIGHT_SLATE_GREY = 'LightSlateGrey' +106 LIGHT_STEEL_BLUE = 'LightSteelBlue' +107 LIGHT_YELLOW = 'LightYellow' +108 LIME = 'lime' +109 LIME_GREEN = 'LimeGreen' +110 LINEN = 'Linen' +111 MAGENTA = 'magenta' +112 MAHOGANY = 'Mahogany' +113 MAROON = 'Maroon' +114 MEDIUM_AQUAMARINE = 'MediumAquamarine' +115 MEDIUM_BLUE = 'MediumBlue' +116 MEDIUM_ORCHID = 'MediumOrchid' +117 MEDIUM_PURPLE = 'MediumPurple' +118 MEDIUM_SEA_GREEN = 'MediumSeaGreen' +119 MEDIUM_SLATE_BLUE = 'MediumSlateBlue' +120 MEDIUM_SPRING_GREEN = 'MediumSpringGreen' +121 MEDIUM_TURQUOISE = 'MediumTurquoise' +122 MEDIUM_VIOLET_RED = 'MediumVioletRed' +123 MELON = 'Melon' +124 MIDNIGHT_BLUE = 'MidnightBlue' +125 MINT_CREAM = 'MintCream' +126 
MISTY_ROSE = 'MistyRose' +127 MOCCASIN = 'Moccasin' +128 MULBERRY = 'Mulberry' +129 NAVAJO_WHITE = 'NavajoWhite' +130 NAVY = 'Navy' +131 NAVY_BLUE = 'NavyBlue' +132 OLD_LACE = 'OldLace' +133 OLIVE = 'olive' +134 OLIVE_DRAB = 'OliveDrab' +135 OLIVE_GREEN = 'OliveGreen' +136 ORANGE = 'orange' +137 ORANGE_RED = 'OrangeRed' +138 ORCHID = 'Orchid' +139 PALE_GOLDENROD = 'PaleGoldenrod' +140 PALE_GREEN = 'PaleGreen' +141 PALE_TURQUOISE = 'PaleTurquoise' +142 PALE_VIOLET_RED = 'PaleVioletRed' +143 PAPAYA_WHIP = 'PapayaWhip' +144 PEACH = 'Peach' +145 PEACH_PUFF = 'PeachPuff' +146 PERIWINKLE = 'Periwinkle' +147 PERU = 'Peru' +148 PINE_GREEN = 'PineGreen' +149 PINK = 'pink' +150 PLUM = 'Plum' +151 POWDER_BLUE = 'PowderBlue' +152 PROCESS_BLUE = 'ProcessBlue' +153 PURPLE = 'purple' +154 RAW_SIENNA = 'RawSienna' +155 RED = 'red' +156 RED_ORANGE = 'RedOrange' +157 RED_VIOLET = 'RedViolet' +158 RHODAMINE = 'Rhodamine' +159 ROSY_BROWN = 'RosyBrown' +160 ROYAL_BLUE = 'RoyalBlue' +161 ROYAL_PURPLE = 'RoyalPurple' +162 RUBINE_RED = 'RubineRed' +163 SADDLE_BROWN = 'SaddleBrown' +164 SALMON = 'Salmon' +165 SANDY_BROWN = 'SandyBrown' +166 SEASHELL = 'Seashell' +167 SEA_GREEN = 'SeaGreen' +168 SEPIA = 'Sepia' +169 SIENNA = 'Sienna' +170 SILVER = 'Silver' +171 SKY_BLUE = 'SkyBlue' +172 SLATE_BLUE = 'SlateBlue' +173 SLATE_GRAY = 'SlateGray' +174 SLATE_GREY = 'SlateGrey' +175 SNOW = 'Snow' +176 SPRING_GREEN = 'SpringGreen' +177 STEEL_BLUE = 'SteelBlue' +178 TAN = 'Tan' +179 TEAL = 'teal' +180 TEAL_BLUE = 'TealBlue' +181 THISTLE = 'Thistle' +182 TOMATO = 'Tomato' +183 TURQUOISE = 'Turquoise' +184 VIOLET = 'violet' +185 VIOLET_RED = 'VioletRed' +186 WHEAT = 'Wheat' +187 WHITE = 'white' +188 WHITE_SMOKE = 'WhiteSmoke' +189 WILD_STRAWBERRY = 'WildStrawberry' +190 YELLOW = 'yellow' +191 YELLOW_GREEN = 'YellowGreen' +192 YELLOW_ORANGE = 'YellowOrange' +193 +194 @property +195 def ansi_code(self) -> str: +196 return f'\x1b[38;5;{_ansi_index[self]}m' +197 +
+[docs] +198 @staticmethod +199 def reset_code() -> str: +200 return '\x1b[0m'
+ +201 +
+[docs] +202 def set(self, *, file: IO[str] = sys.stdout) -> None: +203 print(self.ansi_code, end='', file=file, flush=True)
+ +204 +
+[docs] +205 @staticmethod +206 def reset(*, file: IO[str] = sys.stdout) -> None: +207 print(Color.reset_code(), end='', file=file, flush=True)
+
+ +208 +209 +210_ansi_index: Final = { +211 Color.ALICE_BLUE: 231, +212 Color.ANTIQUE_WHITE: 231, +213 Color.APRICOT: 216, +214 Color.AQUA: 51, +215 Color.AQUAMARINE: 122, +216 Color.AZURE: 231, +217 Color.BEIGE: 230, +218 Color.BISQUE: 223, +219 Color.BITTERSWEET: 130, +220 Color.BLACK: 16, +221 Color.BLANCHED_ALMOND: 223, +222 Color.BLUE: 21, +223 Color.BLUE_GREEN: 37, +224 Color.BLUE_VIOLET: 93, +225 Color.BRICK_RED: 124, +226 Color.BROWN: 137, +227 Color.BURLY_WOOD: 180, +228 Color.BURNT_ORANGE: 208, +229 Color.CADET_BLUE: 73, +230 Color.CARNATION_PINK: 211, +231 Color.CERULEAN: 39, +232 Color.CHARTREUSE: 118, +233 Color.CHOCOLATE: 166, +234 Color.CORAL: 209, +235 Color.CORNFLOWER_BLUE: 68, +236 Color.CORNSILK: 230, +237 Color.CRIMSON: 197, +238 Color.CYAN: 51, +239 Color.DANDELION: 214, +240 Color.DARKGRAY: 59, +241 Color.DARK_BLUE: 18, +242 Color.DARK_CYAN: 30, +243 Color.DARK_GOLDENROD: 136, +244 Color.DARK_GRAY: 145, +245 Color.DARK_GREEN: 22, +246 Color.DARK_GREY: 145, +247 Color.DARK_KHAKI: 143, +248 Color.DARK_MAGENTA: 90, +249 Color.DARK_OLIVE_GREEN: 58, +250 Color.DARK_ORANGE: 208, +251 Color.DARK_ORCHID: 128, +252 Color.DARK_RED: 88, +253 Color.DARK_SALMON: 173, +254 Color.DARK_SEA_GREEN: 108, +255 Color.DARK_SLATE_BLUE: 61, +256 Color.DARK_SLATE_GRAY: 23, +257 Color.DARK_SLATE_GREY: 23, +258 Color.DARK_TURQUOISE: 44, +259 Color.DARK_VIOLET: 92, +260 Color.DEEP_PINK: 198, +261 Color.DEEP_SKY_BLUE: 74, +262 Color.DIM_GRAY: 59, +263 Color.DIM_GREY: 59, +264 Color.DODGER_BLUE: 33, +265 Color.EMERALD: 37, +266 Color.FIRE_BRICK: 124, +267 Color.FLORAL_WHITE: 231, +268 Color.FOREST_GREEN: 28, +269 Color.FUCHSIA: 201, +270 Color.GAINSBORO: 188, +271 Color.GHOST_WHITE: 231, +272 Color.GOLD: 220, +273 Color.GOLDENROD: 178, +274 Color.GRAY: 102, +275 Color.GREEN: 46, +276 Color.GREEN_YELLOW: 154, +277 Color.GREY: 102, +278 Color.HONEYDEW: 231, +279 Color.HOT_PINK: 205, +280 Color.INDIAN_RED: 167, +281 Color.INDIGO: 54, +282 Color.IVORY: 231, +283 Color.JUNGLE_GREEN: 37, +284 Color.KHAKI: 186, +285 Color.LAVENDER: 189, +286 Color.LAVENDER_BLUSH: 231, +287 Color.LAWN_GREEN: 118, +288 Color.LEMON_CHIFFON: 230, +289 Color.LIGHTGRAY: 145, +290 Color.LIGHT_BLUE: 152, +291 Color.LIGHT_CORAL: 210, +292 Color.LIGHT_CYAN: 195, +293 Color.LIGHT_GOLDENROD: 186, +294 Color.LIGHT_GOLDENROD_YELLOW: 230, +295 Color.LIGHT_GRAY: 188, +296 Color.LIGHT_GREEN: 120, +297 Color.LIGHT_GREY: 188, +298 Color.LIGHT_PINK: 217, +299 Color.LIGHT_SALMON: 216, +300 Color.LIGHT_SEA_GREEN: 37, +301 Color.LIGHT_SKY_BLUE: 117, +302 Color.LIGHT_SLATE_BLUE: 99, +303 Color.LIGHT_SLATE_GRAY: 102, +304 Color.LIGHT_SLATE_GREY: 102, +305 Color.LIGHT_STEEL_BLUE: 153, +306 Color.LIGHT_YELLOW: 230, +307 Color.LIME: 154, +308 Color.LIME_GREEN: 40, +309 Color.LINEN: 231, +310 Color.MAGENTA: 201, +311 Color.MAHOGANY: 124, +312 Color.MAROON: 88, +313 Color.MEDIUM_AQUAMARINE: 79, +314 Color.MEDIUM_BLUE: 20, +315 Color.MEDIUM_ORCHID: 134, +316 Color.MEDIUM_PURPLE: 98, +317 Color.MEDIUM_SEA_GREEN: 35, +318 Color.MEDIUM_SLATE_BLUE: 99, +319 Color.MEDIUM_SPRING_GREEN: 49, +320 Color.MEDIUM_TURQUOISE: 44, +321 Color.MEDIUM_VIOLET_RED: 162, +322 Color.MELON: 216, +323 Color.MIDNIGHT_BLUE: 18, +324 Color.MINT_CREAM: 231, +325 Color.MISTY_ROSE: 224, +326 Color.MOCCASIN: 223, +327 Color.MULBERRY: 126, +328 Color.NAVAJO_WHITE: 223, +329 Color.NAVY: 18, +330 Color.NAVY_BLUE: 18, +331 Color.OLD_LACE: 231, +332 Color.OLIVE: 100, +333 Color.OLIVE_DRAB: 64, +334 Color.OLIVE_GREEN: 28, +335 Color.ORANGE: 220, +336 Color.ORANGE_RED: 202, +337 
Color.ORCHID: 170, +338 Color.PALE_GOLDENROD: 187, +339 Color.PALE_GREEN: 120, +340 Color.PALE_TURQUOISE: 159, +341 Color.PALE_VIOLET_RED: 168, +342 Color.PAPAYA_WHIP: 230, +343 Color.PEACH: 209, +344 Color.PEACH_PUFF: 223, +345 Color.PERIWINKLE: 104, +346 Color.PERU: 173, +347 Color.PINE_GREEN: 29, +348 Color.PINK: 217, +349 Color.PLUM: 182, +350 Color.POWDER_BLUE: 152, +351 Color.PROCESS_BLUE: 39, +352 Color.PURPLE: 161, +353 Color.RAW_SIENNA: 124, +354 Color.RED: 196, +355 Color.RED_ORANGE: 202, +356 Color.RED_VIOLET: 125, +357 Color.RHODAMINE: 205, +358 Color.ROSY_BROWN: 138, +359 Color.ROYAL_BLUE: 62, +360 Color.ROYAL_PURPLE: 61, +361 Color.RUBINE_RED: 198, +362 Color.SADDLE_BROWN: 94, +363 Color.SALMON: 210, +364 Color.SANDY_BROWN: 215, +365 Color.SEASHELL: 231, +366 Color.SEA_GREEN: 29, +367 Color.SEPIA: 52, +368 Color.SIENNA: 130, +369 Color.SILVER: 145, +370 Color.SKY_BLUE: 117, +371 Color.SLATE_BLUE: 62, +372 Color.SLATE_GRAY: 102, +373 Color.SLATE_GREY: 102, +374 Color.SNOW: 231, +375 Color.SPRING_GREEN: 48, +376 Color.STEEL_BLUE: 67, +377 Color.TAN: 180, +378 Color.TEAL: 30, +379 Color.TEAL_BLUE: 37, +380 Color.THISTLE: 182, +381 Color.TOMATO: 203, +382 Color.TURQUOISE: 80, +383 Color.VIOLET: 90, +384 Color.VIOLET_RED: 162, +385 Color.WHEAT: 223, +386 Color.WHITE: 231, +387 Color.WHITE_SMOKE: 231, +388 Color.WILD_STRAWBERRY: 197, +389 Color.YELLOW: 226, +390 Color.YELLOW_GREEN: 112, +391 Color.YELLOW_ORANGE: 214, +392} +
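A brief, illustrative sketch of how the enum above can be used for terminal output (assuming a 256-color-capable terminal):

import sys

from pyk.kast.color import Color

# ansi_code looks up the 256-color index in _ansi_index.
print(f'{Color.FOREST_GREEN.ansi_code}rule applied{Color.reset_code()}')

# set()/reset() write the escape codes directly to a stream.
Color.RED.set(file=sys.stderr)
print('something went wrong', file=sys.stderr)
Color.reset(file=sys.stderr)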
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/formatter.html b/pyk/_modules/pyk/kast/formatter.html new file mode 100644 index 00000000000..02ceda52f28 --- /dev/null +++ b/pyk/_modules/pyk/kast/formatter.html @@ -0,0 +1,356 @@ + + + + + + + + pyk.kast.formatter — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.formatter

+  1from __future__ import annotations
+  2
+  3from typing import TYPE_CHECKING
+  4
+  5from ..prelude.k import K_ITEM
+  6from ..utils import intersperse
+  7from .att import Atts, Format, KAtt
+  8from .inner import KApply, KToken, KVariable, bottom_up
+  9from .outer import KNonTerminal, KProduction, KRegexTerminal, KSequence, KTerminal
+ 10
+ 11if TYPE_CHECKING:
+ 12    from typing import Final
+ 13
+ 14    from . import KInner
+ 15    from .inner import KSort
+ 16    from .outer import KDefinition
+ 17
+ 18
+ 19"""
+ 20Notes on _DEFAULT_BRACKET
+ 21-------------------------
+ 22
+ 23Module KSEQ defines the following production:
+ 24
+ 25syntax {Sort} Sort ::= "(" Sort ")" [bracket, group(defaultBracket), applyPriority(1)]
+ 26
+ 27https://github.com/runtimeverification/k/blob/5c84d48f697b73ad779395c53b7edc934ed4e8f5/k-distribution/include/kframework/builtin/kast.md?plain=1#L102
+ 28
+ 29For pretty printing, the K Frontend instantiates a module where parametric productions,
+ 30including this one, are instantiated with actual sorts:
+ 31
+ 32https://github.com/runtimeverification/k/blob/5c84d48f697b73ad779395c53b7edc934ed4e8f5/k-frontend/src/main/java/org/kframework/parser/inner/RuleGrammarGenerator.java
+ 33
+ 34_DEFAULT_BRACKET emulates this behavior without the need to actually construct the module.
+ 35
+ 36Since the default bracket production is not included in syntaxDefinition.kore,
+ 37the pretty printer of the LLVM backend follows a similar approach (on the KORE level):
+ 38
+ 39https://github.com/runtimeverification/llvm-backend/blob/d5eab4b0f0e610bc60843ebb482f79c043b92702/lib/printer/addBrackets.cpp#L446-L447
+ 40https://github.com/runtimeverification/llvm-backend/blob/d5eab4b0f0e610bc60843ebb482f79c043b92702/lib/printer/printer.cpp#L63
+ 41"""
+ 42_DEFAULT_BRACKET_LABEL: Final = '__bracket__'
+ 43_DEFAULT_BRACKET: Final = KProduction(
+ 44    sort=K_ITEM,  # sort is irrelevant
+ 45    items=(
+ 46        KTerminal('('),
+ 47        KNonTerminal(K_ITEM),  # sort is irrelevant
+ 48        KTerminal(value=')'),
+ 49    ),
+ 50    att=KAtt(  # except for 'format', the other attributes are not necessary
+ 51        (
+ 52            Atts.BRACKET_LABEL({'name': _DEFAULT_BRACKET_LABEL}),
+ 53            Atts.BRACKET(None),
+ 54            Atts.FORMAT(Format.parse('%1 %2 %3')),
+ 55        )
+ 56    ),
+ 57)
+ 58
+ 59
+
+[docs] + 60class Formatter: + 61 definition: KDefinition + 62 + 63 _indent: int + 64 _brackets: bool + 65 + 66 def __init__(self, definition: KDefinition, *, indent: int = 0, brackets: bool = True): + 67 self.definition = definition + 68 self._indent = indent + 69 self._brackets = brackets + 70 + 71 def __call__(self, term: KInner) -> str: + 72 return self.format(term) + 73 +
+[docs] + 74 def format(self, term: KInner) -> str: + 75 if self._brackets: + 76 term = add_brackets(self.definition, term) + 77 return ''.join(self._format(term))
+ + 78 + 79 def _format(self, term: KInner) -> list[str]: + 80 match term: + 81 case KToken(token, _): + 82 return [token] + 83 case KVariable(name, sort): + 84 sort_str = f':{sort.name}' if sort else '' + 85 return [f'{name}{sort_str}'] + 86 case KSequence(): + 87 return self._format_ksequence(term) + 88 case KApply(): + 89 return self._format_kapply(term) + 90 case _: + 91 raise ValueError(f'Unsupported term: {term}') + 92 + 93 def _format_ksequence(self, ksequence: KSequence) -> list[str]: + 94 items = [self._format(item) for item in ksequence.items] # recur + 95 items.append(['.K']) + 96 return [chunk for chunks in intersperse(items, [' ~> ']) for chunk in chunks] + 97 + 98 def _format_kapply(self, kapply: KApply) -> list[str]: + 99 production: KProduction +100 if kapply.label.name == _DEFAULT_BRACKET_LABEL: +101 production = _DEFAULT_BRACKET +102 else: +103 production = self.definition.syntax_symbols[kapply.label.name] +104 +105 formatt = production.att.get(Atts.FORMAT, production.default_format) +106 return [ +107 chunk +108 for token in formatt.tokens +109 for chunks in self._interpret_token(token, production, kapply) +110 for chunk in chunks +111 ] +112 +113 def _interpret_token(self, token: str, production: KProduction, kapply: KApply) -> list[str]: +114 if not token[0] == '%': +115 return [token] +116 +117 escape = token[1:] +118 +119 if escape[0].isdigit(): +120 try: +121 index = int(escape) +122 except ValueError as err: +123 raise AssertionError(f'Incorrect format escape sequence: {token}') from err +124 return self._interpret_index(index, production, kapply) +125 +126 assert len(escape) == 1 +127 +128 match escape: +129 case 'n': +130 return ['\n', self._indent * ' '] +131 case 'i': +132 self._indent += 1 +133 return [] +134 case 'd': +135 self._indent -= 1 +136 return [] +137 case 'c' | 'r': +138 return [] # TODO add color support +139 case _: +140 return [escape] +141 +142 def _interpret_index(self, index: int, production: KProduction, kapply: KApply) -> list[str]: +143 assert index > 0 +144 if index > len(production.items): +145 raise ValueError(f'Format escape index out of bounds: {index}: {production}') +146 +147 item = production.items[index - 1] +148 match item: +149 case KTerminal(value): +150 return [value] +151 case KNonTerminal(): +152 arg_index = sum(isinstance(item, KNonTerminal) for item in production.items[: index - 1]) +153 if arg_index >= len(kapply.args): +154 raise ValueError('NonTerminal index out of bounds: {arg_index}: {kapply}') +155 arg = kapply.args[arg_index] +156 return self._format(arg) # recur +157 case KRegexTerminal(): +158 raise ValueError(f'Invalid format index escape to regex terminal: {index}: {production}') +159 case _: +160 raise AssertionError()
+ +161 +162 +
+[docs] +163def add_brackets(definition: KDefinition, term: KInner) -> KInner: +164 def _add_brackets(term: KInner) -> KInner: +165 if not isinstance(term, KApply): +166 return term +167 +168 prod = definition.symbols[term.label.name] +169 +170 args: list[KInner] = [] +171 +172 arg_index = -1 +173 for index, item in enumerate(prod.items): +174 if not isinstance(item, KNonTerminal): +175 continue +176 +177 arg_index += 1 +178 arg = term.args[arg_index] +179 arg = _with_bracket(definition, term, arg, item.sort, index) +180 args.append(arg) +181 +182 return term.let(args=args) +183 +184 return bottom_up(_add_brackets, term)
+ +185 +186 +187def _with_bracket(definition: KDefinition, parent: KApply, term: KInner, bracket_sort: KSort, index: int) -> KInner: +188 if not _requires_bracket(definition, parent, term, index): +189 return term +190 +191 bracket_prod = definition.brackets.get(bracket_sort, _DEFAULT_BRACKET) +192 bracket_label = bracket_prod.att[Atts.BRACKET_LABEL]['name'] +193 return KApply(bracket_label, term) +194 +195 +196def _requires_bracket(definition: KDefinition, parent: KApply, term: KInner, index: int) -> bool: +197 if isinstance(term, (KToken, KVariable, KSequence)): +198 return False +199 +200 assert isinstance(term, KApply) +201 +202 if len(term.args) == 1: +203 return False +204 +205 if _between_terminals(definition, parent, index): +206 return False +207 +208 if _associativity_wrong(definition, parent, term, index): +209 return True +210 +211 if _priority_wrong(definition, parent, term): +212 return True +213 +214 return False +215 +216 +217def _between_terminals(definition: KDefinition, parent: KApply, index: int) -> bool: +218 prod = definition.symbols[parent.label.name] +219 if index in [0, len(prod.items) - 1]: +220 return False +221 return all(isinstance(prod.items[index + offset], KTerminal) for offset in [-1, 1]) +222 +223 +224def _associativity_wrong(definition: KDefinition, parent: KApply, term: KApply, index: int) -> bool: +225 """Return whether `term` can appear as the `index`-th child of `parent` according to associativity rules. +226 +227 A left (right) associative symbol cannot appear as the rightmost (leftmost) child of a symbol with equal priority. +228 """ +229 parent_label = parent.label.name +230 term_label = term.label.name +231 prod = definition.symbols[parent_label] +232 if index == 0 and term_label in definition.right_assocs.get(parent_label, ()): +233 return True +234 if index == len(prod.items) - 1 and term_label in definition.left_assocs.get(parent_label, ()): +235 return True +236 return False +237 +238 +239def _priority_wrong(definition: KDefinition, parent: KApply, term: KApply) -> bool: +240 """Return whether `term` can appear as a child of `parent` according to priority rules. +241 +242 A symbol with a lesser priority cannot appear as the child of a symbol with greater priority. +243 """ +244 parent_label = parent.label.name +245 term_label = term.label.name +246 return term_label in definition.priorities.get(parent_label, ()) +
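To make the escape handling above concrete, here is a small sketch of the format tokens the Formatter consumes; `Format` comes from `pyk.kast.att` (imported by this module), and the format strings are illustrative:

from pyk.kast.att import Format

# The default bracket format used by _DEFAULT_BRACKET above.
fmt = Format.parse('%1 %2 %3')
assert fmt.tokens == ('%1', ' ', '%2', ' ', '%3')

# %i / %d adjust indentation, %n emits a newline plus the current indent,
# and %1, %2, ... index into the production's items (terminals are emitted
# verbatim, non-terminals are formatted recursively).
fmt = Format.parse('%i%n%1%d%n%2')
assert fmt.tokens == ('%i', '%n', '%1', '%d', '%n', '%2')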
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/inner.html b/pyk/_modules/pyk/kast/inner.html new file mode 100644 index 00000000000..c5481350f40 --- /dev/null +++ b/pyk/_modules/pyk/kast/inner.html @@ -0,0 +1,1292 @@ + + + + + + + + pyk.kast.inner — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.inner

+  1from __future__ import annotations
+  2
+  3import json
+  4import logging
+  5from abc import abstractmethod
+  6from collections.abc import Iterable, Mapping, Sequence
+  7from dataclasses import dataclass
+  8from functools import reduce
+  9from itertools import chain
+ 10from typing import TYPE_CHECKING, final, overload
+ 11
+ 12from ..utils import EMPTY_FROZEN_DICT, FrozenDict
+ 13from .kast import KAst
+ 14
+ 15if TYPE_CHECKING:
+ 16    from collections.abc import Callable, Iterator
+ 17    from typing import Any, Final, TypeVar
+ 18
+ 19    T = TypeVar('T', bound='KAst')
+ 20    KI = TypeVar('KI', bound='KInner')
+ 21    A = TypeVar('A')
+ 22    B = TypeVar('B')
+ 23
+ 24_LOGGER: Final = logging.getLogger(__name__)
+ 25
+ 26
+
+[docs] + 27@final + 28@dataclass(frozen=True) + 29class KSort(KAst): + 30 """Store a simple sort name.""" + 31 + 32 name: str + 33 +
+[docs] + 34 def __init__(self, name: str): + 35 """Construct a new sort given the name.""" + 36 object.__setattr__(self, 'name', name)
+ + 37 +
+[docs] + 38 @staticmethod + 39 def from_dict(d: Mapping[str, Any]) -> KSort: + 40 return KSort(name=d['name'])
+ + 41 +
+[docs] + 42 def to_dict(self) -> dict[str, Any]: + 43 return {'node': 'KSort', 'name': self.name}
+ + 44 +
+[docs] + 45 def let(self, *, name: str | None = None) -> KSort: + 46 """Return a new `KSort` with the name potentially updated.""" + 47 name = name if name is not None else self.name + 48 return KSort(name=name)
+
+ + 49 + 50 +
+[docs] + 51@final + 52@dataclass(frozen=True) + 53class KLabel(KAst): + 54 """Represents a symbol that can be applied in a K AST, potentially with sort parameters.""" + 55 + 56 name: str + 57 params: tuple[KSort, ...] + 58 + 59 @overload + 60 def __init__(self, name: str, params: Iterable[str | KSort]): ... + 61 + 62 @overload + 63 def __init__(self, name: str, *params: str | KSort): ... + 64 + 65 # TODO Is it possible to extract a decorator? +
+[docs] + 66 def __init__(self, name: str, *args: Any, **kwargs: Any): + 67 """Construct a new `KLabel`, with optional sort parameters.""" + 68 if kwargs: + 69 bad_arg = next((arg for arg in kwargs if arg != 'params'), None) + 70 if bad_arg: + 71 raise TypeError(f'KLabel() got an unexpected keyword argument: {bad_arg}') + 72 if args: + 73 raise TypeError('KLabel() got multiple values for argument: params') + 74 params = kwargs['params'] + 75 + 76 elif ( + 77 len(args) == 1 + 78 and isinstance(args[0], Iterable) + 79 and not isinstance(args[0], str) + 80 and not isinstance(args[0], KInner) + 81 ): + 82 params = args[0] + 83 + 84 else: + 85 params = args + 86 + 87 params = tuple(KSort(param) if type(param) is str else param for param in params) + 88 object.__setattr__(self, 'name', name) + 89 object.__setattr__(self, 'params', params)
+ + 90 +
+[docs] + 91 def __iter__(self) -> Iterator[str | KSort]: + 92 """Return this symbol as iterator with the name as the head and the parameters as the tail.""" + 93 return chain([self.name], self.params)
+ + 94 + 95 @overload + 96 def __call__(self, args: Iterable[KInner]) -> KApply: ... + 97 + 98 @overload + 99 def __call__(self, *args: KInner) -> KApply: ... +100 +101 def __call__(self, *args: Any, **kwargs: Any) -> KApply: +102 return self.apply(*args, **kwargs) +103 +
+[docs] +104 @staticmethod +105 def from_dict(d: Mapping[str, Any]) -> KLabel: +106 return KLabel(name=d['name'], params=(KSort.from_dict(param) for param in d['params']))
+ +107 +
+[docs] +108 def to_dict(self) -> dict[str, Any]: +109 return {'node': 'KLabel', 'name': self.name, 'params': [param.to_dict() for param in self.params]}
+ +110 +
+[docs] +111 def let(self, *, name: str | None = None, params: Iterable[str | KSort] | None = None) -> KLabel: +112 """Return a copy of this `KLabel` with potentially the name or sort parameters updated.""" +113 name = name if name is not None else self.name +114 params = params if params is not None else self.params +115 return KLabel(name=name, params=params)
+ +116 +117 @overload +118 def apply(self, args: Iterable[KInner]) -> KApply: ... +119 +120 @overload +121 def apply(self, *args: KInner) -> KApply: ... +122 +
+[docs] +123 def apply(self, *args: Any, **kwargs: Any) -> KApply: +124 """Construct a `KApply` with this `KLabel` as the AST head and the supplied parameters as the arguments.""" +125 return KApply(self, *args, **kwargs)
+
+ +126 +127 +
+[docs] +128class KInner(KAst): +129 """Represent the AST of a given K inner term. +130 +131 This class represents the AST of a given term. +132 The nodes in the AST should be coming from a given KDefinition, so that they can be checked for well-typedness. +133 """ +134 +135 _NODES: Final = {'KVariable', 'KToken', 'KApply', 'KAs', 'KRewrite', 'KSequence'} +136 +
+[docs] +137 @staticmethod +138 def from_json(s: str) -> KInner: +139 return KInner.from_dict(json.loads(s))
+ +140 +
+[docs] +141 @staticmethod +142 def from_dict(dct: Mapping[str, Any]) -> KInner: +143 """Deserialize a given `KInner` into a more specific type from a dictionary.""" +144 stack: list = [dct, KInner._extract_dicts(dct), []] +145 while True: +146 terms = stack[-1] +147 dcts = stack[-2] +148 dct = stack[-3] +149 idx = len(terms) - len(dcts) +150 if not idx: +151 stack.pop() +152 stack.pop() +153 stack.pop() +154 cls = globals()[dct['node']] +155 term = cls._from_dict(dct, terms) +156 if not stack: +157 return term +158 stack[-1].append(term) +159 else: +160 dct = dcts[idx] +161 stack.append(dct) +162 stack.append(KInner._extract_dicts(dct)) +163 stack.append([])
+ +164 +165 @staticmethod +166 def _extract_dicts(dct: Mapping[str, Any]) -> list[Mapping[str, Any]]: +167 match dct['node']: +168 case 'KApply': +169 return dct['args'] +170 case 'KSequence': +171 return dct['items'] +172 case 'KRewrite': +173 return [dct['lhs'], dct['rhs']] +174 case 'KAs': +175 return [dct['pattern'], dct['alias']] +176 case _: +177 return [] +178 +179 @classmethod +180 @abstractmethod +181 def _from_dict(cls: type[KI], d: Mapping[str, Any], terms: list[KInner]) -> KI: ... +182 +183 @property +184 @abstractmethod +185 def terms(self) -> tuple[KInner, ...]: +186 """Returns the children of this given `KInner`.""" +187 ... +188 +
+[docs] +189 @abstractmethod +190 def let_terms(self: KI, terms: Iterable[KInner]) -> KI: +191 """Set children of this given `KInner`.""" +192 ...
+ +193 +
+[docs] +194 @final +195 def map_inner(self: KI, f: Callable[[KInner], KInner]) -> KI: +196 """Apply a transformation to all children of this given `KInner`.""" +197 return self.let_terms(f(term) for term in self.terms)
+ +198 +
+[docs] +199 @abstractmethod +200 def match(self, term: KInner) -> Subst | None: +201 """Perform syntactic pattern matching and return the substitution. +202 +203 Args: +204 term: Term to match. +205 +206 Returns: +207 A substitution instantiating `self` to `term` if one exists, ``None`` otherwise. +208 """ +209 ...
+ +210 +211 @staticmethod +212 def _combine_matches(substs: Iterable[Subst | None]) -> Subst | None: +213 def combine(subst1: Subst | None, subst2: Subst | None) -> Subst | None: +214 if subst1 is None or subst2 is None: +215 return None +216 +217 return subst1.union(subst2) +218 +219 unit: Subst | None = Subst() +220 return reduce(combine, substs, unit) +221 +
+[docs] +222 @final +223 def to_dict(self) -> dict[str, Any]: +224 stack: list = [self, []] +225 while True: +226 dicts = stack[-1] +227 term = stack[-2] +228 idx = len(dicts) - len(term.terms) +229 if not idx: +230 stack.pop() +231 stack.pop() +232 dct = term._to_dict(dicts) +233 if not stack: +234 return dct +235 stack[-1].append(dct) +236 else: +237 stack.append(term.terms[idx]) +238 stack.append([])
+ +239 +240 @abstractmethod +241 def _to_dict(self, terms: list[KInner]) -> dict[str, Any]: ...
+ +242 +243 +
+[docs] +244@final +245@dataclass(frozen=True) +246class KToken(KInner): +247 """Represent a domain-value in K AST.""" +248 +249 token: str +250 sort: KSort +251 +
+[docs] +252 def __init__(self, token: str, sort: str | KSort): +253 """Construct a new `KToken` with a given string representation in the supplied sort.""" +254 if type(sort) is str: +255 sort = KSort(sort) +256 +257 object.__setattr__(self, 'token', token) +258 object.__setattr__(self, 'sort', sort)
+ +259 +260 @classmethod +261 def _from_dict(cls: type[KToken], dct: Mapping[str, Any], terms: list[KInner]) -> KToken: +262 assert not terms +263 return KToken(token=dct['token'], sort=KSort.from_dict(dct['sort'])) +264 +265 def _to_dict(self, terms: list[KInner]) -> dict[str, Any]: +266 assert not terms +267 return {'node': 'KToken', 'token': self.token, 'sort': self.sort.to_dict()} +268 +
+[docs] +269 def let(self, *, token: str | None = None, sort: str | KSort | None = None) -> KToken: +270 """Return a copy of the `KToken` with the token or sort potentially updated.""" +271 token = token if token is not None else self.token +272 sort = sort if sort is not None else self.sort +273 return KToken(token=token, sort=sort)
+ +274 +275 @property +276 def terms(self) -> tuple[()]: +277 return () +278 +
+[docs] +279 def let_terms(self, terms: Iterable[KInner]) -> KToken: +280 () = terms +281 return self
+ +282 +
+[docs] +283 def match(self, term: KInner) -> Subst | None: +284 if type(term) is KToken: +285 return Subst() if term.token == self.token else None +286 _LOGGER.debug(f'Matching failed: ({self}.match({term}))') +287 return None
+
+ +288 +289 +
+[docs] +290@final +291@dataclass(frozen=True) +292class KVariable(KInner): +293 """Represent a logical variable in a K AST, with a name and optionally a sort.""" +294 +295 name: str +296 sort: KSort | None +297 +
+[docs] +298 def __init__(self, name: str, sort: str | KSort | None = None): +299 """Construct a new `KVariable` with a given name and optional sort.""" +300 if type(sort) is str: +301 sort = KSort(sort) +302 +303 object.__setattr__(self, 'name', name) +304 object.__setattr__(self, 'sort', sort)
+ +305 +
+[docs] +306 def __lt__(self, other: Any) -> bool: +307 """Lexicographic comparison of `KVariable` based on name for sorting.""" +308 if not isinstance(other, KAst): +309 return NotImplemented +310 if type(other) is KVariable: +311 if (self.sort is None or other.sort is None) and self.name == other.name: +312 return self.sort is None +313 return super().__lt__(other)
+ +314 +315 @classmethod +316 def _from_dict(cls: type[KVariable], dct: Mapping[str, Any], terms: list[KInner]) -> KVariable: +317 assert not terms +318 sort = None +319 if 'sort' in dct: +320 sort = KSort.from_dict(dct['sort']) +321 return KVariable(name=dct['name'], sort=sort) +322 +323 def _to_dict(self, terms: list[KInner]) -> dict[str, Any]: +324 assert not terms +325 _d: dict[str, Any] = {'node': 'KVariable', 'name': self.name} +326 if self.sort is not None: +327 _d['sort'] = self.sort.to_dict() +328 return _d +329 +
+[docs] +330 def let(self, *, name: str | None = None, sort: str | KSort | None = None) -> KVariable: +331 """Return a copy of this `KVariable` with potentially the name or sort updated.""" +332 name = name if name is not None else self.name +333 sort = sort if sort is not None else self.sort +334 return KVariable(name=name, sort=sort)
+ +335 +
+[docs] +336 def let_sort(self, sort: KSort | None) -> KVariable: +337 """Return a copy of this `KVariable` with just the sort updated.""" +338 return KVariable(self.name, sort=sort)
+ +339 +340 @property +341 def terms(self) -> tuple[()]: +342 return () +343 +
+[docs] +344 def let_terms(self, terms: Iterable[KInner]) -> KVariable: +345 () = terms +346 return self
+ +347 +
+[docs] +348 def match(self, term: KInner) -> Subst: +349 return Subst({self.name: term})
+
+ +350 +351 +
+[docs] +352@final +353@dataclass(frozen=True) +354class KApply(KInner): +355 """Represent the application of a `KLabel` in a K AST to arguments.""" +356 +357 label: KLabel +358 args: tuple[KInner, ...] +359 +360 @overload +361 def __init__(self, label: str | KLabel, args: Iterable[KInner]): ... +362 +363 @overload +364 def __init__(self, label: str | KLabel, *args: KInner): ... +365 +
+[docs] +366 def __init__(self, label: str | KLabel, *args: Any, **kwargs: Any): +367 """Construct a new `KApply` given the input `KLabel` or str, applied to arguments.""" +368 if type(label) is str: +369 label = KLabel(label) +370 +371 if kwargs: +372 bad_arg = next((arg for arg in kwargs if arg != 'args'), None) +373 if bad_arg: +374 raise TypeError(f'KApply() got an unexpected keyword argument: {bad_arg}') +375 if args: +376 raise TypeError('KApply() got multiple values for argument: args') +377 _args = kwargs['args'] +378 +379 elif len(args) == 1 and isinstance(args[0], Iterable) and not isinstance(args[0], KInner): +380 _args = args[0] +381 +382 else: +383 _args = args +384 +385 object.__setattr__(self, 'label', label) +386 object.__setattr__(self, 'args', tuple(_args))
+ +387 +388 @property +389 def arity(self) -> int: +390 """Return the count of the arguments.""" +391 return len(self.args) +392 +393 @property +394 def is_cell(self) -> bool: +395 """Return whether this is a cell-label application (based on heuristic about label names).""" +396 return len(self.label.name) > 1 and self.label.name[0] == '<' and self.label.name[-1] == '>' +397 +398 @classmethod +399 def _from_dict(cls: type[KApply], dct: Mapping[str, Any], terms: list[KInner]) -> KApply: +400 return KApply(label=KLabel.from_dict(dct['label']), args=terms) +401 +402 def _to_dict(self, terms: list[KInner]) -> dict[str, Any]: +403 return { +404 'node': 'KApply', +405 'label': self.label.to_dict(), +406 'args': terms, +407 'arity': self.arity, +408 'variable': False, +409 } +410 +
+[docs] +411 def let(self, *, label: str | KLabel | None = None, args: Iterable[KInner] | None = None) -> KApply: +412 """Return a copy of this `KApply` with either the label or the arguments updated.""" +413 label = label if label is not None else self.label +414 args = args if args is not None else self.args +415 return KApply(label=label, args=args)
+ +416 +417 @property +418 def terms(self) -> tuple[KInner, ...]: +419 return self.args +420 +
+[docs] +421 def let_terms(self, terms: Iterable[KInner]) -> KApply: +422 return self.let(args=terms)
+ +423 +
+[docs] +424 def match(self, term: KInner) -> Subst | None: +425 if type(term) is KApply and term.label.name == self.label.name and term.arity == self.arity: +426 return KInner._combine_matches( +427 arg.match(term_arg) for arg, term_arg in zip(self.args, term.args, strict=True) +428 ) +429 _LOGGER.debug(f'Matching failed: ({self}.match({term}))') +430 return None
+
+ +431 +432 +
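As an aside, a self-contained sketch of how the classes above fit together (assuming `pyk` is importable; the label name is illustrative):

from pyk.kast.inner import KApply, KLabel, KToken, KVariable

# A KLabel is callable: applying it constructs a KApply.
add = KLabel('_+Int_')
term = add(KToken('1', 'Int'), KVariable('X', 'Int'))
assert term.arity == 2

# Syntactic matching instantiates the pattern's variables.
pattern = KApply('_+Int_', KVariable('A'), KVariable('B'))
subst = pattern.match(term)
assert subst is not None
assert subst['A'] == KToken('1', 'Int')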
+[docs] +433@final +434@dataclass(frozen=True) +435class KAs(KInner): +436 """Represent a K `#as` pattern in the K AST format, with the original pattern and the variable alias.""" +437 +438 pattern: KInner +439 alias: KInner +440 +
+[docs] +441 def __init__(self, pattern: KInner, alias: KInner): +442 """Construct a new `KAs` given the original pattern and the alias.""" +443 object.__setattr__(self, 'pattern', pattern) +444 object.__setattr__(self, 'alias', alias)
+ +445 +446 @classmethod +447 def _from_dict(cls: type[KAs], dct: Mapping[str, Any], terms: list[KInner]) -> KAs: +448 pattern, alias = terms +449 return KAs(pattern=pattern, alias=alias) +450 +451 def _to_dict(self, terms: list[KInner]) -> dict[str, Any]: +452 pattern, alias = terms +453 return {'node': 'KAs', 'pattern': pattern, 'alias': alias} +454 +
+[docs] +455 def let(self, *, pattern: KInner | None = None, alias: KInner | None = None) -> KAs: +456 """Return a copy of this `KAs` with potentially the pattern or alias updated.""" +457 pattern = pattern if pattern is not None else self.pattern +458 alias = alias if alias is not None else self.alias +459 return KAs(pattern=pattern, alias=alias)
+ +460 +461 @property +462 def terms(self) -> tuple[KInner, KInner]: +463 return (self.pattern, self.alias) +464 +
+[docs] +465 def let_terms(self, terms: Iterable[KInner]) -> KAs: +466 pattern, alias = terms +467 return KAs(pattern=pattern, alias=alias)
+ +468 +
+[docs] +469 def match(self, term: KInner) -> Subst | None: +470 raise TypeError('KAs does not support pattern matching')
+
+ +471 +472 +
+[docs] +473@final +474@dataclass(frozen=True) +475class KRewrite(KInner): +476 """Represent a K rewrite in the K AST.""" +477 +478 lhs: KInner +479 rhs: KInner +480 +
+[docs] +481 def __init__(self, lhs: KInner, rhs: KInner): +482 """Construct a `KRewrite` given the LHS (left-hand-side) and RHS (right-hand-side) to use.""" +483 object.__setattr__(self, 'lhs', lhs) +484 object.__setattr__(self, 'rhs', rhs)
+ +485 +
+[docs] +486 def __iter__(self) -> Iterator[KInner]: +487 """Return a two-element iterator with the LHS first and RHS second.""" +488 return iter([self.lhs, self.rhs])
+ +489 +490 def __call__(self, term: KInner, *, top: bool = False) -> KInner: +491 if top: +492 return self.apply_top(term) +493 +494 return self.apply(term) +495 +496 @classmethod +497 def _from_dict(cls: type[KRewrite], dct: Mapping[str, Any], terms: list[KInner]) -> KRewrite: +498 lhs, rhs = terms +499 return KRewrite(lhs=lhs, rhs=rhs) +500 +501 def _to_dict(self, terms: list[KInner]) -> dict[str, Any]: +502 lhs, rhs = terms +503 return {'node': 'KRewrite', 'lhs': lhs, 'rhs': rhs} +504 +
+[docs] +505 def let( +506 self, +507 *, +508 lhs: KInner | None = None, +509 rhs: KInner | None = None, +510 ) -> KRewrite: +511 """Return a copy of this `KRewrite` with potentially the LHS or RHS updated.""" +512 lhs = lhs if lhs is not None else self.lhs +513 rhs = rhs if rhs is not None else self.rhs +514 return KRewrite(lhs=lhs, rhs=rhs)
+ +515 +516 @property +517 def terms(self) -> tuple[KInner, KInner]: +518 return (self.lhs, self.rhs) +519 +
+[docs] +520 def let_terms(self, terms: Iterable[KInner]) -> KRewrite: +521 lhs, rhs = terms +522 return KRewrite(lhs=lhs, rhs=rhs)
+ +523 +
+[docs] +524 def match(self, term: KInner) -> Subst | None: +525 if type(term) is KRewrite: +526 lhs_subst = self.lhs.match(term.lhs) +527 rhs_subst = self.rhs.match(term.rhs) +528 if lhs_subst is None or rhs_subst is None: +529 return None +530 return lhs_subst.union(rhs_subst) +531 _LOGGER.debug(f'Matching failed: ({self}.match({term}))') +532 return None
+ +533 +
+[docs] +534 def apply_top(self, term: KInner) -> KInner: +535 """Rewrite a given term at the top. +536 +537 Args: +538 term: Term to rewrite. +539 +540 Returns: +541 The term with the rewrite applied once at the top. +542 """ +543 subst = self.lhs.match(term) +544 if subst is not None: +545 return subst(self.rhs) +546 return term
+ +547 +
+[docs] +548 def apply(self, term: KInner) -> KInner: +549 """Attempt rewriting once at every position in a term bottom-up. +550 +551 Args: +552 term: Term to rewrite. +553 +554 Returns: +555 The term with rewrites applied at every node once starting from the bottom. +556 """ +557 return bottom_up(self.apply_top, term)
+ +558 +
+[docs] +559 def replace_top(self, term: KInner) -> KInner: +560 """Similar to apply_top but using exact syntactic matching instead of pattern matching.""" +561 if self.lhs == term: +562 return self.rhs +563 return term
+ +564 +
+[docs] +565 def replace(self, term: KInner) -> KInner: +566 """Similar to apply but using exact syntactic matching instead of pattern matching.""" +567 return bottom_up(self.replace_top, term)
+
+ +568 +569 +
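A brief usage sketch for `KRewrite` (illustrative labels; assumes `pyk` is importable):

from pyk.kast.inner import KApply, KRewrite, KToken, KVariable

# Rewrite f(X) => g(X).
rule = KRewrite(KApply('f', KVariable('X')), KApply('g', KVariable('X')))

# apply_top rewrites only at the root of a matching term.
assert rule.apply_top(KApply('f', KToken('1', 'Int'))) == KApply('g', KToken('1', 'Int'))

# apply tries the rewrite once at every position, bottom-up.
nested = KApply('h', KApply('f', KToken('2', 'Int')))
assert rule.apply(nested) == KApply('h', KApply('g', KToken('2', 'Int')))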
+[docs] +570@final +571@dataclass(frozen=True) +572class KSequence(KInner, Sequence[KInner]): +573 """Represent an associative list of `K` as a cons-list of `KItem` for sequencing computation in K AST format.""" +574 +575 items: tuple[KInner, ...] +576 +577 @overload +578 def __init__(self, items: Iterable[KInner]): ... +579 +580 @overload +581 def __init__(self, *items: KInner): ... +582 +
+[docs] +583 def __init__(self, *args: Any, **kwargs: Any): +584 """Construct a new `KSequence` given the arguments.""" +585 if kwargs: +586 bad_arg = next((arg for arg in kwargs if arg != 'items'), None) +587 if bad_arg: +588 raise TypeError(f'KSequence() got an unexpected keyword argument: {bad_arg}') +589 if args: +590 raise TypeError('KSequence() got multiple values for argument: items') +591 items = kwargs['items'] +592 +593 elif len(args) == 1 and isinstance(args[0], Iterable) and not isinstance(args[0], KInner): +594 items = args[0] +595 +596 else: +597 items = args +598 +599 _items: list[KInner] = [] +600 for i in items: +601 if type(i) is KSequence: +602 _items.extend(i.items) +603 else: +604 _items.append(i) +605 items = tuple(_items) +606 +607 object.__setattr__(self, 'items', tuple(items))
+ +608 +609 @overload +610 def __getitem__(self, key: int) -> KInner: ... +611 +612 @overload +613 def __getitem__(self, key: slice) -> tuple[KInner, ...]: ... +614 +615 def __getitem__(self, key: int | slice) -> KInner | tuple[KInner, ...]: +616 return self.items[key] +617 +618 def __len__(self) -> int: +619 return self.arity +620 +621 @property +622 def arity(self) -> int: +623 """Return the count of `KSequence` items.""" +624 return len(self.items) +625 +626 @classmethod +627 def _from_dict(cls: type[KSequence], dct: Mapping[str, Any], terms: list[KInner]) -> KSequence: +628 return KSequence(items=terms) +629 +630 def _to_dict(self, terms: list[KInner]) -> dict[str, Any]: +631 return {'node': 'KSequence', 'items': terms, 'arity': self.arity} +632 +
+[docs] +633 def let(self, *, items: Iterable[KInner] | None = None) -> KSequence: +634 """Return a copy of this `KSequence` with the items potentially updated.""" +635 items = items if items is not None else self.items +636 return KSequence(items=items)
+ +637 +638 @property +639 def terms(self) -> tuple[KInner, ...]: +640 return self.items +641 +
+[docs] +642 def let_terms(self, terms: Iterable[KInner]) -> KSequence: +643 return KSequence(items=terms)
+ +644 +
+[docs] +645 def match(self, term: KInner) -> Subst | None: +646 if type(term) is KSequence: +647 if term.arity == self.arity: +648 return KInner._combine_matches( +649 item.match(term_item) for item, term_item in zip(self.items, term.items, strict=True) +650 ) +651 if 0 < self.arity and self.arity < term.arity and type(self.items[-1]) is KVariable: +652 common_length = len(self.items) - 1 +653 _subst: Subst | None = Subst({self.items[-1].name: KSequence(term.items[common_length:])}) +654 for si, ti in zip(self.items[:common_length], term.items[:common_length], strict=True): +655 _subst = KInner._combine_matches([_subst, si.match(ti)]) +656 return _subst +657 _LOGGER.debug(f'Matching failed: ({self}.match({term}))') +658 return None
+
+ +659 +660 +
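+# Usage sketch for `KSequence.match` (assumes the usual `pyk.kast.inner` import path):
+from pyk.kast.inner import KApply, KSequence, KVariable
+
+pattern = KSequence([KApply('step'), KVariable('REST')])
+subject = KSequence([KApply('step'), KApply('a'), KApply('b')])
+subst = pattern.match(subject)
+# A trailing variable absorbs the remainder of the sequence.
+assert subst is not None and subst['REST'] == KSequence([KApply('a'), KApply('b')])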
+[docs] +661@dataclass(frozen=True) +662class Subst(Mapping[str, KInner]): +663 """Represents a substitution, which is a binding of variables to values of `KInner`.""" +664 +665 _subst: FrozenDict[str, KInner] +666 +
+[docs] +667 def __init__(self, subst: Mapping[str, KInner] = EMPTY_FROZEN_DICT): +668 """Construct a new `Subst` given a mapping of variable names to `KInner`.""" +669 object.__setattr__(self, '_subst', FrozenDict(subst))
+ +670 +
+[docs] +671 def __iter__(self) -> Iterator[str]: +672 """Return the underlying `Subst` mapping as an iterator.""" +673 return iter(self._subst)
+ +674 +
+[docs] +675 def __len__(self) -> int: +676 """Return the length of the underlying `Subst` mapping.""" +677 return len(self._subst)
+ +678 +
+[docs] +679 def __getitem__(self, key: str) -> KInner: +680 """Get the `KInner` associated with the given variable name from the underlying `Subst` mapping.""" +681 return self._subst[key]
+ +682 +
+[docs] +683 def __mul__(self, other: Subst) -> Subst: +684 """Overload for `Subst.compose`.""" +685 return self.compose(other)
+ +686 +
+[docs] +687 def __call__(self, term: KInner) -> KInner: +688 """Overload for `Subst.apply`.""" +689 return self.apply(term)
+ +690 +
+[docs] +691 @staticmethod +692 def from_dict(d: Mapping[str, Any]) -> Subst: +693 """Deserialize a `Subst` from a given dictionary representing it.""" +694 return Subst({k: KInner.from_dict(v) for k, v in d.items()})
+ +695 +
+[docs] +696 def to_dict(self) -> dict[str, Any]: +697 """Serialize a `Subst` to a dictionary representation.""" +698 return {k: v.to_dict() for k, v in self.items()}
+ +699 +
+[docs] +700 def minimize(self) -> Subst: +701 """Return a new substitution with any identity items removed.""" +702 return Subst({k: v for k, v in self.items() if type(v) is not KVariable or v.name != k})
+ +703 +
+[docs] +704 def compose(self, other: Subst) -> Subst: +705 """Compose two substitutions: apply `self` to the values of `other`, and keep the assignments of `self` for variables not bound by `other`.""" +706 from_other = ((k, self(v)) for k, v in other.items()) +707 from_self = ((k, v) for k, v in self.items() if k not in other) +708 return Subst(dict(chain(from_other, from_self)))
+ +709 +
+[docs] +710 def union(self, other: Subst) -> Subst | None: +711 """Union two substitutions together, failing with `None` if there are conflicting assignments.""" +712 subst = dict(self) +713 for v in other: +714 if v in subst and subst[v] != other[v]: +715 return None +716 subst[v] = other[v] +717 return Subst(subst)
+ +718 +
+[docs] +719 def apply(self, term: KInner) -> KInner: +720 """Apply this substitution to `term`, replacing free variable occurrences with the values they are bound to in this `Subst`.""" +721 +722 def replace(term: KInner) -> KInner: +723 if type(term) is KVariable and term.name in self: +724 return self[term.name] +725 return term +726 +727 return bottom_up(replace, term)
+ +728 +
+[docs] +729 def unapply(self, term: KInner) -> KInner: +730 """Replace occurrences of the values of this `Subst` with the variables they are assigned to.""" +731 new_term = term +732 for var_name in self: +733 lhs = self[var_name] +734 rhs = KVariable(var_name) +735 new_term = KRewrite(lhs, rhs).replace(new_term) +736 return new_term
+ +737 +
+[docs] +738 @staticmethod +739 def from_pred(pred: KInner) -> Subst: +740 """Given a generic matching logic predicate, attempt to extract a `Subst` from it.""" +741 from .manip import flatten_label +742 +743 subst: dict[str, KInner] = {} +744 for conjunct in flatten_label('#And', pred): +745 match conjunct: +746 case KApply(KLabel('#Equals'), [KVariable(var), term]): +747 subst[var] = term +748 case _: +749 raise ValueError(f'Invalid substitution predicate: {conjunct}') +750 return Subst(subst)
+ +751 +752 @property +753 def pred(self) -> KInner: +754 """Turn this `Subst` into a boolean predicate using the `_==K_` operator.""" +755 conjuncts = [ +756 KApply('_==K_', KVariable(name), val) +757 for name, val in self.items() +758 if type(val) is not KVariable or val.name != name +759 ] +760 if not conjuncts: +761 return KToken('true', 'Bool') +762 +763 return reduce(KLabel('_andBool_'), conjuncts) +764 +765 @property +766 def is_identity(self) -> bool: +767 return len(self.minimize()) == 0
+ +768 +769 +
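+# Usage sketch for `Subst` (assumes the usual `pyk.kast.inner` import path):
+from pyk.kast.inner import KApply, KToken, KVariable, Subst
+
+s1 = Subst({'X': KToken('1', 'Int')})
+s2 = Subst({'Y': KApply('f', KVariable('X'))})
+# apply: replace free variables by their assigned values
+assert s1(KApply('g', KVariable('X'))) == KApply('g', KToken('1', 'Int'))
+# compose (also available as `*`): apply `s1` to the values of `s2`, keep the rest of `s1`
+assert (s1 * s2)['Y'] == KApply('f', KToken('1', 'Int'))
+# union: fails with None on conflicting assignments
+assert s1.union(Subst({'X': KToken('2', 'Int')})) is None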
+[docs] +770def bottom_up_with_summary(f: Callable[[KInner, list[A]], tuple[KInner, A]], kinner: KInner) -> tuple[KInner, A]: +771 """Traverse a term from the bottom moving upward, collecting information about it. +772 +773 Args: +774 f: Function to apply at each AST node to transform it and collect summary. +775 kinner: Term to apply this transformation to. +776 +777 Returns: +778 A tuple of the transformed term and the summarized results. +779 """ +780 stack: list = [kinner, [], []] +781 while True: +782 summaries = stack[-1] +783 terms = stack[-2] +784 term = stack[-3] +785 idx = len(terms) - len(term.terms) +786 if not idx: +787 stack.pop() +788 stack.pop() +789 stack.pop() +790 term, summary = f(term.let_terms(terms), summaries) +791 if not stack: +792 return term, summary +793 stack[-1].append(summary) +794 stack[-2].append(term) +795 else: +796 stack.append(term.terms[idx]) +797 stack.append([]) +798 stack.append([])
+ +799 +800 +801# TODO make method of KInner +
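+# Usage sketch for `bottom_up_with_summary`: count the nodes of a term while leaving
+# it unchanged (assumes the usual `pyk.kast.inner` import path; the term is illustrative).
+from pyk.kast.inner import KApply, KToken, bottom_up_with_summary
+
+def count_nodes(term, child_counts):
+    return term, 1 + sum(child_counts)
+
+term = KApply('f', KApply('g', KToken('1', 'Int')), KToken('2', 'Int'))
+_, size = bottom_up_with_summary(count_nodes, term)
+assert size == 4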
+[docs] +802def bottom_up(f: Callable[[KInner], KInner], kinner: KInner) -> KInner: +803 """Transform a term from the bottom moving upward. +804 +805 Args: +806 f: Function to apply to each node in the term. +807 kinner: Original term to transform. +808 +809 Returns: +810 The transformed term. +811 """ +812 stack: list = [kinner, []] +813 while True: +814 terms = stack[-1] +815 term = stack[-2] +816 idx = len(terms) - len(term.terms) +817 if not idx: +818 stack.pop() +819 stack.pop() +820 term = f(term.let_terms(terms)) +821 if not stack: +822 return term +823 stack[-1].append(term) +824 else: +825 stack.append(term.terms[idx]) +826 stack.append([])
+ +827 +828 +829# TODO make method of KInner +
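+# Usage sketch for `bottom_up` (assumes the usual `pyk.kast.inner` import path):
+from pyk.kast.inner import KApply, KToken, KVariable, bottom_up
+
+def zero_to_one(t):
+    return KToken('1', 'Int') if t == KToken('0', 'Int') else t
+
+term = KApply('f', KToken('0', 'Int'), KVariable('X'))
+assert bottom_up(zero_to_one, term) == KApply('f', KToken('1', 'Int'), KVariable('X'))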
+[docs] +830def top_down(f: Callable[[KInner], KInner], kinner: KInner) -> KInner: +831 """Transform a term from the top moving downward. +832 +833 Args: +834 f: Function to apply to each node in the term. +835 kinner: Original term to transform. +836 +837 Returns: +838 The transformed term. +839 """ +840 stack: list = [f(kinner), []] +841 while True: +842 terms = stack[-1] +843 term = stack[-2] +844 idx = len(terms) - len(term.terms) +845 if not idx: +846 stack.pop() +847 stack.pop() +848 term = term.let_terms(terms) +849 if not stack: +850 return term +851 stack[-1].append(term) +852 else: +853 stack.append(f(term.terms[idx])) +854 stack.append([])
+ +855 +856 +857# TODO: make method of KInner +
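+# Usage sketch for `top_down`: the function is applied before descending, so the children
+# of the already-transformed node are visited (assumes the usual `pyk.kast.inner` import path).
+from pyk.kast.inner import KApply, KVariable, top_down
+
+def relabel(t):
+    return KApply('g', *t.args) if type(t) is KApply and t.label.name == 'f' else t
+
+term = KApply('f', KApply('f', KVariable('X')))
+assert top_down(relabel, term) == KApply('g', KApply('g', KVariable('X')))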
+[docs] +858def var_occurrences(term: KInner) -> dict[str, list[KVariable]]: +859 """Collect the list of occurrences of each variable in a given term. +860 +861 Args: +862 term: Term to collect variables from. +863 +864 Returns: +865 A dictionary with variable names as keys and the list of all occurrences of the variable as values. +866 """ +867 _var_occurrences: dict[str, list[KVariable]] = {} +868 +869 # TODO: should treat #Exists and #Forall specially. +870 def _var_occurrence(_term: KInner) -> None: +871 if type(_term) is KVariable: +872 if _term.name not in _var_occurrences: +873 _var_occurrences[_term.name] = [] +874 _var_occurrences[_term.name].append(_term) +875 +876 collect(_var_occurrence, term) +877 return _var_occurrences
+ +878 +879 +
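+# Usage sketch for `var_occurrences` (assumes the usual `pyk.kast.inner` import path):
+from pyk.kast.inner import KApply, KVariable, var_occurrences
+
+term = KApply('f', KVariable('X'), KApply('g', KVariable('X'), KVariable('Y')))
+occurrences = var_occurrences(term)
+assert set(occurrences) == {'X', 'Y'} and len(occurrences['X']) == 2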
+[docs] +880def collect(callback: Callable[[KInner], None], kinner: KInner) -> None: +881 """Collect information about a given term traversing it top-down using a function with side effects. +882 +883 Args: +884 callback: Function with the side effect of collecting desired information at each AST node. +885 kinner: The term to traverse. +886 """ +887 subterms = [kinner] +888 while subterms: +889 term = subterms.pop() +890 subterms.extend(reversed(term.terms)) +891 callback(term)
+ +892 +893 +
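+# Usage sketch for `collect`: gather the labels of all applications in a term,
+# top-down (assumes the usual `pyk.kast.inner` import path).
+from pyk.kast.inner import KApply, KToken, collect
+
+labels = []
+
+def record_label(t):
+    if type(t) is KApply:
+        labels.append(t.label.name)
+
+collect(record_label, KApply('f', KApply('g', KToken('1', 'Int'))))
+assert labels == ['f', 'g']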
+[docs] +894def build_assoc(unit: KInner, label: str | KLabel, terms: Iterable[KInner]) -> KInner: +895 """Build an associative list. +896 +897 Args: +898 unit: The empty variant of the given list type. +899 label: The associative list join operator. +900 terms: List (potentially empty) of terms to join in an associative list. +901 +902 Returns: +903 The list of terms joined using the supplied label, or the unit element in the case of no terms. +904 """ +905 _label = label if type(label) is KLabel else KLabel(label) +906 res: KInner | None = None +907 for term in reversed(list(terms)): +908 if term == unit: +909 continue +910 if not res: +911 res = term +912 else: +913 res = _label(term, res) +914 return res or unit
+ +915 +916 +
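+# Usage sketch for `build_assoc` (illustrative label and unit; assumes the usual `pyk.kast.inner` import path):
+from pyk.kast.inner import KApply, KToken, build_assoc
+
+unit = KApply('.List')
+terms = [KToken('1', 'Int'), unit, KToken('2', 'Int')]
+# Unit elements are dropped; the remaining terms are joined with the label.
+assert build_assoc(unit, '_List_', terms) == KApply('_List_', KToken('1', 'Int'), KToken('2', 'Int'))
+assert build_assoc(unit, '_List_', []) == unit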
+[docs] +917def build_cons(unit: KInner, label: str | KLabel, terms: Iterable[KInner]) -> KInner: +918 """Build a cons list. +919 +920 Args: +921 unit: The empty variant of the given list type. +922 label: The cons list join operator. +923 terms: List (potentially empty) of terms to join into a cons list. +924 +925 Returns: +926 The list of terms joined using the supplied label, terminated with the unit element. +927 """ +928 it = iter(terms) +929 try: +930 fst = next(it) +931 return KApply(label, (fst, build_cons(unit, label, it))) +932 except StopIteration: +933 return unit
+ +934 +935 +
+[docs] +936def flatten_label(label: str, kast: KInner) -> list[KInner]: +937 """Given a cons list, return a flat Python list of the elements. +938 +939 Args: +940 label: The cons operator. +941 kast: The cons list to flatten. +942 +943 Returns: +944 Items of cons list. +945 """ +946 flattened_args = [] +947 rest_of_args = [kast] # Rest of arguments in reversed order +948 while rest_of_args: +949 current_arg = rest_of_args.pop() +950 if isinstance(current_arg, KApply) and current_arg.label.name == label: +951 rest_of_args.extend(reversed(current_arg.args)) +952 else: +953 flattened_args.append(current_arg) +954 return flattened_args
+ +
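+# Usage sketch relating `build_cons` and `flatten_label` (illustrative label and unit;
+# assumes the usual `pyk.kast.inner` import path):
+from pyk.kast.inner import KApply, KToken, build_cons, flatten_label
+
+unit = KApply('.List')
+items = [KToken('1', 'Int'), KToken('2', 'Int')]
+cons = build_cons(unit, '_List_', items)
+assert cons == KApply('_List_', KToken('1', 'Int'), KApply('_List_', KToken('2', 'Int'), unit))
+assert flatten_label('_List_', cons) == items + [unit]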
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/kast.html b/pyk/_modules/pyk/kast/kast.html new file mode 100644 index 00000000000..f60eff55576 --- /dev/null +++ b/pyk/_modules/pyk/kast/kast.html @@ -0,0 +1,172 @@ + + + + + + + + pyk.kast.kast — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.kast

+ 1from __future__ import annotations
+ 2
+ 3import json
+ 4import logging
+ 5from abc import ABC, abstractmethod
+ 6from dataclasses import fields
+ 7from functools import cached_property
+ 8from typing import TYPE_CHECKING, Any, final
+ 9
+10from ..utils import hash_str
+11
+12if TYPE_CHECKING:
+13    from collections.abc import Mapping
+14    from typing import Final
+15
+16
+17_LOGGER: Final = logging.getLogger(__name__)
+18
+19
+
+[docs] +20class KAst(ABC): +
+[docs] +21 @staticmethod +22 def version() -> int: +23 return 3
+ +24 +
+[docs] +25 @abstractmethod +26 def to_dict(self) -> dict[str, Any]: ...
+ +27 +
+[docs] +28 @final +29 def to_json(self) -> str: +30 return json.dumps(self.to_dict(), sort_keys=True)
+ +31 +32 @final +33 @cached_property +34 def hash(self) -> str: +35 return hash_str(self.to_json()) +36 +37 def __lt__(self, other: Any) -> bool: +38 if not isinstance(other, KAst): +39 return NotImplemented +40 if type(self) == type(other): +41 return self._as_shallow_tuple() < other._as_shallow_tuple() +42 return type(self).__name__ < type(other).__name__ +43 +44 def _as_shallow_tuple(self) -> tuple[Any, ...]: +45 # shallow copy version of dataclass.astuple. +46 return tuple(self.__dict__[field.name] for field in fields(type(self))) # type: ignore
+ +47 +48 +
+[docs] +49def kast_term(dct: Mapping[str, Any]) -> Mapping[str, Any]: +50 if dct['format'] != 'KAST': +51 raise ValueError(f"Invalid format: {dct['format']}") +52 +53 if dct['version'] != KAst.version(): +54 raise ValueError(f"Invalid version: {dct['version']}") +55 +56 return dct['term']
+ +
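+# Usage sketch for `kast_term`: it validates the envelope and returns the payload
+# (the term dictionary below is an illustrative stand-in, not a full serialized term).
+from pyk.kast.kast import KAst, kast_term
+
+term_dict = {'node': 'KToken', 'token': '1', 'sort': 'Int'}
+payload = {'format': 'KAST', 'version': KAst.version(), 'term': term_dict}
+assert kast_term(payload) == term_dict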
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/lexer.html b/pyk/_modules/pyk/kast/lexer.html new file mode 100644 index 00000000000..ba5d90cb5d1 --- /dev/null +++ b/pyk/_modules/pyk/kast/lexer.html @@ -0,0 +1,378 @@ + + + + + + + + pyk.kast.lexer — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.lexer

+  1from __future__ import annotations
+  2
+  3from collections.abc import Callable, Iterator
+  4from enum import Enum, auto
+  5from typing import TYPE_CHECKING, NamedTuple
+  6
+  7if TYPE_CHECKING:
+  8    from collections.abc import Iterable
+  9    from typing import Final
+ 10
+ 11
+
+[docs] + 12class TokenType(Enum): + 13 EOF = auto() + 14 LPAREN = auto() + 15 RPAREN = auto() + 16 COMMA = auto() + 17 COLON = auto() + 18 KSEQ = auto() + 19 DOTK = auto() + 20 DOTKLIST = auto() + 21 TOKEN = auto() + 22 ID = auto() + 23 VARIABLE = auto() + 24 SORT = auto() + 25 KLABEL = auto() + 26 STRING = auto()
+ + 27 + 28 +
+[docs] + 29class Token(NamedTuple): + 30 text: str + 31 type: TokenType
+ + 32 + 33 +
+[docs] + 34class State(Enum): + 35 DEFAULT = auto() + 36 SORT = auto()
+ + 37 + 38 +
+[docs] + 39def lexer(text: Iterable[str]) -> Iterator[Token]: + 40 state = State.DEFAULT + 41 it = iter(text) + 42 la = next(it, '') + 43 while True: + 44 while la.isspace(): + 45 la = next(it, '') + 46 + 47 if not la: + 48 yield _TOKENS[TokenType.EOF] + 49 return + 50 + 51 try: + 52 sublexer = _SUBLEXER[state][la] + 53 except KeyError: + 54 raise _unexpected_char(la) from None + 55 + 56 token, la = sublexer(la, it) + 57 state = _STATE.get(token.type, State.DEFAULT) + 58 yield token
+ + 59 + 60 + 61_TOKENS: Final = { + 62 typ: Token(txt, typ) + 63 for typ, txt in ( + 64 (TokenType.EOF, ''), + 65 (TokenType.LPAREN, '('), + 66 (TokenType.RPAREN, ')'), + 67 (TokenType.COMMA, ','), + 68 (TokenType.COLON, ':'), + 69 (TokenType.KSEQ, '~>'), + 70 (TokenType.DOTK, '.K'), + 71 (TokenType.DOTKLIST, '.KList'), + 72 (TokenType.TOKEN, '#token'), + 73 ) + 74} + 75 + 76 + 77_DIGIT: Final = set('0123456789') + 78_LOWER: Final = set('abcdefghijklmnopqrstuvwxyz') + 79_UPPER: Final = set('ABCDEFGHIJKLMNOPQRSTUVWXYZ') + 80_ALNUM: Final = set.union(_DIGIT, _LOWER, _UPPER) + 81 + 82 + 83_UNEXPECTED_EOF: Final = ValueError('Unexpected end of file') + 84 + 85 + 86def _unexpected_char(actual: str, expected: str | None = None) -> ValueError: + 87 if expected is None: + 88 return ValueError(f'Unexpected character: {actual!r}') + 89 actual_str = repr(actual) if actual else '<EOF>' + 90 return ValueError(f'Expected {expected!r}, got: {actual_str}') + 91 + 92 + 93SubLexer = Callable[[str, Iterator[str]], tuple[Token, str]] + 94 + 95 + 96def _simple(token: Token) -> SubLexer: + 97 def sublexer(la: str, it: Iterator[str]) -> tuple[Token, str]: + 98 la = next(it, '') + 99 return token, la +100 +101 return sublexer +102 +103 +104def _delimited(delimiter: str, type: TokenType) -> SubLexer: +105 assert len(delimiter) == 1 +106 +107 def sublexer(la: str, it: Iterator[str]) -> tuple[Token, str]: +108 assert la == delimiter +109 buf = [la] +110 la = next(it, '') +111 while True: +112 if not la: +113 raise _UNEXPECTED_EOF +114 +115 elif la == delimiter: +116 buf.append(la) +117 la = next(it, '') +118 return Token(''.join(buf), type), la +119 +120 elif la == '\\': +121 buf.append(la) +122 la = next(it, '') +123 if not la: +124 raise _UNEXPECTED_EOF +125 buf.append(la) +126 la = next(it, '') +127 +128 else: +129 buf.append(la) +130 la = next(it, '') +131 +132 return sublexer +133 +134 +135def _kseq(la: str, it: Iterator[str]) -> tuple[Token, str]: +136 assert la == '~' +137 la = next(it, '') +138 if la != '>': +139 raise _unexpected_char(la, '>') +140 la = next(it, '') +141 return _TOKENS[TokenType.KSEQ], la +142 +143 +144_ID_CHARS: Final = set.union(_LOWER, _UPPER, _DIGIT) +145 +146 +147def _id_or_token(la: str, it: Iterator[str]) -> tuple[Token, str]: +148 """Match an ID or token. +149 +150 Corresponds to regex: [#a-z](a-zA-Z0-9)* +151 """ +152 assert la == '#' or la in _LOWER +153 buf = [la] +154 la = next(it, '') +155 while la in _ID_CHARS: +156 buf += la +157 la = next(it, '') +158 text = ''.join(buf) +159 if text == '#token': +160 return _TOKENS[TokenType.TOKEN], la +161 return Token(text, TokenType.ID), la +162 +163 +164_VARIABLE_CHARS: Final = set.union(_LOWER, _UPPER, _DIGIT, set("'_")) +165 +166 +167def _variable(la: str, it: Iterator[str]) -> tuple[Token, str]: +168 r"""Match a variable. +169 +170 Corresponds to regex: _ | \?_ | \??_?[A-Z][a-zA-Z0-9'_]* +171 """ +172 assert la == '?' 
or la == '_' or la in _UPPER +173 +174 # States: +175 # 0: expect '_' or _UPPER +176 # 1: continue if _UPPER +177 # 2: read while _VARIABLE_CHARS +178 state = {'?': 0, '_': 1}.get(la, 2) +179 +180 buf = [la] +181 la = next(it, '') +182 +183 if state == 0: +184 if la == '_': +185 state = 1 +186 elif la in _UPPER: +187 state = 2 +188 else: +189 raise _unexpected_char(la) +190 +191 buf += la +192 la = next(it, '') +193 +194 if state == 1: +195 if la in _UPPER: +196 buf += la +197 la = next(it, '') +198 state = 2 +199 else: +200 la = next(it, '') +201 text = ''.join(buf) +202 return Token(text, TokenType.VARIABLE), la +203 +204 assert state == 2 +205 while la in _VARIABLE_CHARS: +206 buf += la +207 la = next(it, '') +208 text = ''.join(buf) +209 return Token(text, TokenType.VARIABLE), la +210 +211 +212# For ease of implementation, KDOT and KDOTLIST tokens are read until _SEP +213# This allows LA(1) +214# But e.g. .KA won't be lexed, even though it can be read as [KDOT, VARIABLE] +215_SEP: Final = set(',:()`"#.~ \t\r\n').union({''}) +216 +217 +218def _dotk_or_dotklist(la: str, it: Iterator[str]) -> tuple[Token, str]: +219 assert la == '.' +220 la = next(it, '') +221 if la != 'K': +222 raise _unexpected_char(la, 'K') +223 la = next(it, '') +224 if la in _SEP: +225 return _TOKENS[TokenType.DOTK], la +226 for c in 'List': +227 if la != c: +228 raise _unexpected_char(la, c) +229 la = next(it, '') +230 if la in _SEP: +231 return _TOKENS[TokenType.DOTKLIST], la +232 raise _unexpected_char(la) +233 +234 +235def _sort(la: str, it: Iterator[str]) -> tuple[Token, str]: +236 assert la in _UPPER +237 buf = [la] +238 la = next(it, '') +239 while la in _ALNUM: +240 buf.append(la) +241 la = next(it, '') +242 text = ''.join(buf) +243 return Token(text, TokenType.SORT), la +244 +245 +246_SUBLEXER: Final[dict[State, dict[str, SubLexer]]] = { +247 State.DEFAULT: { +248 '(': _simple(_TOKENS[TokenType.LPAREN]), +249 ')': _simple(_TOKENS[TokenType.RPAREN]), +250 ',': _simple(_TOKENS[TokenType.COMMA]), +251 ':': _simple(_TOKENS[TokenType.COLON]), +252 '"': _delimited('"', TokenType.STRING), +253 '`': _delimited('`', TokenType.KLABEL), +254 '~': _kseq, +255 '.': _dotk_or_dotklist, +256 **{c: _id_or_token for c in {'#'}.union(_LOWER)}, +257 **{c: _variable for c in {'?', '_'}.union(_UPPER)}, +258 }, +259 State.SORT: {c: _sort for c in _UPPER}, +260} +261 +262 +263_STATE: Final[dict[TokenType, State]] = { +264 TokenType.COLON: State.SORT, +265} +
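+# Usage sketch for `lexer` on a small KAST string (assumes the usual `pyk.kast.lexer` import path):
+from pyk.kast.lexer import TokenType, lexer
+
+tokens = list(lexer('`_+_`(X:Int, #token("1", "Int"))'))
+assert [t.type for t in tokens[:5]] == [
+    TokenType.KLABEL, TokenType.LPAREN, TokenType.VARIABLE, TokenType.COLON, TokenType.SORT,
+]
+assert tokens[-1].type == TokenType.EOF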
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/manip.html b/pyk/_modules/pyk/kast/manip.html new file mode 100644 index 00000000000..c815dd5625e --- /dev/null +++ b/pyk/_modules/pyk/kast/manip.html @@ -0,0 +1,1113 @@ + + + + + + + + pyk.kast.manip — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.manip

+  1from __future__ import annotations
+  2
+  3import logging
+  4from collections import Counter
+  5from typing import TYPE_CHECKING
+  6
+  7from ..prelude.k import DOTS, GENERATED_TOP_CELL
+  8from ..prelude.kbool import FALSE, TRUE, andBool, impliesBool, notBool, orBool
+  9from ..prelude.ml import is_top, mlAnd, mlBottom, mlEquals, mlEqualsTrue, mlImplies, mlOr, mlTop
+ 10from ..utils import find_common_items, hash_str, unique
+ 11from .att import EMPTY_ATT, Atts, KAtt, WithKAtt
+ 12from .inner import (
+ 13    KApply,
+ 14    KLabel,
+ 15    KRewrite,
+ 16    KSequence,
+ 17    KSort,
+ 18    KToken,
+ 19    KVariable,
+ 20    Subst,
+ 21    bottom_up,
+ 22    collect,
+ 23    flatten_label,
+ 24    top_down,
+ 25    var_occurrences,
+ 26)
+ 27from .outer import KClaim, KDefinition, KFlatModule, KRule, KRuleLike
+ 28from .rewrite import indexed_rewrite
+ 29
+ 30if TYPE_CHECKING:
+ 31    from collections.abc import Callable, Collection, Iterable
+ 32    from typing import Final, TypeVar
+ 33
+ 34    from .inner import KInner
+ 35
+ 36    KI = TypeVar('KI', bound=KInner)
+ 37    W = TypeVar('W', bound=WithKAtt)
+ 38    RL = TypeVar('RL', bound=KRuleLike)
+ 39
+ 40_LOGGER: Final = logging.getLogger(__name__)
+ 41
+ 42
+
+[docs] + 43def is_term_like(kast: KInner) -> bool: + 44 non_term_found = False + 45 + 46 def _is_term_like(_kast: KInner) -> None: + 47 nonlocal non_term_found + 48 match _kast: + 49 case KVariable(name, _): + 50 if name.startswith('@'): + 51 non_term_found = True + 52 case KApply(KLabel(name, _), _): + 53 if name in { + 54 '#Equals', + 55 '#And', + 56 '#Or', + 57 '#Top', + 58 '#Bottom', + 59 '#Implies', + 60 '#Not', + 61 '#Ceil', + 62 '#Forall', + 63 '#Exists', + 64 }: + 65 non_term_found = True + 66 + 67 collect(_is_term_like, kast) + 68 return not non_term_found
+ + 69 + 70 +
+[docs] + 71def sort_assoc_label(label: str, kast: KInner) -> KInner: + 72 res: KInner | None = None + 73 if type(kast) is KApply and kast.label.name == label: + 74 terms = sorted(flatten_label(label, kast)) + 75 for term in reversed(terms): + 76 if not res: + 77 res = term + 78 else: + 79 res = kast.label(term, res) + 80 assert res is not None + 81 return res + 82 return kast
+ + 83 + 84 +
+[docs] + 85def sort_ac_collections(kast: KInner) -> KInner: + 86 def _sort_ac_collections(_kast: KInner) -> KInner: + 87 if type(_kast) is KApply and ( + 88 _kast.label.name in {'_Set_', '_Map_', '_RangeMap_'} or _kast.label.name.endswith('CellMap_') + 89 ): + 90 return sort_assoc_label(_kast.label.name, _kast) + 91 return _kast + 92 + 93 return top_down(_sort_ac_collections, kast)
+ + 94 + 95 +
+[docs] + 96def if_ktype(ktype: type[KI], then: Callable[[KI], KInner]) -> Callable[[KInner], KInner]: + 97 def fun(term: KInner) -> KInner: + 98 if isinstance(term, ktype): + 99 return then(term) +100 return term +101 +102 return fun
+ +103 +104 +
+[docs] +105def bool_to_ml_pred(kast: KInner, sort: str | KSort = GENERATED_TOP_CELL) -> KInner: +106 def _bool_constraint_to_ml(_kast: KInner) -> KInner: +107 if _kast == TRUE: +108 return mlTop(sort=sort) +109 if _kast == FALSE: +110 return mlBottom(sort=sort) +111 return mlEqualsTrue(_kast, sort=sort) +112 +113 return mlAnd([_bool_constraint_to_ml(cond) for cond in flatten_label('_andBool_', kast)], sort=sort)
+ +114 +115 +
+[docs] +116def ml_pred_to_bool(kast: KInner, unsafe: bool = False) -> KInner: +117 def _ml_constraint_to_bool(_kast: KInner) -> KInner: +118 if type(_kast) is KApply: +119 if _kast.label.name == '#Top': +120 return TRUE +121 if _kast.label.name == '#Bottom': +122 return FALSE +123 if _kast.label.name == '#Not' and len(_kast.args) == 1: +124 return notBool(_ml_constraint_to_bool(_kast.args[0])) +125 if _kast.label.name == '#And': +126 return andBool(map(_ml_constraint_to_bool, _kast.args)) +127 if _kast.label.name == '#Or': +128 return orBool(map(_ml_constraint_to_bool, _kast.args)) +129 if _kast.label.name == '#Implies' and len(_kast.args) == 2: +130 return impliesBool(_ml_constraint_to_bool(_kast.args[0]), _ml_constraint_to_bool(_kast.args[1])) +131 if _kast.label.name == '#Equals': +132 first, second = _kast.args +133 if first == TRUE: +134 return second +135 if first == FALSE: +136 return notBool(second) +137 if second == TRUE: +138 return first +139 if second == FALSE: +140 return notBool(first) +141 if isinstance(first, (KVariable, KToken)): +142 if first.sort == KSort('Int'): +143 return KApply('_==Int_', _kast.args) +144 else: +145 return KApply('_==K_', _kast.args) +146 if isinstance(second, (KVariable, KToken)): +147 if second.sort == KSort('Int'): +148 return KApply('_==Int_', _kast.args) +149 else: +150 return KApply('_==K_', _kast.args) +151 if type(first) is KSequence and type(second) is KSequence: +152 if first.arity == 1 and second.arity == 1: +153 return KApply('_==K_', (first.items[0], second.items[0])) +154 if is_term_like(first) and is_term_like(second): +155 return KApply('_==K_', first, second) +156 if unsafe: +157 if _kast.label.name == '#Equals': +158 return KApply('_==K_', _kast.args) +159 if _kast.label.name == '#Ceil': +160 ceil_var = abstract_term_safely(_kast, base_name='Ceil') +161 _LOGGER.warning(f'Converting #Ceil condition to variable {ceil_var.name}: {_kast}') +162 return ceil_var +163 if _kast.label.name == '#Exists': +164 exists_var = abstract_term_safely(_kast, base_name='Exists') +165 _LOGGER.warning(f'Converting #Exists condition to variable {exists_var.name}: {_kast}') +166 return exists_var +167 raise ValueError(f'Could not convert ML predicate to sort Bool: {_kast}') +168 +169 return _ml_constraint_to_bool(kast)
+ +170 +171 +
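+# Usage sketch for `ml_pred_to_bool` (the `#Equals` application below omits the sort
+# parameters it usually carries; assumes the usual import paths):
+from pyk.kast.inner import KApply, KVariable
+from pyk.kast.manip import ml_pred_to_bool
+from pyk.prelude.kbool import TRUE
+
+pred = KApply('#Equals', TRUE, KVariable('B'))
+assert ml_pred_to_bool(pred) == KVariable('B')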
+[docs] +172def simplify_bool(k: KInner) -> KInner: +173 if k is None: +174 return None +175 +176 # fmt: off +177 simplify_rules = [ (KApply('_==K_', [KVariable('#LHS'), TRUE]), KVariable('#LHS')) +178 , (KApply('_==K_', [TRUE, KVariable('#RHS')]), KVariable('#RHS')) +179 , (KApply('_==K_', [KVariable('#LHS'), FALSE]), notBool(KVariable('#LHS'))) +180 , (KApply('_==K_', [FALSE, KVariable('#RHS')]), notBool(KVariable('#RHS'))) +181 , (notBool(FALSE), TRUE) +182 , (notBool(TRUE), FALSE) +183 , (notBool(KApply('_==K_' , [KVariable('#V1'), KVariable('#V2')])), KApply('_=/=K_' , [KVariable('#V1'), KVariable('#V2')])) +184 , (notBool(KApply('_=/=K_' , [KVariable('#V1'), KVariable('#V2')])), KApply('_==K_' , [KVariable('#V1'), KVariable('#V2')])) +185 , (notBool(KApply('_==Int_' , [KVariable('#V1'), KVariable('#V2')])), KApply('_=/=Int_' , [KVariable('#V1'), KVariable('#V2')])) +186 , (notBool(KApply('_=/=Int_' , [KVariable('#V1'), KVariable('#V2')])), KApply('_==Int_' , [KVariable('#V1'), KVariable('#V2')])) +187 , (andBool([TRUE, KVariable('#REST')]), KVariable('#REST')) +188 , (andBool([KVariable('#REST'), TRUE]), KVariable('#REST')) +189 , (andBool([FALSE, KVariable('#REST')]), FALSE) +190 , (andBool([KVariable('#REST'), FALSE]), FALSE) +191 , (orBool([FALSE, KVariable('#REST')]), KVariable('#REST')) +192 , (orBool([KVariable('#REST'), FALSE]), KVariable('#REST')) +193 , (orBool([TRUE, KVariable('#REST')]), TRUE) +194 , (orBool([KVariable('#REST'), TRUE]), TRUE) +195 ] +196 # fmt: on +197 +198 new_k = k +199 for rule in simplify_rules: +200 rewrite = KRewrite(*rule) +201 new_k = rewrite(new_k) +202 return new_k
+ +203 +204 +
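+# Usage sketch for `simplify_bool` (assumes the usual import paths):
+from pyk.kast.inner import KApply, KVariable
+from pyk.kast.manip import simplify_bool
+from pyk.prelude.kbool import FALSE, TRUE, notBool
+
+assert simplify_bool(notBool(FALSE)) == TRUE
+assert simplify_bool(KApply('_==K_', KVariable('B'), TRUE)) == KVariable('B')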
+[docs] +205def normalize_ml_pred(pred: KInner) -> KInner: +206 return bool_to_ml_pred(simplify_bool(ml_pred_to_bool(pred)))
+ +207 +208 +
+[docs] +209def extract_lhs(term: KInner) -> KInner: +210 return top_down(if_ktype(KRewrite, lambda rw: rw.lhs), term)
+ +211 +212 +
+[docs] +213def extract_rhs(term: KInner) -> KInner: +214 return top_down(if_ktype(KRewrite, lambda rw: rw.rhs), term)
+ +215 +216 +
+[docs] +217def extract_subst(term: KInner) -> tuple[Subst, KInner]: +218 subst = {} +219 rem_conjuncts: list[KInner] = [] +220 +221 def _extract_subst(term: KInner, term2: KInner) -> tuple[str, KInner] | None: +222 if ( +223 (isinstance(term, KVariable) and term.name not in subst) +224 and not (isinstance(term2, KVariable) and term2.name in subst) +225 and term.name not in free_vars(term2) +226 ): +227 return (term.name, term2) +228 if ( +229 (isinstance(term2, KVariable) and term2.name not in subst) +230 and not (isinstance(term, KVariable) and term.name in subst) +231 and term2.name not in free_vars(term) +232 ): +233 return (term2.name, term) +234 if term == TRUE and isinstance(term2, KApply) and term2.label.name in {'_==K_', '_==Int_'}: +235 return _extract_subst(term2.args[0], term2.args[1]) +236 if term2 == TRUE and isinstance(term, KApply) and term.label.name in {'_==K_', '_==Int_'}: +237 return _extract_subst(term.args[0], term.args[1]) +238 return None +239 +240 for conjunct in flatten_label('#And', term): +241 if isinstance(conjunct, KApply) and conjunct.label.name == '#Equals': +242 if _conjunct_subst := _extract_subst(conjunct.args[0], conjunct.args[1]): +243 name, value = _conjunct_subst +244 subst[name] = value +245 else: +246 rem_conjuncts.append(conjunct) +247 else: +248 rem_conjuncts.append(conjunct) +249 +250 return Subst(subst), mlAnd(rem_conjuncts)
+ +251 +252 +
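+# Usage sketch for `extract_subst` (sorts on `#And`/`#Equals` omitted for brevity;
+# assumes the usual import paths):
+from pyk.kast.inner import KApply, KToken, KVariable
+from pyk.kast.manip import extract_subst
+
+pred = KApply(
+    '#And',
+    KApply('#Equals', KVariable('X'), KToken('1', 'Int')),
+    KApply('#Equals', KToken('2', 'Int'), KToken('2', 'Int')),
+)
+subst, rest = extract_subst(pred)
+# The variable binding is extracted; the remaining conjunct stays in `rest`.
+assert dict(subst) == {'X': KToken('1', 'Int')}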
+[docs] +253def count_vars(term: KInner) -> Counter[str]: +254 counter: Counter[str] = Counter() +255 occurrences = var_occurrences(term) +256 for vname in occurrences: +257 counter[vname] = len(occurrences[vname]) +258 return counter
+ +259 +260 +
+[docs] +261def free_vars(kast: KInner) -> frozenset[str]: +262 return frozenset(count_vars(kast).keys())
+ +263 +264 +
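+# Usage sketch for `count_vars` and `free_vars` (assumes the usual import paths):
+from pyk.kast.inner import KApply, KVariable
+from pyk.kast.manip import count_vars, free_vars
+
+term = KApply('f', KVariable('X'), KApply('g', KVariable('X'), KVariable('Y')))
+assert count_vars(term)['X'] == 2
+assert free_vars(term) == frozenset({'X', 'Y'})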
+[docs] +265def propagate_up_constraints(k: KInner) -> KInner: +266 def _propagate_up_constraints(_k: KInner) -> KInner: +267 if not (type(_k) is KApply and _k.label.name == '#Or'): +268 return _k +269 top_sort = _k.label.params[0] +270 conjuncts1 = flatten_label('#And', _k.args[0]) +271 conjuncts2 = flatten_label('#And', _k.args[1]) +272 (common1, l1, r1) = find_common_items(conjuncts1, conjuncts2) +273 (common2, r2, l2) = find_common_items(r1, l1) +274 common = common1 + common2 +275 if len(common) == 0: +276 return _k +277 conjunct1 = mlAnd(l2, sort=top_sort) +278 conjunct2 = mlAnd(r2, sort=top_sort) +279 disjunct = mlOr([conjunct1, conjunct2], sort=top_sort) +280 return mlAnd([disjunct] + common, sort=top_sort) +281 +282 return bottom_up(_propagate_up_constraints, k)
+ +283 +284 +
+[docs] +285def split_config_and_constraints(kast: KInner) -> tuple[KInner, KInner]: +286 conjuncts = flatten_label('#And', kast) +287 term = None +288 constraints = [] +289 for c in conjuncts: +290 if type(c) is KApply and c.is_cell: +291 if term: +292 raise ValueError(f'Found two configurations in pattern:\n\n{term}\n\nand\n\n{c}') +293 term = c +294 else: +295 constraints.append(c) +296 if not term: +297 raise ValueError(f'Could not find configuration for: {kast}') +298 return (term, mlAnd(constraints, GENERATED_TOP_CELL))
+ +299 +300 +
+[docs] +301def cell_label_to_var_name(label: str) -> str: +302 """Return a variable name based on a cell label.""" +303 return label.replace('-', '_').replace('<', '').replace('>', '').upper() + '_CELL'
+ +304 +305 +
+[docs] +306def split_config_from(configuration: KInner) -> tuple[KInner, dict[str, KInner]]: +307 """Split the substitution from a given configuration. +308 +309 Given an input configuration `config`, will return a tuple `(symbolic_config, subst)`, where: +310 +311 1. `config == substitute(symbolic_config, subst)` +312 2. `symbolic_config` is the same configuration structure, but where the contents of leaf cells is replaced with a fresh KVariable. +313 3. `subst` is the substitution for the generated KVariables back to the original configuration contents. +314 """ +315 initial_substitution = {} +316 +317 def _replace_with_var(k: KInner) -> KInner: +318 if type(k) is KApply and k.is_cell: +319 if k.arity == 1 and not (type(k.args[0]) is KApply and k.args[0].is_cell): +320 config_var = cell_label_to_var_name(k.label.name) +321 initial_substitution[config_var] = k.args[0] +322 return KApply(k.label, [KVariable(config_var)]) +323 return k +324 +325 symbolic_config = top_down(_replace_with_var, configuration) +326 return (symbolic_config, initial_substitution)
+ +327 +328 +
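+# Usage sketch for `split_config_from` (illustrative cells; assumes the usual import paths):
+from pyk.kast.inner import KApply, KToken, KVariable, Subst
+from pyk.kast.manip import split_config_from
+
+config = KApply('<T>', KApply('<k>', KToken('1', 'Int')), KApply('<state>', KToken('.Map', 'Map')))
+symbolic, subst = split_config_from(config)
+assert symbolic == KApply('<T>', KApply('<k>', KVariable('K_CELL')), KApply('<state>', KVariable('STATE_CELL')))
+assert Subst(subst)(symbolic) == config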
+[docs] +329def collapse_dots(kast: KInner) -> KInner: +330 """Given a configuration with structural frames `...`, minimize the structural frames needed. +331 +332 Args: +333 kast: A configuration, potentially with structural frames. +334 +335 Returns: +336 The same configuration, with the amount of structural framing minimized. +337 """ +338 +339 def _collapse_dots(_kast: KInner) -> KInner: +340 if type(_kast) is KApply: +341 if _kast.is_cell and _kast.arity == 1 and _kast.args[0] == DOTS: +342 return DOTS +343 new_args = [arg for arg in _kast.args if arg != DOTS] +344 if _kast.is_cell and len(new_args) == 0: +345 return DOTS +346 if len(new_args) < len(_kast.args): +347 new_args.append(DOTS) +348 return _kast.let(args=new_args) +349 elif type(_kast) is KRewrite: +350 if _kast.lhs == DOTS: +351 return DOTS +352 return _kast +353 +354 return bottom_up(_collapse_dots, kast)
+ +355 +356 +
+[docs] +357def push_down_rewrites(kast: KInner) -> KInner: +358 def _push_down_rewrites(_kast: KInner) -> KInner: +359 if type(_kast) is KRewrite: +360 lhs = _kast.lhs +361 rhs = _kast.rhs +362 if lhs == rhs: +363 return lhs +364 if type(lhs) is KVariable and type(rhs) is KVariable and lhs.name == rhs.name: +365 return lhs +366 if type(lhs) is KApply and type(rhs) is KApply and lhs.label == rhs.label and lhs.arity == rhs.arity: +367 new_args = [ +368 KRewrite(left_arg, right_arg) for left_arg, right_arg in zip(lhs.args, rhs.args, strict=True) +369 ] +370 return lhs.let(args=new_args) +371 if type(lhs) is KSequence and type(rhs) is KSequence and lhs.arity > 0 and rhs.arity > 0: +372 if lhs.arity == 1 and rhs.arity == 1: +373 return KRewrite(lhs.items[0], rhs.items[0]) +374 if lhs.items[0] == rhs.items[0]: +375 lower_rewrite = _push_down_rewrites(KRewrite(KSequence(lhs.items[1:]), KSequence(rhs.items[1:]))) +376 return KSequence([lhs.items[0], lower_rewrite]) +377 if lhs.items[-1] == rhs.items[-1]: +378 lower_rewrite = _push_down_rewrites( +379 KRewrite(KSequence(lhs.items[0:-1]), KSequence(rhs.items[0:-1])) +380 ) +381 return KSequence([lower_rewrite, lhs.items[-1]]) +382 if ( +383 type(lhs) is KSequence +384 and lhs.arity > 0 +385 and type(lhs.items[-1]) is KVariable +386 and type(rhs) is KVariable +387 and lhs.items[-1] == rhs +388 ): +389 return KSequence([KRewrite(KSequence(lhs.items[0:-1]), KSequence([])), rhs]) +390 return _kast +391 +392 return top_down(_push_down_rewrites, kast)
+ +393 +394 +
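+# Usage sketch for `push_down_rewrites` (illustrative labels; assumes the usual import paths):
+from pyk.kast.inner import KApply, KRewrite, KVariable
+from pyk.kast.manip import push_down_rewrites
+
+lhs = KApply('<k>', KApply('foo', KVariable('X')))
+rhs = KApply('<k>', KApply('bar', KVariable('X')))
+# The rewrite at the top is pushed down to the smallest differing subterm.
+assert push_down_rewrites(KRewrite(lhs, rhs)) == KApply(
+    '<k>', KRewrite(KApply('foo', KVariable('X')), KApply('bar', KVariable('X')))
+)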
+[docs] +395def inline_cell_maps(kast: KInner) -> KInner: +396 """Ensure that cell map collections are printed nicely, not as Maps. +397 +398 Args: +399 kast: A KAST term. +400 +401 Returns: +402 The KAST term with cell maps inlined. +403 """ +404 +405 def _inline_cell_maps(_kast: KInner) -> KInner: +406 if type(_kast) is KApply and _kast.label.name.endswith('CellMapItem'): +407 map_key = _kast.args[0] +408 if type(map_key) is KApply and map_key.is_cell: +409 return _kast.args[1] +410 return _kast +411 +412 return bottom_up(_inline_cell_maps, kast)
+ +413 +414 +
+[docs] +415def remove_semantic_casts(kast: KInner) -> KInner: +416 """Remove injected `#SemanticCast*` nodes in AST. +417 +418 Args: +419 kast: A term (possibly) containing automatically injected `#SemanticCast*` KApply nodes. +420 +421 Returns: +422 The term without the `#SemanticCast*` nodes. +423 """ +424 +425 def _remove_semantic_casts(_kast: KInner) -> KInner: +426 if type(_kast) is KApply and _kast.arity == 1 and _kast.label.name.startswith('#SemanticCast'): +427 return _kast.args[0] +428 return _kast +429 +430 return bottom_up(_remove_semantic_casts, kast)
+ +431 +432 +
+[docs] +433def useless_vars_to_dots(kast: KInner, keep_vars: Iterable[str] = ()) -> KInner: +434 """Structurally abstract away useless variables. +435 +436 Args: +437 kast: A term. +438 keep_vars: Iterable of variables to keep. +439 +440 Returns: +441 The term with the useless variables structurally abstracted. +442 """ +443 num_occs = count_vars(kast) + Counter(keep_vars) +444 +445 def _collapse_useless_vars(_kast: KInner) -> KInner: +446 if type(_kast) is KApply and _kast.is_cell: +447 new_args: list[KInner] = [] +448 for arg in _kast.args: +449 if type(arg) is KVariable and num_occs[arg.name] == 1: +450 new_args.append(DOTS) +451 else: +452 new_args.append(arg) +453 return _kast.let(args=new_args) +454 return _kast +455 +456 return bottom_up(_collapse_useless_vars, kast)
+ +457 +458 +
+[docs] +459def labels_to_dots(kast: KInner, labels: Collection[str]) -> KInner: +460 """Abstract specific labels for printing. +461 +462 Args: +463 kast: A term. +464 labels: List of labels to abstract. +465 +466 Returns: +467 The term with `labels` abstracted. +468 """ +469 +470 def _labels_to_dots(k: KInner) -> KInner: +471 if type(k) is KApply and k.is_cell and k.label.name in labels: +472 return DOTS +473 return k +474 +475 return bottom_up(_labels_to_dots, kast)
+ +476 +477 +
+[docs] +478def extract_cells(kast: KInner, keep_cells: Collection[str]) -> KInner: +479 def _extract_cells(k: KInner) -> KInner: +480 if ( +481 type(k) is KApply +482 and k.is_cell +483 and not k.label.name in keep_cells +484 and all(type(arg) != KApply or not arg.is_cell or arg == DOTS for arg in k.args) +485 ): +486 return DOTS +487 return k +488 +489 return bottom_up(_extract_cells, kast)
+ +490 +491 +
+[docs] +492def on_attributes(kast: W, f: Callable[[KAtt], KAtt]) -> W: +493 kast = kast.map_att(f) +494 +495 # TODO mypy bug: https://github.com/python/mypy/issues/10817 +496 +497 if type(kast) is KFlatModule: +498 sentences = (sentence.map_att(f) for sentence in kast.sentences) +499 return kast.let(sentences=sentences) # type: ignore +500 +501 if type(kast) is KDefinition: +502 modules = (module.map_att(f) for module in kast.modules) +503 return kast.let(modules=modules) # type: ignore +504 +505 return kast
+ +506 +507 +
+[docs] +508def minimize_term( +509 term: KInner, keep_vars: Iterable[str] = (), abstract_labels: Collection[str] = (), keep_cells: Collection[str] = () +510) -> KInner: +511 """Minimize a K term for pretty-printing. +512 +513 - Variables only used once will be removed. +514 - Unused cells will be abstracted. +515 - An attempt is made to remove useless conditions. +516 +517 Args: +518 term: The term to minimize. +519 +520 Returns: +521 The term, minimized. +522 """ +523 term = inline_cell_maps(term) +524 term = remove_semantic_casts(term) +525 term = useless_vars_to_dots(term, keep_vars=keep_vars) +526 +527 if keep_cells: +528 term = extract_cells(term, keep_cells) +529 else: +530 term = labels_to_dots(term, abstract_labels) +531 +532 term = collapse_dots(term) +533 +534 return term
+ +535 +536 +
+[docs] +537def minimize_rule_like(rule: RL, keep_vars: Iterable[str] = ()) -> RL: +538 """Minimize a K rule or claim for pretty-printing. +539 +540 - Variables only used once will be removed. +541 - Unused cells will be abstracted. +542 - Useless side-conditions will be attempted to be removed. +543 +544 Args: +545 rule: A K rule or claim. +546 +547 Returns: +548 The rule or claim, minimized. +549 """ +550 body = rule.body +551 requires = rule.requires +552 ensures = rule.ensures +553 +554 requires = andBool(flatten_label('_andBool_', requires)) +555 requires = simplify_bool(requires) +556 +557 ensures = andBool(flatten_label('_andBool_', ensures)) +558 ensures = simplify_bool(ensures) +559 +560 constrained_vars = set(keep_vars) | free_vars(requires) | free_vars(ensures) +561 body = minimize_term(body, keep_vars=constrained_vars) +562 +563 return rule.let(body=body, requires=requires, ensures=ensures)
+ +564 +565 +
+[docs] +566def remove_source_map(definition: KDefinition) -> KDefinition: +567 return on_attributes(definition, lambda att: att.drop_source())
+ +568 +569 +
+[docs] +570def remove_attrs(term: KInner) -> KInner: +571 def remove_attr(term: KInner) -> KInner: +572 if isinstance(term, WithKAtt): +573 return term.let_att(EMPTY_ATT) +574 return term +575 +576 return top_down(remove_attr, term)
+ +577 +578 +
+[docs] +579def remove_generated_cells(term: KInner) -> KInner: +580 """Remove <generatedTop> and <generatedCounter> from a configuration. +581 +582 Args: +583 term: A term. +584 +585 Returns: +586 The term with those cells removed. +587 """ +588 rewrite = KRewrite(KApply('<generatedTop>', [KVariable('CONFIG'), KVariable('_')]), KVariable('CONFIG')) +589 return rewrite(term)
+ +590 +591 +
+[docs] +592def is_anon_var(kast: KInner) -> bool: +593 return type(kast) is KVariable and kast.name.startswith('_')
+ +594 +595 +
+[docs] +596def set_cell(constrained_term: KInner, cell_variable: str, cell_value: KInner) -> KInner: +597 state, constraint = split_config_and_constraints(constrained_term) +598 config, subst = split_config_from(state) +599 subst[cell_variable] = cell_value +600 return mlAnd([Subst(subst)(config), constraint])
+ +601 +602 +
+[docs] +603def abstract_term_safely( +604 kast: KInner, base_name: str = 'V', sort: KSort | None = None, existing_var_names: set[str] | None = None +605) -> KVariable: +606 def _abstract(k: KInner) -> KVariable: +607 vname = hash_str(k)[0:8] +608 return KVariable(base_name + '_' + vname, sort=sort) +609 +610 new_var = _abstract(kast) +611 if existing_var_names is not None: +612 while new_var.name in existing_var_names: +613 new_var = _abstract(new_var) +614 return new_var
+ +615 +616 +
+[docs] +617def apply_existential_substitutions(state: KInner, constraints: Iterable[KInner]) -> tuple[KInner, Iterable[KInner]]: +618 pattern = mlEqualsTrue(KApply('_==K_', [KVariable('#VAR'), KVariable('#VAL')])) +619 subst = {} +620 new_constraints = [] +621 for c in constraints: +622 match = pattern.match(c) +623 if match is not None and type(match['#VAR']) is KVariable and match['#VAR'].name.startswith('?'): +624 subst[match['#VAR'].name] = match['#VAL'] +625 else: +626 new_constraints.append(c) +627 return (Subst(subst)(state), [Subst(subst)(c) for c in new_constraints])
+ +628 +629 +
+[docs] +630def undo_aliases(definition: KDefinition, kast: KInner) -> KInner: +631 aliases = [] +632 for rule in definition.alias_rules: +633 rewrite = rule.body +634 if type(rewrite) is not KRewrite: +635 raise ValueError(f'Expected KRewrite as alias body, found: {rewrite}') +636 if rule.requires is not None and rule.requires != TRUE: +637 raise ValueError(f'Expected empty requires clause on alias, found: {rule.requires}') +638 if rule.ensures is not None and rule.ensures != TRUE: +639 raise ValueError(f'Expected empty ensures clause on alias, found: {rule.ensures}') +640 aliases.append(KRewrite(rewrite.rhs, rewrite.lhs)) +641 return indexed_rewrite(kast, aliases)
+ +642 +643 +
+[docs] +644def rename_generated_vars(term: KInner) -> KInner: +645 vars: set[str] = set(free_vars(term)) +646 cell_stack: list[str] = [] +647 +648 def _rename_vars(k: KInner) -> KInner: +649 if type(k) is KApply and k.is_cell: +650 cell_stack.append(cell_label_to_var_name(k.label.name)) +651 res = k.map_inner(_rename_vars) +652 cell_stack.pop() +653 return res +654 +655 if type(k) is KVariable and k.name.startswith(('_Gen', '?_Gen', '_DotVar', '?_DotVar')): +656 if not cell_stack: +657 return k +658 cell_name = cell_stack[-1] +659 new_var = abstract_term_safely(k, base_name=cell_name, sort=k.sort, existing_var_names=vars) +660 vars.add(new_var.name) +661 return new_var +662 +663 return k.map_inner(_rename_vars) +664 +665 return _rename_vars(term)
+ +666 +667 +
+[docs] +668def is_spurious_constraint(term: KInner) -> bool: +669 if type(term) is KApply and term.label.name == '#Equals' and term.args[0] == term.args[1]: +670 return True +671 if is_top(term, weak=True): +672 return True +673 return False
+ +674 +675 +
+[docs] +676def normalize_constraints(constraints: Iterable[KInner]) -> tuple[KInner, ...]: +677 constraints = (constraint for _constraint in constraints for constraint in flatten_label('#And', _constraint)) +678 constraints = unique(constraints) +679 constraints = (constraint for constraint in constraints if not is_spurious_constraint(constraint)) +680 return tuple(constraints)
+ +681 +682 +
+[docs] +683def remove_useless_constraints(constraints: Iterable[KInner], initial_vars: Iterable[str]) -> list[KInner]: +684 """Remove constraints that do not depend on a given iterable of variables (directly or indirectly). +685 +686 Args: +687 constraints: Iterable of constraints to filter. +688 initial_vars: Initial iterable of variables to keep constraints for. +689 +690 Returns: +691 A list of constraints with only those constraints that contain the initial variables, +692 or variables that depend on those through other constraints in the list. +693 """ +694 used_vars = list(initial_vars) +695 prev_len_used_vars = 0 +696 new_constraints = [] +697 while len(used_vars) > prev_len_used_vars: +698 prev_len_used_vars = len(used_vars) +699 for c in constraints: +700 if c not in new_constraints: +701 new_vars = free_vars(c) +702 if any(v in used_vars for v in new_vars): +703 new_constraints.append(c) +704 used_vars.extend(new_vars) +705 used_vars = list(set(used_vars)) +706 return new_constraints
+ +707 +708 +
+[docs] +709def build_claim( +710 claim_id: str, +711 init_config: KInner, +712 final_config: KInner, +713 init_constraints: Iterable[KInner] = (), +714 final_constraints: Iterable[KInner] = (), +715 keep_vars: Iterable[str] = (), +716) -> tuple[KClaim, Subst]: +717 """Return a `KClaim` between the supplied initial and final states. +718 +719 Args: +720 claim_id: Label to give the claim. +721 init_config: State to put on LHS of the rule. +722 final_config: State to put on RHS of the rule. +723 init_constraints: Constraints to use as `requires` clause. +724 final_constraints: Constraints to use as `ensures` clause. +725 keep_vars: Variables to leave in the side-conditions even if not bound in the configuration. +726 +727 Returns: +728 A tuple ``(claim, var_map)`` where +729 +730 - ``claim``: A `KClaim` with variable naming conventions applied +731 so that it should be parseable by the K Frontend. +732 - ``var_map``: The variable renamings applied to make the claim parseable by the K Frontend +733 (which can be undone to recover the original variables). +734 """ +735 rule, var_map = build_rule( +736 claim_id, init_config, final_config, init_constraints, final_constraints, keep_vars=keep_vars +737 ) +738 claim = KClaim(rule.body, requires=rule.requires, ensures=rule.ensures, att=rule.att) +739 return claim, var_map
+ +740 +741 +
+[docs] +742def build_rule( +743 rule_id: str, +744 init_config: KInner, +745 final_config: KInner, +746 init_constraints: Iterable[KInner] = (), +747 final_constraints: Iterable[KInner] = (), +748 priority: int | None = None, +749 keep_vars: Iterable[str] = (), +750 defunc_with: KDefinition | None = None, +751) -> tuple[KRule, Subst]: +752 """Return a `KRule` between the supplied initial and final states. +753 +754 Args: +755 rule_id: Label to give the rule. +756 init_config: State to put on LHS of the rule. +757 final_config: State to put on RHS of the rule. +758 init_constraints: Constraints to use as `requires` clause. +759 final_constraints: Constraints to use as `ensures` clause. +760 priority: Priority index to assign to generated rules. +761 keep_vars: Variables to leave in the side-conditions even if not bound in the configuration. +762 defunc_with: KDefinition for filtering out function symbols on LHS of rules. +763 +764 Returns: +765 A tuple ``(rule, var_map)`` where +766 +767 - ``rule``: A `KRule` with variable naming conventions applied +768 so that it should be parseable by the K Frontend. +769 - ``var_map``: The variable renamings applied to make the rule parseable by the K Frontend +770 (which can be undone to recover the original variables). +771 """ +772 init_constraints = [normalize_ml_pred(c) for c in init_constraints] +773 final_constraints = [normalize_ml_pred(c) for c in final_constraints] +774 final_constraints = [c for c in final_constraints if c not in init_constraints] +775 if defunc_with is not None: +776 init_config, new_constraints = defunctionalize(defunc_with, init_config) +777 init_constraints = init_constraints + new_constraints +778 init_term = mlAnd([init_config] + init_constraints) +779 final_term = mlAnd([final_config] + final_constraints) +780 +781 lhs_vars = free_vars(init_term) +782 rhs_vars = free_vars(final_term) +783 var_occurrences = count_vars( +784 mlAnd( +785 [push_down_rewrites(KRewrite(init_config, final_config))] + init_constraints + final_constraints, +786 GENERATED_TOP_CELL, +787 ) +788 ) +789 v_subst: dict[str, KVariable] = {} +790 vremap_subst: dict[str, KVariable] = {} +791 for v in var_occurrences: +792 new_v = v +793 if var_occurrences[v] == 1: +794 new_v = '_' + new_v +795 if v in rhs_vars and v not in lhs_vars: +796 new_v = '?' + new_v +797 if new_v != v: +798 v_subst[v] = KVariable(new_v) +799 vremap_subst[new_v] = KVariable(v) +800 +801 new_init_config = Subst(v_subst)(init_config) +802 new_init_constraints = [Subst(v_subst)(c) for c in init_constraints] +803 new_final_config, new_final_constraints = apply_existential_substitutions( +804 Subst(v_subst)(final_config), [Subst(v_subst)(c) for c in final_constraints] +805 ) +806 +807 rule_body = push_down_rewrites(KRewrite(new_init_config, new_final_config)) +808 rule_requires = simplify_bool(ml_pred_to_bool(mlAnd(new_init_constraints))) +809 rule_ensures = simplify_bool(ml_pred_to_bool(mlAnd(new_final_constraints))) +810 att_entries = [] if priority is None else [Atts.PRIORITY(str(priority))] +811 rule_att = KAtt(entries=att_entries) +812 +813 rule = KRule(rule_body, requires=rule_requires, ensures=rule_ensures, att=rule_att) +814 rule = rule.update_atts([Atts.LABEL(rule_id)]) +815 +816 return (rule, Subst(vremap_subst))
+ +817 +818 +
+[docs] +819def replace_rewrites_with_implies(kast: KInner) -> KInner: +820 def _replace_rewrites_with_implies(_kast: KInner) -> KInner: +821 if type(_kast) is KRewrite: +822 return mlImplies(_kast.lhs, _kast.rhs) +823 return _kast +824 +825 return bottom_up(_replace_rewrites_with_implies, kast)
+ +826 +827 +
+[docs] +828def no_cell_rewrite_to_dots(term: KInner) -> KInner: +829 """Transform a given term by replacing the contents of each cell with dots if the LHS and RHS are the same. +830 +831 This function recursively traverses the cells in a term. +832 When it finds a cell whose left-hand side (LHS) is identical to its right-hand side (RHS), +833 it replaces the cell's contents with a predefined DOTS. +834 +835 Args: +836 term: The term to be transformed. +837 +838 Returns: +839 The transformed term, where specific cell contents have been replaced with dots. +840 """ +841 +842 def _no_cell_rewrite_to_dots(_term: KInner) -> KInner: +843 if type(_term) is KApply and _term.is_cell: +844 lhs = extract_lhs(_term) +845 rhs = extract_rhs(_term) +846 if lhs == rhs: +847 return KApply(_term.label, [DOTS]) +848 return _term +849 +850 config, _subst = split_config_from(term) +851 subst = Subst({cell_name: _no_cell_rewrite_to_dots(cell_contents) for cell_name, cell_contents in _subst.items()}) +852 +853 return subst(config)
+ +854 +855 +
+[docs] +856def defunctionalize(defn: KDefinition, kinner: KInner) -> tuple[KInner, list[KInner]]: +857 """Turn non-constructor arguments into side-conditions so that a term is only constructor-like. +858 +859 Args: +860 defn: The definition to pull function label information from. +861 kinner: The term to defunctionalize. +862 +863 Returns: +864 A tuple of the defunctionalized term and the list of constraints generated. +865 """ +866 function_symbols = [prod.klabel for prod in defn.functions if prod.klabel is not None] +867 constraints: list[KInner] = [] +868 +869 def _defunctionalize(_kinner: KInner) -> KInner: +870 if type(_kinner) is KApply and _kinner.label in function_symbols: +871 sort = defn.sort(_kinner) +872 assert sort is not None +873 new_var = abstract_term_safely(_kinner, base_name='F', sort=sort) +874 var_constraint = mlEquals(new_var, _kinner, arg_sort=sort) +875 constraints.append(var_constraint) +876 return new_var +877 return _kinner +878 +879 new_kinner = top_down(_defunctionalize, kinner) +880 return (new_kinner, list(unique(constraints)))
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/markdown.html b/pyk/_modules/pyk/kast/markdown.html new file mode 100644 index 00000000000..91d74fa8b19 --- /dev/null +++ b/pyk/_modules/pyk/kast/markdown.html @@ -0,0 +1,355 @@ + + + + + + + + pyk.kast.markdown — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.markdown

+  1from __future__ import annotations
+  2
+  3import re
+  4from abc import ABC, abstractmethod
+  5from dataclasses import dataclass
+  6from typing import TYPE_CHECKING, NamedTuple, final
+  7
+  8if TYPE_CHECKING:
+  9    from collections.abc import Container, Iterable, Iterator
+ 10    from typing import Final
+ 11
+ 12
+
+[docs] + 13def select_code_blocks(text: str, selector: str | None = None) -> str: + 14 _selector = SelectorParser(selector).parse() if selector else None + 15 + 16 def selected(code_block: CodeBlock) -> bool: + 17 if _selector is None: + 18 return True + 19 + 20 tags = parse_tags(code_block.info) + 21 return _selector.eval(tags) + 22 + 23 # TODO: Preserve line numbers from input text + 24 return '\n'.join(code_block.code for code_block in code_blocks(text) if selected(code_block))
+ + 25 + 26 +
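+# Usage sketch for `select_code_blocks` with a tag selector (assumes the usual `pyk.kast.markdown` import path):
+from pyk.kast.markdown import select_code_blocks
+
+doc = '\n'.join([
+    '```{.k .symbolic}',
+    'rule foo => bar',
+    '```',
+    '',
+    '```python',
+    "print('skipped')",
+    '```',
+])
+assert select_code_blocks(doc, 'k & ! python') == 'rule foo => bar'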
+[docs] + 27class CodeBlock(NamedTuple): + 28 info: str + 29 code: str
+ + 30 + 31 + 32_CODE_BLOCK_PATTERN: Final = re.compile( + 33 r'(^|(?<=\n)) {0,3}(?P<fence>```+)(?!`)(?P<info>.*)\n(?P<code>(.*\n)*?) {0,3}(?P=fence)`*' + 34) + 35 + 36 +
+[docs] + 37def code_blocks(text: str) -> Iterator[CodeBlock]: + 38 return (CodeBlock(match['info'], match['code'].rstrip()) for match in _CODE_BLOCK_PATTERN.finditer(text))
+ + 39 + 40 +
+[docs] + 41def parse_tags(text: str) -> set[str]: + 42 def check_tag(tag: str) -> None: + 43 if not (tag and all(c.isalnum() or c in '_-' for c in tag)): + 44 raise ValueError(f'Invalid tag: {tag!r}') + 45 + 46 if not text: + 47 return set() + 48 + 49 if text[0] != '{': + 50 check_tag(text) + 51 return {text} + 52 + 53 if text[-1] != '}': + 54 raise ValueError(f"Expected '}}', found: {text[-1]!r}") + 55 + 56 res: set[str] = set() + 57 tags = text[1:-1].split() + 58 for tag in tags: + 59 if tag[0] != '.': + 60 raise ValueError(f"Expected '.', found: {tag[0]!r}") + 61 check_tag(tag[1:]) + 62 res.add(tag[1:]) + 63 + 64 return res
+ + 65 + 66 + 67# ---------------------- + 68# Selector mini-language + 69# ---------------------- + 70 + 71 +
+[docs] + 72class Selector(ABC): +
+[docs] + 73 @abstractmethod + 74 def eval(self, atoms: Container[str]) -> bool: ...
+
+ + 75 + 76 +
+[docs] + 77@final + 78@dataclass(frozen=True) + 79class Atom(Selector): + 80 name: str + 81 +
+[docs] + 82 def eval(self, atoms: Container[str]) -> bool: + 83 return self.name in atoms
+
+ + 84 + 85 +
+[docs] + 86@final + 87@dataclass(frozen=True) + 88class Not(Selector): + 89 op: Selector + 90 +
+[docs] + 91 def eval(self, atoms: Container[str]) -> bool: + 92 return not self.op.eval(atoms)
+
+ + 93 + 94 +
+[docs] + 95@final + 96@dataclass(frozen=True) + 97class And(Selector): + 98 ops: tuple[Selector, ...] + 99 +
+[docs] +100 def eval(self, atoms: Container[str]) -> bool: +101 return all(op.eval(atoms) for op in self.ops)
+
+ +102 +103 +
+[docs] +104@final +105@dataclass(frozen=True) +106class Or(Selector): +107 ops: tuple[Selector, ...] +108 +
+[docs] +109 def eval(self, atoms: Container[str]) -> bool: +110 return any(op.eval(atoms) for op in self.ops)
+
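The selector classes can also be composed by hand; a small sketch (tag names hypothetical) of the selector `k & !symbolic`:

from pyk.kast.markdown import And, Atom, Not

selector = And((Atom('k'), Not(Atom('symbolic'))))
assert selector.eval({'k'}) is True
assert selector.eval({'k', 'symbolic'}) is False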
+ +111 +112 +113_SPECIAL = tuple('!&|()') +114 +115 +
+[docs] +116def selector_lexer(it: Iterable[str]) -> Iterator[str]: +117 it = iter(it) +118 la = next(it, '') +119 while True: +120 while la.isspace(): +121 la = next(it, '') +122 +123 if not la: +124 yield '' +125 return +126 +127 if la in _SPECIAL: +128 yield la +129 la = next(it, '') +130 continue +131 +132 buf: list[str] = [] +133 while la.isalnum() or la == '_': +134 buf.append(la) +135 la = next(it, '') +136 +137 if not buf: +138 raise ValueError(f'Unexpected character: {la!r}') +139 +140 yield ''.join(buf)
+ +141 +142 +
+[docs] +143class SelectorParser: +144 _la: str +145 _it: Iterator[str] +146 +147 def __init__(self, selector: str): +148 self._it = selector_lexer(selector) +149 self._consume() +150 +151 def _consume(self) -> None: +152 self._la = next(self._it, '') +153 +154 def _match(self, token: str) -> None: +155 if self._la != token: +156 raise ValueError(f'Unexpected token: {token}') +157 self._consume() +158 +
+[docs] +159 def parse(self) -> Selector: +160 res = self._or() +161 if self._la: +162 raise ValueError(f'Expected EOF, found: {self._la}') +163 return res
+ +164 +165 def _or(self) -> Selector: +166 ops = [self._and()] +167 while self._la == '|': +168 self._consume() +169 ops.append(self._and()) +170 if len(ops) > 1: +171 return Or(tuple(ops)) +172 return ops[0] +173 +174 def _and(self) -> Selector: +175 ops = [self._lit()] +176 while self._la == '&': +177 self._consume() +178 ops.append(self._lit()) +179 if len(ops) > 1: +180 return And(tuple(ops)) +181 return ops[0] +182 +183 def _lit(self) -> Selector: +184 if not self._la: +185 raise ValueError('Unexpected EOF') +186 +187 if self._la == '(': +188 self._consume() +189 expr = self._or() +190 self._match(')') +191 return expr +192 +193 if self._la == '!': +194 self._consume() +195 lit = self._lit() +196 return Not(lit) +197 +198 if len(self._la) > 1 or self._la.isalnum() or self._la == '-': +199 atom = self._la +200 self._consume() +201 return Atom(atom) +202 +203 raise ValueError(f'Unexpected token: {self._la}')
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/outer.html b/pyk/_modules/pyk/kast/outer.html new file mode 100644 index 00000000000..8f5530c73a5 --- /dev/null +++ b/pyk/_modules/pyk/kast/outer.html @@ -0,0 +1,2038 @@ + + + + + + + + pyk.kast.outer — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.outer

+   1from __future__ import annotations
+   2
+   3import json
+   4import logging
+   5import re
+   6from abc import abstractmethod
+   7from collections import defaultdict
+   8from collections.abc import Iterable
+   9from dataclasses import InitVar  # noqa: TC003
+  10from dataclasses import dataclass
+  11from enum import Enum
+  12from functools import cached_property, reduce
+  13from itertools import pairwise, product
+  14from typing import TYPE_CHECKING, final, overload
+  15
+  16from ..prelude.kbool import TRUE
+  17from ..prelude.ml import ML_QUANTIFIERS
+  18from ..utils import FrozenDict, POSet, filter_none, not_none, single, unique
+  19from .att import EMPTY_ATT, Atts, Format, KAst, KAtt, WithKAtt
+  20from .inner import (
+  21    KApply,
+  22    KInner,
+  23    KLabel,
+  24    KRewrite,
+  25    KSequence,
+  26    KSort,
+  27    KToken,
+  28    KVariable,
+  29    Subst,
+  30    bottom_up,
+  31    bottom_up_with_summary,
+  32    top_down,
+  33)
+  34from .kast import kast_term
+  35from .rewrite import indexed_rewrite
+  36
+  37if TYPE_CHECKING:
+  38    from collections.abc import Callable, Iterator, Mapping
+  39    from os import PathLike
+  40    from typing import Any, Final, TypeVar
+  41
+  42    S = TypeVar('S', bound='KSentence')
+  43    RL = TypeVar('RL', bound='KRuleLike')
+  44
+  45_LOGGER: Final = logging.getLogger(__name__)
+  46
+  47
+
+[docs] + 48class KOuter(KAst): + 49 """Represents K definitions in KAST format. + 50 + 51 Outer syntax consists of K-specific data structures, including modules, definitions, imports, user-syntax declarations, rules, contexts, and claims. + 52 """ + 53 + 54 ...
+ + 55 + 56 +
+[docs] + 57class KProductionItem(KOuter): + 58 """Represents the elements used to declare components of productions in EBNF style.""" + 59 + 60 _NODES: Final = {'KTerminal', 'KRegexTerminal', 'KNonTerminal'} + 61 +
+[docs] + 62 @staticmethod + 63 def from_dict(d: Mapping[str, Any]) -> KProductionItem: + 64 node = d['node'] + 65 if node not in KProductionItem._NODES: + 66 raise ValueError(f'Invalid KProductionItem node: {node!r}') + 67 cls = globals()[node] + 68 return cls._from_dict(d)
+
+ + 69 + 70 +
+[docs] + 71@final + 72@dataclass(frozen=True) + 73class KRegexTerminal(KProductionItem): + 74 """Represents a regular-expression terminal in EBNF production, to be matched against input text.""" + 75 + 76 regex: str + 77 + 78 def __init__(self, regex: str): + 79 object.__setattr__(self, 'regex', regex) + 80 + 81 @classmethod + 82 def _from_dict(cls: type[KRegexTerminal], d: Mapping[str, Any]) -> KRegexTerminal: + 83 return KRegexTerminal(regex=d['regex']) + 84 +
+[docs] + 85 def to_dict(self) -> dict[str, Any]: + 86 return { + 87 'node': 'KRegexTerminal', + 88 'regex': self.regex, + 89 }
+ + 90 +
+[docs] + 91 def let(self, *, regex: str | None = None) -> KRegexTerminal: + 92 regex = regex if regex is not None else self.regex + 93 return KRegexTerminal(regex=regex)
+
+ + 94 + 95 +
+[docs] + 96@final + 97@dataclass(frozen=True) + 98class KNonTerminal(KProductionItem): + 99 """Represents a non-terminal of a given sort in EBNF productions, for defining arguments to the production.""" + 100 + 101 sort: KSort + 102 name: str | None + 103 + 104 def __init__(self, sort: KSort, name: str | None = None): + 105 object.__setattr__(self, 'sort', sort) + 106 object.__setattr__(self, 'name', name) + 107 + 108 @classmethod + 109 def _from_dict(cls: type[KNonTerminal], d: Mapping[str, Any]) -> KNonTerminal: + 110 name = d['name'] if 'name' in d else None + 111 return KNonTerminal(sort=KSort.from_dict(d['sort']), name=name) + 112 +
+[docs] + 113 def to_dict(self) -> dict[str, Any]: + 114 d = {'node': 'KNonTerminal', 'sort': self.sort.to_dict()} + 115 if self.name is not None: + 116 d['name'] = self.name + 117 return d
+ + 118 +
+[docs] + 119 def let(self, *, sort: KSort | None = None, name: str | None = None) -> KNonTerminal: + 120 sort = sort or self.sort + 121 name = name or self.name + 122 return KNonTerminal(sort=sort, name=name)
+
+ + 123 + 124 +
+[docs] + 125@final + 126@dataclass(frozen=True) + 127class KTerminal(KProductionItem): + 128 """Represents a string literal component of a production in EBNF grammar.""" + 129 + 130 value: str + 131 + 132 def __init__(self, value: str): + 133 object.__setattr__(self, 'value', value) + 134 + 135 @classmethod + 136 def _from_dict(cls: type[KTerminal], d: Mapping[str, Any]) -> KTerminal: + 137 return KTerminal(value=d['value']) + 138 +
+[docs] + 139 def to_dict(self) -> dict[str, Any]: + 140 return {'node': 'KTerminal', 'value': self.value}
+ + 141 +
+[docs] + 142 def let(self, *, value: str | None = None) -> KTerminal: + 143 value = value if value is not None else self.value + 144 return KTerminal(value=value)
+
+ + 145 + 146 +
+[docs] + 147class KSentence(KOuter, WithKAtt): + 148 """Represents an individual declaration in a K module.""" + 149 + 150 _NODES: Final = { + 151 'KProduction', + 152 'KSyntaxSort', + 153 'KSortSynonym', + 154 'KSyntaxLexical', + 155 'KSyntaxAssociativity', + 156 'KSyntaxPriority', + 157 'KBubble', + 158 'KRule', + 159 'KClaim', + 160 'KContext', + 161 } + 162 +
+[docs] + 163 @staticmethod + 164 def from_dict(d: Mapping[str, Any]) -> KSentence: + 165 node = d['node'] + 166 if node not in KSentence._NODES: + 167 raise ValueError(f'Invalid KSentence node: {node!r}') + 168 cls = globals()[node] + 169 return cls._from_dict(d)
+ + 170 + 171 @property + 172 def unique_id(self) -> str | None: + 173 """Return the unique ID assigned to this sentence, or None.""" + 174 return self.att.get(Atts.UNIQUE_ID) + 175 + 176 @property + 177 def source(self) -> str | None: + 178 """Return the source assigned to this sentence, or None.""" + 179 if Atts.SOURCE in self.att and Atts.LOCATION in self.att: + 180 return f'{self.att[Atts.SOURCE]}:{self.att[Atts.LOCATION]}' + 181 return None + 182 + 183 @property + 184 def label(self) -> str: + 185 """Return a (hopefully) unique label associated with the given `KSentence`. + 186 + 187 :return: Unique label for the given sentence, either (in order): + 188 - User-supplied `label` attribute (or supplied in rule label), or + 189 - Unique identifier computed and inserted by the frontend. + 190 """ + 191 label = self.att.get(Atts.LABEL, self.unique_id) + 192 if label is None: + 193 raise ValueError(f'Found sentence without label or UNIQUE_ID: {self}') + 194 return label
+ + 195 + 196 +
+[docs] + 197@final + 198@dataclass(frozen=True) + 199class KProduction(KSentence): + 200 """Represents a production in K's EBNF grammar definitions, as a sequence of ProductionItem.""" + 201 + 202 # TODO Order in Java implementation: klabel, params, sort, items, att + 203 sort: KSort + 204 items: tuple[KProductionItem, ...] + 205 params: tuple[KSort, ...] + 206 klabel: KLabel | None + 207 att: KAtt + 208 + 209 def __init__( + 210 self, + 211 sort: str | KSort, + 212 items: Iterable[KProductionItem] = (), + 213 params: Iterable[str | KSort] = (), + 214 klabel: str | KLabel | None = None, + 215 att: KAtt = EMPTY_ATT, + 216 ): + 217 if type(sort) is str: + 218 sort = KSort(sort) + 219 if type(klabel) is str: + 220 klabel = KLabel(klabel) + 221 + 222 params = tuple(KSort(param) if type(param) is str else param for param in params) + 223 + 224 object.__setattr__(self, 'sort', sort) + 225 object.__setattr__(self, 'items', tuple(items)) + 226 object.__setattr__(self, 'params', params) + 227 object.__setattr__(self, 'klabel', klabel) + 228 object.__setattr__(self, 'att', att) + 229 + 230 @classmethod + 231 def _from_dict(cls: type[KProduction], d: Mapping[str, Any]) -> KProduction: + 232 return KProduction( + 233 sort=KSort.from_dict(d['sort']), + 234 items=(KProductionItem.from_dict(item) for item in d['productionItems']), + 235 params=(KSort.from_dict(param) for param in d['params']), + 236 klabel=KLabel.from_dict(d['klabel']) if d.get('klabel') else None, + 237 att=KAtt.from_dict(d['att']) if d.get('att') else EMPTY_ATT, + 238 ) + 239 +
+[docs] + 240 def to_dict(self) -> dict[str, Any]: + 241 return filter_none( + 242 { + 243 'node': 'KProduction', + 244 'sort': self.sort.to_dict(), + 245 'productionItems': [item.to_dict() for item in self.items], + 246 'params': [param.to_dict() for param in self.params], + 247 'klabel': self.klabel.to_dict() if self.klabel else None, + 248 'att': self.att.to_dict(), + 249 } + 250 )
+ + 251 +
+[docs] + 252 def let( + 253 self, + 254 *, + 255 sort: str | KSort | None = None, + 256 items: Iterable[KProductionItem] | None = None, + 257 params: Iterable[str | KSort] | None = None, + 258 klabel: str | KLabel | None = None, + 259 att: KAtt | None = None, + 260 ) -> KProduction: + 261 sort = sort if sort is not None else self.sort + 262 items = items if items is not None else self.items + 263 params = params if params is not None else self.params + 264 klabel = klabel if klabel is not None else self.klabel # TODO figure out a way to set klabel to None + 265 att = att if att is not None else self.att + 266 return KProduction(sort=sort, items=items, params=params, klabel=klabel, att=att)
+ + 267 +
+[docs] + 268 def let_att(self, att: KAtt) -> KProduction: + 269 return self.let(att=att)
+ + 270 + 271 @cached_property + 272 def as_subsort(self) -> tuple[KSort, KSort] | None: + 273 """Return a pair `(supersort, subsort)` if `self` is a subsort production, and `None` otherwise.""" + 274 if self.klabel: + 275 return None + 276 if len(self.items) != 1: + 277 return None + 278 item = self.items[0] + 279 if not isinstance(item, KNonTerminal): + 280 return None + 281 assert not self.klabel + 282 return self.sort, item.sort + 283 + 284 @cached_property + 285 def non_terminals(self) -> tuple[KNonTerminal, ...]: + 286 """Return the non-terminals of the production.""" + 287 return tuple(item for item in self.items if isinstance(item, KNonTerminal)) + 288 + 289 @property + 290 def argument_sorts(self) -> list[KSort]: + 291 """Return the sorts of the non-terminal positions of the productions.""" + 292 return [knt.sort for knt in self.non_terminals] + 293 + 294 @cached_property + 295 def is_prefix(self) -> bool: + 296 """The production is of the form ``t* "(" (n ("," n)*)? ")"``. + 297 + 298 Here, ``t`` is a terminal other than ``"("``, ``","`` or ``")"``, and ``n`` a non-terminal. + 299 + 300 Example: ``syntax Int ::= "mul" "(" Int "," Int ")"`` + 301 """ + 302 + 303 def encode(item: KProductionItem) -> str: + 304 match item: + 305 case KTerminal(value): + 306 if value in ['(', ',', ')']: + 307 return value + 308 return 't' + 309 case KNonTerminal(): + 310 return 'n' + 311 case KRegexTerminal(): + 312 return 'r' + 313 case _: + 314 raise AssertionError() + 315 + 316 string = ''.join(encode(item) for item in self.items) + 317 pattern = r't*\((n(,n)*)?\)' + 318 return bool(re.fullmatch(pattern, string)) + 319 + 320 @cached_property + 321 def is_record(self) -> bool: + 322 """The production is prefix with labelled nonterminals.""" + 323 return bool(self.is_prefix and self.non_terminals and all(item.name is not None for item in self.non_terminals)) + 324 + 325 @property + 326 def default_format(self) -> Format: + 327 format_str: str + 328 if self.is_record: + 329 tokens = [] + 330 for i, item in enumerate(self.items): + 331 match item: + 332 case KTerminal('('): + 333 tokens.append(f'%{i + 1}...') + 334 case KTerminal(_): + 335 tokens.append(f'%{i + 1}') + 336 case KNonTerminal(_, name): + 337 assert name is not None + 338 tokens.append(f'{name}:') + 339 tokens.append(f'%{i + 1}') + 340 case KRegexTerminal(): + 341 raise ValueError('Default format is not supported for productions with regex terminals') + 342 case _: + 343 raise AssertionError() + 344 format_str = ' '.join(tokens) + 345 else: + 346 format_str = ' '.join(f'%{i}' for i in range(1, len(self.items) + 1)) + 347 + 348 return Format.parse(format_str)
+ + 349 + 350 +
+[docs] + 351@final + 352@dataclass(frozen=True) + 353class KSyntaxSort(KSentence): + 354 """Represents a sort declaration, potentially parametric.""" + 355 + 356 sort: KSort + 357 params: tuple[KSort, ...] + 358 att: KAtt + 359 + 360 def __init__(self, sort: KSort, params: Iterable[str | KSort] = (), att: KAtt = EMPTY_ATT): + 361 params = tuple(KSort(param) if type(param) is str else param for param in params) + 362 object.__setattr__(self, 'sort', sort) + 363 object.__setattr__(self, 'params', params) + 364 object.__setattr__(self, 'att', att) + 365 + 366 @classmethod + 367 def _from_dict(cls: type[KSyntaxSort], d: Mapping[str, Any]) -> KSyntaxSort: + 368 return KSyntaxSort( + 369 sort=KSort.from_dict(d['sort']), + 370 params=(KSort.from_dict(param) for param in d['params']), + 371 att=KAtt.from_dict(d['att']) if d.get('att') else EMPTY_ATT, + 372 ) + 373 +
+[docs] + 374 def to_dict(self) -> dict[str, Any]: + 375 return { + 376 'node': 'KSyntaxSort', + 377 'sort': self.sort.to_dict(), + 378 'params': [param.to_dict() for param in self.params], + 379 'att': self.att.to_dict(), + 380 }
+ + 381 +
+[docs] + 382 def let( + 383 self, + 384 *, + 385 sort: KSort | None = None, + 386 params: Iterable[str | KSort] | None = None, + 387 att: KAtt | None = None, + 388 ) -> KSyntaxSort: + 389 sort = sort or self.sort + 390 params = params if params is not None else self.params + 391 att = att if att is not None else self.att + 392 return KSyntaxSort(sort=sort, params=params, att=att)
+ + 393 +
+[docs] + 394 def let_att(self, att: KAtt) -> KSyntaxSort: + 395 return self.let(att=att)
+
+ + 396 + 397 +
+[docs] + 398@final + 399@dataclass(frozen=True) + 400class KSortSynonym(KSentence): + 401 """Represents a sort synonym, allowing declaring a new name for a given sort.""" + 402 + 403 new_sort: KSort + 404 old_sort: KSort + 405 att: KAtt + 406 + 407 def __init__(self, new_sort: KSort, old_sort: KSort, att: KAtt = EMPTY_ATT): + 408 object.__setattr__(self, 'new_sort', new_sort) + 409 object.__setattr__(self, 'old_sort', old_sort) + 410 object.__setattr__(self, 'att', att) + 411 + 412 @classmethod + 413 def _from_dict(cls: type[KSortSynonym], d: Mapping[str, Any]) -> KSortSynonym: + 414 return KSortSynonym( + 415 new_sort=KSort.from_dict(d['newSort']), + 416 old_sort=KSort.from_dict(d['oldSort']), + 417 att=KAtt.from_dict(d['att']) if d.get('att') else EMPTY_ATT, + 418 ) + 419 +
+[docs] + 420 def to_dict(self) -> dict[str, Any]: + 421 return { + 422 'node': 'KSortSynonym', + 423 'newSort': self.new_sort.to_dict(), + 424 'oldSort': self.old_sort.to_dict(), + 425 'att': self.att.to_dict(), + 426 }
+ + 427 +
+[docs] + 428 def let( + 429 self, *, old_sort: KSort | None = None, new_sort: KSort | None = None, att: KAtt | None = None + 430 ) -> KSortSynonym: + 431 new_sort = new_sort or self.new_sort + 432 old_sort = old_sort or self.old_sort + 433 att = att if att is not None else self.att + 434 return KSortSynonym(new_sort=new_sort, old_sort=old_sort, att=att)
+ + 435 +
+[docs] + 436 def let_att(self, att: KAtt) -> KSortSynonym: + 437 return self.let(att=att)
+
+ + 438 + 439 +
+[docs] + 440@final + 441@dataclass(frozen=True) + 442class KSyntaxLexical(KSentence): + 443 """Represents a named piece of lexical syntax, definable as a regular expression.""" + 444 + 445 name: str + 446 regex: str + 447 att: KAtt + 448 + 449 def __init__(self, name: str, regex: str, att: KAtt = EMPTY_ATT): + 450 object.__setattr__(self, 'name', name) + 451 object.__setattr__(self, 'regex', regex) + 452 object.__setattr__(self, 'att', att) + 453 + 454 @classmethod + 455 def _from_dict(cls: type[KSyntaxLexical], d: Mapping[str, Any]) -> KSyntaxLexical: + 456 return KSyntaxLexical( + 457 name=d['name'], + 458 regex=d['regex'], + 459 att=KAtt.from_dict(d['att']) if d.get('att') else EMPTY_ATT, + 460 ) + 461 +
+[docs] + 462 def to_dict(self) -> dict[str, Any]: + 463 return { + 464 'node': 'KSyntaxLexical', + 465 'name': self.name, + 466 'regex': self.regex, + 467 'att': self.att.to_dict(), + 468 }
+ + 469 +
+[docs] + 470 def let(self, *, name: str | None = None, regex: str | None = None, att: KAtt | None = None) -> KSyntaxLexical: + 471 name = name if name is not None else self.name + 472 regex = regex if regex is not None else self.regex + 473 att = att if att is not None else self.att + 474 return KSyntaxLexical(name=name, regex=regex, att=att)
+ + 475 +
+[docs] + 476 def let_att(self, att: KAtt) -> KSyntaxLexical: + 477 return self.let(att=att)
+
+ + 478 + 479 +
+[docs] + 480class KAssoc(Enum): + 481 LEFT = 'Left' + 482 RIGHT = 'Right' + 483 NON_ASSOC = 'NonAssoc'
+ + 484 + 485 +
+[docs] + 486@final + 487@dataclass(frozen=True) + 488class KSyntaxAssociativity(KSentence): + 489 """Represents a standalone declaration of operator associativity for tagged productions.""" + 490 + 491 assoc: KAssoc + 492 tags: frozenset[str] + 493 att: KAtt + 494 + 495 def __init__(self, assoc: KAssoc, tags: Iterable[str] = frozenset(), att: KAtt = EMPTY_ATT): + 496 object.__setattr__(self, 'assoc', assoc) + 497 object.__setattr__(self, 'tags', frozenset(tags)) + 498 object.__setattr__(self, 'att', att) + 499 + 500 @classmethod + 501 def _from_dict(cls: type[KSyntaxAssociativity], d: Mapping[str, Any]) -> KSyntaxAssociativity: + 502 return KSyntaxAssociativity( + 503 assoc=KAssoc(d['assoc']), + 504 tags=d['tags'], + 505 att=KAtt.from_dict(d['att']) if d.get('att') else EMPTY_ATT, + 506 ) + 507 +
+[docs] + 508 def to_dict(self) -> dict[str, Any]: + 509 return { + 510 'node': 'KSyntaxAssociativity', + 511 'assoc': self.assoc.value, + 512 'tags': list(self.tags), + 513 'att': self.att.to_dict(), + 514 }
+ + 515 +
+[docs] + 516 def let( + 517 self, *, assoc: KAssoc | None = None, tags: Iterable[str] | None = None, att: KAtt | None = None + 518 ) -> KSyntaxAssociativity: + 519 assoc = assoc or self.assoc + 520 tags = tags if tags is not None else self.tags + 521 att = att if att is not None else self.att + 522 return KSyntaxAssociativity(assoc=assoc, tags=tags, att=att)
+ + 523 +
+[docs] + 524 def let_att(self, att: KAtt) -> KSyntaxAssociativity: + 525 return self.let(att=att)
+
+ + 526 + 527 +
+[docs] + 528@final + 529@dataclass(frozen=True) + 530class KSyntaxPriority(KSentence): + 531 """Represents a standalone declaration of syntax priorities, using production tags.""" + 532 + 533 priorities: tuple[frozenset[str], ...] + 534 att: KAtt + 535 + 536 def __init__(self, priorities: Iterable[Iterable[str]] = (), att: KAtt = EMPTY_ATT): + 537 object.__setattr__(self, 'priorities', tuple(frozenset(group) for group in priorities)) + 538 object.__setattr__(self, 'att', att) + 539 + 540 @classmethod + 541 def _from_dict(cls: type[KSyntaxPriority], d: Mapping[str, Any]) -> KSyntaxPriority: + 542 return KSyntaxPriority( + 543 priorities=d['priorities'], + 544 att=KAtt.from_dict(d['att']) if d.get('att') else EMPTY_ATT, + 545 ) + 546 +
+[docs] + 547 def to_dict(self) -> dict[str, Any]: + 548 return { + 549 'node': 'KSyntaxPriority', + 550 'priorities': [list(group) for group in self.priorities], + 551 'att': self.att.to_dict(), + 552 }
+ + 553 +
+[docs] + 554 def let(self, *, priorities: Iterable[Iterable[str]] | None = None, att: KAtt | None = None) -> KSyntaxPriority: + 555 priorities = priorities if priorities is not None else self.priorities + 556 att = att if att is not None else self.att + 557 return KSyntaxPriority(priorities=priorities, att=att)
+ + 558 +
+[docs] + 559 def let_att(self, att: KAtt) -> KSyntaxPriority: + 560 return self.let(att=att)
+
+ + 561 + 562 +
+[docs] + 563@final + 564@dataclass(frozen=True) + 565class KBubble(KSentence): + 566 """Represents an unparsed chunk of AST in user-defined syntax.""" + 567 + 568 sentence_type: str + 569 contents: str + 570 att: KAtt + 571 + 572 def __init__(self, sentence_type: str, contents: str, att: KAtt = EMPTY_ATT): + 573 object.__setattr__(self, 'sentence_type', sentence_type) + 574 object.__setattr__(self, 'contents', contents) + 575 object.__setattr__(self, 'att', att) + 576 + 577 @classmethod + 578 def _from_dict(cls: type[KBubble], d: Mapping[str, Any]) -> KBubble: + 579 return KBubble( + 580 sentence_type=d['sentenceType'], + 581 contents=d['contents'], + 582 att=KAtt.from_dict(d['att']) if d.get('att') else EMPTY_ATT, + 583 ) + 584 +
+[docs] + 585 def to_dict(self) -> dict[str, Any]: + 586 return { + 587 'node': 'KBubble', + 588 'sentenceType': self.sentence_type, + 589 'contents': self.contents, + 590 'att': self.att.to_dict(), + 591 }
+ + 592 +
+[docs] + 593 def let(self, *, sentence_type: str | None = None, contents: str | None = None, att: KAtt | None = None) -> KBubble: + 594 sentence_type = sentence_type if sentence_type is not None else self.sentence_type + 595 contents = contents if contents is not None else self.contents + 596 att = att if att is not None else self.att + 597 return KBubble(sentence_type=sentence_type, contents=contents, att=att)
+ + 598 +
+[docs] + 599 def let_att(self, att: KAtt) -> KBubble: + 600 return self.let(att=att)
+
+ + 601 + 602 +
+[docs] + 603class KRuleLike(KSentence): + 604 """Represents something with rule-like structure (with body, requires, and ensures clauses).""" + 605 + 606 body: KInner + 607 requires: KInner + 608 ensures: KInner + 609 +
+[docs] + 610 @abstractmethod + 611 def let(
+ + 612 self: RL, + 613 *, + 614 body: KInner | None = None, + 615 requires: KInner | None = None, + 616 ensures: KInner | None = None, + 617 att: KAtt | None = None, + 618 ) -> RL: ...
+ + 619 + 620 +
+[docs] + 621@final + 622@dataclass(frozen=True) + 623class KRule(KRuleLike): + 624 """Represents a K rule definition, typically a conditional rewrite/transition.""" + 625 + 626 body: KInner + 627 requires: KInner + 628 ensures: KInner + 629 att: KAtt + 630 + 631 def __init__(self, body: KInner, requires: KInner = TRUE, ensures: KInner = TRUE, att: KAtt = EMPTY_ATT): + 632 object.__setattr__(self, 'body', body) + 633 object.__setattr__(self, 'requires', requires) + 634 object.__setattr__(self, 'ensures', ensures) + 635 object.__setattr__(self, 'att', att) + 636 + 637 @classmethod + 638 def _from_dict(cls: type[KRule], d: Mapping[str, Any]) -> KRule: + 639 return KRule( + 640 body=KInner.from_dict(d['body']), + 641 requires=KInner.from_dict(d['requires']) if d.get('requires') else TRUE, + 642 ensures=KInner.from_dict(d['ensures']) if d.get('ensures') else TRUE, + 643 att=KAtt.from_dict(d['att']) if d.get('att') else EMPTY_ATT, + 644 ) + 645 +
+[docs] + 646 def to_dict(self) -> dict[str, Any]: + 647 return { + 648 'node': 'KRule', + 649 'body': self.body.to_dict(), + 650 'requires': self.requires.to_dict(), + 651 'ensures': self.ensures.to_dict(), + 652 'att': self.att.to_dict(), + 653 }
+ + 654 +
+[docs] + 655 def let( + 656 self, + 657 *, + 658 body: KInner | None = None, + 659 requires: KInner | None = None, + 660 ensures: KInner | None = None, + 661 att: KAtt | None = None, + 662 ) -> KRule: + 663 body = body if body is not None else self.body + 664 requires = requires if requires is not None else self.requires + 665 ensures = ensures if ensures is not None else self.ensures + 666 att = att if att is not None else self.att + 667 return KRule(body=body, requires=requires, ensures=ensures, att=att)
+ + 668 +
+[docs] + 669 def let_att(self, att: KAtt) -> KRule: + 670 return self.let(att=att)
+ + 671 + 672 @property + 673 def priority(self) -> int: + 674 return self.att.get(Atts.PRIORITY, 200 if Atts.OWISE in self.att else 50)
+ + 675 + 676 +
+[docs] + 677@final + 678@dataclass(frozen=True) + 679class KClaim(KRuleLike): + 680 """Represents a K claim, typically a transition with pre/post-conditions.""" + 681 + 682 body: KInner + 683 requires: KInner + 684 ensures: KInner + 685 att: KAtt + 686 + 687 def __init__(self, body: KInner, requires: KInner = TRUE, ensures: KInner = TRUE, att: KAtt = EMPTY_ATT): + 688 object.__setattr__(self, 'body', body) + 689 object.__setattr__(self, 'requires', requires) + 690 object.__setattr__(self, 'ensures', ensures) + 691 object.__setattr__(self, 'att', att) + 692 + 693 @classmethod + 694 def _from_dict(cls: type[KClaim], d: Mapping[str, Any]) -> KClaim: + 695 return KClaim( + 696 body=KInner.from_dict(d['body']), + 697 requires=KInner.from_dict(d['requires']) if d.get('requires') else TRUE, + 698 ensures=KInner.from_dict(d['ensures']) if d.get('ensures') else TRUE, + 699 att=KAtt.from_dict(d['att']) if d.get('att') else EMPTY_ATT, + 700 ) + 701 +
+[docs] + 702 def to_dict(self) -> dict[str, Any]: + 703 return { + 704 'node': 'KClaim', + 705 'body': self.body.to_dict(), + 706 'requires': self.requires.to_dict(), + 707 'ensures': self.ensures.to_dict(), + 708 'att': self.att.to_dict(), + 709 }
+ + 710 +
+[docs] + 711 def let( + 712 self, + 713 *, + 714 body: KInner | None = None, + 715 requires: KInner | None = None, + 716 ensures: KInner | None = None, + 717 att: KAtt | None = None, + 718 ) -> KClaim: + 719 body = body if body is not None else self.body + 720 requires = requires if requires is not None else self.requires + 721 ensures = ensures if ensures is not None else self.ensures + 722 att = att if att is not None else self.att + 723 return KClaim(body=body, requires=requires, ensures=ensures, att=att)
+ + 724 +
+[docs] + 725 def let_att(self, att: KAtt) -> KClaim: + 726 return self.let(att=att)
+ + 727 + 728 @property + 729 def is_circularity(self) -> bool: + 730 """Return whether this claim is a circularity (must be used coinductively to prove itself).""" + 731 return Atts.CIRCULARITY in self.att + 732 + 733 @property + 734 def is_trusted(self) -> bool: + 735 """Return whether this claim is trusted (does not need to be proven to be considered true).""" + 736 return Atts.TRUSTED in self.att + 737 + 738 @property + 739 def dependencies(self) -> list[str]: + 740 """Return the dependencies of this claim (list of other claims needed to help prove this one or speed up this ones proof).""" + 741 deps = self.att.get(Atts.DEPENDS) + 742 if deps is None: + 743 return [] + 744 return [x.strip() for x in deps.split(',')]
+ + 745 + 746 +
+[docs] + 747@final + 748@dataclass(frozen=True) + 749class KContext(KSentence): + 750 """Represents a K evaluation context, used for isolating chunks of computation and focusing on them.""" + 751 + 752 body: KInner + 753 requires: KInner + 754 att: KAtt + 755 + 756 def __init__(self, body: KInner, requires: KInner = TRUE, att: KAtt = EMPTY_ATT): + 757 object.__setattr__(self, 'body', body) + 758 object.__setattr__(self, 'requires', requires) + 759 object.__setattr__(self, 'att', att) + 760 + 761 @classmethod + 762 def _from_dict(cls: type[KContext], d: Mapping[str, Any]) -> KContext: + 763 return KContext( + 764 body=KInner.from_dict(d['body']), + 765 requires=KInner.from_dict(d['requires']) if d.get('requires') else TRUE, + 766 att=KAtt.from_dict(d['att']) if d.get('att') else EMPTY_ATT, + 767 ) + 768 +
+[docs] + 769 def to_dict(self) -> dict[str, Any]: + 770 return { + 771 'node': 'KContext', + 772 'body': self.body.to_dict(), + 773 'requires': self.requires.to_dict(), + 774 'att': self.att.to_dict(), + 775 }
+ + 776 +
+[docs] + 777 def let(self, *, body: KInner | None = None, requires: KInner | None = None, att: KAtt | None = None) -> KContext: + 778 body = body if body is not None else self.body + 779 requires = requires if requires is not None else self.requires + 780 att = att if att is not None else self.att + 781 return KContext(body=body, requires=requires, att=att)
+ + 782 +
+[docs] + 783 def let_att(self, att: KAtt) -> KContext: + 784 return self.let(att=att)
+
+ + 785 + 786 +
+[docs] + 787@final + 788@dataclass(frozen=True) + 789class KImport(KOuter): + 790 """Represents a K module import, used for inheriting all the sentences of the imported module into this one.""" + 791 + 792 name: str + 793 public: bool + 794 + 795 def __init__(self, name: str, public: bool = True): + 796 object.__setattr__(self, 'name', name) + 797 object.__setattr__(self, 'public', public) + 798 +
+[docs] + 799 @staticmethod + 800 def from_dict(d: Mapping[str, Any]) -> KImport: + 801 return KImport(name=d['name'], public=d['isPublic'])
+ + 802 +
+[docs] + 803 def to_dict(self) -> dict[str, Any]: + 804 return {'node': 'KImport', 'name': self.name, 'isPublic': self.public}
+ + 805 +
+[docs] + 806 def let(self, *, name: str | None = None, public: bool | None = None) -> KImport: + 807 name = name if name is not None else self.name + 808 public = public if public is not None else self.public + 809 return KImport(name=name, public=public)
+
+ + 810 + 811 +
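The `to_dict`/`from_dict` pair follows the same convention throughout the module; a tiny round-trip sketch with a hypothetical module name:

from pyk.kast.outer import KImport

imp = KImport('DEMO-SYNTAX', public=False)
assert imp.to_dict() == {'node': 'KImport', 'name': 'DEMO-SYNTAX', 'isPublic': False}
assert KImport.from_dict(imp.to_dict()) == imp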
+[docs] + 812@final + 813@dataclass(frozen=True) + 814class KFlatModule(KOuter, WithKAtt, Iterable[KSentence]): + 815 """Represents a K module, with a name, list of imports, and list of sentences.""" + 816 + 817 name: str + 818 sentences: tuple[KSentence, ...] + 819 imports: tuple[KImport, ...] + 820 att: KAtt + 821 + 822 def __init__( + 823 self, name: str, sentences: Iterable[KSentence] = (), imports: Iterable[KImport] = (), att: KAtt = EMPTY_ATT + 824 ): + 825 object.__setattr__(self, 'name', name) + 826 object.__setattr__(self, 'sentences', tuple(sentences)) + 827 object.__setattr__(self, 'imports', tuple(imports)) + 828 object.__setattr__(self, 'att', att) + 829 + 830 def __iter__(self) -> Iterator[KSentence]: + 831 return iter(self.sentences) + 832 + 833 @cached_property + 834 def productions(self) -> tuple[KProduction, ...]: + 835 """Return all the `KProduction` sentences from this module.""" + 836 return tuple(sentence for sentence in self if type(sentence) is KProduction) + 837 + 838 @cached_property + 839 def syntax_productions(self) -> tuple[KProduction, ...]: + 840 """Return all the `KProduction` sentences from this module that contain `KLabel` (are EBNF syntax declarations).""" + 841 return tuple(prod for prod in self.productions if prod.klabel) + 842 + 843 @cached_property + 844 def functions(self) -> tuple[KProduction, ...]: + 845 """Return all the `KProduction` sentences from this module that are function declarations.""" + 846 return tuple(prod for prod in self.syntax_productions if self._is_function(prod)) + 847 + 848 @cached_property + 849 def constructors(self) -> tuple[KProduction, ...]: + 850 """Return all the `KProduction` sentences from this module that are constructor declarations.""" + 851 return tuple(prod for prod in self.syntax_productions if not self._is_function(prod)) + 852 + 853 @cached_property + 854 def cell_collection_productions(self) -> tuple[KProduction, ...]: + 855 """Return all the `KProduction` sentences from this module that are cell collection declarations.""" + 856 return tuple(prod for prod in self.syntax_productions if Atts.CELL_COLLECTION in prod.att) + 857 + 858 @staticmethod + 859 def _is_function(prod: KProduction) -> bool: + 860 def is_not_actually_function(label: str) -> bool: + 861 is_cell_map_constructor = label.endswith('CellMapItem') or label.endswith('CellMap_') + 862 is_builtin_data_constructor = label in { + 863 '_Set_', + 864 '_List_', + 865 '_Map_', + 866 '_RangeMap_', + 867 '.Set', + 868 '.List', + 869 '.Map', + 870 '.RangeMap', + 871 'SetItem', + 872 'ListItem', + 873 '_|->_', + 874 '_r|->_', + 875 } + 876 return is_cell_map_constructor or is_builtin_data_constructor + 877 + 878 return (Atts.FUNCTION in prod.att or Atts.FUNCTIONAL in prod.att) and not ( + 879 prod.klabel and is_not_actually_function(prod.klabel.name) + 880 ) + 881 + 882 @cached_property + 883 def syntax_sorts(self) -> tuple[KSyntaxSort, ...]: + 884 """Return all the `KSyntaxSort` sentences from this module.""" + 885 return tuple(sentence for sentence in self if isinstance(sentence, KSyntaxSort)) + 886 + 887 @cached_property + 888 def rules(self) -> tuple[KRule, ...]: + 889 """Return all the `KRule` declared in this module.""" + 890 return tuple(sentence for sentence in self if type(sentence) is KRule) + 891 + 892 @cached_property + 893 def claims(self) -> tuple[KClaim, ...]: + 894 """Return all the `KClaim` declared in this module.""" + 895 return tuple(sentence for sentence in self if type(sentence) is KClaim) + 896 + 897 @cached_property + 898 def 
sentence_by_unique_id(self) -> dict[str, KSentence]: + 899 return {sent.unique_id: sent for sent in self.sentences if sent.unique_id is not None} + 900 + 901 @overload + 902 def map_sentences(self, f: Callable[[S], S], *, of_type: type[S]) -> KFlatModule: ... + 903 + 904 @overload + 905 def map_sentences(self, f: Callable[[KSentence], KSentence], *, of_type: None = None) -> KFlatModule: ... + 906 + 907 # Uses overload instead of default argument as a workaround: https://github.com/python/mypy/issues/3737 +
+[docs] + 908 def map_sentences(self, f: Callable, *, of_type: Any = None) -> KFlatModule: + 909 if of_type is None: + 910 of_type = KSentence + 911 return self.let(sentences=tuple(f(sent) if isinstance(sent, of_type) else sent for sent in self.sentences))
+ + 912 +
+[docs] + 913 @staticmethod + 914 def from_dict(d: Mapping[str, Any]) -> KFlatModule: + 915 return KFlatModule( + 916 name=d['name'], + 917 sentences=(KSentence.from_dict(sentence) for sentence in d['localSentences']), + 918 imports=(KImport.from_dict(imp) for imp in d['imports']), + 919 att=KAtt.from_dict(d['att']) if d.get('att') else EMPTY_ATT, + 920 )
+ + 921 +
+[docs] + 922 def to_dict(self) -> dict[str, Any]: + 923 return { + 924 'node': 'KFlatModule', + 925 'name': self.name, + 926 'localSentences': [sentence.to_dict() for sentence in self.sentences], + 927 'imports': [imp.to_dict() for imp in self.imports], + 928 'att': self.att.to_dict(), + 929 }
+ + 930 +
+[docs] + 931 def let( + 932 self, + 933 *, + 934 name: str | None = None, + 935 sentences: Iterable[KSentence] | None = None, + 936 imports: Iterable[KImport] | None = None, + 937 att: KAtt | None = None, + 938 ) -> KFlatModule: + 939 name = name if name is not None else self.name + 940 sentences = sentences if sentences is not None else self.sentences + 941 imports = imports if imports is not None else self.imports + 942 att = att if att is not None else self.att + 943 return KFlatModule(name=name, imports=imports, sentences=sentences, att=att)
+ + 944 +
+[docs] + 945 def let_att(self, att: KAtt) -> KFlatModule: + 946 return self.let(att=att)
+
+ + 947 + 948 +
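A small sketch of building a `KFlatModule` and using its sentence filters; the module name, import, and rule are hypothetical:

from pyk.kast.inner import KApply, KRewrite
from pyk.kast.outer import KFlatModule, KImport, KRule

rule = KRule(body=KRewrite(KApply('foo', []), KApply('bar', [])))
module = KFlatModule('DEMO', sentences=[rule], imports=[KImport('BASIC-K')])

assert module.rules == (rule,)   # only the KRule sentences
assert module.productions == ()  # no productions declared
assert [imp.name for imp in module.imports] == ['BASIC-K']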
+[docs] + 949@final + 950@dataclass(frozen=True) + 951class KFlatModuleList(KOuter): + 952 """Represents a list of K modules, as returned by the prover parser for example, with a given module called out as the main module.""" + 953 + 954 main_module: str + 955 modules: tuple[KFlatModule, ...] + 956 + 957 def __init__(self, main_module: str, modules: Iterable[KFlatModule]): + 958 object.__setattr__(self, 'main_module', main_module) + 959 object.__setattr__(self, 'modules', tuple(modules)) + 960 +
+[docs] + 961 @staticmethod + 962 def from_dict(d: Mapping[str, Any]) -> KFlatModuleList: + 963 return KFlatModuleList(main_module=d['mainModule'], modules=(KFlatModule.from_dict(kfm) for kfm in d['term']))
+ + 964 +
+[docs] + 965 def to_dict(self) -> dict[str, Any]: + 966 return { + 967 'node': 'KFlatModuleList', + 968 'mainModule': self.main_module, + 969 'term': [mod.to_dict() for mod in self.modules], + 970 }
+ + 971 +
+[docs] + 972 def let(self, *, main_module: str | None = None, modules: Iterable[KFlatModule] | None = None) -> KFlatModuleList: + 973 main_module = main_module if main_module is not None else self.main_module + 974 modules = modules if modules is not None else self.modules + 975 return KFlatModuleList(main_module=main_module, modules=modules)
+
+ + 976 + 977 +
+[docs] + 978@final + 979@dataclass(frozen=True) + 980class KRequire(KOuter): + 981 """Represents a K file import of another file.""" + 982 + 983 require: str + 984 + 985 def __init__(self, require: str): + 986 object.__setattr__(self, 'require', require) + 987 +
+[docs] + 988 @staticmethod + 989 def from_dict(d: Mapping[str, Any]) -> KRequire: + 990 return KRequire(require=d['require'])
+ + 991 +
+[docs] + 992 def to_dict(self) -> dict[str, Any]: + 993 return {'node': 'KRequire', 'require': self.require}
+ + 994 +
+[docs] + 995 def let(self, *, require: str | None = None) -> KRequire: + 996 require = require if require is not None else self.require + 997 return KRequire(require=require)
+
+ + 998 + 999 +
+[docs] +1000@final +1001@dataclass(frozen=True) +1002class KDefinition(KOuter, WithKAtt, Iterable[KFlatModule]): +1003 """Represents an entire K definition, with file imports and modules in place, and a given module called out as main module.""" +1004 +1005 main_module_name: str +1006 all_modules: tuple[KFlatModule, ...] +1007 requires: tuple[KRequire, ...] +1008 att: KAtt +1009 +1010 main_module: InitVar[KFlatModule] +1011 +1012 _init_config: dict[KSort, KInner] +1013 _empty_config: dict[KSort, KInner] +1014 +1015 def __init__( +1016 self, +1017 main_module_name: str, +1018 all_modules: Iterable[KFlatModule], +1019 requires: Iterable[KRequire] = (), +1020 att: KAtt = EMPTY_ATT, +1021 ): +1022 all_modules = tuple(all_modules) +1023 main_modules = [module for module in all_modules if module.name == main_module_name] +1024 +1025 if not main_modules: +1026 raise ValueError(f'Module not found: {main_module_name}') +1027 if len(main_modules) > 1: +1028 raise ValueError(f'Module is not unique: {main_module_name}') +1029 +1030 main_module = main_modules[0] +1031 +1032 object.__setattr__(self, 'main_module_name', main_module_name) +1033 object.__setattr__(self, 'all_modules', tuple(all_modules)) +1034 object.__setattr__(self, 'requires', tuple(requires)) +1035 object.__setattr__(self, 'att', att) +1036 object.__setattr__(self, 'main_module', main_module) +1037 object.__setattr__(self, '_init_config', {}) +1038 object.__setattr__(self, '_empty_config', {}) +1039 +1040 def __iter__(self) -> Iterator[KFlatModule]: +1041 return iter(self.all_modules) +1042 +
+[docs] +1043 @staticmethod +1044 def from_dict(d: Mapping[str, Any]) -> KDefinition: +1045 return KDefinition( +1046 main_module_name=d['mainModule'], +1047 all_modules=(KFlatModule.from_dict(module) for module in d['modules']), +1048 requires=(KRequire.from_dict(require) for require in d['requires']) if d.get('requires') else (), +1049 att=KAtt.from_dict(d['att']) if d.get('att') else EMPTY_ATT, +1050 )
+ +1051 +
+[docs] +1052 def to_dict(self) -> dict[str, Any]: +1053 return { +1054 'node': 'KDefinition', +1055 'mainModule': self.main_module_name, +1056 'modules': [module.to_dict() for module in self.all_modules], +1057 'requires': [require.to_dict() for require in self.requires], +1058 'att': self.att.to_dict(), +1059 }
+ +1060 +
+[docs] +1061 def let( +1062 self, +1063 *, +1064 main_module_name: str | None = None, +1065 all_modules: Iterable[KFlatModule] | None = None, +1066 requires: Iterable[KRequire] | None = None, +1067 att: KAtt | None = None, +1068 ) -> KDefinition: +1069 main_module_name = main_module_name if main_module_name is not None else self.main_module_name +1070 all_modules = all_modules if all_modules is not None else self.all_modules +1071 requires = requires if requires is not None else self.requires +1072 att = att if att is not None else self.att +1073 return KDefinition(main_module_name=main_module_name, all_modules=all_modules, requires=requires, att=att)
+ +1074 +
+[docs] +1075 def let_att(self, att: KAtt) -> KDefinition: +1076 return self.let(att=att)
+ +1077 +1078 @cached_property +1079 def all_module_names(self) -> tuple[str, ...]: +1080 """Return the names of all modules in this `KDefinition`.""" +1081 return tuple(module.name for module in self.all_modules) +1082 +1083 @cached_property +1084 def module_names(self) -> tuple[str, ...]: +1085 """Return the list of module names transitively imported by the main module of this definition.""" +1086 module_names = [self.main_module_name] +1087 seen_modules = [] +1088 while len(module_names) > 0: +1089 mname = module_names.pop(0) +1090 if mname not in seen_modules: +1091 seen_modules.append(mname) +1092 module_names.extend([i.name for i in self.all_modules_dict[mname].imports]) +1093 return tuple(seen_modules) +1094 +1095 @cached_property +1096 def all_modules_dict(self) -> dict[str, KFlatModule]: +1097 """Returns a dictionary of all the modules (with names as keys) defined in this definition.""" +1098 return {m.name: m for m in self.all_modules} +1099 +1100 @cached_property +1101 def modules(self) -> tuple[KFlatModule, ...]: +1102 """Returns the list of modules transitively imported by the main module of this definition.""" +1103 return tuple(self.all_modules_dict[mname] for mname in self.module_names) +1104 +1105 @cached_property +1106 def productions(self) -> tuple[KProduction, ...]: +1107 """Returns the `KProduction` transitively imported by the main module of this definition.""" +1108 return tuple(prod for module in self.modules for prod in module.productions) +1109 +1110 @cached_property +1111 def syntax_productions(self) -> tuple[KProduction, ...]: +1112 """Returns the `KProduction` which are syntax declarations transitively imported by the main module of this definition.""" +1113 return tuple(prod for module in self.modules for prod in module.syntax_productions) +1114 +1115 @cached_property +1116 def functions(self) -> tuple[KProduction, ...]: +1117 """Returns the `KProduction` which are function declarations transitively imported by the main module of this definition.""" +1118 return tuple(func for module in self.modules for func in module.functions) +1119 +1120 @cached_property +1121 def function_labels(self) -> tuple[str, ...]: +1122 """Returns the label names of all the `KProduction` which are function symbols for all modules in this definition.""" +1123 return tuple(not_none(func.klabel).name for func in self.functions) +1124 +1125 @cached_property +1126 def constructors(self) -> tuple[KProduction, ...]: +1127 """Returns the `KProduction` which are constructor declarations transitively imported by the main module of this definition.""" +1128 return tuple(ctor for module in self.modules for ctor in module.constructors) +1129 +1130 @cached_property +1131 def cell_collection_productions(self) -> tuple[KProduction, ...]: +1132 """Returns the `KProduction` which are cell collection declarations transitively imported by the main module of this definition.""" +1133 return tuple(prod for module in self.modules for prod in module.cell_collection_productions) +1134 +1135 @cached_property +1136 def rules(self) -> tuple[KRule, ...]: +1137 """Returns the `KRule` sentences transitively imported by the main module of this definition.""" +1138 return tuple(rule for module in self.modules for rule in module.rules) +1139 +1140 @cached_property +1141 def alias_rules(self) -> tuple[KRule, ...]: +1142 """Returns the `KRule` sentences which are `alias` transitively imported by the main module of this definition.""" +1143 return tuple(rule for rule in self.rules if Atts.ALIAS in rule.att) +1144 +1145 
@cached_property +1146 def macro_rules(self) -> tuple[KRule, ...]: +1147 """Returns the `KRule` sentences which are `alias` or `macro` transitively imported by the main module of this definition.""" +1148 return tuple(rule for rule in self.rules if Atts.MACRO in rule.att) + self.alias_rules +1149 +1150 @cached_property +1151 def semantic_rules(self) -> tuple[KRule, ...]: +1152 """Returns the `KRule` sentences which are topmost transitively imported by the main module of this definition.""" +1153 +1154 def is_semantic(rule: KRule) -> bool: +1155 return (type(rule.body) is KApply and rule.body.label.name == '<generatedTop>') or ( +1156 type(rule.body) is KRewrite +1157 and type(rule.body.lhs) is KApply +1158 and rule.body.lhs.label.name == '<generatedTop>' +1159 ) +1160 +1161 return tuple(rule for rule in self.rules if is_semantic(rule)) +1162 +1163 @cached_property +1164 def sentence_by_unique_id(self) -> dict[str, KSentence]: +1165 unique_id_map: dict[str, KSentence] = {} +1166 for module in self.all_modules: +1167 for unique_id, sent in module.sentence_by_unique_id.items(): +1168 if unique_id in unique_id_map and sent != unique_id_map[unique_id]: +1169 _LOGGER.debug( +1170 f'Same UNIQUE_ID found for two different sentences: {(sent, unique_id_map[unique_id])}' +1171 ) +1172 else: +1173 unique_id_map[unique_id] = sent +1174 return unique_id_map +1175 +
+[docs] +1176 def production_for_cell_sort(self, sort: KSort) -> KProduction: +1177 """Return the production for a given cell-declaration syntax from the cell's declared sort.""" +1178 # Typical cell production has 3 productions: +1179 # syntax KCell ::= "project:KCell" "(" K ")" [function, projection] +1180 # syntax KCell ::= "initKCell" "(" Map ")" [function, initializer, noThread] +1181 # syntax KCell ::= "<k>" K "</k>" [cell, cellName(k), format(%1%i%n%2%d%n%3), maincell, org.kframework.definition.Production(syntax #RuleContent ::= #RuleBody [klabel(#ruleNoConditions), symbol])] +1182 # And it may have a 4th: +1183 # syntax GeneratedCounterCell ::= "getGeneratedCounterCell" "(" GeneratedTopCell ")" [function] +1184 # We want the actual label one (3rd one in the list). +1185 if not sort.name.endswith('Cell'): +1186 raise ValueError( +1187 f'Method production_for_cell_sort only intended to be called on sorts ending in "Cell", not: {sort}' +1188 ) +1189 try: +1190 return single(prod for prod in self.productions if prod.sort == sort and Atts.CELL in prod.att) +1191 except ValueError as err: +1192 raise ValueError(f'Expected a single cell production for sort {sort}') from err
+ +1193 +
+[docs] +1194 def module(self, name: str) -> KFlatModule: +1195 """Return the module associated with a given name.""" +1196 return self.all_modules_dict[name]
+ +1197 +1198 @cached_property +1199 def subsort_table(self) -> FrozenDict[KSort, frozenset[KSort]]: +1200 """Return a mapping from sorts to all their proper subsorts.""" +1201 poset = POSet(subsort for prod in self.productions if (subsort := prod.as_subsort) is not None) +1202 return poset.image +1203 +
+[docs] +1204 def subsorts(self, sort: KSort) -> frozenset[KSort]: +1205 """Return all subsorts of a given `KSort` by inspecting the definition.""" +1206 return self.subsort_table.get(sort, frozenset())
+ +1207 +1208 @cached_property +1209 def brackets(self) -> FrozenDict[KSort, KProduction]: +1210 brackets: dict[KSort, KProduction] = {} +1211 for prod in self.productions: +1212 if Atts.BRACKET in prod.att: +1213 assert not prod.klabel +1214 sort = prod.sort +1215 if sort in brackets: +1216 raise ValueError(f'Multiple bracket productions for sort: {sort.name}') +1217 brackets[sort] = prod +1218 return FrozenDict(brackets) +1219 +1220 @cached_property +1221 def symbols(self) -> FrozenDict[str, KProduction]: +1222 symbols: dict[str, KProduction] = {} +1223 for prod in self.productions: +1224 if not prod.klabel: +1225 continue +1226 symbol = prod.klabel.name +1227 if symbol in symbols: # Check if duplicate +1228 other = symbols[symbol] +1229 if prod.let(att=prod.att.drop_source()) != other.let(att=prod.att.drop_source()): +1230 prods = [other, prod] +1231 raise AssertionError(f'Found multiple productions for {symbol}: {prods}') +1232 continue +1233 symbols[symbol] = prod +1234 return FrozenDict(symbols) +1235 +1236 @cached_property +1237 def syntax_symbols(self) -> FrozenDict[str, KProduction]: +1238 brackets: dict[str, KProduction] = { +1239 prod.att[Atts.BRACKET_LABEL]['name']: prod for _, prod in self.brackets.items() +1240 } +1241 return FrozenDict({**self.symbols, **brackets}) +1242 +1243 @cached_property +1244 def overloads(self) -> FrozenDict[str, frozenset[str]]: +1245 """Return a mapping from symbols to the sets of symbols that overload them.""" +1246 +1247 def lt(overloader: KProduction, overloaded: KProduction) -> bool: +1248 assert overloader.klabel +1249 assert overloaded.klabel +1250 assert overloader.klabel.name != overloaded.klabel.name +1251 assert Atts.OVERLOAD in overloader.att +1252 assert Atts.OVERLOAD in overloaded.att +1253 assert overloader.att[Atts.OVERLOAD] == overloaded.att[Atts.OVERLOAD] +1254 overloader_sorts = [overloader.sort] + overloader.argument_sorts +1255 overloaded_sorts = [overloaded.sort] + overloaded.argument_sorts +1256 if len(overloader_sorts) != len(overloaded_sorts): +1257 return False +1258 less = False +1259 for overloader_sort, overloaded_sort in zip(overloader_sorts, overloaded_sorts, strict=True): +1260 if overloader_sort == overloaded_sort: +1261 continue +1262 if overloader_sort in self.subsorts(overloaded_sort): +1263 less = True +1264 continue +1265 return False +1266 return less +1267 +1268 symbols_by_overload: dict[str, list[str]] = {} +1269 for symbol in self.symbols: +1270 prod = self.symbols[symbol] +1271 if Atts.OVERLOAD in prod.att: +1272 symbols_by_overload.setdefault(prod.att[Atts.OVERLOAD], []).append(symbol) +1273 +1274 overloads: dict[str, list[str]] = {} +1275 for _, symbols in symbols_by_overload.items(): +1276 for overloader in symbols: +1277 for overloaded in symbols: +1278 if overloader == overloaded: +1279 continue +1280 if lt(overloader=self.symbols[overloader], overloaded=self.symbols[overloaded]): +1281 # Index by overloaded symbol, this way it is easy to look them up +1282 overloads.setdefault(overloaded, []).append(overloader) +1283 return FrozenDict({key: frozenset(values) for key, values in overloads.items()}) +1284 +1285 @cached_property +1286 def priorities(self) -> FrozenDict[str, frozenset[str]]: +1287 """Return a mapping from symbols to the sets of symbols with lower priority.""" +1288 syntax_priorities = ( +1289 sent for module in self.modules for sent in module.sentences if isinstance(sent, KSyntaxPriority) +1290 ) +1291 relation = tuple( +1292 pair +1293 for syntax_priority in syntax_priorities +1294 for 
highers, lowers in pairwise(syntax_priority.priorities) +1295 for pair in product(highers, lowers) +1296 ) +1297 return POSet(relation).image +1298 +1299 @cached_property +1300 def left_assocs(self) -> FrozenDict[str, frozenset[str]]: +1301 return FrozenDict({key: frozenset(value) for key, value in self._assocs(KAssoc.LEFT).items()}) +1302 +1303 @cached_property +1304 def right_assocs(self) -> FrozenDict[str, frozenset[str]]: +1305 return FrozenDict({key: frozenset(value) for key, value in self._assocs(KAssoc.RIGHT).items()}) +1306 +1307 def _assocs(self, assoc: KAssoc) -> dict[str, set[str]]: +1308 sents = ( +1309 sent +1310 for module in self.modules +1311 for sent in module.sentences +1312 if isinstance(sent, KSyntaxAssociativity) and sent.assoc in (assoc, KAssoc.NON_ASSOC) +1313 ) +1314 pairs = (pair for sent in sents for pair in product(sent.tags, sent.tags)) +1315 +1316 def insert(dct: dict[str, set[str]], *, key: str, value: str) -> dict[str, set[str]]: +1317 dct.setdefault(key, set()).add(value) +1318 return dct +1319 +1320 return reduce(lambda res, pair: insert(res, key=pair[0], value=pair[1]), pairs, {}) +1321 +
+[docs] +1322 def sort(self, kast: KInner) -> KSort | None: +1323 """Compute the sort of a given term using best-effort simple sorting algorithm, returns `None` on algorithm failure.""" +1324 match kast: +1325 case KToken(_, sort) | KVariable(_, sort): +1326 return sort +1327 case KRewrite(lhs, rhs): +1328 lhs_sort = self.sort(lhs) +1329 rhs_sort = self.sort(rhs) +1330 if lhs_sort and rhs_sort: +1331 return self.least_common_supersort(lhs_sort, rhs_sort) +1332 return None +1333 case KSequence(_): +1334 return KSort('K') +1335 case KApply(label, _): +1336 sort, _ = self.resolve_sorts(label) +1337 return sort +1338 case _: +1339 return None
+ +1340 +
+[docs] +1341 def sort_strict(self, kast: KInner) -> KSort: +1342 """Compute the sort of a given term using best-effort simple sorting algorithm, dies on algorithm failure.""" +1343 sort = self.sort(kast) +1344 if sort is None: +1345 raise ValueError(f'Could not determine sort of term: {kast}') +1346 return sort
+ +1347 +
+[docs] +1348 def resolve_sorts(self, label: KLabel) -> tuple[KSort, tuple[KSort, ...]]: +1349 """Compute the result and argument sorts for a given production based on a `KLabel`.""" +1350 prod = self.symbols[label.name] +1351 sorts = dict(zip(prod.params, label.params, strict=True)) +1352 +1353 def resolve(sort: KSort) -> KSort: +1354 return sorts.get(sort, sort) +1355 +1356 return resolve(prod.sort), tuple(resolve(sort) for sort in prod.argument_sorts)
+ +1357 +
+[docs] +1358 def least_common_supersort(self, sort1: KSort, sort2: KSort) -> KSort | None: +1359 """Compute the least upper bound of two sorts in the sort lattice using a very simple approach, returning `None` on failure (which does not necessarily mean there is no lub).""" +1360 if sort1 == sort2: +1361 return sort1 +1362 if sort1 in self.subsorts(sort2): +1363 return sort2 +1364 if sort2 in self.subsorts(sort1): +1365 return sort1 +1366 # Computing least common supersort is not currently supported if sort1 is not a subsort of sort2 or +1367 # vice versa. In that case there may be more than one LCS. +1368 return None
+ +1369 +
+[docs] +1370 def greatest_common_subsort(self, sort1: KSort, sort2: KSort) -> KSort | None: +1371 """Compute the greatest lower bound of two sorts in the sort lattice using a very simple approach, returning `None` on failure (which does not necessarily mean there is no glb).""" +1372 if sort1 == sort2: +1373 return sort1 +1374 if sort1 in self.subsorts(sort2): +1375 return sort1 +1376 if sort2 in self.subsorts(sort1): +1377 return sort2 +1378 # Computing greatest common subsort is not currently supported if sort1 is not a subsort of sort2 or +1379 # vice versa. In that case there may be more than one GCS. +1380 return None
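A minimal sketch exercising the subsort and sorting queries on a tiny hypothetical definition (one module containing a single subsort production `Exp ::= Int`):

from pyk.kast.inner import KSort, KToken
from pyk.kast.outer import KDefinition, KFlatModule, KNonTerminal, KProduction

int_sort, exp_sort = KSort('Int'), KSort('Exp')

# syntax Exp ::= Int  (a subsort production: a single unlabelled non-terminal)
subsort_prod = KProduction(sort=exp_sort, items=[KNonTerminal(int_sort)])
module = KFlatModule('DEMO', sentences=[subsort_prod])
defn = KDefinition('DEMO', [module])

assert defn.subsorts(exp_sort) == frozenset([int_sort])
assert defn.least_common_supersort(int_sort, exp_sort) == exp_sort
assert defn.greatest_common_subsort(int_sort, exp_sort) == int_sort
assert defn.sort(KToken('1', int_sort)) == int_sort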
+ +1381 +1382 # Sorts like Int cannot be injected directly into sort K so they are embedded in a KSequence. +
+[docs] +1383 def add_ksequence_under_k_productions(self, kast: KInner) -> KInner: +1384 """Inject a `KSequence` under the given term if it's a subsort of `K` and is being used somewhere that sort `K` is expected (determined by inspecting the definition).""" +1385 +1386 def _add_ksequence_under_k_productions(_kast: KInner) -> KInner: +1387 if type(_kast) is not KApply: +1388 return _kast +1389 +1390 prod = self.symbols[_kast.label.name] +1391 return KApply( +1392 _kast.label, +1393 [ +1394 KSequence(arg) if sort.name == 'K' and not self.sort(arg) == KSort('K') else arg +1395 for arg, sort in zip(_kast.args, prod.argument_sorts, strict=True) +1396 ], +1397 ) +1398 +1399 return top_down(_add_ksequence_under_k_productions, kast)
+ +1400 +
+[docs] +1401 def sort_vars(self, kast: KInner, sort: KSort | None = None) -> KInner: +1402 """Return the original term with all the variables having the sorts added or specialized, failing if receiving conflicting sorts for a given variable.""" +1403 if type(kast) is KVariable and kast.sort is None and sort is not None: +1404 return kast.let(sort=sort) +1405 +1406 def get_quantifier_variable(q: KApply) -> KVariable: +1407 if q.arity != 2: +1408 raise ValueError(f'Expected a quantifier to have 2 children, got {q.arity}.') +1409 var = q.args[0] +1410 if not isinstance(var, KVariable): +1411 raise ValueError(f"Expected a quantifier's first child to be a variable, got {type(var)}.") +1412 return var +1413 +1414 def merge_variables( +1415 term: KInner, occurrences_list: list[dict[str, list[KVariable]]] +1416 ) -> dict[str, list[KVariable]]: +1417 result: dict[str, list[KVariable]] = defaultdict(list) +1418 for occurrences in occurrences_list: +1419 assert isinstance(occurrences, dict), type(occurrences) +1420 for key, value in occurrences.items(): +1421 result[key] += value +1422 if isinstance(term, KVariable): +1423 result[term.name].append(term) +1424 elif isinstance(term, KApply): +1425 if term.label.name in ML_QUANTIFIERS: +1426 var = get_quantifier_variable(term) +1427 result[var.name].append(var) +1428 return result +1429 +1430 def add_var_to_subst(vname: str, vars: list[KVariable], subst: dict[str, KVariable]) -> None: +1431 vsorts = list(unique(v.sort for v in vars if v.sort is not None)) +1432 if len(vsorts) > 0: +1433 vsort = vsorts[0] +1434 for s in vsorts[1:]: +1435 _vsort = self.greatest_common_subsort(vsort, s) +1436 if _vsort is None: +1437 raise ValueError(f'Cannot compute greatest common subsort of {vname}: {(vsort, s)}') +1438 vsort = _vsort +1439 subst[vname] = KVariable(vname, sort=vsort) +1440 +1441 def transform( +1442 term: KInner, child_variables: list[dict[str, list[KVariable]]] +1443 ) -> tuple[KInner, dict[str, list[KVariable]]]: +1444 occurrences = merge_variables(term, child_variables) +1445 +1446 if isinstance(term, KApply): +1447 if term.label.name in ML_QUANTIFIERS: +1448 var = get_quantifier_variable(term) +1449 subst: dict[str, KVariable] = {} +1450 add_var_to_subst(var.name, occurrences[var.name], subst) +1451 del occurrences[var.name] +1452 return (Subst(subst)(term), occurrences) +1453 else: +1454 prod = self.symbols[term.label.name] +1455 if len(prod.params) == 0: +1456 for t, a in zip(prod.argument_sorts, term.args, strict=True): +1457 if type(a) is KVariable: +1458 occurrences[a.name].append(a.let_sort(t)) +1459 elif isinstance(term, KSequence) and term.arity > 0: +1460 for a in term.items[0:-1]: +1461 if type(a) is KVariable: +1462 occurrences[a.name].append(a.let_sort(KSort('KItem'))) +1463 last_a = term.items[-1] +1464 if type(last_a) is KVariable: +1465 occurrences[last_a.name].append(last_a.let_sort(KSort('K'))) +1466 return (term, occurrences) +1467 +1468 (new_term, var_occurrences) = bottom_up_with_summary(transform, kast) +1469 +1470 subst: dict[str, KVariable] = {} +1471 for vname, occurrences in var_occurrences.items(): +1472 add_var_to_subst(vname, occurrences, subst) +1473 +1474 return Subst(subst)(new_term)
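A hypothetical sketch of `sort_vars` in action (it assumes `defn` is a loaded `KDefinition` whose modules declare an integer addition symbol; the label `_+Int_` is illustrative and depends on the definition):

from pyk.kast.inner import KApply, KVariable

term = KApply('_+Int_', [KVariable('X'), KVariable('Y')])
sorted_term = defn.sort_vars(term)
# Both variables should now carry the argument sort of the production,
# e.g. KVariable('X', sort=KSort('Int')) in a definition using the standard INT module.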
+ +1475 +1476 # Best-effort addition of sort parameters to klabels, context insensitive +
+[docs] +1477 def add_sort_params(self, kast: KInner) -> KInner: +1478 """Return a given term with the sort parameters on the `KLabel` filled in (which may be missing because of how the frontend works), best effort.""" +1479 +1480 def _add_sort_params(_k: KInner) -> KInner: +1481 if type(_k) is KApply: +1482 prod = self.symbols[_k.label.name] +1483 if len(_k.label.params) == 0 and len(prod.params) > 0: +1484 sort_dict: dict[KSort, KSort] = {} +1485 for psort, asort in zip(prod.argument_sorts, map(self.sort, _k.args), strict=True): +1486 if asort is None: +1487 _LOGGER.warning( +1488 f'Failed to add sort parameter, unable to determine sort for argument in production: {(prod, psort, asort)}' +1489 ) +1490 return _k +1491 if psort in prod.params: +1492 if psort in sort_dict and sort_dict[psort] != asort: +1493 _LOGGER.warning( +1494 f'Failed to add sort parameter, sort mismatch between different occurrences of sort parameter: {(prod, psort, sort_dict[psort], asort)}' +1495 ) +1496 return _k +1497 elif psort not in sort_dict: +1498 sort_dict[psort] = asort +1499 if all(p in sort_dict for p in prod.params): +1500 return _k.let(label=KLabel(_k.label.name, [sort_dict[p] for p in prod.params])) +1501 return _k +1502 +1503 return bottom_up(_add_sort_params, kast)
+ +1504 +
+[docs] +1505 def add_cell_map_items(self, kast: KInner) -> KInner: +1506 """Wrap cell-map items in the syntactical wrapper that the frontend generates for them (see `KDefinition.remove_cell_map_items`).""" +1507 # example: +1508 # syntax AccountCellMap [cellCollection, hook(MAP.Map)] +1509 # syntax AccountCellMap ::= AccountCellMap AccountCellMap [assoc, avoid, cellCollection, comm, element(AccountCellMapItem), function, hook(MAP.concat), unit(.AccountCellMap), wrapElement(<account>)] +1510 +1511 cell_wrappers = {} +1512 for ccp in self.cell_collection_productions: +1513 if Atts.ELEMENT in ccp.att and Atts.WRAP_ELEMENT in ccp.att: +1514 cell_wrappers[ccp.att[Atts.WRAP_ELEMENT]] = ccp.att[Atts.ELEMENT] +1515 +1516 def _wrap_elements(_k: KInner) -> KInner: +1517 if type(_k) is KApply and _k.label.name in cell_wrappers: +1518 return KApply(cell_wrappers[_k.label.name], [_k.args[0], _k]) +1519 return _k +1520 +1521 # To ensure we don't get duplicate wrappers. +1522 _kast = self.remove_cell_map_items(kast) +1523 return bottom_up(_wrap_elements, _kast)
+ +1524 +
+[docs] +1525 def remove_cell_map_items(self, kast: KInner) -> KInner: +1526 """Remove cell-map syntactical wrapper items that the frontend generates (see `KDefinition.add_cell_map_items`).""" +1527 # example: +1528 # syntax AccountCellMap [cellCollection, hook(MAP.Map)] +1529 # syntax AccountCellMap ::= AccountCellMap AccountCellMap [assoc, avoid, cellCollection, comm, element(AccountCellMapItem), function, hook(MAP.concat), unit(.AccountCellMap), wrapElement(<account>)] +1530 +1531 cell_wrappers = {} +1532 for ccp in self.cell_collection_productions: +1533 if Atts.ELEMENT in ccp.att and Atts.WRAP_ELEMENT in ccp.att: +1534 cell_wrappers[ccp.att[Atts.ELEMENT]] = ccp.att[Atts.WRAP_ELEMENT] +1535 +1536 def _wrap_elements(_k: KInner) -> KInner: +1537 if ( +1538 type(_k) is KApply +1539 and _k.label.name in cell_wrappers +1540 and len(_k.args) == 2 +1541 and type(_k.args[1]) is KApply +1542 and _k.args[1].label.name == cell_wrappers[_k.label.name] +1543 ): +1544 return _k.args[1] +1545 return _k +1546 +1547 return bottom_up(_wrap_elements, kast)
+ +1548 +
+[docs] +1549 def empty_config(self, sort: KSort) -> KInner: +1550 """Given a cell-sort, compute an "empty" configuration for it (all the constructor structure of the configuration in place, but variables in cell positions).""" +1551 if sort not in self._empty_config: +1552 self._empty_config[sort] = self._compute_empty_config(sort) +1553 return self._empty_config[sort]
+ +1554 +1555 def _compute_empty_config(self, sort: KSort) -> KInner: +1556 def _kdefinition_empty_config(_sort: KSort) -> KApply: +1557 cell_prod = self.production_for_cell_sort(_sort) +1558 cell_klabel = cell_prod.klabel +1559 assert cell_klabel is not None +1560 production = self.symbols[cell_klabel.name] +1561 args: list[KInner] = [] +1562 num_nonterminals = 0 +1563 num_freshvars = 0 +1564 for p_item in production.items: +1565 if type(p_item) is KNonTerminal: +1566 num_nonterminals += 1 +1567 if p_item.sort.name.endswith('Cell'): +1568 args.append(_kdefinition_empty_config(p_item.sort)) +1569 else: +1570 num_freshvars += 1 +1571 args.append(KVariable(_sort.name[0:-4].upper() + '_CELL')) +1572 if num_nonterminals > 1 and num_freshvars > 0: +1573 raise ValueError(f'Found mixed cell and non-cell arguments to cell constructor for: {sort}') +1574 return KApply(cell_klabel, args) +1575 +1576 return _kdefinition_empty_config(sort) +1577 +
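A sketch of `empty_config` (hypothetical: it assumes a loaded `KDefinition` with the standard generated top cell; the sort name `GeneratedTopCell` is the usual one produced by kompile):

from pyk.kast.inner import KSort

# For a configuration like <T> <k> ... </k> <state> ... </state> </T>, this yields a term of the shape
#   <generatedTop> <k> K_CELL </k> <state> STATE_CELL </state> ... </generatedTop>
# with a fresh variable in every leaf cell position.
skeleton = defn.empty_config(KSort('GeneratedTopCell'))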
+[docs] +1578 def instantiate_cell_vars(self, term: KInner) -> KInner: +1579 """Given a partially-complete configuration, find positions where variables stand in place of further cell structure, and instantiate that cell structure.""" +1580 +1581 def _cell_vars_to_labels(_kast: KInner) -> KInner: +1582 if type(_kast) is KApply and _kast.is_cell: +1583 production = self.symbols[_kast.label.name] +1584 production_arity = [item.sort for item in production.non_terminals] +1585 new_args = [] +1586 for sort, arg in zip(production_arity, _kast.args, strict=True): +1587 if sort.name.endswith('Cell') and type(arg) is KVariable: +1588 new_args.append(self.empty_config(sort)) +1589 else: +1590 new_args.append(arg) +1591 return KApply(_kast.label, new_args) +1592 return _kast +1593 +1594 return bottom_up(_cell_vars_to_labels, term)
+ +1595 +
+[docs] +1596 def init_config(self, sort: KSort) -> KInner: +1597 """Return an initialized configuration as the user declares it in the semantics, complete with configuration variables in place.""" +1598 if sort not in self._init_config: +1599 self._init_config[sort] = self._compute_init_config(sort) +1600 return self._init_config[sort]
+ +1601 +1602 def _compute_init_config(self, sort: KSort) -> KInner: +1603 config_var_map = KVariable('__###CONFIG_VAR_MAP###__') +1604 +1605 def _remove_config_var_lookups(_kast: KInner) -> KInner: +1606 if type(_kast) is KApply and _kast.label.name.startswith('project:') and len(_kast.args) == 1: +1607 _term = _kast.args[0] +1608 if type(_term) is KApply and _term.label == KLabel('Map:lookup') and _term.args[0] == config_var_map: +1609 _token_var = _term.args[1] +1610 if type(_token_var) is KToken and _token_var.sort == KSort('KConfigVar'): +1611 return KVariable(_token_var.token) +1612 return _kast +1613 +1614 init_prods = (prod for prod in self.syntax_productions if Atts.INITIALIZER in prod.att) +1615 try: +1616 init_prod = single(prod for prod in init_prods if prod.sort == sort) +1617 except ValueError as err: +1618 raise ValueError(f'Did not find unique initializer for sort: {sort}') from err +1619 +1620 prod_klabel = init_prod.klabel +1621 assert prod_klabel is not None +1622 arg_sorts = [nt.sort for nt in init_prod.items if type(nt) is KNonTerminal] +1623 init_config: KInner +1624 if len(arg_sorts) == 0: +1625 init_config = KApply(prod_klabel) +1626 elif len(arg_sorts) == 1 and arg_sorts[0] == KSort('Map'): +1627 init_config = KApply(prod_klabel, [config_var_map]) +1628 else: +1629 raise ValueError(f'Cannot handle initializer for label: {prod_klabel}') +1630 +1631 init_rewrites = [ +1632 rule.body for rule in self.rules if Atts.INITIALIZER in rule.att and type(rule.body) is KRewrite +1633 ] +1634 init_config = indexed_rewrite(init_config, init_rewrites) +1635 init_config = top_down(_remove_config_var_lookups, init_config) +1636 +1637 return init_config
+ +1638 +1639 +
+[docs] +1640def read_kast_definition(path: str | PathLike) -> KDefinition: +1641 """Read a `KDefinition` from disk, failing if it's not actually a `KDefinition`.""" +1642 with open(path) as f: +1643 _LOGGER.info(f'Loading JSON definition: {path}') +1644 json_defn = json.load(f) +1645 _LOGGER.info(f'Converting JSON definition to Kast: {path}') +1646 return KDefinition.from_dict(kast_term(json_defn))
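A minimal end-to-end sketch of loading a definition from its JSON and asking for its declared initial configuration (the path and the `GeneratedTopCell` sort name are assumptions about a typical kompiled project):

from pyk.kast.inner import KSort
from pyk.kast.outer import read_kast_definition

defn = read_kast_definition('./imp-kompiled/compiled.json')  # hypothetical path

# The initializer's right-hand side, with configuration variables such as $PGM left in place.
init = defn.init_config(KSort('GeneratedTopCell'))
print(defn.sort_strict(init))  # KSort(name='GeneratedTopCell')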
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/outer_lexer.html b/pyk/_modules/pyk/kast/outer_lexer.html new file mode 100644 index 00000000000..e9ff368b311 --- /dev/null +++ b/pyk/_modules/pyk/kast/outer_lexer.html @@ -0,0 +1,1081 @@ + + + + + + + + pyk.kast.outer_lexer — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.outer_lexer

+  1from __future__ import annotations
+  2
+  3import re
+  4from collections.abc import Iterator
+  5from enum import Enum, auto
+  6from typing import TYPE_CHECKING, NamedTuple
+  7
+  8if TYPE_CHECKING:
+  9    from collections.abc import Collection, Generator, Iterable
+ 10    from typing import Final
+ 11
+ 12
+
+[docs] + 13class TokenType(Enum): + 14 EOF = 0 + 15 COMMA = auto() + 16 LPAREN = auto() + 17 RPAREN = auto() + 18 LBRACE = auto() + 19 RBRACE = auto() + 20 LBRACK = auto() + 21 RBRACK = auto() + 22 VBAR = auto() + 23 EQ = auto() + 24 GT = auto() + 25 PLUS = auto() + 26 TIMES = auto() + 27 QUESTION = auto() + 28 TILDE = auto() + 29 COLON = auto() + 30 DCOLONEQ = auto() + 31 KW_ALIAS = auto() + 32 KW_CLAIM = auto() + 33 KW_CONFIG = auto() + 34 KW_CONTEXT = auto() + 35 KW_ENDMODULE = auto() + 36 KW_IMPORTS = auto() + 37 KW_LEFT = auto() + 38 KW_LEXICAL = auto() + 39 KW_MODULE = auto() + 40 KW_NONASSOC = auto() + 41 KW_PRIORITY = auto() + 42 KW_PRIVATE = auto() + 43 KW_PUBLIC = auto() + 44 KW_REQUIRES = auto() + 45 KW_RIGHT = auto() + 46 KW_RULE = auto() + 47 KW_SYNTAX = auto() + 48 NAT = auto() + 49 STRING = auto() + 50 REGEX = auto() + 51 ID_LOWER = auto() + 52 ID_UPPER = auto() + 53 MODNAME = auto() + 54 KLABEL = auto() + 55 RULE_LABEL = auto() + 56 ATTR_KEY = auto() + 57 ATTR_CONTENT = auto() + 58 BUBBLE = auto()
+ + 59 + 60 +
+[docs] + 61class Loc(NamedTuple): + 62 line: int + 63 col: int + 64 + 65 def __add__(self, other: object) -> Loc: + 66 if isinstance(other, str): + 67 """Return the line,column after the additional text""" + 68 line, col = self.line, self.col + 69 for c in other: + 70 if c == '\n': + 71 line += 1 + 72 col = 0 + 73 col += 1 + 74 return Loc(line, col) + 75 return NotImplemented
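The `Loc.__add__` overload advances a location over a chunk of text; a couple of concrete values that follow directly from the code above:

from pyk.kast.outer_lexer import Loc

assert Loc(1, 0) + 'ab' == Loc(1, 2)      # same line, two characters later
assert Loc(1, 0) + 'ab\ncd' == Loc(2, 3)  # the newline starts a new line, then counting resumes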
+ + 76 + 77 + 78INIT_LOC: Final = Loc(1, 0) + 79 + 80 +
+[docs] + 81class Token(NamedTuple): + 82 text: str + 83 type: TokenType + 84 loc: Loc + 85 +
+[docs] + 86 def let(self, *, text: str | None = None, type: TokenType | None = None, loc: Loc | None = None) -> Token: + 87 text = text if text else self.text + 88 type = type if type else self.type + 89 loc = loc if loc else self.loc + 90 return Token(text=text, type=type, loc=loc)
+
+ + 91 + 92 + 93_EOF_TOKEN: Final = Token('', TokenType.EOF, INIT_LOC) + 94 + 95_SIMPLE_CHARS: Final = { + 96 ',': TokenType.COMMA, + 97 '(': TokenType.LPAREN, + 98 ')': TokenType.RPAREN, + 99 '[': TokenType.LBRACK, +100 ']': TokenType.RBRACK, +101 '>': TokenType.GT, +102 '{': TokenType.LBRACE, +103 '}': TokenType.RBRACE, +104 '|': TokenType.VBAR, +105 '=': TokenType.EQ, +106 '+': TokenType.PLUS, +107 '*': TokenType.TIMES, +108 '?': TokenType.QUESTION, +109 '~': TokenType.TILDE, +110} +111 +112_KEYWORDS: Final = { +113 'alias': TokenType.KW_ALIAS, +114 'claim': TokenType.KW_CLAIM, +115 'configuration': TokenType.KW_CONFIG, +116 'context': TokenType.KW_CONTEXT, +117 'endmodule': TokenType.KW_ENDMODULE, +118 'imports': TokenType.KW_IMPORTS, +119 'left': TokenType.KW_LEFT, +120 'lexical': TokenType.KW_LEXICAL, +121 'module': TokenType.KW_MODULE, +122 'non-assoc': TokenType.KW_NONASSOC, +123 'priority': TokenType.KW_PRIORITY, +124 'private': TokenType.KW_PRIVATE, +125 'public': TokenType.KW_PUBLIC, +126 'requires': TokenType.KW_REQUIRES, +127 'right': TokenType.KW_RIGHT, +128 'rule': TokenType.KW_RULE, +129 'syntax': TokenType.KW_SYNTAX, +130} +131 +132_WHITESPACE: Final = {' ', '\t', '\n', '\r'} +133_DIGIT: Final = set('0123456789') +134_LOWER: Final = set('abcdefghijklmnopqrstuvwxyz') +135_UPPER: Final = set('ABCDEFGHIJKLMNOPQRSTUVWXYZ') +136_ALPHA: Final = _LOWER.union(_UPPER) +137_ALNUM: Final = _ALPHA.union(_DIGIT) +138_WORD: Final = {'_'}.union(_ALNUM) +139 +140 +
+[docs] +141class State(Enum): +142 DEFAULT = auto() +143 SYNTAX = auto() +144 KLABEL = auto() +145 BUBBLE = auto() +146 CONTEXT = auto() +147 ATTR = auto() +148 MODNAME = auto()
+ +149 +150 +151_NEXT_STATE: Final = { +152 # (state, token_type): state' +153 (State.BUBBLE, TokenType.KW_CLAIM): State.BUBBLE, +154 (State.BUBBLE, TokenType.KW_CONFIG): State.BUBBLE, +155 (State.BUBBLE, TokenType.KW_CONTEXT): State.CONTEXT, +156 (State.BUBBLE, TokenType.KW_ENDMODULE): State.DEFAULT, +157 (State.BUBBLE, TokenType.KW_RULE): State.BUBBLE, +158 (State.BUBBLE, TokenType.KW_SYNTAX): State.SYNTAX, +159 (State.CONTEXT, TokenType.KW_ALIAS): State.BUBBLE, +160 (State.CONTEXT, TokenType.KW_CLAIM): State.BUBBLE, +161 (State.CONTEXT, TokenType.KW_CONFIG): State.BUBBLE, +162 (State.CONTEXT, TokenType.KW_CONTEXT): State.CONTEXT, +163 (State.CONTEXT, TokenType.KW_ENDMODULE): State.DEFAULT, +164 (State.CONTEXT, TokenType.KW_RULE): State.BUBBLE, +165 (State.CONTEXT, TokenType.KW_SYNTAX): State.SYNTAX, +166 (State.DEFAULT, TokenType.KW_CLAIM): State.BUBBLE, +167 (State.DEFAULT, TokenType.KW_CONFIG): State.BUBBLE, +168 (State.DEFAULT, TokenType.KW_CONTEXT): State.CONTEXT, +169 (State.DEFAULT, TokenType.KW_IMPORTS): State.MODNAME, +170 (State.DEFAULT, TokenType.KW_MODULE): State.MODNAME, +171 (State.DEFAULT, TokenType.KW_RULE): State.BUBBLE, +172 (State.DEFAULT, TokenType.KW_SYNTAX): State.SYNTAX, +173 (State.DEFAULT, TokenType.LBRACK): State.ATTR, +174 (State.KLABEL, TokenType.KW_CLAIM): State.BUBBLE, +175 (State.KLABEL, TokenType.KW_CONFIG): State.BUBBLE, +176 (State.KLABEL, TokenType.KW_CONTEXT): State.CONTEXT, +177 (State.KLABEL, TokenType.KW_ENDMODULE): State.DEFAULT, +178 (State.KLABEL, TokenType.KW_RULE): State.BUBBLE, +179 (State.KLABEL, TokenType.KW_SYNTAX): State.SYNTAX, +180 (State.MODNAME, TokenType.MODNAME): State.DEFAULT, +181 (State.SYNTAX, TokenType.ID_UPPER): State.DEFAULT, +182 (State.SYNTAX, TokenType.KW_LEFT): State.KLABEL, +183 (State.SYNTAX, TokenType.KW_LEXICAL): State.DEFAULT, +184 (State.SYNTAX, TokenType.KW_NONASSOC): State.KLABEL, +185 (State.SYNTAX, TokenType.KW_PRIORITY): State.KLABEL, +186 (State.SYNTAX, TokenType.KW_RIGHT): State.KLABEL, +187 (State.SYNTAX, TokenType.LBRACE): State.DEFAULT, +188} +189 +190_BUBBLY_STATES: Final = {State.BUBBLE, State.CONTEXT} +191 +192 +
+[docs] +193class LocationIterator(Iterator[str]): +194 """A string iterator which tracks the line and column information of the characters in the string.""" +195 +196 _line: int +197 _col: int +198 _iter: Iterator[str] +199 _nextline: bool +200 +201 def __init__(self, text: Iterable[str], line: int = 1, col: int = 0) -> None: +202 self._iter = iter(text) +203 self._line = line +204 self._col = col +205 self._nextline = False +206 +207 def __next__(self) -> str: +208 la = next(self._iter) +209 self._col += 1 +210 if self._nextline: +211 self._line += 1 +212 self._col = 1 +213 self._nextline = la == '\n' +214 return la +215 +216 @property +217 def loc(self) -> Loc: +218 """Return the ``(line, column)`` of the last character returned by the iterator. +219 +220 If no character has been returned yet, it will be the location that this +221 iterator was initialized with. The default is (1,0), which is the only +222 time the column will be 0. +223 """ +224 return Loc(self._line, self._col)
+ +225 +226 +
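A tiny sketch of `LocationIterator`, with values that follow from the implementation above:

from pyk.kast.outer_lexer import Loc, LocationIterator

it = LocationIterator('ab\nc')
assert next(it) == 'a' and it.loc == Loc(1, 1)
assert next(it) == 'b' and it.loc == Loc(1, 2)
assert next(it) == '\n' and it.loc == Loc(1, 3)  # the newline itself is still reported on line 1
assert next(it) == 'c' and it.loc == Loc(2, 1)   # the following character starts line 2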
+[docs] +227def outer_lexer(it: Iterable[str]) -> Iterator[Token]: +228 it = LocationIterator(it) +229 la = next(it, '') +230 state = State.DEFAULT +231 +232 while True: +233 if state in _SIMPLE_STATES: +234 token, la = _SIMPLE_STATES[state](la, it) +235 yield token +236 last_token = token +237 +238 elif state in _BUBBLY_STATES: +239 tokens, la = _bubble_or_context(la, it, context=state is State.CONTEXT) +240 yield from tokens +241 last_token = tokens[-1] +242 +243 elif state is State.ATTR: +244 la = yield from _attr(la, it) +245 state = State.DEFAULT +246 continue +247 +248 else: +249 raise AssertionError() +250 +251 if last_token.type is TokenType.EOF: +252 return +253 state = _NEXT_STATE.get((state, last_token.type), state)
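A quick sketch of driving the lexer over a trivial module; the expected token stream is inferred from the state machine above, so treat it as illustrative:

from pyk.kast.outer_lexer import outer_lexer

for token in outer_lexer('module FOO endmodule'):
    print(token.type.name, repr(token.text))
# Expected, roughly:
#   KW_MODULE 'module'
#   MODNAME 'FOO'
#   KW_ENDMODULE 'endmodule'
#   EOF ''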
+ +254 +255 +256_DEFAULT_KEYWORDS: Final = { +257 'claim', +258 'configuration', +259 'context', +260 'endmodule', +261 'import', +262 'imports', +263 'left', +264 'module', +265 'non-assoc', +266 'require', +267 'requires', +268 'right', +269 'rule', +270 'syntax', +271} +272 +273 +274def _default(la: str, it: LocationIterator) -> tuple[Token, str]: +275 la = _skip_ws_and_comments(la, it) +276 +277 if not la: +278 return Token('', TokenType.EOF, it.loc), la +279 +280 elif la in _SIMPLE_CHARS: +281 token_func = _simple_char +282 +283 elif la == '"': +284 token_func = _string +285 +286 elif la == 'r': +287 token_func = _regex_or_lower_id_or_keyword +288 +289 elif la in _DIGIT: +290 token_func = _nat +291 +292 elif la in _ALNUM: +293 token_func = _id_or_keyword +294 +295 elif la == '#': +296 token_func = _hash_id +297 +298 elif la == ':': +299 token_func = _colon_or_dcoloneq +300 +301 else: +302 raise _unexpected_character(la) +303 +304 loc = it.loc +305 text, token_type, la = token_func(la, it) +306 return Token(text, token_type, loc), la +307 +308 +309def _skip_ws_and_comments(la: str, it: Iterator[str]) -> str: +310 # Only use in states where "/" can only be lexed as comment +311 while True: +312 if la in _WHITESPACE: +313 la = next(it, '') +314 elif la == '/': +315 is_comment, consumed, la = _maybe_comment(la, it) +316 if not is_comment: +317 raise _unexpected_character(la) +318 la = next(it, '') +319 else: +320 break +321 return la +322 +323 +324def _simple_char(la: str, it: Iterator[str]) -> tuple[str, TokenType, str]: +325 # assert la in _SIMPLE_CHARS +326 +327 text = la +328 token_type = _SIMPLE_CHARS[la] +329 la = next(it, '') +330 return text, token_type, la +331 +332 +333def _nat(la: str, it: Iterator[str]) -> tuple[str, TokenType, str]: +334 # assert la in _DIGIT +335 +336 consumed = [] +337 while la in _DIGIT: +338 consumed.append(la) +339 la = next(it, '') +340 text = ''.join(consumed) +341 return text, TokenType.NAT, la +342 +343 +344def _id_or_keyword(la: str, it: Iterator[str]) -> tuple[str, TokenType, str]: +345 # assert la in _ALPHA +346 +347 if la in _LOWER: +348 token_type = TokenType.ID_LOWER +349 else: +350 token_type = TokenType.ID_UPPER +351 +352 consumed = [] +353 while la in _ALNUM or la == '-': +354 consumed.append(la) +355 la = next(it, '') +356 text = ''.join(consumed) +357 if text in _DEFAULT_KEYWORDS: +358 return text, _KEYWORDS[text], la +359 return text, token_type, la +360 +361 +362def _hash_id(la: str, it: Iterator[str]) -> tuple[str, TokenType, str]: +363 # assert la == '#' +364 +365 consumed = [la] +366 la = next(it, '') +367 +368 if la in _LOWER: +369 token_type = TokenType.ID_LOWER +370 elif la in _UPPER: +371 token_type = TokenType.ID_UPPER +372 else: +373 raise _unexpected_character(la) +374 +375 while la in _ALNUM: +376 consumed.append(la) +377 la = next(it, '') +378 text = ''.join(consumed) +379 return text, token_type, la +380 +381 +382def _colon_or_dcoloneq(la: str, it: Iterator[str]) -> tuple[str, TokenType, str]: +383 # assert la == ':' +384 +385 la = next(it, '') +386 if la != ':': +387 return ':', TokenType.COLON, la +388 la = next(it, '') +389 if la != '=': +390 raise _unexpected_character(la) # Could return [":", ":"], but that never parses +391 la = next(it, '') +392 return '::=', TokenType.DCOLONEQ, la +393 +394 +395def _string(la: str, it: Iterator) -> tuple[str, TokenType, str]: +396 # assert la == '"' +397 consumed: list[str] = [] +398 la = _consume_string(consumed, la, it) +399 return ''.join(consumed), TokenType.STRING, la +400 +401 
+402def _regex_or_lower_id_or_keyword(la: str, it: Iterator) -> tuple[str, TokenType, str]: +403 # assert la == 'r' +404 consumed = [la] +405 la = next(it, '') +406 +407 if la == '"': +408 la = _consume_string(consumed, la, it) +409 return ''.join(consumed), TokenType.REGEX, la +410 +411 while la in _ALNUM: +412 consumed.append(la) +413 la = next(it, '') +414 text = ''.join(consumed) +415 if text in _DEFAULT_KEYWORDS: +416 return text, _KEYWORDS[text], la +417 return text, TokenType.ID_LOWER, la +418 +419 +420def _consume_string(consumed: list[str], la: str, it: Iterator[str]) -> str: +421 # assert la == '"' +422 consumed.append(la) # ['"'] +423 +424 la = next(it, '') +425 while la not in {'"', '\n', ''}: +426 consumed.append(la) # ['"', ..., X] +427 if la == '\\': +428 la = next(it, '') +429 if not la or la not in {'\\', '"', 'n', 'r', 't'}: +430 raise _unexpected_character(la) +431 consumed.append(la) # ['"', ..., '//', X] +432 la = next(it, '') +433 +434 if not la or la == '\n': +435 raise _unexpected_character(la) +436 +437 consumed.append(la) # ['"', ..., '"'] +438 la = next(it, '') +439 return la +440 +441 +442_SYNTAX_KEYWORDS: Final = { +443 'left', +444 'lexical', +445 'non-assoc', +446 'priorities', +447 'priority', +448 'right', +449} +450 +451 +452def _syntax(la: str, it: LocationIterator) -> tuple[Token, str]: +453 la = _skip_ws_and_comments(la, it) +454 +455 if not la: +456 return Token('', TokenType.EOF, it.loc), la +457 +458 elif la == '{': +459 token_func = _simple_char +460 +461 elif la in _LOWER: +462 token_func = _syntax_keyword +463 +464 elif la in _UPPER: +465 token_func = _upper_id +466 +467 elif la == '#': +468 token_func = _hash_upper_id +469 +470 else: +471 raise _unexpected_character(la) +472 +473 loc = it.loc +474 text, token_type, la = token_func(la, it) +475 return Token(text, token_type, loc), la +476 +477 +478def _syntax_keyword(la: str, it: Iterator[str]) -> tuple[str, TokenType, str]: +479 if la not in _LOWER: +480 raise _unexpected_character(la) +481 +482 consumed = [] +483 while la in _ALNUM or la == '-': +484 consumed.append(la) +485 la = next(it, '') +486 text = ''.join(consumed) +487 +488 if text not in _SYNTAX_KEYWORDS: +489 raise ValueError(f'Unexpected token: {text}') +490 +491 return text, _KEYWORDS[text], la +492 +493 +494def _upper_id(la: str, it: Iterator[str]) -> tuple[str, TokenType, str]: +495 if la not in _UPPER: +496 raise _unexpected_character(la) +497 +498 consumed = [] +499 while la in _ALNUM: +500 consumed.append(la) +501 la = next(it, '') +502 text = ''.join(consumed) +503 return text, TokenType.ID_UPPER, la +504 +505 +506def _hash_upper_id(la: str, it: Iterator[str]) -> tuple[str, TokenType, str]: +507 # assert la == '#' +508 +509 consumed = [la] +510 la = next(it, '') +511 +512 if la not in _UPPER: +513 raise _unexpected_character(la) +514 +515 while la in _ALNUM: +516 consumed.append(la) +517 la = next(it, '') +518 text = ''.join(consumed) +519 return text, TokenType.ID_UPPER, la +520 +521 +522_MODNAME_KEYWORDS: Final = {'private', 'public'} +523 +524 +525def _modname(la: str, it: LocationIterator) -> tuple[Token, str]: +526 r"""Match a module name. 
+527 +528 Corresponds to regex: [a-zA-Z]\w*(-\w+)* +529 """ +530 la = _skip_ws_and_comments(la, it) +531 +532 consumed = [] +533 loc = it.loc +534 +535 if la not in _ALPHA: +536 raise _unexpected_character(la) +537 +538 consumed.append(la) +539 la = next(it, '') +540 +541 while la in _WORD: +542 consumed.append(la) +543 la = next(it, '') +544 +545 while True: +546 if la != '-': +547 break +548 +549 consumed.append(la) +550 la = next(it, '') +551 +552 if la not in _WORD: +553 raise _unexpected_character(la) +554 +555 consumed.append(la) +556 la = next(it, '') +557 +558 while la in _WORD: +559 consumed.append(la) +560 la = next(it, '') +561 +562 text = ''.join(consumed) +563 if text in _MODNAME_KEYWORDS: +564 return Token(text, _KEYWORDS[text], loc), la +565 return Token(text, TokenType.MODNAME, loc), la +566 +567 +568_KLABEL_KEYWORDS: Final = {'syntax', 'endmodule', 'rule', 'claim', 'configuration', 'context'} +569 +570 +571def _klabel(la: str, it: LocationIterator) -> tuple[Token, str]: +572 loc: Loc +573 consumed: list[str] +574 while True: +575 while la in _WHITESPACE: +576 la = next(it, '') +577 +578 if not la: +579 return Token('', TokenType.EOF, it.loc), la +580 +581 if la == '/': +582 loc = it.loc +583 is_comment, consumed, la = _maybe_comment(la, it) +584 +585 if not is_comment and len(consumed) > 1: +586 # Differs from K Frontend +587 raise ValueError('Unterminated block comment') +588 +589 if is_comment and (not la or la in _WHITESPACE): +590 continue +591 +592 break +593 +594 loc = it.loc +595 consumed = [] +596 break +597 +598 if la == '>' and not consumed: +599 consumed.append(la) +600 la = next(it, '') +601 if not la or la in _WHITESPACE: +602 return Token('>', TokenType.GT, loc), la +603 +604 while la and la not in _WHITESPACE: +605 consumed.append(la) +606 la = next(it, '') +607 +608 text = ''.join(consumed) +609 if text in _KLABEL_KEYWORDS: +610 token_type = _KEYWORDS[text] +611 else: +612 token_type = TokenType.KLABEL +613 return Token(text, token_type, loc), la +614 +615 +616_SIMPLE_STATES: Final = { +617 State.DEFAULT: _default, +618 State.SYNTAX: _syntax, +619 State.MODNAME: _modname, +620 State.KLABEL: _klabel, +621} +622 +623 +624_BUBBLE_KEYWORDS: Final = {'syntax', 'endmodule', 'rule', 'claim', 'configuration', 'context'} +625_CONTEXT_KEYWORDS: Final = {'alias'}.union(_BUBBLE_KEYWORDS) +626 +627 +628def _bubble_or_context(la: str, it: LocationIterator, *, context: bool = False) -> tuple[list[Token], str]: +629 keywords = _CONTEXT_KEYWORDS if context else _BUBBLE_KEYWORDS +630 +631 tokens: list[Token] = [] +632 +633 bubble, final_token, la, bubble_loc = _raw_bubble(la, it, keywords) +634 if bubble is not None: +635 label_tokens, bubble, bubble_loc = _strip_bubble_label(bubble, bubble_loc) +636 bubble, attr_tokens = _strip_bubble_attr(bubble, bubble_loc) +637 +638 tokens = label_tokens +639 if bubble: +640 bubble_token = Token(bubble, TokenType.BUBBLE, bubble_loc) +641 tokens += [bubble_token] +642 tokens += attr_tokens +643 +644 tokens += [final_token] +645 return tokens, la +646 +647 +648def _raw_bubble(la: str, it: LocationIterator, keywords: Collection[str]) -> tuple[str | None, Token, str, Loc]: +649 bubble: list[str] = [] # text that belongs to the bubble +650 special: list[str] = [] # text that belongs to the bubble iff preceded and followed by bubble text +651 current: list[str] = [] # text that might belong to the bubble or terminate the bubble if keyword +652 bubble_loc: Loc = it.loc +653 current_loc: Loc = it.loc +654 while True: +655 if not la or la in 
_WHITESPACE: +656 if current: +657 current_str = ''.join(current) +658 if current_str in keywords: # <special><keyword><ws> +659 return ( +660 ''.join(bubble) if bubble else None, +661 Token(current_str, _KEYWORDS[current_str], current_loc), +662 la, +663 bubble_loc, +664 ) +665 else: # <special><current><ws> +666 bubble_loc += '' if bubble else ''.join(special) +667 bubble += special if bubble else [] +668 bubble += current +669 special = [] +670 current = [] +671 current_loc = it.loc +672 +673 else: # <special><ws> +674 pass +675 +676 while la in _WHITESPACE: +677 special.append(la) +678 la = next(it, '') +679 current_loc = it.loc +680 +681 if not la: +682 return ''.join(bubble) if bubble else None, Token('', TokenType.EOF, it.loc), la, bubble_loc +683 +684 elif la == '/': +685 is_comment, consumed, la = _maybe_comment(la, it) +686 if is_comment: +687 if current: +688 current_str = ''.join(current) +689 if current_str in keywords: # <special><keyword><comment> +690 # Differs from K Frontend behavior, see: https://github.com/runtimeverification/k/issues/3501 +691 return ( +692 ''.join(bubble) if bubble else None, +693 Token(current_str, _KEYWORDS[current_str], current_loc), +694 la, +695 bubble_loc, +696 ) +697 else: # <special><current><comment> +698 bubble_loc += '' if bubble else ''.join(special) +699 bubble += special if bubble else [] +700 bubble += current +701 special = consumed +702 current = [] +703 current_loc = it.loc +704 +705 else: # <special><comment> +706 special += consumed +707 +708 else: +709 if len(consumed) > 1: # Unterminated block comment +710 # Differs from K Frontend behavior +711 raise ValueError('Unterminated block comment') +712 current += consumed +713 +714 else: # <special><current> +715 while la and la not in _WHITESPACE and la != '/': +716 current.append(la) +717 la = next(it, '') +718 +719 +720RULE_LABEL_PATTERN: Final = re.compile( +721 r'(?s)\s*(?P<lbrack>\[)\s*(?P<label>[^\[\]\_\n\r\t ]+)\s*(?P<rbrack>\])\s*(?P<colon>:)\s*(?P<rest>.*)' +722) +723 +724 +725def _strip_bubble_label(bubble: str, loc: Loc) -> tuple[list[Token], str, Loc]: +726 match = RULE_LABEL_PATTERN.fullmatch(bubble) +727 if not match: +728 return [], bubble, loc +729 +730 lbrack_loc = loc + bubble[: match.start('lbrack')] +731 label_loc = lbrack_loc + bubble[match.start('lbrack') : match.start('label')] +732 rbrack_loc = label_loc + bubble[match.start('label') : match.start('rbrack')] +733 colon_loc = rbrack_loc + bubble[match.start('rbrack') : match.start('colon')] +734 return ( +735 [ +736 Token('[', TokenType.LBRACK, lbrack_loc), +737 Token(match['label'], TokenType.RULE_LABEL, label_loc), +738 Token(']', TokenType.RBRACK, rbrack_loc), +739 Token(':', TokenType.COLON, colon_loc), +740 ], +741 match['rest'], +742 colon_loc + bubble[match.start('colon') : match.start('rest')], +743 ) +744 +745 +746def _strip_bubble_attr(bubble: str, loc: Loc) -> tuple[str, list[Token]]: +747 for i in range(len(bubble) - 1, -1, -1): +748 if bubble[i] != '[': +749 continue +750 +751 prefix = bubble[:i] +752 suffix = bubble[i + 1 :] +753 start_loc = loc + prefix +754 +755 it = LocationIterator(suffix, *start_loc) +756 la = next(it, '') +757 +758 tokens = [Token('[', TokenType.LBRACK, start_loc)] +759 attr_tokens = _attr(la, it) +760 try: +761 while True: +762 tokens.append(next(attr_tokens)) +763 except ValueError: +764 continue +765 except StopIteration as err: +766 la = err.value +767 +768 if la: +769 continue +770 +771 return prefix.rstrip(' \t\n\r'), tokens +772 +773 return bubble, [] +774 +775 +776def 
_attr(la: str, it: LocationIterator) -> Generator[Token, None, str]: +777 la = _skip_ws_and_comments(la, it) +778 if not la: +779 raise _unexpected_character(la) +780 +781 while True: +782 key, la = _attr_key(la, it) +783 yield key +784 +785 la = _skip_ws_and_comments(la, it) +786 +787 if la == '(': # TAG_STATE +788 yield Token('(', TokenType.LPAREN, it.loc) +789 la = next(it, '') +790 loc = it.loc +791 +792 if la == '"': +793 text, token_type, la = _string(la, it) +794 yield Token(text, token_type, loc) +795 else: +796 content, la = _attr_content(la, it) +797 if content: +798 # allows 'key()' +799 yield Token(content, TokenType.ATTR_CONTENT, loc) +800 +801 if la != ')': +802 raise _unexpected_character(la) +803 +804 yield Token(')', TokenType.RPAREN, it.loc) +805 +806 la = next(it, '') +807 la = _skip_ws_and_comments(la, it) +808 +809 if la != ',': +810 break +811 +812 yield Token(',', TokenType.COMMA, it.loc) +813 la = next(it, '') +814 la = _skip_ws_and_comments(la, it) +815 +816 if la != ']': +817 raise _unexpected_character(la) +818 +819 yield Token(']', TokenType.RBRACK, it.loc) +820 la = next(it, '') +821 +822 return la # noqa: B901 +823 +824 +825def _attr_key(la: str, it: LocationIterator) -> tuple[Token, str]: +826 # ["a"-"z","1"-"9"](["A"-"Z", "a"-"z", "-", "0"-"9", "."])*("<" (["A"-"Z", "a"-"z", "-", "0"-"9"])+ ">")? +827 +828 consumed: list[str] = [] +829 loc = it.loc +830 if la not in _LOWER and la not in _DIGIT: +831 raise _unexpected_character(la) +832 +833 consumed.append(la) +834 la = next(it, '') +835 +836 while la in _ALNUM or la == '-' or la == '.': +837 consumed.append(la) +838 la = next(it, '') +839 +840 if la == '<': +841 consumed.append(la) +842 la = next(it, '') +843 +844 if not la in _ALNUM and la != '-' and la != '.': +845 raise _unexpected_character(la) +846 +847 consumed.append(la) +848 la = next(it, '') +849 +850 while la in _ALNUM or la == '-' or la == '.': +851 consumed.append(la) +852 la = next(it, '') +853 +854 if la != '>': +855 raise _unexpected_character(la) +856 +857 consumed.append(la) +858 la = next(it, '') +859 +860 attr_key = ''.join(consumed) +861 return Token(attr_key, TokenType.ATTR_KEY, loc), la +862 +863 +864_ATTR_CONTENT_FORBIDDEN: Final = {'', '\n', '\r', '"'} +865 +866 +867def _attr_content(la: str, it: Iterator[str]) -> tuple[str, str]: +868 consumed: list[str] = [] +869 open_parens = 0 +870 +871 while la not in _ATTR_CONTENT_FORBIDDEN: +872 if la == ')': +873 if not open_parens: +874 break +875 open_parens -= 1 +876 +877 elif la == '(': +878 open_parens += 1 +879 +880 consumed.append(la) +881 la = next(it, '') +882 +883 if la in _ATTR_CONTENT_FORBIDDEN: +884 raise _unexpected_character(la) +885 +886 # assert la == ')' +887 +888 attr_content = ''.join(consumed) +889 return attr_content, la +890 +891 +892def _maybe_comment(la: str, it: Iterator[str]) -> tuple[bool, list[str], str]: +893 """Attempt to consume a line or block comment from the iterator. +894 +895 Expects la to be ``'/'``. +896 +897 Args: +898 la: The current lookahead. +899 it: The iterator. +900 +901 Returns: +902 A tuple ``(success, consumed, la)`` where +903 +904 - ``success``: Indicates whether `consumed` is a comment. +905 - ``consumed``: The list of consumed characters. +906 - ``la``: The current lookahead. 
+907 """ +908 assert la == '/' +909 consumed = [la] # ['/'] +910 +911 la = next(it, '') +912 if la == '': +913 return False, consumed, la +914 +915 elif la == '/': +916 consumed.append(la) # ['/', '/'] +917 la = next(it, '') +918 while la and la != '\n': +919 consumed.append(la) # ['/', '/', ..., X] +920 la = next(it, '') +921 return True, consumed, la +922 +923 elif la == '*': +924 consumed.append(la) # ['/', '*'] +925 +926 la = next(it, '') +927 while True: +928 if la == '': +929 return False, consumed, la +930 +931 elif la == '*': +932 consumed.append(la) # ['/', '*', ..., '*'] +933 +934 la = next(it, '') +935 if la == '': +936 return False, consumed, la +937 elif la == '/': +938 consumed.append(la) # ['/', '*', ..., '*', '/'] +939 la = next(it, '') +940 return True, consumed, la +941 else: +942 consumed.append(la) # ['/', '*', ..., '*', X] +943 la = next(it, '') +944 continue +945 +946 else: +947 consumed.append(la) # ['/', '*', ..., X] +948 la = next(it, '') +949 continue +950 +951 else: +952 return False, consumed, la +953 +954 +955def _unexpected_character(la: str) -> ValueError: +956 if la: +957 return ValueError(f'Unexpected character: {la!r}') +958 +959 return ValueError('Unexpected end of file') +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/outer_parser.html b/pyk/_modules/pyk/kast/outer_parser.html new file mode 100644 index 00000000000..487295d05ef --- /dev/null +++ b/pyk/_modules/pyk/kast/outer_parser.html @@ -0,0 +1,535 @@ + + + + + + + + pyk.kast.outer_parser — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.outer_parser

+  1from __future__ import annotations
+  2
+  3from typing import TYPE_CHECKING
+  4
+  5from ..dequote import dequote_string
+  6from .outer_lexer import _EOF_TOKEN, TokenType, outer_lexer
+  7from .outer_syntax import (
+  8    EMPTY_ATT,
+  9    Alias,
+ 10    Assoc,
+ 11    Att,
+ 12    Claim,
+ 13    Config,
+ 14    Context,
+ 15    Definition,
+ 16    Import,
+ 17    Lexical,
+ 18    Module,
+ 19    NonTerminal,
+ 20    PriorityBlock,
+ 21    Production,
+ 22    Require,
+ 23    Rule,
+ 24    Sort,
+ 25    SortDecl,
+ 26    SyntaxAssoc,
+ 27    SyntaxDecl,
+ 28    SyntaxDefn,
+ 29    SyntaxLexical,
+ 30    SyntaxPriority,
+ 31    SyntaxSynonym,
+ 32    Terminal,
+ 33    UserList,
+ 34)
+ 35
+ 36if TYPE_CHECKING:
+ 37    from collections.abc import Collection, Iterable, Iterator
+ 38    from pathlib import Path
+ 39    from typing import Final
+ 40
+ 41    from .outer_lexer import Token
+ 42    from .outer_syntax import ProductionItem, ProductionLike, Sentence, StringSentence, SyntaxSentence
+ 43
+ 44
+ 45_STRING_SENTENCE: Final = {
+ 46    TokenType.KW_ALIAS.value: Alias,
+ 47    TokenType.KW_CLAIM.value: Claim,
+ 48    TokenType.KW_CONFIG.value: Config,
+ 49    TokenType.KW_CONTEXT.value: Context,
+ 50    TokenType.KW_RULE.value: Rule,
+ 51}
+ 52
+ 53_ASSOC_TOKENS: Final = (TokenType.KW_LEFT, TokenType.KW_RIGHT, TokenType.KW_NONASSOC)
+ 54_PRODUCTION_TOKENS: Final = (TokenType.ID_LOWER, TokenType.ID_UPPER, TokenType.STRING, TokenType.REGEX)
+ 55_PRODUCTION_ITEM_TOKENS: Final = (TokenType.STRING, TokenType.ID_LOWER, TokenType.ID_UPPER)
+ 56_ID_TOKENS: Final = (TokenType.ID_LOWER, TokenType.ID_UPPER)
+ 57_SIMPLE_BUBBLE_TOKENS: Final = (TokenType.KW_CLAIM, TokenType.KW_CONFIG, TokenType.KW_RULE)
+ 58_SORT_DECL_TOKENS: Final = (TokenType.LBRACE, TokenType.ID_UPPER)
+ 59_USER_LIST_IDS: Final = ('List', 'NeList')
+ 60
+ 61
+
+[docs] + 62class OuterParser: + 63 _lexer: Iterator[Token] + 64 _la: Token + 65 _la2: Token + 66 _source: Path | None + 67 + 68 def __init__(self, it: Iterable[str], source: Path | None = None): + 69 self._lexer = outer_lexer(it) + 70 self._la = next(self._lexer) + 71 self._la2 = next(self._lexer, _EOF_TOKEN) + 72 self._source = source + 73 + 74 def _consume(self) -> str: + 75 res = self._la.text + 76 self._la, self._la2 = self._la2, next(self._lexer, _EOF_TOKEN) + 77 return res + 78 + 79 def _error_location_string(self, t: Token) -> str: + 80 if not self._source: + 81 return '' + 82 return f'{self._source}:{t.loc.line}:{t.loc.col}: ' + 83 + 84 def _unexpected_token(self, token: Token, expected_types: Iterable[TokenType] = ()) -> ValueError: + 85 location = '' + 86 message = f'Unexpected token: {token.type.name}' + 87 if self._source: + 88 location = f'{self._source}:{token.loc.line}:{token.loc.col}: ' + 89 if expected_types: + 90 expected = ', '.join(sorted(token_type.name for token_type in expected_types)) + 91 message = f'Expected {expected}, got: {token.type.name}' + 92 return ValueError(f'{location}{message}') + 93 + 94 def _match(self, token_type: TokenType) -> str: + 95 if self._la.type != token_type: + 96 raise self._unexpected_token(self._la, (token_type,)) + 97 # _consume() inlined for efficiency + 98 res = self._la.text + 99 self._la, self._la2 = self._la2, next(self._lexer, _EOF_TOKEN) +100 return res +101 +102 def _match_any(self, token_types: Collection[TokenType]) -> str: +103 if self._la.type not in token_types: +104 raise self._unexpected_token(self._la, token_types) +105 # _consume() inlined for efficiency +106 res = self._la.text +107 self._la, self._la2 = self._la2, next(self._lexer, _EOF_TOKEN) +108 return res +109 +
+[docs] +110 def definition(self) -> Definition: +111 requires: list[Require] = [] +112 while self._la.type is TokenType.KW_REQUIRES: +113 requires.append(self.require()) +114 +115 modules: list[Module] = [] +116 while self._la.type is TokenType.KW_MODULE: +117 modules.append(self.module()) +118 +119 return Definition(modules, requires)
+ +120 +
+[docs] +121 def require(self) -> Require: +122 self._match(TokenType.KW_REQUIRES) +123 path = _dequote_string(self._match(TokenType.STRING)) +124 return Require(path)
+ +125 +
+[docs] +126 def module(self) -> Module: +127 begin_loc = self._la.loc +128 +129 self._match(TokenType.KW_MODULE) +130 +131 name = self._match(TokenType.MODNAME) +132 att = self._maybe_att() +133 +134 imports: list[Import] = [] +135 while self._la.type is TokenType.KW_IMPORTS: +136 imports.append(self.importt()) +137 +138 sentences: list[Sentence] = [] +139 while self._la.type is not TokenType.KW_ENDMODULE: +140 sentences.append(self.sentence()) +141 +142 end_loc = self._la.loc + self._la.text +143 self._consume() +144 +145 return Module(name, sentences, imports, att, source=self._source, location=(*begin_loc, *end_loc))
+ +146 +
+[docs] +147 def importt(self) -> Import: +148 self._match(TokenType.KW_IMPORTS) +149 +150 public = True +151 if self._la.type is TokenType.KW_PRIVATE: +152 public = False +153 self._consume() +154 elif self._la.type is TokenType.KW_PUBLIC: +155 self._consume() +156 +157 module_name = self._match(TokenType.MODNAME) +158 +159 return Import(module_name, public=public)
+ +160 +
+[docs] +161 def sentence(self) -> Sentence: +162 if self._la.type is TokenType.KW_SYNTAX: +163 return self.syntax_sentence() +164 +165 return self.string_sentence()
+ +166 +
+[docs] +167 def syntax_sentence(self) -> SyntaxSentence: +168 self._match(TokenType.KW_SYNTAX) +169 +170 if self._la.type in _SORT_DECL_TOKENS: +171 decl = self._sort_decl() +172 +173 if self._la.type is TokenType.EQ: +174 self._consume() +175 sort = self._sort() +176 att = self._maybe_att() +177 return SyntaxSynonym(decl, sort, att) +178 +179 if self._la.type is TokenType.DCOLONEQ: +180 self._consume() +181 blocks: list[PriorityBlock] = [] +182 blocks.append(self._priority_block()) +183 while self._la.type is TokenType.GT: +184 self._consume() +185 blocks.append(self._priority_block()) +186 return SyntaxDefn(decl, blocks) +187 +188 att = self._maybe_att() +189 return SyntaxDecl(decl, att) +190 +191 if self._la.type is TokenType.KW_PRIORITY: +192 self._consume() +193 groups: list[list[str]] = [] +194 group: list[str] = [] +195 group.append(self._match(TokenType.KLABEL)) +196 while self._la.type is TokenType.KLABEL: +197 group.append(self._consume()) +198 groups.append(group) +199 while self._la.type is TokenType.GT: +200 self._consume() +201 group = [] +202 group.append(self._match(TokenType.KLABEL)) +203 while self._la.type is TokenType.KLABEL: +204 group.append(self._consume()) +205 groups.append(group) +206 return SyntaxPriority(groups) +207 +208 if self._la.type in _ASSOC_TOKENS: +209 assoc = Assoc(self._consume()) +210 klabels: list[str] = [] +211 klabels.append(self._match(TokenType.KLABEL)) +212 while self._la.type is TokenType.KLABEL: +213 klabels.append(self._consume()) +214 return SyntaxAssoc(assoc, klabels) +215 +216 if self._la.type is TokenType.KW_LEXICAL: +217 self._consume() +218 name = self._match(TokenType.ID_UPPER) +219 self._match(TokenType.EQ) +220 regex = _dequote_regex(self._match(TokenType.REGEX)) +221 return SyntaxLexical(name, regex) +222 +223 raise self._unexpected_token(self._la)
+ +224 +225 def _sort_decl(self) -> SortDecl: +226 params: list[str] = [] +227 if self._la.type is TokenType.LBRACE: +228 self._consume() +229 params.append(self._match(TokenType.ID_UPPER)) +230 while self._la.type is TokenType.COMMA: +231 self._consume() +232 params.append(self._match(TokenType.ID_UPPER)) +233 self._match(TokenType.RBRACE) +234 +235 name = self._match(TokenType.ID_UPPER) +236 +237 args: list[str] = [] +238 if self._la.type is TokenType.LBRACE: +239 self._consume() +240 args.append(self._match(TokenType.ID_UPPER)) +241 while self._la.type is TokenType.COMMA: +242 self._consume() +243 args.append(self._match(TokenType.ID_UPPER)) +244 self._match(TokenType.RBRACE) +245 +246 return SortDecl(name, params, args) +247 +248 def _sort(self) -> Sort: +249 name = self._match(TokenType.ID_UPPER) +250 +251 args: list[int | str] = [] +252 if self._la.type is TokenType.LBRACE: +253 self._consume() +254 if self._la.type is TokenType.NAT: +255 args.append(int(self._consume())) +256 else: +257 args.append(self._match(TokenType.ID_UPPER)) +258 +259 while self._la.type is TokenType.COMMA: +260 self._consume() +261 if self._la.type is TokenType.NAT: +262 args.append(int(self._consume())) +263 else: +264 args.append(self._match(TokenType.ID_UPPER)) +265 +266 self._match(TokenType.RBRACE) +267 +268 return Sort(name, args) +269 +270 def _priority_block(self) -> PriorityBlock: +271 assoc: Assoc | None +272 if self._la.type in _ASSOC_TOKENS: +273 assoc = Assoc(self._consume()) +274 self._match(TokenType.COLON) +275 else: +276 assoc = None +277 +278 productions: list[ProductionLike] = [] +279 productions.append(self._production_like()) +280 while self._la.type is TokenType.VBAR: +281 self._consume() +282 productions.append(self._production_like()) +283 return PriorityBlock(productions, assoc) +284 +285 def _production_like(self) -> ProductionLike: +286 if ( +287 self._la2.type is TokenType.LBRACE +288 and self._la.type is TokenType.ID_UPPER +289 and self._la.text in _USER_LIST_IDS +290 ): +291 non_empty = self._la.text[0] == 'N' +292 self._consume() +293 self._consume() +294 sort = self._match(TokenType.ID_UPPER) +295 self._match(TokenType.COMMA) +296 sep = _dequote_string(self._match(TokenType.STRING)) +297 self._match(TokenType.RBRACE) +298 att = self._maybe_att() +299 return UserList(sort, sep, non_empty, att) +300 +301 items: list[ProductionItem] = [] +302 +303 if self._la2.type is TokenType.LPAREN: +304 items.append(Terminal(self._match_any(_ID_TOKENS))) +305 items.append(Terminal(self._consume())) +306 while self._la.type is not TokenType.RPAREN: +307 items.append(self._non_terminal()) +308 if self._la.type is TokenType.COMMA: +309 items.append(Terminal(self._consume())) +310 continue +311 break +312 items.append(Terminal(self._match(TokenType.RPAREN))) +313 +314 else: +315 items.append(self._production_item()) +316 while self._la.type in _PRODUCTION_ITEM_TOKENS: +317 items.append(self._production_item()) +318 +319 att = self._maybe_att() +320 return Production(items, att) +321 +322 def _production_item(self) -> ProductionItem: +323 if self._la.type is TokenType.STRING: +324 return Terminal(_dequote_string(self._consume())) +325 +326 if self._la.type is TokenType.REGEX: +327 return Lexical(_dequote_regex(self._consume())) +328 +329 return self._non_terminal() +330 +331 def _non_terminal(self) -> NonTerminal: +332 name: str +333 if self._la.type is TokenType.ID_LOWER or self._la2.type is TokenType.COLON: +334 name = self._match_any(_ID_TOKENS) +335 self._match(TokenType.COLON) +336 else: +337 
name = '' +338 +339 sort = self._sort() +340 return NonTerminal(sort, name) +341 +
+[docs] +342 def string_sentence(self) -> StringSentence: +343 cls_key = self._la.type.value +344 +345 if self._la.type is TokenType.KW_CONTEXT: +346 self._consume() +347 if self._la.type is TokenType.KW_ALIAS: +348 cls_key = self._la.type.value +349 self._consume() +350 else: +351 self._match_any(_SIMPLE_BUBBLE_TOKENS) +352 +353 cls = _STRING_SENTENCE[cls_key] +354 +355 label: str +356 if self._la.type == TokenType.LBRACK: +357 self._consume() +358 label = self._match(TokenType.RULE_LABEL) +359 self._match(TokenType.RBRACK) +360 self._match(TokenType.COLON) +361 else: +362 label = '' +363 +364 bubble = self._match(TokenType.BUBBLE) +365 att = self._maybe_att() +366 return cls(bubble, label, att)
+ +367 +368 def _maybe_att(self) -> Att: +369 items: list[tuple[str, str]] = [] +370 +371 if self._la.type is not TokenType.LBRACK: +372 return EMPTY_ATT +373 +374 self._consume() +375 +376 while True: +377 key = self._match(TokenType.ATTR_KEY) +378 +379 value: str +380 if self._la.type == TokenType.LPAREN: +381 self._consume() +382 match self._la.type: +383 case TokenType.ATTR_CONTENT: +384 value = self._consume() +385 case TokenType.STRING: +386 value = _dequote_string(self._consume()) +387 case _: +388 value = '' +389 self._match(TokenType.RPAREN) +390 else: +391 value = '' +392 +393 items.append((key, value)) +394 +395 if self._la.type != TokenType.COMMA: +396 break +397 else: +398 self._consume() +399 +400 self._match(TokenType.RBRACK) +401 +402 return Att(items)
+ +403 +404 +405def _dequote_string(s: str) -> str: +406 return dequote_string(s[1:-1]) +407 +408 +409def _dequote_regex(s: str) -> str: +410 return dequote_string(s[2:-1]) +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/outer_syntax.html b/pyk/_modules/pyk/kast/outer_syntax.html new file mode 100644 index 00000000000..7d988a430e6 --- /dev/null +++ b/pyk/_modules/pyk/kast/outer_syntax.html @@ -0,0 +1,489 @@ + + + + + + + + pyk.kast.outer_syntax — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.outer_syntax

+  1from __future__ import annotations
+  2
+  3from abc import ABC
+  4from collections.abc import Sequence
+  5from dataclasses import dataclass, field
+  6from enum import Enum
+  7from typing import TYPE_CHECKING, final, overload
+  8
+  9if TYPE_CHECKING:
+ 10    from collections.abc import Iterable
+ 11    from pathlib import Path
+ 12    from typing import Any, Final
+ 13
+ 14
+
+[docs] + 15@dataclass(frozen=True) + 16class AST(ABC): + 17 source: Path | None = field(default=None, kw_only=True) + 18 location: tuple[int, int, int, int] | None = field(default=None, kw_only=True)
+ + 19 + 20 +
+[docs] + 21@final + 22@dataclass(frozen=True) + 23class Att(AST, Sequence[tuple[str, str]]): + 24 items: tuple[tuple[str, str], ...] + 25 + 26 def __init__(self, items: Iterable[tuple[str, str]] = ()): + 27 object.__setattr__(self, 'items', tuple(items)) + 28 + 29 @overload + 30 def __getitem__(self, key: int) -> tuple[str, str]: ... + 31 + 32 @overload + 33 def __getitem__(self, key: slice) -> tuple[tuple[str, str], ...]: ... + 34 + 35 def __getitem__(self, key: Any) -> Any: + 36 return self.items[key] + 37 + 38 def __len__(self) -> int: + 39 return len(self.items)
+ + 40 + 41 + 42EMPTY_ATT: Final = Att() + 43 + 44 +
+[docs] + 45class Sentence(AST, ABC): ...
+ + 46 + 47 +
+[docs] + 48class SyntaxSentence(Sentence, ABC): ...
+ + 49 + 50 +
+[docs] + 51class Assoc(Enum): + 52 LEFT = 'left' + 53 RIGHT = 'right' + 54 NON_ASSOC = 'non-assoc'
+ + 55 + 56 +
+[docs] + 57@final + 58@dataclass(frozen=True) + 59class SortDecl(AST): + 60 name: str + 61 params: tuple[str, ...] + 62 args: tuple[str, ...] + 63 + 64 def __init__(self, name: str, params: Iterable[str] = (), args: Iterable[str] = ()): + 65 object.__setattr__(self, 'name', name) + 66 object.__setattr__(self, 'params', tuple(params)) + 67 object.__setattr__(self, 'args', tuple(args))
+ + 68 + 69 +
+[docs] + 70@final + 71@dataclass(frozen=True) + 72class Sort(AST): + 73 name: str + 74 args: tuple[int | str, ...] + 75 + 76 def __init__(self, name: str, args: Iterable[int | str] = ()): + 77 object.__setattr__(self, 'name', name) + 78 object.__setattr__(self, 'args', tuple(args))
+ + 79 + 80 +
+[docs] + 81@final + 82@dataclass(frozen=True) + 83class SyntaxDecl(SyntaxSentence): + 84 decl: SortDecl + 85 att: Att = field(default=EMPTY_ATT)
+ + 86 + 87 +
+[docs] + 88@final + 89@dataclass(frozen=True) + 90class SyntaxDefn(SyntaxSentence): + 91 decl: SortDecl + 92 blocks: tuple[PriorityBlock, ...] + 93 + 94 def __init__(self, decl: SortDecl, blocks: Iterable[PriorityBlock] = ()): + 95 object.__setattr__(self, 'decl', decl) + 96 object.__setattr__(self, 'blocks', tuple(blocks))
+ + 97 + 98 +
+[docs] + 99@final +100@dataclass(frozen=True) +101class PriorityBlock(AST): +102 productions: tuple[ProductionLike, ...] +103 assoc: Assoc | None +104 +105 def __init__(self, productions: Iterable[ProductionLike], assoc: Assoc | None = None): +106 object.__setattr__(self, 'productions', tuple(productions)) +107 object.__setattr__(self, 'assoc', assoc)
+ +108 +109 +
+[docs] +110class ProductionLike(AST, ABC): +111 att: Att
+ +112 +113 +
+[docs] +114@final +115@dataclass(frozen=True) +116class Production(ProductionLike): +117 items: tuple[ProductionItem, ...] +118 att: Att = field(default=EMPTY_ATT) +119 +120 def __init__(self, items: Iterable[ProductionItem], att: Att = EMPTY_ATT): +121 object.__setattr__(self, 'items', tuple(items)) +122 object.__setattr__(self, 'att', att)
+ +123 +124 +
+[docs] +125class ProductionItem(AST, ABC): ...
+ +126 +127 +
+[docs] +128@final +129@dataclass(frozen=True) +130class Terminal(ProductionItem): +131 value: str
+ +132 +133 +
+[docs] +134@final +135@dataclass(frozen=True) +136class NonTerminal(ProductionItem): +137 sort: Sort +138 name: str = field(default='')
+ +139 +140 +
+[docs] +141@final +142@dataclass(frozen=True) +143class Lexical(ProductionItem): +144 regex: str
+ +145 +146 +
+[docs] +147@final +148@dataclass(frozen=True) +149class UserList(ProductionLike): +150 sort: str +151 sep: str +152 non_empty: bool = field(default=False) +153 att: Att = field(default=EMPTY_ATT)
+ +154 +155 +
+[docs] +156@final +157@dataclass(frozen=True) +158class SyntaxSynonym(SyntaxSentence): +159 new: SortDecl +160 old: Sort +161 att: Att = field(default=EMPTY_ATT)
+ +162 +163 +
+[docs] +164@final +165@dataclass(frozen=True) +166class SyntaxPriority(SyntaxSentence): +167 groups: tuple[tuple[str, ...], ...] +168 +169 def __init__(self, groups: Iterable[Iterable[str]]): +170 object.__setattr__(self, 'groups', tuple(tuple(group) for group in groups))
+ +171 +172 +
+[docs] +173@final +174@dataclass(frozen=True) +175class SyntaxAssoc(SyntaxSentence): +176 assoc: Assoc +177 klabels: tuple[str, ...] +178 +179 def __init__(self, assoc: Assoc, klabels: Iterable[str]): +180 object.__setattr__(self, 'assoc', assoc) +181 object.__setattr__(self, 'klabels', tuple(klabels))
+ +182 +183 +
+[docs] +184@final +185@dataclass(frozen=True) +186class SyntaxLexical(SyntaxSentence): +187 name: str +188 regex: str
+ +189 +190 +
+[docs] +191class StringSentence(Sentence, ABC): +192 _prefix: str +193 +194 bubble: str +195 label: str +196 att: Att
+ +197 +198 +
+[docs] +199@final +200@dataclass(frozen=True) +201class Rule(StringSentence): +202 _prefix = 'rule' +203 +204 bubble: str +205 label: str = field(default='') +206 att: Att = field(default=EMPTY_ATT)
+ +207 +208 +
+[docs] +209@final +210@dataclass(frozen=True) +211class Claim(StringSentence): +212 _prefix = 'claim' +213 +214 bubble: str +215 label: str = field(default='') +216 att: Att = field(default=EMPTY_ATT)
+ +217 +218 +
+[docs] +219@final +220@dataclass(frozen=True) +221class Config(StringSentence): +222 _prefix = 'configuration' +223 +224 bubble: str +225 label: str = field(default='') +226 att: Att = field(default=EMPTY_ATT)
+ +227 +228 +
+[docs] +229@final +230@dataclass(frozen=True) +231class Context(StringSentence): +232 _prefix = 'context' +233 +234 bubble: str +235 label: str = field(default='') +236 att: Att = field(default=EMPTY_ATT)
+ +237 +238 +
+[docs] +239@final +240@dataclass(frozen=True) +241class Alias(StringSentence): +242 _prefix = 'context alias' +243 +244 bubble: str +245 label: str = field(default='') +246 att: Att = field(default=EMPTY_ATT)
+ +247 +248 +
+[docs] +249@final +250@dataclass(frozen=True) +251class Import(AST): +252 module_name: str +253 public: bool = field(default=True, kw_only=True)
+ +254 +255 +
+[docs] +256@final +257@dataclass(frozen=True) +258class Module(AST): +259 name: str +260 sentences: tuple[Sentence, ...] +261 imports: tuple[Import, ...] +262 att: Att +263 +264 def __init__( +265 self, +266 name: str, +267 sentences: Iterable[Sentence] = (), +268 imports: Iterable[Import] = (), +269 att: Att = EMPTY_ATT, +270 source: Path | None = None, +271 location: tuple[int, int, int, int] | None = None, +272 ): +273 object.__setattr__(self, 'name', name) +274 object.__setattr__(self, 'sentences', tuple(sentences)) +275 object.__setattr__(self, 'imports', tuple(imports)) +276 object.__setattr__(self, 'att', att) +277 object.__setattr__(self, 'source', source) +278 object.__setattr__(self, 'location', location)
+ +279 +280 +
+[docs] +281@final +282@dataclass(frozen=True) +283class Require(AST): +284 path: str
+ +285 +286 +
+[docs] +287@final +288@dataclass(frozen=True) +289class Definition(AST): +290 modules: tuple[Module, ...] +291 requires: tuple[Require, ...] +292 +293 def __init__(self, modules: Iterable[Module] = (), requires: Iterable[Require] = ()): +294 object.__setattr__(self, 'modules', tuple(modules)) +295 object.__setattr__(self, 'requires', tuple(requires))
+ +
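To make the relationships between these dataclasses concrete, here is a small hand-built example (not taken from the pyk test suite) that assembles a one-module definition; the import path pyk.kast.outer_syntax and the module contents are assumptions.

from pyk.kast.outer_syntax import (
    Definition,
    Import,
    Module,
    NonTerminal,
    PriorityBlock,
    Production,
    Require,
    Rule,
    Sort,
    SortDecl,
    SyntaxDefn,
    Terminal,
)

# syntax Exp ::= Exp "+" Exp
exp_syntax = SyntaxDefn(
    SortDecl('Exp'),
    [PriorityBlock([Production([NonTerminal(Sort('Exp')), Terminal('+'), NonTerminal(Sort('Exp'))])])],
)

# Rule bodies are kept as unparsed bubbles at the outer-syntax level.
plus_rule = Rule('E1 + E2 => E1 +Int E2', label='exp-plus')

module = Module('EXP', sentences=[exp_syntax, plus_rule], imports=[Import('INT', public=True)])
definition = Definition(modules=[module], requires=[Require('domains.md')])

assert definition.modules[0].sentences[0].decl.name == 'Exp'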
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/parser.html b/pyk/_modules/pyk/kast/parser.html new file mode 100644 index 00000000000..4089f50d663 --- /dev/null +++ b/pyk/_modules/pyk/kast/parser.html @@ -0,0 +1,245 @@ + + + + + + + + pyk.kast.parser — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.parser

+  1from __future__ import annotations
+  2
+  3import re
+  4from typing import TYPE_CHECKING
+  5
+  6from .inner import KApply, KLabel, KSequence, KToken, KVariable
+  7from .lexer import TokenType, lexer
+  8
+  9if TYPE_CHECKING:
+ 10    from collections.abc import Iterable, Iterator
+ 11    from typing import Final
+ 12
+ 13    from . import KInner
+ 14    from .lexer import Token
+ 15
+ 16
+ 17TT = TokenType
+ 18
+ 19
+
+[docs] + 20class KAstParser: + 21 _it: Iterator[Token] + 22 _la: Token + 23 + 24 def __init__(self, it: Iterable[str]): + 25 self._it = lexer(it) + 26 self._la = next(self._it) + 27 + 28 def _consume(self) -> str: + 29 text = self._la.text + 30 self._la = next(self._it) + 31 return text + 32 + 33 def _match(self, expected: TokenType) -> str: + 34 if self._la.type is not expected: + 35 raise self._unexpected_token(self._la, [expected]) + 36 text = self._la.text + 37 self._la = next(self._it) + 38 return text + 39 + 40 @staticmethod + 41 def _unexpected_token(token: Token, expected: Iterable[TokenType] = ()) -> ValueError: + 42 types = sorted(expected, key=lambda typ: typ.name) + 43 + 44 if not types: + 45 return ValueError(f'Unexpected token: {token.text!r}') + 46 + 47 if len(types) == 1: + 48 typ = types[0] + 49 return ValueError(f'Unexpected token: {token.text!r}. Expected: {typ.name}') + 50 + 51 type_str = ', '.join(typ.name for typ in types) + 52 return ValueError(f'Unexpected token: {token.text!r}. Expected one of: {type_str}') + 53 +
+[docs] + 54 def eof(self) -> bool: + 55 return self._la.type is TT.EOF
+ + 56 +
+[docs] + 57 def k(self) -> KInner: + 58 if self._la.type is TT.DOTK: + 59 self._consume() + 60 return KSequence() + 61 + 62 items = [self.kitem()] + 63 while self._la.type is TT.KSEQ: + 64 self._consume() + 65 items.append(self.kitem()) + 66 + 67 if len(items) > 1: + 68 return KSequence(items) + 69 + 70 return items[0]
+ + 71 +
+[docs] + 72 def kitem(self) -> KInner: + 73 match self._la.type: + 74 case TT.VARIABLE: + 75 name = self._consume() + 76 sort: str | None = None + 77 if self._la.type is TT.COLON: + 78 self._consume() + 79 sort = self._match(TT.SORT) + 80 return KVariable(name, sort) + 81 + 82 case TT.TOKEN: + 83 self._consume() + 84 self._match(TT.LPAREN) + 85 token = _unquote(self._match(TT.STRING)) + 86 self._match(TT.COMMA) + 87 sort = _unquote(self._match(TT.STRING)) + 88 self._match(TT.RPAREN) + 89 return KToken(token, sort) + 90 + 91 case TT.ID | TT.KLABEL: + 92 label = self.klabel() + 93 self._match(TT.LPAREN) + 94 args = self.klist() + 95 self._match(TT.RPAREN) + 96 return KApply(label, args) + 97 + 98 case _: + 99 raise self._unexpected_token(self._la, [TT.VARIABLE, TT.TOKEN, TT.ID, TT.KLABEL])
+ +100 +
+[docs] +101 def klabel(self) -> KLabel: +102 match self._la.type: +103 case TT.ID: +104 return KLabel(self._consume()) +105 case TT.KLABEL: +106 return KLabel(_unquote(self._consume())) +107 case _: +108 raise self._unexpected_token(self._la, [TT.ID, TT.KLABEL])
+ +109 +
+[docs] +110 def klist(self) -> list[KInner]: +111 if self._la.type is TT.DOTKLIST: +112 self._consume() +113 return [] +114 +115 res = [self.k()] +116 while self._la.type is TT.COMMA: +117 self._consume() +118 res.append(self.k()) +119 return res
+
+ +120 +121 +122_UNQUOTE_PATTERN: Final = re.compile(r'\\.') +123 +124 +125def _unquote(s: str) -> str: +126 return _UNQUOTE_PATTERN.sub(lambda m: m.group(0)[1], s[1:-1]) +
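A short usage sketch for the parser above. The textual KAST syntax shown (a label applied to comma-separated arguments, #token(...) for tokens, Name:Sort for variables, .K for the empty K sequence) is assumed from the token types the parser matches; the raw string is handed straight to the lexer, which accepts any Iterable[str].

from pyk.kast.parser import KAstParser

term = KAstParser('foo(X:Int,#token("1","Int"))').k()
# term == KApply('foo', [KVariable('X', 'Int'), KToken('1', 'Int')])

empty = KAstParser('.K').k()
# empty == KSequence()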
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/pretty.html b/pyk/_modules/pyk/kast/pretty.html new file mode 100644 index 00000000000..9c9be45376f --- /dev/null +++ b/pyk/_modules/pyk/kast/pretty.html @@ -0,0 +1,544 @@ + + + + + + + + pyk.kast.pretty — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.pretty

+  1from __future__ import annotations
+  2
+  3import logging
+  4from collections.abc import Callable
+  5from functools import cached_property
+  6from typing import TYPE_CHECKING
+  7
+  8from ..prelude.kbool import TRUE
+  9from .att import Atts, KAtt
+ 10from .inner import KApply, KAs, KInner, KLabel, KRewrite, KSequence, KSort, KToken, KVariable
+ 11from .manip import flatten_label, sort_ac_collections, undo_aliases
+ 12from .outer import (
+ 13    KBubble,
+ 14    KClaim,
+ 15    KContext,
+ 16    KDefinition,
+ 17    KFlatModule,
+ 18    KImport,
+ 19    KNonTerminal,
+ 20    KOuter,
+ 21    KProduction,
+ 22    KRegexTerminal,
+ 23    KRequire,
+ 24    KRule,
+ 25    KRuleLike,
+ 26    KSortSynonym,
+ 27    KSyntaxAssociativity,
+ 28    KSyntaxLexical,
+ 29    KSyntaxPriority,
+ 30    KSyntaxSort,
+ 31    KTerminal,
+ 32)
+ 33
+ 34if TYPE_CHECKING:
+ 35    from collections.abc import Iterable
+ 36    from typing import Any, Final, TypeVar
+ 37
+ 38    from .kast import KAst
+ 39
+ 40    RL = TypeVar('RL', bound='KRuleLike')
+ 41
+ 42_LOGGER: Final = logging.getLogger(__name__)
+ 43
+ 44SymbolTable = dict[str, Callable[..., str]]
+ 45
+ 46
+
+[docs] + 47class PrettyPrinter: + 48 definition: KDefinition + 49 _extra_unparsing_modules: Iterable[KFlatModule] + 50 _patch_symbol_table: Callable[[SymbolTable], None] | None + 51 _unalias: bool + 52 _sort_collections: bool + 53 + 54 def __init__( + 55 self, + 56 definition: KDefinition, + 57 extra_unparsing_modules: Iterable[KFlatModule] = (), + 58 patch_symbol_table: Callable[[SymbolTable], None] | None = None, + 59 unalias: bool = True, + 60 sort_collections: bool = False, + 61 ): + 62 self.definition = definition + 63 self._extra_unparsing_modules = extra_unparsing_modules + 64 self._patch_symbol_table = patch_symbol_table + 65 self._unalias = unalias + 66 self._sort_collections = sort_collections + 67 + 68 @cached_property + 69 def symbol_table(self) -> SymbolTable: + 70 symb_table = build_symbol_table( + 71 self.definition, + 72 extra_modules=self._extra_unparsing_modules, + 73 opinionated=True, + 74 ) + 75 if self._patch_symbol_table is not None: + 76 self._patch_symbol_table(symb_table) + 77 return symb_table + 78 +
+[docs] + 79 def print(self, kast: KAst) -> str: + 80 """Print out KAST terms/outer syntax. + 81 + 82 Args: + 83 kast: KAST term to print. + 84 + 85 Returns: + 86 Best-effort string representation of KAST term. + 87 """ + 88 _LOGGER.debug(f'Unparsing: {kast}') + 89 if type(kast) is KAtt: + 90 return self._print_katt(kast) + 91 if type(kast) is KSort: + 92 return self._print_ksort(kast) + 93 if type(kast) is KLabel: + 94 return self._print_klabel(kast) + 95 elif isinstance(kast, KOuter): + 96 return self._print_kouter(kast) + 97 elif isinstance(kast, KInner): + 98 if self._unalias: + 99 kast = undo_aliases(self.definition, kast) +100 if self._sort_collections: +101 kast = sort_ac_collections(kast) +102 return self._print_kinner(kast) +103 raise AssertionError(f'Error unparsing: {kast}')
+ +104 +105 def _print_kouter(self, kast: KOuter) -> str: +106 match kast: +107 case KTerminal(): +108 return self._print_kterminal(kast) +109 case KRegexTerminal(): +110 return self._print_kregexterminal(kast) +111 case KNonTerminal(): +112 return self._print_knonterminal(kast) +113 case KProduction(): +114 return self._print_kproduction(kast) +115 case KSyntaxSort(): +116 return self._print_ksyntaxsort(kast) +117 case KSortSynonym(): +118 return self._print_ksortsynonym(kast) +119 case KSyntaxLexical(): +120 return self._print_ksyntaxlexical(kast) +121 case KSyntaxAssociativity(): +122 return self._print_ksyntaxassociativity(kast) +123 case KSyntaxPriority(): +124 return self._print_ksyntaxpriority(kast) +125 case KBubble(): +126 return self._print_kbubble(kast) +127 case KRule(): +128 return self._print_krule(kast) +129 case KClaim(): +130 return self._print_kclaim(kast) +131 case KContext(): +132 return self._print_kcontext(kast) +133 case KImport(): +134 return self._print_kimport(kast) +135 case KFlatModule(): +136 return self._print_kflatmodule(kast) +137 case KRequire(): +138 return self._print_krequire(kast) +139 case KDefinition(): +140 return self._print_kdefinition(kast) +141 case _: +142 raise AssertionError(f'Error unparsing: {kast}') +143 +144 def _print_kinner(self, kast: KInner) -> str: +145 match kast: +146 case KVariable(): +147 return self._print_kvariable(kast) +148 case KToken(): +149 return self._print_ktoken(kast) +150 case KApply(): +151 return self._print_kapply(kast) +152 case KAs(): +153 return self._print_kas(kast) +154 case KRewrite(): +155 return self._print_krewrite(kast) +156 case KSequence(): +157 return self._print_ksequence(kast) +158 case _: +159 raise AssertionError(f'Error unparsing: {kast}') +160 +161 def _print_ksort(self, ksort: KSort) -> str: +162 return ksort.name +163 +164 def _print_klabel(self, klabel: KLabel) -> str: +165 return klabel.name +166 +167 def _print_kvariable(self, kvariable: KVariable) -> str: +168 sort = kvariable.sort +169 if not sort: +170 return kvariable.name +171 return kvariable.name + ':' + sort.name +172 +173 def _print_ktoken(self, ktoken: KToken) -> str: +174 return ktoken.token +175 +176 def _print_kapply(self, kapply: KApply) -> str: +177 label = kapply.label.name +178 args = kapply.args +179 unparsed_args = [self._print_kinner(arg) for arg in args] +180 if kapply.is_cell: +181 cell_contents = '\n'.join(unparsed_args).rstrip() +182 cell_str = label + '\n' + indent(cell_contents) + '\n</' + label[1:] +183 return cell_str.rstrip() +184 unparser = self._applied_label_str(label) if label not in self.symbol_table else self.symbol_table[label] +185 return unparser(*unparsed_args) +186 +187 def _print_kas(self, kas: KAs) -> str: +188 pattern_str = self._print_kinner(kas.pattern) +189 alias_str = self._print_kinner(kas.alias) +190 return pattern_str + ' #as ' + alias_str +191 +192 def _print_krewrite(self, krewrite: KRewrite) -> str: +193 lhs_str = self._print_kinner(krewrite.lhs) +194 rhs_str = self._print_kinner(krewrite.rhs) +195 return '( ' + lhs_str + ' => ' + rhs_str + ' )' +196 +197 def _print_ksequence(self, ksequence: KSequence) -> str: +198 if ksequence.arity == 0: +199 # TODO: Would be nice to say `return self._print_kinner(EMPTY_K)` +200 return '.K' +201 if ksequence.arity == 1: +202 return self._print_kinner(ksequence.items[0]) + ' ~> .K' +203 unparsed_k_seq = '\n~> '.join([self._print_kinner(item) for item in ksequence.items[0:-1]]) +204 if ksequence.items[-1] == KToken('...', KSort('K')): +205 unparsed_k_seq = 
unparsed_k_seq + '\n' + self._print_kinner(KToken('...', KSort('K'))) +206 else: +207 unparsed_k_seq = unparsed_k_seq + '\n~> ' + self._print_kinner(ksequence.items[-1]) +208 return unparsed_k_seq +209 +210 def _print_kterminal(self, kterminal: KTerminal) -> str: +211 return '"' + kterminal.value + '"' +212 +213 def _print_kregexterminal(self, kregexterminal: KRegexTerminal) -> str: +214 return 'r"' + kregexterminal.regex + '"' +215 +216 def _print_knonterminal(self, knonterminal: KNonTerminal) -> str: +217 return self.print(knonterminal.sort) +218 +219 def _print_kproduction(self, kproduction: KProduction) -> str: +220 syntax_str = 'syntax ' + self.print(kproduction.sort) +221 if kproduction.items: +222 syntax_str += ' ::= ' + ' '.join([self._print_kouter(pi) for pi in kproduction.items]) +223 att_str = self.print(kproduction.att) +224 if att_str: +225 syntax_str += ' ' + att_str +226 return syntax_str +227 +228 def _print_ksyntaxsort(self, ksyntaxsort: KSyntaxSort) -> str: +229 sort_str = self.print(ksyntaxsort.sort) +230 att_str = self.print(ksyntaxsort.att) +231 return 'syntax ' + sort_str + ' ' + att_str +232 +233 def _print_ksortsynonym(self, ksortsynonym: KSortSynonym) -> str: +234 new_sort_str = self.print(ksortsynonym.new_sort) +235 old_sort_str = self.print(ksortsynonym.old_sort) +236 att_str = self.print(ksortsynonym.att) +237 return 'syntax ' + new_sort_str + ' = ' + old_sort_str + ' ' + att_str +238 +239 def _print_ksyntaxlexical(self, ksyntaxlexical: KSyntaxLexical) -> str: +240 name_str = ksyntaxlexical.name +241 regex_str = ksyntaxlexical.regex +242 att_str = self.print(ksyntaxlexical.att) +243 # todo: proper escaping +244 return 'syntax lexical ' + name_str + ' = r"' + regex_str + '" ' + att_str +245 +246 def _print_ksyntaxassociativity(self, ksyntaxassociativity: KSyntaxAssociativity) -> str: +247 assoc_str = ksyntaxassociativity.assoc.value +248 tags_str = ' '.join(ksyntaxassociativity.tags) +249 att_str = self.print(ksyntaxassociativity.att) +250 return 'syntax associativity ' + assoc_str + ' ' + tags_str + ' ' + att_str +251 +252 def _print_ksyntaxpriority(self, ksyntaxpriority: KSyntaxPriority) -> str: +253 priorities_str = ' > '.join([' '.join(group) for group in ksyntaxpriority.priorities]) +254 att_str = self.print(ksyntaxpriority.att) +255 return 'syntax priority ' + priorities_str + ' ' + att_str +256 +257 def _print_kbubble(self, kbubble: KBubble) -> str: +258 body = '// KBubble(' + kbubble.sentence_type + ', ' + kbubble.contents + ')' +259 att_str = self.print(kbubble.att) +260 return body + ' ' + att_str +261 +262 def _print_krule(self, kterm: KRule) -> str: +263 body = '\n '.join(self.print(kterm.body).split('\n')) +264 rule_str = 'rule ' +265 if Atts.LABEL in kterm.att: +266 rule_str = rule_str + '[' + kterm.att[Atts.LABEL] + ']:' +267 rule_str = rule_str + ' ' + body +268 atts_str = self.print(kterm.att) +269 if kterm.requires != TRUE: +270 requires_str = 'requires ' + '\n '.join(self._print_kast_bool(kterm.requires).split('\n')) +271 rule_str = rule_str + '\n ' + requires_str +272 if kterm.ensures != TRUE: +273 ensures_str = 'ensures ' + '\n '.join(self._print_kast_bool(kterm.ensures).split('\n')) +274 rule_str = rule_str + '\n ' + ensures_str +275 return rule_str + '\n ' + atts_str +276 +277 def _print_kclaim(self, kterm: KClaim) -> str: +278 body = '\n '.join(self.print(kterm.body).split('\n')) +279 rule_str = 'claim ' +280 if Atts.LABEL in kterm.att: +281 rule_str = rule_str + '[' + kterm.att[Atts.LABEL] + ']:' +282 rule_str = rule_str + ' ' + body +283 
atts_str = self.print(kterm.att) +284 if kterm.requires != TRUE: +285 requires_str = 'requires ' + '\n '.join(self._print_kast_bool(kterm.requires).split('\n')) +286 rule_str = rule_str + '\n ' + requires_str +287 if kterm.ensures != TRUE: +288 ensures_str = 'ensures ' + '\n '.join(self._print_kast_bool(kterm.ensures).split('\n')) +289 rule_str = rule_str + '\n ' + ensures_str +290 return rule_str + '\n ' + atts_str +291 +292 def _print_kcontext(self, kcontext: KContext) -> str: +293 body = indent(self.print(kcontext.body)) +294 context_str = 'context alias ' + body +295 requires_str = '' +296 atts_str = self.print(kcontext.att) +297 if kcontext.requires != TRUE: +298 requires_str = self.print(kcontext.requires) +299 requires_str = 'requires ' + indent(requires_str) +300 return context_str + '\n ' + requires_str + '\n ' + atts_str +301 +302 def _print_katt(self, katt: KAtt) -> str: +303 return katt.pretty +304 +305 def _print_kimport(self, kimport: KImport) -> str: +306 return ' '.join(['imports', ('public' if kimport.public else 'private'), kimport.name]) +307 +308 def _print_kflatmodule(self, kflatmodule: KFlatModule) -> str: +309 name = kflatmodule.name +310 imports = '\n'.join([self._print_kouter(kimport) for kimport in kflatmodule.imports]) +311 sentences = '\n\n'.join([self._print_kouter(sentence) for sentence in kflatmodule.sentences]) +312 contents = imports + '\n\n' + sentences +313 return 'module ' + name + '\n ' + '\n '.join(contents.split('\n')) + '\n\nendmodule' +314 +315 def _print_krequire(self, krequire: KRequire) -> str: +316 return 'requires "' + krequire.require + '"' +317 +318 def _print_kdefinition(self, kdefinition: KDefinition) -> str: +319 requires = '\n'.join([self._print_kouter(require) for require in kdefinition.requires]) +320 modules = '\n\n'.join([self._print_kouter(module) for module in kdefinition.all_modules]) +321 return requires + '\n\n' + modules +322 +323 def _print_kast_bool(self, kast: KAst) -> str: +324 """Print out KAST requires/ensures clause. +325 +326 Args: +327 kast: KAST Bool for requires/ensures clause. +328 +329 Returns: +330 Best-effort string representation of KAST term. +331 """ +332 _LOGGER.debug(f'_print_kast_bool: {kast}') +333 if type(kast) is KApply and kast.label.name in ['_andBool_', '_orBool_']: +334 clauses = [self._print_kast_bool(c) for c in flatten_label(kast.label.name, kast)] +335 head = kast.label.name.replace('_', ' ') +336 if head == ' orBool ': +337 head = ' orBool ' +338 separator = ' ' * (len(head) - 7) +339 spacer = ' ' * len(head) +340 +341 def join_sep(s: str) -> str: +342 return ('\n' + separator).join(s.split('\n')) +343 +344 clauses = ( +345 ['( ' + join_sep(clauses[0])] +346 + [head + '( ' + join_sep(c) for c in clauses[1:]] +347 + [spacer + (')' * len(clauses))] +348 ) +349 return '\n'.join(clauses) +350 else: +351 return self.print(kast) +352 +353 def _applied_label_str(self, symbol: str) -> Callable[..., str]: +354 return lambda *args: symbol + ' ( ' + ' , '.join(args) + ' )'
+ +355 +356 +
+[docs] +357def build_symbol_table( +358 definition: KDefinition, +359 extra_modules: Iterable[KFlatModule] = (), +360 opinionated: bool = False, +361) -> SymbolTable: +362 """Build the unparsing symbol table given a JSON encoded definition. +363 +364 Args: +365 definition: JSON encoded K definition. +366 +367 Returns: +368 Python dictionary mapping klabels to automatically generated unparsers. +369 """ +370 symbol_table = {} +371 all_modules = list(definition.all_modules) + ([] if extra_modules is None else list(extra_modules)) +372 for module in all_modules: +373 for prod in module.syntax_productions: +374 assert prod.klabel +375 label = prod.klabel.name +376 unparser = unparser_for_production(prod) +377 +378 symbol_table[label] = unparser +379 if Atts.SYMBOL in prod.att: +380 symbol_table[prod.att[Atts.SYMBOL]] = unparser +381 +382 if opinionated: +383 symbol_table['#And'] = lambda c1, c2: c1 + '\n#And ' + c2 +384 symbol_table['#Or'] = lambda c1, c2: c1 + '\n#Or\n' + indent(c2, size=4) +385 +386 return symbol_table
+ +387 +388 +
+[docs] +389def unparser_for_production(prod: KProduction) -> Callable[..., str]: +390    def _unparser(*args: Any) -> str: +391        index = 0 +392        result = [] +393        num_nonterm = len([item for item in prod.items if type(item) is KNonTerminal]) +394        num_named_nonterm = len([item for item in prod.items if type(item) is KNonTerminal and item.name is not None]) +395        for item in prod.items: +396            if type(item) is KTerminal: +397                result.append(item.value) +398            elif type(item) is KNonTerminal and index < len(args): +399                if num_nonterm == num_named_nonterm: +400                    if index == 0: +401                        result.append('...') +402                    result.append(f'{item.name}:') +403                result.append(args[index]) +404                index += 1 +405        return ' '.join(result) +406 +407    return _unparser
+ +408 +409 +
+[docs] +410def indent(text: str, size: int = 2) -> str: +411 return '\n'.join([(' ' * size) + line for line in text.split('\n')])
+ +412 +413 +
+[docs] +414def paren(printer: Callable[..., str]) -> Callable[..., str]: +415 return lambda *args: '( ' + printer(*args) + ' )'
+ +416 +417 +
+[docs] +418def assoc_with_unit(assoc_join: str, unit: str) -> Callable[..., str]: +419 def _assoc_with_unit(*args: str) -> str: +420 return assoc_join.join(arg for arg in args if arg != unit) +421 +422 return _assoc_with_unit
+ +
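PrettyPrinter itself needs a compiled KDefinition, but the standalone helpers at the bottom of this module are pure string functions and can be exercised directly; the behaviour below follows from the code shown above.

from pyk.kast.pretty import assoc_with_unit, indent, paren

join_list = assoc_with_unit(' , ', '.List')
assert join_list('a', 'b', '.List') == 'a , b'   # unit arguments are dropped from the join

wrap = paren(join_list)
assert wrap('a', 'b') == '( a , b )'

assert indent('x\ny', size=4) == '    x\n    y'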
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/rewrite.html b/pyk/_modules/pyk/kast/rewrite.html new file mode 100644 index 00000000000..a3bf6c5fbb0 --- /dev/null +++ b/pyk/_modules/pyk/kast/rewrite.html @@ -0,0 +1,161 @@ + + + + + + + + pyk.kast.rewrite — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.rewrite

+ 1from __future__ import annotations
+ 2
+ 3import logging
+ 4from typing import TYPE_CHECKING
+ 5
+ 6from .att import WithKAtt
+ 7from .inner import KApply, KToken, bottom_up
+ 8
+ 9if TYPE_CHECKING:
+10    from collections.abc import Iterable
+11    from typing import Final, TypeVar
+12
+13    from .inner import KInner, KRewrite
+14
+15    KI = TypeVar('KI', bound=KInner)
+16    W = TypeVar('W', bound=WithKAtt)
+17
+18_LOGGER: Final = logging.getLogger(__name__)
+19
+20
+
+[docs] +21def indexed_rewrite(kast: KInner, rewrites: Iterable[KRewrite]) -> KInner: +22 token_rewrites: list[KRewrite] = [] +23 apply_rewrites: dict[str, list[KRewrite]] = {} +24 other_rewrites: list[KRewrite] = [] +25 for r in rewrites: +26 if type(r.lhs) is KToken: +27 token_rewrites.append(r) +28 elif type(r.lhs) is KApply: +29 if r.lhs.label.name in apply_rewrites: +30 apply_rewrites[r.lhs.label.name].append(r) +31 else: +32 apply_rewrites[r.lhs.label.name] = [r] +33 else: +34 other_rewrites.append(r) +35 +36 def _apply_rewrites(_kast: KInner) -> KInner: +37 if type(_kast) is KToken: +38 for tr in token_rewrites: +39 _kast = tr.apply_top(_kast) +40 elif type(_kast) is KApply: +41 if _kast.label.name in apply_rewrites: +42 for ar in apply_rewrites[_kast.label.name]: +43 _kast = ar.apply_top(_kast) +44 else: +45 for _or in other_rewrites: +46 _kast = _or.apply_top(_kast) +47 return _kast +48 +49 orig_kast: KInner = kast +50 new_kast: KInner | None = None +51 while orig_kast != new_kast: +52 if new_kast is None: +53 new_kast = orig_kast +54 else: +55 orig_kast = new_kast +56 new_kast = bottom_up(_apply_rewrites, new_kast) +57 return new_kast
+ +
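indexed_rewrite applies a set of rewrites bottom-up until the term stops changing, indexing KApply rewrites by their label so that only plausible rules are tried at each node. A minimal sketch (import paths assumed; the term and rules are made up):

from pyk.kast.inner import KApply, KRewrite, KToken
from pyk.kast.rewrite import indexed_rewrite

term = KApply('foo', [KToken('0', 'Int'), KApply('bar', [KToken('0', 'Int')])])
rewrites = [
    # Tried on every KToken encountered during the bottom-up pass.
    KRewrite(KToken('0', 'Int'), KToken('1', 'Int')),
    # Indexed under 'bar', so only tried on 'bar' applications.
    KRewrite(KApply('bar', [KToken('1', 'Int')]), KToken('2', 'Int')),
]
assert indexed_rewrite(term, rewrites) == KApply('foo', [KToken('1', 'Int'), KToken('2', 'Int')])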
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kast/utils.html b/pyk/_modules/pyk/kast/utils.html new file mode 100644 index 00000000000..1a424abf6f1 --- /dev/null +++ b/pyk/_modules/pyk/kast/utils.html @@ -0,0 +1,194 @@ + + + + + + + + pyk.kast.utils — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kast.utils

+ 1from __future__ import annotations
+ 2
+ 3import logging
+ 4from pathlib import Path
+ 5from typing import TYPE_CHECKING
+ 6
+ 7from ._ast_to_kast import _ast_to_kast
+ 8from .markdown import select_code_blocks
+ 9from .outer import KDefinition
+10from .outer_parser import OuterParser
+11from .outer_syntax import Definition
+12
+13if TYPE_CHECKING:
+14    from collections.abc import Iterable
+15    from typing import Final
+16
+17    from .outer_syntax import Require
+18
+19_LOGGER: Final = logging.getLogger(__name__)
+20
+21
+
+[docs] +22def parse_outer( +23 definition_file: str | Path, +24 main_module: str, +25 *, +26 include_dirs: Iterable[str | Path] = (), +27 md_selector: str = 'k', +28 include_source: bool = True, +29) -> KDefinition: +30 parsed_files = slurp_definitions( +31 definition_file, +32 include_dirs=include_dirs, +33 md_selector=md_selector, +34 include_source=include_source, +35 ) +36 modules = tuple(module for _, definition in parsed_files.items() for module in definition.modules) +37 final_definition = _ast_to_kast(Definition(modules), main_module=main_module) +38 assert isinstance(final_definition, KDefinition) +39 return final_definition
+ +40 +41 +
+[docs] +42def slurp_definitions( +43 main_file: str | Path, +44 *, +45 include_dirs: Iterable[str | Path] = (), +46 md_selector: str | None = None, +47 include_source: bool = True, +48) -> dict[Path, Definition]: +49 main_file = Path(main_file).resolve() +50 _include_dirs = [Path(include_dir) for include_dir in include_dirs] +51 md_selector = md_selector or 'k' +52 +53 result: dict[Path, Definition] = {} +54 +55 pending = [main_file] +56 while pending: # DFS +57 current_file = pending.pop() +58 +59 if current_file in result: +60 continue +61 +62 definition = _parse_file(current_file, md_selector, include_source) +63 pending += reversed([_resolve_require(require, current_file, _include_dirs) for require in definition.requires]) +64 +65 result[current_file] = definition +66 +67 return result
+ +68 +69 +70def _parse_file(definition_file: Path, md_selector: str, include_source: bool) -> Definition: +71 _LOGGER.info(f'Reading {definition_file}') +72 +73 text = definition_file.read_text() +74 if definition_file.suffix == '.md': +75 text = select_code_blocks(text, md_selector) +76 +77 parser = OuterParser(text, source=definition_file if include_source else None) +78 return parser.definition() +79 +80 +81def _resolve_require(require: Require, definition_file: Path, include_dirs: list[Path]) -> Path: +82 try_dirs = [definition_file.parent] + include_dirs +83 try_files = [try_dir / require.path for try_dir in try_dirs] +84 for file in try_files: +85 if file.is_file(): +86 return file.resolve() +87 raise FileNotFoundError(f'{require.path} not found. Searched paths: {[str(path) for path in try_dirs]}') +
+ +
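A usage sketch for the entry point above; the file and module names are hypothetical. parse_outer follows every requires clause transitively (via slurp_definitions), extracts the selected code blocks from Markdown files, and converts the result into a single KDefinition.

from pyk.kast.utils import parse_outer

definition = parse_outer(
    'imp.k',                   # hypothetical main definition file
    main_module='IMP',
    include_dirs=['include'],  # searched after the requiring file's own directory
    md_selector='k',
)
print([module.name for module in definition.all_modules])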
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kbuild/kbuild.html b/pyk/_modules/pyk/kbuild/kbuild.html new file mode 100644 index 00000000000..d2d41d4083a --- /dev/null +++ b/pyk/_modules/pyk/kbuild/kbuild.html @@ -0,0 +1,280 @@ + + + + + + + + pyk.kbuild.kbuild — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kbuild.kbuild

+  1from __future__ import annotations
+  2
+  3import re
+  4from contextlib import contextmanager
+  5from dataclasses import dataclass
+  6from functools import cached_property
+  7from pathlib import Path
+  8from tempfile import TemporaryDirectory
+  9from typing import TYPE_CHECKING, final
+ 10
+ 11from filelock import FileLock
+ 12
+ 13from ..ktool.kompile import kompile
+ 14from ..utils import check_dir_path, single
+ 15from .utils import k_version, sync_files
+ 16
+ 17if TYPE_CHECKING:
+ 18    from collections.abc import Iterator
+ 19    from re import Match
+ 20    from typing import Any
+ 21
+ 22    from .project import Project, Target
+ 23
+ 24
+
+[docs] + 25@final + 26@dataclass(frozen=True) + 27class KBuild: + 28 kdist_dir: Path + 29 + 30 def __init__(self, kdist_dir: str | Path): + 31 kdist_dir = Path(kdist_dir).resolve() + 32 object.__setattr__(self, 'kdist_dir', kdist_dir) + 33 + 34 @cached_property + 35 def k_version(self) -> str: + 36 return k_version().text + 37 +
+[docs] + 38 def definition_dir(self, project: Project, target_name: str) -> Path: + 39 return self.kdist_dir / self.k_version / target_name
+ + 40 +
+[docs] + 41 def kompile(self, project: Project, target_name: str, *, debug: bool = False) -> Path: + 42 self.kdist_dir.mkdir(parents=True, exist_ok=True) + 43 + 44 with FileLock(self.kdist_dir / '.lock'): + 45 output_dir = self.definition_dir(project, target_name) + 46 + 47 if self.up_to_date(project, target_name): + 48 return output_dir + 49 + 50 with KBuildEnv.create_temp(project) as env: + 51 env.kompile(target_name, output_dir, debug=debug) + 52 + 53 return output_dir
+ + 54 +
+[docs] + 55 def up_to_date(self, project: Project, target_name: str) -> bool: + 56 definition_dir = self.definition_dir(project, target_name) + 57 timestamp = definition_dir / 'timestamp' + 58 + 59 if not timestamp.exists(): + 60 return False + 61 + 62 input_timestamps = (input_file.stat().st_mtime for input_file in project.all_files) + 63 target_timestamp = timestamp.stat().st_mtime + 64 return all(input_timestamp < target_timestamp for input_timestamp in input_timestamps)
+
+ + 65 + 66 +
+[docs] + 67@final + 68@dataclass(frozen=True) + 69class KBuildEnv: + 70 project: Project + 71 path: Path + 72 + 73 def __init__(self, project: Project, path: str | Path): + 74 path = Path(path).resolve() + 75 check_dir_path(path) + 76 object.__setattr__(self, 'project', project) + 77 object.__setattr__(self, 'path', path) + 78 +
+[docs] + 79 @staticmethod + 80 @contextmanager + 81 def create_temp(project: Project) -> Iterator[KBuildEnv]: + 82 with TemporaryDirectory(prefix=f'kbuild-{project.name}-') as path_str: + 83 env = KBuildEnv(project, path_str) + 84 env.sync() + 85 yield env
+ + 86 +
+[docs] + 87 def sync(self) -> None: + 88 for sub_project in self.project.sub_projects: + 89 self._sync_project(sub_project)
+ + 90 +
+[docs] + 91 def kompile(self, target_name: str, output_dir: Path, *, debug: bool = False) -> None: + 92 target = self.project.get_target(target_name) + 93 kompile( + 94 output_dir=output_dir, + 95 include_dirs=self._include_dirs, + 96 cwd=self.path, + 97 debug=debug, + 98 **self._kompile_args(target), + 99 )
+ +100 +101 @property +102 def _include_dirs(self) -> list[Path]: +103 return [self._include_dir(sub_project) for sub_project in self.project.sub_projects] +104 +105 def _include_dir(self, project: Project) -> Path: +106 return self.path / project.name / 'include' +107 +108 def _source_dir(self, project: Project) -> Path: +109 return self._include_dir(project) / project.name +110 +111 def _resource_dir(self, project: Project, resource_name: str) -> Path: +112 return self.path / project.name / 'resource' / resource_name +113 +114 def _sync_project(self, project: Project) -> None: +115 # Sync sources +116 sync_files( +117 source_dir=project.source_dir, +118 target_dir=self._source_dir(project), +119 file_names=project.source_file_names, +120 ) +121 +122 # Sync resources +123 for resource_name in project.resources: +124 sync_files( +125 source_dir=project.resources[resource_name], +126 target_dir=self._resource_dir(project, resource_name), +127 file_names=project.resource_file_names[resource_name], +128 ) +129 +130 def _kompile_args(self, target: Target) -> dict[str, Any]: +131 args = dict(target.args) +132 args['main_file'] = self._source_dir(self.project) / args['main_file'] +133 +134 if 'ccopts' in args: +135 args['ccopts'] = [self._render_opt(opt) for opt in args['ccopts']] +136 +137 return args +138 +139 def _render_opt(self, opt: str) -> str: +140 def render(match: Match) -> str: +141 project_name = match.group('project') +142 resource_name = match.group('resource') +143 +144 sub_project = single( +145 sub_project for sub_project in self.project.sub_projects if sub_project.name == project_name +146 ) +147 resource_path = self._resource_dir(sub_project, resource_name) +148 +149 if not resource_path.exists(): +150 raise ValueError(f'Failed to resolve opt {opt}: resource path {resource_path} does not exist') +151 +152 return str(resource_path) +153 +154 pattern = re.compile(r'{{ *(?P<project>\S+):(?P<resource>\S+) *}}') +155 return pattern.sub(render, opt)
+ +
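A sketch of the kbuild workflow implemented above, with hypothetical paths: load a project, then kompile one of its targets into a shared kdist directory. kompile is incremental; when the timestamp check in up_to_date succeeds it simply returns the existing definition directory.

from pathlib import Path

from pyk.kbuild.kbuild import KBuild
from pyk.kbuild.project import Project

project = Project.load_from_dir('.')              # expects the project file in the current directory
kbuild = KBuild(Path.home() / '.kbuild')          # hypothetical kdist directory
definition_dir = kbuild.kompile(project, 'llvm')  # 'llvm' must be a target declared by the project
print(definition_dir)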
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kbuild/project.html b/pyk/_modules/pyk/kbuild/project.html new file mode 100644 index 00000000000..9f8ce71cb0a --- /dev/null +++ b/pyk/_modules/pyk/kbuild/project.html @@ -0,0 +1,343 @@ + + + + + + + + pyk.kbuild.project — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kbuild.project

+  1from __future__ import annotations
+  2
+  3from abc import ABC, abstractmethod
+  4from dataclasses import dataclass
+  5from functools import cached_property
+  6from importlib import resources
+  7from pathlib import Path, PosixPath
+  8from typing import TYPE_CHECKING, final
+  9
+ 10import tomli
+ 11
+ 12from ..cli.utils import relative_path
+ 13from ..utils import FrozenDict, abs_or_rel_to, check_dir_path, check_file_path, check_relative_path, single
+ 14from .config import PROJECT_FILE_NAME
+ 15
+ 16if TYPE_CHECKING:
+ 17    from collections.abc import Iterable, Mapping
+ 18    from typing import Any
+ 19
+ 20
+
+[docs] + 21class Source(ABC): +
+[docs] + 22 @staticmethod + 23 def from_dict(dct: Mapping[str, Any]) -> Source: + 24 if 'path' in dct: + 25 return PathSource(Path(dct['path'])) + 26 if 'package' in dct: + 27 return PackageSource(dct['package']) + 28 raise ValueError(f'Cannot parse source: {dct}')
+ + 29 +
+[docs] + 30 @abstractmethod + 31 def resolve(self, project_path: Path) -> Path: ...
+
+ + 32 + 33 +
+[docs] + 34@final + 35@dataclass(frozen=True) + 36class PathSource(Source): + 37 path: Path + 38 +
+[docs] + 39 def resolve(self, project_path: Path) -> Path: + 40 return abs_or_rel_to(self.path, project_path)
+
+ + 41 + 42 +
+[docs] + 43@final + 44@dataclass(frozen=True) + 45class PackageSource(Source): + 46 package: str + 47 +
+[docs] + 48 def resolve(self, project_path: Path) -> Path: + 49 traversable = resources.files(self.package) + 50 if not isinstance(traversable, PosixPath): + 51 raise ValueError(f'Package name {self.package!r} does not resolve to a directory') + 52 return traversable.resolve(strict=True)
+
+ + 53 + 54 +
+[docs] + 55@final + 56@dataclass(frozen=True) + 57class Target: + 58 name: str # TODO Maybe remove name and store in project as Dict + 59 + 60 args: dict[str, Any] + 61 + 62 def __init__( + 63 self, + 64 *, + 65 name: str, + 66 args: Mapping[str, Any], + 67 ): + 68 if args['main-file']: + 69 main_file = Path(args['main-file']) + 70 check_relative_path(main_file) + 71 newargs = {key.replace('-', '_'): value for key, value in args.items()} + 72 newargs['main_file'] = main_file + 73 object.__setattr__(self, 'name', name) + 74 object.__setattr__(self, 'args', newargs)
+ + 75 + 76 +
+[docs] + 77@final + 78@dataclass(frozen=True) + 79class Project: + 80 path: Path + 81 name: str + 82 version: str + 83 source_dir: Path + 84 resources: FrozenDict[str, Path] + 85 dependencies: tuple[Project, ...] + 86 targets: tuple[Target, ...] + 87 + 88 def __init__( + 89 self, + 90 *, + 91 path: str | Path, + 92 name: str, + 93 version: str, + 94 source_dir: str | Path, + 95 resources: Mapping[str, str | Path] | None = None, + 96 dependencies: Iterable[Project] = (), + 97 targets: Iterable[Target] = (), + 98 ): + 99 path = Path(path).resolve() +100 check_dir_path(path) +101 +102 source_dir = path / relative_path(source_dir) +103 check_dir_path(source_dir) +104 +105 resources = resources or {} +106 resources = { +107 resource_name: path / relative_path(resource_dir) for resource_name, resource_dir in resources.items() +108 } +109 +110 object.__setattr__(self, 'path', path) +111 object.__setattr__(self, 'name', name) +112 object.__setattr__(self, 'version', version) +113 object.__setattr__(self, 'source_dir', source_dir) +114 object.__setattr__(self, 'resources', FrozenDict(resources)) +115 object.__setattr__(self, 'dependencies', tuple(dependencies)) +116 object.__setattr__(self, 'targets', tuple(targets)) +117 +
+[docs] +118 @staticmethod +119 def load(project_file: str | Path) -> Project: +120 project_file = Path(project_file) +121 check_file_path(project_file) +122 project_path = project_file.parent +123 +124 def _load_dependency(name: str, dct: Any) -> Project: +125 source = Source.from_dict(dct) +126 dependency_path = source.resolve(project_path) +127 project = Project.load_from_dir(dependency_path) +128 if project.name != name: +129 raise ValueError(f'Invalid dependency, expected name {name}, got: {project.name}') +130 return project +131 +132 with open(project_file, 'rb') as f: +133 dct = tomli.load(f) +134 +135 project = Project( +136 path=project_path, +137 name=dct['project']['name'], +138 version=dct['project']['version'], +139 source_dir=dct['project']['source'], +140 resources=dct['project'].get('resources'), +141 dependencies=tuple( +142 _load_dependency(name, source_dct) for name, source_dct in dct.get('dependencies', {}).items() +143 ), +144 targets=tuple(Target(name=name, args=target) for name, target in dct.get('targets', {}).items()), +145 ) +146 +147 return project
+ +148 +
+[docs] +149 @staticmethod +150 def load_from_dir(project_dir: str | Path) -> Project: +151 project_dir = Path(project_dir) +152 check_dir_path(project_dir) +153 return Project.load(project_dir / PROJECT_FILE_NAME)
+ +154 +155 @cached_property +156 def sub_projects(self) -> tuple[Project, ...]: +157 res: tuple[Project, ...] = (self,) +158 for project in self.dependencies: +159 res += project.sub_projects +160 return res +161 +162 @property +163 def project_file(self) -> Path: +164 return self.path / PROJECT_FILE_NAME +165 +166 @property +167 def source_files(self) -> list[Path]: +168 res: list[Path] = [] +169 res.extend(self.source_dir.rglob('*.k')) +170 res.extend(self.source_dir.rglob('*.md')) +171 return res +172 +173 @property +174 def source_file_names(self) -> list[str]: +175 return [str(source_file.relative_to(self.source_dir)) for source_file in self.source_files] +176 +177 @property +178 def resource_files(self) -> dict[str, list[Path]]: +179 res: dict[str, list[Path]] = {} +180 for resource_name, resource_dir in self.resources.items(): +181 check_dir_path(resource_dir) +182 res[resource_name] = [resource_file for resource_file in resource_dir.rglob('*') if resource_file.is_file()] +183 return res +184 +185 @property +186 def resource_file_names(self) -> dict[str, list[str]]: +187 return { +188 resource_name: [ +189 str(resource_file.relative_to(self.resources[resource_name])) for resource_file in resource_files +190 ] +191 for resource_name, resource_files in self.resource_files.items() +192 } +193 +194 @property +195 def all_files(self) -> list[Path]: +196 res: list[Path] = [] +197 for sub_project in self.sub_projects: +198 res.append(sub_project.project_file) +199 res.extend(sub_project.source_files) +200 for resource_name in sub_project.resources: +201 res.extend(sub_project.resource_files[resource_name]) +202 return res +203 +
+[docs] +204 def get_target(self, target_name: str) -> Target: +205 # TODO Should be enforced as a validation rule +206 return single(target for target in self.targets if target.name == target_name)
+
+ +
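The shape of the project file follows directly from Project.load above: a [project] table with name, version and source, plus optional dependencies and targets tables. The sketch below writes a minimal, made-up project file to a temporary directory and loads it back; the kbuild.toml file name and the target contents are assumptions.

from pathlib import Path
from tempfile import TemporaryDirectory

from pyk.kbuild.project import Project

PROJECT_TOML = '''
[project]
name = "imp-semantics"
version = "0.1.0"
source = "semantics"

[targets.llvm]
main-file = "imp.k"
backend = "llvm"
'''

with TemporaryDirectory() as tmp:
    path = Path(tmp)
    (path / 'semantics').mkdir()                     # source_dir must exist
    (path / 'kbuild.toml').write_text(PROJECT_TOML)  # Project.load accepts any path to a TOML project file
    project = Project.load(path / 'kbuild.toml')
    target = project.get_target('llvm')
    print(project.name, project.version, target.args['main_file'])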
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kbuild/utils.html b/pyk/_modules/pyk/kbuild/utils.html new file mode 100644 index 00000000000..3d33a548e84 --- /dev/null +++ b/pyk/_modules/pyk/kbuild/utils.html @@ -0,0 +1,227 @@ + + + + + + + + pyk.kbuild.utils — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kbuild.utils

+  1from __future__ import annotations
+  2
+  3import re
+  4import shutil
+  5from dataclasses import dataclass
+  6from typing import ClassVar  # noqa: TC003
+  7from typing import TYPE_CHECKING, final
+  8
+  9from ..cli.utils import check_dir_path, check_file_path
+ 10from ..utils import run_process_2
+ 11
+ 12if TYPE_CHECKING:
+ 13    from collections.abc import Iterable
+ 14    from pathlib import Path
+ 15
+ 16
+
+[docs] + 17@final + 18@dataclass(frozen=True) + 19class KVersion: +
+[docs] + 20 @final + 21 @dataclass(frozen=True) + 22 class Git: + 23 ahead: int + 24 rev: str + 25 dirty: bool
+ + 26 + 27 major: int + 28 minor: int + 29 patch: int + 30 git: Git | None + 31 + 32 _PATTERN_STR: ClassVar = ( + 33 r'v(?P<major>[1-9]+)' + 34 r'\.(?P<minor>[0-9]+)' + 35 r'\.(?P<patch>[0-9]+)' + 36 r'(?P<git>' + 37 r'-(?P<ahead>[0-9]+)' + 38 r'-g(?P<rev>[0-9a-f]{10})' + 39 r'(?P<dirty>-dirty)?)?' + 40 ) + 41 PATTERN: ClassVar = re.compile(_PATTERN_STR) + 42 +
+[docs] + 43 @staticmethod + 44 def parse(text: str) -> KVersion: + 45 match = KVersion.PATTERN.fullmatch(text) + 46 if not match: + 47 raise ValueError(f'Invalid K version string: {text}') + 48 + 49 major = int(match['major']) + 50 minor = int(match['minor']) + 51 patch = int(match['patch']) + 52 git = ( + 53 KVersion.Git( + 54 ahead=int(match['ahead']), + 55 rev=match['rev'], + 56 dirty=bool(match['dirty']), + 57 ) + 58 if match['git'] + 59 else None + 60 ) + 61 + 62 return KVersion(major=major, minor=minor, patch=patch, git=git)
+ + 63 + 64 @property + 65 def text(self) -> str: + 66 version = f'v{self.major}.{self.minor}.{self.patch}' + 67 dirty = '-dirty' if self.git and self.git.dirty else '' + 68 git = f'-{self.git.ahead}-g{self.git.rev}{dirty}' if self.git else '' + 69 return f'{version}{git}'
+ + 70 + 71 +
+[docs] + 72def k_version() -> KVersion: + 73 try: + 74 proc_res = run_process_2(['kompile', '--version']) + 75 except FileNotFoundError as err: + 76 raise RuntimeError('K is not installed') from err + 77 + 78 version = proc_res.stdout.splitlines()[0][14:] # 'K version: ...' + 79 return KVersion.parse(version)
+ + 80 + 81 +
+[docs] + 82def sync_files(source_dir: Path, target_dir: Path, file_names: Iterable[str]) -> list[Path]: + 83 check_dir_path(source_dir) + 84 shutil.rmtree(target_dir, ignore_errors=True) + 85 target_dir.mkdir(parents=True) + 86 + 87 res = [] + 88 for file_name in file_names: + 89 source_file = source_dir / file_name + 90 check_file_path(source_file) + 91 target_file = target_dir / file_name + 92 target_file.parent.mkdir(parents=True, exist_ok=True) + 93 shutil.copy2(source_file, target_file) + 94 res.append(target_file) + 95 + 96 return res
+ + 97 + 98 +
+[docs] + 99def find_file_upwards(file_name: str, start_dir: Path) -> Path: +100 check_dir_path(start_dir) +101 curr_dir = start_dir.resolve() +102 while True: +103 path = curr_dir / file_name +104 if path.is_file(): +105 return path +106 if curr_dir == curr_dir.parent: +107 raise FileNotFoundError(f'{file_name} not found') +108 curr_dir = curr_dir.parent
+ +
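A quick sketch of the version handling above. The version strings are made up but follow KVersion.PATTERN (note the git revision must be exactly 10 hex digits), so parse and text round-trip each other.

from pyk.kbuild.utils import KVersion

released = KVersion.parse('v7.1.191')
assert released.git is None and (released.major, released.minor, released.patch) == (7, 1, 191)

dev = KVersion.parse('v7.1.191-5-g0123456789-dirty')
assert dev.git is not None and dev.git.ahead == 5 and dev.git.dirty
assert dev.text == 'v7.1.191-5-g0123456789-dirty'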
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kcfg/exploration.html b/pyk/_modules/pyk/kcfg/exploration.html new file mode 100644 index 00000000000..3ec723ecf78 --- /dev/null +++ b/pyk/_modules/pyk/kcfg/exploration.html @@ -0,0 +1,248 @@ + + + + + + + + pyk.kcfg.exploration — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kcfg.exploration

+  1from __future__ import annotations
+  2
+  3from typing import TYPE_CHECKING
+  4
+  5from pyk.kcfg.kcfg import KCFG, NodeAttr
+  6from pyk.kcfg.minimize import KCFGMinimizer
+  7
+  8if TYPE_CHECKING:
+  9    from collections.abc import Iterable, Mapping
+ 10    from typing import Any
+ 11
+ 12    from pyk.kcfg.kcfg import NodeIdLike
+ 13    from pyk.kcfg.semantics import KCFGSemantics
+ 14
+ 15
+
+[docs] + 16class KCFGExplorationNodeAttr(NodeAttr): + 17 TERMINAL = NodeAttr('terminal')
+ + 18 + 19 +
+[docs] + 20class KCFGExploration: + 21 kcfg: KCFG + 22 + 23 def __init__(self, kcfg: KCFG, terminal: Iterable[NodeIdLike] | None = None) -> None: + 24 self.kcfg = kcfg + 25 if terminal: + 26 for node_id in terminal: + 27 self.add_terminal(node_id) + 28 + 29 @property + 30 def terminal_ids(self) -> set[int]: + 31 return {node.id for node in self.kcfg.nodes if KCFGExplorationNodeAttr.TERMINAL in node.attrs} + 32 + 33 # + 34 # Recognisers + 35 # + 36 + 37 # Terminal node recogniser +
+[docs] + 38 def is_terminal(self, node_id: NodeIdLike) -> bool: + 39 return KCFGExplorationNodeAttr.TERMINAL in self.kcfg.node(node_id).attrs
+ + 40 + 41 # Explorable node recogniser +
+[docs] + 42 def is_explorable(self, node_id: NodeIdLike) -> bool: + 43 return ( + 44 self.kcfg.is_leaf(node_id) + 45 and not self.is_terminal(node_id) + 46 and not self.kcfg.is_stuck(node_id) + 47 and not self.kcfg.is_vacuous(node_id) + 48 )
+ + 49 + 50 # + 51 # Collectors + 52 # + 53 + 54 # Terminal node collector + 55 @property + 56 def terminal(self) -> list[KCFG.Node]: + 57 return [node for node in self.kcfg.nodes if self.is_terminal(node.id)] + 58 + 59 # Explorable node collector + 60 @property + 61 def explorable(self) -> list[KCFG.Node]: + 62 return [node for node in self.kcfg.leaves if self.is_explorable(node.id)] + 63 + 64 # + 65 # Terminal node manipulation + 66 # + 67 + 68 # Marking a given node as terminal +
+[docs] + 69 def add_terminal(self, node_id: NodeIdLike) -> None: + 70 self.kcfg.add_attr(node_id, KCFGExplorationNodeAttr.TERMINAL)
+ + 71 + 72 # Unmarking a given node as terminal +
+[docs] + 73 def remove_terminal(self, node_id: int) -> None: + 74 self.kcfg.remove_attr(node_id, KCFGExplorationNodeAttr.TERMINAL)
+ + 75 + 76 # + 77 # Lifted KCFG functions that may affect terminal nodes + 78 # + 79 + 80 # Removing a given node +
+[docs] + 81 def remove_node(self, node_id: NodeIdLike) -> None: + 82 node_id = self.kcfg._resolve(node_id) + 83 self.kcfg.remove_node(node_id)
+ + 84 + 85 # Pruning a KCFG subtree starting from a given node +
+[docs] + 86 def prune(self, node_id: NodeIdLike, keep_nodes: Iterable[NodeIdLike] = ()) -> list[int]: + 87 return self.kcfg.prune(node_id, keep_nodes=keep_nodes)
+ + 88 + 89 # + 90 # Dictionarisation + 91 # + 92 + 93 # Conversion from dictionary +
+[docs] + 94 @staticmethod + 95 def from_dict(dct: Mapping[str, Any]) -> KCFGExploration: + 96 kcfg = KCFG.from_dict(dct['kcfg']) + 97 terminal = dct['terminal'] + 98 + 99 return KCFGExploration(kcfg, terminal)
+ +100 +101 # Conversion to dictionary +
+[docs] +102 def to_dict(self) -> dict[str, Any]: +103 dct: dict[str, Any] = {} +104 dct['kcfg'] = self.kcfg.to_dict() +105 dct['terminal'] = sorted(node.id for node in self.kcfg.nodes if self.is_terminal(node.id)) +106 return dct
+ +107 +108 # +109 # Minimization +110 # +111 +112 # Minimizing the KCFG +
+[docs] +113 def minimize_kcfg(self, heuristics: KCFGSemantics | None = None, merge: bool = False) -> None: +114 KCFGMinimizer(kcfg=self.kcfg, heuristics=heuristics).minimize(merge=merge)
+
+ +
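A small sketch of how the wrapper above is typically driven; the KCFG and the node ids are assumed to come from an existing proof exploration, so the function only exercises the API defined in this module.

from __future__ import annotations

from typing import Any

from pyk.kcfg.exploration import KCFGExploration
from pyk.kcfg.kcfg import KCFG


def mark_and_serialise(kcfg: KCFG, terminal_node_ids: list[int]) -> dict[str, Any]:
    exploration = KCFGExploration(kcfg)
    for node_id in terminal_node_ids:
        exploration.add_terminal(node_id)     # tags the node with the TERMINAL attribute
    frontier = exploration.explorable         # leaves that are neither terminal, stuck nor vacuous
    print(f'{len(frontier)} nodes left to explore')
    return exploration.to_dict()              # KCFGExploration.from_dict inverts this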
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kcfg/explore.html b/pyk/_modules/pyk/kcfg/explore.html new file mode 100644 index 00000000000..1cf1a4b6ae4 --- /dev/null +++ b/pyk/_modules/pyk/kcfg/explore.html @@ -0,0 +1,405 @@ + + + + + + + + pyk.kcfg.explore — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kcfg.explore

+  1from __future__ import annotations
+  2
+  3import logging
+  4from functools import cached_property
+  5from typing import TYPE_CHECKING
+  6
+  7from ..kast.inner import KApply, KVariable
+  8from ..kast.manip import (
+  9    flatten_label,
+ 10    minimize_term,
+ 11    ml_pred_to_bool,
+ 12    no_cell_rewrite_to_dots,
+ 13    push_down_rewrites,
+ 14    replace_rewrites_with_implies,
+ 15)
+ 16from ..kast.pretty import PrettyPrinter
+ 17from ..kore.rpc import LogRewrite, RewriteSuccess
+ 18from ..prelude.ml import is_top, mlAnd
+ 19from ..utils import not_none, shorten_hashes, single, unique
+ 20from .kcfg import KCFG, Abstract, Branch, NDBranch, Step, Stuck, Vacuous
+ 21from .semantics import DefaultSemantics
+ 22
+ 23if TYPE_CHECKING:
+ 24    from collections.abc import Iterable
+ 25    from typing import Final
+ 26
+ 27    from ..cterm import CTerm, CTermSymbolic
+ 28    from ..kast import KInner
+ 29    from ..kcfg.exploration import KCFGExploration
+ 30    from ..kore.rpc import LogEntry
+ 31    from .kcfg import KCFGExtendResult, NodeIdLike
+ 32    from .semantics import KCFGSemantics
+ 33
+ 34
+ 35_LOGGER: Final = logging.getLogger(__name__)
+ 36
+ 37
+
+[docs] + 38class KCFGExplore: + 39 cterm_symbolic: CTermSymbolic + 40 + 41 kcfg_semantics: KCFGSemantics + 42 id: str + 43 + 44 def __init__( + 45 self, + 46 cterm_symbolic: CTermSymbolic, + 47 *, + 48 kcfg_semantics: KCFGSemantics | None = None, + 49 id: str | None = None, + 50 ): + 51 self.cterm_symbolic = cterm_symbolic + 52 self.kcfg_semantics = kcfg_semantics if kcfg_semantics is not None else DefaultSemantics() + 53 self.id = id if id is not None else 'NO ID' + 54 + 55 @cached_property + 56 def _pretty_printer(self) -> PrettyPrinter: + 57 return PrettyPrinter(self.cterm_symbolic._definition) + 58 +
+[docs] + 59 def pretty_print(self, kinner: KInner) -> str: + 60 return self._pretty_printer.print(kinner)
+ + 61 + 62 def _extract_rule_labels(self, _logs: tuple[LogEntry, ...]) -> list[str]: + 63 _rule_lines = [] + 64 for node_log in _logs: + 65 if isinstance(node_log, LogRewrite) and isinstance(node_log.result, RewriteSuccess): + 66 if node_log.result.rule_id in self.cterm_symbolic._definition.sentence_by_unique_id: + 67 sent = self.cterm_symbolic._definition.sentence_by_unique_id[node_log.result.rule_id] + 68 _rule_lines.append(f'{sent.label}:{sent.source}') + 69 else: + 70 if node_log.result.rule_id == 'UNKNOWN': + 71 _LOGGER.warning(f'Unknown unique id attached to rule log entry: {node_log}') + 72 _rule_lines.append(f'{node_log.result.rule_id}:UNKNOWN') + 73 return _rule_lines + 74 +
+[docs] + 75 def implication_failure_reason( + 76 self, antecedent: CTerm, consequent: CTerm, assume_defined: bool = False + 77 ) -> tuple[bool, str]: + 78 def _is_cell_subst(csubst: KInner) -> bool: + 79 if type(csubst) is KApply and csubst.label.name == '_==K_': + 80 csubst_arg = csubst.args[0] + 81 if type(csubst_arg) is KVariable and csubst_arg.name.endswith('_CELL'): + 82 return True + 83 return False + 84 + 85 def _is_negative_cell_subst(constraint: KInner) -> bool: + 86 constraint_bool = ml_pred_to_bool(constraint) + 87 if type(constraint_bool) is KApply and constraint_bool.label.name == 'notBool_': + 88 negative_constraint = constraint_bool.args[0] + 89 if type(negative_constraint) is KApply and negative_constraint.label.name == '_andBool_': + 90 constraints = flatten_label('_andBool_', negative_constraint) + 91 cell_constraints = list(filter(_is_cell_subst, constraints)) + 92 if len(cell_constraints) > 0: + 93 return True + 94 return False + 95 + 96 cterm_implies = self.cterm_symbolic.implies( + 97 antecedent, consequent, failure_reason=True, assume_defined=assume_defined + 98 ) + 99 if cterm_implies.csubst is not None: +100 return (True, '') +101 +102 failing_cells_strs = [] +103 for name, failing_cell in cterm_implies.failing_cells: +104 failing_cell = push_down_rewrites(failing_cell) +105 failing_cell = no_cell_rewrite_to_dots(failing_cell) +106 failing_cell = replace_rewrites_with_implies(failing_cell) +107 failing_cells_strs.append(f'{name}: {self.pretty_print(minimize_term(failing_cell))}') +108 +109 ret_str = 'Matching failed.' +110 if len(failing_cells_strs) > 0: +111 failing_cells_str = '\n'.join(failing_cells_strs) +112 ret_str = f'{ret_str}\nThe following cells failed matching individually (antecedent #Implies consequent):\n{failing_cells_str}' +113 +114 if cterm_implies.remaining_implication is not None: +115 ret_str = ( +116 f'{ret_str}\nThe remaining implication is:\n{self.pretty_print(cterm_implies.remaining_implication)}' +117 ) +118 +119 if cterm_implies.csubst is not None and not is_top(cterm_implies.remaining_implication): +120 negative_cell_constraints = list(filter(_is_negative_cell_subst, antecedent.constraints)) +121 +122 if len(negative_cell_constraints) > 0: +123 negative_cell_constraints_str = '\n'.join(self.pretty_print(cc) for cc in negative_cell_constraints) +124 ret_str = f'{ret_str}\nNegated cell substitutions found (consider using _ => ?_):\n{negative_cell_constraints_str}' +125 +126 return (False, ret_str)
+ +127 +
+[docs] +128 def simplify(self, cfg: KCFG, logs: dict[int, tuple[LogEntry, ...]]) -> None: +129 for node in cfg.nodes: +130 _LOGGER.info(f'Simplifying node {self.id}: {shorten_hashes(node.id)}') +131 new_term, next_node_logs = self.cterm_symbolic.simplify(node.cterm) +132 if new_term != node.cterm: +133 cfg.let_node(node.id, cterm=new_term) +134 if node.id in logs: +135 logs[node.id] += next_node_logs +136 else: +137 logs[node.id] = next_node_logs
+ +138 +
+[docs] +139 def step( +140 self, +141 cfg: KCFG, +142 node_id: NodeIdLike, +143 logs: dict[int, tuple[LogEntry, ...]], +144 depth: int = 1, +145 module_name: str | None = None, +146 ) -> int: +147 if depth <= 0: +148 raise ValueError(f'Expected positive depth, got: {depth}') +149 node = cfg.node(node_id) +150 successors = list(cfg.successors(node.id)) +151 if len(successors) != 0 and type(successors[0]) is KCFG.Split: +152 raise ValueError(f'Cannot take step from split node {self.id}: {shorten_hashes(node.id)}') +153 _LOGGER.info(f'Taking {depth} steps from node {self.id}: {shorten_hashes(node.id)}') +154 exec_res = self.cterm_symbolic.execute(node.cterm, depth=depth, module_name=module_name) +155 if exec_res.depth != depth: +156 raise ValueError(f'Unable to take {depth} steps from node, got {exec_res.depth} steps {self.id}: {node.id}') +157 if len(exec_res.next_states) > 0: +158 raise ValueError(f'Found branch within {depth} steps {self.id}: {node.id}') +159 new_node = cfg.create_node(exec_res.state) +160 _LOGGER.info(f'Found new node at depth {depth} {self.id}: {shorten_hashes((node.id, new_node.id))}') +161 logs[new_node.id] = exec_res.logs +162 out_edges = cfg.edges(source_id=node.id) +163 rule_logs = self._extract_rule_labels(exec_res.logs) +164 if len(out_edges) == 0: +165 cfg.create_edge(node.id, new_node.id, depth=depth, rules=rule_logs) +166 else: +167 edge = out_edges[0] +168 if depth > edge.depth: +169 raise ValueError( +170 f'Step depth {depth} greater than original edge depth {edge.depth} {self.id}: {shorten_hashes((edge.source.id, edge.target.id))}' +171 ) +172 cfg.remove_edge(edge.source.id, edge.target.id) +173 cfg.create_edge(edge.source.id, new_node.id, depth=depth, rules=rule_logs) +174 cfg.create_edge(new_node.id, edge.target.id, depth=(edge.depth - depth), rules=edge.rules[depth:]) +175 return new_node.id
+ +176 +
+[docs] +177 def section_edge( +178 self, +179 cfg: KCFG, +180 source_id: NodeIdLike, +181 target_id: NodeIdLike, +182 logs: dict[int, tuple[LogEntry, ...]], +183 sections: int = 2, +184 ) -> tuple[int, ...]: +185 if sections <= 1: +186 raise ValueError(f'Cannot section an edge less than twice {self.id}: {sections}') +187 edge = single(cfg.edges(source_id=source_id, target_id=target_id)) +188 section_depth = int(edge.depth / sections) +189 if section_depth == 0: +190 raise ValueError(f'Too many sections, results in 0-length section {self.id}: {sections}') +191 orig_depth = edge.depth +192 new_depth = section_depth +193 new_nodes = [] +194 curr_node_id = edge.source.id +195 while new_depth < orig_depth: +196 curr_node_id = self.step(cfg, curr_node_id, logs, depth=section_depth) +197 new_nodes.append(curr_node_id) +198 new_depth += section_depth +199 return tuple(new_nodes)
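For example (ids and depths assumed): sectioning an edge a --10 steps--> b with sections=2 takes one step of depth 10 // 2 = 5 and returns the single intermediate node id:

    new_nodes = explore.section_edge(cfg, source_id=a, target_id=b, logs=logs, sections=2)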
+ +200 +
+[docs] +201 def check_extendable(self, kcfg_exploration: KCFGExploration, node: KCFG.Node) -> None: +202 kcfg: KCFG = kcfg_exploration.kcfg +203 if not kcfg.is_leaf(node.id): +204 raise ValueError(f'Cannot extend non-leaf node {self.id}: {node.id}') +205 if kcfg.is_stuck(node.id): +206 raise ValueError(f'Cannot extend stuck node {self.id}: {node.id}') +207 if kcfg.is_vacuous(node.id): +208 raise ValueError(f'Cannot extend vacuous node {self.id}: {node.id}') +209 if kcfg_exploration.is_terminal(node.id): +210 raise ValueError(f'Cannot extend terminal node {self.id}: {node.id}')
+ +211 +
+[docs] +212 def extend_cterm( +213 self, +214 _cterm: CTerm, +215 node_id: int, +216 *, +217 execute_depth: int | None = None, +218 cut_point_rules: Iterable[str] = (), +219 terminal_rules: Iterable[str] = (), +220 module_name: str | None = None, +221 ) -> list[KCFGExtendResult]: +222 +223 custom_step_result = self.kcfg_semantics.custom_step(_cterm, self.cterm_symbolic) +224 if custom_step_result is not None: +225 return [custom_step_result] +226 +227 abstract_cterm = self.kcfg_semantics.abstract_node(_cterm) +228 if _cterm != abstract_cterm: +229 return [Abstract(abstract_cterm)] +230 +231 cterm, next_states, depth, vacuous, next_node_logs = self.cterm_symbolic.execute( +232 _cterm, +233 depth=execute_depth, +234 cut_point_rules=cut_point_rules, +235 terminal_rules=terminal_rules, +236 module_name=module_name, +237 ) +238 +239 extend_results: list[KCFGExtendResult] = [] +240 +241 # Basic block +242 if depth > 0: +243 extend_results.append(Step(cterm, depth, next_node_logs, self._extract_rule_labels(next_node_logs))) +244 +245 # Stuck or vacuous +246 if not next_states: +247 if vacuous: +248 extend_results.append(Vacuous()) +249 elif depth == 0: +250 extend_results.append(Stuck()) +251 # Cut rule +252 elif len(next_states) == 1: +253 if not self.kcfg_semantics.can_make_custom_step(cterm): +254 (next_node_logs, rules) = ( +255 ((), []) if extend_results else (next_node_logs, self._extract_rule_labels(next_node_logs)) +256 ) +257 extend_results.append(Step(next_states[0].state, 1, next_node_logs, rules, cut=True, info='cut-rule')) +258 # Branch +259 elif all(branch_constraint for _, branch_constraint in next_states): +260 branch_preds = [flatten_label('#And', not_none(rule_predicate)) for _, rule_predicate in next_states] +261 common_preds = list( +262 unique( +263 pred +264 for branch_pred in branch_preds +265 for pred in branch_pred +266 if all(pred in bp for bp in branch_preds) +267 ) +268 ) +269 branches = [mlAnd(pred for pred in branch_pred if pred not in common_preds) for branch_pred in branch_preds] +270 info = f'{[self.pretty_print(ml_pred_to_bool(bc)) for bc in branches]}' +271 extend_results.append(Branch(branches, info=info)) +272 else: +273 # NDBranch +274 next_cterms = [cterm for cterm, _ in next_states] +275 (next_node_logs, rules) = ( +276 ((), []) if extend_results else (next_node_logs, self._extract_rule_labels(next_node_logs)) +277 ) +278 extend_results.append(NDBranch(next_cterms, next_node_logs, rules)) +279 +280 return extend_results


+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kcfg/kcfg.html b/pyk/_modules/pyk/kcfg/kcfg.html new file mode 100644 index 00000000000..82d8e9b3351 --- /dev/null +++ b/pyk/_modules/pyk/kcfg/kcfg.html @@ -0,0 +1,1859 @@ + + + + + + + + pyk.kcfg.kcfg — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +

Source code for pyk.kcfg.kcfg

+   1from __future__ import annotations
+   2
+   3import json
+   4import logging
+   5from abc import ABC, abstractmethod
+   6from collections.abc import Container
+   7from dataclasses import dataclass, field
+   8from threading import RLock
+   9from typing import TYPE_CHECKING, Final, List, Union, cast, final
+  10
+  11from ..cterm import CSubst, CTerm, cterm_build_claim, cterm_build_rule
+  12from ..kast import EMPTY_ATT
+  13from ..kast.inner import KApply
+  14from ..kast.manip import (
+  15    bool_to_ml_pred,
+  16    extract_lhs,
+  17    extract_rhs,
+  18    inline_cell_maps,
+  19    minimize_rule_like,
+  20    rename_generated_vars,
+  21    sort_ac_collections,
+  22)
+  23from ..kast.outer import KFlatModule
+  24from ..prelude.kbool import andBool
+  25from ..utils import ensure_dir_path, not_none
+  26
+  27if TYPE_CHECKING:
+  28    from collections.abc import Iterable, Mapping, MutableMapping
+  29    from pathlib import Path
+  30    from types import TracebackType
+  31    from typing import Any
+  32
+  33    from pyk.kore.rpc import LogEntry
+  34
+  35    from ..kast import KAtt
+  36    from ..kast.inner import KInner
+  37    from ..kast.outer import KClaim, KDefinition, KImport, KRuleLike
+  38
+  39
+  40NodeIdLike = int | str
+  41
+  42_LOGGER: Final = logging.getLogger(__name__)
+  43
+  44
+
+[docs] + 45@dataclass(frozen=True) + 46class NodeAttr: + 47 value: str
+ + 48 + 49 +
+[docs] + 50class KCFGNodeAttr(NodeAttr): + 51 VACUOUS = NodeAttr('vacuous') + 52 STUCK = NodeAttr('stuck')
+ + 53 + 54 +
+[docs] + 55class KCFGStore: + 56 store_path: Path + 57 + 58 def __init__(self, store_path: Path) -> None: + 59 self.store_path = store_path + 60 ensure_dir_path(store_path) + 61 ensure_dir_path(self.kcfg_node_dir) + 62 + 63 @property + 64 def kcfg_json_path(self) -> Path: + 65 return self.store_path / 'kcfg.json' + 66 + 67 @property + 68 def kcfg_node_dir(self) -> Path: + 69 return self.store_path / 'nodes' + 70 +
+[docs] + 71 def kcfg_node_path(self, node_id: int) -> Path: + 72 return self.kcfg_node_dir / f'{node_id}.json'
+ + 73 +
+[docs] + 74 def write_cfg_data( + 75 self, kcfg: KCFG, dct: dict[str, Any], deleted_nodes: Iterable[int] = (), created_nodes: Iterable[int] = () + 76 ) -> None: + 77 vacuous_nodes = [ + 78 node_id for node_id in kcfg._nodes.keys() if KCFGNodeAttr.VACUOUS in kcfg._nodes[node_id].attrs + 79 ] + 80 stuck_nodes = [node_id for node_id in kcfg._nodes.keys() if KCFGNodeAttr.STUCK in kcfg._nodes[node_id].attrs] + 81 dct['vacuous'] = vacuous_nodes + 82 dct['stuck'] = stuck_nodes + 83 for node_id in deleted_nodes: + 84 self.kcfg_node_path(node_id).unlink(missing_ok=True) + 85 for node_id in created_nodes: + 86 self.kcfg_node_path(node_id).write_text(json.dumps(kcfg._nodes[node_id].to_dict())) + 87 self.kcfg_json_path.write_text(json.dumps(dct))
+ + 88 +
+[docs] + 89 def read_cfg_data(self) -> dict[str, Any]: + 90 dct = json.loads(self.kcfg_json_path.read_text()) + 91 nodes = [self.read_node_data(node_id) for node_id in dct.get('nodes') or []] + 92 dct['nodes'] = nodes + 93 + 94 new_nodes = [] + 95 for node in dct['nodes']: + 96 attrs = [] + 97 if node['id'] in dct['vacuous']: + 98 attrs.append(KCFGNodeAttr.VACUOUS.value) + 99 if node['id'] in dct['stuck']: + 100 attrs.append(KCFGNodeAttr.STUCK.value) + 101 new_nodes.append({'id': node['id'], 'cterm': node['cterm'], 'attrs': attrs}) + 102 + 103 dct['nodes'] = new_nodes + 104 + 105 del dct['vacuous'] + 106 del dct['stuck'] + 107 + 108 return dct
+ + 109 +
+[docs] + 110 def read_node_data(self, node_id: int) -> dict[str, Any]: + 111 return json.loads(self.kcfg_node_path(node_id).read_text())
+
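The store above keeps the graph structure separate from the node terms; a sketch of the resulting on-disk layout (the directory name is an assumption):

    # store_path/
    # ├── kcfg.json     # edges, covers, splits, aliases, plus 'vacuous' and 'stuck' node ids
    # └── nodes/
    #     ├── 1.json    # one file per node, written on creation and unlinked on deletion
    #     └── 2.json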
+ + 112 + 113 +
+[docs] + 114class KCFG(Container[Union['KCFG.Node', 'KCFG.Successor']]): +
+[docs] + 115 @final + 116 @dataclass(frozen=True, order=True) + 117 class Node: + 118 id: int + 119 cterm: CTerm + 120 attrs: frozenset[NodeAttr] + 121 + 122 def __init__(self, id: int, cterm: CTerm, attrs: Iterable[NodeAttr] = ()) -> None: + 123 object.__setattr__(self, 'id', id) + 124 object.__setattr__(self, 'cterm', cterm) + 125 object.__setattr__(self, 'attrs', frozenset(attrs)) + 126 +
+[docs] + 127 def to_dict(self) -> dict[str, Any]: + 128 return {'id': self.id, 'cterm': self.cterm.to_dict(), 'attrs': [attr.value for attr in self.attrs]}
+ + 129 +
+[docs] + 130 @staticmethod + 131 def from_dict(dct: dict[str, Any]) -> KCFG.Node: + 132 return KCFG.Node(dct['id'], CTerm.from_dict(dct['cterm']), [NodeAttr(attr) for attr in dct['attrs']])
+ + 133 +
+[docs] + 134 def add_attr(self, attr: NodeAttr) -> KCFG.Node: + 135 return KCFG.Node(self.id, self.cterm, list(self.attrs) + [attr])
+ + 136 +
+[docs] + 137 def remove_attr(self, attr: NodeAttr) -> KCFG.Node: + 138 if attr not in self.attrs: + 139 raise ValueError(f'Node {self.id} does not have attribute {attr.value}') + 140 return KCFG.Node(self.id, self.cterm, self.attrs.difference([attr]))
+ + 141 +
+[docs] + 142 def discard_attr(self, attr: NodeAttr) -> KCFG.Node: + 143 return KCFG.Node(self.id, self.cterm, self.attrs.difference([attr]))
+ + 144 +
+[docs] + 145 def let(self, cterm: CTerm | None = None, attrs: Iterable[KCFGNodeAttr] | None = None) -> KCFG.Node: + 146 new_cterm = cterm if cterm is not None else self.cterm + 147 new_attrs = attrs if attrs is not None else self.attrs + 148 return KCFG.Node(self.id, new_cterm, new_attrs)
+ + 149 + 150 @property + 151 def free_vars(self) -> frozenset[str]: + 152 return frozenset(self.cterm.free_vars)
+ + 153 +
+[docs] + 154 class Successor(ABC): + 155 source: KCFG.Node + 156 + 157 def __lt__(self, other: Any) -> bool: + 158 if not isinstance(other, KCFG.Successor): + 159 return NotImplemented + 160 return self.source < other.source + 161 + 162 @property + 163 def source_vars(self) -> frozenset[str]: + 164 return frozenset(self.source.free_vars) + 165 + 166 @property + 167 @abstractmethod + 168 def targets(self) -> tuple[KCFG.Node, ...]: ... + 169 + 170 @property + 171 def target_ids(self) -> list[int]: + 172 return sorted(target.id for target in self.targets) + 173 + 174 @property + 175 def target_vars(self) -> frozenset[str]: + 176 return frozenset(set.union(set(), *(target.free_vars for target in self.targets))) + 177 +
+[docs] + 178 @abstractmethod + 179 def replace_source(self, node: KCFG.Node) -> KCFG.Successor: ...
+ + 180 +
+[docs] + 181 @abstractmethod + 182 def replace_target(self, node: KCFG.Node) -> KCFG.Successor: ...
+ + 183 +
+[docs] + 184 @abstractmethod + 185 def to_dict(self) -> dict[str, Any]: ...
+ + 186 +
+[docs] + 187 @staticmethod + 188 @abstractmethod + 189 def from_dict(dct: dict[str, Any], nodes: Mapping[int, KCFG.Node]) -> KCFG.Successor: ...
+
+ + 190 +
+[docs] + 191 class EdgeLike(Successor): + 192 source: KCFG.Node + 193 target: KCFG.Node + 194 + 195 @property + 196 def targets(self) -> tuple[KCFG.Node, ...]: + 197 return (self.target,)
+ + 198 +
+[docs] + 199 @final + 200 @dataclass(frozen=True) + 201 class Edge(EdgeLike): + 202 source: KCFG.Node + 203 target: KCFG.Node + 204 depth: int + 205 rules: tuple[str, ...] + 206 +
+[docs] + 207 def to_dict(self) -> dict[str, Any]: + 208 return { + 209 'source': self.source.id, + 210 'target': self.target.id, + 211 'depth': self.depth, + 212 'rules': list(self.rules), + 213 }
+ + 214 +
+[docs] + 215 @staticmethod + 216 def from_dict(dct: dict[str, Any], nodes: Mapping[int, KCFG.Node]) -> KCFG.Edge: + 217 return KCFG.Edge(nodes[dct['source']], nodes[dct['target']], dct['depth'], tuple(dct['rules']))
+ + 218 +
+[docs] + 219 def to_rule( + 220 self, + 221 label: str, + 222 claim: bool = False, + 223 priority: int | None = None, + 224 defunc_with: KDefinition | None = None, + 225 minimize: bool = False, + 226 ) -> KRuleLike: + 227 def is_ceil_condition(kast: KInner) -> bool: + 228 return type(kast) is KApply and kast.label.name == '#Ceil' + 229 + 230 def _simplify_config(config: KInner) -> KInner: + 231 return sort_ac_collections(inline_cell_maps(config)) + 232 + 233 sentence_id = f'{label}-{self.source.id}-TO-{self.target.id}' + 234 init_constraints = [c for c in self.source.cterm.constraints if not is_ceil_condition(c)] + 235 init_cterm = CTerm(_simplify_config(self.source.cterm.config), init_constraints) + 236 target_constraints = [c for c in self.target.cterm.constraints if not is_ceil_condition(c)] + 237 target_cterm = CTerm(_simplify_config(self.target.cterm.config), target_constraints) + 238 rule: KRuleLike + 239 if claim: + 240 rule, _ = cterm_build_claim(sentence_id, init_cterm, target_cterm) + 241 else: + 242 rule, _ = cterm_build_rule( + 243 sentence_id, init_cterm, target_cterm, priority=priority, defunc_with=defunc_with + 244 ) + 245 if minimize: + 246 rule = minimize_rule_like(rule) + 247 return rule
+ + 248 +
+[docs] + 249 def replace_source(self, node: KCFG.Node) -> KCFG.Edge: + 250 assert node.id == self.source.id + 251 return KCFG.Edge(node, self.target, self.depth, self.rules)
+ + 252 +
+[docs] + 253 def replace_target(self, node: KCFG.Node) -> KCFG.Edge: + 254 assert node.id == self.target.id + 255 return KCFG.Edge(self.source, node, self.depth, self.rules)
+
+ + 256 +
+[docs] + 257 @final + 258 @dataclass(frozen=True) + 259 class MergedEdge(EdgeLike): + 260 """Merged edge is a collection of edges that have been merged into a single edge.""" + 261 + 262 source: KCFG.Node + 263 target: KCFG.Node + 264 edges: tuple[KCFG.Edge, ...] + 265 +
+[docs] + 266 def to_dict(self) -> dict[str, Any]: + 267 return { + 268 'source': self.source.id, + 269 'target': self.target.id, + 270 'edges': [edge.to_dict() for edge in self.edges], + 271 }
+ + 272 +
+[docs] + 273 @staticmethod + 274 def from_dict(dct: dict[str, Any], nodes: Mapping[int, KCFG.Node]) -> KCFG.Successor: + 275 return KCFG.MergedEdge( + 276 nodes[dct['source']], + 277 nodes[dct['target']], + 278 tuple(KCFG.Edge.from_dict(edge, nodes) for edge in dct['edges']), + 279 )
+ + 280 +
+[docs] + 281 def replace_source(self, node: KCFG.Node) -> KCFG.Successor: + 282 assert node.id == self.source.id + 283 return KCFG.MergedEdge(node, self.target, self.edges)
+ + 284 +
+[docs] + 285 def replace_target(self, node: KCFG.Node) -> KCFG.Successor: + 286 assert node.id == self.target.id + 287 return KCFG.MergedEdge(self.source, node, self.edges)
+ + 288 +
+[docs] + 289 def to_rule(self, label: str, claim: bool = False, priority: int | None = None) -> KRuleLike: + 290 return KCFG.Edge(self.source, self.target, 1, ()).to_rule(label, claim, priority)
+
+ + 291 +
+[docs] + 292 @final + 293 @dataclass(frozen=True) + 294 class Cover(EdgeLike): + 295 source: KCFG.Node + 296 target: KCFG.Node + 297 csubst: CSubst + 298 +
+[docs] + 299 def to_dict(self) -> dict[str, Any]: + 300 return { + 301 'source': self.source.id, + 302 'target': self.target.id, + 303 'csubst': self.csubst.to_dict(), + 304 }
+ + 305 +
+[docs] + 306 @staticmethod + 307 def from_dict(dct: dict[str, Any], nodes: Mapping[int, KCFG.Node]) -> KCFG.Cover: + 308 return KCFG.Cover(nodes[dct['source']], nodes[dct['target']], CSubst.from_dict(dct['csubst']))
+ + 309 +
+[docs] + 310 def replace_source(self, node: KCFG.Node) -> KCFG.Cover: + 311 assert node.id == self.source.id + 312 return KCFG.Cover(node, self.target, self.csubst)
+ + 313 +
+[docs] + 314 def replace_target(self, node: KCFG.Node) -> KCFG.Cover: + 315 assert node.id == self.target.id + 316 return KCFG.Cover(self.source, node, self.csubst)
+
+ + 317 +
+[docs] + 318 @dataclass(frozen=True) + 319 class MultiEdge(Successor): + 320 source: KCFG.Node + 321 + 322 def __lt__(self, other: Any) -> bool: + 323 if not type(other) is type(self): + 324 return NotImplemented + 325 return (self.source, self.target_ids) < (other.source, other.target_ids) + 326 +
+[docs] + 327 @abstractmethod + 328 def with_single_target(self, target: KCFG.Node) -> KCFG.MultiEdge: ...
+
+ + 329 +
+[docs] + 330 @final + 331 @dataclass(frozen=True) + 332 class Split(MultiEdge): + 333 source: KCFG.Node + 334 _targets: tuple[tuple[KCFG.Node, CSubst], ...] + 335 + 336 def __init__(self, source: KCFG.Node, _targets: Iterable[tuple[KCFG.Node, CSubst]]) -> None: + 337 object.__setattr__(self, 'source', source) + 338 object.__setattr__(self, '_targets', tuple(_targets)) + 339 + 340 @property + 341 def targets(self) -> tuple[KCFG.Node, ...]: + 342 return tuple(target for target, _ in self._targets) + 343 + 344 @property + 345 def splits(self) -> dict[int, CSubst]: + 346 return {target.id: csubst for target, csubst in self._targets} + 347 +
+[docs] + 348 def to_dict(self) -> dict[str, Any]: + 349 return { + 350 'source': self.source.id, + 351 'targets': [ + 352 { + 353 'target': target.id, + 354 'csubst': csubst.to_dict(), + 355 } + 356 for target, csubst in self._targets + 357 ], + 358 }
+ + 359 +
+[docs] + 360 @staticmethod + 361 def from_dict(dct: dict[str, Any], nodes: Mapping[int, KCFG.Node]) -> KCFG.Split: + 362 _targets = [(nodes[target['target']], CSubst.from_dict(target['csubst'])) for target in dct['targets']] + 363 return KCFG.Split(nodes[dct['source']], tuple(_targets))
+ + 364 +
+[docs] + 365 def with_single_target(self, target: KCFG.Node) -> KCFG.Split: + 366 return KCFG.Split(self.source, ((target, self.splits[target.id]),))
+ + 367 + 368 @property + 369 def covers(self) -> tuple[KCFG.Cover, ...]: + 370 return tuple(KCFG.Cover(target, self.source, csubst) for target, csubst in self._targets) + 371 +
+[docs] + 372 def replace_source(self, node: KCFG.Node) -> KCFG.Split: + 373 assert node.id == self.source.id + 374 return KCFG.Split(node, self._targets)
+ + 375 +
+[docs] + 376 def replace_target(self, node: KCFG.Node) -> KCFG.Split: + 377 assert node.id in self.target_ids + 378 new_targets = [ + 379 (node, csubst) if node.id == target_node.id else (target_node, csubst) + 380 for target_node, csubst in self._targets + 381 ] + 382 return KCFG.Split(self.source, tuple(new_targets))
+
+ + 383 +
+[docs] + 384 @final + 385 @dataclass(frozen=True) + 386 class NDBranch(MultiEdge): + 387 source: KCFG.Node + 388 _targets: tuple[KCFG.Node, ...] + 389 rules: tuple[str, ...] + 390 + 391 def __init__(self, source: KCFG.Node, _targets: Iterable[KCFG.Node], rules: tuple[str, ...]) -> None: + 392 object.__setattr__(self, 'source', source) + 393 object.__setattr__(self, '_targets', tuple(_targets)) + 394 object.__setattr__(self, 'rules', rules) + 395 + 396 @property + 397 def targets(self) -> tuple[KCFG.Node, ...]: + 398 return self._targets + 399 +
+[docs] + 400 def to_dict(self) -> dict[str, Any]: + 401 return { + 402 'source': self.source.id, + 403 'targets': [target.id for target in self.targets], + 404 'rules': list(self.rules), + 405 }
+ + 406 +
+[docs] + 407 @staticmethod + 408 def from_dict(dct: dict[str, Any], nodes: Mapping[int, KCFG.Node]) -> KCFG.NDBranch: + 409 return KCFG.NDBranch( + 410 nodes[dct['source']], tuple([nodes[target] for target in dct['targets']]), tuple(dct['rules']) + 411 )
+ + 412 +
+[docs] + 413 def with_single_target(self, target: KCFG.Node) -> KCFG.NDBranch: + 414 return KCFG.NDBranch(self.source, (target,), ())
+ + 415 + 416 @property + 417 def edges(self) -> tuple[KCFG.Edge, ...]: + 418 return tuple(KCFG.Edge(self.source, target, 1, ()) for target in self.targets) + 419 +
+[docs] + 420 def replace_source(self, node: KCFG.Node) -> KCFG.NDBranch: + 421 assert node.id == self.source.id + 422 return KCFG.NDBranch(node, self._targets, self.rules)
+ + 423 +
+[docs] + 424 def replace_target(self, node: KCFG.Node) -> KCFG.NDBranch: + 425 assert node.id in self.target_ids + 426 new_targets = [node if node.id == target_node.id else target_node for target_node in self._targets] + 427 return KCFG.NDBranch(self.source, tuple(new_targets), self.rules)
+
+ + 428 + 429 _node_id: int + 430 _nodes: MutableMapping[int, KCFG.Node] + 431 + 432 _created_nodes: set[int] + 433 _deleted_nodes: set[int] + 434 + 435 _edges: dict[int, Edge] + 436 _merged_edges: dict[int, MergedEdge] + 437 _covers: dict[int, Cover] + 438 _splits: dict[int, Split] + 439 _ndbranches: dict[int, NDBranch] + 440 _aliases: dict[str, int] + 441 _lock: RLock + 442 + 443 _kcfg_store: KCFGStore | None + 444 + 445 def __init__(self, cfg_dir: Path | None = None, optimize_memory: bool = True) -> None: + 446 self._node_id = 1 + 447 if optimize_memory: + 448 from .store import OptimizedNodeStore + 449 + 450 self._nodes = OptimizedNodeStore() + 451 else: + 452 self._nodes = {} + 453 self._created_nodes = set() + 454 self._deleted_nodes = set() + 455 self._edges = {} + 456 self._merged_edges = {} + 457 self._covers = {} + 458 self._splits = {} + 459 self._ndbranches = {} + 460 self._aliases = {} + 461 self._lock = RLock() + 462 if cfg_dir is not None: + 463 self._kcfg_store = KCFGStore(cfg_dir) + 464 + 465 def __contains__(self, item: object) -> bool: + 466 if type(item) is KCFG.Node: + 467 return self.contains_node(item) + 468 if type(item) is KCFG.Edge: + 469 return self.contains_edge(item) + 470 if type(item) is KCFG.MergedEdge: + 471 return self.contains_merged_edge(item) + 472 if type(item) is KCFG.Cover: + 473 return self.contains_cover(item) + 474 if type(item) is KCFG.Split: + 475 return self.contains_split(item) + 476 if type(item) is KCFG.NDBranch: + 477 return self.contains_ndbranch(item) + 478 return False + 479 + 480 def __enter__(self) -> KCFG: + 481 self._lock.acquire() + 482 return self + 483 + 484 def __exit__( + 485 self, + 486 exc_type: type[BaseException] | None, + 487 exc_value: BaseException | None, + 488 traceback: TracebackType | None, + 489 ) -> bool: + 490 self._lock.release() + 491 if exc_type is None: + 492 return True + 493 return False + 494 + 495 @property + 496 def nodes(self) -> list[KCFG.Node]: + 497 return list(self._nodes.values()) + 498 + 499 @property + 500 def root(self) -> list[KCFG.Node]: + 501 return [node for node in self.nodes if self.is_root(node.id)] + 502 + 503 @property + 504 def vacuous(self) -> list[KCFG.Node]: + 505 return [node for node in self.nodes if self.is_vacuous(node.id)] + 506 + 507 @property + 508 def stuck(self) -> list[KCFG.Node]: + 509 return [node for node in self.nodes if self.is_stuck(node.id)] + 510 + 511 @property + 512 def leaves(self) -> list[KCFG.Node]: + 513 return [node for node in self.nodes if self.is_leaf(node.id)] + 514 + 515 @property + 516 def covered(self) -> list[KCFG.Node]: + 517 return [node for node in self.nodes if self.is_covered(node.id)] + 518 + 519 @property + 520 def uncovered(self) -> list[KCFG.Node]: + 521 return [node for node in self.nodes if not self.is_covered(node.id)] + 522 +
+[docs] + 523 @staticmethod + 524 def from_claim( + 525 defn: KDefinition, claim: KClaim, cfg_dir: Path | None = None, optimize_memory: bool = True + 526 ) -> tuple[KCFG, NodeIdLike, NodeIdLike]: + 527 cfg = KCFG(cfg_dir=cfg_dir, optimize_memory=optimize_memory) + 528 claim_body = claim.body + 529 claim_body = defn.instantiate_cell_vars(claim_body) + 530 claim_body = rename_generated_vars(claim_body) + 531 + 532 claim_lhs = CTerm.from_kast(extract_lhs(claim_body)).add_constraint(bool_to_ml_pred(claim.requires)) + 533 init_node = cfg.create_node(claim_lhs) + 534 + 535 claim_rhs = CTerm.from_kast(extract_rhs(claim_body)).add_constraint( + 536 bool_to_ml_pred(andBool([claim.requires, claim.ensures])) + 537 ) + 538 target_node = cfg.create_node(claim_rhs) + 539 + 540 return cfg, init_node.id, target_node.id
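A minimal sketch of building a KCFG from a claim; `defn` (a KDefinition) and `claim` (a KClaim) are assumed to be loaded elsewhere:

    cfg, init_id, target_id = KCFG.from_claim(defn, claim)
    # init_id holds the claim's LHS plus its requires-clause,
    # target_id holds the RHS plus requires andBool ensures.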
+ + 541 +
+[docs] + 542 @staticmethod + 543 def path_length(_path: Iterable[KCFG.Successor]) -> int: + 544 _path = tuple(_path) + 545 if len(_path) == 0: + 546 return 0 + 547 if type(_path[0]) is KCFG.Split or type(_path[0]) is KCFG.Cover: + 548 return KCFG.path_length(_path[1:]) + 549 elif type(_path[0]) is KCFG.NDBranch: + 550 return 1 + KCFG.path_length(_path[1:]) + 551 elif type(_path[0]) is KCFG.Edge: + 552 return _path[0].depth + KCFG.path_length(_path[1:]) + 553 elif type(_path[0]) is KCFG.MergedEdge: + 554 return min(edge.depth for edge in _path[0].edges) + KCFG.path_length(_path[1:]) # todo: check this + 555 raise ValueError(f'Cannot handle Successor type: {type(_path[0])}')
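A worked example of the length bookkeeping above: `Split` and `Cover` contribute 0, `NDBranch` contributes 1, an `Edge` contributes its depth, and a `MergedEdge` contributes the minimum depth of its member edges, so

    # path_length([Split, Edge(depth=3), NDBranch, Edge(depth=2)]) == 0 + 3 + 1 + 2 == 6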
+ + 556 +
+[docs] + 557 def extend( + 558 self, + 559 extend_result: KCFGExtendResult, + 560 node: KCFG.Node, + 561 logs: dict[int, tuple[LogEntry, ...]], + 562 *, + 563 optimize_kcfg: bool, + 564 ) -> None: + 565 + 566 def log(message: str, *, warning: bool = False) -> None: + 567 result_info = extend_result.info if type(extend_result) is Step or type(extend_result) is Branch else '' + 568 result_info_message = f': {result_info}' if result_info else '' + 569 _LOGGER.log( + 570 logging.WARNING if warning else logging.INFO, + 571 f'Extending current KCFG with the following: {message}{result_info_message}', + 572 ) + 573 + 574 match extend_result: + 575 case Vacuous(): + 576 self.add_vacuous(node.id) + 577 log(f'vacuous node: {node.id}', warning=True) + 578 + 579 case Stuck(): + 580 self.add_stuck(node.id) + 581 log(f'stuck node: {node.id}') + 582 + 583 case Abstract(cterm): + 584 new_node = self.create_node(cterm) + 585 self.create_cover(node.id, new_node.id) + 586 log(f'abstraction node: {node.id}') + 587 + 588 case Step(cterm, depth, next_node_logs, rule_labels, _): + 589 node_id = node.id + 590 next_node = self.create_node(cterm) + 591 # Optimization for steps consists of on-the-fly merging of consecutive edges and can + 592 # be performed only if the current node has a single predecessor connected by an Edge + 593 if ( + 594 optimize_kcfg + 595 and (len(predecessors := self.predecessors(target_id=node.id)) == 1) + 596 and isinstance(in_edge := predecessors[0], KCFG.Edge) + 597 ): + 598 # The existing edge is removed and the step parameters are updated accordingly + 599 self.remove_edge(in_edge.source.id, node.id) + 600 node_id = in_edge.source.id + 601 depth += in_edge.depth + 602 rule_labels = list(in_edge.rules) + rule_labels + 603 next_node_logs = logs[node.id] + next_node_logs if node.id in logs else next_node_logs + 604 self.remove_node(node.id) + 605 self.create_edge(node_id, next_node.id, depth, rule_labels) + 606 logs[next_node.id] = next_node_logs + 607 log(f'basic block at depth {depth}: {node_id} --> {next_node.id}') + 608 + 609 case Branch(branches, _): + 610 branch_node_ids = self.split_on_constraints(node.id, branches) + 611 log(f'{len(branches)} branches: {node.id} --> {branch_node_ids}') + 612 + 613 case NDBranch(cterms, next_node_logs, rule_labels): + 614 next_ids = [self.create_node(cterm).id for cterm in cterms] + 615 for i in next_ids: + 616 logs[i] = next_node_logs + 617 self.create_ndbranch(node.id, next_ids, rules=rule_labels) + 618 log(f'{len(cterms)} non-deterministic branches: {node.id} --> {next_ids}') + 619 + 620 case _: + 621 raise AssertionError()
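The effect of the `optimize_kcfg` flag in the `Step` case above, sketched on assumed nodes:

    # before:  P --(d1, rules1)--> N, and the step extends N by (d2, rules2) to a new node M
    # after:   P --(d1 + d2, rules1 + rules2)--> M, with node N removed and its logs folded into M's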
+ + 622 +
+[docs] + 623 def to_dict_no_nodes(self) -> dict[str, Any]: + 624 nodes = list(self._nodes.keys()) + 625 edges = [edge.to_dict() for edge in self.edges()] + 626 merged_edges = [merged_edge.to_dict() for merged_edge in self.merged_edges()] + 627 covers = [cover.to_dict() for cover in self.covers()] + 628 splits = [split.to_dict() for split in self.splits()] + 629 ndbranches = [ndbranch.to_dict() for ndbranch in self.ndbranches()] + 630 + 631 aliases = dict(sorted(self._aliases.items())) + 632 + 633 res = { + 634 'next': self._node_id, + 635 'nodes': nodes, + 636 'edges': edges, + 637 'merged_edges': merged_edges, + 638 'covers': covers, + 639 'splits': splits, + 640 'ndbranches': ndbranches, + 641 'aliases': aliases, + 642 } + 643 return {k: v for k, v in res.items() if v}
+ + 644 +
+[docs] + 645 def to_dict(self) -> dict[str, Any]: + 646 nodes = [node.to_dict() for node in self.nodes] + 647 edges = [edge.to_dict() for edge in self.edges()] + 648 merged_edges = [merged_edge.to_dict() for merged_edge in self.merged_edges()] + 649 covers = [cover.to_dict() for cover in self.covers()] + 650 splits = [split.to_dict() for split in self.splits()] + 651 ndbranches = [ndbranch.to_dict() for ndbranch in self.ndbranches()] + 652 + 653 aliases = dict(sorted(self._aliases.items())) + 654 + 655 res = { + 656 'next': self._node_id, + 657 'nodes': nodes, + 658 'edges': edges, + 659 'merged_edges': merged_edges, + 660 'covers': covers, + 661 'splits': splits, + 662 'ndbranches': ndbranches, + 663 'aliases': aliases, + 664 } + 665 return {k: v for k, v in res.items() if v}
+ + 666 +
+[docs] + 667 @staticmethod + 668 def from_dict(dct: Mapping[str, Any], optimize_memory: bool = True) -> KCFG: + 669 cfg = KCFG(optimize_memory=optimize_memory) + 670 + 671 for node_dict in dct.get('nodes') or []: + 672 node = KCFG.Node.from_dict(node_dict) + 673 cfg.add_node(node) + 674 + 675 max_id = max([node.id for node in cfg.nodes], default=0) + 676 cfg._node_id = dct.get('next', max_id + 1) + 677 + 678 for edge_dict in dct.get('edges') or []: + 679 edge = KCFG.Edge.from_dict(edge_dict, cfg._nodes) + 680 cfg.add_successor(edge) + 681 + 682 for edge_dict in dct.get('merged_edges') or []: + 683 merged_edge = KCFG.MergedEdge.from_dict(edge_dict, cfg._nodes) + 684 cfg.add_successor(merged_edge) + 685 + 686 for cover_dict in dct.get('covers') or []: + 687 cover = KCFG.Cover.from_dict(cover_dict, cfg._nodes) + 688 cfg.add_successor(cover) + 689 + 690 for split_dict in dct.get('splits') or []: + 691 split = KCFG.Split.from_dict(split_dict, cfg._nodes) + 692 cfg.add_successor(split) + 693 + 694 for ndbranch_dict in dct.get('ndbranches') or []: + 695 ndbranch = KCFG.NDBranch.from_dict(ndbranch_dict, cfg._nodes) + 696 cfg.add_successor(ndbranch) + 697 + 698 for alias, node_id in dct.get('aliases', {}).items(): + 699 cfg.add_alias(alias=alias, node_id=node_id) + 700 + 701 return cfg
+ + 702 +
+[docs] + 703 def aliases(self, node_id: NodeIdLike) -> list[str]: + 704 node_id = self._resolve(node_id) + 705 return [alias for alias, value in self._aliases.items() if node_id == value]
+ + 706 +
+[docs] + 707 def to_json(self) -> str: + 708 return json.dumps(self.to_dict(), sort_keys=True)
+ + 709 +
+[docs] + 710 @staticmethod + 711 def from_json(s: str, optimize_memory: bool = True) -> KCFG: + 712 return KCFG.from_dict(json.loads(s), optimize_memory=optimize_memory)
+ + 713 +
+[docs] + 714 def to_rules(self, _id: str | None = None, priority: int = 20) -> list[KRuleLike]: + 715 _id = 'BASIC-BLOCK' if _id is None else _id + 716 return [e.to_rule(_id, priority=priority) for e in self.edges()] + [ + 717 m.to_rule(_id, priority=priority) for m in self.merged_edges() + 718 ]
+ + 719 +
+[docs] + 720 def to_module( + 721 self, + 722 module_name: str | None = None, + 723 imports: Iterable[KImport] = (), + 724 priority: int = 20, + 725 att: KAtt = EMPTY_ATT, + 726 ) -> KFlatModule: + 727 module_name = 'KCFG' if module_name is None else module_name + 728 return KFlatModule(module_name, self.to_rules(priority=priority), imports=imports, att=att)
+ + 729 + 730 def _resolve_or_none(self, id_like: NodeIdLike) -> int | None: + 731 if type(id_like) is int: + 732 if id_like in self._nodes: + 733 return id_like + 734 + 735 return None + 736 + 737 if type(id_like) is not str: + 738 raise TypeError(f'Expected int or str for id_like, got: {id_like}') + 739 + 740 if id_like.startswith('@'): + 741 if id_like[1:] in self._aliases: + 742 return self._aliases[id_like[1:]] + 743 raise ValueError(f'Unknown alias: {id_like}') + 744 + 745 return None + 746 + 747 def _resolve(self, id_like: NodeIdLike) -> int: + 748 match = self._resolve_or_none(id_like) + 749 if not match: + 750 raise ValueError(f'Unknown node: {id_like}') + 751 return match + 752 +
+[docs] + 753 def node(self, node_id: NodeIdLike) -> KCFG.Node: + 754 node_id = self._resolve(node_id) + 755 return self._nodes[node_id]
+ + 756 +
+[docs] + 757 def get_node(self, node_id: NodeIdLike) -> KCFG.Node | None: + 758 resolved_id = self._resolve_or_none(node_id) + 759 if resolved_id is None: + 760 return None + 761 return self._nodes[resolved_id]
+ + 762 +
+[docs] + 763 def contains_node(self, node: KCFG.Node) -> bool: + 764 return bool(self.get_node(node.id))
+ + 765 +
+[docs] + 766 def add_node(self, node: KCFG.Node) -> None: + 767 if node.id in self._nodes: + 768 raise ValueError(f'Node with id already exists: {node.id}') + 769 self._nodes[node.id] = node + 770 self._created_nodes.add(node.id)
+ + 771 +
+[docs] + 772 def create_node(self, cterm: CTerm) -> KCFG.Node: + 773 node = KCFG.Node(self._node_id, cterm) + 774 self._node_id += 1 + 775 self._nodes[node.id] = node + 776 self._created_nodes.add(node.id) + 777 return node
+ + 778 +
+[docs] + 779 def remove_node(self, node_id: NodeIdLike) -> None: + 780 self.remove_edges_around(node_id) + 781 + 782 node_id = self._resolve(node_id) + 783 self._nodes.pop(node_id) + 784 self._deleted_nodes.add(node_id) + 785 self._created_nodes.discard(node_id)
+ + 786 +
+[docs] + 787 def remove_edges_around(self, node_id: NodeIdLike) -> None: + 788 node_id = self._resolve(node_id) + 789 + 790 self._edges = {k: s for k, s in self._edges.items() if k != node_id and node_id not in s.target_ids} + 791 self._merged_edges = { + 792 k: s for k, s in self._merged_edges.items() if k != node_id and node_id not in s.target_ids + 793 } + 794 self._covers = {k: s for k, s in self._covers.items() if k != node_id and node_id not in s.target_ids} + 795 + 796 self._splits = {k: s for k, s in self._splits.items() if k != node_id and node_id not in s.target_ids} + 797 self._ndbranches = {k: b for k, b in self._ndbranches.items() if k != node_id and node_id not in b.target_ids} + 798 + 799 for alias in [alias for alias, id in self._aliases.items() if id == node_id]: + 800 self.remove_alias(alias)
+ + 801 + 802 def _update_refs(self, node_id: int) -> None: + 803 node = self.node(node_id) + 804 for succ in self.successors(node_id): + 805 new_succ = succ.replace_source(node) + 806 if type(new_succ) is KCFG.Edge: + 807 self._edges[new_succ.source.id] = new_succ + 808 if type(new_succ) is KCFG.MergedEdge: + 809 self._merged_edges[new_succ.source.id] = new_succ + 810 if type(new_succ) is KCFG.Cover: + 811 self._covers[new_succ.source.id] = new_succ + 812 if type(new_succ) is KCFG.Split: + 813 self._splits[new_succ.source.id] = new_succ + 814 if type(new_succ) is KCFG.NDBranch: + 815 self._ndbranches[new_succ.source.id] = new_succ + 816 + 817 for pred in self.predecessors(node_id): + 818 new_pred = pred.replace_target(node) + 819 if type(new_pred) is KCFG.Edge: + 820 self._edges[new_pred.source.id] = new_pred + 821 if type(new_pred) is KCFG.MergedEdge: + 822 self._merged_edges[new_pred.source.id] = new_pred + 823 if type(new_pred) is KCFG.Cover: + 824 self._covers[new_pred.source.id] = new_pred + 825 if type(new_pred) is KCFG.Split: + 826 self._splits[new_pred.source.id] = new_pred + 827 if type(new_pred) is KCFG.NDBranch: + 828 self._ndbranches[new_pred.source.id] = new_pred + 829 +
+[docs] + 830 def remove_attr(self, node_id: NodeIdLike, attr: NodeAttr) -> None: + 831 node = self.node(node_id) + 832 new_node = node.remove_attr(attr) + 833 self.replace_node(new_node)
+ + 834 +
+[docs] + 835 def discard_attr(self, node_id: NodeIdLike, attr: NodeAttr) -> None: + 836 node = self.node(node_id) + 837 new_node = node.discard_attr(attr) + 838 self.replace_node(new_node)
+ + 839 +
+[docs] + 840 def add_attr(self, node_id: NodeIdLike, attr: NodeAttr) -> None: + 841 node = self.node(node_id) + 842 new_node = node.add_attr(attr) + 843 self.replace_node(new_node)
+ + 844 +
+[docs] + 845 def let_node( + 846 self, node_id: NodeIdLike, cterm: CTerm | None = None, attrs: Iterable[KCFGNodeAttr] | None = None + 847 ) -> None: + 848 node = self.node(node_id) + 849 new_node = node.let(cterm=cterm, attrs=attrs) + 850 self.replace_node(new_node)
+ + 851 +
+[docs] + 852 def replace_node(self, node: KCFG.Node) -> None: + 853 self._nodes[node.id] = node + 854 self._created_nodes.add(node.id) + 855 self._update_refs(node.id)
+ + 856 +
+[docs] + 857 def successors(self, source_id: NodeIdLike) -> list[Successor]: + 858 out_edges: Iterable[KCFG.Successor] = self.edges(source_id=source_id) + 859 out_merged_edges: Iterable[KCFG.Successor] = self.merged_edges(source_id=source_id) + 860 out_covers: Iterable[KCFG.Successor] = self.covers(source_id=source_id) + 861 out_splits: Iterable[KCFG.Successor] = self.splits(source_id=source_id) + 862 out_ndbranches: Iterable[KCFG.Successor] = self.ndbranches(source_id=source_id) + 863 return list(out_edges) + list(out_merged_edges) + list(out_covers) + list(out_splits) + list(out_ndbranches)
+ + 864 +
+[docs] + 865 def predecessors(self, target_id: NodeIdLike) -> list[Successor]: + 866 in_edges: Iterable[KCFG.Successor] = self.edges(target_id=target_id) + 867 in_merged_edges: Iterable[KCFG.Successor] = self.merged_edges(target_id=target_id) + 868 in_covers: Iterable[KCFG.Successor] = self.covers(target_id=target_id) + 869 in_splits: Iterable[KCFG.Successor] = self.splits(target_id=target_id) + 870 in_ndbranches: Iterable[KCFG.Successor] = self.ndbranches(target_id=target_id) + 871 return list(in_edges) + list(in_merged_edges) + list(in_covers) + list(in_splits) + list(in_ndbranches)
+ + 872 + 873 def _check_no_successors(self, source_id: NodeIdLike) -> None: + 874 if len(self.successors(source_id)) > 0: + 875 raise ValueError(f'Node already has successors: {source_id} -> {self.successors(source_id)}') + 876 + 877 def _check_no_zero_loops(self, source_id: NodeIdLike, target_ids: Iterable[NodeIdLike]) -> None: + 878 for target_id in target_ids: + 879 path = self.shortest_path_between(target_id, source_id) + 880 if path is not None and KCFG.path_length(path) == 0: + 881 raise ValueError( + 882 f'Adding successor would create zero-length loop with backedge: {source_id} -> {target_id}' + 883 ) + 884 +
+[docs] + 885 def add_successor(self, succ: KCFG.Successor) -> None: + 886 self._check_no_successors(succ.source.id) + 887 self._check_no_zero_loops(succ.source.id, succ.target_ids) + 888 if type(succ) is KCFG.Edge: + 889 self._edges[succ.source.id] = succ + 890 elif type(succ) is KCFG.MergedEdge: + 891 self._merged_edges[succ.source.id] = succ + 892 elif type(succ) is KCFG.Cover: + 893 self._covers[succ.source.id] = succ + 894 else: + 895 if len(succ.target_ids) <= 1: + 896 raise ValueError( + 897 f'Cannot create {type(succ)} node with less than 2 targets: {succ.source.id} -> {succ.target_ids}' + 898 ) + 899 if type(succ) is KCFG.Split: + 900 self._splits[succ.source.id] = succ + 901 elif type(succ) is KCFG.NDBranch: + 902 self._ndbranches[succ.source.id] = succ
+ + 903 +
+[docs] + 904 def edge(self, source_id: NodeIdLike, target_id: NodeIdLike) -> Edge | None: + 905 source_id = self._resolve(source_id) + 906 target_id = self._resolve(target_id) + 907 edge = self._edges.get(source_id, None) + 908 return edge if edge is not None and edge.target.id == target_id else None
+ + 909 +
+[docs] + 910 def edges(self, *, source_id: NodeIdLike | None = None, target_id: NodeIdLike | None = None) -> list[Edge]: + 911 source_id = self._resolve(source_id) if source_id is not None else None + 912 target_id = self._resolve(target_id) if target_id is not None else None + 913 return [ + 914 edge + 915 for edge in self._edges.values() + 916 if (source_id is None or source_id == edge.source.id) and (target_id is None or target_id == edge.target.id) + 917 ]
+ + 918 +
+[docs] + 919 def contains_edge(self, edge: Edge) -> bool: + 920 if other := self.edge(source_id=edge.source.id, target_id=edge.target.id): + 921 return edge == other + 922 return False
+ + 923 +
+[docs] + 924 def create_edge(self, source_id: NodeIdLike, target_id: NodeIdLike, depth: int, rules: Iterable[str] = ()) -> Edge: + 925 if depth <= 0: + 926 raise ValueError(f'Cannot build KCFG Edge with non-positive depth: {depth}') + 927 source = self.node(source_id) + 928 target = self.node(target_id) + 929 edge = KCFG.Edge(source, target, depth, tuple(rules)) + 930 self.add_successor(edge) + 931 return edge
+ + 932 +
+[docs] + 933 def remove_edge(self, source_id: NodeIdLike, target_id: NodeIdLike) -> None: + 934 source_id = self._resolve(source_id) + 935 target_id = self._resolve(target_id) + 936 edge = self.edge(source_id, target_id) + 937 if not edge: + 938 raise ValueError(f'Edge does not exist: {source_id} -> {target_id}') + 939 self._edges.pop(source_id)
+ + 940 +
+[docs] + 941 def merged_edge(self, source_id: NodeIdLike, target_id: NodeIdLike) -> MergedEdge | None: + 942 source_id = self._resolve(source_id) + 943 target_id = self._resolve(target_id) + 944 merged_edge = self._merged_edges.get(source_id, None) + 945 return merged_edge if merged_edge is not None and merged_edge.target.id == target_id else None
+ + 946 +
+[docs] + 947 def merged_edges( + 948 self, *, source_id: NodeIdLike | None = None, target_id: NodeIdLike | None = None + 949 ) -> list[MergedEdge]: + 950 source_id = self._resolve(source_id) if source_id is not None else None + 951 target_id = self._resolve(target_id) if target_id is not None else None + 952 return [ + 953 merged_edge + 954 for merged_edge in self._merged_edges.values() + 955 if (source_id is None or source_id == merged_edge.source.id) + 956 and (target_id is None or target_id == merged_edge.target.id) + 957 ]
+ + 958 +
+[docs] + 959 def contains_merged_edge(self, edge: MergedEdge) -> bool: + 960 if other := self.merged_edge(source_id=edge.source.id, target_id=edge.target.id): + 961 return edge == other + 962 return False
+ + 963 +
+[docs] + 964 def create_merged_edge( + 965 self, source_id: NodeIdLike, target_id: NodeIdLike, edges: Iterable[Edge | MergedEdge] + 966 ) -> MergedEdge: + 967 if len(list(edges)) == 0: + 968 raise ValueError(f'Cannot build KCFG MergedEdge with no edges: {edges}') + 969 source = self.node(source_id) + 970 target = self.node(target_id) + 971 flatten_edges: list[KCFG.Edge] = [] + 972 for edge in edges: + 973 if isinstance(edge, KCFG.MergedEdge): + 974 flatten_edges.extend(edge.edges) + 975 else: + 976 flatten_edges.append(edge) + 977 merged_edge = KCFG.MergedEdge(source, target, tuple(flatten_edges)) + 978 self.add_successor(merged_edge) + 979 return merged_edge
+ + 980 +
+[docs] + 981 def remove_merged_edge(self, source_id: NodeIdLike, target_id: NodeIdLike) -> None: + 982 source_id = self._resolve(source_id) + 983 target_id = self._resolve(target_id) + 984 merged_edge = self.merged_edge(source_id, target_id) + 985 if not merged_edge: + 986 raise ValueError(f'MergedEdge does not exist: {source_id} -> {target_id}') + 987 self._merged_edges.pop(source_id)
+ + 988 +
+[docs] + 989 def general_edges( + 990 self, *, source_id: NodeIdLike | None = None, target_id: NodeIdLike | None = None + 991 ) -> list[Edge | MergedEdge]: + 992 return self.edges(source_id=source_id, target_id=target_id) + self.merged_edges( + 993 source_id=source_id, target_id=target_id + 994 )
+ + 995 +
+[docs] + 996 def cover(self, source_id: NodeIdLike, target_id: NodeIdLike) -> Cover | None: + 997 source_id = self._resolve(source_id) + 998 target_id = self._resolve(target_id) + 999 cover = self._covers.get(source_id, None) +1000 return cover if cover is not None and cover.target.id == target_id else None
+ +1001 +
+[docs] +1002 def covers(self, *, source_id: NodeIdLike | None = None, target_id: NodeIdLike | None = None) -> list[Cover]: +1003 source_id = self._resolve(source_id) if source_id is not None else None +1004 target_id = self._resolve(target_id) if target_id is not None else None +1005 return [ +1006 cover +1007 for cover in self._covers.values() +1008 if (source_id is None or source_id == cover.source.id) +1009 and (target_id is None or target_id == cover.target.id) +1010 ]
+ +1011 +
+[docs] +1012 def contains_cover(self, cover: Cover) -> bool: +1013 if other := self.cover(source_id=cover.source.id, target_id=cover.target.id): +1014 return cover == other +1015 return False
+ +1016 +
+[docs] +1017 def create_cover(self, source_id: NodeIdLike, target_id: NodeIdLike, csubst: CSubst | None = None) -> Cover: +1018 source = self.node(source_id) +1019 target = self.node(target_id) +1020 if csubst is None: +1021 csubst = target.cterm.match_with_constraint(source.cterm) +1022 if csubst is None: +1023 raise ValueError(f'No matching between: {source.id} and {target.id}') +1024 cover = KCFG.Cover(source, target, csubst=csubst) +1025 self.add_successor(cover) +1026 return cover
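A usage sketch with assumed node ids: covering a more specific node 3 by a more general node 1 computes the constrained substitution automatically and raises ValueError if node 1 does not subsume node 3:

    cover = cfg.create_cover(source_id=3, target_id=1)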
+ +1027 +
+[docs] +1028 def remove_cover(self, source_id: NodeIdLike, target_id: NodeIdLike) -> None: +1029 source_id = self._resolve(source_id) +1030 target_id = self._resolve(target_id) +1031 cover = self.cover(source_id, target_id) +1032 if not cover: +1033 raise ValueError(f'Cover does not exist: {source_id} -> {target_id}') +1034 self._covers.pop(source_id)
+ +1035 +
+[docs] +1036 def edge_likes(self, *, source_id: NodeIdLike | None = None, target_id: NodeIdLike | None = None) -> list[EdgeLike]: +1037 return ( +1038 cast('List[KCFG.EdgeLike]', self.edges(source_id=source_id, target_id=target_id)) +1039 + cast('List[KCFG.EdgeLike]', self.covers(source_id=source_id, target_id=target_id)) +1040 + cast('List[KCFG.EdgeLike]', self.merged_edges(source_id=source_id, target_id=target_id)) +1041 )
+ +1042 +
+[docs] +1043 def add_vacuous(self, node_id: NodeIdLike) -> None: +1044 self.add_attr(node_id, KCFGNodeAttr.VACUOUS)
+ +1045 +
+[docs] +1046 def remove_vacuous(self, node_id: NodeIdLike) -> None: +1047 self.remove_attr(node_id, KCFGNodeAttr.VACUOUS)
+ +1048 +
+[docs] +1049 def discard_vacuous(self, node_id: NodeIdLike) -> None: +1050 self.discard_attr(node_id, KCFGNodeAttr.VACUOUS)
+ +1051 +
+[docs] +1052 def add_stuck(self, node_id: NodeIdLike) -> None: +1053 self.add_attr(node_id, KCFGNodeAttr.STUCK)
+ +1054 +
+[docs] +1055 def remove_stuck(self, node_id: NodeIdLike) -> None: +1056 self.remove_attr(node_id, KCFGNodeAttr.STUCK)
+ +1057 +
+[docs] +1058 def discard_stuck(self, node_id: NodeIdLike) -> None: +1059 self.discard_attr(node_id, KCFGNodeAttr.STUCK)
+ +1060 +
+[docs] +1061 def splits(self, *, source_id: NodeIdLike | None = None, target_id: NodeIdLike | None = None) -> list[Split]: +1062 source_id = self._resolve(source_id) if source_id is not None else None +1063 target_id = self._resolve(target_id) if target_id is not None else None +1064 return [ +1065 s +1066 for s in self._splits.values() +1067 if (source_id is None or source_id == s.source.id) and (target_id is None or target_id in s.target_ids) +1068 ]
+ +1069 +
+[docs] +1070 def contains_split(self, split: Split) -> bool: +1071 return split in self._splits.values()
+ +1072 +
+[docs] +1073 def create_split(self, source_id: NodeIdLike, splits: Iterable[tuple[NodeIdLike, CSubst]]) -> KCFG.Split: +1074 source_id = self._resolve(source_id) +1075 split = KCFG.Split(self.node(source_id), tuple((self.node(nid), csubst) for nid, csubst in list(splits))) +1076 self.add_successor(split) +1077 return split
+ +1078 +
+[docs] +1079 def create_split_by_nodes(self, source_id: NodeIdLike, target_ids: Iterable[NodeIdLike]) -> KCFG.Split | None: +1080 """Create a split without crafting a CSubst.""" +1081 source = self.node(source_id) +1082 targets = [self.node(nid) for nid in target_ids] +1083 try: +1084 csubsts = [not_none(source.cterm.match_with_constraint(target.cterm)) for target in targets] +1085 except ValueError: +1086 return None +1087 return self.create_split(source.id, zip(target_ids, csubsts, strict=True))
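A sketch with assumed node ids: split node 1 into the existing nodes 2 and 3, letting the KCFG compute the constrained substitutions; the call returns None when either target fails to match:

    split = cfg.create_split_by_nodes(1, [2, 3])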
+ +1088 +
+[docs] +1089 def ndbranches(self, *, source_id: NodeIdLike | None = None, target_id: NodeIdLike | None = None) -> list[NDBranch]: +1090 source_id = self._resolve(source_id) if source_id is not None else None +1091 target_id = self._resolve(target_id) if target_id is not None else None +1092 return [ +1093 b +1094 for b in self._ndbranches.values() +1095 if (source_id is None or source_id == b.source.id) and (target_id is None or target_id in b.target_ids) +1096 ]
+ +1097 +
+[docs] +1098 def contains_ndbranch(self, ndbranch: NDBranch) -> bool: +1099 return ndbranch in self._ndbranches
+ +1100 +
+[docs] +1101 def create_ndbranch( +1102 self, source_id: NodeIdLike, ndbranches: Iterable[NodeIdLike], rules: Iterable[str] = () +1103 ) -> KCFG.NDBranch: +1104 source_id = self._resolve(source_id) +1105 ndbranch = KCFG.NDBranch(self.node(source_id), tuple(self.node(nid) for nid in list(ndbranches)), tuple(rules)) +1106 self.add_successor(ndbranch) +1107 return ndbranch
+ +1108 +
+[docs] +1109 def split_on_constraints(self, source_id: NodeIdLike, constraints: Iterable[KInner]) -> list[int]: +1110 source = self.node(source_id) +1111 branch_node_ids = [self.create_node(source.cterm.add_constraint(c)).id for c in constraints] +1112 csubsts = [not_none(source.cterm.match_with_constraint(self.node(id).cterm)) for id in branch_node_ids] +1113 self.create_split(source.id, zip(branch_node_ids, csubsts, strict=True)) +1114 return branch_node_ids
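A sketch, assuming `pred` and `not_pred` are already-built ML predicates (KInner) for a condition and its negation, and `n` an existing node id:

    branch_ids = cfg.split_on_constraints(n, [pred, not_pred])
    # two new nodes are created, each a copy of node n strengthened by one constraint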
+ +1115 +
+[docs] +1116 def add_alias(self, alias: str, node_id: NodeIdLike) -> None: +1117 if '@' in alias: +1118 raise ValueError('Alias may not contain "@"') +1119 if alias in self._aliases: +1120 raise ValueError(f'Duplicate alias: {alias}') +1121 node_id = self._resolve(node_id) +1122 self._aliases[alias] = node_id
+ +1123 +
+[docs] +1124 def remove_alias(self, alias: str) -> None: +1125 if alias not in self._aliases: +1126 raise ValueError(f'Alias does not exist: {alias}') +1127 self._aliases.pop(alias)
+ +1128 +
+[docs] +1129 def is_root(self, node_id: NodeIdLike) -> bool: +1130 node_id = self._resolve(node_id) +1131 return len(self.predecessors(node_id)) == 0
+ +1132 +
+[docs] +1133 def is_vacuous(self, node_id: NodeIdLike) -> bool: +1134 return KCFGNodeAttr.VACUOUS in self.node(node_id).attrs
+ +1135 +
+[docs] +1136 def is_stuck(self, node_id: NodeIdLike) -> bool: +1137 return KCFGNodeAttr.STUCK in self.node(node_id).attrs
+ +1138 +
+[docs] +1139 def is_split(self, node_id: NodeIdLike) -> bool: +1140 node_id = self._resolve(node_id) +1141 return node_id in self._splits
+ +1142 +
+[docs] +1143 def is_ndbranch(self, node_id: NodeIdLike) -> bool: +1144 node_id = self._resolve(node_id) +1145 return node_id in self._ndbranches
+ +1146 +
+[docs] +1147 def is_leaf(self, node_id: NodeIdLike) -> bool: +1148 return len(self.successors(node_id)) == 0
+ +1149 +
+[docs] +1150 def is_covered(self, node_id: NodeIdLike) -> bool: +1151 node_id = self._resolve(node_id) +1152 return node_id in self._covers
+ +1153 +
+[docs] +1154 def prune(self, node_id: NodeIdLike, keep_nodes: Iterable[NodeIdLike] = ()) -> list[int]: +1155 nodes = self.reachable_nodes(node_id) +1156 keep_nodes = [self._resolve(nid) for nid in keep_nodes] +1157 pruned_nodes: list[int] = [] +1158 for node in nodes: +1159 if node.id not in keep_nodes: +1160 self.remove_node(node.id) +1161 pruned_nodes.append(node.id) +1162 return pruned_nodes
+ +1163 +
+[docs] +1164 def shortest_path_between( +1165 self, source_node_id: NodeIdLike, target_node_id: NodeIdLike +1166 ) -> tuple[Successor, ...] | None: +1167 paths = self.paths_between(source_node_id, target_node_id) +1168 if len(paths) == 0: +1169 return None +1170 return sorted(paths, key=(lambda path: KCFG.path_length(path)))[0]
+ +1171 +
+[docs] +1172 def shortest_distance_between(self, node_1_id: NodeIdLike, node_2_id: NodeIdLike) -> int | None: +1173 path_1 = self.shortest_path_between(node_1_id, node_2_id) +1174 path_2 = self.shortest_path_between(node_2_id, node_1_id) +1175 distance: int | None = None +1176 if path_1 is not None: +1177 distance = KCFG.path_length(path_1) +1178 if path_2 is not None: +1179 distance_2 = KCFG.path_length(path_2) +1180 if distance is None or distance_2 < distance: +1181 distance = distance_2 +1182 return distance
+ +1183 +
+[docs] +1184 def zero_depth_between(self, node_1_id: NodeIdLike, node_2_id: NodeIdLike) -> bool: +1185 _node_1_id = self._resolve(node_1_id) +1186 _node_2_id = self._resolve(node_2_id) +1187 if _node_1_id == _node_2_id: +1188 return True +1189 # Short-circuit and don't run pathing algorithm if there is no 0 length path on the first step. +1190 path_lengths = [ +1191 self.path_length([successor]) for successor in self.successors(_node_1_id) + self.successors(_node_2_id) +1192 ] +1193 if 0 not in path_lengths: +1194 return False +1195 +1196 shortest_distance = self.shortest_distance_between(_node_1_id, _node_2_id) +1197 +1198 return shortest_distance is not None and shortest_distance == 0
+ +1199 +
+[docs] +1200 def paths_between(self, source_id: NodeIdLike, target_id: NodeIdLike) -> list[tuple[Successor, ...]]: +1201 source_id = self._resolve(source_id) +1202 target_id = self._resolve(target_id) +1203 +1204 if source_id == target_id: +1205 return [()] +1206 +1207 source_successors = list(self.successors(source_id)) +1208 assert len(source_successors) <= 1 +1209 if len(source_successors) == 0: +1210 return [] +1211 +1212 paths: list[tuple[KCFG.Successor, ...]] = [] +1213 worklist: list[list[KCFG.Successor]] = [[source_successors[0]]] +1214 +1215 def _in_path(_nid: int, _path: list[KCFG.Successor]) -> bool: +1216 for succ in _path: +1217 if _nid == succ.source.id: +1218 return True +1219 if len(_path) > 0: +1220 if isinstance(_path[-1], KCFG.EdgeLike) and _path[-1].target.id == _nid: +1221 return True +1222 elif isinstance(_path[-1], KCFG.MultiEdge) and _nid in _path[-1].target_ids: +1223 return True +1224 return False +1225 +1226 while worklist: +1227 curr_path = worklist.pop() +1228 curr_successor = curr_path[-1] +1229 successors: list[KCFG.Successor] = [] +1230 +1231 if isinstance(curr_successor, KCFG.EdgeLike): +1232 if curr_successor.target.id == target_id: +1233 paths.append(tuple(curr_path)) +1234 continue +1235 else: +1236 successors = list(self.successors(curr_successor.target.id)) +1237 +1238 elif isinstance(curr_successor, KCFG.MultiEdge): +1239 if len(list(curr_successor.targets)) == 1: +1240 target = list(curr_successor.targets)[0] +1241 if target.id == target_id: +1242 paths.append(tuple(curr_path)) +1243 continue +1244 else: +1245 successors = list(self.successors(target.id)) +1246 if len(list(curr_successor.targets)) > 1: +1247 curr_path = curr_path[0:-1] +1248 successors = [curr_successor.with_single_target(target) for target in curr_successor.targets] +1249 +1250 for successor in successors: +1251 if isinstance(successor, KCFG.EdgeLike) and not _in_path(successor.target.id, curr_path): +1252 worklist.append(curr_path + [successor]) +1253 elif isinstance(successor, KCFG.MultiEdge): +1254 if len(list(successor.targets)) == 1: +1255 target = list(successor.targets)[0] +1256 if not _in_path(target.id, curr_path): +1257 worklist.append(curr_path + [successor]) +1258 elif len(list(successor.targets)) > 1: +1259 worklist.append(curr_path + [successor]) +1260 +1261 return paths
+ +1262 +
+[docs] +1263 def reachable_nodes(self, source_id: NodeIdLike, *, reverse: bool = False) -> set[KCFG.Node]: +1264 visited: set[KCFG.Node] = set() +1265 worklist: list[KCFG.Node] = [self.node(source_id)] +1266 +1267 while worklist: +1268 node = worklist.pop() +1269 +1270 if node in visited: +1271 continue +1272 +1273 visited.add(node) +1274 +1275 if not reverse: +1276 worklist.extend(target for succ in self.successors(source_id=node.id) for target in succ.targets) +1277 else: +1278 worklist.extend(succ.source for succ in self.predecessors(target_id=node.id)) +1279 +1280 return visited
+ +1281 +
+[docs] +1282 def write_cfg_data(self) -> None: +1283 assert self._kcfg_store is not None +1284 self._kcfg_store.write_cfg_data( +1285 self, self.to_dict_no_nodes(), deleted_nodes=self._deleted_nodes, created_nodes=self._created_nodes +1286 ) +1287 self._deleted_nodes.clear() +1288 self._created_nodes.clear()
+ +1289 +
+[docs] +1290 @staticmethod +1291 def read_cfg_data(cfg_dir: Path) -> KCFG: +1292 store = KCFGStore(cfg_dir) +1293 cfg = KCFG.from_dict(store.read_cfg_data()) +1294 cfg._kcfg_store = store +1295 return cfg
+ +1296 +
+[docs] +1297 @staticmethod +1298 def read_node_data(cfg_dir: Path, node_id: int) -> KCFG.Node: +1299 store = KCFGStore(cfg_dir) +1300 return KCFG.Node.from_dict(store.read_node_data(node_id))
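A persistence sketch (the directory path is an assumption; `Path` is `pathlib.Path`):

    cfg = KCFG(cfg_dir=Path('proof_dir/kcfg'))
    # ... build the graph ...
    cfg.write_cfg_data()
    reloaded = KCFG.read_cfg_data(Path('proof_dir/kcfg'))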
+
+ +1301 +1302 +
+[docs] +1303class KCFGExtendResult(ABC): ...
+ +1304 +1305 +
+[docs] +1306@final +1307@dataclass(frozen=True) +1308class Vacuous(KCFGExtendResult): ...
+ +1309 +1310 +
+[docs] +1311@final +1312@dataclass(frozen=True) +1313class Stuck(KCFGExtendResult): ...
+ +1314 +1315 +
+[docs] +1316@final +1317@dataclass(frozen=True) +1318class Abstract(KCFGExtendResult): +1319 cterm: CTerm
+ +1320 +1321 +
+[docs] +1322@final +1323@dataclass(frozen=True) +1324class Step(KCFGExtendResult): +1325 cterm: CTerm +1326 depth: int +1327 logs: tuple[LogEntry, ...] +1328 rule_labels: list[str] +1329 cut: bool = field(default=False) +1330 info: str = field(default='')
+ +1331 +1332 +
+[docs] +1333@final +1334@dataclass(frozen=True) +1335class Branch(KCFGExtendResult): +1336 constraints: tuple[KInner, ...] +1337 heuristic: bool +1338 info: str = field(default='') +1339 +1340 def __init__(self, constraints: Iterable[KInner], *, heuristic: bool = False, info: str = ''): +1341 object.__setattr__(self, 'constraints', tuple(constraints)) +1342 object.__setattr__(self, 'heuristic', heuristic) +1343 object.__setattr__(self, 'info', info)
+ +1344 +1345 +
+[docs] +1346@final +1347@dataclass(frozen=True) +1348class NDBranch(KCFGExtendResult): +1349 cterms: tuple[CTerm, ...] +1350 logs: tuple[LogEntry, ...] +1351 rule_labels: tuple[str, ...] +1352 +1353 def __init__(self, cterms: Iterable[CTerm], logs: Iterable[LogEntry], rule_labels: Iterable[str]): +1354 object.__setattr__(self, 'cterms', tuple(cterms)) +1355 object.__setattr__(self, 'logs', tuple(logs)) +1356 object.__setattr__(self, 'rule_labels', tuple(rule_labels))
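The `KCFGExtendResult` subclasses above are plain result carriers; a hedged sketch of how a custom-step implementation might construct them, where `cterm` and `constraint` are hypothetical `CTerm` and `KInner` values:

    step = Step(cterm, depth=1, logs=(), rule_labels=['MY-RULE'], cut=False, info='one custom step')
    branch = Branch([constraint], heuristic=True, info='split on a heuristic condition')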
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kcfg/minimize.html b/pyk/_modules/pyk/kcfg/minimize.html new file mode 100644 index 00000000000..535c62bb5d1 --- /dev/null +++ b/pyk/_modules/pyk/kcfg/minimize.html @@ -0,0 +1,407 @@ + + + + + + + + pyk.kcfg.minimize — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kcfg.minimize

+  1from __future__ import annotations
+  2
+  3from functools import reduce
+  4from typing import TYPE_CHECKING
+  5
+  6from pyk.cterm.cterm import cterms_anti_unify
+  7from pyk.utils import partition, single
+  8
+  9from .semantics import DefaultSemantics
+ 10
+ 11if TYPE_CHECKING:
+ 12    from collections.abc import Callable
+ 13
+ 14    from pyk.kast.outer import KDefinition
+ 15
+ 16    from .kcfg import KCFG, NodeIdLike
+ 17    from .semantics import KCFGSemantics
+ 18
+ 19
+
+[docs] + 20class KCFGMinimizer: + 21 kcfg: KCFG + 22 semantics: KCFGSemantics + 23 kdef: KDefinition | None + 24 + 25 def __init__(self, kcfg: KCFG, heuristics: KCFGSemantics | None = None, kdef: KDefinition | None = None) -> None: + 26 if heuristics is None: + 27 heuristics = DefaultSemantics() + 28 self.kcfg = kcfg + 29 self.semantics = heuristics + 30 self.kdef = kdef + 31 +
+[docs] + 32 def lift_edge(self, b_id: NodeIdLike) -> None: + 33 """Lift an edge up another edge directly preceding it. + 34 + 35 `A --M steps--> B --N steps--> C` becomes `A --(M + N) steps--> C`. Node `B` is removed. + 36 + 37 Args: + 38 b_id: the identifier of the central node `B` of a sequence of edges `A --> B --> C`. + 39 + 40 Raises: + 41 AssertionError: If the edges in question are not in place. + 42 """ + 43 # Obtain edges `A -> B`, `B -> C` + 44 a_to_b = single(self.kcfg.edges(target_id=b_id)) + 45 b_to_c = single(self.kcfg.edges(source_id=b_id)) + 46 # Remove the node `B`, effectively removing the entire initial structure + 47 self.kcfg.remove_node(b_id) + 48 # Create edge `A -> C` + 49 self.kcfg.create_edge( + 50 a_to_b.source.id, b_to_c.target.id, a_to_b.depth + b_to_c.depth, a_to_b.rules + b_to_c.rules + 51 )
+ + 52 +
+[docs] + 53 def lift_edges(self) -> bool: + 54 """Perform all possible edge lifts across the KCFG. + 55 + 56 The KCFG is transformed to an equivalent in which no further edge lifts are possible. + 57 + 58 Given the KCFG design, it is not possible for one edge lift to either disallow another or + 59 allow another that was not previously possible. Therefore, this function is guaranteed to + 60 lift all possible edges without having to loop. + 61 + 62 Returns: + 63 An indicator of whether or not at least one edge lift was performed. + 64 """ + 65 edges_to_lift = sorted( + 66 [ + 67 node.id + 68 for node in self.kcfg.nodes + 69 if len(self.kcfg.edges(source_id=node.id)) > 0 and len(self.kcfg.edges(target_id=node.id)) > 0 + 70 ] + 71 ) + 72 for node_id in edges_to_lift: + 73 self.lift_edge(node_id) + 74 return len(edges_to_lift) > 0
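For example, if a hypothetical KCFG contains the chain `1 --5 steps--> 2 --3 steps--> 3`, lifting node `2` leaves the single edge `1 --8 steps--> 3`; a minimal sketch, assuming `kcfg` already holds such a chain:

    from pyk.kcfg.minimize import KCFGMinimizer

    minimizer = KCFGMinimizer(kcfg)
    minimizer.lift_edge(2)            # 1 --(5 + 3) steps--> 3, node 2 removed
    changed = minimizer.lift_edges()  # or lift every liftable edge in one pass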
+ + 75 +
+[docs] + 76 def lift_split_edge(self, b_id: NodeIdLike) -> None: + 77 """Lift a split up an edge directly preceding it. + 78 + 79 `A --M steps--> B --[cond_1, ..., cond_N]--> [C_1, ..., C_N]` becomes + 80 `A --[cond_1, ..., cond_N]--> [A #And cond_1 --M steps--> C_1, ..., A #And cond_N --M steps--> C_N]`. + 81 Node `B` is removed. + 82 + 83 Args: + 84 b_id: The identifier of the central node `B` of the structure `A --> B --> [C_1, ..., C_N]`. + 85 + 86 Raises: + 87 AssertionError: If the structure in question is not in place. + 88 AssertionError: If any of the `cond_i` contain variables not present in `A`. + 89 """ + 90 # Obtain edge `A -> B`, split `[cond_I, C_I | I = 1..N ]` + 91 a_to_b = single(self.kcfg.edges(target_id=b_id)) + 92 a = a_to_b.source + 93 split_from_b = single(self.kcfg.splits(source_id=b_id)) + 94 ci, csubsts = list(split_from_b.splits.keys()), list(split_from_b.splits.values()) + 95 # Ensure split can be lifted soundly (i.e., that it does not introduce fresh variables) + 96 assert ( + 97 len(split_from_b.source_vars.difference(a.free_vars)) == 0 + 98 and len(split_from_b.target_vars.difference(split_from_b.source_vars)) == 0 # <-- Can we delete this check? + 99 ) +100 # Create CTerms and CSubsts corresponding to the new targets of the split +101 new_cterms = [csubst(a.cterm) for csubst in csubsts] +102 # Remove the node `B`, effectively removing the entire initial structure +103 self.kcfg.remove_node(b_id) +104 # Create the nodes `[ A #And cond_I | I = 1..N ]`. +105 ai: list[NodeIdLike] = [self.kcfg.create_node(cterm).id for cterm in new_cterms] +106 # Create the edges `[A #And cond_1 --M steps--> C_I | I = 1..N ]` +107 for i in range(len(ai)): +108 self.kcfg.create_edge(ai[i], ci[i], a_to_b.depth, a_to_b.rules) +109 # Create the split `A --[cond_1, ..., cond_N]--> [A #And cond_1, ..., A #And cond_N] +110 self.kcfg.create_split_by_nodes(a.id, ai)
+ +111 +
+[docs] +112 def lift_split_split(self, b_id: NodeIdLike) -> None: +113 """Lift a split up a split directly preceding it, joining them into a single split. +114 +115 `A --[..., cond_B, ...]--> [..., B, ...]` with `B --[cond_1, ..., cond_N]--> [C_1, ..., C_N]` becomes +116 `A --[..., cond_B #And cond_1, ..., cond_B #And cond_N, ...]--> [..., C_1, ..., C_N, ...]`. +117 Node `B` is removed. +118 +119 Args: +120 b_id: the identifier of the node `B` of the structure +121 `A --[..., cond_B, ...]--> [..., B, ...]` with `B --[cond_1, ..., cond_N]--> [C_1, ..., C_N]`. +122 +123 Raises: +124 AssertionError: If the structure in question is not in place. +125 """ +126 # Obtain splits `A --[..., cond_B, ...]--> [..., B, ...]` and +127 # `B --[cond_1, ..., cond_N]--> [C_1, ..., C_N]` +128 split_from_a, split_from_b = single(self.kcfg.splits(target_id=b_id)), single(self.kcfg.splits(source_id=b_id)) +129 splits_from_a, splits_from_b = split_from_a.splits, split_from_b.splits +130 a = split_from_a.source +131 list(splits_from_b.keys()) +132 # Ensure split can be lifted soundly (i.e., that it does not introduce fresh variables) +133 assert ( # <-- Could this be a problem when merging nodes, since that would introduce new variables? +134 len(split_from_b.source_vars.difference(a.free_vars)) == 0 +135 and len(split_from_b.target_vars.difference(split_from_b.source_vars)) == 0 +136 ) +137 # Remove the node `B`, thereby removing the two splits as well +138 splits_from_a.pop(self.kcfg.node(b_id).id) +139 self.kcfg.remove_node(b_id) +140 # Create the new split `A --[..., cond_B #And cond_1, ..., cond_B #And cond_N, ...]--> [..., C_1, ..., C_N, ...]` +141 self.kcfg.create_split_by_nodes(a.id, list(splits_from_a.keys()) + list(splits_from_b.keys()))
+ +142 +
+[docs] +143 def lift_splits(self) -> bool: +144 """Perform all possible split liftings. +145 +146 The KCFG is transformed to an equivalent in which no further split lifts are possible. +147 +148 Returns: +149 An indicator of whether or not at least one split lift was performed. +150 """ +151 +152 def _lift_split(finder: Callable, lifter: Callable) -> bool: +153 result = False +154 while True: +155 splits_to_lift = sorted( +156 [ +157 node.id +158 for node in self.kcfg.nodes +159 if (splits := self.kcfg.splits(source_id=node.id)) != [] +160 and (sources := finder(target_id=node.id)) != [] +161 and (source := single(sources).source) +162 and (split := single(splits)) +163 and len(split.source_vars.difference(source.free_vars)) == 0 +164 and len(split.target_vars.difference(split.source_vars)) == 0 +165 ] +166 ) +167 for id in splits_to_lift: +168 lifter(id) +169 result = True +170 if len(splits_to_lift) == 0: +171 break +172 return result +173 +174 def _fold_lift(result: bool, finder_lifter: tuple[Callable, Callable]) -> bool: +175 return _lift_split(finder_lifter[0], finder_lifter[1]) or result +176 +177 return reduce( +178 _fold_lift, [(self.kcfg.edges, self.lift_split_edge), (self.kcfg.splits, self.lift_split_split)], False +179 )
+ +180 +
+[docs] +181 def merge_nodes(self) -> bool: +182 """Merge targets of Split for cutting down the number of branches, using heuristics KCFGSemantics.is_mergeable. +183 +184 Side Effect: The KCFG is rewritten by the following rewrite pattern, +185 - Match: A -|Split|-> A_i -|Edge|-> B_i +186 - Rewrite: +187 - if `B_x, B_y, ..., B_z are not mergeable` then unchanged +188 - if `B_x, B_y, ..., B_z are mergeable`, then +189 - A -|Split|-> A_x or A_y or ... or A_z +190 - A_x or A_y or ... or A_z -|Edge|-> B_x or B_y or ... or B_z +191 - B_x or B_y or ... or B_z -|Split|-> B_x, B_y, ..., B_z +192 +193 Specifically, when `B_merge = B_x or B_y or ... or B_z` +194 - `or`: fresh variables in places where the configurations differ +195 - `Edge` in A_merged -|Edge|-> B_merge: list of merged edges is from A_i -|Edge|-> B_i +196 - `Split` in B_merge -|Split|-> B_x, B_y, ..., B_z: subst for it is from A -|Split|-> A_1, A_2, ..., A_n +197 :param semantics: provides the is_mergeable heuristic +198 :return: whether any merge was performed +199 """ +200 +201 def _is_mergeable(x: KCFG.Edge | KCFG.MergedEdge, y: KCFG.Edge | KCFG.MergedEdge) -> bool: +202 return self.semantics.is_mergeable(x.target.cterm, y.target.cterm) +203 +204 # ---- Match ---- +205 +206 # A -|Split|> Ai -|Edge/MergedEdge|> Mergeable Bi +207 sub_graphs: list[tuple[KCFG.Split, list[list[KCFG.Edge | KCFG.MergedEdge]]]] = [] +208 +209 for split in self.kcfg.splits(): +210 _edges = [ +211 single(self.kcfg.general_edges(source_id=ai)) +212 for ai in split.target_ids +213 if self.kcfg.general_edges(source_id=ai) +214 ] +215 _partitions = partition(_edges, _is_mergeable) +216 if len(_partitions) < len(_edges): +217 sub_graphs.append((split, _partitions)) +218 +219 if not sub_graphs: +220 return False +221 +222 # ---- Rewrite ---- +223 +224 for split, edge_partitions in sub_graphs: +225 +226 # Remove the original sub-graphs +227 for p in edge_partitions: +228 if len(p) == 1: +229 continue +230 for e in p: +231 # TODO: remove the split and edges, then safely remove the nodes. 
+232 self.kcfg.remove_edges_around(e.source.id) +233 +234 # Create A -|MergedEdge|-> Merged_Bi -|Split|-> Bi, if one edge partition covers all the splits +235 if len(edge_partitions) == 1: +236 merged_bi_cterm, merged_bi_subst = cterms_anti_unify( +237 [edge.target.cterm for edge in edge_partitions[0]], keep_values=True, kdef=self.kdef +238 ) +239 merged_bi = self.kcfg.create_node(merged_bi_cterm) +240 self.kcfg.create_merged_edge(split.source.id, merged_bi.id, edge_partitions[0]) +241 self.kcfg.create_split( +242 merged_bi.id, zip([e.target.id for e in edge_partitions[0]], merged_bi_subst, strict=True) +243 ) +244 continue +245 +246 # Create A -|Split|-> Others & Merged_Ai -|MergedEdge|-> Merged_Bi -|Split|-> Bi +247 _split_nodes: list[NodeIdLike] = [] +248 for edge_partition in edge_partitions: +249 if len(edge_partition) == 1: +250 _split_nodes.append(edge_partition[0].source.id) +251 continue +252 merged_ai_cterm, _ = cterms_anti_unify( +253 [ai2bi.source.cterm for ai2bi in edge_partition], keep_values=True, kdef=self.kdef +254 ) +255 merged_bi_cterm, merged_bi_subst = cterms_anti_unify( +256 [ai2bi.target.cterm for ai2bi in edge_partition], keep_values=True, kdef=self.kdef +257 ) +258 merged_ai = self.kcfg.create_node(merged_ai_cterm) +259 _split_nodes.append(merged_ai.id) +260 merged_bi = self.kcfg.create_node(merged_bi_cterm) +261 self.kcfg.create_merged_edge(merged_ai.id, merged_bi.id, edge_partition) +262 self.kcfg.create_split( +263 merged_bi.id, zip([ai2bi.target.id for ai2bi in edge_partition], merged_bi_subst, strict=True) +264 ) +265 self.kcfg.create_split_by_nodes(split.source.id, _split_nodes) +266 +267 return True
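Merging is driven entirely by the `is_mergeable` heuristic of the configured semantics; a hedged sketch with a deliberately permissive heuristic, where the `kcfg` instance is assumed to exist:

    from pyk.cterm import CTerm
    from pyk.kcfg.minimize import KCFGMinimizer
    from pyk.kcfg.semantics import DefaultSemantics

    class MergeEverything(DefaultSemantics):
        # Illustrative heuristic: treat any two target configurations as mergeable.
        def is_mergeable(self, c1: CTerm, c2: CTerm) -> bool:
            return True

    minimizer = KCFGMinimizer(kcfg, heuristics=MergeEverything())
    merged = minimizer.merge_nodes()  # True if at least one split's targets were merged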
+ +268 +
+[docs] +269 def minimize(self, merge: bool = False) -> None: +270 """Minimize KCFG by repeatedly performing the lifting transformations. +271 +272 The KCFG is transformed to an equivalent in which no further lifting transformations are possible. +273 The loop is designed so that each transformation is performed once in each iteration. +274 """ +275 repeat = True +276 while repeat: +277 repeat = self.lift_edges() +278 repeat = self.lift_splits() or repeat +279 +280 repeat = True +281 while repeat and merge: +282 repeat = self.merge_nodes()
+
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kcfg/semantics.html b/pyk/_modules/pyk/kcfg/semantics.html new file mode 100644 index 00000000000..cef51848a37 --- /dev/null +++ b/pyk/_modules/pyk/kcfg/semantics.html @@ -0,0 +1,217 @@ + + + + + + + + pyk.kcfg.semantics — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kcfg.semantics

+ 1from __future__ import annotations
+ 2
+ 3from abc import ABC, abstractmethod
+ 4from typing import TYPE_CHECKING
+ 5
+ 6if TYPE_CHECKING:
+ 7    from ..cterm import CTerm, CTermSymbolic
+ 8    from .kcfg import KCFGExtendResult
+ 9
+10
+
+[docs] +11class KCFGSemantics(ABC): +
+[docs] +12 @abstractmethod +13 def is_terminal(self, c: CTerm) -> bool: ...
+ +14 +15 """Check whether or not a given ``CTerm`` is terminal.""" +16 +
+[docs] +17 @abstractmethod +18 def abstract_node(self, c: CTerm) -> CTerm: ...
+ +19 +20 """Implement an abstraction mechanism.""" +21 +
+[docs] +22 @abstractmethod +23 def is_loop(self, c: CTerm) -> bool: ...
+ +24 +25 """Check whether or not the given ``CTerm`` represents a loop head.""" +26 +
+[docs] +27 @abstractmethod +28 def same_loop(self, c1: CTerm, c2: CTerm) -> bool: ...
+ +29 +30 """Check whether or not the two given ``CTerm``s represent the same loop head.""" +31 +
+[docs] +32 @abstractmethod +33 def can_make_custom_step(self, c: CTerm) -> bool: ...
+ +34 +35 """Check whether or not the semantics can make a custom step from a given ``CTerm``.""" +36 +
+[docs] +37 @abstractmethod +38 def custom_step(self, c: CTerm, cs: CTermSymbolic) -> KCFGExtendResult | None: ...
+ +39 +40 """Implement a custom semantic step.""" +41 +
+[docs] +42 @abstractmethod +43 def is_mergeable(self, c1: CTerm, c2: CTerm) -> bool: ...
+ +44 +45 """Check whether or not the two given ``CTerm``s are mergeable. Must be transitive, commutative, and reflexive."""
+ +46 +47 +
+[docs] +48class DefaultSemantics(KCFGSemantics): +
+[docs] +49 def is_terminal(self, c: CTerm) -> bool: +50 return False
+ +51 +
+[docs] +52 def abstract_node(self, c: CTerm) -> CTerm: +53 return c
+ +54 +
+[docs] +55 def is_loop(self, c: CTerm) -> bool: +56 return False
+ +57 +
+[docs] +58 def same_loop(self, c1: CTerm, c2: CTerm) -> bool: +59 return False
+ +60 +
+[docs] +61 def can_make_custom_step(self, c: CTerm) -> bool: +62 return False
+ +63 +
+[docs] +64 def custom_step(self, c: CTerm, cs: CTermSymbolic) -> KCFGExtendResult | None: +65 return None
+ +66 +
+[docs] +67 def is_mergeable(self, c1: CTerm, c2: CTerm) -> bool: +68 return False
+
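Concrete projects typically subclass `DefaultSemantics` and override only the hooks they need; a hedged sketch in which the loop-detection logic is purely illustrative:

    from pyk.cterm import CTerm
    from pyk.kcfg.semantics import DefaultSemantics

    class MySemantics(DefaultSemantics):
        def is_loop(self, c: CTerm) -> bool:
            # Illustrative only: a real semantics would inspect the relevant cells of the configuration.
            return True

        def same_loop(self, c1: CTerm, c2: CTerm) -> bool:
            return c1.config == c2.config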
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kcfg/show.html b/pyk/_modules/pyk/kcfg/show.html new file mode 100644 index 00000000000..09b71825c1e --- /dev/null +++ b/pyk/_modules/pyk/kcfg/show.html @@ -0,0 +1,630 @@ + + + + + + + + pyk.kcfg.show — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kcfg.show

+  1from __future__ import annotations
+  2
+  3import logging
+  4from typing import TYPE_CHECKING
+  5
+  6from graphviz import Digraph
+  7
+  8from ..kast.inner import KApply, KRewrite, top_down
+  9from ..kast.manip import (
+ 10    flatten_label,
+ 11    inline_cell_maps,
+ 12    minimize_rule_like,
+ 13    minimize_term,
+ 14    ml_pred_to_bool,
+ 15    push_down_rewrites,
+ 16    remove_generated_cells,
+ 17    sort_ac_collections,
+ 18)
+ 19from ..kast.outer import KRule
+ 20from ..prelude.k import DOTS
+ 21from ..prelude.ml import mlAnd
+ 22from ..utils import add_indent, ensure_dir_path
+ 23from .kcfg import KCFG
+ 24
+ 25if TYPE_CHECKING:
+ 26    from collections.abc import Iterable
+ 27    from pathlib import Path
+ 28    from typing import Final
+ 29
+ 30    from ..cterm import CSubst
+ 31    from ..kast import KInner
+ 32    from ..kast.outer import KFlatModule, KSentence
+ 33    from ..ktool.kprint import KPrint
+ 34    from .kcfg import NodeIdLike
+ 35
+ 36_LOGGER: Final = logging.getLogger(__name__)
+ 37
+ 38
+
+[docs] + 39class NodePrinter: + 40 kprint: KPrint + 41 full_printer: bool + 42 minimize: bool + 43 + 44 def __init__(self, kprint: KPrint, full_printer: bool = False, minimize: bool = False): + 45 self.kprint = kprint + 46 self.full_printer = full_printer + 47 self.minimize = minimize + 48 +
+[docs] + 49 def print_node(self, kcfg: KCFG, node: KCFG.Node) -> list[str]: + 50 attrs = self.node_attrs(kcfg, node) + 51 attr_str = ' (' + ', '.join(attrs) + ')' if attrs else '' + 52 node_strs = [f'{node.id}{attr_str}'] + 53 if self.full_printer: + 54 kast = node.cterm.kast + 55 if self.minimize: + 56 kast = minimize_term(kast) + 57 node_strs.extend(' ' + line for line in self.kprint.pretty_print(kast).split('\n')) + 58 return node_strs
+ + 59 +
+[docs] + 60 def node_attrs(self, kcfg: KCFG, node: KCFG.Node) -> list[str]: + 61 attrs = [] + 62 if kcfg.is_root(node.id): + 63 attrs.append('root') + 64 if kcfg.is_stuck(node.id): + 65 attrs.append('stuck') + 66 if kcfg.is_vacuous(node.id): + 67 attrs.append('vacuous') + 68 if kcfg.is_leaf(node.id): + 69 attrs.append('leaf') + 70 if kcfg.is_split(node.id): + 71 attrs.append('split') + 72 attrs.extend(['@' + alias for alias in sorted(kcfg.aliases(node.id))]) + 73 return attrs
+
+ + 74 + 75 +
+[docs] + 76class KCFGShow: + 77 kprint: KPrint + 78 node_printer: NodePrinter + 79 + 80 def __init__(self, kprint: KPrint, node_printer: NodePrinter | None = None): + 81 self.kprint = kprint + 82 self.node_printer = node_printer if node_printer is not None else NodePrinter(kprint) + 83 +
+[docs] + 84 def node_short_info(self, kcfg: KCFG, node: KCFG.Node) -> list[str]: + 85 return self.node_printer.print_node(kcfg, node)
+ + 86 +
+[docs] + 87 @staticmethod + 88 def hide_cells(term: KInner, omit_cells: Iterable[str]) -> KInner: + 89 def _hide_cells(_k: KInner) -> KInner: + 90 if type(_k) == KApply and _k.label.name in omit_cells: + 91 return DOTS + 92 return _k + 93 + 94 if omit_cells: + 95 return top_down(_hide_cells, term) + 96 return term
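`hide_cells` rewrites every application of a listed cell label to `DOTS`; a hedged example where both the node id and the cell label name are assumptions about the definition at hand:

    term = kcfg.node(1).cterm.kast
    slim = KCFGShow.hide_cells(term, ['<generatedCounter>'])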
+ + 97 +
+[docs] + 98 @staticmethod + 99 def simplify_config(config: KInner, omit_cells: Iterable[str]) -> KInner: +100 config = inline_cell_maps(config) +101 config = sort_ac_collections(config) +102 config = KCFGShow.hide_cells(config, omit_cells) +103 return config
+ +104 +
+[docs] +105 @staticmethod +106 def make_unique_segments(segments: Iterable[tuple[str, Iterable[str]]]) -> Iterable[tuple[str, Iterable[str]]]: +107 _segments = [] +108 used_ids = [] +109 for id, seg_lines in segments: +110 suffix = '' +111 counter = 0 +112 while f'{id}{suffix}' in used_ids: +113 suffix = f'_{counter}' +114 counter += 1 +115 new_id = f'{id}{suffix}' +116 used_ids.append(new_id) +117 _segments.append((f'{new_id}', [l.rstrip() for l in seg_lines])) +118 return _segments
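`make_unique_segments` only disambiguates repeated identifiers by appending a counter; for example:

    segs = KCFGShow.make_unique_segments([('node_1', ['a']), ('node_1', ['b'])])
    # -> [('node_1', ['a']), ('node_1_0', ['b'])]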
+ +119 +
+[docs] +120 def pretty_segments(self, kcfg: KCFG, minimize: bool = True) -> Iterable[tuple[str, Iterable[str]]]: +121 """Return a pretty version of the KCFG in segments. +122 +123 Each segment is a tuple of an identifier and a list of lines to be printed for that segment (Tuple[str, Iterable[str]). +124 The identifier tells you whether that segment is for a given node, edge, or just pretty spacing ('unknown'). +125 This is useful for applications which want to pretty print in chunks, so that they can know which printed region corresponds to each node/edge. +126 """ +127 processed_nodes: list[KCFG.Node] = [] +128 ret_lines: list[tuple[str, list[str]]] = [] +129 +130 def _multi_line_print( +131 label: str, lines: list[str], default: str = 'None', indent: int = 4, max_width: int | None = None +132 ) -> list[str]: +133 ret_lines = [] +134 if len(lines) == 0: +135 ret_lines.append(f'{label}: {default}') +136 else: +137 ret_lines.append(f'{label}:') +138 ret_lines.extend([f'{indent * " "}{line}' for line in lines]) +139 if max_width is not None: +140 ret_lines = [ +141 ret_line if len(ret_line) <= max_width else ret_line[0:max_width] + '...' for ret_line in ret_lines +142 ] +143 return ret_lines +144 +145 def _print_csubst( +146 csubst: CSubst, subst_first: bool = False, indent: int = 4, minimize: bool = False +147 ) -> list[str]: +148 max_width = 78 if minimize else None +149 _constraint_strs = [ +150 self.kprint.pretty_print(ml_pred_to_bool(constraint, unsafe=True)) for constraint in csubst.constraints +151 ] +152 constraint_strs = _multi_line_print('constraint', _constraint_strs, 'true') +153 if len(csubst.subst.minimize()) > 0 and minimize: +154 subst_strs = ['subst: ...'] +155 else: +156 _subst_strs = [ +157 line +158 for k, v in csubst.subst.minimize().items() +159 for line in f'{k} <- {self.kprint.pretty_print(v)}'.split('\n') +160 ] +161 subst_strs = _multi_line_print('subst', _subst_strs, '.Subst', max_width=max_width) +162 if subst_first: +163 return subst_strs + constraint_strs +164 return constraint_strs + subst_strs +165 +166 def _print_node(node: KCFG.Node) -> list[str]: +167 return self.node_short_info(kcfg, node) +168 +169 def _print_edge(edge: KCFG.Edge) -> list[str]: +170 if edge.depth == 1: +171 return ['(' + str(edge.depth) + ' step)'] +172 else: +173 return ['(' + str(edge.depth) + ' steps)'] +174 +175 def _print_merged_edge(merged_edge: KCFG.MergedEdge) -> list[str]: +176 res = '(' +177 for edge in merged_edge.edges: +178 res += f'{edge.depth}|' +179 res = res[:-1] + ' steps)' +180 return [res] if len(res) < 78 else ['(merged edge)'] +181 +182 def _print_cover(cover: KCFG.Cover) -> Iterable[str]: +183 return _print_csubst(cover.csubst, subst_first=False, indent=4, minimize=minimize) +184 +185 def _print_split_edge(split: KCFG.Split, target_id: int) -> list[str]: +186 return _print_csubst(split.splits[target_id], subst_first=True, indent=4, minimize=minimize) +187 +188 def _print_subgraph(indent: str, curr_node: KCFG.Node, prior_on_trace: list[KCFG.Node]) -> None: +189 processed = curr_node in processed_nodes +190 processed_nodes.append(curr_node) +191 successors = list(kcfg.successors(curr_node.id)) +192 +193 curr_node_strs = _print_node(curr_node) +194 +195 ret_node_lines = [] +196 suffix = [] +197 elbow = '├─' +198 node_indent = '│ ' +199 if kcfg.is_root(curr_node.id): +200 elbow = '┌─' +201 elif processed or not successors: +202 elbow = '└─' +203 node_indent = ' ' +204 if curr_node in prior_on_trace: +205 suffix = ['(looped back)', ''] +206 elif processed and not 
kcfg.is_leaf(curr_node.id): +207 suffix = ['(continues as previously)', ''] +208 else: +209 suffix = [''] +210 ret_node_lines.append(indent + elbow + ' ' + curr_node_strs[0]) +211 ret_node_lines.extend(add_indent(indent + node_indent, curr_node_strs[1:])) +212 ret_node_lines.extend(add_indent(indent + ' ', suffix)) +213 ret_lines.append((f'node_{curr_node.id}', ret_node_lines)) +214 +215 if processed or not successors: +216 return +217 successor = successors[0] +218 +219 if isinstance(successor, KCFG.MultiEdge): +220 ret_lines.append(('unknown', [f'{indent}┃'])) +221 multiedge_label = '1 step' if type(successor) is KCFG.NDBranch else 'branch' +222 multiedge_id = 'ndbranch' if type(successor) is KCFG.NDBranch else 'split' +223 ret_lines.append(('unknown', [f'{indent}┃ ({multiedge_label})'])) +224 +225 for target in successor.targets[:-1]: +226 if type(successor) is KCFG.Split: +227 ret_edge_lines = _print_split_edge(successor, target.id) +228 ret_edge_lines = [indent + '┣━━┓ ' + ret_edge_lines[0]] + add_indent( +229 indent + '┃ ┃ ', ret_edge_lines[1:] +230 ) +231 elif type(successor) is KCFG.NDBranch: +232 ret_edge_lines = [indent + '┣━━┓ '] +233 else: +234 raise AssertionError() +235 ret_edge_lines.append(indent + '┃ │') +236 ret_lines.append((f'{multiedge_id}_{curr_node.id}_{target.id}', ret_edge_lines)) +237 _print_subgraph(indent + '┃ ', target, prior_on_trace + [curr_node]) +238 target = successor.targets[-1] +239 if type(successor) is KCFG.Split: +240 ret_edge_lines = _print_split_edge(successor, target.id) +241 ret_edge_lines = [indent + '┗━━┓ ' + ret_edge_lines[0]] + add_indent( +242 indent + ' ┃ ', ret_edge_lines[1:] +243 ) +244 elif type(successor) is KCFG.NDBranch: +245 ret_edge_lines = [indent + '┗━━┓ '] +246 else: +247 raise AssertionError() +248 ret_edge_lines.append(indent + ' │') +249 ret_lines.append((f'{multiedge_id}_{curr_node.id}_{target.id}', ret_edge_lines)) +250 _print_subgraph(indent + ' ', target, prior_on_trace + [curr_node]) +251 +252 elif isinstance(successor, KCFG.EdgeLike): +253 ret_lines.append(('unknown', [f'{indent}│'])) +254 +255 if type(successor) is KCFG.Edge: +256 ret_edge_lines = [] +257 ret_edge_lines.extend(add_indent(indent + '│ ', _print_edge(successor))) +258 ret_lines.append((f'edge_{successor.source.id}_{successor.target.id}', ret_edge_lines)) +259 +260 elif type(successor) is KCFG.MergedEdge: +261 ret_edge_lines = [] +262 ret_edge_lines.extend(add_indent(indent + '│ ', _print_merged_edge(successor))) +263 ret_lines.append((f'merged_edge_{successor.source.id}_{successor.target.id}', ret_edge_lines)) +264 +265 elif type(successor) is KCFG.Cover: +266 ret_edge_lines = [] +267 ret_edge_lines.extend(add_indent(indent + '┊ ', _print_cover(successor))) +268 ret_lines.append((f'cover_{successor.source.id}_{successor.target.id}', ret_edge_lines)) +269 +270 _print_subgraph(indent, successor.target, prior_on_trace + [curr_node]) +271 +272 def _sorted_init_nodes() -> tuple[list[KCFG.Node], list[KCFG.Node]]: +273 sorted_init_nodes = sorted(node for node in kcfg.nodes if node not in processed_nodes) +274 init_nodes = [] +275 init_leaf_nodes = [] +276 remaining_nodes = [] +277 for node in sorted_init_nodes: +278 if kcfg.is_root(node.id): +279 if kcfg.is_leaf(node.id): +280 init_leaf_nodes.append(node) +281 else: +282 init_nodes.append(node) +283 else: +284 remaining_nodes.append(node) +285 return (init_nodes + init_leaf_nodes, remaining_nodes) +286 +287 init, _ = _sorted_init_nodes() +288 while init: +289 ret_lines.append(('unknown', [''])) +290 
_print_subgraph('', init[0], []) +291 init, _ = _sorted_init_nodes() +292 _, remaining = _sorted_init_nodes() +293 if remaining: +294 ret_lines.append(('unknown', ['', 'Remaining Nodes:'])) +295 for node in remaining: +296 ret_node_lines = [''] + _print_node(node) +297 ret_lines.append((f'node_{node.id}', ret_node_lines)) +298 +299 return KCFGShow.make_unique_segments(ret_lines)
+ +300 +
+[docs] +301 def pretty( +302 self, +303 kcfg: KCFG, +304 minimize: bool = True, +305 ) -> Iterable[str]: +306 return (line for _, seg_lines in self.pretty_segments(kcfg, minimize=minimize) for line in seg_lines)
+ +307 +
+[docs] +308 def to_module( +309 self, +310 cfg: KCFG, +311 module_name: str | None = None, +312 omit_cells: Iterable[str] = (), +313 parseable_output: bool = True, +314 ) -> KFlatModule: +315 def _process_sentence(sent: KSentence) -> KSentence: +316 if type(sent) is KRule: +317 sent = sent.let(body=KCFGShow.hide_cells(sent.body, omit_cells)) +318 if parseable_output: +319 sent = sent.let(body=remove_generated_cells(sent.body)) +320 sent = minimize_rule_like(sent) +321 return sent +322 +323 module = cfg.to_module(module_name) +324 return module.let(sentences=[_process_sentence(sent) for sent in module.sentences])
+ +325 +
+[docs] +326 def show( +327 self, +328 cfg: KCFG, +329 nodes: Iterable[NodeIdLike] = (), +330 node_deltas: Iterable[tuple[NodeIdLike, NodeIdLike]] = (), +331 to_module: bool = False, +332 minimize: bool = True, +333 sort_collections: bool = False, +334 omit_cells: Iterable[str] = (), +335 module_name: str | None = None, +336 ) -> list[str]: +337 res_lines: list[str] = [] +338 res_lines += self.pretty(cfg, minimize=minimize) +339 +340 nodes_printed = False +341 +342 for node_id in nodes: +343 nodes_printed = True +344 kast = cfg.node(node_id).cterm.kast +345 kast = KCFGShow.hide_cells(kast, omit_cells) +346 if minimize: +347 kast = minimize_term(kast) +348 res_lines.append('') +349 res_lines.append('') +350 res_lines.append(f'Node {node_id}:') +351 res_lines.append('') +352 res_lines.append(self.kprint.pretty_print(kast, sort_collections=sort_collections)) +353 res_lines.append('') +354 +355 for node_id_1, node_id_2 in node_deltas: +356 nodes_printed = True +357 config_1 = KCFGShow.simplify_config(cfg.node(node_id_1).cterm.config, omit_cells) +358 config_2 = KCFGShow.simplify_config(cfg.node(node_id_2).cterm.config, omit_cells) +359 config_delta = push_down_rewrites(KRewrite(config_1, config_2)) +360 if minimize: +361 config_delta = minimize_term(config_delta) +362 res_lines.append('') +363 res_lines.append('') +364 res_lines.append(f'State Delta {node_id_1} => {node_id_2}:') +365 res_lines.append('') +366 res_lines.append(self.kprint.pretty_print(config_delta, sort_collections=sort_collections)) +367 res_lines.append('') +368 +369 if not (nodes_printed): +370 res_lines.append('') +371 res_lines.append('') +372 res_lines.append('') +373 +374 if to_module: +375 module = self.to_module(cfg, module_name, omit_cells=omit_cells) +376 res_lines.append(self.kprint.pretty_print(module, sort_collections=sort_collections)) +377 +378 return res_lines
+ +379 +
+[docs] +380 def dot(self, kcfg: KCFG) -> Digraph: +381 def _short_label(label: str) -> str: +382 return '\n'.join( +383 [ +384 label_line if len(label_line) < 100 else (label_line[0:100] + ' ...') +385 for label_line in label.split('\n') +386 ] +387 ) +388 +389 graph = Digraph() +390 +391 for node in kcfg.nodes: +392 label = '\n'.join(self.node_short_info(kcfg, node)) +393 class_attrs = ' '.join(self.node_printer.node_attrs(kcfg, node)) +394 attrs = {'class': class_attrs} if class_attrs else {} +395 graph.node(name=node.id, label=label, **attrs) +396 +397 for edge in kcfg.edges(): +398 depth = edge.depth +399 label = f'{depth} steps' +400 graph.edge(tail_name=edge.source.id, head_name=edge.target.id, label=f' {label} ') +401 +402 for cover in kcfg.covers(): +403 label = ', '.join( +404 f'{k} |-> {self.kprint.pretty_print(v)}' for k, v in cover.csubst.subst.minimize().items() +405 ) +406 label = _short_label(label) +407 attrs = {'class': 'abstraction', 'style': 'dashed'} +408 graph.edge(tail_name=cover.source.id, head_name=cover.target.id, label=f' {label} ', **attrs) +409 +410 for split in kcfg.splits(): +411 for target_id, csubst in split.splits.items(): +412 label = '\n#And'.join( +413 f'{self.kprint.pretty_print(v)}' for v in split.source.cterm.constraints + csubst.constraints +414 ) +415 graph.edge(tail_name=split.source.id, head_name=target_id, label=f' {label} ') +416 +417 for ndbranch in kcfg.ndbranches(): +418 for target in ndbranch.target_ids: +419 label = '1 step' +420 graph.edge(tail_name=ndbranch.source.id, head_name=target, label=f' {label} ') +421 +422 return graph
+ +423 +
+[docs] +424 def dump(self, cfgid: str, cfg: KCFG, dump_dir: Path, dot: bool = False) -> None: +425 ensure_dir_path(dump_dir) +426 +427 cfg_file = dump_dir / f'{cfgid}.json' +428 cfg_file.write_text(cfg.to_json()) +429 _LOGGER.info(f'Wrote CFG file {cfgid}: {cfg_file}') +430 +431 if dot: +432 cfg_dot = self.dot(cfg) +433 dot_file = dump_dir / f'{cfgid}.dot' +434 dot_file.write_text(cfg_dot.source) +435 _LOGGER.info(f'Wrote DOT file {cfgid}: {dot_file}') +436 +437 nodes_dir = dump_dir / 'nodes' +438 ensure_dir_path(nodes_dir) +439 for node in cfg.nodes: +440 node_file = nodes_dir / f'config_{node.id}.txt' +441 node_minimized_file = nodes_dir / f'config_minimized_{node.id}.txt' +442 node_constraint_file = nodes_dir / f'constraint_{node.id}.txt' +443 +444 config = node.cterm.config +445 if not node_file.exists(): +446 node_file.write_text(self.kprint.pretty_print(config)) +447 _LOGGER.info(f'Wrote node file {cfgid}: {node_file}') +448 config = minimize_term(config) +449 if not node_minimized_file.exists(): +450 node_minimized_file.write_text(self.kprint.pretty_print(config)) +451 _LOGGER.info(f'Wrote node file {cfgid}: {node_minimized_file}') +452 if not node_constraint_file.exists(): +453 constraint = mlAnd(node.cterm.constraints) +454 node_constraint_file.write_text(self.kprint.pretty_print(constraint)) +455 _LOGGER.info(f'Wrote node file {cfgid}: {node_constraint_file}') +456 +457 edges_dir = dump_dir / 'edges' +458 ensure_dir_path(edges_dir) +459 for edge in cfg.edges(): +460 edge_file = edges_dir / f'config_{edge.source.id}_{edge.target.id}.txt' +461 edge_minimized_file = edges_dir / f'config_minimized_{edge.source.id}_{edge.target.id}.txt' +462 +463 config = push_down_rewrites(KRewrite(edge.source.cterm.config, edge.target.cterm.config)) +464 if not edge_file.exists(): +465 edge_file.write_text(self.kprint.pretty_print(config)) +466 _LOGGER.info(f'Wrote edge file {cfgid}: {edge_file}') +467 config = minimize_term(config) +468 if not edge_minimized_file.exists(): +469 edge_minimized_file.write_text(self.kprint.pretty_print(config)) +470 _LOGGER.info(f'Wrote edge file {cfgid}: {edge_minimized_file}') +471 +472 covers_dir = dump_dir / 'covers' +473 ensure_dir_path(covers_dir) +474 for cover in cfg.covers(): +475 cover_file = covers_dir / f'config_{cover.source.id}_{cover.target.id}.txt' +476 cover_constraint_file = covers_dir / f'constraint_{cover.source.id}_{cover.target.id}.txt' +477 +478 subst_equalities = flatten_label( +479 '#And', cover.csubst.pred(sort_with=self.kprint.definition, constraints=False) +480 ) +481 +482 if not cover_file.exists(): +483 cover_file.write_text('\n'.join(self.kprint.pretty_print(se) for se in subst_equalities)) +484 _LOGGER.info(f'Wrote cover file {cfgid}: {cover_file}') +485 if not cover_constraint_file.exists(): +486 cover_constraint_file.write_text(self.kprint.pretty_print(cover.csubst.constraint)) +487 _LOGGER.info(f'Wrote cover file {cfgid}: {cover_constraint_file}')
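A hedged usage sketch of `KCFGShow` as a whole; `kprint` is assumed to be a `KPrint` for the definition the KCFG was built against, and the output directory is illustrative:

    from pathlib import Path

    shower = KCFGShow(kprint, node_printer=NodePrinter(kprint, full_printer=False))
    print('\n'.join(shower.show(kcfg)))      # textual behavior tree plus any requested nodes/deltas
    graph = shower.dot(kcfg)                 # graphviz.Digraph of the KCFG
    shower.dump('my-proof', kcfg, Path('out'), dot=True)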
+
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kcfg/store.html b/pyk/_modules/pyk/kcfg/store.html new file mode 100644 index 00000000000..c2efeb48c27 --- /dev/null +++ b/pyk/_modules/pyk/kcfg/store.html @@ -0,0 +1,237 @@ + + + + + + + + pyk.kcfg.store — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kcfg.store

+  1from __future__ import annotations
+  2
+  3import threading
+  4from abc import ABC, abstractmethod
+  5from collections.abc import Hashable, MutableMapping
+  6from dataclasses import dataclass
+  7from typing import TYPE_CHECKING, Generic, TypeVar, final
+  8
+  9from ..cterm import CTerm
+ 10from ..kast.inner import KApply, KSequence, KToken, KVariable, bottom_up_with_summary
+ 11from .kcfg import KCFG
+ 12
+ 13if TYPE_CHECKING:
+ 14    from collections.abc import Iterator
+ 15
+ 16    from ..kast.inner import KInner, KLabel
+ 17
+ 18
+ 19A = TypeVar('A', bound=Hashable)
+ 20
+ 21
+
+[docs] + 22class OptimizedNodeStore(MutableMapping[int, KCFG.Node]): + 23 _nodes: dict[int, KCFG.Node] + 24 _optimized_terms: _Cache[_OptInner] + 25 _klabels: _Cache[KLabel] + 26 _terms: list[KInner] + 27 + 28 _lock: threading.Lock + 29 + 30 def __init__(self) -> None: + 31 self._nodes = {} + 32 self._optimized_terms = _Cache() + 33 self._klabels = _Cache() + 34 self._terms = [] + 35 + 36 self._lock = threading.Lock() + 37 + 38 def __getitem__(self, key: int) -> KCFG.Node: + 39 return self._nodes[key] + 40 + 41 def __setitem__(self, key: int, node: KCFG.Node) -> None: + 42 old_cterm = node.cterm + 43 new_config = self._optimize(old_cterm.config) + 44 new_constraints = tuple(self._optimize(c) for c in old_cterm.constraints) + 45 new_node = KCFG.Node(node.id, CTerm(new_config, new_constraints), attrs=node.attrs) + 46 self._nodes[key] = new_node + 47 + 48 def __delitem__(self, key: int) -> None: + 49 del self._nodes[key] + 50 + 51 def __iter__(self) -> Iterator[int]: + 52 return iter(self._nodes) + 53 + 54 def __len__(self) -> int: + 55 return len(self._nodes) + 56 + 57 def _optimize(self, term: KInner) -> KInner: + 58 def optimizer(to_optimize: KInner, children: list[int]) -> tuple[KInner, int]: + 59 if isinstance(to_optimize, KToken) or isinstance(to_optimize, KVariable): + 60 optimized_id = self._cache(_OptBasic(to_optimize)) + 61 elif isinstance(to_optimize, KApply): + 62 klabel_id = self._klabels.cache(to_optimize.label) + 63 optimized_id = self._cache(_OptApply(klabel_id, tuple(children))) + 64 elif isinstance(to_optimize, KSequence): + 65 optimized_id = self._cache(_OptKSequence(tuple(children))) + 66 else: + 67 raise ValueError('Unknown term type: ' + str(type(to_optimize))) + 68 return (self._terms[optimized_id], optimized_id) + 69 + 70 with self._lock: + 71 optimized, _ = bottom_up_with_summary(optimizer, term) + 72 return optimized + 73 + 74 def _cache(self, term: _OptInner) -> int: + 75 id = self._optimized_terms.cache(term) + 76 assert id <= len(self._terms) + 77 if id == len(self._terms): + 78 self._terms.append(term.build(self._klabels, self._terms)) + 79 return id
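`OptimizedNodeStore` behaves like an ordinary `int -> KCFG.Node` mapping while hash-consing the stored terms; a minimal sketch, assuming `kcfg` is an existing `KCFG`:

    from pyk.kcfg.store import OptimizedNodeStore

    store = OptimizedNodeStore()
    node = kcfg.node(1)       # hypothetical existing node
    store[node.id] = node     # stored with structurally shared subterms
    same = store[node.id]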
+ + 80 + 81 + 82class _Cache(Generic[A]): + 83 _value_to_id: dict[A, int] + 84 _values: list[A] + 85 + 86 def __init__(self) -> None: + 87 self._value_to_id = {} + 88 self._values = [] + 89 + 90 def cache(self, value: A) -> int: + 91 idx = self._value_to_id.get(value) + 92 if idx is not None: + 93 return idx + 94 idx = len(self._values) + 95 self._value_to_id[value] = idx + 96 self._values.append(value) + 97 return idx + 98 + 99 def get(self, idx: int) -> A: +100 return self._values[idx] +101 +102 +103class _OptInner(ABC): +104 @abstractmethod +105 def build(self, klabels: _Cache[KLabel], terms: list[KInner]) -> KInner: ... +106 +107 +108@final +109@dataclass(eq=True, frozen=True) +110class _OptBasic(_OptInner): +111 term: KInner +112 +113 def build(self, klabels: _Cache[KLabel], terms: list[KInner]) -> KInner: +114 return self.term +115 +116 +117@final +118@dataclass(eq=True, frozen=True) +119class _OptApply(_OptInner): +120 label: int +121 children: tuple[int, ...] +122 +123 def build(self, klabels: _Cache[KLabel], terms: list[KInner]) -> KInner: +124 return KApply(klabels.get(self.label), tuple(terms[child] for child in self.children)) +125 +126 +127@final +128@dataclass(eq=True, frozen=True) +129class _OptKSequence(_OptInner): +130 children: tuple[int, ...] +131 +132 def build(self, klabels: _Cache[KLabel], terms: list[KInner]) -> KInner: +133 return KSequence(tuple(terms[child] for child in self.children)) +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kcfg/tui.html b/pyk/_modules/pyk/kcfg/tui.html new file mode 100644 index 00000000000..ac2ed5d45d3 --- /dev/null +++ b/pyk/_modules/pyk/kcfg/tui.html @@ -0,0 +1,728 @@ + + + + + + + + pyk.kcfg.tui — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kcfg.tui

+  1from __future__ import annotations
+  2
+  3from typing import TYPE_CHECKING, Union
+  4
+  5from textual.app import App
+  6from textual.binding import Binding
+  7from textual.containers import Horizontal, ScrollableContainer, Vertical
+  8from textual.message import Message
+  9from textual.reactive import reactive
+ 10from textual.widget import Widget
+ 11from textual.widgets import Footer, Static
+ 12
+ 13from ..cterm import CTerm
+ 14from ..kast.inner import KApply, KRewrite
+ 15from ..kast.manip import flatten_label, minimize_term, push_down_rewrites
+ 16from ..prelude.kbool import TRUE
+ 17from ..utils import ROOT, shorten_hashes, single
+ 18from .kcfg import KCFG
+ 19from .show import KCFGShow
+ 20
+ 21if TYPE_CHECKING:
+ 22    from collections.abc import Callable, Iterable
+ 23
+ 24    from textual.app import ComposeResult
+ 25    from textual.events import Click
+ 26
+ 27    from ..kast import KInner
+ 28    from ..ktool.kprint import KPrint
+ 29    from .show import NodePrinter
+ 30
+ 31
+ 32KCFGElem = Union[KCFG.Node, KCFG.Successor]
+ 33
+ 34
+
+[docs] + 35class GraphChunk(Static): + 36 _node_text: str + 37 +
+[docs] + 38 class Selected(Message): + 39 chunk_id: str + 40 + 41 def __init__(self, chunk_id: str) -> None: + 42 self.chunk_id = chunk_id + 43 super().__init__()
+ + 44 + 45 def __init__(self, id: str, node_text: Iterable[str] = ()) -> None: + 46 self._node_text = '\n'.join(node_text) + 47 super().__init__(self._node_text, id=id, classes='cfg-node') + 48 +
+[docs] + 49 def on_enter(self) -> None: + 50 self.styles.border_left = ('double', 'red') # type: ignore
+ + 51 +
+[docs] + 52 def on_leave(self) -> None: + 53 self.styles.border_left = None # type: ignore
+ + 54 +
+[docs] + 55 def on_click(self, click: Click) -> None: + 56 self.post_message(GraphChunk.Selected(self.id or '')) + 57 click.stop()
+
+ + 58 + 59 + + + 83 + 84 +
+[docs] + 85class Info(Widget, can_focus=False): + 86 text: reactive[str] = reactive('', init=False) + 87 + 88 def __init__(self) -> None: + 89 super().__init__(id='info') + 90 +
+[docs] + 91 def update(self, text: str) -> None: + 92 self.text = text
+ + 93 +
+[docs] + 94 def compose(self) -> ComposeResult: + 95 yield Static(self.text)
+ + 96 +
+[docs] + 97 def watch_text(self) -> None: + 98 self.query_one(Static).update(self.text)
+
+ + 99 +100 +
+[docs] +101class Status(NavWidget): +102 def __init__(self) -> None: +103 super().__init__(id='status') +104 +
+[docs] +105 def on_click(self, click: Click) -> None: +106 click.stop() +107 self.post_message(Status.Selected())
+
+ +108 +109 +
+[docs] +110class Term(NavWidget): +111 def __init__(self) -> None: +112 super().__init__(id='term') +113 +
+[docs] +114 def on_click(self, click: Click) -> None: +115 click.stop() +116 self.post_message(Term.Selected())
+
+ +117 +118 +
+[docs] +119class Constraint(NavWidget): +120 def __init__(self) -> None: +121 super().__init__(id='constraint') +122 +
+[docs] +123 def on_click(self, click: Click) -> None: +124 click.stop() +125 self.post_message(Constraint.Selected())
+
+ +126 +127 +
+[docs] +128class Custom(NavWidget): +129 def __init__(self) -> None: +130 super().__init__(id='custom') +131 +
+[docs] +132 def on_click(self, click: Click) -> None: +133 click.stop() +134 self.post_message(Custom.Selected())
+
+ +135 +136 +
+[docs] +137class BehaviorView(ScrollableContainer, can_focus=True): +138 _kcfg: KCFG +139 _kprint: KPrint +140 _minimize: bool +141 _node_printer: NodePrinter | None +142 _kcfg_nodes: Iterable[GraphChunk] +143 +
+[docs] +144 class Selected(Message): +145 def __init__(self) -> None: +146 super().__init__()
+ +147 +148 def __init__( +149 self, +150 kcfg: KCFG, +151 kprint: KPrint, +152 minimize: bool = True, +153 node_printer: NodePrinter | None = None, +154 id: str = '', +155 ): +156 super().__init__(id=id) +157 self._kcfg = kcfg +158 self._kprint = kprint +159 self._minimize = minimize +160 self._node_printer = node_printer +161 self._kcfg_nodes = [] +162 kcfg_show = KCFGShow(kprint, node_printer=node_printer) +163 for lseg_id, node_lines in kcfg_show.pretty_segments(self._kcfg, minimize=self._minimize): +164 self._kcfg_nodes.append(GraphChunk(lseg_id, node_lines)) +165 +
+[docs] +166 def compose(self) -> ComposeResult: +167 return self._kcfg_nodes
+ +168 +
+[docs] +169 def on_click(self, click: Click) -> None: +170 click.stop() +171 self.post_message(BehaviorView.Selected())
+
+ +172 +173 +
+[docs] +174class NodeView(Widget): +175 _kprint: KPrint +176 _custom_view: Callable[[KCFGElem], Iterable[str]] | None +177 +178 _element: KCFGElem | None +179 +180 _minimize: bool +181 _term_on: bool +182 _constraint_on: bool +183 _custom_on: bool +184 _status_on: bool +185 _proof_status: str +186 _proof_id: str +187 _exec_time: float +188 +189 def __init__( +190 self, +191 kprint: KPrint, +192 id: str = '', +193 minimize: bool = True, +194 term_on: bool = True, +195 constraint_on: bool = True, +196 custom_on: bool = False, +197 status_on: bool = True, +198 custom_view: Callable[[KCFGElem], Iterable[str]] | None = None, +199 proof_status: str = '', +200 proof_id: str = '', +201 exec_time: float = 0, +202 ): +203 super().__init__(id=id) +204 self._kprint = kprint +205 self._element = None +206 self._minimize = minimize +207 self._term_on = term_on +208 self._constraint_on = constraint_on +209 self._custom_on = custom_on or custom_view is not None +210 self._custom_view = custom_view +211 self._status_on = status_on +212 self._proof_status = proof_status +213 self._proof_id = proof_id +214 self._exec_time = exec_time +215 +216 def _info_text(self) -> str: +217 term_str = '✅' if self._term_on else '❌' +218 constraint_str = '✅' if self._constraint_on else '❌' +219 custom_str = '' if self._custom_view is None else f'{"✅" if self._custom_on else "❌"} Custom View.' +220 minimize_str = '✅' if self._minimize else '❌' +221 status_str = '✅' if self._status_on else '❌' +222 element_str = 'NOTHING' +223 if type(self._element) is KCFG.Node: +224 element_str = f'node({shorten_hashes(self._element.id)})' +225 elif type(self._element) is KCFG.Edge: +226 element_str = f'edge({shorten_hashes(self._element.source.id)},{shorten_hashes(self._element.target.id)})' +227 elif type(self._element) is KCFG.MergedEdge: +228 element_str = ( +229 f'merged_edge({shorten_hashes(self._element.source.id)},{shorten_hashes(self._element.target.id)})' +230 ) +231 elif type(self._element) is KCFG.Cover: +232 element_str = f'cover({shorten_hashes(self._element.source.id)},{shorten_hashes(self._element.target.id)})' +233 return f'{element_str} selected. {minimize_str} Minimize Output. {term_str} Term View. {constraint_str} Constraint View. {status_str} Status View. {custom_str}' +234 +235 def _status_text(self) -> str: +236 exec_time = str(round(self._exec_time, 2)) +237 return f'Proof ID: {self._proof_id}. Status: {self._proof_status}. Exec Time: {exec_time}s.' +238 +
+[docs] +239 def compose(self) -> ComposeResult: +240 yield Info() +241 yield Status() +242 yield Term() +243 yield Constraint() +244 if self._custom_view is not None: +245 yield Custom()
+ +246 +
+[docs] +247 def toggle_option(self, field: str) -> bool: +248 assert field in ['minimize', 'term_on', 'constraint_on', 'custom_on', 'status_on'] +249 field_attr = f'_{field}' +250 old_value = getattr(self, field_attr) +251 new_value = not old_value +252 # Do not turn on custom view if it's not available +253 if field == 'custom_on' and self._custom_view is None: +254 new_value = False +255 setattr(self, field_attr, new_value) +256 self._update() +257 return new_value
+ +258 +
+[docs] +259 def toggle_view(self, field: str) -> None: +260 assert field in ['term', 'constraint', 'custom', 'status'] +261 if self.toggle_option(f'{field}_on'): +262 self.query_one(f'#{field}').remove_class('hidden') +263 else: +264 self.query_one(f'#{field}').add_class('hidden')
+ +265 +
+[docs] +266 def update(self, element: KCFGElem) -> None: +267 self._element = element +268 self._update()
+ +269 +
+[docs] +270 def on_mount(self) -> None: +271 self._update()
+ +272 +273 def _update(self) -> None: +274 def _boolify(c: KInner) -> KInner: +275 if type(c) is KApply and c.label.name == '#Equals' and c.args[0] == TRUE: +276 return c.args[1] +277 else: +278 return c +279 +280 def _cterm_text(cterm: CTerm) -> tuple[str, str]: +281 config = cterm.config +282 constraints = map(_boolify, cterm.constraints) +283 if self._minimize: +284 config = minimize_term(config) +285 return (self._kprint.pretty_print(config), '\n'.join(self._kprint.pretty_print(c) for c in constraints)) +286 +287 term_str = 'Term' +288 constraint_str = 'Constraint' +289 custom_str = 'Custom' +290 +291 if self._element is not None: +292 if type(self._element) is KCFG.Node: +293 term_str, constraint_str = _cterm_text(self._element.cterm) +294 +295 elif type(self._element) is KCFG.Edge: +296 config_source, *constraints_source = self._element.source.cterm +297 config_target, *constraints_target = self._element.target.cterm +298 constraints_new = [c for c in constraints_target if c not in constraints_source] +299 config = push_down_rewrites(KRewrite(config_source, config_target)) +300 crewrite = CTerm(config, constraints_new) +301 term_str, constraint_str = _cterm_text(crewrite) +302 +303 elif type(self._element) is KCFG.MergedEdge: +304 config_source, *constraints_source = self._element.source.cterm +305 config_target, *constraints_target = self._element.target.cterm +306 constraints_new = [c for c in constraints_target if c not in constraints_source] +307 config = push_down_rewrites(KRewrite(config_source, config_target)) +308 crewrite = CTerm(config, constraints_new) +309 term_str, constraint_str = _cterm_text(crewrite) +310 +311 elif type(self._element) is KCFG.Cover: +312 subst_equalities = map( +313 _boolify, +314 flatten_label( +315 '#And', self._element.csubst.pred(sort_with=self._kprint.definition, constraints=False) +316 ), +317 ) +318 constraints = map(_boolify, flatten_label('#And', self._element.csubst.constraint)) +319 term_str = '\n'.join(self._kprint.pretty_print(se) for se in subst_equalities) +320 constraint_str = '\n'.join(self._kprint.pretty_print(c) for c in constraints) +321 +322 elif type(self._element) is KCFG.Split: +323 term_strs = [f'split: {shorten_hashes(self._element.source.id)}'] +324 for target_id, csubst in self._element.splits.items(): +325 term_strs.append('') +326 term_strs.append(f' - {shorten_hashes(target_id)}') +327 if len(csubst.subst) > 0: +328 subst_equalities = map( +329 _boolify, +330 flatten_label('#And', csubst.pred(sort_with=self._kprint.definition, constraints=False)), +331 ) +332 term_strs.extend(f' {self._kprint.pretty_print(cline)}' for cline in subst_equalities) +333 if len(csubst.constraints) > 0: +334 constraints = map(_boolify, flatten_label('#And', csubst.constraint)) +335 term_strs.extend(f' {self._kprint.pretty_print(cline)}' for cline in constraints) +336 term_str = '\n'.join(term_strs) +337 +338 elif type(self._element) is KCFG.NDBranch: +339 term_strs = [f'ndbranch: {shorten_hashes(self._element.source.id)}'] +340 for target in self._element.targets: +341 term_strs.append('') +342 term_strs.append(f' - {shorten_hashes(target.id)}') +343 term_strs.append(' (1 step)') +344 term_str = '\n'.join(term_strs) +345 +346 if self._custom_view is not None: +347 # To appease the type-checker +348 if type(self._element) is KCFG.Node: +349 custom_str = '\n'.join(self._custom_view(self._element)) +350 elif isinstance(self._element, KCFG.Successor): +351 custom_str = '\n'.join(self._custom_view(self._element)) +352 +353 self.query_one('#info', 
Info).text = self._info_text() +354 self.query_one('#term', Term).text = term_str +355 self.query_one('#constraint', Constraint).text = constraint_str +356 if self._custom_view is not None: +357 self.query_one('#custom', Custom).text = custom_str +358 self.query_one('#status', Status).text = self._status_text() +359 +
+[docs] +360 def on_behavior_view_selected(self) -> None: +361 self.query_one('#behavior').focus()
+ +362 +
+[docs] +363 def on_term_selected(self) -> None: +364 self.query_one(Term).focus()
+ +365 +
+[docs] +366 def on_constraint_selected(self) -> None: +367 self.query_one(Constraint).focus()
+ +368 +
+[docs] +369 def on_custom_selected(self) -> None: +370 self.query_one(Custom).focus()
+ +371 +
+[docs] +372 def on_status_selected(self) -> None: +373 self.query_one(Status).focus()
+
+ +374 +375 +
+[docs] +376class KCFGViewer(App): +377 CSS_PATH = ROOT / 'kcfg/style.css' +378 +379 _kcfg: KCFG +380 _kprint: KPrint +381 +382 _node_printer: NodePrinter | None +383 _custom_view: Callable[[KCFGElem], Iterable[str]] | None +384 +385 _minimize: bool +386 +387 _hidden_chunks: list[str] +388 _selected_chunk: str | None +389 +390 def __init__( +391 self, +392 kcfg: KCFG, +393 kprint: KPrint, +394 node_printer: NodePrinter | None = None, +395 custom_view: Callable[[KCFGElem], Iterable[str]] | None = None, +396 minimize: bool = True, +397 ) -> None: +398 super().__init__() +399 self._kcfg = kcfg +400 self._kprint = kprint +401 self._node_printer = node_printer +402 self._custom_view = custom_view +403 self._minimize = minimize +404 self._hidden_chunks = [] +405 self._selected_chunk = None +406 if self._custom_view is not None: +407 self.bind('v', 'keystroke("custom")', description='Toggle custom.') +408 +
+[docs] +409 def compose(self) -> ComposeResult: +410 yield Horizontal( +411 Vertical( +412 BehaviorView(self._kcfg, self._kprint, node_printer=self._node_printer, id='behavior'), +413 id='navigation', +414 ), +415 Vertical( +416 NodeView( +417 self._kprint, +418 custom_view=self._custom_view, +419 proof_id=str(self._kcfg._node_id), +420 id='node-view', +421 ), +422 id='display', +423 ), +424 ) +425 yield Footer()
+ +426 +
+[docs] +427 def on_graph_chunk_selected(self, message: GraphChunk.Selected) -> None: +428 self.query_one('#behavior').focus() +429 +430 if message.chunk_id.startswith('node_'): +431 self._selected_chunk = message.chunk_id +432 node, *_ = message.chunk_id[5:].split('_') +433 node_id = int(node) +434 self.query_one('#node-view', NodeView).update(self._kcfg.node(node_id)) +435 +436 elif message.chunk_id.startswith('edge_'): +437 self._selected_chunk = None +438 node_source, node_target, *_ = message.chunk_id[5:].split('_') +439 source_id = int(node_source) +440 target_id = int(node_target) +441 edge = single(self._kcfg.edges(source_id=source_id, target_id=target_id)) +442 self.query_one('#node-view', NodeView).update(edge) +443 +444 elif message.chunk_id.startswith('merged_edge_'): +445 self._selected_chunk = None +446 node_source, node_target, *_ = message.chunk_id[12:].split('_') +447 source_id = int(node_source) +448 target_id = int(node_target) +449 merged_edge = single(self._kcfg.merged_edges(source_id=source_id, target_id=target_id)) +450 self.query_one('#node-view', NodeView).update(merged_edge) +451 +452 elif message.chunk_id.startswith('cover_'): +453 self._selected_chunk = None +454 node_source, node_target, *_ = message.chunk_id[6:].split('_') +455 source_id = int(node_source) +456 target_id = int(node_target) +457 cover = single(self._kcfg.covers(source_id=source_id, target_id=target_id)) +458 self.query_one('#node-view', NodeView).update(cover) +459 +460 elif message.chunk_id.startswith('split_'): +461 self._selected_chunk = None +462 node_source, node_target, *_ = message.chunk_id[6:].split('_') +463 source_id = int(node_source) +464 target_id = int(node_target) +465 split = single(self._kcfg.splits(source_id=source_id, target_id=target_id)) +466 self.query_one('#node-view', NodeView).update(split) +467 +468 elif message.chunk_id.startswith('ndbranch_'): +469 self._selected_chunk = None +470 node_source, node_target, *_ = message.chunk_id[8:].split('_') +471 source_id = int(node_source) +472 target_id = int(node_target) +473 ndbranch = single(self._kcfg.ndbranches(source_id=source_id, target_id=target_id)) +474 self.query_one('#node-view', NodeView).update(ndbranch)
+ +475 +476 BINDINGS = [ +477 ('h', 'keystroke("h")', 'Hide selected node.'), +478 ('H', 'keystroke("H")', 'Unhide all nodes.'), +479 ('t', 'keystroke("term")', 'Toggle term.'), +480 ('c', 'keystroke("constraint")', 'Toggle constraint.'), +481 ('m', 'keystroke("minimize")', 'Toggle minimization.'), +482 ('s', 'keystroke("status")', 'Toggle status.'), +483 Binding('q', 'quit', priority=True), +484 ] +485 +
+[docs] +486 def action_keystroke(self, key: str) -> None: +487 if key == 'h': +488 if self._selected_chunk is not None and self._selected_chunk.startswith('node_'): +489 node_id = self._selected_chunk[5:] +490 self._hidden_chunks.append(self._selected_chunk) +491 self.query_one(f'#{self._selected_chunk}', GraphChunk).add_class('hidden') +492 self.query_one('#info', Info).text = f'HIDDEN: node({shorten_hashes(node_id)})' +493 elif key == 'H': +494 for hc in self._hidden_chunks: +495 self.query_one(f'#{hc}', GraphChunk).remove_class('hidden') +496 node_ids = [nid[5:] for nid in self._hidden_chunks] +497 self.query_one('#info', Info).text = f'UNHIDDEN: nodes({shorten_hashes(node_ids)})' +498 self._hidden_chunks = [] +499 elif key in ['term', 'constraint', 'status']: +500 self.query_one('#node-view', NodeView).toggle_view(key) +501 elif key == 'custom' and self._custom_view is not None: +502 self.query_one('#node-view', NodeView).toggle_view(key) +503 elif key in ['minimize']: +504 self.query_one('#node-view', NodeView).toggle_option(key)
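Launching the TUI amounts to instantiating the textual `App` subclass above and running it; a hedged sketch, with `kcfg` and `kprint` assumed to exist:

    viewer = KCFGViewer(kcfg, kprint, minimize=True)
    viewer.run()   # standard textual entry point; 'q' quits, 't'/'c'/'m'/'s' toggle the views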
+
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kcovr.html b/pyk/_modules/pyk/kcovr.html new file mode 100644 index 00000000000..ad16ea93b55 --- /dev/null +++ b/pyk/_modules/pyk/kcovr.html @@ -0,0 +1,404 @@ + + + + + + + + pyk.kcovr — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kcovr

+  1from __future__ import annotations
+  2
+  3import os
+  4import sys
+  5import time
+  6from pathlib import Path
+  7from typing import TYPE_CHECKING
+  8
+  9from .cli.utils import dir_path, file_path
+ 10
+ 11if TYPE_CHECKING:
+ 12    from collections.abc import Iterable, Mapping
+ 13    from typing import Final
+ 14
+ 15
+ 16TEMPLATE: Final = """
+ 17<coverage line-rate="{line_rate}" branch-rate="{rule_rate}" version="1.9" timestamp="{timestamp}">
+ 18  <sources>
+ 19    <source>{source_dir}</source>
+ 20  </sources>
+ 21  <packages>
+ 22    <package name="" line-rate="{line_rate}" branch-rate="{rule_rate}" complexity="{num_rules}.0">
+ 23      <classes>
+ 24        {classes_elem}
+ 25      </classes>
+ 26    </package>
+ 27  </packages>
+ 28</coverage>
+ 29"""
+ 30
+ 31CLASS_TEMPLATE: Final = """
+ 32<class name="{filename}" filename="{filename}" line-rate="{line_rate}" branch-rate="{rule_rate}" complexity="{num_rules}.0">
+ 33  <lines>
+ 34    {lines_elem}
+ 35  </lines>
+ 36</class>
+ 37"""
+ 38
+ 39LINE_TEMPLATE_NO_BRANCH: Final = """
+ 40<line number="{line_num}" hits="{hits}" branch="false"/>
+ 41"""
+ 42
+ 43LINE_TEMPLATE_BRANCH: Final = """
+ 44<line number="{line_num}" hits="{hits}" branch="true" condition-coverage="{rule_rate}% ({rules_covered}/{num_rules})">
+ 45  <conditions>
+ 46    <condition number="0" type="jump" coverage="{rule_rate}%"/>
+ 47  </conditions>
+ 48</line>
+ 49"""
+ 50
+ 51
+
+[docs] + 52def main() -> None: + 53 definition_dirs, source_files = parse_args() + 54 xml = render_coverage_xml(definition_dirs, source_files) + 55 print(xml)
+ + 56 + 57 +
+[docs] + 58def parse_args() -> tuple[tuple[Path, ...], tuple[Path, ...]]: + 59 if len(sys.argv) < 4: + 60 print('usage: ' + sys.argv[0] + ' <definition-dir>... -- <source-file>...') + 61 exit(1) + 62 + 63 def split_at_sep(xs: list[str]) -> tuple[list[str], list[str]]: + 64 for i, x in enumerate(xs): + 65 if x == '--': + 66 return xs[:i], xs[i + 1 :] + 67 return xs, [] + 68 + 69 definition_strs, source_strs = split_at_sep(sys.argv[1:]) + 70 definition_dirs = tuple(dir_path(s).resolve() for s in definition_strs) + 71 source_files = tuple(file_path(s).resolve() for s in source_strs) + 72 + 73 return definition_dirs, source_files
+ + 74 + 75 +
+[docs] + 76def render_coverage_xml(definition_dirs: Iterable[Path], source_files: Iterable[Path]) -> str: + 77 rule_map = create_rule_map(definition_dirs) + 78 cover_map = create_cover_map(definition_dirs) + 79 source_dir = Path(os.path.commonprefix([str(source_file) for source_file in source_files])) + 80 + 81 classes = render_classes(rule_map, cover_map, source_files, source_dir) + 82 classes_elem = ''.join(classes) + 83 + 84 num_rules_covered_global = count_rules_covered(cover_map) + 85 num_rules_global = len(rule_map) + 86 rule_rate_global = float(num_rules_covered_global) / num_rules_global + 87 + 88 lines_covered_global = count_lines_covered(rule_map, cover_map) + 89 num_lines_global = count_lines_global(rule_map) + 90 line_rate_global = float(lines_covered_global) / num_lines_global + 91 + 92 timestamp = int(time.time()) + 93 + 94 xml = TEMPLATE.format( + 95 line_rate=line_rate_global, + 96 rule_rate=rule_rate_global, + 97 timestamp=timestamp, + 98 num_rules=num_rules_global, + 99 source_dir=source_dir, +100 classes_elem=classes_elem, +101 ) +102 +103 return xml
+ +104 +105 +
+[docs] +106def render_classes( +107 rule_map: Mapping[str, tuple[str, int, int]], +108 cover_map: Mapping[str, int], +109 source_files: Iterable[Path], +110 source_dir: Path, +111) -> list[str]: +112 classes = [] +113 +114 rule_map_by_file = create_rule_map_by_file(rule_map) +115 for source_file in source_files: +116 source_file_name = str(source_file) +117 if source_file_name not in rule_map_by_file: +118 continue +119 +120 rule_map_file = rule_map_by_file[source_file_name] +121 cover_map_file = {rule: cnt for rule, cnt in cover_map.items() if rule in rule_map_file} +122 +123 num_rules_covered_file = count_rules_covered(cover_map_file) +124 num_rules_file = len(rule_map_file) +125 rule_rate_file = float(num_rules_covered_file) / num_rules_file +126 +127 num_lines_covered_file = count_lines_covered(rule_map, cover_map_file) +128 num_lines_file = count_lines_file(rule_map_file) +129 line_rate_file = float(num_lines_covered_file) / num_lines_file +130 +131 lines = render_lines(rule_map_file, cover_map_file) +132 lines_elem = ''.join(lines) +133 +134 relative_file = source_file.relative_to(source_dir) +135 +136 classes.append( +137 CLASS_TEMPLATE.format( +138 filename=relative_file, +139 line_rate=line_rate_file, +140 rule_rate=rule_rate_file, +141 num_rules=num_rules_file, +142 lines_elem=lines_elem, +143 ) +144 ) +145 +146 return classes
+ +147 +148 +
+[docs] +149def render_lines( +150 rule_map_file: Mapping[str, tuple[int, int]], +151 cover_map_file: Mapping[str, int], +152) -> list[str]: +153 lines = [] +154 +155 rule_map_by_line = create_rule_map_by_line(rule_map_file) +156 for line_num, rules in rule_map_by_line.items(): +157 line_coverage = {rule: cnt for rule, cnt in cover_map_file.items() if rule in rules} +158 hits = sum(line_coverage.values()) +159 num_covered = len(line_coverage) +160 num_rules_line = len(rules) +161 rule_rate_line = float(num_covered) / num_rules_line +162 if num_rules_line == 1: +163 lines.append(LINE_TEMPLATE_NO_BRANCH.format(line_num=line_num, hits=hits)) +164 else: +165 lines.append( +166 LINE_TEMPLATE_BRANCH.format( +167 line_num=line_num, +168 hits=hits, +169 rule_rate=int(rule_rate_line * 100), +170 rules_covered=num_covered, +171 num_rules=num_rules_line, +172 ) +173 ) +174 +175 return lines
+ +176 +177 +
+[docs] +178def create_rule_map(definition_dirs: Iterable[Path]) -> dict[str, tuple[str, int, int]]: +179 all_rules: set[str] = set() +180 +181 for definition_dir in definition_dirs: +182 with (definition_dir / 'allRules.txt').open() as f: +183 all_rules.update(line.strip() for line in f.readlines()) +184 +185 rule_map: dict[str, tuple[str, int, int]] = {} +186 for line in all_rules: +187 parts = line.split(' ') +188 rule_id = parts[0] +189 location = ' '.join(parts[1:]) +190 parts = location.split(':') +191 rule_map[rule_id] = (os.path.abspath(':'.join(parts[:-2])), int(parts[-2]), int(parts[-1])) +192 +193 assert len(all_rules) == len(rule_map) +194 return rule_map
+ +195 +196 +
+[docs] +197def create_cover_map(definition_dirs: Iterable[Path]) -> dict[str, int]: +198 cover_map: dict[str, int] = {} +199 +200 def add_cover(rule_id: str) -> None: +201 if not rule_id in cover_map: +202 cover_map[rule_id] = 0 +203 cover_map[rule_id] += 1 +204 +205 for definition_dir in definition_dirs: +206 with (definition_dir / 'coverage.txt').open() as f: +207 for line in f: +208 rule_id = line.strip() +209 add_cover(rule_id) +210 +211 for path in definition_dir.glob('*_coverage.txt'): +212 with path.open() as f: +213 for line in f: +214 rule_id = line.strip() +215 add_cover(rule_id) +216 +217 return cover_map
+ +218 +219 +
+[docs] +220def create_rule_map_by_file(rule_map: Mapping[str, tuple[str, int, int]]) -> dict[str, dict[str, tuple[int, int]]]: +221 rule_map_by_file: dict[str, dict[str, tuple[int, int]]] = {} +222 +223 for rule_id, (path, line, pos) in rule_map.items(): +224 if not path in rule_map_by_file: +225 rule_map_by_file[path] = {} +226 rule_map_by_file[path][rule_id] = (line, pos) +227 +228 return rule_map_by_file
+ +229 +230 +
+[docs] +231def create_rule_map_by_line(rule_map_file: Mapping[str, tuple[int, int]]) -> dict[int, list[str]]: +232 rule_map_by_line: dict[int, list[str]] = {} +233 +234 for rule_id, (line, _pos) in rule_map_file.items(): +235 if not line in rule_map_by_line: +236 rule_map_by_line[line] = [rule_id] +237 else: +238 rule_map_by_line[line].append(rule_id) +239 +240 return rule_map_by_line
+ +241 +242 +
+[docs] +243def count_lines_file(rule_map_file: Mapping[str, tuple[int, int]]) -> int: +244 return len({(line, pos) for _, (line, pos) in rule_map_file.items()})
+ +245 +246 +
+[docs] +247def count_lines_global(rule_map: Mapping[str, tuple[str, int, int]]) -> int: +248 return len({(src, line) for src, line, _pos in rule_map.values()})
+ +249 +250 +
+[docs] +251def count_lines_covered(rule_map: Mapping[str, tuple[str, int, int]], cover_map: Mapping[str, int]) -> int: +252 covered_lines = set() +253 for rule_id in cover_map: +254 rule = rule_map[rule_id] +255 covered_lines.add((rule[0], rule[1])) +256 return len(covered_lines)
+ +257 +258 +
+[docs] +259def count_rules_covered(cover_map: Mapping[str, int]) -> int: +260 return len(cover_map)
+ +261 +262 +263if __name__ == '__main__': +264 main() +
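A minimal sketch of producing a Cobertura-style report with the functions above; the definition directory and K source file are hypothetical, and the definition directory is assumed to contain the allRules.txt and coverage.txt files read by create_rule_map and create_cover_map:

from pathlib import Path

from pyk.kcovr import render_coverage_xml

definition_dirs = [Path('imp-kompiled').resolve()]  # hypothetical kompiled definition directory
source_files = [Path('imp.k').resolve()]            # hypothetical K source file
xml = render_coverage_xml(definition_dirs, source_files)
Path('coverage.xml').write_text(xml)                # Cobertura XML consumable by CI tooling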
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kdist/api.html b/pyk/_modules/pyk/kdist/api.html new file mode 100644 index 00000000000..24ba56291b9 --- /dev/null +++ b/pyk/_modules/pyk/kdist/api.html @@ -0,0 +1,211 @@ + + + + + + + + pyk.kdist.api — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kdist.api

+ 1from __future__ import annotations
+ 2
+ 3import re
+ 4from abc import ABC, abstractmethod
+ 5from dataclasses import dataclass
+ 6from typing import TYPE_CHECKING, final
+ 7
+ 8from . import utils
+ 9
+10if TYPE_CHECKING:
+11    from collections.abc import Iterable, Iterator, Mapping
+12    from pathlib import Path
+13    from typing import Any
+14
+15
+16_ID_PATTERN = re.compile('[a-z0-9]+(-[a-z0-9]+)*')
+17
+18
+
+[docs] +19def valid_id(s: str) -> bool: +20 return _ID_PATTERN.fullmatch(s) is not None
+ +21 +22 +
+[docs] +23@final +24@dataclass(frozen=True) +25class TargetId: +26 plugin_name: str +27 target_name: str +28 +29 def __init__(self, plugin_name: str, target_name: str): +30 if not valid_id(plugin_name): +31 raise ValueError(f'Invalid plugin name: {plugin_name!r}') +32 +33 if not valid_id(target_name): +34 raise ValueError(f'Invalid target name: {target_name!r}') +35 +36 object.__setattr__(self, 'plugin_name', plugin_name) +37 object.__setattr__(self, 'target_name', target_name) +38 +39 def __iter__(self) -> Iterator[str]: +40 yield self.plugin_name +41 yield self.target_name +42 +
+[docs] +43 @staticmethod +44 def parse(fqn: str) -> TargetId: +45 segments = fqn.split('.') +46 if len(segments) != 2: +47 raise ValueError(f'Expected fully qualified target name, got: {fqn!r}') +48 +49 plugin_name, target_name = segments +50 return TargetId(plugin_name, target_name)
+ +51 +52 @property +53 def full_name(self) -> str: +54 return f'{self.plugin_name}.{self.target_name}'
+ +55 +56 +
+[docs] +57class Target(ABC): +
+[docs] +58 @abstractmethod +59 def build(self, output_dir: Path, deps: dict[str, Path], args: dict[str, Any], verbose: bool) -> None: ...
+ +60 +
+[docs] +61 def deps(self) -> Iterable[str]: +62 return ()
+ +63 +
+[docs] +64 def source(self) -> Iterable[str | Path]: +65 return ()
+ +66 +
+[docs] +67 def context(self) -> Mapping[str, str]: +68 return {}
+ +69 +
+[docs] +70 @final +71 def manifest(self) -> dict[str, Any]: +72 source = {} +73 package_path = utils.package_path(self) +74 source_files = [file.resolve() for source in self.source() for file in utils.files_for_path(source)] +75 for source_file in source_files: +76 try: +77 file_id = str(source_file.relative_to(package_path)) +78 except ValueError as err: +79 raise ValueError(f'Source file is not within package: {source_file}') from err +80 source[file_id] = utils.timestamp(source_file) +81 +82 context = dict(self.context()) +83 return {'context': context, 'source': source}
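A short sketch of how a kdist plugin might subclass Target and how TargetId parses fully qualified names; the plugin and target names below are made up, and both parts must satisfy valid_id (lowercase alphanumerics separated by dashes):

from pathlib import Path
from typing import Any

from pyk.kdist.api import Target, TargetId

class HelloTarget(Target):  # hypothetical target exposed by a kdist plugin
    def build(self, output_dir: Path, deps: dict[str, Path], args: dict[str, Any], verbose: bool) -> None:
        (output_dir / 'hello.txt').write_text('hello')  # produce the target's artifact

target_id = TargetId.parse('example-plugin.hello')
assert target_id.full_name == 'example-plugin.hello'
plugin_name, target_name = target_id  # TargetId unpacks into its two components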
+
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kdist/utils.html b/pyk/_modules/pyk/kdist/utils.html new file mode 100644 index 00000000000..5d7d58b4ff9 --- /dev/null +++ b/pyk/_modules/pyk/kdist/utils.html @@ -0,0 +1,173 @@ + + + + + + + + pyk.kdist.utils — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kdist.utils

+ 1from __future__ import annotations
+ 2
+ 3import inspect
+ 4import os
+ 5from contextlib import contextmanager
+ 6from pathlib import Path
+ 7from typing import TYPE_CHECKING
+ 8
+ 9from pyk.utils import check_dir_path
+10
+11if TYPE_CHECKING:
+12    from collections.abc import Iterator
+13    from typing import Any, Final
+14
+15
+16LOG_FORMAT: Final = '%(levelname)s %(asctime)s %(name)s - %(message)s'
+17
+18
+
+[docs] +19def package_path(obj: Any) -> Path: +20 module = inspect.getmodule(obj) +21 +22 if not module: +23 raise ValueError(f'Module not found for object: {obj}') +24 +25 if not module.__file__: +26 raise ValueError(f'Path not found for module: {module.__name__}') +27 +28 package_path = Path(module.__file__).parent.resolve() +29 while True: +30 init_file = package_path / '__init__.py' +31 if not init_file.exists(): +32 return package_path +33 if not package_path.parent.exists(): +34 return package_path +35 package_path = package_path.parent
+ +36 +37 +
+[docs] +38def files_for_path(path: str | Path) -> list[Path]: +39 path = Path(path) +40 +41 if not path.exists(): +42 raise ValueError(f'Path does not exist: {path}') +43 +44 if path.is_file(): +45 return [path] +46 +47 return [file for file in path.rglob('*') if file.is_file()]
+ +48 +49 +
+[docs] +50def timestamp(path: Path) -> int: +51 return path.stat().st_mtime_ns
+ +52 +53 +
+[docs] +54@contextmanager +55def cwd(path: Path) -> Iterator[None]: +56 check_dir_path(path) +57 old_cwd = os.getcwd() +58 os.chdir(str(path)) +59 yield +60 os.chdir(old_cwd)
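A brief sketch of the helpers above; 'src' is a hypothetical directory that is assumed to exist:

from pathlib import Path

from pyk.kdist.utils import cwd, files_for_path, timestamp

files = files_for_path('src')                    # every regular file under the hypothetical src directory
newest = max(timestamp(file) for file in files)  # most recent modification time, in nanoseconds

with cwd(Path('src')):  # temporarily switch the working directory, then restore it
    ...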
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kllvm/compiler.html b/pyk/_modules/pyk/kllvm/compiler.html new file mode 100644 index 00000000000..0746dc37342 --- /dev/null +++ b/pyk/_modules/pyk/kllvm/compiler.html @@ -0,0 +1,240 @@ + + + + + + + + pyk.kllvm.compiler — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kllvm.compiler

+  1from __future__ import annotations
+  2
+  3import logging
+  4import sys
+  5import sysconfig
+  6from pathlib import Path
+  7from typing import TYPE_CHECKING
+  8
+  9from ..cli.utils import check_dir_path, check_file_path
+ 10from ..utils import run_process_2
+ 11
+ 12if TYPE_CHECKING:
+ 13    from collections.abc import Iterable
+ 14    from typing import Final
+ 15
+ 16
+ 17_LOGGER: Final = logging.getLogger(__name__)
+ 18PYTHON_EXTENSION_SUFFIX: Final = sysconfig.get_config_var('EXT_SUFFIX')
+ 19
+ 20
+ 21# ------
+ 22# _kllvm
+ 23# ------
+ 24
+ 25KLLVM_MODULE_NAME: Final = '_kllvm'
+ 26KLLVM_MODULE_FILE_NAME: Final = f'{KLLVM_MODULE_NAME}{PYTHON_EXTENSION_SUFFIX}'
+ 27
+ 28
+
+[docs] + 29def compile_kllvm(target_dir: str | Path, *, verbose: bool = False) -> Path: + 30 target_dir = Path(target_dir).resolve() + 31 check_dir_path(target_dir) + 32 + 33 module_file = target_dir / KLLVM_MODULE_FILE_NAME + 34 + 35 args = ['llvm-kompile', 'pythonast', '--python', sys.executable, '--python-output-dir', str(target_dir)] + 36 if verbose: + 37 args += ['--verbose'] + 38 + 39 _LOGGER.info(f'Compiling pythonast extension: {module_file.name}') + 40 run_process_2(args, logger=_LOGGER) + 41 + 42 assert module_file.is_file() + 43 return module_file
+ + 44 + 45 + 46# -------------- + 47# _kllvm_runtime + 48# -------------- + 49 + 50RUNTIME_MODULE_NAME: Final = '_kllvm_runtime' + 51RUNTIME_MODULE_FILE_NAME: Final = f'{RUNTIME_MODULE_NAME}{PYTHON_EXTENSION_SUFFIX}' + 52 + 53 +
+[docs] + 54def compile_runtime( + 55 definition_dir: str | Path, + 56 target_dir: str | Path | None = None, + 57 *, + 58 ccopts: Iterable[str] = (), + 59 verbose: bool = False, + 60) -> Path: + 61 definition_dir = Path(definition_dir).resolve() + 62 check_dir_path(definition_dir) + 63 + 64 if target_dir is None: + 65 target_dir = definition_dir + 66 else: + 67 target_dir = Path(target_dir).resolve() + 68 check_dir_path(target_dir) + 69 + 70 ccopts = list(ccopts) + 71 + 72 defn_file = definition_dir / 'definition.kore' + 73 check_file_path(defn_file) + 74 + 75 dt_dir = definition_dir / 'dt' + 76 check_dir_path(dt_dir) + 77 + 78 module_file = target_dir / RUNTIME_MODULE_FILE_NAME + 79 + 80 args = ['llvm-kompile', str(defn_file), str(dt_dir), 'python', '--python', sys.executable] + 81 if target_dir: + 82 args += ['--python-output-dir', str(target_dir)] + 83 if verbose: + 84 args += ['--verbose'] + 85 if ccopts: + 86 args += ['--'] + 87 args += ccopts + 88 + 89 _LOGGER.info(f'Compiling python extension: {module_file.name}') + 90 run_process_2(args, logger=_LOGGER) + 91 + 92 assert module_file.is_file() + 93 return module_file
+ + 94 + 95 + 96# ------------------------------- + 97# utility for generation of hints + 98# ------------------------------- + 99 +100 +
+[docs] +101def generate_hints( +102 definition_dir: str | Path, +103 input_kore_file: str | Path, +104 target_dir: str | Path | None = None, +105 hints_file_name: str = 'hints.bin', +106) -> Path: +107 definition_dir = Path(definition_dir).resolve() +108 check_dir_path(definition_dir) +109 +110 input_kore_file = Path(input_kore_file).resolve() +111 check_file_path(input_kore_file) +112 +113 if target_dir is None: +114 target_dir = definition_dir +115 else: +116 target_dir = Path(target_dir).resolve() +117 check_dir_path(target_dir) +118 +119 interpreter = definition_dir / 'interpreter' +120 check_file_path(interpreter) +121 +122 hints_file = target_dir / hints_file_name +123 +124 args = [str(interpreter), str(input_kore_file), '-1', str(hints_file), '--proof-output'] +125 _LOGGER.info(f'Generating hints: {hints_file.name}') +126 run_process_2(args, logger=_LOGGER) +127 +128 assert hints_file.is_file() +129 +130 return hints_file
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kllvm/convert.html b/pyk/_modules/pyk/kllvm/convert.html new file mode 100644 index 00000000000..5092966dff1 --- /dev/null +++ b/pyk/_modules/pyk/kllvm/convert.html @@ -0,0 +1,381 @@ + + + + + + + + pyk.kllvm.convert — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kllvm.convert

+  1from __future__ import annotations
+  2
+  3from typing import TYPE_CHECKING
+  4
+  5from ..kore.syntax import (
+  6    ML_SYMBOLS,
+  7    AliasDecl,
+  8    App,
+  9    Assoc,
+ 10    Axiom,
+ 11    Claim,
+ 12    Definition,
+ 13    EVar,
+ 14    Import,
+ 15    LeftAssoc,
+ 16    MLPattern,
+ 17    Module,
+ 18    RightAssoc,
+ 19    SortApp,
+ 20    SortDecl,
+ 21    SortVar,
+ 22    String,
+ 23    SVar,
+ 24    Symbol,
+ 25    SymbolDecl,
+ 26    VarPattern,
+ 27)
+ 28from . import ast as kllvm
+ 29
+ 30if TYPE_CHECKING:
+ 31    from collections.abc import Iterable
+ 32    from typing import Any
+ 33
+ 34    from ..kore.syntax import Pattern, Sentence, Sort
+ 35
+ 36
+ 37# -----------
+ 38# pyk -> llvm
+ 39# -----------
+ 40
+ 41
+
+[docs] + 42def definition_to_llvm(definition: Definition) -> kllvm.Definition: + 43 res = kllvm.Definition() + 44 for mod in definition.modules: + 45 res.add_module(module_to_llvm(mod)) + 46 _add_attributes(res, definition.attrs) + 47 return res
+ + 48 + 49 +
+[docs] + 50def module_to_llvm(module: Module) -> kllvm.Module: + 51 res = kllvm.Module(module.name) + 52 for sentence in module.sentences: + 53 res.add_declaration(sentence_to_llvm(sentence)) + 54 _add_attributes(res, module.attrs) + 55 return res
+ + 56 + 57 +
+[docs] + 58def sentence_to_llvm(sentence: Sentence) -> kllvm.Declaration: + 59 match sentence: + 60 case Import(mod_name, attrs): + 61 res = kllvm.ModuleImportDeclaration(mod_name) + 62 _add_attributes(res, attrs) + 63 return res + 64 case SortDecl(name, vars, attrs, hooked): + 65 res = kllvm.CompositeSortDeclaration(name, hooked) + 66 for var in vars: + 67 res.add_object_sort_variable(sort_to_llvm(var)) + 68 _add_attributes(res, attrs) + 69 return res + 70 case SymbolDecl(symbol, param_sorts, sort, attrs, hooked): + 71 res = kllvm.SymbolDeclaration(symbol.name, hooked) + 72 for var in symbol.vars: + 73 res.add_object_sort_variable(sort_to_llvm(var)) + 74 for param_sort in param_sorts: + 75 res.symbol.add_argument(sort_to_llvm(param_sort)) + 76 res.symbol.add_sort(sort_to_llvm(sort)) + 77 _add_attributes(res, attrs) + 78 return res + 79 case AliasDecl(alias, param_sorts, sort, left, right, attrs): + 80 res = kllvm.AliasDeclaration(alias.name) + 81 for var in alias.vars: + 82 res.add_object_sort_variable(sort_to_llvm(var)) + 83 for param_sort in param_sorts: + 84 res.symbol.add_argument(sort_to_llvm(param_sort)) + 85 res.symbol.add_sort(sort_to_llvm(sort)) + 86 res.add_variables(_composite_pattern(left.symbol, left.sorts, left.args)) + 87 res.add_pattern(pattern_to_llvm(right)) + 88 _add_attributes(res, attrs) + 89 return res + 90 case Axiom(vars, pattern, attrs): + 91 res = kllvm.AxiomDeclaration(False) + 92 for var in vars: + 93 res.add_object_sort_variable(sort_to_llvm(var)) + 94 res.add_pattern(pattern_to_llvm(pattern)) + 95 _add_attributes(res, attrs) + 96 return res + 97 case Claim(vars, pattern, attrs): + 98 res = kllvm.AxiomDeclaration(True) + 99 for var in vars: +100 res.add_object_sort_variable(sort_to_llvm(var)) +101 res.add_pattern(pattern_to_llvm(pattern)) +102 _add_attributes(res, attrs) +103 return res +104 case _: +105 raise AssertionError()
+ +106 +107 +
+[docs] +108def pattern_to_llvm(pattern: Pattern) -> kllvm.Pattern: +109 match pattern: +110 case String(value): +111 return kllvm.StringPattern(value.encode('latin-1')) +112 case VarPattern(name, sort): +113 return kllvm.VariablePattern(name, sort_to_llvm(sort)) +114 case App(symbol, sorts, args): +115 return _composite_pattern(symbol, sorts, args) +116 case Assoc(): +117 return _composite_pattern(pattern.kore_symbol(), [], [pattern.app]) +118 case MLPattern(): +119 return _composite_pattern(pattern.symbol(), pattern.sorts, pattern.ctor_patterns) +120 case _: +121 raise AssertionError()
+ +122 +123 +
+[docs] +124def sort_to_llvm(sort: Sort) -> kllvm.Sort: +125 match sort: +126 case SortVar(name): +127 return kllvm.SortVariable(name) +128 case SortApp(name, sorts): +129 res = kllvm.CompositeSort(sort.name, kllvm.value_type(kllvm.SortCategory(0))) +130 for subsort in sorts: +131 res.add_argument(sort_to_llvm(subsort)) +132 return res +133 case _: +134 raise AssertionError()
+ +135 +136 +137def _add_attributes(term: Any, attrs: tuple[App, ...]) -> None: +138 for attr in attrs: +139 term.add_attribute(_composite_pattern(attr.symbol, attr.sorts, attr.args)) +140 +141 +142def _composite_pattern(symbol_id: str, sorts: Iterable, patterns: Iterable[Pattern]) -> kllvm.CompositePattern: +143 symbol = kllvm.Symbol(symbol_id) +144 for sort in sorts: +145 symbol.add_formal_argument(sort_to_llvm(sort)) +146 res = kllvm.CompositePattern(symbol) +147 for pattern in patterns: +148 res.add_argument(pattern_to_llvm(pattern)) +149 return res +150 +151 +152# ----------- +153# llvm -> pyk +154# ----------- +155 +156 +
+[docs] +157def llvm_to_definition(definition: kllvm.Definition) -> Definition: +158 modules = (llvm_to_module(mod) for mod in definition.modules) +159 attrs = _attrs(definition.attributes) +160 return Definition(modules, attrs)
+ +161 +162 +
+[docs] +163def llvm_to_module(module: kllvm.Module) -> Module: +164 sentences = (llvm_to_sentence(decl) for decl in module.declarations) +165 attrs = _attrs(module.attributes) +166 return Module(module.name, sentences, attrs)
+ +167 +168 +
+[docs] +169def llvm_to_sentence(decl: kllvm.Declaration) -> Sentence: +170 attrs = _attrs(decl.attributes) +171 vars = tuple(llvm_to_sort_var(var) for var in decl.object_sort_variables) +172 match decl: +173 case kllvm.ModuleImportDeclaration(): # type: ignore +174 return Import(decl.module_name, attrs) +175 case kllvm.CompositeSortDeclaration(): # type: ignore +176 return SortDecl(decl.name, vars, attrs, hooked=decl.is_hooked) +177 case kllvm.SymbolDeclaration(): # type: ignore +178 llvm_to_symbol = decl.symbol +179 symbol = Symbol(llvm_to_symbol.name, vars) +180 param_sorts = (llvm_to_sort(sort) for sort in llvm_to_symbol.arguments) +181 sort = llvm_to_sort(llvm_to_symbol.sort) +182 return SymbolDecl(symbol, param_sorts, sort, attrs, hooked=decl.is_hooked) +183 case kllvm.AliasDeclaration(): # type: ignore +184 llvm_to_symbol = decl.symbol +185 symbol = Symbol(llvm_to_symbol.name, vars) +186 param_sorts = (llvm_to_sort(sort) for sort in llvm_to_symbol.arguments) +187 sort = llvm_to_sort(llvm_to_symbol.sort) +188 left = App(*_unpack_composite_pattern(decl.variables)) +189 right = llvm_to_pattern(decl.pattern) +190 return AliasDecl(symbol, param_sorts, sort, left, right, attrs) +191 case kllvm.AxiomDeclaration(): # type: ignore +192 pattern = llvm_to_pattern(decl.pattern) +193 if decl.is_claim: +194 return Claim(vars, pattern, attrs) +195 else: +196 return Axiom(vars, pattern, attrs) +197 case _: +198 raise AssertionError()
+ +199 +200 +
+[docs] +201def llvm_to_pattern(pattern: kllvm.Pattern) -> Pattern: +202 match pattern: +203 case kllvm.StringPattern(): # type: ignore +204 return String(pattern.contents.decode('latin-1')) +205 case kllvm.VariablePattern(): # type: ignore +206 if pattern.name and pattern.name[0] == '@': +207 return SVar(pattern.name, llvm_to_sort(pattern.sort)) +208 else: +209 return EVar(pattern.name, llvm_to_sort(pattern.sort)) +210 case kllvm.CompositePattern(): # type: ignore +211 symbol, sorts, patterns = _unpack_composite_pattern(pattern) +212 if symbol in ML_SYMBOLS: +213 return MLPattern.of(symbol, sorts, patterns) +214 elif symbol in [r'\left-assoc', r'\right-assoc']: +215 (app,) = patterns +216 assert isinstance(app, App) +217 assoc = LeftAssoc if symbol == r'\left-assoc' else RightAssoc +218 return assoc(app.symbol, app.sorts, app.args) +219 else: +220 return App(symbol, sorts, patterns) +221 case _: +222 raise AssertionError()
+ +223 +224 +
+[docs] +225def llvm_to_sort(sort: kllvm.Sort) -> Sort: +226 match sort: +227 case kllvm.SortVariable(): # type: ignore +228 return SortVar(sort.name) +229 case kllvm.CompositeSort(): # type: ignore +230 return SortApp(sort.name, (llvm_to_sort(subsort) for subsort in sort.arguments)) +231 case _: +232 raise AssertionError()
+ +233 +234 +
+[docs] +235def llvm_to_sort_var(var: kllvm.SortVariable) -> SortVar: +236 return SortVar(var.name)
+ +237 +238 +239def _attrs(attributes: dict[str, kllvm.CompositePattern]) -> tuple[App, ...]: +240 return tuple(App(*_unpack_composite_pattern(attr)) for _, attr in attributes.items()) +241 +242 +243def _unpack_composite_pattern(pattern: kllvm.CompositePattern) -> tuple[str, tuple[Sort, ...], tuple[Pattern, ...]]: +244 symbol = pattern.constructor.name +245 sorts = tuple(llvm_to_sort(sort) for sort in pattern.constructor.formal_arguments) +246 patterns = tuple(llvm_to_pattern(subpattern) for subpattern in pattern.arguments) +247 return symbol, sorts, patterns +
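A small round-trip sketch for the conversion helpers; importing pyk.kllvm.convert assumes the _kllvm bindings have already been built and are importable (see pyk.kllvm.compiler and pyk.kllvm.importer):

from pyk.kllvm.convert import llvm_to_pattern, pattern_to_llvm
from pyk.kore.syntax import EVar, SortApp

pattern = EVar('VarX', SortApp('SortInt'))       # a KORE element variable of sort SortInt
llvm_pattern = pattern_to_llvm(pattern)          # pyk -> llvm
assert llvm_to_pattern(llvm_pattern) == pattern  # llvm -> pyk round-trips to the same pattern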
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kllvm/hints/prooftrace.html b/pyk/_modules/pyk/kllvm/hints/prooftrace.html new file mode 100644 index 00000000000..e3594cf636c --- /dev/null +++ b/pyk/_modules/pyk/kllvm/hints/prooftrace.html @@ -0,0 +1,797 @@ + + + + + + + + pyk.kllvm.hints.prooftrace — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kllvm.hints.prooftrace

+  1from __future__ import annotations
+  2
+  3from abc import ABC, abstractmethod
+  4from typing import TYPE_CHECKING, final
+  5
+  6# isort: off
+  7import pyk.kllvm.load_static  # noqa: F401
+  8from _kllvm.prooftrace import (  # type: ignore  # noqa: F401, TC002
+  9    kore_header,
+ 10    llvm_rewrite_event,
+ 11    llvm_function_event,
+ 12    llvm_hook_event,
+ 13    llvm_rewrite_trace,
+ 14    llvm_rule_event,
+ 15    llvm_side_condition_end_event,
+ 16    llvm_side_condition_event,
+ 17    llvm_step_event,
+ 18    llvm_pattern_matching_failure_event,
+ 19    annotated_llvm_event,
+ 20    llvm_rewrite_trace_iterator,
+ 21    EventType,
+ 22)
+ 23from ..ast import Pattern
+ 24
+ 25# isort: on
+ 26
+ 27if TYPE_CHECKING:
+ 28    from collections.abc import Generator
+ 29    from pathlib import Path
+ 30
+ 31    from _kllvm.prooftrace import Argument
+ 32
+ 33
+
+[docs] + 34class LLVMStepEvent(ABC): + 35 """Abstract base class representing an LLVM step event."""
+ + 36 + 37 +
+[docs] + 38class LLVMRewriteEvent(LLVMStepEvent): + 39 """Represents an LLVM rewrite event.""" + 40 + 41 @property + 42 @abstractmethod + 43 def rule_ordinal(self) -> int: + 44 """Return the axiom ordinal number of the rewrite rule. + 45 + 46 The rule ordinal represents the `nth` axiom in the kore definition. + 47 """ + 48 ... + 49 + 50 @property + 51 @abstractmethod + 52 def substitution(self) -> dict[str, Pattern]: + 53 """Returns the substitution dictionary used to perform the rewrite represented by this event.""" + 54 ...
+ + 55 + 56 +
+[docs] + 57@final + 58class LLVMRuleEvent(LLVMRewriteEvent): + 59 """Represents an LLVM rule event. + 60 + 61 Attributes: + 62 _rule_event (llvm_rule_event): The underlying LLVM rule event. + 63 """ + 64 + 65 _rule_event: llvm_rule_event + 66 +
+[docs] + 67 def __init__(self, rule_event: llvm_rule_event) -> None: + 68 """Initialize a new instance of the LLVMRuleEvent class. + 69 + 70 Args: + 71 rule_event (llvm_rule_event): The LLVM rule event object. + 72 """ + 73 self._rule_event = rule_event
+ + 74 +
+[docs] + 75 def __repr__(self) -> str: + 76 """Return a string representation of the object. + 77 + 78 Returns: + 79 A string representation of the LLVMRuleEvent object using the AST printing method. + 80 """ + 81 return self._rule_event.__repr__()
+ + 82 + 83 @property + 84 def rule_ordinal(self) -> int: + 85 """Returns the axiom ordinal number of the rule event.""" + 86 return self._rule_event.rule_ordinal + 87 + 88 @property + 89 def substitution(self) -> dict[str, Pattern]: + 90 """Returns the substitution dictionary used to perform the rewrite represented by this rule event.""" + 91 return {k: v[0] for k, v in self._rule_event.substitution.items()}
+ + 92 + 93 +
+[docs] + 94@final + 95class LLVMSideConditionEventEnter(LLVMRewriteEvent): + 96 """Represents an event that enters a side condition in LLVM rewriting. + 97 + 98 This event is used to check the side condition of a rule. Mostly used in ensures/requires clauses. + 99 +100 Attributes: +101 _side_condition_event (llvm_side_condition_event): The underlying side condition event. +102 """ +103 +104 _side_condition_event: llvm_side_condition_event +105 +
+[docs] +106 def __init__(self, side_condition_event: llvm_side_condition_event) -> None: +107 """Initialize a new instance of the LLVMSideConditionEventEnter class. +108 +109 Args: +110 side_condition_event (llvm_side_condition_event): The LLVM side condition event object. +111 """ +112 self._side_condition_event = side_condition_event
+ +113 +
+[docs] +114 def __repr__(self) -> str: +115 """Return a string representation of the object. +116 +117 Returns: +118 A string representation of the LLVMSideConditionEventEnter object using the AST printing method. +119 """ +120 return self._side_condition_event.__repr__()
+ +121 +122 @property +123 def rule_ordinal(self) -> int: +124 """Returns the axiom ordinal number associated with the side condition event.""" +125 return self._side_condition_event.rule_ordinal +126 +127 @property +128 def substitution(self) -> dict[str, Pattern]: +129 """Returns the substitution dictionary used to perform the rewrite represented by this side condition event.""" +130 return {k: v[0] for k, v in self._side_condition_event.substitution.items()}
+ +131 +132 +
+[docs] +133@final +134class LLVMSideConditionEventExit(LLVMStepEvent): +135 """Represents an LLVM side condition event indicating the exit of a side condition. +136 +137 This event contains the result of the side condition evaluation. +138 +139 Attributes: +140 _side_condition_end_event (llvm_side_condition_end_event): The underlying side condition end event. +141 """ +142 +143 _side_condition_end_event: llvm_side_condition_end_event +144 +
+[docs] +145 def __init__(self, side_condition_end_event: llvm_side_condition_end_event) -> None: +146 """Initialize a new instance of the LLVMSideConditionEventExit class. +147 +148 Args: +149 side_condition_end_event (llvm_side_condition_end_event): The LLVM side condition end event object. +150 """ +151 self._side_condition_end_event = side_condition_end_event
+ +152 +
+[docs] +153 def __repr__(self) -> str: +154 """Return a string representation of the object. +155 +156 Returns: +157 A string representation of the LLVMSideConditionEventExit object using the AST printing method. +158 """ +159 return self._side_condition_end_event.__repr__()
+ +160 +161 @property +162 def rule_ordinal(self) -> int: +163 """Return the axiom ordinal number associated with the side condition event.""" +164 return self._side_condition_end_event.rule_ordinal +165 +166 @property +167 def check_result(self) -> bool: +168 """Return the boolean result of the evaluation of the side condition that corresponds to this event.""" +169 return self._side_condition_end_event.check_result
+ +170 +171 +
+[docs] +172@final +173class LLVMPatternMatchingFailureEvent(LLVMStepEvent): +174 """Represents an LLVM pattern matching failure event. +175 +176 This event is used to indicate that the pattern matching failed during the rewriting process. +177 +178 Attributes: +179 _pattern_matching_failure_event (llvm_pattern_matching_failure_event): The underlying pattern matching failure event. +180 """ +181 +182 _pattern_matching_failure_event: llvm_pattern_matching_failure_event +183 +
+[docs] +184 def __init__(self, pattern_matching_failure_event: llvm_pattern_matching_failure_event) -> None: +185 """Initialize a new instance of the LLVMPatternMatchingFailureEvent class. +186 +187 Args: +188 pattern_matching_failure_event (llvm_pattern_matching_failure_event): The LLVM pattern matching failure event object. +189 """ +190 self._pattern_matching_failure_event = pattern_matching_failure_event
+ +191 +
+[docs] +192 def __repr__(self) -> str: +193 """Return a string representation of the object. +194 +195 Returns: +196 A string representation of the LLVMPatternMatchingFailureEvent object using the AST printing method. +197 """ +198 return self._pattern_matching_failure_event.__repr__()
+ +199 +200 @property +201 def function_name(self) -> str: +202 """Return the name of the function that failed to match the pattern.""" +203 return self._pattern_matching_failure_event.function_name
+ +204 +205 +
+[docs] +206@final +207class LLVMFunctionEvent(LLVMStepEvent): +208 """Represent an LLVM function event in a proof trace. +209 +210 Attributes: +211 _function_event (llvm_function_event): The underlying LLVM function event object. +212 """ +213 +214 _function_event: llvm_function_event +215 +
+[docs] +216 def __init__(self, function_event: llvm_function_event) -> None: +217 """Initialize a new instance of the LLVMFunctionEvent class. +218 +219 Args: +220 function_event (llvm_function_event): The LLVM function event object. +221 """ +222 self._function_event = function_event
+ +223 +
+[docs] +224 def __repr__(self) -> str: +225 """Return a string representation of the object. +226 +227 Returns: +228 A string representation of the LLVMFunctionEvent object using the AST printing method. +229 """ +230 return self._function_event.__repr__()
+ +231 +232 @property +233 def name(self) -> str: +234 """Return the name of the LLVM function as a KORE Symbol Name.""" +235 return self._function_event.name +236 +237 @property +238 def relative_position(self) -> str: +239 """Return the relative position of the LLVM function event in the proof trace.""" +240 return self._function_event.relative_position +241 +242 @property +243 def args(self) -> list[LLVMArgument]: +244 """Return a list of LLVMArgument objects representing the arguments of the LLVM function.""" +245 return [LLVMArgument(arg) for arg in self._function_event.args]
+ +246 +247 +
+[docs] +248@final +249class LLVMHookEvent(LLVMStepEvent): +250 """Represents a hook event in LLVM execution. +251 +252 Attributes: +253 _hook_event (llvm_hook_event): The underlying hook event object. +254 """ +255 +256 _hook_event: llvm_hook_event +257 +
+[docs] +258 def __init__(self, hook_event: llvm_hook_event) -> None: +259 """Initialize a new instance of the LLVMHookEvent class. +260 +261 Args: +262 hook_event (llvm_hook_event): The LLVM hook event object. +263 """ +264 self._hook_event = hook_event
+ +265 +
+[docs] +266 def __repr__(self) -> str: +267 """Return a string representation of the object. +268 +269 Returns: +270 A string representation of the LLVMHookEvent object using the AST printing method. +271 """ +272 return self._hook_event.__repr__()
+ +273 +274 @property +275 def name(self) -> str: +276 """Return the attribute name of the hook event. Ex.: "INT.add".""" +277 return self._hook_event.name +278 +279 @property +280 def relative_position(self) -> str: +281 """Return the relative position of the hook event in the proof trace.""" +282 return self._hook_event.relative_position +283 +284 @property +285 def args(self) -> list[LLVMArgument]: +286 """Return a list of LLVMArgument objects representing the arguments of the hook event.""" +287 return [LLVMArgument(arg) for arg in self._hook_event.args] +288 +289 @property +290 def result(self) -> Pattern: +291 """Return the result pattern of the hook event evaluation.""" +292 return self._hook_event.result
+ +293 +294 +
+[docs] +295@final +296class LLVMArgument: +297 """Represents an LLVM argument. +298 +299 Attributes: +300 _argument (Argument): The underlying Argument object. An argument is a wrapper object containing either a step +301 event or a KORE pattern. +302 """ +303 +304 _argument: Argument +305 +
+[docs] +306 def __init__(self, argument: Argument) -> None: +307 """Initialize the LLVMArgument object. +308 +309 Args: +310 argument (Argument): The Argument object. +311 """ +312 self._argument = argument
+ +313 +
+[docs] +314 def __repr__(self) -> str: +315 """Return a string representation of the object. +316 +317 Returns: +318 Returns a string representation of the LLVMArgument object using the AST printing method. +319 """ +320 return self._argument.__repr__()
+ +321 +322 @property +323 def step_event(self) -> LLVMStepEvent: +324 """Returns the LLVMStepEvent associated with the argument if any.""" +325 if isinstance(self._argument.step_event, llvm_rule_event): +326 return LLVMRuleEvent(self._argument.step_event) +327 elif isinstance(self._argument.step_event, llvm_side_condition_event): +328 return LLVMSideConditionEventEnter(self._argument.step_event) +329 elif isinstance(self._argument.step_event, llvm_side_condition_end_event): +330 return LLVMSideConditionEventExit(self._argument.step_event) +331 elif isinstance(self._argument.step_event, llvm_function_event): +332 return LLVMFunctionEvent(self._argument.step_event) +333 elif isinstance(self._argument.step_event, llvm_hook_event): +334 return LLVMHookEvent(self._argument.step_event) +335 elif isinstance(self._argument.step_event, llvm_pattern_matching_failure_event): +336 return LLVMPatternMatchingFailureEvent(self._argument.step_event) +337 else: +338 raise AssertionError() +339 +340 @property +341 def kore_pattern(self) -> Pattern: +342 """Return the KORE Pattern associated with the argument if any.""" +343 assert isinstance(self._argument.kore_pattern, Pattern) +344 return self._argument.kore_pattern +345 +
+[docs] +346 def is_kore_pattern(self) -> bool: +347 """Check if the argument is a KORE Pattern.""" +348 return self._argument.is_kore_pattern()
+ +349 +
+[docs] +350 def is_step_event(self) -> bool: +351 """Check if the argument is a step event.""" +352 return self._argument.is_step_event()
+
+ +353 +354 +
+[docs] +355@final +356class LLVMRewriteTrace: +357 """Represents an LLVM rewrite trace. +358 +359 Attributes: +360 _rewrite_trace (llvm_rewrite_trace): The underlying LLVM rewrite trace object. +361 """ +362 +363 _rewrite_trace: llvm_rewrite_trace +364 +
+[docs] +365 def __init__(self, rewrite_trace: llvm_rewrite_trace) -> None: +366 """Initialize a new instance of the LLVMRewriteTrace class. +367 +368 Args: +369 rewrite_trace (llvm_rewrite_trace): The LLVM rewrite trace object. +370 """ +371 self._rewrite_trace = rewrite_trace
+ +372 +
+[docs] +373 def __repr__(self) -> str: +374 """Return a string representation of the object. +375 +376 Returns: +377 A string representation of the LLVMRewriteTrace object using the AST printing method. +378 """ +379 return self._rewrite_trace.__repr__()
+ +380 +381 @property +382 def version(self) -> int: +383 """Returns the version of the binary hints format used by this trace.""" +384 return self._rewrite_trace.version +385 +386 @property +387 def pre_trace(self) -> list[LLVMArgument]: +388 """Returns a list of events that occurred before the initial configuration was constructed.""" +389 return [LLVMArgument(event) for event in self._rewrite_trace.pre_trace] +390 +391 @property +392 def initial_config(self) -> LLVMArgument: +393 """Returns the initial configuration as an LLVMArgument object.""" +394 return LLVMArgument(self._rewrite_trace.initial_config) +395 +396 @property +397 def trace(self) -> list[LLVMArgument]: +398 """Returns the trace. +399 +400 The trace is the list of events that occurred after the initial configuration was constructed until the end of the +401 proof trace when the final configuration is reached. +402 """ +403 return [LLVMArgument(event) for event in self._rewrite_trace.trace] +404 +
+[docs] +405 @staticmethod +406 def parse(trace: bytes, header: KoreHeader) -> LLVMRewriteTrace: +407 """Parse the given proof hints byte string using the given kore_header object.""" +408 return LLVMRewriteTrace(llvm_rewrite_trace.parse(trace, header._kore_header))
+
+ +409 +410 +
+[docs] +411 class KoreHeader: +412 """Represents the Kore header. +413 +414 The Kore header is a file that contains the version of the Binary KORE used to serialize/deserialize the +415 Proof Trace and all the additional information needed to make this process faster. +416 +417 Attributes: +418 _kore_header (kore_header): The underlying KORE Header object. +419 """ +420 +421 _kore_header: kore_header +422 +
+[docs] +423 def __init__(self, kore_header: kore_header) -> None: +424 """Initialize a new instance of the KoreHeader class. +425 +426 Args: +427 kore_header (kore_header): The KORE Header object. +428 """ +429 self._kore_header = kore_header
+ +430 +
+[docs] +431 @staticmethod +432 def create(header_path: Path) -> KoreHeader: +433 """Create a new KoreHeader object from the given header file path.""" +434 return KoreHeader(kore_header(str(header_path)))
+
+ +435 +436 +
+[docs] +437class LLVMEventType: +438 """Represents an LLVM event type. +439 +440 This works as a wrapper around the EventType enum. +441 It also provides properties to check the type of the event. +442 +443 Attributes: +444 _event_type (EventType): The underlying EventType object. +445 """ +446 +447 _event_type: EventType +448 +
+[docs] +449 def __init__(self, event_type: EventType) -> None: +450 """Initialize a new instance of the LLVMEventType class. +451 +452 Args: +453 event_type (EventType): The EventType object. +454 """ +455 self._event_type = event_type
+ +456 +457 @property +458 def is_pre_trace(self) -> bool: +459 """Checks if the event type is a pre-trace event.""" +460 return self._event_type == EventType.PreTrace +461 +462 @property +463 def is_initial_config(self) -> bool: +464 """Checks if the event type is an initial configuration event.""" +465 return self._event_type == EventType.InitialConfig +466 +467 @property +468 def is_trace(self) -> bool: +469 """Checks if the event type is a trace event.""" +470 return self._event_type == EventType.Trace
+ +471 +472 +
+[docs] +473class LLVMEventAnnotated: +474 """Represents an annotated LLVM event. +475 +476 This class is used to wrap an llvm_event and its corresponding event type. +477 This class is used to iterate over the LLVM rewrite trace events. +478 +479 Attributes: +480 _annotated_llvm_event (annotated_llvm_event): The underlying annotated LLVM event object. +481 """ +482 +483 _annotated_llvm_event: annotated_llvm_event +484 +
+[docs] +485 def __init__(self, annotated_llvm_event: annotated_llvm_event) -> None: +486 """Initialize a new instance of the LLVMEventAnnotated class. +487 +488 Args: +489 annotated_llvm_event (annotated_llvm_event): The annotated LLVM event object. +490 """ +491 self._annotated_llvm_event = annotated_llvm_event
+ +492 +493 @property +494 def type(self) -> LLVMEventType: +495 """Returns the LLVM event type.""" +496 return LLVMEventType(self._annotated_llvm_event.type) +497 +498 @property +499 def event(self) -> LLVMArgument: +500 """Returns the LLVM event as an LLVMArgument object.""" +501 return LLVMArgument(self._annotated_llvm_event.event)
+ +502 +503 +
+[docs] +504class LLVMRewriteTraceIterator: +505 """Represents an LLVM rewrite trace iterator. +506 +507 This class is used to iterate over the LLVM rewrite trace events in the stream parser. +508 +509 Attributes: +510 _rewrite_trace_iterator (llvm_rewrite_trace_iterator): The underlying LLVM rewrite trace iterator object. +511 """ +512 +513 _rewrite_trace_iterator: llvm_rewrite_trace_iterator +514 +
+[docs] +515 def __init__(self, rewrite_trace_iterator: llvm_rewrite_trace_iterator) -> None: +516 """Initialize a new instance of the LLVMRewriteTraceIterator class. +517 +518 Args: +519 rewrite_trace_iterator (llvm_rewrite_trace_iterator): The LLVM rewrite trace iterator object. +520 """ +521 self._rewrite_trace_iterator = rewrite_trace_iterator
+ +522 +
+[docs] +523 def __repr__(self) -> str: +524 """Return a string representation of the object. +525 +526 Returns: +527 A string representation of the LLVMRewriteTraceIterator object using the AST printing method. +528 """ +529 return self._rewrite_trace_iterator.__repr__()
+ +530 +
+[docs] +531 def __iter__(self) -> Generator[LLVMEventAnnotated, None, None]: +532 """Yield LLVMEventAnnotated objects. +533 +534 This method is an iterator that yields LLVMEventAnnotated objects. +535 It iterates over the events in the trace and returns the next event as an LLVMEventAnnotated object. +536 +537 Yields: +538 LLVMEventAnnotated: The next LLVMEventAnnotated object. +539 """ +540 while True: +541 next_event = self._rewrite_trace_iterator.get_next_event() +542 if next_event is None: +543 return +544 else: +545 yield LLVMEventAnnotated(next_event)
+ +546 +
+[docs] +547 def __next__(self) -> LLVMEventAnnotated: +548 """Yield the next LLVMEventAnnotated object from the iterator. +549 +550 Returns: +551 LLVMEventAnnotated: The next LLVMEventAnnotated object. +552 +553 Raises: +554 StopIteration: If there are no more events in the iterator. +555 """ +556 next_event = self._rewrite_trace_iterator.get_next_event() +557 if next_event is not None: +558 return LLVMEventAnnotated(next_event) +559 else: +560 raise StopIteration
+ +561 +562 @property +563 def version(self) -> int: +564 """Return the version of the HINTS format.""" +565 return self._rewrite_trace_iterator.version +566 +
+[docs] +567 @staticmethod +568 def from_file(trace_path: Path, header: KoreHeader) -> LLVMRewriteTraceIterator: +569 """Create a new LLVMRewriteTraceIterator object from the given trace and header file paths.""" +570 return LLVMRewriteTraceIterator(llvm_rewrite_trace_iterator.from_file(str(trace_path), header._kore_header))
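A sketch of consuming a proof-hints stream with the wrappers above; the header and hints paths are hypothetical (a hints file can be produced with pyk.kllvm.compiler.generate_hints), and the _kllvm bindings must be importable:

from pathlib import Path

from pyk.kllvm.hints.prooftrace import KoreHeader, LLVMRewriteTraceIterator

header = KoreHeader.create(Path('kore_header.bin'))  # hypothetical serialized KORE header
for annotated in LLVMRewriteTraceIterator.from_file(Path('hints.bin'), header):
    if annotated.type.is_trace and annotated.event.is_step_event():
        print(annotated.event.step_event)  # rule, side-condition, function or hook event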
+
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kllvm/importer.html b/pyk/_modules/pyk/kllvm/importer.html new file mode 100644 index 00000000000..851ae244da4 --- /dev/null +++ b/pyk/_modules/pyk/kllvm/importer.html @@ -0,0 +1,177 @@ + + + + + + + + pyk.kllvm.importer — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kllvm.importer

+ 1from __future__ import annotations
+ 2
+ 3import os
+ 4import sys
+ 5from contextlib import contextmanager
+ 6from importlib.util import module_from_spec, spec_from_file_location
+ 7from pathlib import Path
+ 8from typing import TYPE_CHECKING
+ 9
+10from ..cli.utils import check_dir_path, check_file_path
+11from .compiler import KLLVM_MODULE_FILE_NAME, KLLVM_MODULE_NAME, RUNTIME_MODULE_FILE_NAME, RUNTIME_MODULE_NAME
+12from .runtime import Runtime
+13
+14if TYPE_CHECKING:
+15    from collections.abc import Iterator
+16    from types import ModuleType
+17
+18
+
+[docs] +19@contextmanager +20def rtld_local() -> Iterator[None]: +21 old_flags = sys.getdlopenflags() +22 sys.setdlopenflags(old_flags | os.RTLD_LOCAL) +23 yield +24 sys.setdlopenflags(old_flags)
+ +25 +26 +
+[docs] +27def import_from_file(module_name: str, module_file: str | Path) -> ModuleType: +28 module_file = Path(module_file).resolve() +29 check_file_path(module_file) +30 +31 spec = spec_from_file_location(module_name, module_file) +32 if not spec: +33 raise ValueError('Could not create ModuleSpec') +34 +35 module = module_from_spec(spec) +36 if not module: +37 raise ValueError('Could not create ModuleType') +38 +39 if not spec.loader: +40 raise ValueError('Spec has no loader') +41 +42 spec.loader.exec_module(module) +43 +44 return module
+ +45 +46 +
+[docs] +47def import_kllvm(target_dir: str | Path) -> ModuleType: +48 if '_kllvm' in sys.modules: +49 return sys.modules['_kllvm'] +50 target_dir = Path(target_dir) +51 check_dir_path(target_dir) +52 module_file = target_dir / KLLVM_MODULE_FILE_NAME +53 return import_from_file(KLLVM_MODULE_NAME, module_file)
+ +54 +55 +
+[docs] +56def import_runtime(target_dir: str | Path) -> Runtime: +57 target_dir = Path(target_dir) +58 check_dir_path(target_dir) +59 module_file = target_dir / RUNTIME_MODULE_FILE_NAME +60 +61 with rtld_local(): +62 module = import_from_file(RUNTIME_MODULE_NAME, module_file) +63 +64 return Runtime(module)
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kllvm/load_static.html b/pyk/_modules/pyk/kllvm/load_static.html new file mode 100644 index 00000000000..9a5baaf205d --- /dev/null +++ b/pyk/_modules/pyk/kllvm/load_static.html @@ -0,0 +1,126 @@ + + + + + + + + pyk.kllvm.load_static — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kllvm.load_static

+ 1from __future__ import annotations
+ 2
+ 3from pathlib import Path
+ 4from typing import TYPE_CHECKING
+ 5
+ 6from ..utils import run_process_2
+ 7from .importer import import_kllvm
+ 8
+ 9if TYPE_CHECKING:
+10    from typing import Final
+11
+12
+
+[docs] +13def get_kllvm() -> Path: +14 args = ['llvm-kompile', '--bindings-path'] +15 proc = run_process_2(args) +16 bindings_dir = Path(proc.stdout.rstrip()).resolve() +17 kllvm_dir = bindings_dir / 'kllvm' +18 return kllvm_dir
+ +19 +20 +21KLLVM_MODULE_DIR: Final = get_kllvm() +22KLLVM_MODULE: Final = import_kllvm(KLLVM_MODULE_DIR) +
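Importing this module is itself the API: it asks llvm-kompile --bindings-path for the statically built bindings and imports them as a side effect, so llvm-kompile must be on PATH. A sketch:

import pyk.kllvm.load_static  # noqa: F401  (side effect: the _kllvm bindings are imported)
from pyk.kllvm.load_static import KLLVM_MODULE_DIR

print(KLLVM_MODULE_DIR)  # directory reported by llvm-kompile --bindings-path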
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kllvm/parser.html b/pyk/_modules/pyk/kllvm/parser.html new file mode 100644 index 00000000000..3eb17757bb7 --- /dev/null +++ b/pyk/_modules/pyk/kllvm/parser.html @@ -0,0 +1,160 @@ + + + + + + + + pyk.kllvm.parser — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kllvm.parser

+ 1from __future__ import annotations
+ 2
+ 3from pathlib import Path
+ 4from typing import TYPE_CHECKING
+ 5
+ 6from _kllvm.parser import Parser  # type: ignore
+ 7
+ 8from ..cli.utils import check_file_path
+ 9
+10if TYPE_CHECKING:
+11    from .ast import Definition, Pattern, Sort
+12
+13
+
+[docs] +14def parse_pattern(text: str) -> Pattern: +15 return Parser.from_string(text).pattern()
+ +16 +17 +
+[docs] +18def parse_sort(text: str) -> Sort: +19 return Parser.from_string(text).sort()
+ +20 +21 +
+[docs] +22def parse_definition(text: str) -> Definition: +23 return Parser.from_string(text).definition()
+ +24 +25 +
+[docs] +26def parse_pattern_file(path: str | Path) -> Pattern: +27 return _parser_from_path(path).pattern()
+ +28 +29 +
+[docs] +30def parse_sort_file(path: str | Path) -> Pattern: +31 return _parser_from_path(path).sort()
+ +32 +33 +
+[docs] +34def parse_definition_file(path: str | Path) -> Definition: +35 return _parser_from_path(path).definition()
+ +36 +37 +38def _parser_from_path(path: str | Path) -> Parser: +39 path = Path(path) +40 check_file_path(path) +41 return Parser(str(path)) +
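A minimal parsing sketch; the textual KORE below is a simple domain value, and the definition path is hypothetical:

from pyk.kllvm.parser import parse_definition_file, parse_pattern

pattern = parse_pattern(r'\dv{SortInt{}}("1")')                      # parse a KORE pattern from a string
definition = parse_definition_file('imp-kompiled/definition.kore')  # hypothetical kompiled definition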
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kllvm/runtime.html b/pyk/_modules/pyk/kllvm/runtime.html new file mode 100644 index 00000000000..812e8fa95a1 --- /dev/null +++ b/pyk/_modules/pyk/kllvm/runtime.html @@ -0,0 +1,209 @@ + + + + + + + + pyk.kllvm.runtime — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kllvm.runtime

+ 1from __future__ import annotations
+ 2
+ 3from typing import TYPE_CHECKING
+ 4
+ 5if TYPE_CHECKING:
+ 6    from types import ModuleType
+ 7    from typing import Any
+ 8
+ 9    from .ast import CompositePattern, Pattern, Sort
+10
+11
+
+[docs] +12class Runtime: +13 _module: ModuleType +14 +15 def __init__(self, module: ModuleType): +16 module.init_static_objects() +17 self._module = module +18 +
+[docs] +19 def term(self, pattern: Pattern) -> Term: +20 return Term(self._module.InternalTerm(pattern))
+ +21 +
+[docs] +22 def deserialize(self, bs: bytes) -> Term | None: +23 block = self._module.InternalTerm.deserialize(bs) +24 if block is None: +25 return None +26 return Term(block)
+ +27 +
+[docs] +28 def step(self, pattern: Pattern, depth: int | None = 1) -> Pattern: +29 term = self.term(pattern) +30 term.step(depth=depth) +31 return term.pattern
+ +32 +
+[docs] +33 def run(self, pattern: Pattern) -> Pattern: +34 return self.step(pattern, depth=None)
+ +35 +
+[docs] +36 def simplify(self, pattern: Pattern, sort: Sort) -> Pattern: +37 res = self._module.simplify_pattern(pattern, sort) +38 self._module.free_all_gc_memory() +39 return res
+ +40 +
+[docs] +41 def simplify_bool(self, pattern: Pattern) -> bool: +42 res = self._module.simplify_bool_pattern(pattern) +43 self._module.free_all_gc_memory() +44 return res
+ +45 +
+[docs] +46 def evaluate(self, pattern: CompositePattern) -> Pattern: +47 res = self._module.evaluate_function(pattern) +48 self._module.free_all_gc_memory() +49 return res
+
+ +50 +51 +
+[docs] +52class Term: +53 _block: Any # module.InternalTerm +54 +55 def __init__(self, block: Any): +56 self._block = block +57 +58 @property +59 def pattern(self) -> Pattern: +60 return self._block.to_pattern() +61 +
+[docs] +62 def serialize(self) -> bytes: +63 return self._block.serialize()
+ +64 +
+[docs] +65 def step(self, depth: int | None = 1) -> None: +66 self._block = self._block.step(depth if depth is not None else -1)
+ +67 +
+[docs] +68 def run(self) -> None: +69 self.step(depth=None)
+ +70 +71 def __str__(self) -> str: +72 return str(self._block)
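A sketch of exercising the Runtime wrapper; the kompiled directory is hypothetical and the initial pattern is a placeholder rather than a real program configuration:

from pyk.kllvm.importer import import_runtime
from pyk.kllvm.parser import parse_pattern

runtime = import_runtime('imp-kompiled')     # hypothetical definition with a compiled _kllvm_runtime
cfg = parse_pattern(r'\dv{SortInt{}}("0")')  # placeholder configuration
result = runtime.run(cfg)                    # rewrite until a normal form is reached
term = runtime.term(cfg)
term.step(depth=1)                           # or advance one step and inspect term.pattern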
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kllvm/utils.html b/pyk/_modules/pyk/kllvm/utils.html new file mode 100644 index 00000000000..bbe5223bdc6 --- /dev/null +++ b/pyk/_modules/pyk/kllvm/utils.html @@ -0,0 +1,120 @@ + + + + + + + + pyk.kllvm.utils — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kllvm.utils

+ 1from __future__ import annotations
+ 2
+ 3from typing import TYPE_CHECKING
+ 4
+ 5from . import convert
+ 6
+ 7if TYPE_CHECKING:
+ 8    from ..kore.syntax import Axiom, Pattern
+ 9
+10
+
+[docs] +11def get_requires(axiom: Axiom) -> Pattern | None: +12 llvm_axiom = convert.sentence_to_llvm(axiom) +13 llvm_pattern = llvm_axiom.requires +14 if llvm_pattern is None: +15 return None +16 return convert.llvm_to_pattern(llvm_pattern)
+ +
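As an illustration (not part of the module), get_requires combines naturally with the KORE parser documented elsewhere in these pages; the file path is a placeholder.

from pathlib import Path

from pyk.kllvm.utils import get_requires
from pyk.kore.parser import KoreParser


def axiom_side_conditions(definition_kore: Path) -> list:
    """Collect the requires clause of every axiom in a KORE definition (sketch)."""
    definition = KoreParser(definition_kore.read_text()).definition()
    axioms = (axiom for module in definition for axiom in module.axioms)
    # get_requires round-trips each axiom through the LLVM bindings.
    return [req for axiom in axioms if (req := get_requires(axiom)) is not None]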
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kore/kompiled.html b/pyk/_modules/pyk/kore/kompiled.html new file mode 100644 index 00000000000..55164088552 --- /dev/null +++ b/pyk/_modules/pyk/kore/kompiled.html @@ -0,0 +1,486 @@ + + + + + + + + pyk.kore.kompiled — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kore.kompiled

+  1from __future__ import annotations
+  2
+  3import json
+  4import logging
+  5from dataclasses import dataclass
+  6from itertools import chain
+  7from pathlib import Path
+  8from typing import TYPE_CHECKING, Final, final
+  9
+ 10from ..utils import POSet, check_dir_path, check_file_path
+ 11from .parser import KoreParser
+ 12from .syntax import (
+ 13    DV,
+ 14    ML_SYMBOL_DECLS,
+ 15    App,
+ 16    MLPattern,
+ 17    MLQuant,
+ 18    Pattern,
+ 19    SortApp,
+ 20    SortVar,
+ 21    Symbol,
+ 22    SymbolDecl,
+ 23    WithSort,
+ 24)
+ 25
+ 26if TYPE_CHECKING:
+ 27    from collections.abc import Iterable
+ 28    from typing import Any
+ 29
+ 30    from ..utils import FrozenDict
+ 31    from .syntax import Definition, Kore, Sort
+ 32
+ 33_LOGGER: Final = logging.getLogger(__name__)
+ 34
+ 35
+ 36_PYK_DEFINITION_NAME: Final = 'pyk-definition.kore.json'
+ 37
+ 38
+
+[docs] + 39@final + 40@dataclass(frozen=True) + 41class KompiledKore: + 42 sort_table: KoreSortTable + 43 symbol_table: KoreSymbolTable + 44 +
+[docs] + 45 @staticmethod + 46 def load(definition_dir: str | Path) -> KompiledKore: + 47 definition_dir = Path(definition_dir) + 48 check_dir_path(definition_dir) + 49 + 50 kore_file = definition_dir / 'definition.kore' + 51 check_file_path(kore_file) + 52 + 53 json_file = definition_dir / _PYK_DEFINITION_NAME + 54 if json_file.exists(): + 55 kore_timestamp = kore_file.stat().st_mtime_ns + 56 json_timestamp = json_file.stat().st_mtime_ns + 57 + 58 if kore_timestamp < json_timestamp: + 59 return KompiledKore.load_from_json(json_file) + 60 + 61 _LOGGER.warning(f'File is out of date: {json_file}') + 62 + 63 return KompiledKore.load_from_kore(kore_file)
+ + 64 +
+[docs] + 65 @staticmethod + 66 def load_from_kore(kore_file: str | Path) -> KompiledKore: + 67 kore_file = Path(kore_file) + 68 check_file_path(kore_file) + 69 + 70 _LOGGER.info(f'Reading KORE definition: {kore_file}') + 71 kore_text = kore_file.read_text() + 72 + 73 _LOGGER.info(f'Parsing KORE definition: {kore_file}') + 74 definition = KoreParser(kore_text).definition() + 75 + 76 return KompiledKore.for_definition(definition)
+ + 77 +
+[docs] + 78 @staticmethod + 79 def load_from_json(json_file: str | Path) -> KompiledKore: + 80 json_file = Path(json_file) + 81 check_file_path(json_file) + 82 _LOGGER.info(f'Reading JSON definition: {json_file}') + 83 with json_file.open() as f: + 84 json_data = json.load(f) + 85 return KompiledKore.from_dict(json_data)
+ + 86 +
+[docs] + 87 @staticmethod + 88 def for_definition(definition: Definition) -> KompiledKore: + 89 return KompiledKore( + 90 sort_table=KoreSortTable.for_definition(definition), + 91 symbol_table=KoreSymbolTable.for_definition(definition), + 92 )
+ + 93 +
+[docs] + 94 @staticmethod + 95 def from_dict(dct: dict[str, Any]) -> KompiledKore: + 96 return KompiledKore( + 97 sort_table=KoreSortTable( + 98 (_sort_from_dict(subsort), _sort_from_dict(supersort)) for subsort, supersort in dct['sorts'] + 99 ), +100 symbol_table=KoreSymbolTable(_symbol_decl_from_dict(symbol_decl) for symbol_decl in dct['symbols']), +101 )
+ +102 +
+[docs] +103 def write(self, definition_dir: str | Path) -> None: +104 definition_dir = Path(definition_dir) +105 check_dir_path(definition_dir) +106 json_data = self.to_dict() +107 json_file = definition_dir / _PYK_DEFINITION_NAME +108 with json_file.open('w') as f: +109 json.dump(json_data, f)
+ +110 +
+[docs] +111 def to_dict(self) -> dict[str, Any]: +112 return { +113 'sorts': [ +114 [_to_dict(subsort), _to_dict(supersort)] +115 for supersort, subsorts in self.sort_table._subsort_table.items() +116 for subsort in subsorts +117 ], +118 'symbols': [_to_dict(symbol_decl) for symbol_decl in self.symbol_table._symbol_table.values()], +119 }
+ +120 +
+[docs] +121 def add_injections(self, pattern: Pattern, sort: Sort | None = None) -> Pattern: +122 if sort is None: +123 sort = SortApp('SortK') +124 +125 stack: list = [pattern, sort, self.symbol_table.pattern_sorts(pattern), pattern.patterns, []] +126 while True: +127 done_patterns = stack[-1] +128 patterns = stack[-2] +129 pattern_sorts = stack[-3] +130 _sort = stack[-4] +131 pattern = stack[-5] +132 +133 idx = len(done_patterns) - len(patterns) +134 if not idx: +135 stack.pop() +136 stack.pop() +137 stack.pop() +138 stack.pop() +139 stack.pop() +140 pattern = pattern.let_patterns(done_patterns) +141 pattern = self._inject(pattern, _sort) +142 if not stack: +143 return pattern +144 stack[-1].append(pattern) +145 else: +146 pattern = patterns[idx] +147 stack.append(pattern) +148 stack.append(pattern_sorts[idx]) +149 stack.append(self.symbol_table.pattern_sorts(pattern)) +150 stack.append(pattern.patterns) +151 stack.append([])
+
+152 +153    def _inject(self, pattern: Pattern, sort: Sort) -> Pattern: +154        actual_sort = self.symbol_table.infer_sort(pattern) +155 +156        if actual_sort == sort: +157            return pattern +158 +159        if self.sort_table.is_subsort(actual_sort, sort): +160            return App('inj', (actual_sort, sort), (pattern,)) +161 +162        raise ValueError(f'Sort {actual_sort.name} is not a subsort of {sort.name}: {pattern}') +163 +164 +165def _to_dict(kore: Kore) -> Any: +166    match kore: +167        case Pattern(): +168            return kore.dict +169        case SortVar(name): +170            return name +171        case SortApp(name, sorts): +172            return {'name': name, 'sorts': [_to_dict(sort) for sort in sorts]} +173        case Symbol(name, vars): +174            return {'name': name, 'vars': [_to_dict(var) for var in vars]} +175        case SymbolDecl(symbol, param_sorts, sort, attrs, hooked): +176            return { +177                'symbol': _to_dict(symbol), +178                'param-sorts': [_to_dict(sort) for sort in param_sorts], +179                'sort': _to_dict(sort), +180                'attrs': [_to_dict(attr) for attr in attrs], +181                'hooked': hooked, +182            } +183        case _: +184            raise AssertionError() +185 +186 +187def _sort_from_dict(obj: Any) -> Sort: +188    if isinstance(obj, str): +189        return SortVar(obj) +190    return SortApp(name=obj['name'], sorts=tuple(_sort_from_dict(sort) for sort in obj['sorts'])) +191 +192 +193def _symbol_decl_from_dict(dct: Any) -> SymbolDecl: +194    return SymbolDecl( +195        symbol=Symbol( +196            name=dct['symbol']['name'], +197            vars=tuple(SortVar(var) for var in dct['symbol']['vars']), +198        ), +199        param_sorts=tuple(_sort_from_dict(sort) for sort in dct['param-sorts']), +200        sort=_sort_from_dict(dct['sort']), +201        attrs=tuple(_app_from_dict(attr) for attr in dct['attrs']), +202        hooked=dct['hooked'], +203    ) +204 +205 +206def _app_from_dict(dct: Any) -> App: +207    app = Pattern.from_dict(dct) +208    assert isinstance(app, App) +209    return app +210 +211 +
+[docs] +212@final +213@dataclass +214class KoreSortTable: +215 _subsort_table: FrozenDict[Sort, frozenset[Sort]] +216 +217 def __init__(self, subsorts: Iterable[tuple[Sort, Sort]]): +218 poset = POSet((y, x) for x, y in subsorts) +219 self._subsort_table = poset.image +220 +
+[docs] +221 @staticmethod +222 def for_definition(definition: Definition) -> KoreSortTable: +223 axioms = (axiom for module in definition for axiom in module.axioms) +224 attrs = (attr for axiom in axioms for attr in axiom.attrs) +225 subsort_attrs = (attr for attr in attrs if attr.symbol == 'subsort') +226 subsort_attr_sorts = (attr.sorts for attr in subsort_attrs) +227 subsorts = ((subsort, supersort) for subsort, supersort in subsort_attr_sorts) +228 return KoreSortTable(subsorts)
+ +229 +
+[docs] +230 def is_subsort(self, sort1: Sort, sort2: Sort) -> bool: +231 if sort1 == sort2: +232 return True +233 +234 if sort2 == SortApp('SortK'): +235 return True +236 +237 if sort1 == SortApp('SortK'): +238 return False +239 +240 return sort1 in self._subsort_table.get(sort2, ())
+ +241 +
+[docs] +242 def meet(self, sort1: Sort, sort2: Sort) -> Sort: +243 if self.is_subsort(sort1, sort2): +244 return sort1 +245 +246 if self.is_subsort(sort2, sort1): +247 return sort2 +248 +249 subsorts1 = set(self._subsort_table.get(sort1, set())).union({sort1}) +250 subsorts2 = set(self._subsort_table.get(sort2, set())).union({sort2}) +251 common_subsorts = subsorts1.intersection(subsorts2) +252 if not common_subsorts: +253 raise ValueError(f'Sorts have no common subsort: {sort1}, {sort2}') +254 nr_subsorts = {sort: len(self._subsort_table.get(sort, {})) for sort in common_subsorts} +255 max_subsort_nr = max(n for _, n in nr_subsorts.items()) +256 max_subsorts = {sort for sort, n in nr_subsorts.items() if n == max_subsort_nr} +257 (subsort,) = max_subsorts +258 return subsort
+
+ +259 +260 +
+[docs] +261@final +262@dataclass +263class KoreSymbolTable: +264 _symbol_table: dict[str, SymbolDecl] +265 +266 def __init__(self, symbol_decls: Iterable[SymbolDecl] = ()): +267 self._symbol_table = {symbol_decl.symbol.name: symbol_decl for symbol_decl in symbol_decls} +268 +
+[docs] +269 @staticmethod +270 def for_definition(definition: Definition, *, with_ml_symbols: bool = True) -> KoreSymbolTable: +271 return KoreSymbolTable( +272 chain( +273 (symbol_decl for module in definition for symbol_decl in module.symbol_decls), +274 ML_SYMBOL_DECLS if with_ml_symbols else (), +275 ) +276 )
+ +277 +
+[docs] +278 def resolve(self, symbol_id: str, sorts: Iterable[Sort] = ()) -> tuple[Sort, tuple[Sort, ...]]: +279 symbol_decl = self._symbol_table.get(symbol_id) +280 if not symbol_decl: +281 raise ValueError(f'Undeclared symbol: {symbol_id}') +282 +283 symbol = symbol_decl.symbol +284 sorts = tuple(sorts) +285 +286 nr_sort_vars = len(symbol.vars) +287 nr_sorts = len(sorts) +288 if nr_sort_vars != nr_sorts: +289 raise ValueError(f'Expected {nr_sort_vars} sort parameters, got {nr_sorts} for: {symbol_id}') +290 +291 sort_table: dict[Sort, Sort] = dict(zip(symbol.vars, sorts, strict=True)) +292 +293 def resolve_sort(sort: Sort) -> Sort: +294 if type(sort) is SortVar: +295 return sort_table.get(sort, sort) +296 return sort +297 +298 sort = resolve_sort(symbol_decl.sort) +299 param_sorts = tuple(resolve_sort(sort) for sort in symbol_decl.param_sorts) +300 +301 return sort, param_sorts
+ +302 +
+[docs] +303 def infer_sort(self, pattern: Pattern) -> Sort: +304 if isinstance(pattern, WithSort): +305 return pattern.sort +306 +307 if type(pattern) is App: +308 sort, _ = self.resolve(pattern.symbol, pattern.sorts) +309 return sort +310 +311 raise ValueError(f'Cannot infer sort: {pattern}')
+ +312 +
+[docs] +313 def pattern_sorts(self, pattern: Pattern) -> tuple[Sort, ...]: +314 sorts: tuple[Sort, ...] +315 if isinstance(pattern, DV): +316 sorts = () +317 +318 elif isinstance(pattern, MLQuant): +319 sorts = (pattern.sort,) +320 +321 elif isinstance(pattern, MLPattern): +322 _, sorts = self.resolve(pattern.symbol(), pattern.sorts) +323 +324 elif isinstance(pattern, App): +325 _, sorts = self.resolve(pattern.symbol, pattern.sorts) +326 +327 else: +328 sorts = () +329 +330 assert len(sorts) == len(pattern.patterns) +331 return sorts
+
+ +
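A brief illustrative sketch of how the extracted tables are used; the definition directory and the sort names are placeholders.

from pyk.kore.kompiled import KompiledKore
from pyk.kore.syntax import EVar, SortApp

# Load the sort and symbol tables of a kompiled definition.  load() prefers the
# pyk-definition.kore.json cache and falls back to parsing definition.kore.
kompiled = KompiledKore.load('./kompiled-definition')

# Query the subsort relation extracted from the definition's subsort attributes.
kompiled.sort_table.is_subsort(SortApp('SortInt'), SortApp('SortKItem'))

# Wrap a variable of sort SortInt in the injection needed to use it at SortK,
# the default target sort of add_injections.
injected = kompiled.add_injections(EVar('VarX', SortApp('SortInt')))

# Cache the tables so the next load can skip parsing the KORE definition.
kompiled.write('./kompiled-definition')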
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kore/lexer.html b/pyk/_modules/pyk/kore/lexer.html new file mode 100644 index 00000000000..28fbd87bbe3 --- /dev/null +++ b/pyk/_modules/pyk/kore/lexer.html @@ -0,0 +1,366 @@ + + + + + + + + pyk.kore.lexer — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kore.lexer

+  1from __future__ import annotations
+  2
+  3from enum import Enum, auto
+  4from typing import TYPE_CHECKING, NamedTuple
+  5
+  6if TYPE_CHECKING:
+  7    from collections.abc import Callable, Iterable, Iterator, Mapping
+  8    from typing import Final
+  9
+ 10
+
+[docs] + 11class TokenType(Enum): + 12 EOF = 0 + 13 COMMA = auto() + 14 COLON = auto() + 15 WALRUS = auto() + 16 LPAREN = auto() + 17 RPAREN = auto() + 18 LBRACE = auto() + 19 RBRACE = auto() + 20 LBRACK = auto() + 21 RBRACK = auto() + 22 STRING = auto() + 23 ID = auto() + 24 SYMBOL_ID = auto() + 25 SET_VAR_ID = auto() + 26 ML_TOP = auto() + 27 ML_BOTTOM = auto() + 28 ML_NOT = auto() + 29 ML_AND = auto() + 30 ML_OR = auto() + 31 ML_IMPLIES = auto() + 32 ML_IFF = auto() + 33 ML_EXISTS = auto() + 34 ML_FORALL = auto() + 35 ML_MU = auto() + 36 ML_NU = auto() + 37 ML_CEIL = auto() + 38 ML_FLOOR = auto() + 39 ML_EQUALS = auto() + 40 ML_IN = auto() + 41 ML_NEXT = auto() + 42 ML_REWRITES = auto() + 43 ML_DV = auto() + 44 ML_LEFT_ASSOC = auto() + 45 ML_RIGHT_ASSOC = auto() + 46 KW_MODULE = auto() + 47 KW_ENDMODULE = auto() + 48 KW_IMPORT = auto() + 49 KW_SORT = auto() + 50 KW_HOOKED_SORT = auto() + 51 KW_SYMBOL = auto() + 52 KW_HOOKED_SYMBOL = auto() + 53 KW_AXIOM = auto() + 54 KW_CLAIM = auto() + 55 KW_ALIAS = auto() + 56 KW_WHERE = auto()
+ + 57 + 58 +
+[docs] + 59class KoreToken(NamedTuple): + 60 text: str + 61 type: TokenType
+ + 62 + 63 + 64_EOF_TOKEN: Final = KoreToken('', TokenType.EOF) + 65_COLON_TOKEN: Final = KoreToken(':', TokenType.COLON) + 66_WALRUS_TOKEN: Final = KoreToken(':=', TokenType.WALRUS) + 67 + 68_ML_SYMBOLS: Final = { + 69 r'\top': KoreToken(r'\top', TokenType.ML_TOP), + 70 r'\bottom': KoreToken(r'\bottom', TokenType.ML_BOTTOM), + 71 r'\not': KoreToken(r'\not', TokenType.ML_NOT), + 72 r'\and': KoreToken(r'\and', TokenType.ML_AND), + 73 r'\or': KoreToken(r'\or', TokenType.ML_OR), + 74 r'\implies': KoreToken(r'\implies', TokenType.ML_IMPLIES), + 75 r'\iff': KoreToken(r'\iff', TokenType.ML_IFF), + 76 r'\exists': KoreToken(r'\exists', TokenType.ML_EXISTS), + 77 r'\forall': KoreToken(r'\forall', TokenType.ML_FORALL), + 78 r'\mu': KoreToken(r'\mu', TokenType.ML_MU), + 79 r'\nu': KoreToken(r'\nu', TokenType.ML_NU), + 80 r'\ceil': KoreToken(r'\ceil', TokenType.ML_CEIL), + 81 r'\floor': KoreToken(r'\floor', TokenType.ML_FLOOR), + 82 r'\equals': KoreToken(r'\equals', TokenType.ML_EQUALS), + 83 r'\in': KoreToken(r'\in', TokenType.ML_IN), + 84 r'\next': KoreToken(r'\next', TokenType.ML_NEXT), + 85 r'\rewrites': KoreToken(r'\rewrites', TokenType.ML_REWRITES), + 86 r'\dv': KoreToken(r'\dv', TokenType.ML_DV), + 87 r'\left-assoc': KoreToken(r'\left-assoc', TokenType.ML_LEFT_ASSOC), + 88 r'\right-assoc': KoreToken(r'\right-assoc', TokenType.ML_RIGHT_ASSOC), + 89} + 90 + 91_KEYWORDS: Final = { + 92 'module': KoreToken('module', TokenType.KW_MODULE), + 93 'endmodule': KoreToken('endmodule', TokenType.KW_ENDMODULE), + 94 'import': KoreToken('import', TokenType.KW_IMPORT), + 95 'sort': KoreToken('sort', TokenType.KW_SORT), + 96 'hooked-sort': KoreToken('hooked-sort', TokenType.KW_HOOKED_SORT), + 97 'symbol': KoreToken('symbol', TokenType.KW_SYMBOL), + 98 'hooked-symbol': KoreToken('hooked-symbol', TokenType.KW_HOOKED_SYMBOL), + 99 'axiom': KoreToken('axiom', TokenType.KW_AXIOM), +100 'claim': KoreToken('claim', TokenType.KW_CLAIM), +101 'alias': KoreToken('alias', TokenType.KW_ALIAS), +102 'where': KoreToken('where', TokenType.KW_WHERE), +103} +104 +105_SIMPLE_CHARS: Final = { +106 ',': KoreToken(',', TokenType.COMMA), +107 '(': KoreToken('(', TokenType.LPAREN), +108 ')': KoreToken(')', TokenType.RPAREN), +109 '{': KoreToken('{', TokenType.LBRACE), +110 '}': KoreToken('}', TokenType.RBRACE), +111 '[': KoreToken('[', TokenType.LBRACK), +112 ']': KoreToken(']', TokenType.RBRACK), +113} +114 +115_WHITESPACE_CHARS: Final = {' ', '\t', '\n', '\r'} +116_ID_FIRST_CHARS: Final = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ') +117_ID_CHARS: Final = set("01234567890'-").union(_ID_FIRST_CHARS) +118 +119 +120def _whitespace(la: str, it: Iterator[str]) -> tuple[str, None]: +121 la = next(it, '') +122 while la in _WHITESPACE_CHARS: +123 la = next(it, '') +124 return la, None +125 +126 +127def _comment(la: str, it: Iterator[str]) -> tuple[str, None]: +128 # line comment +129 la = next(it, '') +130 if la == '/': +131 while la and la != '\n': +132 la = next(it, '') +133 if la: +134 la = next(it, '') +135 +136 # block comment +137 elif la == '*': +138 la = next(it) +139 while True: +140 while la != '*': +141 la = next(it) +142 la = next(it) +143 if la == '/': +144 la = next(it, '') +145 break +146 +147 # mismatch +148 else: +149 raise ValueError(f'Expected / or *, got: {la}') +150 +151 return la, None +152 +153 +154def _simple_char(la: str, it: Iterator[str]) -> tuple[str, KoreToken]: +155 return next(it, ''), _SIMPLE_CHARS[la] +156 +157 +158def _colon_or_walrus(la: str, it: Iterator[str]) -> tuple[str, KoreToken]: 
+159 la = next(it, '') +160 if la == '=': +161 token = _WALRUS_TOKEN +162 la = next(it, '') +163 else: +164 token = _COLON_TOKEN +165 return la, token +166 +167 +168def _id_or_keyword(la: str, it: Iterator[str]) -> tuple[str, KoreToken]: +169 buf = [la] +170 la = next(it, '') +171 while la in _ID_CHARS: +172 buf.append(la) +173 la = next(it, '') +174 name = ''.join(buf) +175 if name in _KEYWORDS: +176 token = _KEYWORDS[name] +177 else: +178 token = KoreToken(name, TokenType.ID) +179 return la, token +180 +181 +182def _symbol_or_ml_conn(la: str, it: Iterator[str]) -> tuple[str, KoreToken]: +183 buf = [la] +184 la = next(it) +185 if la not in _ID_FIRST_CHARS: +186 raise ValueError(f'Expected letter, got: {la}') +187 buf.append(la) +188 la = next(it, '') +189 while la in _ID_CHARS: +190 buf.append(la) +191 la = next(it, '') +192 symbol = ''.join(buf) +193 if symbol in _ML_SYMBOLS: +194 token = _ML_SYMBOLS[symbol] +195 else: +196 token = KoreToken(symbol, TokenType.SYMBOL_ID) +197 return la, token +198 +199 +200def _set_var_id(la: str, it: Iterator[str]) -> tuple[str, KoreToken]: +201 buf = [la] +202 la = next(it) +203 if la not in _ID_FIRST_CHARS: +204 raise ValueError(f'Expected letter, got: {la}') +205 buf.append(la) +206 la = next(it, '') +207 while la in _ID_CHARS: +208 buf.append(la) +209 la = next(it, '') +210 return la, KoreToken(''.join(buf), TokenType.SET_VAR_ID) +211 +212 +213def _string(la: str, it: Iterator[str]) -> tuple[str, KoreToken]: +214 buf = [la] +215 la = next(it) +216 while la != '"': +217 if la == '\\': +218 buf.append(la) +219 la = next(it) +220 buf.append(la) +221 la = next(it) +222 buf.append(la) +223 return next(it, ''), KoreToken(''.join(buf), TokenType.STRING) +224 +225 +226_DISPATCH_TABLE: Final[Mapping[str, Callable[[str, Iterator[str]], tuple[str, KoreToken | None]]]] = { +227 '/': _comment, +228 ':': _colon_or_walrus, +229 '"': _string, +230 '@': _set_var_id, +231 '\\': _symbol_or_ml_conn, +232 **{c: _whitespace for c in _WHITESPACE_CHARS}, +233 **{c: _id_or_keyword for c in _ID_FIRST_CHARS}, +234 **{c: _simple_char for c in _SIMPLE_CHARS}, +235} +236 +237 +
+[docs] +238def kore_lexer(it: Iterable[str]) -> Iterator[KoreToken]: +239 it = iter(it) +240 la = next(it, '') +241 while True: +242 if not la: +243 yield _EOF_TOKEN +244 return +245 +246 try: +247 next_state = _DISPATCH_TABLE[la] +248 except KeyError as err: +249 raise ValueError(f'Unexpected character: {la}') from err +250 +251 try: +252 la, token = next_state(la, it) +253 if token: +254 yield token +255 except StopIteration as err: +256 raise ValueError('Unexpected end of file') from err
+ +
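A small self-contained sketch of driving the lexer directly:

from pyk.kore.lexer import TokenType, kore_lexer

# kore_lexer is a generator over the input characters; the token stream always
# ends with an EOF token.
tokens = list(kore_lexer(r'\dv{SortInt{}}("1")'))

assert tokens[0].type == TokenType.ML_DV
assert tokens[-1].type == TokenType.EOF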
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kore/manip.html b/pyk/_modules/pyk/kore/manip.html new file mode 100644 index 00000000000..0b1d22dbe5b --- /dev/null +++ b/pyk/_modules/pyk/kore/manip.html @@ -0,0 +1,147 @@ + + + + + + + + pyk.kore.manip — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kore.manip

+ 1from __future__ import annotations
+ 2
+ 3from typing import TYPE_CHECKING
+ 4
+ 5from .syntax import And, EVar, MLQuant, Top
+ 6
+ 7if TYPE_CHECKING:
+ 8    from collections.abc import Collection
+ 9
+10    from .syntax import Pattern
+11
+12
+
+[docs] +13def conjuncts(pattern: Pattern) -> tuple[Pattern, ...]: +14 if isinstance(pattern, Top): +15 return () +16 if isinstance(pattern, And): +17 return tuple(conjunct for op in pattern.ops for conjunct in conjuncts(op)) +18 return (pattern,)
+ +19 +20 +
+[docs] +21def free_occs(pattern: Pattern, *, bound_vars: Collection[str] = ()) -> dict[str, list[EVar]]: +22 occurrences: dict[str, list[EVar]] = {} +23 +24 def collect(pattern: Pattern, bound_vars: set[str]) -> None: +25 if type(pattern) is EVar and pattern.name not in bound_vars: +26 if pattern.name in occurrences: +27 occurrences[pattern.name].append(pattern) +28 else: +29 occurrences[pattern.name] = [pattern] +30 +31 elif isinstance(pattern, MLQuant): +32 new_bound_vars = {pattern.var.name}.union(bound_vars) +33 collect(pattern.pattern, new_bound_vars) +34 +35 else: +36 for sub_pattern in pattern.patterns: +37 collect(sub_pattern, bound_vars) +38 +39 collect(pattern, set(bound_vars)) +40 return occurrences
+ +
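A short illustrative sketch of both helpers on a hand-built conjunction:

from pyk.kore.manip import conjuncts, free_occs
from pyk.kore.syntax import And, EVar, SortApp, Top

INT = SortApp('SortInt')
x = EVar('VarX', INT)
pattern = And(INT, (x, Top(INT)))

assert conjuncts(pattern) == (x,)           # \top conjuncts are dropped
assert free_occs(pattern) == {'VarX': [x]}  # free element-variable occurrences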
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kore/match.html b/pyk/_modules/pyk/kore/match.html new file mode 100644 index 00000000000..f80f9c3ef37 --- /dev/null +++ b/pyk/_modules/pyk/kore/match.html @@ -0,0 +1,472 @@ + + + + + + + + pyk.kore.match — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kore.match

+  1from __future__ import annotations
+  2
+  3from typing import TYPE_CHECKING, overload
+  4
+  5from ..dequote import bytes_encode
+  6from ..utils import case, check_type
+  7from .prelude import BOOL, BYTES, ID, INT, STRING
+  8from .syntax import DV, App, LeftAssoc
+  9
+ 10if TYPE_CHECKING:
+ 11    from collections.abc import Callable
+ 12    from typing import Any, TypeVar
+ 13
+ 14    from .syntax import Pattern, Sort
+ 15
+ 16    T = TypeVar('T')
+ 17    K = TypeVar('K')
+ 18    V = TypeVar('V')
+ 19
+ 20
+
+[docs] + 21def match_dv(pattern: Pattern, sort: Sort | None = None) -> DV: + 22 dv = check_type(pattern, DV) + 23 if sort and dv.sort != sort: + 24 raise ValueError(f'Expected sort {sort.text}, found: {dv.sort.text}') + 25 return dv
+ + 26 + 27 +
+[docs] + 28def match_symbol(actual: str, expected: str) -> None: + 29 if actual != expected: + 30 raise ValueError(f'Expected symbol {expected}, found: {actual}')
+ + 31 + 32 +
+[docs] + 33def match_app(pattern: Pattern, symbol: str | None = None) -> App: + 34 app = check_type(pattern, App) + 35 if symbol is not None: + 36 match_symbol(app.symbol, symbol) + 37 return app
+ + 38 + 39 +
+[docs] + 40def match_inj(pattern: Pattern) -> App: + 41 return match_app(pattern, 'inj')
+ + 42 + 43 +
+[docs] + 44def match_left_assoc(pattern: Pattern, symbol: str | None = None) -> LeftAssoc: + 45 assoc = check_type(pattern, LeftAssoc) + 46 if symbol is not None: + 47 match_symbol(assoc.symbol, symbol) + 48 return assoc
+ + 49 + 50 +
+[docs] + 51def match_list(pattern: Pattern) -> tuple[Pattern, ...]: + 52 if type(pattern) is App: + 53 match_app(pattern, "Lbl'Stop'List") + 54 return () + 55 + 56 assoc = match_left_assoc(pattern, "Lbl'Unds'List'Unds'") + 57 items = (match_app(arg, 'LblListItem') for arg in assoc.args) + 58 elems = (item.args[0] for item in items) + 59 return tuple(elems)
+ + 60 + 61 +
+[docs] + 62def match_set(pattern: Pattern) -> tuple[Pattern, ...]: + 63 if type(pattern) is App: + 64 match_app(pattern, "Lbl'Stop'Set") + 65 return () + 66 + 67 assoc = match_left_assoc(pattern, "Lbl'Unds'Set'Unds'") + 68 items = (match_app(arg, 'LblSetItem') for arg in assoc.args) + 69 elems = (item.args[0] for item in items) + 70 return tuple(elems)
+ + 71 + 72 +
+[docs] + 73def match_map(pattern: Pattern, *, cell: str | None = None) -> tuple[tuple[Pattern, Pattern], ...]: + 74 cell = cell or '' + 75 stop_symbol = f"Lbl'Stop'{cell}Map" + 76 cons_symbol = f"Lbl'Unds'{cell}Map'Unds'" + 77 item_symbol = "Lbl'UndsPipe'-'-GT-Unds'" if not cell else f'Lbl{cell}MapItem' + 78 + 79 if type(pattern) is App: + 80 match_app(pattern, stop_symbol) + 81 return () + 82 + 83 assoc = match_left_assoc(pattern, cons_symbol) + 84 items = (match_app(arg, item_symbol) for arg in assoc.args) + 85 entries = ((item.args[0], item.args[1]) for item in items) + 86 return tuple(entries)
+ + 87 + 88 +
+[docs] + 89def match_rangemap(pattern: Pattern) -> tuple[tuple[tuple[Pattern, Pattern], Pattern], ...]: + 90 stop_symbol = "Lbl'Stop'RangeMap" + 91 cons_symbol = "Lbl'Unds'RangeMap'Unds'" + 92 item_symbol = "Lbl'Unds'r'Pipe'-'-GT-Unds'" + 93 + 94 if type(pattern) is App: + 95 match_app(pattern, stop_symbol) + 96 return () + 97 + 98 assoc = match_left_assoc(pattern) + 99 cons = match_app(assoc.app, cons_symbol) +100 items = (match_app(arg, item_symbol) for arg in cons.args) +101 entries = ((_match_range(item.args[0]), item.args[1]) for item in items) +102 return tuple(entries)
+ +103 +104 +105def _match_range(pattern: Pattern) -> tuple[Pattern, Pattern]: +106 range_symbol = "LblRangeMap'Coln'Range" +107 range = match_app(pattern, range_symbol) +108 return (range.args[0], range.args[1]) +109 +110 +
+[docs] +111def kore_bool(pattern: Pattern) -> bool: +112 dv = match_dv(pattern, BOOL) +113 match dv.value.value: +114 case 'true': +115 return True +116 case 'false': +117 return False +118 case _: +119 raise ValueError(f'Invalid Boolean domain value: {dv.text}')
+ +120 +121 +
+[docs] +122def kore_int(pattern: Pattern) -> int: +123 dv = match_dv(pattern, INT) +124 return int(dv.value.value)
+ +125 +126 +
+[docs] +127def kore_bytes(pattern: Pattern) -> bytes: +128 dv = match_dv(pattern, BYTES) +129 return bytes_encode(dv.value.value)
+ +130 +131 +
+[docs] +132def kore_str(pattern: Pattern) -> str: +133 dv = match_dv(pattern, STRING) +134 return dv.value.value
+ +135 +136 +
+[docs] +137def kore_id(pattern: Pattern) -> str: +138 dv = match_dv(pattern, ID) +139 return dv.value.value
+ +140 +141 +142# Higher-order functions +143 +144 +
+[docs] +145def app(symbol: str | None = None) -> Callable[[Pattern], App]: +146 def res(pattern: Pattern) -> App: +147 return match_app(pattern, symbol) +148 +149 return res
+ +150 +151 +152@overload +153def arg(n: int, /) -> Callable[[App], Pattern]: ... +154 +155 +156@overload +157def arg(symbol: str, /) -> Callable[[App], App]: ... +158 +159 +
+[docs] +160def arg(id: int | str) -> Callable[[App], Pattern | App]: +161 def res(app: App) -> Pattern | App: +162 if type(id) is int: +163 if len(app.args) <= id: +164 raise ValueError('Argument index is out of range') +165 +166 return app.args[id] +167 +168 try: +169 arg, *_ = (arg for arg in app.args if type(arg) is App and arg.symbol == id) +170 except ValueError: +171 raise ValueError(f'No matching argument found for symbol: {id}') from None +172 return arg +173 +174 return res
+ +175 +176 +177@overload +178def args() -> Callable[[App], tuple[()]]: ... +179 +180 +181@overload +182def args(n1: int, /) -> Callable[[App], tuple[Pattern]]: ... +183 +184 +185@overload +186def args(n1: int, n2: int, /) -> Callable[[App], tuple[Pattern, Pattern]]: ... +187 +188 +189@overload +190def args(n1: int, n2: int, n3: int, /) -> Callable[[App], tuple[Pattern, Pattern, Pattern]]: ... +191 +192 +193@overload +194def args(n1: int, n2: int, n3: int, n4: int, /) -> Callable[[App], tuple[Pattern, Pattern, Pattern, Pattern]]: ... +195 +196 +197@overload +198def args(*ns: int) -> Callable[[App], tuple[Pattern, ...]]: ... +199 +200 +201@overload +202def args(s1: str, /) -> Callable[[App], tuple[App]]: ... +203 +204 +205@overload +206def args(s1: str, s2: str, /) -> Callable[[App], tuple[App, App]]: ... +207 +208 +209@overload +210def args(s1: str, s2: str, s3: str, /) -> Callable[[App], tuple[App, App, App]]: ... +211 +212 +213@overload +214def args(s1: str, s2: str, s3: str, s4: str, /) -> Callable[[App], tuple[App, App, App, App]]: ... +215 +216 +217@overload +218def args(*ss: str) -> Callable[[App], tuple[App, ...]]: ... +219 +220 +
+[docs] +221def args(*ids: Any) -> Callable[[App], tuple]: +222 def res(app: App) -> tuple[Pattern, ...]: +223 if not ids: +224 return () +225 +226 fst = ids[0] +227 if type(fst) is int: +228 return tuple(arg(n)(app) for n in ids) +229 +230 symbol_match: dict[str, App] = {} +231 symbols = set(ids) +232 +233 for _arg in app.args: +234 if type(_arg) is App and _arg.symbol in symbols and _arg.symbol not in symbol_match: +235 symbol_match[_arg.symbol] = _arg +236 +237 if len(symbol_match) == len(symbols): +238 return tuple(symbol_match[symbol] for symbol in ids) +239 +240 unmatched_symbols = symbols - set(symbol_match) +241 assert unmatched_symbols +242 unmatched_symbol_str = ', '.join(unmatched_symbols) +243 raise ValueError(f'No matching arguments found for symbols: {unmatched_symbol_str}') +244 +245 return res
+ +246 +247 +
+[docs] +248def inj(pattern: Pattern) -> Pattern: +249 return arg(0)(app('inj')(pattern))
+ +250 +251 +
+[docs] +252def kore_list_of(item: Callable[[Pattern], T]) -> Callable[[Pattern], tuple[T, ...]]: +253 def res(pattern: Pattern) -> tuple[T, ...]: +254 return tuple(item(e) for e in match_list(pattern)) +255 +256 return res
+ +257 +258 +
+[docs] +259def kore_set_of(item: Callable[[Pattern], T]) -> Callable[[Pattern], tuple[T, ...]]: +260 def res(pattern: Pattern) -> tuple[T, ...]: +261 return tuple(item(e) for e in match_set(pattern)) +262 +263 return res
+ +264 +265 +
+[docs] +266def kore_map_of( +267 key: Callable[[Pattern], K], +268 value: Callable[[Pattern], V], +269 *, +270 cell: str | None = None, +271) -> Callable[[Pattern], tuple[tuple[K, V], ...]]: +272 def res(pattern: Pattern) -> tuple[tuple[K, V], ...]: +273 return tuple((key(k), value(v)) for k, v in match_map(pattern, cell=cell)) +274 +275 return res
+ +276 +277 +
+[docs] +278def kore_rangemap_of( +279 key: Callable[[Pattern], K], +280 value: Callable[[Pattern], V], +281) -> Callable[[Pattern], tuple[tuple[tuple[K, K], V], ...]]: +282 def res(pattern: Pattern) -> tuple[tuple[tuple[K, K], V], ...]: +283 return tuple(((key(k[0]), key(k[1])), value(v)) for k, v in match_rangemap(pattern)) +284 +285 return res
+ +286 +287 +
+[docs] +288def case_symbol( +289 *cases: tuple[str, Callable[[App], T]], +290 default: Callable[[App], T] | None = None, +291) -> Callable[[Pattern], T]: +292 def cond(symbol: str) -> Callable[[App], bool]: +293 return lambda app: app.symbol == symbol +294 +295 def res(pattern: Pattern) -> T: +296 app = match_app(pattern) +297 return case( +298 cases=((cond(symbol), then) for symbol, then in cases), +299 default=default, +300 )(app) +301 +302 return res
+ +
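An illustrative sketch of composing the extractors; LblwrapInt is a placeholder symbol, not one taken from any particular definition.

from pyk.kore.match import app, arg, kore_int, match_dv
from pyk.kore.prelude import INT, int_dv
from pyk.kore.syntax import App

pattern = App('LblwrapInt', (), (int_dv(42),))

# Extractors compose right to left: check the wrapper symbol, take its first
# argument, then read the domain value back as a Python int.
value = kore_int(arg(0)(app('LblwrapInt')(pattern)))
assert value == 42

# match_dv optionally checks the sort of a domain value before returning it.
dv = match_dv(int_dv(42), INT)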
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kore/parser.html b/pyk/_modules/pyk/kore/parser.html new file mode 100644 index 00000000000..141731bef92 --- /dev/null +++ b/pyk/_modules/pyk/kore/parser.html @@ -0,0 +1,780 @@ + + + + + + + + pyk.kore.parser — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kore.parser

+  1from __future__ import annotations
+  2
+  3from typing import TYPE_CHECKING
+  4
+  5from ..dequote import dequote_string
+  6from .lexer import TokenType, kore_lexer
+  7from .syntax import (
+  8    DV,
+  9    AliasDecl,
+ 10    And,
+ 11    App,
+ 12    Axiom,
+ 13    Bottom,
+ 14    Ceil,
+ 15    Claim,
+ 16    Definition,
+ 17    Equals,
+ 18    EVar,
+ 19    Exists,
+ 20    Floor,
+ 21    Forall,
+ 22    Iff,
+ 23    Implies,
+ 24    Import,
+ 25    In,
+ 26    LeftAssoc,
+ 27    Module,
+ 28    Mu,
+ 29    Next,
+ 30    Not,
+ 31    Nu,
+ 32    Or,
+ 33    Rewrites,
+ 34    RightAssoc,
+ 35    SortApp,
+ 36    SortDecl,
+ 37    SortVar,
+ 38    String,
+ 39    SVar,
+ 40    Symbol,
+ 41    SymbolDecl,
+ 42    Top,
+ 43)
+ 44
+ 45if TYPE_CHECKING:
+ 46    from collections.abc import Callable, Iterator
+ 47    from typing import Final, TypeVar, Union
+ 48
+ 49    from .lexer import KoreToken
+ 50    from .syntax import (
+ 51        Assoc,
+ 52        BinaryConn,
+ 53        BinaryPred,
+ 54        MLFixpoint,
+ 55        MLPattern,
+ 56        MLQuant,
+ 57        MultiaryConn,
+ 58        NullaryConn,
+ 59        Pattern,
+ 60        RoundPred,
+ 61        Sentence,
+ 62        Sort,
+ 63        UnaryConn,
+ 64        VarPattern,
+ 65    )
+ 66
+ 67    NC = TypeVar('NC', bound=NullaryConn)
+ 68    UC = TypeVar('UC', bound=Union[UnaryConn, Next])
+ 69    BC = TypeVar('BC', bound=Union[BinaryConn, Rewrites])
+ 70    MC = TypeVar('MC', bound=MultiaryConn)
+ 71    QF = TypeVar('QF', bound=MLQuant)
+ 72    FP = TypeVar('FP', bound=MLFixpoint)
+ 73    RP = TypeVar('RP', bound=RoundPred)
+ 74    BP = TypeVar('BP', bound=BinaryPred)
+ 75    AS = TypeVar('AS', bound=Assoc)
+ 76
+ 77    T = TypeVar('T')
+ 78
+ 79
+
+[docs] + 80class KoreParser: + 81 _ML_SYMBOLS: Final = { + 82 TokenType.ML_TOP: 'top', + 83 TokenType.ML_BOTTOM: 'bottom', + 84 TokenType.ML_NOT: 'nott', + 85 TokenType.ML_AND: 'andd', + 86 TokenType.ML_OR: 'orr', + 87 TokenType.ML_IMPLIES: 'implies', + 88 TokenType.ML_IFF: 'iff', + 89 TokenType.ML_EXISTS: 'exists', + 90 TokenType.ML_FORALL: 'forall', + 91 TokenType.ML_MU: 'mu', + 92 TokenType.ML_NU: 'nu', + 93 TokenType.ML_CEIL: 'ceil', + 94 TokenType.ML_FLOOR: 'floor', + 95 TokenType.ML_EQUALS: 'equals', + 96 TokenType.ML_IN: 'inn', + 97 TokenType.ML_NEXT: 'next', + 98 TokenType.ML_REWRITES: 'rewrites', + 99 TokenType.ML_DV: 'dv', +100 TokenType.ML_LEFT_ASSOC: 'left_assoc', +101 TokenType.ML_RIGHT_ASSOC: 'right_assoc', +102 } +103 +104 _SENTENCE_KWS: Final = { +105 TokenType.KW_IMPORT: 'importt', +106 TokenType.KW_SORT: 'sort_decl', +107 TokenType.KW_HOOKED_SORT: 'hooked_sort_decl', +108 TokenType.KW_SYMBOL: 'symbol_decl', +109 TokenType.KW_HOOKED_SYMBOL: 'hooked_symbol_decl', +110 TokenType.KW_ALIAS: 'alias_decl', +111 TokenType.KW_AXIOM: 'axiom', +112 TokenType.KW_CLAIM: 'claim', +113 } +114 +115 _iter: Iterator[KoreToken] +116 _la: KoreToken +117 +118 def __init__(self, text: str): +119 self._iter = kore_lexer(text) +120 self._la = next(self._iter) +121 +122 @property +123 def eof(self) -> bool: +124 return self._la.type == TokenType.EOF +125 +126 def _consume(self) -> str: +127 text = self._la.text +128 self._la = next(self._iter) +129 return text +130 +131 def _match(self, token_type: TokenType) -> str: +132 if self._la.type != token_type: +133 raise ValueError(f'Expected {token_type.name}, found: {self._la.type.name}') +134 +135 return self._consume() +136 +137 def _delimited_list_of( +138 self, +139 parse: Callable[[], T], +140 ldelim: TokenType, +141 rdelim: TokenType, +142 sep: TokenType = TokenType.COMMA, +143 ) -> list[T]: +144 res: list[T] = [] +145 +146 self._match(ldelim) +147 while self._la.type != rdelim: +148 res.append(parse()) +149 if self._la.type != sep: +150 break +151 self._consume() +152 self._consume() +153 +154 return res +155 +
+[docs] +156 def id(self) -> str: +157 return self._match(TokenType.ID)
+ +158 +
+[docs] +159 def symbol_id(self) -> str: +160 if self._la.type == TokenType.SYMBOL_ID: +161 return self._consume() +162 +163 return self._match(TokenType.ID)
+ +164 +
+[docs] +165 def set_var_id(self) -> str: +166 return self._match(TokenType.SET_VAR_ID)
+ +167 +
+[docs] +168 def sort(self) -> Sort: +169 name = self.id() +170 +171 if self._la.type == TokenType.LBRACE: +172 sorts = self._sort_list() +173 return SortApp(name, sorts) +174 +175 return SortVar(name)
+ +176 +177 def _sort_list(self) -> list[Sort]: +178 return self._delimited_list_of(self.sort, TokenType.LBRACE, TokenType.RBRACE) +179 +
+[docs] +180 def sort_var(self) -> SortVar: +181 name = self._match(TokenType.ID) +182 return SortVar(name)
+ +183 +
+[docs] +184 def sort_app(self) -> SortApp: +185 name = self._match(TokenType.ID) +186 sorts = self._sort_list() +187 return SortApp(name, sorts)
+ +188 +
+[docs] +189 def pattern(self) -> Pattern: +190 if self._la.type == TokenType.STRING: +191 return self.string() +192 +193 if self._la.type in self._ML_SYMBOLS: +194 return self.ml_pattern() +195 +196 if self._la.type == TokenType.SYMBOL_ID: +197 return self.app() +198 +199 if self._la.type == TokenType.SET_VAR_ID: +200 return self.set_var() +201 +202 name = self._match(TokenType.ID) +203 if self._la.type == TokenType.COLON: +204 self._consume() +205 sort = self.sort() +206 return EVar(name, sort) +207 +208 sorts = self._sort_list() +209 patterns = self._pattern_list() +210 return App(name, sorts, patterns)
+ +211 +212 def _pattern_list(self) -> list[Pattern]: +213 return self._delimited_list_of(self.pattern, TokenType.LPAREN, TokenType.RPAREN) +214 +
+[docs] +215 def string(self) -> String: +216 value = self._match(TokenType.STRING) +217 return String(dequote_string(value[1:-1]))
+ +218 +
+[docs] +219 def app(self) -> App: +220 symbol = self.symbol_id() +221 sorts = self._sort_list() +222 patterns = self._pattern_list() +223 return App(symbol, sorts, patterns)
+ +224 +
+[docs] +225 def var_pattern(self) -> VarPattern: +226 if self._la.type == TokenType.SET_VAR_ID: +227 return self.set_var() +228 +229 return self.elem_var()
+ +230 +
+[docs] +231 def set_var(self) -> SVar: +232 name = self._match(TokenType.SET_VAR_ID) +233 self._match(TokenType.COLON) +234 sort = self.sort() +235 return SVar(name, sort)
+ +236 +
+[docs] +237 def elem_var(self) -> EVar: +238 name = self._match(TokenType.ID) +239 self._match(TokenType.COLON) +240 sort = self.sort() +241 return EVar(name, sort)
+ +242 +
+[docs] +243    def ml_pattern(self) -> MLPattern: +244        token_type = self._la.type +245        if token_type not in self._ML_SYMBOLS: +246            raise ValueError(f'Expected matching logic symbol, found: {self._la.text}') +247        parse = getattr(self, self._ML_SYMBOLS[token_type]) +248        return parse()
+ +249 +250 def _nullary(self, token_type: TokenType, cls: type[NC]) -> NC: +251 self._match(token_type) +252 self._match(TokenType.LBRACE) +253 sort = self.sort() +254 self._match(TokenType.RBRACE) +255 self._match(TokenType.LPAREN) +256 self._match(TokenType.RPAREN) +257 # TODO Implement NullaryConn.create(symbol, sort) instead +258 # TODO Consider MLConn.create(symbol, sort, patterns) as well +259 return cls(sort) # type: ignore +260 +
+[docs] +261 def top(self) -> Top: +262 return self._nullary(TokenType.ML_TOP, Top)
+ +263 +
+[docs] +264 def bottom(self) -> Bottom: +265 return self._nullary(TokenType.ML_BOTTOM, Bottom)
+ +266 +267 def _unary(self, token_type: TokenType, cls: type[UC]) -> UC: +268 self._match(token_type) +269 self._match(TokenType.LBRACE) +270 sort = self.sort() +271 self._match(TokenType.RBRACE) +272 self._match(TokenType.LPAREN) +273 pattern = self.pattern() +274 self._match(TokenType.RPAREN) +275 return cls(sort, pattern) # type: ignore +276 +
+[docs] +277 def nott(self) -> Not: +278 return self._unary(TokenType.ML_NOT, Not)
+ +279 +280 def _binary(self, token_type: TokenType, cls: type[BC]) -> BC: +281 self._match(token_type) +282 self._match(TokenType.LBRACE) +283 sort = self.sort() +284 self._match(TokenType.RBRACE) +285 self._match(TokenType.LPAREN) +286 left = self.pattern() +287 self._match(TokenType.COMMA) +288 right = self.pattern() +289 self._match(TokenType.RPAREN) +290 return cls(sort, left, right) # type: ignore +291 +
+[docs] +292 def implies(self) -> Implies: +293 return self._binary(TokenType.ML_IMPLIES, Implies)
+ +294 +
+[docs] +295 def iff(self) -> Iff: +296 return self._binary(TokenType.ML_IFF, Iff)
+ +297 +298 def _multiary(self, token_type: TokenType, cls: type[MC]) -> MC: +299 self._match(token_type) +300 self._match(TokenType.LBRACE) +301 sort = self.sort() +302 self._match(TokenType.RBRACE) +303 ops = self._delimited_list_of(self.pattern, TokenType.LPAREN, TokenType.RPAREN) +304 return cls(sort, ops) # type: ignore +305 +
+[docs] +306 def andd(self) -> And: +307 return self._multiary(TokenType.ML_AND, And)
+ +308 +
+[docs] +309 def orr(self) -> Or: +310 return self._multiary(TokenType.ML_OR, Or)
+ +311 +312 def _quantifier(self, token_type: TokenType, cls: type[QF]) -> QF: +313 self._match(token_type) +314 self._match(TokenType.LBRACE) +315 sort = self.sort() +316 self._match(TokenType.RBRACE) +317 self._match(TokenType.LPAREN) +318 var = self.elem_var() +319 self._match(TokenType.COMMA) +320 pattern = self.pattern() +321 self._match(TokenType.RPAREN) +322 return cls(sort, var, pattern) # type: ignore +323 +
+[docs] +324 def exists(self) -> Exists: +325 return self._quantifier(TokenType.ML_EXISTS, Exists)
+ +326 +
+[docs] +327 def forall(self) -> Forall: +328 return self._quantifier(TokenType.ML_FORALL, Forall)
+ +329 +330 def _fixpoint(self, token_type: TokenType, cls: type[FP]) -> FP: +331 self._match(token_type) +332 self._match(TokenType.LBRACE) +333 self._match(TokenType.RBRACE) +334 self._match(TokenType.LPAREN) +335 var = self.set_var() +336 self._match(TokenType.COMMA) +337 pattern = self.pattern() +338 self._match(TokenType.RPAREN) +339 return cls(var, pattern) # type: ignore +340 +
+[docs] +341 def mu(self) -> Mu: +342 return self._fixpoint(TokenType.ML_MU, Mu)
+ +343 +
+[docs] +344 def nu(self) -> Nu: +345 return self._fixpoint(TokenType.ML_NU, Nu)
+ +346 +347 def _round_pred(self, token_type: TokenType, cls: type[RP]) -> RP: +348 self._match(token_type) +349 self._match(TokenType.LBRACE) +350 op_sort = self.sort() +351 self._match(TokenType.COMMA) +352 sort = self.sort() +353 self._match(TokenType.RBRACE) +354 self._match(TokenType.LPAREN) +355 pattern = self.pattern() +356 self._match(TokenType.RPAREN) +357 return cls(op_sort, sort, pattern) # type: ignore +358 +
+[docs] +359 def ceil(self) -> Ceil: +360 return self._round_pred(TokenType.ML_CEIL, Ceil)
+ +361 +
+[docs] +362 def floor(self) -> Floor: +363 return self._round_pred(TokenType.ML_FLOOR, Floor)
+ +364 +365 def _binary_pred(self, token_type: TokenType, cls: type[BP]) -> BP: +366 self._match(token_type) +367 self._match(TokenType.LBRACE) +368 left_sort = self.sort() +369 self._match(TokenType.COMMA) +370 right_sort = self.sort() +371 self._match(TokenType.RBRACE) +372 self._match(TokenType.LPAREN) +373 left = self.pattern() +374 self._match(TokenType.COMMA) +375 right = self.pattern() +376 self._match(TokenType.RPAREN) +377 return cls(left_sort, right_sort, left, right) # type: ignore +378 +
+[docs] +379 def equals(self) -> Equals: +380 return self._binary_pred(TokenType.ML_EQUALS, Equals)
+ +381 +
+[docs] +382 def inn(self) -> In: +383 return self._binary_pred(TokenType.ML_IN, In)
+ +384 +
+[docs] +385 def next(self) -> Next: +386 return self._unary(TokenType.ML_NEXT, Next)
+ +387 +
+[docs] +388 def rewrites(self) -> Rewrites: +389 return self._binary(TokenType.ML_REWRITES, Rewrites)
+ +390 +
+[docs] +391 def dv(self) -> DV: +392 self._match(TokenType.ML_DV) +393 self._match(TokenType.LBRACE) +394 sort = self.sort() +395 self._match(TokenType.RBRACE) +396 self._match(TokenType.LPAREN) +397 value = self.string() +398 self._match(TokenType.RPAREN) +399 return DV(sort, value)
+ +400 +401 def _assoc(self, token_type: TokenType, cls: type[AS]) -> AS: +402 self._match(token_type) +403 self._match(TokenType.LBRACE) +404 self._match(TokenType.RBRACE) +405 self._match(TokenType.LPAREN) +406 app = self.app() +407 self._match(TokenType.RPAREN) +408 return cls(app.symbol, app.sorts, app.args) # type: ignore +409 +
+[docs] +410 def left_assoc(self) -> LeftAssoc: +411 return self._assoc(TokenType.ML_LEFT_ASSOC, LeftAssoc)
+ +412 +
+[docs] +413 def right_assoc(self) -> RightAssoc: +414 return self._assoc(TokenType.ML_RIGHT_ASSOC, RightAssoc)
+ +415 +416 def _attr_list(self) -> list[App]: +417 return self._delimited_list_of(self.app, TokenType.LBRACK, TokenType.RBRACK) +418 +
+[docs] +419 def sentence(self) -> Sentence: +420 token_type = self._la.type +421 +422 if token_type not in self._SENTENCE_KWS: +423 raise ValueError(f'Expected {[kw.name for kw in self._SENTENCE_KWS]}, found: {token_type.name}') +424 +425 parse = getattr(self, self._SENTENCE_KWS[token_type]) +426 return parse()
+ +427 +
+[docs] +428 def importt(self) -> Import: +429 self._match(TokenType.KW_IMPORT) +430 module_name = self.id() +431 attrs = self._attr_list() +432 return Import(module_name, attrs)
+ +433 +
+[docs] +434 def sort_decl(self) -> SortDecl: +435 self._match(TokenType.KW_SORT) +436 name = self.id() +437 vars = self._sort_var_list() +438 attrs = self._attr_list() +439 return SortDecl(name, vars, attrs, hooked=False)
+ +440 +
+[docs] +441 def hooked_sort_decl(self) -> SortDecl: +442 self._match(TokenType.KW_HOOKED_SORT) +443 name = self.id() +444 vars = self._sort_var_list() +445 attrs = self._attr_list() +446 return SortDecl(name, vars, attrs, hooked=True)
+ +447 +448 def _sort_var_list(self) -> list[SortVar]: +449 return self._delimited_list_of(self.sort_var, TokenType.LBRACE, TokenType.RBRACE) +450 +
+[docs] +451 def symbol_decl(self) -> SymbolDecl: +452 self._match(TokenType.KW_SYMBOL) +453 symbol = self.symbol() +454 sort_params = self._sort_param_list() +455 self._match(TokenType.COLON) +456 sort = self.sort() +457 attrs = self._attr_list() +458 return SymbolDecl(symbol, sort_params, sort, attrs, hooked=False)
+ +459 +
+[docs] +460 def hooked_symbol_decl(self) -> SymbolDecl: +461 self._match(TokenType.KW_HOOKED_SYMBOL) +462 symbol = self.symbol() +463 sort_params = self._sort_param_list() +464 self._match(TokenType.COLON) +465 sort = self.sort() +466 attrs = self._attr_list() +467 return SymbolDecl(symbol, sort_params, sort, attrs, hooked=True)
+ +468 +
+[docs] +469 def alias_decl(self) -> AliasDecl: +470 self._match(TokenType.KW_ALIAS) +471 symbol = self.symbol() +472 sort_params = self._sort_param_list() +473 self._match(TokenType.COLON) +474 sort = self.sort() +475 self._match(TokenType.KW_WHERE) +476 left = self.app() +477 self._match(TokenType.WALRUS) +478 right = self.pattern() +479 attrs = self._attr_list() +480 return AliasDecl(symbol, sort_params, sort, left, right, attrs)
+ +481 +482 def _sort_param_list(self) -> list[Sort]: +483 return self._delimited_list_of(self.sort, TokenType.LPAREN, TokenType.RPAREN) +484 +485 # TODO remove once \left-assoc{}(\or{...}(...)) is no longer supported +
+[docs] +486 def multi_or(self) -> list[Pattern]: +487 self._match(TokenType.ML_LEFT_ASSOC) +488 self._match(TokenType.LBRACE) +489 self._match(TokenType.RBRACE) +490 self._match(TokenType.LPAREN) +491 self._match(TokenType.ML_OR) +492 self._match(TokenType.LBRACE) +493 self.sort() +494 self._match(TokenType.RBRACE) +495 patterns = self._pattern_list() +496 self._match(TokenType.RPAREN) +497 return patterns
+ +498 +
+[docs] +499 def symbol(self) -> Symbol: +500 name = self.symbol_id() +501 vars = self._sort_var_list() +502 return Symbol(name, vars)
+ +503 +
+[docs] +504 def axiom(self) -> Axiom: +505 self._match(TokenType.KW_AXIOM) +506 vars = self._sort_var_list() +507 pattern = self.pattern() +508 attrs = self._attr_list() +509 return Axiom(vars, pattern, attrs)
+ +510 +
+[docs] +511 def claim(self) -> Claim: +512 self._match(TokenType.KW_CLAIM) +513 vars = self._sort_var_list() +514 pattern = self.pattern() +515 attrs = self._attr_list() +516 return Claim(vars, pattern, attrs)
+ +517 +
+[docs] +518 def module(self) -> Module: +519 self._match(TokenType.KW_MODULE) +520 name = self.id() +521 +522 sentences: list[Sentence] = [] +523 while self._la.type != TokenType.KW_ENDMODULE: +524 sentences.append(self.sentence()) +525 self._consume() +526 +527 attrs = self._attr_list() +528 +529 return Module(name, sentences, attrs)
+ +530 +
+[docs] +531 def definition(self) -> Definition: +532 attrs = self._attr_list() +533 +534 modules: list[Module] = [] +535 while self._la.type != TokenType.EOF: +536 modules.append(self.module()) +537 +538 return Definition(modules, attrs)
+
+ +
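A small self-contained sketch of the two most common entry points:

from pyk.kore.parser import KoreParser

# Parse a single KORE pattern from text.
pattern = KoreParser(r'\dv{SortInt{}}("1")').pattern()

# Parse a definition; the same method is used on the text of a kompiled
# definition.kore file.
definition = KoreParser('[] module TEST endmodule []').definition()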
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kore/pool.html b/pyk/_modules/pyk/kore/pool.html new file mode 100644 index 00000000000..be7ca058f3f --- /dev/null +++ b/pyk/_modules/pyk/kore/pool.html @@ -0,0 +1,171 @@ + + + + + + + + pyk.kore.pool — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kore.pool

+ 1from __future__ import annotations
+ 2
+ 3from concurrent.futures import ThreadPoolExecutor
+ 4from threading import current_thread
+ 5from typing import TYPE_CHECKING, ContextManager
+ 6
+ 7if TYPE_CHECKING:
+ 8    from collections.abc import Callable
+ 9    from concurrent.futures import Executor, Future
+10    from typing import Any, Concatenate, ParamSpec, TypeVar
+11
+12    from .rpc import KoreServer
+13
+14    P = ParamSpec('P')
+15    T = TypeVar('T')
+16
+17
+
+[docs] +18class KoreServerPool(ContextManager['KoreServerPool']): +19 _create_server: Callable[[], KoreServer] +20 _servers: dict[str, KoreServer] +21 _executor: Executor +22 _closed: bool +23 +24 def __init__( +25 self, +26 create_server: Callable[[], KoreServer], +27 *, +28 max_workers: int | None = None, +29 ) -> None: +30 self._create_server = create_server +31 self._servers = {} +32 self._executor = ThreadPoolExecutor(max_workers) +33 self._closed = False +34 +35 def __enter__(self) -> KoreServerPool: +36 return self +37 +38 def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: +39 self.close() +40 +
+[docs] +41 def close(self) -> None: +42 self._executor.shutdown() +43 for server in self._servers.values(): +44 server.close() +45 self._closed = True
+ +46 +
+[docs] +47 def submit(self, fn: Callable[Concatenate[int, P], T], /, *args: P.args, **kwargs: P.kwargs) -> Future[T]: +48 if self._closed: +49 raise ValueError('KoreServerPool has been closed') +50 return self._executor.submit(self._with_port(fn), *args, **kwargs)
+ +51 +52 def _with_port(self, fn: Callable[Concatenate[int, P], T]) -> Callable[P, T]: +53 def execute(*args: P.args, **kwargs: P.kwargs) -> T: +54 thread_name = current_thread().name +55 server = self._servers.get(thread_name) +56 if server is None: +57 server = self._servers.setdefault(thread_name, self._create_server()) +58 server_port = server.port +59 return fn(server_port, *args, **kwargs) +60 +61 return execute
+ +
+ +
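An illustrative sketch of how the pool injects a thread-local server's port into submitted jobs; create_server and the job body are placeholders supplied by the caller.

from pyk.kore.pool import KoreServerPool


def run_jobs(create_server, inputs):
    """Run one job per input on a pool of kore-rpc servers (sketch).

    create_server: zero-argument callable returning a started pyk.kore.rpc.KoreServer.
    """
    def job(port: int, item):   # the pool passes the thread-local server's port first
        return port, item       # placeholder for a real RPC interaction on that port

    with KoreServerPool(create_server, max_workers=4) as pool:
        futures = [pool.submit(job, item) for item in inputs]
        return [future.result() for future in futures]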
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kore/prelude.html b/pyk/_modules/pyk/kore/prelude.html new file mode 100644 index 00000000000..e58e8ced854 --- /dev/null +++ b/pyk/_modules/pyk/kore/prelude.html @@ -0,0 +1,606 @@ + + + + + + + + pyk.kore.prelude — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kore.prelude

+  1from __future__ import annotations
+  2
+  3from itertools import chain
+  4from typing import TYPE_CHECKING
+  5
+  6from ..dequote import bytes_decode
+  7from ..utils import check_type
+  8from .syntax import DV, App, LeftAssoc, RightAssoc, SortApp, String, SymbolId
+  9
+ 10if TYPE_CHECKING:
+ 11    from collections.abc import Iterable, Iterator, Mapping
+ 12    from typing import Any, Final
+ 13
+ 14    from .syntax import EVar, Pattern, Sort
+ 15
+ 16
+ 17# ----------
+ 18# Base types
+ 19# ----------
+ 20
+ 21BOOL: Final = SortApp('SortBool')
+ 22INT: Final = SortApp('SortInt')
+ 23BYTES: Final = SortApp('SortBytes')
+ 24STRING: Final = SortApp('SortString')
+ 25ID: Final = SortApp('SortId')
+ 26
+ 27TRUE: Final = DV(BOOL, String('true'))
+ 28FALSE: Final = DV(BOOL, String('false'))
+ 29
+ 30
+
+[docs] + 31def dv(val: bool | int | bytes | str) -> DV: + 32 if type(val) is bool: + 33 return bool_dv(val) + 34 if type(val) is int: + 35 return int_dv(val) + 36 if type(val) is bytes: + 37 return bytes_dv(val) + 38 if type(val) is str: + 39 return str_dv(val) + 40 raise TypeError(f'Illegal type: {type(val)}')
+ + 41 + 42 +
+[docs] + 43def bool_dv(val: bool) -> DV: + 44 return TRUE if val else FALSE
+ + 45 + 46 +
+[docs] + 47def int_dv(val: int) -> DV: + 48 return DV(INT, String(str(val)))
+ + 49 + 50 +
+[docs] + 51def bytes_dv(val: bytes) -> DV: + 52 return DV(BYTES, String(bytes_decode(val)))
+ + 53 + 54 +
+[docs] + 55def str_dv(val: str) -> DV: + 56 return DV(STRING, String(val))
+ + 57 + 58 + 59# ------------ + 60# Bool + 61# ------------ + 62 + 63LBL_NOT_BOOL: Final = SymbolId("LblnotBool'Unds'") + 64LBL_AND_BOOL: Final = SymbolId("Lbl'Unds'andBool'Unds'") + 65LBL_OR_BOOL: Final = SymbolId("Lbl'Unds'orBool'Unds'") + 66LBL_IMPLIES_BOOL: Final = SymbolId("Lbl'Unds'impliesBool'Unds'") + 67LBL_XOR_BOOL: Final = SymbolId("Lbl'Unds'xorBool'Unds'") + 68LBL_EQ_BOOL: Final = SymbolId("Lbl'UndsEqlsEqls'Bool'Unds'") + 69LBL_NE_BOOL: Final = SymbolId("Lbl'UndsEqlsSlshEqls'Bool'Unds'") + 70 + 71 +
+[docs] + 72def not_bool(pattern: Pattern) -> Pattern: + 73 return App(LBL_NOT_BOOL, (), (pattern,))
+ + 74 + 75 +
+[docs] + 76def and_bool(left: Pattern, right: Pattern) -> Pattern: + 77 return App(LBL_AND_BOOL, (), (left, right))
+ + 78 + 79 +
+[docs] + 80def or_bool(left: Pattern, right: Pattern) -> Pattern: + 81 return App(LBL_OR_BOOL, (), (left, right))
+ + 82 + 83 +
+[docs] + 84def implies_bool(left: Pattern, right: Pattern) -> Pattern: + 85 return App(LBL_IMPLIES_BOOL, (), (left, right))
+ + 86 + 87 +
+[docs] + 88def xor_bool(left: Pattern, right: Pattern) -> Pattern: + 89 return App(LBL_XOR_BOOL, (), (left, right))
+ + 90 + 91 +
+[docs] + 92def eq_bool(left: Pattern, right: Pattern) -> Pattern: + 93 return App(LBL_EQ_BOOL, (), (left, right))
+ + 94 + 95 +
+[docs] + 96def ne_bool(left: Pattern, right: Pattern) -> Pattern: + 97 return App(LBL_NE_BOOL, (), (left, right))
+ + 98 + 99 +100# ------------ +101# Int +102# ------------ +103 +104LBL_EQ_INT: Final = SymbolId("Lbl'UndsEqlsEqls'Int'Unds'") +105LBL_NE_INT: Final = SymbolId("Lbl'UndsEqlsSlshEqls'Int'Unds'") +106LBL_GT_INT: Final = SymbolId("Lbl'Unds-GT-'Int'Unds'") +107LBL_GE_INT: Final = SymbolId("Lbl'Unds-GT-Eqls'Int'Unds'") +108LBL_LT_INT: Final = SymbolId("Lbl'Unds-LT-'Int'Unds'") +109LBL_LE_INT: Final = SymbolId("Lbl'Unds-LT-Eqls'Int'Unds'") +110 +111 +
+[docs] +112def eq_int(left: Pattern, right: Pattern) -> Pattern: +113 return App(LBL_EQ_INT, (), (left, right))
+ +114 +115 +
+[docs] +116def ne_int(left: Pattern, right: Pattern) -> Pattern: +117 return App(LBL_NE_INT, (), (left, right))
+ +118 +119 +
+[docs] +120def gt_int(left: Pattern, right: Pattern) -> Pattern: +121 return App(LBL_GT_INT, (), (left, right))
+ +122 +123 +
+[docs] +124def ge_int(left: Pattern, right: Pattern) -> Pattern: +125 return App(LBL_GE_INT, (), (left, right))
+ +126 +127 +
+[docs] +128def lt_int(left: Pattern, right: Pattern) -> Pattern: +129 return App(LBL_LT_INT, (), (left, right))
+ +130 +131 +
+[docs] +132def le_int(left: Pattern, right: Pattern) -> Pattern: +133 return App(LBL_LE_INT, (), (left, right))
+ +134 +135 +136# ------------ +137# K constructs +138# ------------ +139 +140# TODO auto injections +141 +142SORT_K: Final = SortApp('SortK') +143SORT_K_ITEM: Final = SortApp('SortKItem') +144SORT_K_CONFIG_VAR: Final = SortApp('SortKConfigVar') +145SORT_GENERATED_TOP_CELL: Final = SortApp('SortGeneratedTopCell') +146SORT_GENERATED_COUNTER_CELL: Final = SortApp('SortGeneratedCounterCell') +147 +148LBL_INIT_GENERATED_TOP_CELL: Final = SymbolId('LblinitGeneratedTopCell') +149LBL_GENERATED_TOP: Final = SymbolId("Lbl'-LT-'generatedTop'-GT-'") +150LBL_GENERATED_COUNTER: Final = SymbolId("Lbl'-LT-'generatedCounter'-GT-'") +151LBL_K: Final = SymbolId("Lbl'-LT-'k'-GT-'") +152LBL_ITE: Final = SymbolId('Lblite') +153INJ: Final = SymbolId('inj') +154KSEQ: Final = SymbolId('kseq') +155 +156DOTK: Final = App('dotk', (), ()) +157 +158 +
+[docs] +159def init_generated_top_cell(pattern: Pattern) -> App: +160 return App(LBL_INIT_GENERATED_TOP_CELL, (), (pattern,))
+ +161 +162 +
+[docs] +163def generated_top(patterns: Iterable[Pattern]) -> App: +164 return App(LBL_GENERATED_TOP, (), patterns)
+ +165 +166 +
+[docs] +167def generated_counter(pattern: Pattern) -> App: +168 return App(LBL_GENERATED_COUNTER, (), (pattern,))
+ +169 +170 +
+[docs] +171def k(pattern: Pattern) -> App: +172 return App(LBL_K, (), (pattern,))
+ +173 +174 +
+[docs] +175def inj(sort1: Sort, sort2: Sort, pattern: Pattern) -> App: +176 return App(INJ, (sort1, sort2), (pattern,))
+ +177 +178 +
+[docs] +179def kseq(kitems: Iterable[Pattern], *, dotvar: EVar | None = None) -> Pattern: +180 if dotvar and dotvar.sort != SORT_K: +181 raise ValueError(f'Expected {SORT_K.text} as dotvar sort, got: {dotvar.sort.text}') +182 +183 tail = dotvar or DOTK +184 args = tuple(chain(kitems, (tail,))) +185 +186 if len(args) == 1: +187 return tail +188 +189 if len(args) == 2: +190 return App(KSEQ, (), args) +191 +192 return RightAssoc(KSEQ, (), args)
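A hedged sketch of kseq: items are usually injected into SortKItem first; with two or more items the result is a right-associated kseq chain terminated by dotk, and a dotvar of sort SortK can stand in for the trailing dotk. INT and int_dv are assumed from earlier in this module, EVar from pyk.kore.syntax.

    one = inj(INT, SORT_K_ITEM, int_dv(1))
    two = inj(INT, SORT_K_ITEM, int_dv(2))
    kseq([one, two])                              # one ~> two ~> .K
    kseq([one], dotvar=EVar('VarRest', SORT_K))   # one ~> Rest:K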
+ +193 +194 +
+[docs] +195def k_config_var(var: str) -> DV: +196 return DV(SORT_K_CONFIG_VAR, String(var))
+ +197 +198 +
+[docs] +199def top_cell_initializer(config: Mapping[str, Pattern]) -> App: +200 return init_generated_top_cell( +201 map_pattern( +202 *( +203 ( +204 inj(SORT_K_CONFIG_VAR, SORT_K_ITEM, k_config_var(key)), +205 value, +206 ) +207 for key, value in config.items() +208 ) +209 ) +210 )
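A hedged sketch of top_cell_initializer: it builds the LblinitGeneratedTopCell application over a map from configuration variable names to values, which is how an initial configuration is typically assembled. Here pgm stands for an already-built Pattern of sort SortKItem, and '$PGM' is the conventional K configuration variable.

    init_config = top_cell_initializer({'$PGM': pgm})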
+ +211 +212 +213# ----------- +214# Collections +215# ----------- +216 +217STOP_LIST: Final = App("Lbl'Stop'List") +218LBL_LIST: Final = SymbolId("Lbl'Unds'List'Unds'") +219LBL_LIST_ITEM: Final = SymbolId('LblListItem') +220 +221 +
+[docs] +222def list_pattern(*args: Pattern) -> Pattern: +223 if not args: +224 return STOP_LIST +225 return LeftAssoc(LBL_LIST, args=(App(LBL_LIST_ITEM, args=(arg,)) for arg in args))
+ +226 +227 +228STOP_SET: Final = App("Lbl'Stop'Set") +229LBL_SET: Final = SymbolId("Lbl'Unds'Set'Unds'") +230LBL_SET_ITEM: Final = SymbolId('LblSetItem') +231 +232 +
+[docs] +233def set_pattern(*args: Pattern) -> Pattern: +234 if not args: +235 return STOP_SET +236 return LeftAssoc(LBL_SET, args=(App(LBL_SET_ITEM, args=(arg,)) for arg in args))
+ +237 +238 +239STOP_MAP: Final = App("Lbl'Stop'Map") +240LBL_MAP: Final = SymbolId("Lbl'Unds'Map'Unds'") +241LBL_MAP_ITEM: Final = SymbolId("Lbl'UndsPipe'-'-GT-Unds'") +242 +243 +
+[docs] +244def map_pattern(*args: tuple[Pattern, Pattern], cell: str | None = None) -> Pattern: +245 if not args: +246 return App(f"Lbl'Stop'{cell}Map") if cell else STOP_MAP +247 +248 cons_symbol = SymbolId(f"Lbl'Unds'{cell}Map'Unds'") if cell else LBL_MAP +249 item_symbol = SymbolId(f'Lbl{cell}MapItem') if cell else LBL_MAP_ITEM +250 return LeftAssoc(cons_symbol, args=(App(item_symbol, args=arg) for arg in args))
+ +251 +252 +253STOP_RANGEMAP: Final = App("Lbl'Stop'RangeMap") +254LBL_RANGEMAP: Final = SymbolId("Lbl'Unds'RangeMap'Unds'") +255LBL_RANGEMAP_ITEM: Final = SymbolId("Lbl'Unds'r'Pipe'-'-GT-Unds'") +256LBL_RANGEMAP_RANGE: Final = SymbolId("LblRangeMap'Coln'Range") +257 +258 +
+[docs] +259def rangemap_pattern(*args: tuple[tuple[Pattern, Pattern], Pattern]) -> Pattern: +260 if not args: +261 return STOP_RANGEMAP +262 +263 return LeftAssoc( +264 LBL_RANGEMAP, +265 args=(App(LBL_RANGEMAP_ITEM, args=(App(LBL_RANGEMAP_RANGE, args=arg[0]), arg[1])) for arg in args), +266 )
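Hedged sketches of the collection constructors (the 'Account' cell name is purely illustrative, and int_dv/str_dv are assumed from earlier in this module). Keys and values are shown as bare domain values for brevity; real definitions typically expect them injected into the sorts the collection symbols take.

    list_pattern(int_dv(1), int_dv(2))                               # _List_(ListItem(1), ListItem(2))
    set_pattern(str_dv('a'), str_dv('b'))                            # _Set_(SetItem("a"), SetItem("b"))
    map_pattern((str_dv('k'), int_dv(1)), (str_dv('v'), int_dv(2)))  # k |-> 1  v |-> 2
    map_pattern()                                                    # the unit Lbl'Stop'Map
    map_pattern(cell='Account')                                      # empty map over the AccountMap symbols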
+ +267 +268 +269# ---- +270# JSON +271# ---- +272 +273SORT_JSON: Final = SortApp('SortJSON') +274SORT_JSON_KEY: Final = SortApp('SortJSONKey') +275 +276LBL_JSONS: Final = SymbolId('LblJSONs') +277LBL_JSON_LIST: Final = SymbolId('LblJSONList') +278LBL_JSON_OBJECT: Final = SymbolId('LblJSONObject') +279LBL_JSON_ENTRY: Final = SymbolId('LblJSONEntry') +280 +281JSON_NULL: Final = App('LblJSONnull') +282STOP_JSONS: Final = App("Lbl'Stop'List'LBraQuot'JSONs'QuotRBra'") +283 +284LBL_STRING2JSON: Final = SymbolId('LblString2JSON') +285LBL_JSON2STRING: Final = SymbolId('LblJSON2String') +286 +287 +
+[docs] +288def string2json(pattern: Pattern) -> App: +289 return App(LBL_STRING2JSON, (), (pattern,))
+ +290 +291 +
+[docs] +292def json2string(pattern: Pattern) -> App: +293 return App(LBL_JSON2STRING, (), (pattern,))
+ +294 +295 +
+[docs] +296def json_list(pattern: Pattern) -> App: +297 return App(LBL_JSON_LIST, (), (pattern,))
+ +298 +299 +
+[docs] +300def json_object(pattern: Pattern) -> App: +301 return App(LBL_JSON_OBJECT, (), (pattern,))
+ +302 +303 +
+[docs] +304def jsons(patterns: Iterable[Pattern]) -> RightAssoc: +305 return RightAssoc(LBL_JSONS, (), chain(patterns, (STOP_JSONS,)))
+ +306 +307 +
+[docs] +308def json_key(key: str) -> App: +309 return inj(STRING, SORT_JSON_KEY, str_dv(key))
+ +310 +311 +
+[docs] +312def json_entry(key: Pattern, value: Pattern) -> App: +313 return App(LBL_JSON_ENTRY, (), (key, value))
+ +314 +315 +
+[docs] +316def json_to_kore(data: Any) -> Pattern: +317 match data: +318 case None: +319 return JSON_NULL +320 case bool(): +321 return inj(BOOL, SORT_JSON, bool_dv(data)) +322 case int(): +323 return inj(INT, SORT_JSON, int_dv(data)) +324 case str(): +325 return inj(STRING, SORT_JSON, str_dv(data)) +326 case list(): +327 return json_list(jsons(json_to_kore(elem) for elem in data)) +328 case dict(): +329 return json_object( +330 jsons(json_entry(json_key(check_type(key, str)), json_to_kore(value)) for key, value in data.items()) +331 ) +332 case _: +333 raise TypeError(f'Unsupported object of type: {type(data).__name__}: {data}')
+ +334 +335 +336# TODO Eliminate circularity with kore.match +
+[docs] +337def kore_to_json(pattern: Pattern) -> Any: +338 from . import match as km +339 +340 if isinstance(pattern, DV): +341 if pattern.sort == BOOL: +342 return km.kore_bool(pattern) +343 +344 if pattern.sort == INT: +345 return km.kore_int(pattern) +346 +347 if pattern.sort == STRING: +348 return km.kore_str(pattern) +349 +350 if isinstance(pattern, App): +351 if pattern.symbol == JSON_NULL.symbol: +352 return None +353 +354 if pattern.symbol == INJ.value: # can be further refined: arg is DV, ... +355 return kore_to_json(km.inj(pattern)) +356 +357 if pattern.symbol == LBL_JSON_LIST.value: +358 return [kore_to_json(elem) for elem in _iter_json_list(pattern)] +359 +360 if pattern.symbol == LBL_JSON_OBJECT.value: +361 return {key: kore_to_json(value) for key, value in _iter_json_object(pattern)} +362 +363 raise ValueError(f'Not a JSON pattern: {pattern.text}')
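A hedged round-trip sketch: json_to_kore embeds JSON-like Python data into SortJSON, and kore_to_json inverts that embedding, so for plain JSON data the round trip is expected to be the identity.

    data = {'name': 'alice', 'balance': 42, 'active': True, 'tags': ['a', 'b']}
    pattern = json_to_kore(data)
    assert kore_to_json(pattern) == data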
+ +364 +365 +366def _iter_json_list(app: App) -> Iterator[Pattern]: +367 from . import match as km +368 +369 km.match_app(app, LBL_JSON_LIST.value) +370 curr = km.match_app(app.args[0]) +371 while curr.symbol != STOP_JSONS.symbol: +372 km.match_app(curr, LBL_JSONS.value) +373 yield curr.args[0] +374 curr = km.match_app(curr.args[1]) +375 +376 +377def _iter_json_object(app: App) -> Iterator[tuple[str, Pattern]]: +378 from . import match as km +379 +380 km.match_app(app, LBL_JSON_OBJECT.value) +381 curr = km.match_app(app.args[0]) +382 while curr.symbol != STOP_JSONS.symbol: +383 km.match_app(curr, LBL_JSONS.value) +384 entry = km.match_app(curr.args[0], LBL_JSON_ENTRY.value) +385 key = km.kore_str(km.inj(entry.args[0])) +386 value = entry.args[1] +387 yield key, value +388 curr = km.match_app(curr.args[1]) +
+ +
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kore/rpc.html b/pyk/_modules/pyk/kore/rpc.html new file mode 100644 index 00000000000..b07746d7610 --- /dev/null +++ b/pyk/_modules/pyk/kore/rpc.html @@ -0,0 +1,1754 @@ + + + + + + + + pyk.kore.rpc — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +

Source code for pyk.kore.rpc

+   1from __future__ import annotations
+   2
+   3import http.client
+   4import json
+   5import logging
+   6import os
+   7import socket
+   8import sys
+   9from abc import ABC, abstractmethod
+  10from dataclasses import dataclass
+  11from datetime import datetime, timedelta
+  12from enum import Enum, auto
+  13from pathlib import Path
+  14from signal import SIGINT
+  15from subprocess import DEVNULL, PIPE, Popen
+  16from threading import Thread
+  17from time import sleep
+  18from typing import ClassVar  # noqa: TC003
+  19from typing import TYPE_CHECKING, ContextManager, NamedTuple, TypedDict, final
+  20
+  21from psutil import Process
+  22
+  23from ..utils import FrozenDict, check_dir_path, check_file_path, filter_none, run_process_2
+  24from . import manip
+  25from .prelude import SORT_GENERATED_TOP_CELL
+  26from .syntax import And, Equals, EVar, kore_term
+  27
+  28if TYPE_CHECKING:
+  29    from collections.abc import Iterable, Mapping
+  30    from typing import IO, Any, Final, TypeVar
+  31
+  32    from typing_extensions import Required
+  33
+  34    from ..utils import BugReport
+  35    from .syntax import Module, Pattern
+  36
+  37    ER = TypeVar('ER', bound='ExecuteResult')
+  38    RR = TypeVar('RR', bound='RewriteResult')
+  39    LE = TypeVar('LE', bound='LogEntry')
+  40
+  41_LOGGER: Final = logging.getLogger(__name__)
+  42
+  43
+
+[docs] + 44class KoreExecLogFormat(Enum): + 45 STANDARD = 'standard' + 46 ONELINE = 'oneline'
+ + 47 + 48 +
+[docs] + 49@final + 50@dataclass + 51class JsonRpcError(Exception): + 52 def __init__(self, message: str, code: int, data: Any = None): + 53 super().__init__(message) + 54 self.message = message + 55 self.code = code + 56 self.data = data
+ + 57 + 58 +
+[docs] + 59class Transport(ContextManager['Transport'], ABC): + 60 +
+[docs] + 61 def request(self, req: str, request_id: str, method_name: str) -> str: + 62 server_addr = self._description() + 63 _LOGGER.info(f'Sending request to {server_addr}: {request_id} - {method_name}') + 64 _LOGGER.debug(f'Sending request to {server_addr}: {req}') + 65 resp = self._request(req) + 66 _LOGGER.info(f'Received response from {server_addr}: {request_id} - {method_name}') + 67 _LOGGER.debug(f'Received response from {server_addr}: {resp}') + 68 return resp
+ + 69 + 70 def __enter__(self) -> Transport: + 71 return self + 72 + 73 def __exit__(self, *args: Any) -> None: + 74 self.close() + 75 +
+[docs] + 76 @abstractmethod + 77 def close(self) -> None: ...
+ + 78 + 79 @abstractmethod + 80 def _request(self, req: str) -> str: ... + 81 + 82 @abstractmethod + 83 def _description(self) -> str: ...
+ + 84 + 85 +
+[docs] + 86class TransportType(Enum): + 87 SINGLE_SOCKET = auto() + 88 HTTP = auto()
+ + 89 + 90 +
+[docs] + 91@final + 92class SingleSocketTransport(Transport): + 93 _host: str + 94 _port: int + 95 _sock: socket.socket + 96 _file: IO[str] + 97 + 98 def __init__( + 99 self, + 100 host: str, + 101 port: int, + 102 *, + 103 timeout: int | None = None, + 104 ): + 105 self._host = host + 106 self._port = port + 107 self._sock = self._create_connection(host, port, timeout) + 108 self._file = self._sock.makefile('r') + 109 + 110 @staticmethod + 111 def _create_connection(host: str, port: int, timeout: int | None) -> socket.socket: + 112 if timeout is not None and timeout < 0: + 113 raise ValueError(f'Expected nonnegative timeout value, got: {timeout}') + 114 + 115 _LOGGER.info(f'Connecting to host: {host}:{port}') + 116 + 117 timeout_datetime = datetime.now() + timedelta(milliseconds=timeout) if timeout is not None else None + 118 while timeout_datetime is None or datetime.now() < timeout_datetime: + 119 try: + 120 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + 121 sock.connect((host, port)) + 122 _LOGGER.info(f'Connected to host: {host}:{port}') + 123 return sock + 124 except ConnectionRefusedError: + 125 sock.close() + 126 sleep(0.1) + 127 + 128 raise RuntimeError(f'Connection timed out: {host}:{port}') + 129 +
+[docs] + 130 def close(self) -> None: + 131 self._file.close() + 132 self._sock.close()
+ + 133 + 134 def _request(self, req: str) -> str: + 135 self._sock.sendall(req.encode()) + 136 server_addr = self._description() + 137 _LOGGER.debug(f'Waiting for response from {server_addr}...') + 138 return self._file.readline().rstrip() + 139 + 140 def _description(self) -> str: + 141 return f'{self._host}:{self._port}'
+ + 142 + 143 +
+[docs] + 144@final + 145class HttpTransport(Transport): + 146 _host: str + 147 _port: int + 148 _timeout: int | None + 149 + 150 def __init__( + 151 self, + 152 host: str, + 153 port: int, + 154 *, + 155 timeout: int | None = None, + 156 ): + 157 self._host = host + 158 self._port = port + 159 self._timeout = timeout + 160 +
+[docs] + 161 def close(self) -> None: + 162 pass
+ + 163 + 164 def _request(self, req: str) -> str: + 165 connection = http.client.HTTPConnection(self._host, self._port, timeout=self._timeout) + 166 connection.request('POST', '/', body=req, headers={'Content-Type': 'application/json'}) + 167 server_addr = self._description() + 168 _LOGGER.debug(f'Waiting for response from {server_addr}...') + 169 response = connection.getresponse() + 170 if response.status != 200: + 171 raise JsonRpcError('Internal server error', -32603) + 172 return response.read().decode() + 173 + 174 def _description(self) -> str: + 175 return f'{self._host}:{self._port}'
+ + 176 + 177 +
+[docs] + 178class JsonRpcClientFacade(ContextManager['JsonRpcClientFacade']): + 179 _JSON_RPC_VERSION: Final = '2.0' + 180 + 181 _clients: dict[str, list[JsonRpcClient]] + 182 _default_client: JsonRpcClient + 183 + 184 def __init__( + 185 self, + 186 default_host: str, + 187 default_port: int, + 188 default_transport: TransportType, + 189 dispatch: dict[str, list[tuple[str, int, TransportType]]], + 190 *, + 191 timeout: int | None = None, + 192 bug_report: BugReport | None = None, + 193 bug_report_id: str | None = None, + 194 ): + 195 client_cache = {} + 196 self._clients = {} + 197 self._default_client = JsonRpcClient( + 198 default_host, + 199 default_port, + 200 timeout=timeout, + 201 bug_report=bug_report, + 202 bug_report_id=bug_report_id, + 203 transport=default_transport, + 204 ) + 205 client_cache[(default_host, default_port)] = self._default_client + 206 for method, servers in dispatch.items(): + 207 for host, port, transport in servers: + 208 if (host, port) in client_cache: + 209 self._update_clients(method, client_cache[(host, port)]) + 210 else: + 211 new_id = None if bug_report_id is None else bug_report_id + '_' + str(transport) + 212 new_client = JsonRpcClient( + 213 host, port, timeout=timeout, bug_report=bug_report, bug_report_id=new_id, transport=transport + 214 ) + 215 self._update_clients(method, new_client) + 216 client_cache[(host, port)] = new_client + 217 + 218 def _update_clients(self, method: str, client: JsonRpcClient) -> None: + 219 clients = self._clients.get(method, []) + 220 self._clients[method] = clients + 221 clients.append(client) + 222 + 223 def __enter__(self) -> JsonRpcClientFacade: + 224 return self + 225 + 226 def __exit__(self, *args: Any) -> None: + 227 self._default_client.__exit__(*args) + 228 for clients in self._clients.values(): + 229 for client in clients: + 230 client.__exit__(*args) + 231 +
+[docs] + 232 def close(self) -> None: + 233 self._default_client.close() + 234 for clients in self._clients.values(): + 235 for client in clients: + 236 client.close()
+ + 237 +
+[docs] + 238 def request(self, method: str, **params: Any) -> dict[str, Any]: + 239 if method in self._clients: + 240 for client in self._clients[method]: + 241 response = client.request(method, **params) + 242 if 'error' in response: + 243 return response + 244 return response + 245 else: + 246 return self._default_client.request(method, **params)
+
+ + 247 + 248 +
+[docs] + 249class JsonRpcClient(ContextManager['JsonRpcClient']): + 250 _JSON_RPC_VERSION: Final = '2.0' + 251 + 252 _transport: Transport + 253 _req_id: int + 254 + 255 _bug_report: BugReport | None + 256 _bug_report_id: str | None + 257 + 258 def __init__( + 259 self, + 260 host: str, + 261 port: int, + 262 *, + 263 timeout: int | None = None, + 264 bug_report: BugReport | None = None, + 265 bug_report_id: str | None = None, + 266 transport: TransportType = TransportType.SINGLE_SOCKET, + 267 ): + 268 if (bug_report is None) != (bug_report_id is None): + 269 raise ValueError('bug_report and bug_report_id must be passed together.') + 270 + 271 self._transport = self._create_transport(transport, host=host, port=port, timeout=timeout) + 272 self._req_id = 1 + 273 self._bug_report_id = bug_report_id + 274 self._bug_report = bug_report + 275 + 276 @staticmethod + 277 def _create_transport(transport: TransportType, *, host: str, port: int, timeout: int | None) -> Transport: + 278 match transport: + 279 case TransportType.SINGLE_SOCKET: + 280 return SingleSocketTransport(host, port, timeout=timeout) + 281 case TransportType.HTTP: + 282 return HttpTransport(host, port, timeout=timeout) + 283 case _: + 284 raise AssertionError() + 285 + 286 def __enter__(self) -> JsonRpcClient: + 287 return self + 288 + 289 def __exit__(self, *args: Any) -> None: + 290 self._transport.__exit__(*args) + 291 +
+[docs] + 292 def close(self) -> None: + 293 self._transport.close()
+ + 294 +
+[docs] + 295 def request(self, method: str, **params: Any) -> dict[str, Any]: + 296 req_id = f'{id(self)}-{self._req_id:03}' + 297 self._req_id += 1 + 298 + 299 payload = { + 300 'jsonrpc': self._JSON_RPC_VERSION, + 301 'id': req_id, + 302 'method': method, + 303 'params': params, + 304 } + 305 + 306 req = json.dumps(payload) + 307 + 308 base_name = self._bug_report_id if self._bug_report_id is not None else 'kore_rpc' + 309 req_name = f'{base_name}/{id(self)}/{req_id}' + 310 if self._bug_report: + 311 bug_report_request = f'{req_name}_request.json' + 312 self._bug_report.add_file_contents(req, Path(bug_report_request)) + 313 self._bug_report.add_request(f'{req_name}_request.json') + 314 + 315 resp = self._transport.request(req, req_id, method) + 316 if not resp: + 317 raise RuntimeError('Empty response received') + 318 + 319 if self._bug_report: + 320 bug_report_response = f'{req_name}_response.json' + 321 self._bug_report.add_file_contents(resp, Path(bug_report_response)) + 322 self._bug_report.add_request(f'{req_name}_response.json') + 323 + 324 data = json.loads(resp) + 325 self._check(data) + 326 assert data['id'] == req_id + 327 + 328 return data['result']
+ + 329 + 330 @staticmethod + 331 def _check(response: Mapping[str, Any]) -> None: + 332 if 'error' not in response: + 333 return + 334 + 335 assert response['error']['code'] not in {-32700, -32600}, 'Malformed JSON-RPC request' + 336 raise JsonRpcError(**response['error'])
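A hedged sketch of the raw JSON-RPC layer (the port is illustrative; KoreClient further below is the intended high-level interface). The method name and parameter follow the kore-rpc protocol, and state_json stands for a KORE-JSON payload of the shape produced by KoreClient._state.

    with JsonRpcClient('localhost', 56601, transport=TransportType.HTTP) as client:
        result = client.request('simplify', state=state_json)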
+ + 337 + 338 +
+[docs] + 339class KoreClientError(Exception, ABC): + 340 def __init__(self, message: str): + 341 super().__init__(message)
+ + 342 + 343 +
+[docs] + 344@final + 345@dataclass + 346class ParseError(KoreClientError): + 347 error: str + 348 + 349 def __init__(self, error: str): + 350 self.error = error + 351 super().__init__(f'Could not parse pattern: {self.error}')
+ + 352 + 353 +
+[docs] + 354@final + 355@dataclass + 356class PatternError(KoreClientError): + 357 error: str + 358 context: tuple[str, ...] + 359 + 360 def __init__(self, error: str, context: Iterable[str]): + 361 self.error = error + 362 self.context = tuple(context) + 363 context_str = ' ;; '.join(self.context) + 364 super().__init__(f'Could not verify pattern: {self.error} Context: {context_str}')
+ + 365 + 366 +
+[docs] + 367@final + 368@dataclass + 369class UnknownModuleError(KoreClientError): + 370 module_name: str + 371 + 372 def __init__(self, module_name: str): + 373 self.module_name = module_name + 374 super().__init__(f'Could not find module: {self.module_name}')
+ + 375 + 376 +
+[docs] + 377@final + 378@dataclass + 379class InvalidModuleError(KoreClientError): + 380 error: str + 381 context: tuple[str, ...] | None + 382 + 383 def __init__(self, error: str, context: Iterable[str] | None): + 384 self.error = error + 385 self.context = tuple(context) if context else None + 386 context_str = ' Context: ' + ' ;; '.join(self.context) if self.context else '' + 387 super().__init__(f'Could not verify module: {self.error}{context_str}')
+ + 388 + 389 +
+[docs] + 390@final + 391@dataclass + 392class DuplicateModuleError(KoreClientError): + 393 module_name: str + 394 + 395 def __init__(self, module_name: str): + 396 self.module_name = module_name + 397 super().__init__(f'Duplicate module name: {self.module_name}')
+ + 398 + 399 +
+[docs] + 400@final + 401@dataclass + 402class ImplicationError(KoreClientError): + 403 error: str + 404 context: tuple[str, ...] + 405 + 406 def __init__(self, error: str, context: Iterable[str]): + 407 self.error = error + 408 self.context = tuple(context) + 409 context_str = ' ;; '.join(self.context) + 410 super().__init__(f'Implication check error: {self.error} Context: {context_str}')
+ + 411 + 412 +
+[docs] + 413@final + 414@dataclass + 415class SmtSolverError(KoreClientError): + 416 error: str + 417 pattern: Pattern + 418 + 419 def __init__(self, error: str, pattern: Pattern): + 420 self.error = error + 421 self.pattern = pattern + 422 super().__init__(f'SMT solver error: {self.error} Pattern: {self.pattern.text}')
+ + 423 + 424 +
+[docs] + 425@final + 426@dataclass + 427class DefaultError(KoreClientError): + 428 message: str + 429 code: int + 430 data: Any + 431 + 432 def __init__(self, message: str, code: int, data: Any = None): + 433 self.message = message + 434 self.code = code + 435 self.data = data + 436 message = f'{self.message} | code: {self.code}' + (f' | data: {self.data}' if data is not None else '') + 437 super().__init__(message)
+ + 438 + 439 +
+[docs] + 440class StopReason(str, Enum): + 441 STUCK = 'stuck' + 442 DEPTH_BOUND = 'depth-bound' + 443 TIMEOUT = 'timeout' + 444 BRANCHING = 'branching' + 445 CUT_POINT_RULE = 'cut-point-rule' + 446 TERMINAL_RULE = 'terminal-rule' + 447 VACUOUS = 'vacuous' + 448 ABORTED = 'aborted'
+ + 449 + 450 +
+[docs] + 451@final + 452@dataclass(frozen=True) + 453class State: + 454 term: Pattern + 455 substitution: FrozenDict[EVar, Pattern] | None + 456 predicate: Pattern | None + 457 rule_id: str | None + 458 rule_substitution: FrozenDict[EVar, Pattern] | None + 459 rule_predicate: Pattern | None + 460 + 461 def __init__( + 462 self, + 463 term: Pattern, + 464 *, + 465 substitution: Mapping[EVar, Pattern] | None = None, + 466 predicate: Pattern | None = None, + 467 rule_id: str | None = None, + 468 rule_substitution: Mapping[EVar, Pattern] | None = None, + 469 rule_predicate: Pattern | None = None, + 470 ): + 471 substitution = FrozenDict(substitution) if substitution is not None else None + 472 rule_substitution = FrozenDict(rule_substitution) if rule_substitution is not None else None + 473 object.__setattr__(self, 'term', term) + 474 object.__setattr__(self, 'substitution', substitution) + 475 object.__setattr__(self, 'predicate', predicate) + 476 object.__setattr__(self, 'rule_id', rule_id) + 477 object.__setattr__(self, 'rule_substitution', rule_substitution) + 478 object.__setattr__(self, 'rule_predicate', rule_predicate) + 479 +
+[docs] + 480 @staticmethod + 481 def from_dict(dct: Mapping[str, Any]) -> State: + 482 return State( + 483 term=kore_term(dct['term']), + 484 substitution=State._subst_to_dict(kore_term(dct['substitution'])) if 'substitution' in dct else None, + 485 predicate=kore_term(dct['predicate']) if 'predicate' in dct else None, + 486 rule_id=dct.get('rule-id'), + 487 rule_substitution=( + 488 State._subst_to_dict(kore_term(dct['rule-substitution'])) if 'rule-substitution' in dct else None + 489 ), + 490 rule_predicate=kore_term(dct['rule-predicate']) if 'rule-predicate' in dct else None, + 491 )
+ + 492 + 493 @staticmethod + 494 def _subst_to_dict(pattern: Pattern) -> dict[EVar, Pattern]: + 495 def extract_entry(pattern: Pattern) -> tuple[EVar, Pattern]: + 496 if not isinstance(pattern, Equals): + 497 raise ValueError(fr'Expected \equals as substituion entry, got: {pattern.text}') + 498 if pattern.sort != SORT_GENERATED_TOP_CELL: + 499 raise ValueError( + 500 f'Expected {SORT_GENERATED_TOP_CELL.text} as substitution entry sort, got: {pattern.sort.text}' + 501 ) + 502 if not isinstance(pattern.left, EVar): + 503 raise ValueError(f'Expected EVar as substitution entry key, got: {pattern.left.text}') + 504 if pattern.left.sort != pattern.op_sort: + 505 raise ValueError( + 506 f'Mismatch between substitution entry and key sort: {pattern.op_sort.text} and {pattern.left.sort.text}' + 507 ) + 508 return pattern.left, pattern.right + 509 + 510 res: dict[EVar, Pattern] = {} + 511 for conjunct in manip.conjuncts(pattern): + 512 key, value = extract_entry(conjunct) + 513 if key in res: + 514 raise ValueError(f'Duplicate substitution entry key: {key.text} -> {[res[key].text, value.text]}') + 515 res[key] = value + 516 return res + 517 + 518 @staticmethod + 519 def _dict_to_subst(dct: Mapping[EVar, Pattern]) -> And: + 520 return And( + 521 SORT_GENERATED_TOP_CELL, + 522 tuple(Equals(var.sort, SORT_GENERATED_TOP_CELL, var, val) for var, val in dct.items()), + 523 ) + 524 + 525 @property + 526 def kore(self) -> Pattern: + 527 _kore = self.term + 528 if self.substitution is not None: + 529 _kore = And(SORT_GENERATED_TOP_CELL, (_kore,) + self._dict_to_subst(self.substitution).ops) + 530 if self.predicate is not None: + 531 _kore = And(SORT_GENERATED_TOP_CELL, (_kore, self.predicate)) + 532 return _kore
+ + 533 + 534 +
+[docs] + 535class LogEntry(ABC): +
+[docs] + 536 @classmethod + 537 def from_dict(cls: type[LE], dct: Mapping[str, Any]) -> LE: + 538 match dct['tag']: + 539 case 'rewrite': + 540 return LogRewrite.from_dict(dct) # type: ignore + 541 case _: + 542 raise ValueError(f'Unsupported LogEntry tag: {dct["tag"]!r}')
+ + 543 +
+[docs] + 544 @abstractmethod + 545 def to_dict(self) -> dict[str, Any]: ...
+
+ + 546 + 547 +
+[docs] + 548@final + 549@dataclass(frozen=True) + 550class LogRewrite(LogEntry): + 551 origin: LogOrigin + 552 result: RewriteResult + 553 +
+[docs] + 554 @classmethod + 555 def from_dict(cls: type[LogRewrite], dct: Mapping[str, Any]) -> LogRewrite: + 556 return LogRewrite( + 557 origin=LogOrigin(dct['origin']), + 558 result=RewriteResult.from_dict(dct['result']), + 559 )
+ + 560 +
+[docs] + 561 def to_dict(self) -> dict[str, Any]: + 562 return {'tag': 'rewrite', 'origin': self.origin.value, 'result': self.result.to_dict()}
+
+ + 563 + 564 +
+[docs] + 565class LogOrigin(str, Enum): + 566 KORE_RPC = 'kore-rpc' + 567 BOOSTER = 'booster' + 568 PROXY = 'proxy' + 569 LLVM = 'llvm'
+ + 570 + 571 +
+[docs] + 572class RewriteResult(ABC): + 573 rule_id: str | None + 574 +
+[docs] + 575 @classmethod + 576 def from_dict(cls: type[RR], dct: Mapping[str, Any]) -> RR: + 577 if dct['tag'] == 'success': + 578 return globals()['RewriteSuccess'].from_dict(dct) + 579 elif dct['tag'] == 'failure': + 580 return globals()['RewriteFailure'].from_dict(dct) + 581 else: + 582 raise ValueError(f"Expected {dct['tag']} as 'success'/'failure'")
+ + 583 +
+[docs] + 584 @abstractmethod + 585 def to_dict(self) -> dict[str, Any]: ...
+
+ + 586 + 587 +
+[docs] + 588@final + 589@dataclass(frozen=True) + 590class RewriteSuccess(RewriteResult): + 591 rule_id: str + 592 rewritten_term: Pattern | None = None + 593 +
+[docs] + 594 @classmethod + 595 def from_dict(cls: type[RewriteSuccess], dct: Mapping[str, Any]) -> RewriteSuccess: + 596 return RewriteSuccess( + 597 rule_id=dct['rule-id'], + 598 rewritten_term=kore_term(dct['rewritten-term']) if 'rewritten-term' in dct else None, + 599 )
+ + 600 +
+[docs] + 601 def to_dict(self) -> dict[str, Any]: + 602 rewritten_term = {'rewritten-term': KoreClient._state(self.rewritten_term)} if self.rewritten_term else {} + 603 return {'tag': 'success', 'rule-id': self.rule_id} | rewritten_term
+
+ + 604 + 605 +
+[docs] + 606@final + 607@dataclass(frozen=True) + 608class RewriteFailure(RewriteResult): + 609 rule_id: str | None + 610 reason: str + 611 +
+[docs] + 612 @classmethod + 613 def from_dict(cls: type[RewriteFailure], dct: Mapping[str, Any]) -> RewriteFailure: + 614 return RewriteFailure(rule_id=dct.get('rule-id'), reason=dct['reason'])
+ + 615 +
+[docs] + 616 def to_dict(self) -> dict[str, Any]: + 617 return {'tag': 'failure', 'rule-id': self.rule_id, 'reason': self.reason}
+
+ + 618 + 619 +
+[docs] + 620class ExecuteResult(ABC): + 621 _TYPES: Mapping[StopReason, str] = { + 622 StopReason.STUCK: 'StuckResult', + 623 StopReason.DEPTH_BOUND: 'DepthBoundResult', + 624 StopReason.TIMEOUT: 'TimeoutResult', + 625 StopReason.BRANCHING: 'BranchingResult', + 626 StopReason.CUT_POINT_RULE: 'CutPointResult', + 627 StopReason.TERMINAL_RULE: 'TerminalResult', + 628 StopReason.VACUOUS: 'VacuousResult', + 629 StopReason.ABORTED: 'AbortedResult', + 630 } + 631 + 632 reason: ClassVar[StopReason] + 633 + 634 state: State + 635 depth: int + 636 next_states: tuple[State, ...] | None + 637 rule: str | None + 638 logs: tuple[LogEntry, ...] + 639 +
+[docs] + 640 @classmethod + 641 def from_dict(cls: type[ER], dct: Mapping[str, Any]) -> ER: + 642 return globals()[ExecuteResult._TYPES[StopReason(dct['reason'])]].from_dict(dct) # type: ignore
+ + 643 + 644 @classmethod + 645 def _check_reason(cls: type[ER], dct: Mapping[str, Any]) -> None: + 646 reason = StopReason(dct['reason']) + 647 if reason is not cls.reason: + 648 raise AssertionError(f"Expected {cls.reason} as 'reason', found: {reason}")
+ + 649 + 650 +
+[docs] + 651@final + 652@dataclass(frozen=True) + 653class StuckResult(ExecuteResult): + 654 # These fields should be Final, but it makes mypy crash + 655 # https://github.com/python/mypy/issues/10090 + 656 reason = StopReason.STUCK + 657 next_states = None + 658 rule = None + 659 + 660 state: State + 661 depth: int + 662 logs: tuple[LogEntry, ...] + 663 +
+[docs] + 664 @classmethod + 665 def from_dict(cls: type[StuckResult], dct: Mapping[str, Any]) -> StuckResult: + 666 cls._check_reason(dct) + 667 logs = tuple(LogEntry.from_dict(l) for l in dct['logs']) if 'logs' in dct else () + 668 return StuckResult( + 669 state=State.from_dict(dct['state']), + 670 depth=dct['depth'], + 671 logs=logs, + 672 )
+
+ + 673 + 674 +
+[docs] + 675@final + 676@dataclass(frozen=True) + 677class DepthBoundResult(ExecuteResult): + 678 reason = StopReason.DEPTH_BOUND + 679 next_states = None + 680 rule = None + 681 + 682 state: State + 683 depth: int + 684 logs: tuple[LogEntry, ...] + 685 +
+[docs] + 686 @classmethod + 687 def from_dict(cls: type[DepthBoundResult], dct: Mapping[str, Any]) -> DepthBoundResult: + 688 cls._check_reason(dct) + 689 logs = tuple(LogEntry.from_dict(l) for l in dct['logs']) if 'logs' in dct else () + 690 return DepthBoundResult( + 691 state=State.from_dict(dct['state']), + 692 depth=dct['depth'], + 693 logs=logs, + 694 )
+
+ + 695 + 696 +
+[docs] + 697@final + 698@dataclass(frozen=True) + 699class TimeoutResult(ExecuteResult): + 700 reason = StopReason.TIMEOUT + 701 next_states = None + 702 rule = None + 703 + 704 state: State + 705 depth: int + 706 logs: tuple[LogEntry, ...] + 707 +
+[docs] + 708 @classmethod + 709 def from_dict(cls: type[TimeoutResult], dct: Mapping[str, Any]) -> TimeoutResult: + 710 cls._check_reason(dct) + 711 logs = tuple(LogEntry.from_dict(l) for l in dct['logs']) if 'logs' in dct else () + 712 return TimeoutResult( + 713 state=State.from_dict(dct['state']), + 714 depth=dct['depth'], + 715 logs=logs, + 716 )
+
+ + 717 + 718 +
+[docs] + 719@final + 720@dataclass(frozen=True) + 721class BranchingResult(ExecuteResult): + 722 reason = StopReason.BRANCHING + 723 rule = None + 724 + 725 state: State + 726 depth: int + 727 next_states: tuple[State, ...] + 728 logs: tuple[LogEntry, ...] + 729 +
+[docs] + 730 @classmethod + 731 def from_dict(cls: type[BranchingResult], dct: Mapping[str, Any]) -> BranchingResult: + 732 cls._check_reason(dct) + 733 logs = tuple(LogEntry.from_dict(l) for l in dct['logs']) if 'logs' in dct else () + 734 return BranchingResult( + 735 state=State.from_dict(dct['state']), + 736 depth=dct['depth'], + 737 next_states=tuple(State.from_dict(next_state) for next_state in dct['next-states']), + 738 logs=logs, + 739 )
+
+ + 740 + 741 +
+[docs] + 742@final + 743@dataclass(frozen=True) + 744class CutPointResult(ExecuteResult): + 745 reason = StopReason.CUT_POINT_RULE + 746 + 747 state: State + 748 depth: int + 749 next_states: tuple[State, ...] + 750 rule: str + 751 logs: tuple[LogEntry, ...] + 752 +
+[docs] + 753 @classmethod + 754 def from_dict(cls: type[CutPointResult], dct: Mapping[str, Any]) -> CutPointResult: + 755 cls._check_reason(dct) + 756 logs = tuple(LogEntry.from_dict(l) for l in dct['logs']) if 'logs' in dct else () + 757 return CutPointResult( + 758 state=State.from_dict(dct['state']), + 759 depth=dct['depth'], + 760 next_states=tuple(State.from_dict(next_state) for next_state in dct['next-states']), + 761 rule=dct['rule'], + 762 logs=logs, + 763 )
+
+ + 764 + 765 +
+[docs] + 766@final + 767@dataclass(frozen=True) + 768class TerminalResult(ExecuteResult): + 769 reason = StopReason.TERMINAL_RULE + 770 next_states = None + 771 + 772 state: State + 773 depth: int + 774 rule: str + 775 logs: tuple[LogEntry, ...] + 776 +
+[docs] + 777 @classmethod + 778 def from_dict(cls: type[TerminalResult], dct: Mapping[str, Any]) -> TerminalResult: + 779 cls._check_reason(dct) + 780 logs = tuple(LogEntry.from_dict(l) for l in dct['logs']) if 'logs' in dct else () + 781 return TerminalResult(state=State.from_dict(dct['state']), depth=dct['depth'], rule=dct['rule'], logs=logs)
+
+ + 782 + 783 +
+[docs] + 784@final + 785@dataclass(frozen=True) + 786class VacuousResult(ExecuteResult): + 787 reason = StopReason.VACUOUS + 788 next_states = None + 789 rule = None + 790 + 791 state: State + 792 depth: int + 793 logs: tuple[LogEntry, ...] + 794 +
+[docs] + 795 @classmethod + 796 def from_dict(cls: type[VacuousResult], dct: Mapping[str, Any]) -> VacuousResult: + 797 cls._check_reason(dct) + 798 logs = tuple(LogEntry.from_dict(l) for l in dct['logs']) if 'logs' in dct else () + 799 return VacuousResult( + 800 state=State.from_dict(dct['state']), + 801 depth=dct['depth'], + 802 logs=logs, + 803 )
+
+ + 804 + 805 +
+[docs] + 806@final + 807@dataclass(frozen=True) + 808class AbortedResult(ExecuteResult): + 809 reason = StopReason.ABORTED + 810 next_states = None + 811 rule = None + 812 + 813 state: State + 814 depth: int + 815 unknown_predicate: Pattern | None + 816 logs: tuple[LogEntry, ...] + 817 +
+[docs] + 818 @classmethod + 819 def from_dict(cls: type[AbortedResult], dct: Mapping[str, Any]) -> AbortedResult: + 820 cls._check_reason(dct) + 821 logs = tuple(LogEntry.from_dict(l) for l in dct['logs']) if 'logs' in dct else () + 822 return AbortedResult( + 823 state=State.from_dict(dct['state']), + 824 depth=dct['depth'], + 825 unknown_predicate=kore_term(dct['unknown-predicate']) if dct.get('unknown-predicate') else None, + 826 logs=logs, + 827 )
+
+ + 828 + 829 +
+[docs] + 830@final + 831@dataclass(frozen=True) + 832class ImpliesResult: + 833 valid: bool + 834 implication: Pattern + 835 substitution: Pattern | None + 836 predicate: Pattern | None + 837 logs: tuple[LogEntry, ...] + 838 +
+[docs] + 839 @staticmethod + 840 def from_dict(dct: Mapping[str, Any]) -> ImpliesResult: + 841 substitution = dct.get('condition', {}).get('substitution') + 842 predicate = dct.get('condition', {}).get('predicate') + 843 logs = tuple(LogEntry.from_dict(l) for l in dct['logs']) if 'logs' in dct else () + 844 return ImpliesResult( + 845 valid=dct['valid'], + 846 implication=kore_term(dct['implication']), + 847 substitution=kore_term(substitution) if substitution is not None else None, + 848 predicate=kore_term(predicate) if predicate is not None else None, + 849 logs=logs, + 850 )
+
+ + 851 + 852 +
+[docs] + 853class GetModelResult(ABC): # noqa: B024 +
+[docs] + 854 @staticmethod + 855 def from_dict(dct: Mapping[str, Any]) -> GetModelResult: + 856 status = dct['satisfiable'] + 857 match status: + 858 case 'Unknown': + 859 return UnknownResult() + 860 case 'Unsat': + 861 return UnsatResult() + 862 case 'Sat': + 863 substitution = dct.get('substitution') + 864 return SatResult(model=kore_term(substitution) if substitution else None) + 865 case _: + 866 raise ValueError(f'Unknown status: {status}')
+
+ + 867 + 868 +
+[docs] + 869@final + 870@dataclass(frozen=True) + 871class UnknownResult(GetModelResult): ...
+ + 872 + 873 +
+[docs] + 874@final + 875@dataclass(frozen=True) + 876class UnsatResult(GetModelResult): ...
+ + 877 + 878 +
+[docs] + 879@final + 880@dataclass(frozen=True) + 881class SatResult(GetModelResult): + 882 model: Pattern | None
+ + 883 + 884 +
+[docs] + 885class KoreClient(ContextManager['KoreClient']): + 886 _KORE_JSON_VERSION: Final = 1 + 887 + 888 port: int + 889 _client: JsonRpcClientFacade + 890 + 891 def __init__( + 892 self, + 893 host: str, + 894 port: int, + 895 *, + 896 timeout: int | None = None, + 897 bug_report: BugReport | None = None, + 898 bug_report_id: str | None = None, + 899 transport: TransportType = TransportType.SINGLE_SOCKET, + 900 dispatch: dict[str, list[tuple[str, int, TransportType]]] | None = None, + 901 ): + 902 if dispatch is None: + 903 dispatch = {} + 904 self.port = port + 905 self._client = JsonRpcClientFacade( + 906 host, + 907 port, + 908 transport, + 909 timeout=timeout, + 910 bug_report=bug_report, + 911 bug_report_id=bug_report_id, + 912 dispatch=dispatch, + 913 ) + 914 + 915 def __enter__(self) -> KoreClient: + 916 return self + 917 + 918 def __exit__(self, *args: Any) -> None: + 919 self._client.__exit__(*args) + 920 +
+[docs] + 921 def close(self) -> None: + 922 self._client.close()
+ + 923 + 924 def _request(self, method: str, **params: Any) -> dict[str, Any]: + 925 try: + 926 return self._client.request(method, **params) + 927 except JsonRpcError as err: + 928 raise self._error(err) from err + 929 + 930 def _error(self, err: JsonRpcError) -> KoreClientError: + 931 assert err.code not in {-32601, -32602}, 'Malformed Kore-RPC request' + 932 match err.code: + 933 case 1: + 934 return ParseError(error=err.data) + 935 case 2: + 936 return PatternError(error=err.data['error'], context=err.data['context']) + 937 case 3: + 938 return UnknownModuleError(module_name=err.data) + 939 case 4: + 940 return ImplicationError(error=err.data['error'], context=err.data['context']) + 941 case 5: + 942 return SmtSolverError(error=err.data['error'], pattern=kore_term(err.data['term'])) + 943 case 8: + 944 return InvalidModuleError(error=err.data['error'], context=err.data.get('context')) + 945 case 9: + 946 return DuplicateModuleError(module_name=err.data) + 947 case _: + 948 return DefaultError(message=err.message, code=err.code, data=err.data) + 949 + 950 @staticmethod + 951 def _state(pattern: Pattern) -> dict[str, Any]: + 952 return { + 953 'format': 'KORE', + 954 'version': KoreClient._KORE_JSON_VERSION, + 955 'term': pattern.dict, + 956 } + 957 +
+[docs] + 958 def execute( + 959 self, + 960 pattern: Pattern, + 961 *, + 962 max_depth: int | None = None, + 963 assume_state_defined: bool | None = None, + 964 cut_point_rules: Iterable[str] | None = None, + 965 terminal_rules: Iterable[str] | None = None, + 966 moving_average_step_timeout: bool | None = None, + 967 step_timeout: int | None = None, + 968 module_name: str | None = None, + 969 log_successful_rewrites: bool | None = None, + 970 log_failed_rewrites: bool | None = None, + 971 ) -> ExecuteResult: + 972 params = filter_none( + 973 { + 974 'max-depth': max_depth, + 975 'assume-state-defined': assume_state_defined, + 976 'cut-point-rules': list(cut_point_rules) if cut_point_rules is not None else None, + 977 'terminal-rules': list(terminal_rules) if terminal_rules is not None else None, + 978 'moving-average-step-timeout': moving_average_step_timeout, + 979 'step-timeout': step_timeout, + 980 'module': module_name, + 981 'state': self._state(pattern), + 982 'log-successful-rewrites': log_successful_rewrites, + 983 'log-failed-rewrites': log_failed_rewrites, + 984 } + 985 ) + 986 + 987 result = self._request('execute', **params) + 988 return ExecuteResult.from_dict(result)
+ + 989 +
+[docs] + 990 def implies( + 991 self, + 992 antecedent: Pattern, + 993 consequent: Pattern, + 994 *, + 995 module_name: str | None = None, + 996 assume_defined: bool = False, + 997 ) -> ImpliesResult: + 998 params = filter_none( + 999 { +1000 'antecedent': self._state(antecedent), +1001 'consequent': self._state(consequent), +1002 'module': module_name, +1003 'assume-defined': assume_defined, +1004 } +1005 ) +1006 +1007 result = self._request('implies', **params) +1008 return ImpliesResult.from_dict(result)
+ +1009 +
+[docs] +1010 def simplify( +1011 self, +1012 pattern: Pattern, +1013 *, +1014 module_name: str | None = None, +1015 ) -> tuple[Pattern, tuple[LogEntry, ...]]: +1016 params = filter_none( +1017 { +1018 'state': self._state(pattern), +1019 'module': module_name, +1020 } +1021 ) +1022 +1023 result = self._request('simplify', **params) +1024 logs = tuple(LogEntry.from_dict(l) for l in result['logs']) if 'logs' in result else () +1025 return kore_term(result['state']), logs
+ +1026 +
+[docs] +1027 def get_model(self, pattern: Pattern, module_name: str | None = None) -> GetModelResult: +1028 params = filter_none( +1029 { +1030 'state': self._state(pattern), +1031 'module': module_name, +1032 } +1033 ) +1034 +1035 result = self._request('get-model', **params) +1036 return GetModelResult.from_dict(result)
+ +1037 +
+[docs] +1038 def add_module(self, module: Module, *, name_as_id: bool | None = None) -> str: +1039 params = filter_none( +1040 { +1041 'module': module.text, +1042 'name-as-id': name_as_id, +1043 } +1044 ) +1045 result = self._request('add-module', **params) +1046 return result['module']
+
+ +1047 +1048 +
+[docs] +1049class KoreServerArgs(TypedDict, total=False): +1050 kompiled_dir: Required[str | Path] +1051 module_name: Required[str] +1052 port: int | None +1053 command: str | Iterable[str] | None +1054 smt_timeout: int | None +1055 smt_retry_limit: int | None +1056 smt_reset_interval: int | None +1057 smt_tactic: str | None +1058 log_axioms_file: Path | None +1059 haskell_log_format: KoreExecLogFormat | None +1060 haskell_log_entries: Iterable[str] | None +1061 bug_report: BugReport | None +1062 haskell_threads: int | None
+ +1063 +1064 +
+[docs] +1065class KoreServerInfo(NamedTuple): +1066 pid: int +1067 host: str +1068 port: int
+ +1069 +1070 +
+[docs] +1071class KoreServer(ContextManager['KoreServer']): +1072 _proc: Popen +1073 _stdout_reader: Thread +1074 _stderr_reader: Thread +1075 _info: KoreServerInfo +1076 +1077 _kompiled_dir: Path +1078 _definition_file: Path +1079 _module_name: str +1080 _port: int +1081 _command: list[str] +1082 _smt_timeout: int | None +1083 _smt_retry_limit: int | None +1084 _smt_reset_interval: int | None +1085 _smt_tactic: str | None +1086 _log_axioms_file: Path | None +1087 _haskell_log_format: KoreExecLogFormat +1088 _haskell_log_entries: list[str] +1089 _haskell_threads: int | None +1090 +1091 _bug_report: BugReport | None +1092 +1093 def __init__(self, args: KoreServerArgs): +1094 self._kompiled_dir = Path(args['kompiled_dir']) +1095 self._definition_file = self._kompiled_dir / 'definition.kore' +1096 self._module_name = args['module_name'] +1097 self._port = args.get('port') or 0 +1098 +1099 if not (command := args.get('command')): +1100 self._command = ['kore-rpc'] +1101 elif type(command) is str: +1102 self._command = command.split() +1103 else: +1104 self._command = list(command) +1105 +1106 self._smt_timeout = args.get('smt_timeout') +1107 self._smt_retry_limit = args.get('smt_retry_limit') +1108 self._smt_reset_interval = args.get('smt_reset_interval') +1109 self._smt_tactic = args.get('smt_tactic') +1110 self._log_axioms_file = args.get('log_axioms_file') +1111 +1112 self._haskell_log_format = args.get('haskell_log_format') or KoreExecLogFormat.ONELINE +1113 +1114 if haskell_log_entries := args.get('haskell_log_entries'): +1115 self._haskell_log_entries = list(haskell_log_entries) +1116 else: +1117 self._haskell_log_entries = [] +1118 +1119 self._haskell_threads = args.get('haskell_threads') or 1 +1120 +1121 self._bug_report = args.get('bug_report') +1122 +1123 self._validate() +1124 self.start() +1125 +1126 @property +1127 def pid(self) -> int: +1128 return self._info.pid +1129 +1130 @property +1131 def host(self) -> str: +1132 return self._info.host +1133 +1134 @property +1135 def port(self) -> int: +1136 return self._info.port +1137 +1138 def __enter__(self) -> KoreServer: +1139 return self +1140 +1141 def __exit__(self, *args: Any) -> None: +1142 self.close() +1143 +
+[docs] +1144 def start(self) -> None: +1145 if self._bug_report: +1146 self._populate_bug_report(self._bug_report) +1147 +1148 cli_args = self._cli_args() +1149 +1150 new_env = os.environ.copy() +1151 new_env['GHCRTS'] = f'-N{self._haskell_threads}' +1152 +1153 _LOGGER.info(f'Starting KoreServer: {" ".join(cli_args)}') +1154 self._proc, self._stdout_reader, self._stderr_reader = self._create_proc(cli_args, new_env) +1155 pid = self._proc.pid +1156 host, port = self._get_host_and_port(pid) +1157 if self._port: +1158 assert port == self._port +1159 self._info = KoreServerInfo(pid=pid, host=host, port=port) +1160 _LOGGER.info(f'KoreServer started: {self.host}:{self.port}, pid={self.pid}')
+ +1161 +1162 @staticmethod +1163 def _create_proc(args: list[str], env: dict[str, str]) -> tuple[Popen, Thread, Thread]: +1164 popen = Popen(args, env=env, stdin=DEVNULL, stdout=PIPE, stderr=PIPE, text=True) +1165 +1166 def reader(fh: IO[str], prefix: str) -> None: +1167 for line in fh: +1168 _LOGGER.info(f'[PID={popen.pid}][{prefix}] {line.rstrip()}') +1169 +1170 stdout_reader = Thread(target=reader, args=(popen.stdout, 'stdo')) +1171 stdout_reader.daemon = True +1172 stdout_reader.start() +1173 +1174 stderr_reader = Thread(target=reader, args=(popen.stderr, 'stde')) +1175 stderr_reader.daemon = True +1176 stderr_reader.start() +1177 +1178 return popen, stdout_reader, stderr_reader +1179 +
+[docs] +1180 def close(self) -> None: +1181 _LOGGER.info(f'Stopping KoreServer: {self.host}:{self.port}, pid={self.pid}') +1182 if '--solver-transcript' in self._command: +1183 self._proc.send_signal(SIGINT) +1184 else: +1185 self._proc.terminate() +1186 self._proc.wait() +1187 self._stdout_reader.join() +1188 self._stderr_reader.join() +1189 _LOGGER.info(f'KoreServer stopped: {self.host}:{self.port}, pid={self.pid}')
+ +1190 +1191 def _validate(self) -> None: +1192 def _check_none_or_positive(n: int | None, param_name: str) -> None: +1193 if n is not None and n <= 0: +1194 raise ValueError(f'Expected positive integer for: {param_name}, got: {n}') +1195 +1196 def _check_none_or_nonnegative(n: int | None, param_name: str) -> None: +1197 if n is not None and n < 0: +1198 raise ValueError(f'Expected non-negative integer for: {param_name}, got: {n}') +1199 +1200 check_dir_path(self._kompiled_dir) +1201 check_file_path(self._definition_file) +1202 _check_none_or_positive(self._smt_timeout, 'smt_timeout') +1203 _check_none_or_nonnegative(self._smt_retry_limit, 'smt_retry_limit') +1204 _check_none_or_positive(self._smt_reset_interval, 'smt_reset_interval') +1205 +1206 def _cli_args(self) -> list[str]: +1207 server_args = ['--module', self._module_name, '--server-port', str(self._port)] +1208 res = list(self._command) +1209 res += [str(self._definition_file)] +1210 res += server_args +1211 res += self._extra_args() +1212 return res +1213 +1214 def _extra_args(self) -> list[str]: +1215 """Command line arguments that are intended to be included in the bug report.""" +1216 smt_server_args = [] +1217 if self._smt_timeout is not None: +1218 smt_server_args += ['--smt-timeout', str(self._smt_timeout)] +1219 if self._smt_retry_limit is not None: +1220 smt_server_args += ['--smt-retry-limit', str(self._smt_retry_limit)] +1221 if self._smt_reset_interval is not None: +1222 smt_server_args += ['--smt-reset-interval', str(self._smt_reset_interval)] +1223 if self._smt_tactic is not None: +1224 smt_server_args += ['--smt-tactic', self._smt_tactic] +1225 +1226 if self._log_axioms_file is not None: +1227 haskell_log_args = [ +1228 '--log', +1229 str(self._log_axioms_file), +1230 '--log-format', +1231 self._haskell_log_format.value, +1232 '--log-entries', +1233 ','.join(self._haskell_log_entries), +1234 ] +1235 else: +1236 haskell_log_args = [] +1237 +1238 return smt_server_args + haskell_log_args +1239 +1240 def _populate_bug_report(self, bug_report: BugReport) -> None: +1241 prog_name = self._command[0] +1242 bug_report.add_file(self._definition_file, Path('definition.kore')) +1243 version_info = run_process_2((prog_name, '--version'), logger=_LOGGER).stdout.strip() +1244 bug_report.add_file_contents(version_info, Path('server_version.txt')) +1245 server_instance = { +1246 'exe': prog_name, +1247 'module': self._module_name, +1248 'extra_args': self._command[1:] + self._extra_args(), +1249 } +1250 bug_report.add_file_contents(json.dumps(server_instance), Path('server_instance.json')) +1251 +1252 @staticmethod +1253 def _get_host_and_port(pid: int) -> tuple[str, int]: +1254 proc = Process(pid) +1255 while not proc.connections(): +1256 sleep(0.01) +1257 conns = proc.connections() +1258 assert len(conns) == 1 +1259 conn = conns[0] +1260 return conn.laddr
+ +1261 +1262 +
+[docs] +1263class FallbackReason(Enum): +1264 BRANCHING = 'Branching' +1265 STUCK = 'Stuck' +1266 ABORTED = 'Aborted'
+ +1267 +1268 +
+[docs] +1269class BoosterServerArgs(KoreServerArgs, total=False): +1270 llvm_kompiled_dir: Required[str | Path] +1271 fallback_on: Iterable[str | FallbackReason] | None +1272 interim_simplification: int | None +1273 no_post_exec_simplify: bool | None +1274 log_context: Iterable[str] | None +1275 not_log_context: Iterable[str] | None
+ +1276 +1277 +
+[docs] +1278class BoosterServer(KoreServer): +1279 _llvm_kompiled_dir: Path +1280 _dylib: Path +1281 _llvm_definition: Path +1282 _llvm_dt: Path +1283 +1284 _fallback_on: list[FallbackReason] | None +1285 _interim_simplification: int | None +1286 _no_post_exec_simplify: bool +1287 _log_context: list[str] +1288 _not_log_context: list[str] +1289 +1290 def __init__(self, args: BoosterServerArgs): +1291 self._llvm_kompiled_dir = Path(args['llvm_kompiled_dir']) +1292 +1293 ext: str +1294 match sys.platform: +1295 case 'linux': +1296 ext = 'so' +1297 case 'darwin': +1298 ext = 'dylib' +1299 case _: +1300 raise ValueError('Unsupported platform: {sys.platform}') +1301 +1302 self._dylib = self._llvm_kompiled_dir / f'interpreter.{ext}' +1303 self._llvm_definition = self._llvm_kompiled_dir / 'definition.kore' +1304 self._llvm_dt = self._llvm_kompiled_dir / 'dt' +1305 +1306 if fallback_on := args.get('fallback_on'): +1307 self._fallback_on = [FallbackReason(reason) for reason in fallback_on] +1308 else: +1309 self._fallback_on = None +1310 +1311 self._interim_simplification = args.get('interim_simplification') +1312 self._no_post_exec_simplify = bool(args.get('no_post_exec_simplify')) +1313 self._log_context = list(args.get('log_context') or []) +1314 self._not_log_context = list(args.get('not_log_context') or []) +1315 +1316 if not args.get('command'): +1317 args['command'] = 'kore-rpc-booster' +1318 +1319 super().__init__(args) +1320 +1321 def _validate(self) -> None: +1322 check_dir_path(self._llvm_kompiled_dir) +1323 check_file_path(self._dylib) +1324 check_file_path(self._llvm_definition) +1325 check_dir_path(self._llvm_dt) +1326 +1327 if self._fallback_on is not None and not self._fallback_on: +1328 raise ValueError("'fallback_on' must not be empty") +1329 +1330 if self._interim_simplification and self._interim_simplification < 0: +1331 raise ValueError(f"'interim_simplification' must not be negative, got: {self._interim_simplification}") +1332 super()._validate() +1333 +1334 def _extra_args(self) -> list[str]: +1335 res = super()._extra_args() +1336 res += ['--llvm-backend-library', str(self._dylib)] +1337 if self._fallback_on is not None: +1338 res += ['--fallback-on', ','.join(reason.value for reason in self._fallback_on)] +1339 if self._interim_simplification is not None: +1340 res += ['--interim-simplification', str(self._interim_simplification)] +1341 if self._no_post_exec_simplify: +1342 res += ['--no-post-exec-simplify'] +1343 res += [arg for glob in self._log_context for arg in ['--log-context', glob]] +1344 res += [arg for glob in self._not_log_context for arg in ['--not-log-context', glob]] +1345 return res +1346 +1347 def _populate_bug_report(self, bug_report: BugReport) -> None: +1348 super()._populate_bug_report(bug_report) +1349 bug_report.add_file(self._llvm_definition, Path('llvm_definition/definition.kore')) +1350 llvm_version = run_process_2('llvm-backend-version', logger=_LOGGER).stdout.strip() +1351 bug_report.add_file_contents(llvm_version, Path('llvm_version.txt'))
+ +1352 +1353 +
+[docs] +1354def kore_server( +1355 definition_dir: str | Path, +1356 module_name: str, +1357 *, +1358 port: int | None = None, +1359 command: str | Iterable[str] | None = None, +1360 smt_timeout: int | None = None, +1361 smt_retry_limit: int | None = None, +1362 smt_tactic: str | None = None, +1363 log_axioms_file: Path | None = None, +1364 haskell_log_format: KoreExecLogFormat | None = None, +1365 haskell_log_entries: Iterable[str] | None = None, +1366 haskell_threads: int | None = None, +1367 # booster +1368 llvm_definition_dir: Path | None = None, +1369 fallback_on: Iterable[str | FallbackReason] | None = None, +1370 interim_simplification: int | None = None, +1371 no_post_exec_simplify: bool | None = None, +1372 # --- +1373 bug_report: BugReport | None = None, +1374) -> KoreServer: +1375 kore_args: KoreServerArgs = { +1376 'kompiled_dir': definition_dir, +1377 'module_name': module_name, +1378 'port': port, +1379 'command': command, +1380 'smt_timeout': smt_timeout, +1381 'smt_retry_limit': smt_retry_limit, +1382 'log_axioms_file': log_axioms_file, +1383 'smt_tactic': smt_tactic, +1384 'haskell_log_format': haskell_log_format, +1385 'haskell_log_entries': haskell_log_entries, +1386 'haskell_threads': haskell_threads, +1387 'bug_report': bug_report, +1388 } +1389 if llvm_definition_dir: +1390 booster_args: BoosterServerArgs = { +1391 'llvm_kompiled_dir': llvm_definition_dir, +1392 'fallback_on': fallback_on, +1393 'interim_simplification': interim_simplification, +1394 'no_post_exec_simplify': no_post_exec_simplify, +1395 **kore_args, +1396 } +1397 return BoosterServer(booster_args) +1398 return KoreServer(kore_args)
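A hedged end-to-end sketch: start a server for a kompiled definition and connect a client to whatever port it picked. The directory, module name and init_pattern are illustrative.

    with kore_server('lesson-kompiled', module_name='LESSON') as server:
        with KoreClient('localhost', server.port) as client:
            result = client.execute(init_pattern)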
+ +
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kore/rule.html b/pyk/_modules/pyk/kore/rule.html new file mode 100644 index 00000000000..e2898221ce4 --- /dev/null +++ b/pyk/_modules/pyk/kore/rule.html @@ -0,0 +1,608 @@ + + + + + + + + pyk.kore.rule — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +

Source code for pyk.kore.rule

+  1from __future__ import annotations
+  2
+  3import logging
+  4from abc import ABC, abstractmethod
+  5from dataclasses import dataclass
+  6from functools import reduce
+  7from typing import TYPE_CHECKING, Generic, TypeVar, cast, final
+  8
+  9from .prelude import BOOL, SORT_GENERATED_TOP_CELL, TRUE, inj
+ 10from .syntax import (
+ 11    DV,
+ 12    And,
+ 13    App,
+ 14    Axiom,
+ 15    Ceil,
+ 16    Equals,
+ 17    EVar,
+ 18    Implies,
+ 19    In,
+ 20    Not,
+ 21    Pattern,
+ 22    Rewrites,
+ 23    SortApp,
+ 24    SortVar,
+ 25    String,
+ 26    Top,
+ 27)
+ 28
+ 29if TYPE_CHECKING:
+ 30    from typing import Final
+ 31
+ 32    from .syntax import Definition, Sort
+ 33
+ 34    Attrs = dict[str, tuple[Pattern, ...]]
+ 35
+ 36
+ 37P = TypeVar('P', bound=Pattern)
+ 38
+ 39
+ 40_LOGGER: Final = logging.getLogger(__name__)
+ 41
+ 42
+ 43# There's a simplification rule with irregular form in the prelude module INJ.
+ 44# This rule is skipped in Rule.extract_all.
+ 45_S1, _S2, _S3, _R = (SortVar(name) for name in ['S1', 'S2', 'S3', 'R'])
+ 46_T: Final = EVar('T', _S1)
+ 47# axiom {S1, S2, S3, R} \equals{S3, R}(inj{S2, S3}(inj{S1, S2}(T:S1)), inj{S1, S3}(T:S1)) [simplification{}()]
+ 48_INJ_AXIOM: Final = Axiom(
+ 49    vars=(_S1, _S2, _S3, _R),
+ 50    pattern=Equals(_S3, _R, inj(_S2, _S3, inj(_S1, _S2, _T)), inj(_S1, _S3, _T)),
+ 51    attrs=(App('simplification'),),
+ 52)
+ 53
+ 54# The following attributes mark axioms that are not rule axioms.
+ 55# Such axioms are skipped in Rule.extract_all.
+ 56_SKIPPED_ATTRS: Final = (
+ 57    'assoc',
+ 58    'constructor',
+ 59    'functional',
+ 60    'idem',
+ 61    'symbol-overload',
+ 62    'subsort',
+ 63    'unit',
+ 64)
+ 65
+ 66
+
+[docs] + 67class Rule(ABC): + 68 lhs: Pattern + 69 rhs: Pattern + 70 req: Pattern | None + 71 ens: Pattern | None + 72 sort: Sort + 73 priority: int + 74 +
+[docs] + 75 @abstractmethod + 76 def to_axiom(self) -> Axiom: ...
+ + 77 +
+[docs] + 78 @staticmethod + 79 def from_axiom(axiom: Axiom) -> Rule: + 80 if isinstance(axiom.pattern, Rewrites): + 81 return RewriteRule.from_axiom(axiom) + 82 + 83 if 'simplification' not in axiom.attrs_by_key: + 84 return FunctionRule.from_axiom(axiom) + 85 + 86 match axiom.pattern: + 87 case Implies(right=Equals(left=App())): + 88 return AppRule.from_axiom(axiom) + 89 case Implies(right=Equals(left=Ceil())): + 90 return CeilRule.from_axiom(axiom) + 91 case Implies(right=Equals(left=Equals())): + 92 return EqualsRule.from_axiom(axiom) + 93 case _: + 94 raise ValueError(f'Cannot parse simplification rule: {axiom.text}')
+ + 95 +
+[docs] + 96 @staticmethod + 97 def is_rule(axiom: Axiom) -> bool: + 98 if axiom == _INJ_AXIOM: + 99 return False +100 +101 if any(attr in axiom.attrs_by_key for attr in _SKIPPED_ATTRS): +102 return False +103 +104 return True
+ +105 +
+[docs] +106 @staticmethod +107 def extract_all(defn: Definition) -> list[Rule]: +108 return [Rule.from_axiom(axiom) for axiom in defn.axioms if Rule.is_rule(axiom)]
+
+ +109 +110 +
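Rule.from_axiom dispatches on the shape of the axiom: a Rewrites pattern becomes a RewriteRule, an axiom without the simplification attribute becomes a FunctionRule, and simplification axioms are split into AppRule, CeilRule or EqualsRule by the head of the Equals on the right of the implication. Rule.extract_all applies this to every axiom of a Definition, skipping _INJ_AXIOM and any axiom carrying one of _SKIPPED_ATTRS. A short sketch, assuming the definition is parsed with pyk.kore.parser.KoreParser from a placeholder definition.kore file:

from collections import Counter

from pyk.kore.parser import KoreParser
from pyk.kore.rule import Rule

with open('definition.kore') as f:               # placeholder path to a kompiled definition
    definition = KoreParser(f.read()).definition()

rules = Rule.extract_all(definition)
print(Counter(type(rule).__name__ for rule in rules))   # e.g. counts of RewriteRule, FunctionRule, ...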
+[docs] +111@final +112@dataclass(frozen=True) +113class RewriteRule(Rule): +114 sort = SORT_GENERATED_TOP_CELL +115 +116 lhs: App +117 rhs: App +118 req: Pattern | None +119 ens: Pattern | None +120 ctx: EVar | None +121 priority: int +122 uid: str +123 label: str | None +124 +
+[docs] +125 def to_axiom(self) -> Axiom: +126 lhs = self.lhs if self.ctx is None else And(self.sort, (self.lhs, self.ctx)) +127 req = _to_ml_pred(self.req, self.sort) +128 ens = _to_ml_pred(self.ens, self.sort) +129 return Axiom( +130 (), +131 Rewrites( +132 self.sort, +133 And(self.sort, (lhs, req)), +134 And(self.sort, (self.rhs, ens)), +135 ), +136 )
+ +137 +
+[docs] +138 @staticmethod +139 def from_axiom(axiom: Axiom) -> RewriteRule: +140 lhs, rhs, req, ens, ctx = RewriteRule._extract(axiom) +141 priority = _extract_priority(axiom) +142 uid = _extract_uid(axiom) +143 label = _extract_label(axiom) +144 return RewriteRule( +145 lhs=lhs, +146 rhs=rhs, +147 req=req, +148 ens=ens, +149 ctx=ctx, +150 priority=priority, +151 uid=uid, +152 label=label, +153 )
+ +154 +155 @staticmethod +156 def _extract(axiom: Axiom) -> tuple[App, App, Pattern | None, Pattern | None, EVar | None]: +157 match axiom.pattern: +158 case Rewrites(left=And(ops=(_lhs, _req)), right=_rhs): +159 pass +160 case _: +161 raise ValueError(f'Cannot extract rewrite rule from axiom: {axiom.text}') +162 +163 ctx: EVar | None = None +164 match _lhs: +165 case App("Lbl'-LT-'generatedTop'-GT-'") as lhs: +166 pass +167 case And(_, (App("Lbl'-LT-'generatedTop'-GT-'") as lhs, EVar("Var'Hash'Configuration") as ctx)): +168 pass +169 case _: +170 raise ValueError(f'Cannot extract LHS configuration from axiom: {axiom.text}') +171 +172 req = _extract_condition(_req) +173 rhs, ens = _extract_rhs(_rhs) +174 match rhs: +175 case App("Lbl'-LT-'generatedTop'-GT-'"): +176 pass +177 case _: +178 raise ValueError(f'Cannot extract RHS configuration from axiom: {axiom.text}') +179 +180 return lhs, rhs, req, ens, ctx
+ +181 +182 +
+[docs] +183@final +184@dataclass(frozen=True) +185class FunctionRule(Rule): +186 lhs: App +187 rhs: Pattern +188 req: Pattern | None +189 ens: Pattern | None +190 sort: Sort +191 arg_sorts: tuple[Sort, ...] +192 anti_left: Pattern | None +193 priority: int +194 +
+[docs] +195 def to_axiom(self) -> Axiom: +196 R = SortVar('R') # noqa N806 +197 +198 def arg_list(rest: Pattern, arg_pair: tuple[EVar, Pattern]) -> Pattern: +199 var, arg = arg_pair +200 return And(R, (In(var.sort, R, var, arg), rest)) +201 +202 vars = tuple(EVar(f'X{i}', sort) for i, sort in enumerate(self.arg_sorts)) +203 +204 # \and{R}(\in{S1, R}(X1 : S1, Arg1), \and{R}(\in{S2, R}(X2 : S2, Arg2), \top{R}())) etc. +205 _args = reduce( +206 arg_list, +207 reversed(tuple(zip(vars, self.lhs.args, strict=True))), +208 cast('Pattern', Top(R)), +209 ) +210 +211 _req = _to_ml_pred(self.req, R) +212 req = And(R, (_req, _args)) +213 if self.anti_left: +214 req = And(R, (Not(R, self.anti_left), req)) +215 +216 app = self.lhs.let(args=vars) +217 ens = _to_ml_pred(self.ens, self.sort) +218 +219 return Axiom( +220 (R,), +221 Implies( +222 R, +223 req, +224 Equals(self.sort, R, app, And(self.sort, (self.rhs, ens))), +225 ), +226 )
+ +227 +
+[docs] +228 @staticmethod +229 def from_axiom(axiom: Axiom) -> FunctionRule: +230 anti_left: Pattern | None = None +231 match axiom.pattern: +232 case Implies( +233 left=And(ops=(Not(pattern=anti_left), And(ops=(_req, _args)))), +234 right=Equals(op_sort=sort, left=App() as app, right=_rhs), +235 ): +236 pass +237 case Implies( +238 left=And(ops=(_req, _args)), +239 right=Equals(op_sort=sort, left=App() as app, right=_rhs), +240 ): +241 pass +242 case _: +243 raise ValueError(f'Cannot extract function rule from axiom: {axiom.text}') +244 +245 arg_sorts, args = FunctionRule._extract_args(_args) +246 lhs = app.let(args=args) +247 req = _extract_condition(_req) +248 rhs, ens = _extract_rhs(_rhs) +249 +250 priority = _extract_priority(axiom) +251 return FunctionRule( +252 lhs=lhs, +253 rhs=rhs, +254 req=req, +255 ens=ens, +256 sort=sort, +257 arg_sorts=arg_sorts, +258 anti_left=anti_left, +259 priority=priority, +260 )
+ +261 +262 @staticmethod +263 def _extract_args(pattern: Pattern) -> tuple[tuple[Sort, ...], tuple[Pattern, ...]]: +264 match pattern: +265 case Top(): +266 return (), () +267 case And(ops=(In(left=EVar(sort=sort), right=arg), rest)): +268 sorts, args = FunctionRule._extract_args(rest) +269 return (sort,) + sorts, (arg,) + args +270 case _: +271 raise ValueError(f'Cannot extract argument list from pattern: {pattern.text}')
+ +272 +273 +
+[docs] +274class SimpliRule(Rule, Generic[P], ABC): +275 lhs: P +276 sort: Sort +277 +
+[docs] +278 def to_axiom(self) -> Axiom: +279 R = SortVar('R') # noqa N806 +280 +281 vars = (R, self.sort) if isinstance(self.sort, SortVar) else (R,) +282 req = _to_ml_pred(self.req, R) +283 ens = _to_ml_pred(self.ens, self.sort) +284 +285 return Axiom( +286 vars, +287 Implies( +288 R, +289 req, +290 Equals(self.sort, R, self.lhs, And(self.sort, (self.rhs, ens))), +291 ), +292 attrs=( +293 App( +294 'simplification', +295 args=() if self.priority == 50 else (String(str(self.priority)),), +296 ), +297 ), +298 )
+ +299 +300 @staticmethod +301 def _extract(axiom: Axiom, lhs_type: type[P]) -> tuple[P, Pattern, Pattern | None, Pattern | None, Sort]: +302 match axiom.pattern: +303 case Implies(left=_req, right=Equals(op_sort=sort, left=lhs, right=_rhs)): +304 req = _extract_condition(_req) +305 rhs, ens = _extract_rhs(_rhs) +306 if not isinstance(lhs, lhs_type): +307 raise ValueError(f'Invalid LHS type from simplification axiom: {axiom.text}') +308 return lhs, rhs, req, ens, sort +309 case _: +310 raise ValueError(f'Cannot extract simplification rule from axiom: {axiom.text}')
+ +311 +312 +
+[docs] +313@final +314@dataclass(frozen=True) +315class AppRule(SimpliRule[App]): +316 lhs: App +317 rhs: Pattern +318 req: Pattern | None +319 ens: Pattern | None +320 sort: Sort +321 priority: int +322 +
+[docs] +323 @staticmethod +324 def from_axiom(axiom: Axiom) -> AppRule: +325 lhs, rhs, req, ens, sort = SimpliRule._extract(axiom, App) +326 priority = _extract_simpl_priority(axiom) +327 return AppRule( +328 lhs=lhs, +329 rhs=rhs, +330 req=req, +331 ens=ens, +332 sort=sort, +333 priority=priority, +334 )
+
+ +335 +336 +
+[docs] +337@final +338@dataclass(frozen=True) +339class CeilRule(SimpliRule[Ceil]): +340 lhs: Ceil +341 rhs: Pattern +342 req: Pattern | None +343 ens: Pattern | None +344 sort: Sort +345 priority: int +346 +
+[docs] +347 @staticmethod +348 def from_axiom(axiom: Axiom) -> CeilRule: +349 lhs, rhs, req, ens, sort = SimpliRule._extract(axiom, Ceil) +350 priority = _extract_simpl_priority(axiom) +351 return CeilRule( +352 lhs=lhs, +353 rhs=rhs, +354 req=req, +355 ens=ens, +356 sort=sort, +357 priority=priority, +358 )
+
+ +359 +360 +
+[docs] +361@final +362@dataclass(frozen=True) +363class EqualsRule(SimpliRule[Equals]): +364 lhs: Equals +365 rhs: Pattern +366 req: Pattern | None +367 ens: Pattern | None +368 sort: Sort +369 priority: int +370 +
+[docs] +371 @staticmethod +372 def from_axiom(axiom: Axiom) -> EqualsRule: +373 lhs, rhs, req, ens, sort = SimpliRule._extract(axiom, Equals) +374 priority = _extract_simpl_priority(axiom) +375 return EqualsRule( +376 lhs=lhs, +377 rhs=rhs, +378 req=req, +379 ens=ens, +380 sort=sort, +381 priority=priority, +382 )
+
+ +383 +384 +385def _extract_rhs(pattern: Pattern) -> tuple[Pattern, Pattern | None]: +386 match pattern: +387 case And(ops=(rhs, _ens)): +388 return rhs, _extract_condition(_ens) +389 case _: +390 raise ValueError(f'Cannot extract RHS from pattern: {pattern.text}') +391 +392 +393def _extract_condition(pattern: Pattern) -> Pattern | None: +394 match pattern: +395 case Top(): +396 return None +397 case Equals(left=cond, right=DV(SortApp('SortBool'), String('true'))): +398 return cond +399 case _: +400 raise ValueError(f'Cannot extract condition from pattern: {pattern.text}') +401 +402 +403def _extract_uid(axiom: Axiom) -> str: +404 attrs = axiom.attrs_by_key +405 match attrs["UNIQUE'Unds'ID"]: +406 case App(args=(String(uid),)): +407 return uid +408 case _: +409 raise ValueError(f'Cannot extract uid from axiom: {axiom.text}') +410 +411 +412def _extract_label(axiom: Axiom) -> str | None: +413 attrs = axiom.attrs_by_key +414 match attrs.get('label'): +415 case App(args=(String(label),)): +416 return label +417 case None: +418 return None +419 case _: +420 raise ValueError(f'Cannot extract label from axiom: {axiom.text}') +421 +422 +423def _extract_priority(axiom: Axiom) -> int: +424 attrs = axiom.attrs_by_key +425 match attrs.get('priority'): +426 case App(args=(String(p),)): +427 assert 'owise' not in attrs +428 return int(p) +429 case None: +430 return 200 if 'owise' in attrs else 50 +431 case _: +432 raise ValueError(f'Cannot extract priority from axiom: {axiom.text}') +433 +434 +435def _extract_simpl_priority(axiom: Axiom) -> int: +436 attrs = axiom.attrs_by_key +437 match attrs['simplification']: +438 case App(args=() | (String(''),)): +439 return 50 +440 case App(args=(String(p),)): +441 return int(p) +442 case _: +443 raise ValueError(f'Cannot extract simplification priority from axiom: {axiom.text}') +444 +445 +446def _to_ml_pred(pattern: Pattern | None, sort: Sort) -> Pattern: +447 if pattern is None: +448 return Top(sort) +449 +450 return Equals(BOOL, sort, pattern, TRUE) +
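The private helpers above encode the attribute conventions for priorities: ordinary axioms default to priority 50, owise alone means 200, and an explicit priority attribute always wins; for simplification axioms the priority is the optional argument of the simplification attribute, defaulting to 50. A minimal in-module sketch of those defaults, using a placeholder Top body rather than a real rule:

from pyk.kore.syntax import App, Axiom, SortApp, String, Top

_BODY = Top(SortApp('SortGeneratedTopCell'))     # placeholder axiom body, not a meaningful rule

assert _extract_priority(Axiom((), _BODY)) == 50
assert _extract_priority(Axiom((), _BODY, attrs=(App('owise'),))) == 200
assert _extract_priority(Axiom((), _BODY, attrs=(App('priority', args=(String('30'),)),))) == 30

assert _extract_simpl_priority(Axiom((), _BODY, attrs=(App('simplification'),))) == 50
assert _extract_simpl_priority(Axiom((), _BODY, attrs=(App('simplification', args=(String('20'),)),))) == 20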
\ No newline at end of file diff --git a/pyk/_modules/pyk/kore/syntax.html b/pyk/_modules/pyk/kore/syntax.html new file mode 100644 index 00000000000..b053742bdaa --- /dev/null +++ b/pyk/_modules/pyk/kore/syntax.html @@ -0,0 +1,2989 @@ +pyk.kore.syntax — pyk 7.1.191 documentation

Source code for pyk.kore.syntax

+   1from __future__ import annotations
+   2
+   3import json
+   4import re
+   5from abc import ABC, abstractmethod
+   6from collections.abc import Iterable
+   7from dataclasses import dataclass
+   8from functools import cached_property
+   9from io import StringIO
+  10from typing import ClassVar  # noqa: TC003
+  11from typing import TYPE_CHECKING, final
+  12
+  13from ..dequote import enquoted
+  14from ..utils import FrozenDict, check_type
+  15
+  16if TYPE_CHECKING:
+  17    from collections.abc import Callable, Iterator, Mapping
+  18    from typing import IO, Any, Final, TypeVar
+  19
+  20    T = TypeVar('T', bound='Kore')
+  21    P = TypeVar('P', bound='Pattern')
+  22    WS = TypeVar('WS', bound='WithSort')
+  23    WA = TypeVar('WA', bound='WithAttrs')
+  24    ML = TypeVar('ML', bound='MLPattern')
+  25
+  26
+
+[docs] + 27@final + 28@dataclass(frozen=True) + 29class Id: + 30 value: str + 31 + 32 _PATTERN_STR: ClassVar = "[a-zA-Z][0-9a-zA-Z'-]*" + 33 _PATTERN: ClassVar = re.compile(_PATTERN_STR) + 34 + 35 def __init__(self, value: str): + 36 self._check(value) + 37 object.__setattr__(self, 'value', value) + 38 + 39 @staticmethod + 40 def _check(value: str) -> None: + 41 if not Id._PATTERN.fullmatch(value): + 42 raise ValueError(f'Expected identifier, got: {value}')
+ + 43 + 44 +
+[docs] + 45@final + 46@dataclass(frozen=True) + 47class SymbolId: + 48 value: str + 49 + 50 _PATTERN: ClassVar = re.compile(fr'\\?{Id._PATTERN_STR}') + 51 + 52 def __init__(self, value: str): + 53 self._check(value) + 54 object.__setattr__(self, 'value', value) + 55 + 56 @staticmethod + 57 def _check(value: str) -> None: + 58 if not SymbolId._PATTERN.fullmatch(value): + 59 raise ValueError(f'Expected symbol identifier, got: {value}')
+ + 60 + 61 +
+[docs] + 62@final + 63@dataclass(frozen=True) + 64class SetVarId: + 65 value: str + 66 + 67 _PATTERN: ClassVar = re.compile(f'@{Id._PATTERN_STR}') + 68 + 69 def __init__(self, value: str): + 70 self._check(value) + 71 object.__setattr__(self, 'value', value) + 72 + 73 @staticmethod + 74 def _check(value: str) -> None: + 75 if not SetVarId._PATTERN.fullmatch(value): + 76 raise ValueError(f'Expected set variable identifier, got: {value}')
+ + 77 + 78 + 79# TODO Constructor @overloads + 80 + 81 +
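The three wrappers above validate their value at construction time: Id must match [a-zA-Z][0-9a-zA-Z'-]*, SymbolId additionally allows a leading backslash, and SetVarId requires a leading @. A small sketch of that behaviour:

from pyk.kore.syntax import Id, SetVarId, SymbolId

Id("Lbl'UndsPlus'Int'Unds'")   # ok: letter first, then letters, digits, apostrophes, dashes
SymbolId('\\and')              # ok: optional leading backslash for builtin symbols
SetVarId('@X')                 # ok: set variables start with '@'

try:
    Id('0badId')               # rejected: identifiers must not start with a digit
except ValueError as err:
    print(err)                 # Expected identifier, got: 0badId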
+[docs] + 82class Kore(ABC): + 83 @property + 84 def text(self) -> str: + 85 str_io = StringIO() + 86 self.write(str_io) + 87 return str_io.getvalue() + 88 +
+[docs] + 89 @abstractmethod + 90 def write(self, output: IO[str]) -> None: ...
+
+ + 91 + 92 + 93def _write_sep_by_comma(kores: Iterable[Kore], output: IO[str]) -> None: + 94 first = True + 95 for kore in kores: + 96 if first: + 97 first = False + 98 kore.write(output) + 99 else: + 100 output.write(', ') + 101 kore.write(output) + 102 + 103 +
+[docs] + 104class Sort(Kore): + 105 name: str + 106 + 107 @property + 108 def json(self) -> str: + 109 return json.dumps(self.dict, sort_keys=True) + 110 + 111 @property + 112 @abstractmethod + 113 def dict(self) -> dict[str, Any]: ... + 114 +
+[docs] + 115 @staticmethod + 116 def from_dict(dct: Mapping[str, Any]) -> Sort: + 117 tag = dct['tag'] + 118 match tag: + 119 case 'SortVar': + 120 return SortVar(name=dct['name']) + 121 case 'SortApp': + 122 return SortApp(name=dct['name'], sorts=tuple(Sort.from_dict(arg) for arg in dct['args'])) + 123 case _: + 124 raise ValueError(f'Unknown Sort tag value: {tag!r}')
+ + 125 +
+[docs] + 126 @staticmethod + 127 def from_json(s: str) -> Sort: + 128 return Sort.from_dict(json.loads(s))
+
+ + 129 + 130 +
+[docs] + 131class WithSort(ABC): + 132 sort: Sort + 133 +
+[docs] + 134 @abstractmethod + 135 def let_sort(self: WS, sort: Sort) -> WS: ...
+ + 136 +
+[docs] + 137 def map_sort(self: WS, f: Callable[[Sort], Sort]) -> WS: + 138 return self.let_sort(f(self.sort))
+
+ + 139 + 140 +
+[docs] + 141@final + 142@dataclass(frozen=True) + 143class SortVar(Sort): + 144 name: str + 145 + 146 def __init__(self, name: str | Id): + 147 if isinstance(name, str): + 148 name = Id(name) + 149 + 150 object.__setattr__(self, 'name', name.value) + 151 +
+[docs] + 152 def let(self, *, name: str | Id | None = None) -> SortVar: + 153 name = name if name is not None else self.name + 154 return SortVar(name=name)
+ + 155 + 156 @property + 157 def dict(self) -> dict[str, Any]: + 158 return {'tag': 'SortVar', 'name': self.name} + 159 +
+[docs] + 160 def write(self, output: IO[str]) -> None: + 161 output.write(self.name)
+
+ + 162 + 163 +
+[docs] + 164@final + 165@dataclass(frozen=True) + 166class SortApp(Sort): + 167 name: str + 168 sorts: tuple[Sort, ...] + 169 + 170 def __init__(self, name: str | Id, sorts: Iterable[Sort] = ()): + 171 if isinstance(name, str): + 172 name = Id(name) + 173 + 174 object.__setattr__(self, 'name', name.value) + 175 object.__setattr__(self, 'sorts', tuple(sorts)) + 176 +
+[docs] + 177 def let(self, *, name: str | Id | None = None, sorts: Iterable[Sort] | None = None) -> SortApp: + 178 name = name if name is not None else self.name + 179 sorts = sorts if sorts is not None else self.sorts + 180 return SortApp(name=name, sorts=sorts)
+ + 181 + 182 @property + 183 def dict(self) -> dict[str, Any]: + 184 return {'tag': 'SortApp', 'name': self.name, 'args': [sort.dict for sort in self.sorts]} + 185 +
+[docs] + 186 def write(self, output: IO[str]) -> None: + 187 output.write(self.name) + 188 output.write('{') + 189 _write_sep_by_comma(self.sorts, output) + 190 output.write('}')
+
+ + 191 + 192 +
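SortVar and SortApp share the dict/json/from_dict/from_json round trip declared on Sort, and write produces the textual KORE syntax. A small sketch:

from pyk.kore.syntax import Sort, SortApp, SortVar

sort = SortApp('SortMap', sorts=(SortApp('SortKItem'), SortVar('S')))
print(sort.text)                         # SortMap{SortKItem{}, S}
assert Sort.from_json(sort.json) == sort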
+[docs] + 193class Pattern(Kore): + 194 _TAGS: Final[dict[str, str | list[str]]] = { + 195 # Helper structure for from_dict(dct) + 196 # keys are Pattern subclass names, which coincides with the tag 'field' in dct + 197 # list value indicates fields in dct that transform to Pattern + 198 # str value indicates a field in dct that transforms to list[Pattern] + 199 'String': [], + 200 'EVar': [], + 201 'SVar': [], + 202 'App': 'args', + 203 'Top': [], + 204 'Bottom': [], + 205 'Not': ['arg'], + 206 'Implies': ['first', 'second'], + 207 'Iff': ['first', 'second'], + 208 'And': 'patterns', + 209 'Or': 'patterns', + 210 'Exists': ['arg'], + 211 'Forall': ['arg'], + 212 'Mu': ['arg'], + 213 'Nu': ['arg'], + 214 'Ceil': ['arg'], + 215 'Floor': ['arg'], + 216 'Equals': ['first', 'second'], + 217 'In': ['first', 'second'], + 218 'Next': ['dest'], + 219 'Rewrites': ['source', 'dest'], + 220 'DV': [], + 221 'LeftAssoc': 'argss', + 222 'RightAssoc': 'argss', + 223 } + 224 +
+[docs] + 225 @staticmethod + 226 def from_dict(dct: Mapping[str, Any]) -> Pattern: + 227 stack: list = [dct, Pattern._extract_dicts(dct), []] + 228 while True: + 229 patterns = stack[-1] + 230 dcts = stack[-2] + 231 dct = stack[-3] + 232 idx = len(patterns) - len(dcts) + 233 if not idx: + 234 stack.pop() + 235 stack.pop() + 236 stack.pop() + 237 cls = globals()[dct['tag']] + 238 pattern = cls._from_dict(dct, patterns) + 239 if not stack: + 240 return pattern + 241 stack[-1].append(pattern) + 242 else: + 243 dct = dcts[idx] + 244 stack.append(dct) + 245 stack.append(Pattern._extract_dicts(dct)) + 246 stack.append([])
+ + 247 + 248 @staticmethod + 249 def _extract_dicts(dct: Mapping[str, Any]) -> list[Mapping[str, Any]]: + 250 keys = Pattern._TAGS[dct['tag']] + 251 return dct[keys] if isinstance(keys, str) else [dct[key] for key in keys] + 252 +
+[docs] + 253 @staticmethod + 254 def from_json(s: str) -> Pattern: + 255 return Pattern.from_dict(json.loads(s))
+ + 256 + 257 @classmethod + 258 @abstractmethod + 259 def _from_dict(cls: type[P], dct: Mapping[str, Any], patterns: list[Pattern]) -> P: ... + 260 + 261 @property + 262 def json(self) -> str: + 263 return json.dumps(self.dict, sort_keys=True) + 264 + 265 @abstractmethod + 266 def _dict(self, dicts: list) -> dict[str, Any]: ... + 267 + 268 @classmethod + 269 @abstractmethod + 270 def _tag(cls) -> str: # TODO This should be an abstract immutable class attribute for efficiency + 271 ... + 272 + 273 @final + 274 @property + 275 def dict(self) -> dict[str, Any]: + 276 stack: list = [ + 277 self, + 278 self.patterns, + 279 [], + 280 ] + 281 + 282 while True: + 283 dicts = stack[-1] + 284 patterns = stack[-2] + 285 pattern = stack[-3] + 286 idx = len(dicts) - len(patterns) + 287 if not idx: + 288 stack.pop() + 289 stack.pop() + 290 stack.pop() + 291 dct = pattern._dict(dicts) + 292 if not stack: + 293 return dct + 294 stack[-1].append(dct) + 295 else: + 296 pattern = patterns[idx] + 297 stack.append(pattern) + 298 stack.append(pattern.patterns) + 299 stack.append([]) + 300 + 301 @property + 302 @abstractmethod + 303 def patterns(self) -> tuple[Pattern, ...]: ... + 304 +
+[docs] + 305 @abstractmethod + 306 def let_patterns(self: P, patterns: Iterable[Pattern]) -> P: ...
+ + 307 +
+[docs] + 308 def map_patterns(self: P, f: Callable[[Pattern], Pattern]) -> P: + 309 return self.let_patterns(patterns=(f(pattern) for pattern in self.patterns))
+ + 310 +
+[docs] + 311 def bottom_up(self, f: Callable[[Pattern], Pattern]) -> Pattern: + 312 stack: list = [self, []] + 313 while True: + 314 patterns = stack[-1] + 315 pattern = stack[-2] + 316 idx = len(patterns) - len(pattern.patterns) + 317 if not idx: + 318 stack.pop() + 319 stack.pop() + 320 pattern = f(pattern.let_patterns(patterns)) + 321 if not stack: + 322 return pattern + 323 stack[-1].append(pattern) + 324 else: + 325 stack.append(pattern.patterns[idx]) + 326 stack.append([])
+ + 327 +
+[docs] + 328 def top_down(self, f: Callable[[Pattern], Pattern]) -> Pattern: + 329 stack: list = [f(self), []] + 330 while True: + 331 patterns = stack[-1] + 332 pattern = stack[-2] + 333 idx = len(patterns) - len(pattern.patterns) + 334 if not idx: + 335 stack.pop() + 336 stack.pop() + 337 pattern = pattern.let_patterns(patterns) + 338 if not stack: + 339 return pattern + 340 stack[-1].append(pattern) + 341 else: + 342 stack.append(f(pattern.patterns[idx])) + 343 stack.append([])
+
+ + 344 + 345 +
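Pattern.dict and Pattern.from_dict are written with an explicit stack rather than recursion, driven by the _TAGS table, so very deep terms do not exhaust Python's recursion limit; json/from_json and text are thin wrappers on top. A round-trip sketch with an illustrative application symbol:

from pyk.kore.syntax import App, EVar, Pattern, SortApp

int_sort = SortApp('SortInt')
pat = App(
    "Lbl'UndsPlus'Int'Unds'",                       # illustrative KORE symbol
    args=(EVar('VarX', int_sort), EVar('VarY', int_sort)),
)

print(pat.text)                                     # Lbl'UndsPlus'Int'Unds'{}(VarX : SortInt{}, VarY : SortInt{})
assert Pattern.from_json(pat.json) == pat           # stack-based dict/from_dict round trip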
+[docs] + 346class VarPattern(Pattern, WithSort): + 347 __match_args__ = ('name', 'sort') + 348 + 349 name: str + 350 sort: Sort + 351 + 352 @property + 353 def patterns(self) -> tuple[()]: + 354 return () + 355 + 356 def _dict(self, dicts: list) -> dict[str, Any]: + 357 assert not dicts + 358 return {'tag': self._tag(), 'name': self.name, 'sort': self.sort.dict} + 359 +
+[docs] + 360 def write(self, output: IO[str]) -> None: + 361 output.write(self.name) + 362 output.write(' : ') + 363 self.sort.write(output)
+
+ + 364 + 365 +
+[docs] + 366@final + 367@dataclass(frozen=True) + 368class EVar(VarPattern): + 369 name: str + 370 sort: Sort + 371 + 372 def __init__(self, name: str | Id, sort: Sort): + 373 if isinstance(name, str): + 374 name = Id(name) + 375 + 376 object.__setattr__(self, 'name', name.value) + 377 object.__setattr__(self, 'sort', sort) + 378 +
+[docs] + 379 def let(self, *, name: str | Id | None = None, sort: Sort | None = None) -> EVar: + 380 name = name if name is not None else self.name + 381 sort = sort if sort is not None else self.sort + 382 return EVar(name=name, sort=sort)
+ + 383 +
+[docs] + 384 def let_sort(self, sort: Sort) -> EVar: + 385 return self.let(sort=sort)
+ + 386 +
+[docs] + 387 def let_patterns(self, patterns: Iterable[Pattern]) -> EVar: + 388 () = patterns + 389 return self
+ + 390 + 391 @classmethod + 392 def _tag(cls) -> str: + 393 return 'EVar' + 394 + 395 @classmethod + 396 def _from_dict(cls: type[EVar], dct: Mapping[str, Any], patterns: list[Pattern]) -> EVar: + 397 assert not patterns + 398 return EVar(name=dct['name'], sort=Sort.from_dict(dct['sort']))
+ + 399 + 400 +
+[docs] + 401@final + 402@dataclass(frozen=True) + 403class SVar(VarPattern): + 404 name: str + 405 sort: Sort + 406 + 407 def __init__(self, name: str | SetVarId, sort: Sort): + 408 if isinstance(name, str): + 409 name = SetVarId(name) + 410 + 411 object.__setattr__(self, 'name', name.value) + 412 object.__setattr__(self, 'sort', sort) + 413 +
+[docs] + 414 def let(self, *, name: str | SetVarId | None = None, sort: Sort | None = None) -> SVar: + 415 name = name if name is not None else self.name + 416 sort = sort if sort is not None else self.sort + 417 return SVar(name=name, sort=sort)
+ + 418 +
+[docs] + 419 def let_sort(self, sort: Sort) -> SVar: + 420 return self.let(sort=sort)
+ + 421 +
+[docs] + 422 def let_patterns(self, patterns: Iterable[Pattern]) -> SVar: + 423 () = patterns + 424 return self
+ + 425 + 426 @classmethod + 427 def _tag(cls) -> str: + 428 return 'SVar' + 429 + 430 @classmethod + 431 def _from_dict(cls: type[SVar], dct: Mapping[str, Any], patterns: list[Pattern]) -> SVar: + 432 assert not patterns + 433 return SVar(name=dct['name'], sort=Sort.from_dict(dct['sort']))
+ + 434 + 435 +
+[docs] + 436@final + 437@dataclass(frozen=True) + 438class String(Pattern): + 439 value: str + 440 +
+[docs] + 441 def let(self, *, value: str | None = None) -> String: + 442 value = value if value is not None else self.value + 443 return String(value=value)
+ + 444 +
+[docs] + 445 def let_patterns(self, patterns: Iterable[Pattern]) -> String: + 446 () = patterns + 447 return self
+ + 448 + 449 @classmethod + 450 def _tag(cls) -> str: + 451 return 'String' + 452 + 453 @classmethod + 454 def _from_dict(cls: type[String], dct: Mapping[str, Any], patterns: list[Pattern]) -> String: + 455 assert not patterns + 456 return String(value=dct['value']) + 457 + 458 @property + 459 def patterns(self) -> tuple[()]: + 460 return () + 461 + 462 def _dict(self, dicts: list) -> dict[str, Any]: + 463 assert not dicts + 464 return {'tag': 'String', 'value': self.value} + 465 +
+[docs] + 466 def write(self, output: IO[str]) -> None: + 467 output.write('"') + 468 for char in enquoted(self.value): + 469 output.write(char) + 470 output.write('"')
+
+ + 471 + 472 +
+[docs] + 473@final + 474@dataclass(frozen=True) + 475class App(Pattern): + 476 symbol: str + 477 sorts: tuple[Sort, ...] + 478 args: tuple[Pattern, ...] + 479 + 480 def __init__(self, symbol: str | SymbolId, sorts: Iterable[Sort] = (), args: Iterable[Pattern] = ()): + 481 if isinstance(symbol, str): + 482 symbol = SymbolId(symbol) + 483 + 484 object.__setattr__(self, 'symbol', symbol.value) + 485 object.__setattr__(self, 'sorts', tuple(sorts)) + 486 object.__setattr__(self, 'args', tuple(args)) + 487 +
+[docs] + 488 def let( + 489 self, + 490 *, + 491 symbol: str | SymbolId | None = None, + 492 sorts: Iterable | None = None, + 493 args: Iterable | None = None, + 494 ) -> App: + 495 symbol = symbol if symbol is not None else self.symbol + 496 sorts = sorts if sorts is not None else self.sorts + 497 args = args if args is not None else self.args + 498 return App(symbol=symbol, sorts=sorts, args=args)
+ + 499 +
+[docs] + 500 def let_patterns(self, patterns: Iterable[Pattern]) -> App: + 501 return self.let(args=patterns)
+ + 502 + 503 @classmethod + 504 def _tag(cls) -> str: + 505 return 'App' + 506 + 507 @classmethod + 508 def _from_dict(cls: type[App], dct: Mapping[str, Any], patterns: list[Pattern]) -> App: + 509 return App( + 510 symbol=dct['name'], + 511 sorts=tuple(Sort.from_dict(sort) for sort in dct['sorts']), + 512 args=patterns, + 513 ) + 514 + 515 @property + 516 def patterns(self) -> tuple[Pattern, ...]: + 517 return self.args + 518 + 519 def _dict(self, dicts: list) -> dict[str, Any]: + 520 return { + 521 'tag': 'App', + 522 'name': self.symbol, + 523 'sorts': [sort.dict for sort in self.sorts], + 524 'args': dicts, + 525 } + 526 +
+[docs] + 527 def write(self, output: IO[str]) -> None: + 528 output.write(self.symbol) + 529 output.write('{') + 530 _write_sep_by_comma(self.sorts, output) + 531 output.write('}(') + 532 _write_sep_by_comma(self.args, output) + 533 output.write(')')
+
+ + 534 + 535 +
+[docs] + 536class Assoc(Pattern): + 537 symbol: str + 538 sorts: tuple[Sort, ...] + 539 args: tuple[Pattern, ...] + 540 +
+[docs] + 541 @classmethod + 542 @abstractmethod + 543 def kore_symbol(cls) -> str: ...
+ + 544 + 545 @property + 546 @abstractmethod + 547 def pattern(self) -> Pattern: ... + 548 + 549 @property + 550 def patterns(self) -> tuple[Pattern, ...]: + 551 return self.args + 552 + 553 @cached_property + 554 def app(self) -> App: + 555 return App(symbol=self.symbol, sorts=self.sorts, args=self.args) + 556 + 557 def _dict(self, dicts: list) -> dict[str, Any]: + 558 return { + 559 'tag': self._tag(), + 560 'symbol': self.symbol, + 561 'sorts': [sort.dict for sort in self.sorts], + 562 'argss': dicts, + 563 } + 564 +
+[docs] + 565 def write(self, output: IO[str]) -> None: + 566 output.write(self.kore_symbol()) + 567 output.write('{}(') + 568 self.app.write(output) + 569 output.write(')')
+
+ + 570 + 571 +
+[docs] + 572@final + 573@dataclass(frozen=True) + 574class LeftAssoc(Assoc): + 575 symbol: str + 576 sorts: tuple[Sort, ...] + 577 args: tuple[Pattern, ...] + 578 + 579 def __init__(self, symbol: str | SymbolId, sorts: Iterable[Sort] = (), args: Iterable[Pattern] = ()): + 580 if isinstance(symbol, str): + 581 symbol = SymbolId(symbol) + 582 + 583 args = tuple(args) + 584 if not args: + 585 raise ValueError("Expected non-empty iterable for 'args'") + 586 + 587 object.__setattr__(self, 'symbol', symbol.value) + 588 object.__setattr__(self, 'sorts', tuple(sorts)) + 589 object.__setattr__(self, 'args', args) + 590 +
+[docs] + 591 def let( + 592 self, + 593 *, + 594 symbol: str | SymbolId | None = None, + 595 sorts: Iterable | None = None, + 596 args: Iterable | None = None, + 597 ) -> LeftAssoc: + 598 symbol = symbol if symbol is not None else self.symbol + 599 sorts = sorts if sorts is not None else self.sorts + 600 args = args if args is not None else self.args + 601 return LeftAssoc(symbol=symbol, sorts=sorts, args=args)
+ + 602 +
+[docs] + 603 def let_patterns(self, patterns: Iterable[Pattern]) -> LeftAssoc: + 604 return self.let(args=patterns)
+ + 605 + 606 @property + 607 def pattern(self) -> Pattern: + 608 res = self.args[0] + 609 for arg in self.args[1:]: + 610 res = App(self.symbol, self.sorts, (res, arg)) + 611 return res + 612 + 613 @classmethod + 614 def _tag(cls) -> str: + 615 return 'LeftAssoc' + 616 +
+[docs] + 617 @classmethod + 618 def kore_symbol(cls) -> str: + 619 return '\\left-assoc'
+ + 620 + 621 @classmethod + 622 def _from_dict(cls: type[LeftAssoc], dct: Mapping[str, Any], patterns: list[Pattern]) -> LeftAssoc: + 623 return LeftAssoc( + 624 symbol=dct['symbol'], + 625 sorts=tuple(Sort.from_dict(sort) for sort in dct['sorts']), + 626 args=patterns, + 627 )
+ + 628 + 629 +
+[docs] + 630@final + 631@dataclass(frozen=True) + 632class RightAssoc(Assoc): + 633 symbol: str + 634 sorts: tuple[Sort, ...] + 635 args: tuple[Pattern, ...] + 636 + 637 def __init__(self, symbol: str | SymbolId, sorts: Iterable[Sort] = (), args: Iterable[Pattern] = ()): + 638 if isinstance(symbol, str): + 639 symbol = SymbolId(symbol) + 640 + 641 args = tuple(args) + 642 if not args: + 643 raise ValueError("Expected non-empty iterable for 'args'") + 644 + 645 object.__setattr__(self, 'symbol', symbol.value) + 646 object.__setattr__(self, 'sorts', tuple(sorts)) + 647 object.__setattr__(self, 'args', args) + 648 +
+[docs] + 649 def let( + 650 self, + 651 *, + 652 symbol: str | SymbolId | None = None, + 653 sorts: Iterable | None = None, + 654 args: Iterable | None = None, + 655 ) -> RightAssoc: + 656 symbol = symbol if symbol is not None else self.symbol + 657 sorts = sorts if sorts is not None else self.sorts + 658 args = args if args is not None else self.args + 659 return RightAssoc(symbol=symbol, sorts=sorts, args=args)
+ + 660 +
+[docs] + 661 def let_patterns(self, patterns: Iterable[Pattern]) -> RightAssoc: + 662 return self.let(args=patterns)
+ + 663 + 664 @property + 665 def pattern(self) -> Pattern: + 666 res = self.args[-1] + 667 for arg in reversed(self.args[:-1]): + 668 res = App(self.symbol, (), (arg, res)) + 669 return res + 670 + 671 @classmethod + 672 def _tag(cls) -> str: + 673 return 'RightAssoc' + 674 +
+[docs] + 675 @classmethod + 676 def kore_symbol(cls) -> str: + 677 return '\\right-assoc'
+ + 678 + 679 @classmethod + 680 def _from_dict(cls: type[RightAssoc], dct: Mapping[str, Any], patterns: list[Pattern]) -> RightAssoc: + 681 return RightAssoc( + 682 symbol=dct['symbol'], + 683 sorts=tuple(Sort.from_dict(sort) for sort in dct['sorts']), + 684 args=patterns, + 685 )
+ + 686 + 687 +
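LeftAssoc and RightAssoc are concrete syntax for a chain of binary applications: the pattern property folds args into nested Apps from the left or from the right, while write prints the \left-assoc / \right-assoc form. A sketch with an illustrative symbol:

from pyk.kore.syntax import DV, LeftAssoc, SortApp, String

one, two, three = (DV(SortApp('SortInt'), String(s)) for s in ('1', '2', '3'))
assoc = LeftAssoc("Lbl'UndsPlus'Int'Unds'", args=(one, two, three))

# pattern nests from the left, roughly:
# Lbl'UndsPlus'Int'Unds'{}(Lbl'UndsPlus'Int'Unds'{}(\dv{...}("1"), \dv{...}("2")), \dv{...}("3"))
print(assoc.pattern.text)
print(assoc.text)   # the \left-assoc{}(...) concrete syntax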
+[docs] + 688class MLPattern(Pattern): +
+[docs] + 689 @classmethod + 690 @abstractmethod + 691 def symbol(cls) -> str: ...
+ + 692 +
+[docs] + 693 @classmethod + 694 def of(cls: type[ML], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> ML: + 695 actual_cls = ML_SYMBOLS.get(symbol) + 696 + 697 if not actual_cls: + 698 raise ValueError(f'Invalid MLPattern symbol: {symbol}') + 699 + 700 if not issubclass(actual_cls, cls): + 701 raise ValueError(f'Expected {cls.__name__} symbol, found: {symbol}') + 702 + 703 return actual_cls.of(symbol, sorts, patterns)
+ + 704 + 705 @classmethod + 706 def _check_symbol(cls: type[ML], symbol: str) -> None: + 707 if symbol != cls.symbol(): + 708 raise ValueError(f'Expected "symbol" value: {cls.symbol()}, got: {symbol}') + 709 + 710 @property + 711 @abstractmethod + 712 def sorts(self) -> tuple[Sort, ...]: ... + 713 + 714 @property + 715 def ctor_patterns(self) -> tuple[Pattern, ...]: + 716 """Return patterns used to construct the term with `of`. + 717 + 718 Except for `DV`, `MLFixpoint` and `MLQuant` this coincides with `patterns`. + 719 """ + 720 return self.patterns + 721 +
+[docs] + 722 def write(self, output: IO[str]) -> None: + 723 output.write(self.symbol()) + 724 output.write('{') + 725 _write_sep_by_comma(self.sorts, output) + 726 output.write('}(') + 727 _write_sep_by_comma(self.ctor_patterns, output) + 728 output.write(')')
+
+ + 729 + 730 +
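MLPattern.of is a generic constructor: it looks the symbol up in ML_SYMBOLS, checks that the resolved class matches the requested one, and delegates to that class's own of, so matching-logic nodes can be built from a symbol string plus sort and pattern lists. A small sketch:

from pyk.kore.syntax import EVar, MLPattern, SortApp, Top

sort_k = SortApp('SortK')
x = EVar('VarX', sort_k)

top = MLPattern.of('\\top', sorts=(sort_k,))
assert isinstance(top, Top)

eq = MLPattern.of('\\equals', sorts=(sort_k, sort_k), patterns=(x, x))
print(eq.text)      # \equals{SortK{}, SortK{}}(VarX : SortK{}, VarX : SortK{})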
+[docs] + 731class MLConn(MLPattern, WithSort): + 732 @property + 733 def sorts(self) -> tuple[Sort]: + 734 return (self.sort,)
+ + 735 + 736 +
+[docs] + 737class NullaryConn(MLConn): + 738 def _dict(self, dicts: list) -> dict[str, Any]: + 739 assert not dicts + 740 return {'tag': self._tag(), 'sort': self.sort.dict} + 741 + 742 @property + 743 def patterns(self) -> tuple[()]: + 744 return ()
+ + 745 + 746 +
+[docs] + 747@final + 748@dataclass(frozen=True) + 749class Top(NullaryConn): + 750 sort: Sort + 751 +
+[docs] + 752 def let(self, *, sort: Sort | None = None) -> Top: + 753 sort = sort if sort is not None else self.sort + 754 return Top(sort=sort)
+ + 755 +
+[docs] + 756 def let_sort(self: Top, sort: Sort) -> Top: + 757 return self.let(sort=sort)
+ + 758 +
+[docs] + 759 def let_patterns(self, patterns: Iterable[Pattern]) -> Top: + 760 () = patterns + 761 return self
+ + 762 + 763 @classmethod + 764 def _tag(cls) -> str: + 765 return 'Top' + 766 +
+[docs] + 767 @classmethod + 768 def symbol(cls) -> str: + 769 return '\\top'
+ + 770 +
+[docs] + 771 @classmethod + 772 def of(cls: type[Top], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Top: + 773 cls._check_symbol(symbol) + 774 (sort,) = sorts + 775 () = patterns + 776 return Top(sort=sort)
+ + 777 + 778 @classmethod + 779 def _from_dict(cls: type[Top], dct: Mapping[str, Any], patterns: list[Pattern]) -> Top: + 780 assert not patterns + 781 return Top(sort=Sort.from_dict(dct['sort']))
+ + 782 + 783 +
+[docs] + 784@final + 785@dataclass(frozen=True) + 786class Bottom(NullaryConn): + 787 sort: Sort + 788 +
+[docs] + 789 def let(self, *, sort: Sort | None = None) -> Bottom: + 790 sort = sort if sort is not None else self.sort + 791 return Bottom(sort=sort)
+ + 792 +
+[docs] + 793 def let_sort(self: Bottom, sort: Sort) -> Bottom: + 794 return self.let(sort=sort)
+ + 795 +
+[docs] + 796 def let_patterns(self, patterns: Iterable[Pattern]) -> Bottom: + 797 () = patterns + 798 return self
+ + 799 + 800 @classmethod + 801 def _tag(cls) -> str: + 802 return 'Bottom' + 803 +
+[docs] + 804 @classmethod + 805 def symbol(cls) -> str: + 806 return '\\bottom'
+ + 807 +
+[docs] + 808 @classmethod + 809 def of(cls: type[Bottom], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Bottom: + 810 cls._check_symbol(symbol) + 811 (sort,) = sorts + 812 () = patterns + 813 return Bottom(sort=sort)
+ + 814 + 815 @classmethod + 816 def _from_dict(cls: type[Bottom], dct: Mapping[str, Any], patterns: list[Pattern]) -> Bottom: + 817 assert not patterns + 818 return Bottom(sort=Sort.from_dict(dct['sort']))
+ + 819 + 820 +
+[docs] + 821class UnaryConn(MLConn): + 822 pattern: Pattern + 823 + 824 @property + 825 def patterns(self) -> tuple[Pattern]: + 826 return (self.pattern,) + 827 + 828 def _dict(self, dicts: list) -> dict[str, Any]: + 829 (arg,) = dicts + 830 return {'tag': self._tag(), 'sort': self.sort.dict, 'arg': arg}
+ + 831 + 832 +
+[docs] + 833@final + 834@dataclass(frozen=True) + 835class Not(UnaryConn): + 836 sort: Sort + 837 pattern: Pattern + 838 +
+[docs] + 839 def let(self, *, sort: Sort | None = None, pattern: Pattern | None = None) -> Not: + 840 sort = sort if sort is not None else self.sort + 841 pattern = pattern if pattern is not None else self.pattern + 842 return Not(sort=sort, pattern=pattern)
+ + 843 +
+[docs] + 844 def let_sort(self: Not, sort: Sort) -> Not: + 845 return self.let(sort=sort)
+ + 846 +
+[docs] + 847 def let_patterns(self, patterns: Iterable[Pattern]) -> Not: + 848 (pattern,) = patterns + 849 return self.let(pattern=pattern)
+ + 850 + 851 @classmethod + 852 def _tag(cls) -> str: + 853 return 'Not' + 854 +
+[docs] + 855 @classmethod + 856 def symbol(cls) -> str: + 857 return '\\not'
+ + 858 +
+[docs] + 859 @classmethod + 860 def of(cls: type[Not], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Not: + 861 cls._check_symbol(symbol) + 862 (sort,) = sorts + 863 (pattern,) = patterns + 864 return Not(sort=sort, pattern=pattern)
+ + 865 + 866 @classmethod + 867 def _from_dict(cls: type[Not], dct: Mapping[str, Any], patterns: list[Pattern]) -> Not: + 868 (pattern,) = patterns + 869 return Not(sort=Sort.from_dict(dct['sort']), pattern=pattern)
+ + 870 + 871 +
+[docs] + 872class BinaryConn(MLConn): + 873 left: Pattern + 874 right: Pattern + 875 + 876 def __iter__(self) -> Iterator[Pattern]: + 877 yield self.left + 878 yield self.right + 879 + 880 @property + 881 def patterns(self) -> tuple[Pattern, Pattern]: + 882 return (self.left, self.right) + 883 + 884 def _dict(self, dicts: list) -> dict[str, Any]: + 885 first, second = dicts + 886 return {'tag': self._tag(), 'sort': self.sort.dict, 'first': first, 'second': second}
+ + 887 + 888 +
+[docs] + 889@final + 890@dataclass(frozen=True) + 891class Implies(BinaryConn): + 892 sort: Sort + 893 left: Pattern + 894 right: Pattern + 895 +
+[docs] + 896 def let( + 897 self, + 898 *, + 899 sort: Sort | None = None, + 900 left: Pattern | None = None, + 901 right: Pattern | None = None, + 902 ) -> Implies: + 903 sort = sort if sort is not None else self.sort + 904 left = left if left is not None else self.left + 905 right = right if right is not None else self.right + 906 return Implies(sort=sort, left=left, right=right)
+ + 907 +
+[docs] + 908 def let_sort(self: Implies, sort: Sort) -> Implies: + 909 return self.let(sort=sort)
+ + 910 +
+[docs] + 911 def let_patterns(self, patterns: Iterable[Pattern]) -> Implies: + 912 left, right = patterns + 913 return self.let(left=left, right=right)
+ + 914 + 915 @classmethod + 916 def _tag(cls) -> str: + 917 return 'Implies' + 918 +
+[docs] + 919 @classmethod + 920 def symbol(cls) -> str: + 921 return '\\implies'
+ + 922 +
+[docs] + 923 @classmethod + 924 def of(cls: type[Implies], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Implies: + 925 cls._check_symbol(symbol) + 926 (sort,) = sorts + 927 left, right = patterns + 928 return Implies(sort=sort, left=left, right=right)
+ + 929 + 930 @classmethod + 931 def _from_dict(cls: type[Implies], dct: Mapping[str, Any], patterns: list[Pattern]) -> Implies: + 932 left, right = patterns + 933 return Implies(sort=Sort.from_dict(dct['sort']), left=left, right=right)
+ + 934 + 935 +
+[docs] + 936@final + 937@dataclass(frozen=True) + 938class Iff(BinaryConn): + 939 sort: Sort + 940 left: Pattern + 941 right: Pattern + 942 +
+[docs] + 943 def let( + 944 self, + 945 *, + 946 sort: Sort | None = None, + 947 left: Pattern | None = None, + 948 right: Pattern | None = None, + 949 ) -> Iff: + 950 sort = sort if sort is not None else self.sort + 951 left = left if left is not None else self.left + 952 right = right if right is not None else self.right + 953 return Iff(sort=sort, left=left, right=right)
+ + 954 +
+[docs] + 955 def let_sort(self: Iff, sort: Sort) -> Iff: + 956 return self.let(sort=sort)
+ + 957 +
+[docs] + 958 def let_patterns(self, patterns: Iterable[Pattern]) -> Iff: + 959 left, right = patterns + 960 return self.let(left=left, right=right)
+ + 961 + 962 @classmethod + 963 def _tag(cls) -> str: + 964 return 'Iff' + 965 +
+[docs] + 966 @classmethod + 967 def symbol(cls) -> str: + 968 return '\\iff'
+ + 969 +
+[docs] + 970 @classmethod + 971 def of(cls: type[Iff], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Iff: + 972 cls._check_symbol(symbol) + 973 (sort,) = sorts + 974 left, right = patterns + 975 return Iff(sort=sort, left=left, right=right)
+ + 976 + 977 @classmethod + 978 def _from_dict(cls: type[Iff], dct: Mapping[str, Any], patterns: list[Pattern]) -> Iff: + 979 left, right = patterns + 980 return Iff(sort=Sort.from_dict(dct['sort']), left=left, right=right)
+ + 981 + 982 +
+[docs] + 983class MultiaryConn(MLConn): + 984 ops: tuple[Pattern, ...] + 985 + 986 def __iter__(self) -> Iterator[Pattern]: + 987 return iter(self.ops) + 988 + 989 @property + 990 def patterns(self) -> tuple[Pattern, ...]: + 991 return self.ops + 992 + 993 def _dict(self, dicts: list) -> dict[str, Any]: + 994 return {'tag': self._tag(), 'sort': self.sort.dict, 'patterns': dicts}
+ + 995 + 996 +
+[docs] + 997@final + 998@dataclass(frozen=True) + 999class And(MultiaryConn): +1000 sort: Sort +1001 ops: tuple[Pattern, ...] +1002 +1003 def __init__(self, sort: Sort, ops: Iterable[Pattern] = ()): +1004 object.__setattr__(self, 'sort', sort) +1005 object.__setattr__(self, 'ops', tuple(ops)) +1006 +
+[docs] +1007 def let( +1008 self, +1009 *, +1010 sort: Sort | None = None, +1011 ops: Iterable[Pattern] | None = None, +1012 ) -> And: +1013 sort = sort if sort is not None else self.sort +1014 ops = ops if ops is not None else self.ops +1015 return And(sort=sort, ops=ops)
+ +1016 +
+[docs] +1017 def let_sort(self, sort: Sort) -> And: +1018 return self.let(sort=sort)
+ +1019 +
+[docs] +1020 def let_patterns(self, patterns: Iterable[Pattern]) -> And: +1021 return self.let(ops=patterns)
+ +1022 +1023 @classmethod +1024 def _tag(cls) -> str: +1025 return 'And' +1026 +
+[docs] +1027 @classmethod +1028 def symbol(cls) -> str: +1029 return '\\and'
+ +1030 +
+[docs] +1031 @classmethod +1032 def of(cls: type[And], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> And: +1033 cls._check_symbol(symbol) +1034 (sort,) = sorts +1035 return And(sort=sort, ops=patterns)
+ +1036 +1037 @classmethod +1038 def _from_dict(cls: type[And], dct: Mapping[str, Any], patterns: list[Pattern]) -> And: +1039 return And(sort=Sort.from_dict(dct['sort']), ops=patterns)
+ +1040 +1041 +
+[docs] +1042@final +1043@dataclass(frozen=True) +1044class Or(MultiaryConn): +1045 sort: Sort +1046 ops: tuple[Pattern, ...] +1047 +1048 def __init__(self, sort: Sort, ops: Iterable[Pattern] = ()): +1049 object.__setattr__(self, 'sort', sort) +1050 object.__setattr__(self, 'ops', tuple(ops)) +1051 +
+[docs] +1052 def let( +1053 self, +1054 *, +1055 sort: Sort | None = None, +1056 ops: Iterable[Pattern] | None = None, +1057 ) -> Or: +1058 sort = sort if sort is not None else self.sort +1059 ops = ops if ops is not None else self.ops +1060 return Or(sort=sort, ops=ops)
+ +1061 +
+[docs] +1062 def let_sort(self, sort: Sort) -> Or: +1063 return self.let(sort=sort)
+ +1064 +
+[docs] +1065 def let_patterns(self, patterns: Iterable[Pattern]) -> Or: +1066 return self.let(ops=patterns)
+ +1067 +1068 @classmethod +1069 def _tag(cls) -> str: +1070 return 'Or' +1071 +
+[docs] +1072 @classmethod +1073 def symbol(cls) -> str: +1074 return '\\or'
+ +1075 +
+[docs] +1076 @classmethod +1077 def of(cls: type[Or], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Or: +1078 cls._check_symbol(symbol) +1079 (sort,) = sorts +1080 return Or(sort=sort, ops=patterns)
+ +1081 +1082 @classmethod +1083 def _from_dict(cls: type[Or], dct: Mapping[str, Any], patterns: list[Pattern]) -> Or: +1084 return Or(sort=Sort.from_dict(dct['sort']), ops=patterns)
+ +1085 +1086 +
+[docs] +1087class MLQuant(MLPattern, WithSort): +1088 sort: Sort +1089 var: EVar # TODO Should this be inlined to var_name, var_sort? +1090 pattern: Pattern +1091 +1092 @property +1093 def sorts(self) -> tuple[Sort]: +1094 return (self.sort,) +1095 +1096 @property +1097 def patterns(self) -> tuple[Pattern]: +1098 return (self.pattern,) +1099 +1100 @property +1101 def ctor_patterns(self) -> tuple[EVar, Pattern]: +1102 return (self.var, self.pattern) +1103 +1104 def _dict(self, dicts: list) -> dict[str, Any]: +1105 (arg,) = dicts +1106 return { +1107 'tag': self._tag(), +1108 'sort': self.sort.dict, +1109 'var': self.var.name, +1110 'varSort': self.var.sort.dict, +1111 'arg': arg, +1112 }
+ +1113 +1114 +
+[docs] +1115@final +1116@dataclass(frozen=True) +1117class Exists(MLQuant): +1118 sort: Sort +1119 var: EVar +1120 pattern: Pattern +1121 +
+[docs] +1122 def let( +1123 self, +1124 *, +1125 sort: Sort | None = None, +1126 var: EVar | None = None, +1127 pattern: Pattern | None = None, +1128 ) -> Exists: +1129 sort = sort if sort is not None else self.sort +1130 var = var if var is not None else self.var +1131 pattern = pattern if pattern is not None else self.pattern +1132 return Exists(sort=sort, var=var, pattern=pattern)
+ +1133 +
+[docs] +1134 def let_sort(self, sort: Sort) -> Exists: +1135 return self.let(sort=sort)
+ +1136 +
+[docs] +1137 def let_patterns(self, patterns: Iterable[Pattern]) -> Exists: +1138 (pattern,) = patterns +1139 return self.let(pattern=pattern)
+ +1140 +1141 @classmethod +1142 def _tag(cls) -> str: +1143 return 'Exists' +1144 +
+[docs] +1145 @classmethod +1146 def symbol(cls) -> str: +1147 return '\\exists'
+ +1148 +
+[docs] +1149 @classmethod +1150 def of(cls: type[Exists], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Exists: +1151 cls._check_symbol(symbol) +1152 (sort,) = sorts +1153 var, pattern = patterns +1154 var = check_type(var, EVar) +1155 return Exists(sort=sort, var=var, pattern=pattern)
+ +1156 +1157 @classmethod +1158 def _from_dict(cls: type[Exists], dct: Mapping[str, Any], patterns: list[Pattern]) -> Exists: +1159 (pattern,) = patterns +1160 return Exists( +1161 sort=Sort.from_dict(dct['sort']), +1162 var=EVar(name=dct['var'], sort=Sort.from_dict(dct['varSort'])), +1163 pattern=pattern, +1164 )
+ +1165 +1166 +
+[docs] +1167@final +1168@dataclass(frozen=True) +1169class Forall(MLQuant): +1170 sort: Sort +1171 var: EVar +1172 pattern: Pattern +1173 +
+[docs] +1174 def let( +1175 self, +1176 *, +1177 sort: Sort | None = None, +1178 var: EVar | None = None, +1179 pattern: Pattern | None = None, +1180 ) -> Forall: +1181 sort = sort if sort is not None else self.sort +1182 var = var if var is not None else self.var +1183 pattern = pattern if pattern is not None else self.pattern +1184 return Forall(sort=sort, var=var, pattern=pattern)
+ +1185 +
+[docs] +1186 def let_sort(self, sort: Sort) -> Forall: +1187 return self.let(sort=sort)
+ +1188 +
+[docs] +1189 def let_patterns(self, patterns: Iterable[Pattern]) -> Forall: +1190 (pattern,) = patterns +1191 return self.let(pattern=pattern)
+ +1192 +1193 @classmethod +1194 def _tag(cls) -> str: +1195 return 'Forall' +1196 +
+[docs] +1197 @classmethod +1198 def symbol(cls) -> str: +1199 return '\\forall'
+ +1200 +
+[docs] +1201 @classmethod +1202 def of(cls: type[Forall], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Forall: +1203 cls._check_symbol(symbol) +1204 (sort,) = sorts +1205 var, pattern = patterns +1206 var = check_type(var, EVar) +1207 return Forall(sort=sort, var=var, pattern=pattern)
+ +1208 +1209 @classmethod +1210 def _from_dict(cls: type[Forall], dct: Mapping[str, Any], patterns: list[Pattern]) -> Forall: +1211 (pattern,) = patterns +1212 return Forall( +1213 sort=Sort.from_dict(dct['sort']), +1214 var=EVar(name=dct['var'], sort=Sort.from_dict(dct['varSort'])), +1215 pattern=pattern, +1216 )
+ +1217 +1218 +
+[docs] +1219class MLFixpoint(MLPattern): +1220 var: SVar # TODO Should this be inlined to var_name, var_sort? +1221 pattern: Pattern +1222 +1223 @property +1224 def sorts(self) -> tuple[()]: +1225 return () +1226 +1227 @property +1228 def patterns(self) -> tuple[Pattern]: +1229 return (self.pattern,) +1230 +1231 @property +1232 def ctor_patterns(self) -> tuple[SVar, Pattern]: +1233 return (self.var, self.pattern) +1234 +1235 def _dict(self, dicts: list) -> dict[str, Any]: +1236 (arg,) = dicts +1237 return { +1238 'tag': self._tag(), +1239 'var': self.var.name, +1240 'varSort': self.var.sort.dict, +1241 'arg': arg, +1242 }
+ +1243 +1244 +
+[docs] +1245@final +1246@dataclass(frozen=True) +1247class Mu(MLFixpoint): +1248 var: SVar +1249 pattern: Pattern +1250 +
+[docs] +1251 def let(self, *, var: SVar | None = None, pattern: Pattern | None = None) -> Mu: +1252 var = var if var is not None else self.var +1253 pattern = pattern if pattern is not None else self.pattern +1254 return Mu(var=var, pattern=pattern)
+ +1255 +
+[docs] +1256 def let_patterns(self, patterns: Iterable[Pattern]) -> Mu: +1257 (pattern,) = patterns +1258 return self.let(pattern=pattern)
+ +1259 +1260 @classmethod +1261 def _tag(cls) -> str: +1262 return 'Mu' +1263 +
+[docs] +1264 @classmethod +1265 def symbol(cls) -> str: +1266 return '\\mu'
+ +1267 +
+[docs] +1268 @classmethod +1269 def of(cls: type[Mu], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Mu: +1270 cls._check_symbol(symbol) +1271 () = sorts +1272 var, pattern = patterns +1273 var = check_type(var, SVar) +1274 return Mu(var=var, pattern=pattern)
+ +1275 +1276 @classmethod +1277 def _from_dict(cls: type[Mu], dct: Mapping[str, Any], patterns: list[Pattern]) -> Mu: +1278 (pattern,) = patterns +1279 return Mu( +1280 var=SVar(name=dct['var'], sort=Sort.from_dict(dct['varSort'])), +1281 pattern=pattern, +1282 )
+ +1283 +1284 +
+[docs] +1285@final +1286@dataclass(frozen=True) +1287class Nu(MLFixpoint): +1288 var: SVar +1289 pattern: Pattern +1290 +
+[docs] +1291 def let(self, *, var: SVar | None = None, pattern: Pattern | None = None) -> Nu: +1292 var = var if var is not None else self.var +1293 pattern = pattern if pattern is not None else self.pattern +1294 return Nu(var=var, pattern=pattern)
+ +1295 +
+[docs] +1296 def let_patterns(self, patterns: Iterable[Pattern]) -> Nu: +1297 (pattern,) = patterns +1298 return self.let(pattern=pattern)
+ +1299 +1300 @classmethod +1301 def _tag(cls) -> str: +1302 return 'Nu' +1303 +
+[docs] +1304 @classmethod +1305 def symbol(cls) -> str: +1306 return '\\nu'
+ +1307 +
+[docs] +1308 @classmethod +1309 def of(cls: type[Nu], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Nu: +1310 cls._check_symbol(symbol) +1311 () = sorts +1312 var, pattern = patterns +1313 var = check_type(var, SVar) +1314 return Nu(var=var, pattern=pattern)
+ +1315 +1316 @classmethod +1317 def _from_dict(cls: type[Nu], dct: Mapping[str, Any], patterns: list[Pattern]) -> Nu: +1318 (pattern,) = patterns +1319 return Nu( +1320 var=SVar(name=dct['var'], sort=Sort.from_dict(dct['varSort'])), +1321 pattern=pattern, +1322 )
+ +1323 +1324 +
+[docs] +1325class MLPred(MLPattern, WithSort): +1326 op_sort: Sort
+ +1327 +1328 +
+[docs] +1329class RoundPred(MLPred): +1330 pattern: Pattern +1331 +1332 @property +1333 def sorts(self) -> tuple[Sort, Sort]: +1334 return (self.op_sort, self.sort) +1335 +1336 @property +1337 def patterns(self) -> tuple[Pattern]: +1338 return (self.pattern,) +1339 +1340 def _dict(self, dicts: list) -> dict[str, Any]: +1341 (arg,) = dicts +1342 return { +1343 'tag': self._tag(), +1344 'argSort': self.op_sort.dict, +1345 'sort': self.sort.dict, +1346 'arg': arg, +1347 }
+ +1348 +1349 +
+[docs] +1350@final +1351@dataclass(frozen=True) +1352class Ceil(RoundPred): +1353 op_sort: Sort +1354 sort: Sort +1355 pattern: Pattern +1356 +
+[docs] +1357 def let( +1358 self, +1359 *, +1360 op_sort: Sort | None = None, +1361 sort: Sort | None = None, +1362 pattern: Pattern | None = None, +1363 ) -> Ceil: +1364 op_sort = op_sort if op_sort is not None else self.op_sort +1365 sort = sort if sort is not None else self.sort +1366 pattern = pattern if pattern is not None else self.pattern +1367 return Ceil(op_sort=op_sort, sort=sort, pattern=pattern)
+ +1368 +
+[docs] +1369 def let_sort(self, sort: Sort) -> Ceil: +1370 return self.let(sort=sort)
+ +1371 +
+[docs] +1372 def let_patterns(self, patterns: Iterable[Pattern]) -> Ceil: +1373 (pattern,) = patterns +1374 return self.let(pattern=pattern)
+ +1375 +1376 @classmethod +1377 def _tag(cls) -> str: +1378 return 'Ceil' +1379 +
+[docs] +1380 @classmethod +1381 def symbol(cls) -> str: +1382 return '\\ceil'
+ +1383 +
+[docs] +1384 @classmethod +1385 def of(cls: type[Ceil], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Ceil: +1386 cls._check_symbol(symbol) +1387 op_sort, sort = sorts +1388 (pattern,) = patterns +1389 return Ceil(op_sort=op_sort, sort=sort, pattern=pattern)
+ +1390 +1391 @classmethod +1392 def _from_dict(cls: type[Ceil], dct: Mapping[str, Any], patterns: list[Pattern]) -> Ceil: +1393 (pattern,) = patterns +1394 return Ceil( +1395 op_sort=Sort.from_dict(dct['argSort']), +1396 sort=Sort.from_dict(dct['sort']), +1397 pattern=pattern, +1398 )
+ +1399 +1400 +
+[docs] +1401@final +1402@dataclass(frozen=True) +1403class Floor(RoundPred): +1404 op_sort: Sort +1405 sort: Sort +1406 pattern: Pattern +1407 +
+[docs] +1408 def let( +1409 self, +1410 *, +1411 op_sort: Sort | None = None, +1412 sort: Sort | None = None, +1413 pattern: Pattern | None = None, +1414 ) -> Floor: +1415 op_sort = op_sort if op_sort is not None else self.op_sort +1416 sort = sort if sort is not None else self.sort +1417 pattern = pattern if pattern is not None else self.pattern +1418 return Floor(op_sort=op_sort, sort=sort, pattern=pattern)
+ +1419 +
+[docs] +1420 def let_sort(self, sort: Sort) -> Floor: +1421 return self.let(sort=sort)
+ +1422 +
+[docs] +1423 def let_patterns(self, patterns: Iterable[Pattern]) -> Floor: +1424 (pattern,) = patterns +1425 return self.let(pattern=pattern)
+ +1426 +1427 @classmethod +1428 def _tag(cls) -> str: +1429 return 'Floor' +1430 +
+[docs] +1431 @classmethod +1432 def symbol(cls) -> str: +1433 return '\\floor'
+ +1434 +
+[docs] +1435 @classmethod +1436 def of(cls: type[Floor], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Floor: +1437 cls._check_symbol(symbol) +1438 op_sort, sort = sorts +1439 (pattern,) = patterns +1440 return Floor(op_sort=op_sort, sort=sort, pattern=pattern)
+ +1441 +1442 @classmethod +1443 def _from_dict(cls: type[Floor], dct: Mapping[str, Any], patterns: list[Pattern]) -> Floor: +1444 (pattern,) = patterns +1445 return Floor( +1446 op_sort=Sort.from_dict(dct['argSort']), +1447 sort=Sort.from_dict(dct['sort']), +1448 pattern=pattern, +1449 )
+ +1450 +1451 +
+[docs] +1452class BinaryPred(MLPred): +1453 left: Pattern +1454 right: Pattern +1455 +1456 @property +1457 def sorts(self) -> tuple[Sort, Sort]: +1458 return (self.op_sort, self.sort) +1459 +1460 @property +1461 def patterns(self) -> tuple[Pattern, Pattern]: +1462 return (self.left, self.right) +1463 +1464 def _dict(self, dicts: list) -> dict[str, Any]: +1465 first, second = dicts +1466 return { +1467 'tag': self._tag(), +1468 'argSort': self.op_sort.dict, +1469 'sort': self.sort.dict, +1470 'first': first, +1471 'second': second, +1472 }
+ +1473 +1474 +
+[docs] +1475@final +1476@dataclass(frozen=True) +1477class Equals(BinaryPred): +1478 op_sort: Sort +1479 sort: Sort +1480 left: Pattern +1481 right: Pattern +1482 +
+[docs] +1483 def let( +1484 self, +1485 *, +1486 op_sort: Sort | None = None, +1487 sort: Sort | None = None, +1488 left: Pattern | None = None, +1489 right: Pattern | None = None, +1490 ) -> Equals: +1491 op_sort = op_sort if op_sort is not None else self.op_sort +1492 sort = sort if sort is not None else self.sort +1493 left = left if left is not None else self.left +1494 right = right if right is not None else self.right +1495 return Equals(op_sort=op_sort, sort=sort, left=left, right=right)
+ +1496 +
+[docs] +1497 def let_sort(self, sort: Sort) -> Equals: +1498 return self.let(sort=sort)
+ +1499 +
+[docs] +1500 def let_patterns(self, patterns: Iterable[Pattern]) -> Equals: +1501 left, right = patterns +1502 return self.let(left=left, right=right)
+ +1503 +1504 @classmethod +1505 def _tag(cls) -> str: +1506 return 'Equals' +1507 +
+[docs] +1508 @classmethod +1509 def symbol(cls) -> str: +1510 return '\\equals'
+ +1511 +
+[docs] +1512 @classmethod +1513 def of(cls: type[Equals], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Equals: +1514 cls._check_symbol(symbol) +1515 op_sort, sort = sorts +1516 left, right = patterns +1517 return Equals(op_sort=op_sort, sort=sort, left=left, right=right)
+ +1518 +1519 @classmethod +1520 def _from_dict(cls: type[Equals], dct: Mapping[str, Any], patterns: list[Pattern]) -> Equals: +1521 left, right = patterns +1522 return Equals( +1523 op_sort=Sort.from_dict(dct['argSort']), +1524 sort=Sort.from_dict(dct['sort']), +1525 left=left, +1526 right=right, +1527 )
+ +1528 +1529 +
+[docs] +1530@final +1531@dataclass(frozen=True) +1532class In(BinaryPred): +1533 op_sort: Sort +1534 sort: Sort +1535 left: Pattern +1536 right: Pattern +1537 +
+[docs] +1538 def let( +1539 self, +1540 *, +1541 op_sort: Sort | None = None, +1542 sort: Sort | None = None, +1543 left: Pattern | None = None, +1544 right: Pattern | None = None, +1545 ) -> In: +1546 op_sort = op_sort if op_sort is not None else self.op_sort +1547 sort = sort if sort is not None else self.sort +1548 left = left if left is not None else self.left +1549 right = right if right is not None else self.right +1550 return In(op_sort=op_sort, sort=sort, left=left, right=right)
+ +1551 +
+[docs] +1552 def let_sort(self, sort: Sort) -> In: +1553 return self.let(sort=sort)
+ +1554 +
+[docs] +1555 def let_patterns(self, patterns: Iterable[Pattern]) -> In: +1556 left, right = patterns +1557 return self.let(left=left, right=right)
+ +1558 +1559 @classmethod +1560 def _tag(cls) -> str: +1561 return 'In' +1562 +
+[docs] +1563 @classmethod +1564 def symbol(cls) -> str: +1565 return '\\in'
+ +1566 +
+[docs] +1567 @classmethod +1568 def of(cls: type[In], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> In: +1569 cls._check_symbol(symbol) +1570 op_sort, sort = sorts +1571 left, right = patterns +1572 return In(op_sort=op_sort, sort=sort, left=left, right=right)
+ +1573 +1574 @classmethod +1575 def _from_dict(cls: type[In], dct: Mapping[str, Any], patterns: list[Pattern]) -> In: +1576 left, right = patterns +1577 return In( +1578 op_sort=Sort.from_dict(dct['argSort']), +1579 sort=Sort.from_dict(dct['sort']), +1580 left=left, +1581 right=right, +1582 )
+ +1583 +1584 +
+[docs] +1585class MLRewrite(MLPattern, WithSort): +1586 @property +1587 def sorts(self) -> tuple[Sort]: +1588 return (self.sort,)
+ +1589 +1590 + + +1639 +1640 +
+[docs] +1641@final +1642@dataclass(frozen=True) +1643class Rewrites(MLRewrite): +1644 sort: Sort +1645 left: Pattern +1646 right: Pattern +1647 +
+[docs] +1648 def let( +1649 self, +1650 *, +1651 sort: Sort | None = None, +1652 left: Pattern | None = None, +1653 right: Pattern | None = None, +1654 ) -> Rewrites: +1655 sort = sort if sort is not None else self.sort +1656 left = left if left is not None else self.left +1657 right = right if right is not None else self.right +1658 return Rewrites(sort=sort, left=left, right=right)
+ +1659 +
+[docs] +1660 def let_sort(self, sort: Sort) -> Rewrites: +1661 return self.let(sort=sort)
+ +1662 +
+[docs] +1663 def let_patterns(self, patterns: Iterable[Pattern]) -> Rewrites: +1664 left, right = patterns +1665 return self.let(left=left, right=right)
+ +1666 +1667 @classmethod +1668 def _tag(cls) -> str: +1669 return 'Rewrites' +1670 +
+[docs] +1671 @classmethod +1672 def symbol(cls) -> str: +1673 return '\\rewrites'
+ +1674 +
+[docs] +1675 @classmethod +1676 def of(cls: type[Rewrites], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> Rewrites: +1677 cls._check_symbol(symbol) +1678 (sort,) = sorts +1679 left, right = patterns +1680 return Rewrites(sort=sort, left=left, right=right)
+ +1681 +1682 @classmethod +1683 def _from_dict(cls: type[Rewrites], dct: Mapping[str, Any], patterns: list[Pattern]) -> Rewrites: +1684 left, right = patterns +1685 return Rewrites( +1686 sort=Sort.from_dict(dct['sort']), +1687 left=left, +1688 right=right, +1689 ) +1690 +1691 @property +1692 def patterns(self) -> tuple[Pattern, Pattern]: +1693 return (self.left, self.right) +1694 +1695 def _dict(self, dicts: list) -> dict[str, Any]: +1696 source, dest = dicts +1697 return { +1698 'tag': 'Rewrites', +1699 'sort': self.sort.dict, +1700 'source': source, +1701 'dest': dest, +1702 }
+ +1703 +1704 +
+[docs] +1705@final +1706@dataclass(frozen=True) +1707class DV(MLPattern, WithSort): +1708 sort: Sort +1709 value: String # TODO Should this be changed to str? +1710 +
+[docs] +1711 def let(self, *, sort: Sort | None = None, value: String | None = None) -> DV: +1712 sort = sort if sort is not None else self.sort +1713 value = value if value is not None else self.value +1714 return DV(sort=sort, value=value)
+ +1715 +
+[docs] +1716 def let_sort(self, sort: Sort) -> DV: +1717 return self.let(sort=sort)
+ +1718 +
+[docs] +1719 def let_patterns(self, patterns: Iterable[Pattern]) -> DV: +1720 () = patterns +1721 return self
+ +1722 +1723 @classmethod +1724 def _tag(cls) -> str: +1725 return 'DV' +1726 +
+[docs] +1727 @classmethod +1728 def symbol(cls) -> str: +1729 return '\\dv'
+ +1730 +
+[docs] +1731 @classmethod +1732 def of(cls: type[DV], symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) -> DV: +1733 cls._check_symbol(symbol) +1734 (sort,) = sorts +1735 (value,) = patterns +1736 value = check_type(value, String) +1737 return DV(sort=sort, value=value)
+ +1738 +1739 @classmethod +1740 def _from_dict(cls: type[DV], dct: Mapping[str, Any], patterns: list[Pattern]) -> DV: +1741 assert not patterns +1742 return DV( +1743 sort=Sort.from_dict(dct['sort']), +1744 value=String(dct['value']), +1745 ) +1746 +1747 @property +1748 def sorts(self) -> tuple[Sort]: +1749 return (self.sort,) +1750 +1751 @property +1752 def patterns(self) -> tuple[()]: +1753 return () +1754 +1755 @property +1756 def ctor_patterns(self) -> tuple[String]: +1757 return (self.value,) +1758 +1759 def _dict(self, dicts: list) -> dict[str, Any]: +1760 assert not dicts +1761 return {'tag': 'DV', 'sort': self.sort.dict, 'value': self.value.value}
+ +1762 +1763 +1764ML_SYMBOLS: Final = { +1765 r'\top': Top, +1766 r'\bottom': Bottom, +1767 r'\not': Not, +1768 r'\and': And, +1769 r'\or': Or, +1770 r'\implies': Implies, +1771 r'\iff': Iff, +1772 r'\exists': Exists, +1773 r'\forall': Forall, +1774 r'\mu': Mu, +1775 r'\nu': Nu, +1776 r'\ceil': Ceil, +1777 r'\floor': Floor, +1778 r'\equals': Equals, +1779 r'\in': In, +1780 r'\next': Next, +1781 r'\rewrites': Rewrites, +1782 r'\dv': DV, +1783} +1784 +1785 +
+[docs] +1786class WithAttrs(ABC): +1787 attrs: tuple[App, ...] +1788 +
+[docs] +1789 @abstractmethod +1790 def let_attrs(self: WA, attrs: Iterable[App]) -> WA: ...
+ +1791 +
+[docs] +1792 def map_attrs(self: WA, f: Callable[[tuple[App, ...]], Iterable[App]]) -> WA: +1793 return self.let_attrs(f(self.attrs))
+ +1794 +1795 @cached_property +1796 def attrs_by_key(self) -> FrozenDict[str, App]: +1797 return FrozenDict({attr.symbol: attr for attr in self.attrs})
+ +1798 +1799 +
+[docs] +1800class Sentence(Kore, WithAttrs): ...
+ +1801 +1802 +
+[docs] +1803@final +1804@dataclass(frozen=True) +1805class Import(Sentence): +1806 module_name: str +1807 attrs: tuple[App, ...] +1808 +1809 def __init__(self, module_name: str | Id, attrs: Iterable[App] = ()): +1810 if isinstance(module_name, str): +1811 module_name = Id(module_name) +1812 +1813 object.__setattr__(self, 'module_name', module_name.value) +1814 object.__setattr__(self, 'attrs', tuple(attrs)) +1815 +
+[docs] +1816 def let(self, *, module_name: str | Id | None = None, attrs: Iterable[App] | None = None) -> Import: +1817 module_name = module_name if module_name is not None else self.module_name +1818 attrs = attrs if attrs is not None else self.attrs +1819 return Import(module_name=module_name, attrs=attrs)
+ +1820 +
+[docs] +1821 def let_attrs(self: Import, attrs: Iterable[App]) -> Import: +1822 return self.let(attrs=attrs)
+ +1823 +
+[docs] +1824 def write(self, output: IO[str]) -> None: +1825 output.write('import ') +1826 output.write(self.module_name) +1827 output.write(' [') +1828 _write_sep_by_comma(self.attrs, output) +1829 output.write(']')
+
+ +1830 +1831 +
+[docs] +1832@final +1833@dataclass(frozen=True) +1834class SortDecl(Sentence): +1835 name: str +1836 vars: tuple[SortVar, ...] +1837 attrs: tuple[App, ...] +1838 hooked: bool +1839 +1840 def __init__( +1841 self, +1842 name: str | Id, +1843 vars: Iterable[SortVar], +1844 attrs: Iterable[App] = (), +1845 *, +1846 hooked: bool = False, +1847 ): +1848 if isinstance(name, str): +1849 name = Id(name) +1850 +1851 object.__setattr__(self, 'name', name.value) +1852 object.__setattr__(self, 'vars', tuple(vars)) +1853 object.__setattr__(self, 'attrs', tuple(attrs)) +1854 object.__setattr__(self, 'hooked', hooked) +1855 +
+[docs] +1856 def let( +1857 self, +1858 *, +1859 name: str | Id | None = None, +1860 vars: Iterable[SortVar] | None = None, +1861 attrs: Iterable[App] | None = None, +1862 hooked: bool | None = None, +1863 ) -> SortDecl: +1864 name = name if name is not None else self.name +1865 vars = vars if vars is not None else self.vars +1866 attrs = attrs if attrs is not None else self.attrs +1867 hooked = hooked if hooked is not None else self.hooked +1868 return SortDecl(name=name, vars=vars, attrs=attrs, hooked=hooked)
+ +1869 +
+[docs] +1870 def let_attrs(self: SortDecl, attrs: Iterable[App]) -> SortDecl: +1871 return self.let(attrs=attrs)
+ +1872 +
+[docs] +1873 def write(self, output: IO[str]) -> None: +1874 keyword = 'hooked-sort ' if self.hooked else 'sort ' +1875 output.write(keyword) +1876 output.write(self.name) +1877 output.write('{') +1878 _write_sep_by_comma(self.vars, output) +1879 output.write('} [') +1880 _write_sep_by_comma(self.attrs, output) +1881 output.write(']')
+
+ +1882 +1883 +
+[docs] +1884@final +1885@dataclass(frozen=True) +1886class Symbol(Kore): +1887 name: str +1888 vars: tuple[SortVar, ...] +1889 +1890 def __init__(self, name: str | SymbolId, vars: Iterable[SortVar] = ()): +1891 if isinstance(name, str): +1892 name = SymbolId(name) +1893 +1894 object.__setattr__(self, 'name', name.value) +1895 object.__setattr__(self, 'vars', tuple(vars)) +1896 +
+[docs] +1897 def let(self, *, name: str | SymbolId | None = None, vars: Iterable[SortVar] | None = None) -> Symbol: +1898 name = name if name is not None else self.name +1899 vars = vars if vars is not None else self.vars +1900 return Symbol(name=name, vars=vars)
+ +1901 +
+[docs] +1902 def write(self, output: IO[str]) -> None: +1903 output.write(self.name) +1904 output.write('{') +1905 _write_sep_by_comma(self.vars, output) +1906 output.write('}')
+
+ +1907 +1908 +
+[docs] +1909@final +1910@dataclass(frozen=True) +1911class SymbolDecl(Sentence): +1912 symbol: Symbol +1913 param_sorts: tuple[Sort, ...] +1914 sort: Sort +1915 attrs: tuple[App, ...] +1916 hooked: bool +1917 +1918 def __init__( +1919 self, +1920 symbol: Symbol, +1921 param_sorts: Iterable[Sort], +1922 sort: Sort, +1923 attrs: Iterable[App] = (), +1924 *, +1925 hooked: bool = False, +1926 ): +1927 object.__setattr__(self, 'symbol', symbol) +1928 object.__setattr__(self, 'param_sorts', tuple(param_sorts)) +1929 object.__setattr__(self, 'sort', sort) +1930 object.__setattr__(self, 'attrs', tuple(attrs)) +1931 object.__setattr__(self, 'hooked', hooked) +1932 +
+[docs] +1933 def let( +1934 self, +1935 *, +1936 symbol: Symbol | None = None, +1937 param_sorts: Iterable[Sort] | None = None, +1938 sort: Sort | None = None, +1939 attrs: Iterable[App] | None = None, +1940 hooked: bool | None = None, +1941 ) -> SymbolDecl: +1942 symbol = symbol if symbol is not None else self.symbol +1943 param_sorts = param_sorts if param_sorts is not None else self.param_sorts +1944 sort = sort if sort is not None else self.sort +1945 attrs = attrs if attrs is not None else self.attrs +1946 hooked = hooked if hooked is not None else self.hooked +1947 return SymbolDecl(symbol=symbol, param_sorts=param_sorts, sort=sort, attrs=attrs, hooked=hooked)
+ +1948 +
+[docs] +1949 def let_attrs(self: SymbolDecl, attrs: Iterable[App]) -> SymbolDecl: +1950 return self.let(attrs=attrs)
+ +1951 +
+[docs] +1952 def write(self, output: IO[str]) -> None: +1953 keyword = 'hooked-symbol ' if self.hooked else 'symbol ' +1954 output.write(keyword) +1955 self.symbol.write(output) +1956 output.write('(') +1957 _write_sep_by_comma(self.param_sorts, output) +1958 output.write(') : ') +1959 self.sort.write(output) +1960 output.write(' [') +1961 _write_sep_by_comma(self.attrs, output) +1962 output.write(']')
+
+ +1963 +1964 +1965def _ml_symbol_decls() -> tuple[SymbolDecl, ...]: +1966 S = SortVar('S') # noqa: N806 +1967 T = SortVar('T') # noqa: N806 +1968 return ( +1969 SymbolDecl(Symbol(r'\top', (S,)), (), S), +1970 SymbolDecl(Symbol(r'\bottom', (S,)), (), S), +1971 SymbolDecl(Symbol(r'\not', (S,)), (S,), S), +1972 SymbolDecl(Symbol(r'\and', (S,)), (S, S), S), +1973 SymbolDecl(Symbol(r'\or', (S,)), (S, S), S), +1974 SymbolDecl(Symbol(r'\implies', (S,)), (S, S), S), +1975 SymbolDecl(Symbol(r'\iff', (S,)), (S, S), S), +1976 SymbolDecl(Symbol(r'\ceil', (S, T)), (S,), T), +1977 SymbolDecl(Symbol(r'\floor', (S, T)), (S,), T), +1978 SymbolDecl(Symbol(r'\equals', (S, T)), (S, S), T), +1979 SymbolDecl(Symbol(r'\in', (S, T)), (S, S), T), +1980 ) +1981 +1982 +1983ML_SYMBOL_DECLS: Final = _ml_symbol_decls() +1984 +1985 +
+[docs] +1986@final +1987@dataclass(frozen=True) +1988class AliasDecl(Sentence): +1989 alias: Symbol +1990 param_sorts: tuple[Sort, ...] +1991 sort: Sort +1992 left: App +1993 right: Pattern +1994 attrs: tuple[App, ...] +1995 +1996 def __init__( +1997 self, +1998 alias: Symbol, +1999 param_sorts: Iterable[Sort], +2000 sort: Sort, +2001 left: App, +2002 right: Pattern, +2003 attrs: Iterable[App] = (), +2004 ): +2005 object.__setattr__(self, 'alias', alias) +2006 object.__setattr__(self, 'param_sorts', tuple(param_sorts)) +2007 object.__setattr__(self, 'sort', sort) +2008 object.__setattr__(self, 'left', left) +2009 object.__setattr__(self, 'right', right) +2010 object.__setattr__(self, 'attrs', tuple(attrs)) +2011 +
+[docs] +2012 def let( +2013 self, +2014 *, +2015 alias: Symbol | None = None, +2016 param_sorts: Iterable[Sort] | None = None, +2017 sort: Sort | None = None, +2018 left: App | None = None, +2019 right: Pattern | None = None, +2020 attrs: Iterable[App] | None = None, +2021 ) -> AliasDecl: +2022 alias = alias if alias is not None else self.alias +2023 param_sorts = param_sorts if param_sorts is not None else self.param_sorts +2024 sort = sort if sort is not None else self.sort +2025 left = left if left is not None else self.left +2026 right = right if right is not None else self.right +2027 attrs = attrs if attrs is not None else self.attrs +2028 return AliasDecl(alias=alias, param_sorts=param_sorts, sort=sort, left=left, right=right, attrs=attrs)
+ +2029 +
+[docs] +2030 def let_attrs(self: AliasDecl, attrs: Iterable[App]) -> AliasDecl: +2031 return self.let(attrs=attrs)
+ +2032 +
+[docs] +2033 def write(self, output: IO[str]) -> None: +2034 output.write('alias ') +2035 self.alias.write(output) +2036 output.write('(') +2037 _write_sep_by_comma(self.param_sorts, output) +2038 output.write(') : ') +2039 self.sort.write(output) +2040 output.write(' where ') +2041 self.left.write(output) +2042 output.write(' := ') +2043 self.right.write(output) +2044 output.write(' [') +2045 _write_sep_by_comma(self.attrs, output) +2046 output.write(']')
+
+ +2047 +2048 +
+[docs] +2049class AxiomLike(Sentence): +2050 _label: ClassVar[str] +2051 +2052 vars: tuple[SortVar, ...] +2053 pattern: Pattern +2054 +
+[docs] +2055 def write(self, output: IO[str]) -> None: +2056 output.write(self._label) +2057 output.write('{') +2058 _write_sep_by_comma(self.vars, output) +2059 output.write('} ') +2060 self.pattern.write(output) +2061 output.write(' [') +2062 _write_sep_by_comma(self.attrs, output) +2063 output.write(']')
+
+ +2064 +2065 +
+[docs] +2066@final +2067@dataclass(frozen=True) +2068class Axiom(AxiomLike): +2069 _label = 'axiom' +2070 +2071 vars: tuple[SortVar, ...] +2072 pattern: Pattern +2073 attrs: tuple[App, ...] +2074 +2075 def __init__(self, vars: Iterable[SortVar], pattern: Pattern, attrs: Iterable[App] = ()): +2076 object.__setattr__(self, 'vars', tuple(vars)) +2077 object.__setattr__(self, 'pattern', pattern) +2078 object.__setattr__(self, 'attrs', tuple(attrs)) +2079 +
+[docs] +2080 def let( +2081 self, +2082 *, +2083 vars: Iterable[SortVar] | None = None, +2084 pattern: Pattern | None = None, +2085 attrs: Iterable[App] | None = None, +2086 ) -> Axiom: +2087 vars = vars if vars is not None else self.vars +2088 pattern = pattern if pattern is not None else self.pattern +2089 attrs = attrs if attrs is not None else self.attrs +2090 return Axiom(vars=vars, pattern=pattern, attrs=attrs)
+ +2091 +
+[docs] +2092 def let_attrs(self: Axiom, attrs: Iterable[App]) -> Axiom: +2093 return self.let(attrs=attrs)
+
+ +2094 +2095 +
+[docs] +2096@final +2097@dataclass(frozen=True) +2098class Claim(AxiomLike): +2099 _label = 'claim' +2100 +2101 vars: tuple[SortVar, ...] +2102 pattern: Pattern +2103 attrs: tuple[App, ...] +2104 +2105 def __init__(self, vars: Iterable[SortVar], pattern: Pattern, attrs: Iterable[App] = ()): +2106 object.__setattr__(self, 'vars', tuple(vars)) +2107 object.__setattr__(self, 'pattern', pattern) +2108 object.__setattr__(self, 'attrs', tuple(attrs)) +2109 +
+[docs] +2110 def let( +2111 self, +2112 *, +2113 vars: Iterable[SortVar] | None = None, +2114 pattern: Pattern | None = None, +2115 attrs: Iterable[App] | None = None, +2116 ) -> Claim: +2117 vars = vars if vars is not None else self.vars +2118 pattern = pattern if pattern is not None else self.pattern +2119 attrs = attrs if attrs is not None else self.attrs +2120 return Claim(vars=vars, pattern=pattern, attrs=attrs)
+ +2121 +
+[docs] +2122 def let_attrs(self: Claim, attrs: Iterable[App]) -> Claim: +2123 return self.let(attrs=attrs)
+
+ +2124 +2125 +
+[docs] +2126@final +2127@dataclass(frozen=True) +2128class Module(Kore, WithAttrs, Iterable[Sentence]): +2129 name: str +2130 sentences: tuple[Sentence, ...] +2131 attrs: tuple[App, ...] +2132 +2133 def __init__(self, name: str | Id, sentences: Iterable[Sentence] = (), attrs: Iterable[App] = ()): +2134 if isinstance(name, str): +2135 name = Id(name) +2136 +2137 object.__setattr__(self, 'name', name.value) +2138 object.__setattr__(self, 'sentences', tuple(sentences)) +2139 object.__setattr__(self, 'attrs', tuple(attrs)) +2140 +2141 def __iter__(self) -> Iterator[Sentence]: +2142 return iter(self.sentences) +2143 +
+[docs] +2144 def let( +2145 self, +2146 *, +2147 name: str | Id | None = None, +2148 sentences: Iterable[Sentence] | None = None, +2149 attrs: Iterable[App] | None = None, +2150 ) -> Module: +2151 name = name if name is not None else self.name +2152 sentences = sentences if sentences is not None else self.sentences +2153 attrs = attrs if attrs is not None else self.attrs +2154 return Module(name=name, sentences=sentences, attrs=attrs)
+ +2155 +
+[docs] +2156 def let_attrs(self: Module, attrs: Iterable[App]) -> Module: +2157 return self.let(attrs=attrs)
+ +2158 +
+[docs] +2159 def write(self, output: IO[str]) -> None: +2160 output.write('module ') +2161 output.write(self.name) +2162 for sentence in self.sentences: +2163 output.write('\n ') +2164 sentence.write(output) +2165 output.write('\nendmodule [') +2166 _write_sep_by_comma(self.attrs, output) +2167 output.write(']')
+ +2168 +2169 @cached_property +2170 def symbol_decls(self) -> tuple[SymbolDecl, ...]: +2171 return tuple(sentence for sentence in self if type(sentence) is SymbolDecl) +2172 +2173 @cached_property +2174 def axioms(self) -> tuple[Axiom, ...]: +2175 return tuple(sentence for sentence in self if type(sentence) is Axiom)
+ +2176 +2177 +
+[docs] +2178@final +2179@dataclass(frozen=True) +2180class Definition(Kore, WithAttrs, Iterable[Module]): +2181 modules: tuple[Module, ...] +2182 attrs: tuple[App, ...] +2183 +2184 def __init__(self, modules: Iterable[Module] = (), attrs: Iterable[App] = ()): +2185 object.__setattr__(self, 'modules', tuple(modules)) +2186 object.__setattr__(self, 'attrs', tuple(attrs)) +2187 +2188 def __iter__(self) -> Iterator[Module]: +2189 return iter(self.modules) +2190 +
+[docs] +2191 def let(self, *, modules: Iterable[Module] | None = None, attrs: Iterable[App] | None = None) -> Definition: +2192 modules = modules if modules is not None else self.modules +2193 attrs = attrs if attrs is not None else self.attrs +2194 return Definition(modules=modules, attrs=attrs)
+ +2195 +
+[docs] +2196 def let_attrs(self: Definition, attrs: Iterable[App]) -> Definition: +2197 return self.let(attrs=attrs)
+ +2198 +
+[docs] +2199 def write(self, output: IO[str]) -> None: +2200 output.write('[') +2201 _write_sep_by_comma(self.attrs, output) +2202 output.write(']') +2203 for module in self.modules: +2204 output.write('\n\n') +2205 module.write(output)
+ +2206 +2207 @cached_property +2208 def axioms(self) -> tuple[Axiom, ...]: +2209 return tuple(sent for module in self.modules for sent in module if isinstance(sent, Axiom)) +2210 +
+[docs] +2211 def get_axiom_by_ordinal(self, ordinal: int) -> Axiom: +2212 return self.axioms[ordinal]
+ +2213 +
+[docs] +2214 def compute_ordinals(self) -> Definition: +2215 new_modules = [] +2216 rule_ordinal = 0 +2217 for module in self.modules: +2218 new_sentences: list[Sentence] = [] +2219 for sentence in module.sentences: +2220 if type(sentence) is Axiom: +2221 ordinal_attr = App('ordinal', (), [String(str(rule_ordinal))]) +2222 new_sentence = sentence.let_attrs(sentence.attrs + (ordinal_attr,)) +2223 new_sentences.append(new_sentence) +2224 rule_ordinal += 1 +2225 else: +2226 new_sentences.append(sentence) +2227 new_modules.append(module.let(sentences=new_sentences)) +2228 +2229 new_definition = self.let(modules=new_modules) +2230 return new_definition
+
+ +2231 +2232 +
+[docs] +2233def kore_term(dct: Mapping[str, Any]) -> Pattern: +2234 if dct['format'] != 'KORE': +2235 raise ValueError(f"Invalid format: {dct['format']}") +2236 +2237 if dct['version'] != 1: +2238 raise ValueError(f"Invalid version: {dct['version']}") +2239 +2240 return Pattern.from_dict(dct['term'])
+ +
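The module above is the generated listing of pyk.kore.syntax. As a minimal usage sketch (not part of the generated file), the constructors shown can be combined to build a KORE predicate and render it; the result sort and variable name below are illustrative only:

from pyk.kore.syntax import DV, Equals, EVar, SortApp, String

# \equals{SortInt, SortGeneratedTopCell}(VarX:SortInt, \dv{SortInt}("1"))
pat = Equals(
    op_sort=SortApp('SortInt'),
    sort=SortApp('SortGeneratedTopCell'),   # illustrative result sort
    left=EVar('VarX', SortApp('SortInt')),
    right=DV(SortApp('SortInt'), String('1')),
)
print(pat.text)   # KORE text syntax
print(pat.dict)   # JSON-ready dict, the counterpart of Pattern.from_dict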
+ +
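Sentences, modules, and definitions compose the same way. A small sketch, again illustrative rather than part of the generated file, assembling a one-axiom definition and serializing it with the write() methods shown above:

import io

from pyk.kore.syntax import Axiom, Definition, Import, Module, SortVar, Top

s = SortVar('S')
axiom = Axiom(vars=(s,), pattern=Top(s))                           # axiom{S} \top{S}() []
module = Module('EXAMPLE', sentences=(Import('BASIC-K'), axiom))   # module name and import are illustrative
definition = Definition(modules=(module,))

out = io.StringIO()
definition.write(out)   # textual KORE
print(out.getvalue())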
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kore/tools.html b/pyk/_modules/pyk/kore/tools.html new file mode 100644 index 00000000000..98a08544882 --- /dev/null +++ b/pyk/_modules/pyk/kore/tools.html @@ -0,0 +1,190 @@ + + + + + + + + pyk.kore.tools — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.kore.tools

+ 1from __future__ import annotations
+ 2
+ 3from enum import Enum
+ 4from pathlib import Path
+ 5from typing import TYPE_CHECKING
+ 6
+ 7from ..cli.utils import check_dir_path, check_file_path
+ 8from ..utils import run_process_2
+ 9
+10if TYPE_CHECKING:
+11    from .syntax import Pattern
+12
+13
+
+[docs] +14class PrintOutput(Enum): +15 PRETTY = 'pretty' +16 PROGRAM = 'program' +17 KAST = 'kast' +18 BINARY = 'binary' +19 JSON = 'json' +20 LATEX = 'latex' +21 KORE = 'kore' +22 NONE = 'none'
+ +23 +24 +
+[docs] +25def kore_print( +26 pattern: str | Pattern, +27 *, +28 definition_dir: str | Path | None = None, +29 output_file: str | Path | None = None, +30 output: str | PrintOutput | None = None, +31 color: bool | None = None, +32) -> str: +33 input = pattern if isinstance(pattern, str) else pattern.text +34 if output is not None: +35 output = PrintOutput(output) +36 if output is PrintOutput.KORE: +37 return input +38 return _kore_print( +39 '/dev/stdin', +40 definition_dir=definition_dir, +41 output_file=output_file, +42 output=output, +43 color=color, +44 input=input, +45 )
+ +46 +47 +48def _kore_print( +49 input_file: str | Path, +50 *, +51 definition_dir: str | Path | None = None, +52 output_file: str | Path | None = None, +53 output: str | PrintOutput | None = None, +54 color: bool | None = None, +55 # --- +56 input: str | None, +57) -> str: +58 args = ['kore-print'] +59 +60 input_file = Path(input_file) +61 if not input_file.is_char_device(): +62 check_file_path(input_file) +63 args += [str(input_file)] +64 +65 if definition_dir is not None: +66 definition_dir = Path(definition_dir) +67 check_dir_path(definition_dir) +68 args += ['--definition', str(definition_dir)] +69 +70 if output_file is not None: +71 output_file = Path(output_file) +72 check_file_path(output_file) +73 args += ['--output_file', str(output_file)] +74 +75 if output is not None: +76 output = PrintOutput(output) +77 args += ['--output', output.value] +78 +79 if color is not None: +80 args += ['--color', 'on' if color else 'off'] +81 +82 run_res = run_process_2(args, input=input) +83 return run_res.stdout.strip() +
+ +
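A usage sketch for kore_print (not part of the generated file): it assumes a kompiled definition directory and the kore-print binary on PATH. The path below is a placeholder, and `pattern` is a pyk.kore.syntax.Pattern or a KORE text string obtained elsewhere:

from pyk.kore.tools import PrintOutput, kore_print

pretty = kore_print(pattern, definition_dir='imp-kompiled', output=PrintOutput.PRETTY)
print(pretty)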
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/kore_exec_covr/kore_exec_covr.html b/pyk/_modules/pyk/kore_exec_covr/kore_exec_covr.html new file mode 100644 index 00000000000..3e94a419920 --- /dev/null +++ b/pyk/_modules/pyk/kore_exec_covr/kore_exec_covr.html @@ -0,0 +1,217 @@ + + + + + + + + pyk.kore_exec_covr.kore_exec_covr — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +

+
+
+
+
+ +

Source code for pyk.kore_exec_covr.kore_exec_covr

+  1from __future__ import annotations
+  2
+  3import logging
+  4import re
+  5from collections import defaultdict
+  6from enum import Enum
+  7from typing import TYPE_CHECKING
+  8
+  9from ..kast import Atts
+ 10
+ 11if TYPE_CHECKING:
+ 12    from pathlib import Path
+ 13    from typing import Final
+ 14
+ 15    from ..kast.outer import KDefinition, KRule
+ 16
+ 17
+ 18_LOG_FORMAT: Final = '%(levelname)s %(name)s - %(message)s'
+ 19_LOGGER: Final = logging.getLogger(__name__)
+ 20
+ 21_HASKELL_LOG_ENTRY_REGEXP: Final = re.compile(r'(kore-exec|kore-rpc): \[\d*\] Debug \(([a-zA-Z]*)\):(.*)')
+ 22
+ 23
+
+[docs] + 24class HaskellLogEntry(Enum): + 25 DEBUG_APPLY_EQUATION = 'DebugApplyEquation' + 26 DEBUG_APPLIED_REWRITE_RULES = 'DebugAppliedRewriteRules'
+ + 27 + 28 +
+[docs] + 29def parse_rule_applications(haskell_backend_oneline_log_file: Path) -> dict[HaskellLogEntry, dict[str, int]]: + 30 """Process a one-line log file produced by K's Haskell backend. + 31 + 32 Extracts information about: + 33 + 34 - Applied rewrites (DebugAppliedRewriteRules). + 35 - Applied simplifications (DEBUG_APPLY_EQUATION). + 36 + 37 Note: + 38 Haskell backend logs often contain rule applications with empty locations. + 39 It seems likely that those are generated projection rules. + 40 We report their applications in bulk with UNKNOWN location. + 41 """ + 42 rewrites: dict[str, int] = defaultdict(int) + 43 simplifications: dict[str, int] = defaultdict(int) + 44 + 45 log_entries = haskell_backend_oneline_log_file.read_text().splitlines() + 46 for log_entry in log_entries: + 47 parsed = _parse_haskell_oneline_log(log_entry) + 48 if parsed: + 49 entry_type, location_str = parsed + 50 else: + 51 _LOGGER.warning(f'Skipping log entry, failed to parse: {log_entry}') + 52 continue + 53 if location_str == '': + 54 location_str = 'UNKNOWN' + 55 if entry_type == HaskellLogEntry.DEBUG_APPLIED_REWRITE_RULES: + 56 rewrites[location_str] += 1 + 57 else: + 58 assert entry_type == HaskellLogEntry.DEBUG_APPLY_EQUATION + 59 simplifications[location_str] += 1 + 60 return { + 61 HaskellLogEntry.DEBUG_APPLIED_REWRITE_RULES: rewrites, + 62 HaskellLogEntry.DEBUG_APPLY_EQUATION: simplifications, + 63 }
+ + 64 + 65 + 66def _parse_haskell_oneline_log(log_entry: str) -> tuple[HaskellLogEntry, str] | None: + 67    """Attempt to parse a one-line log string emitted by K's Haskell backend.""" + 68    matches = _HASKELL_LOG_ENTRY_REGEXP.match(log_entry) + 69    try: + 70        assert matches + 71        entry = matches.groups()[1] + 72        location_str = matches.groups()[2].strip() + 73        return HaskellLogEntry(entry), location_str + 74    except (AssertionError, KeyError, ValueError): + 75        return None + 76 + 77 +
+[docs] + 78def build_rule_dict( + 79 definition: KDefinition, *, skip_projections: bool = True, skip_initializers: bool = True + 80) -> dict[str, KRule]: + 81 """Traverse the kompiled definition and build a dictionary mapping str(file:location) to KRule.""" + 82 rule_dict: dict[str, KRule] = {} + 83 + 84 for rule in definition.rules: + 85 if skip_projections and Atts.PROJECTION in rule.att: + 86 continue + 87 if skip_initializers and Atts.INITIALIZER in rule.att: + 88 continue + 89 try: + 90 rule_source = rule.att[Atts.SOURCE] + 91 rule_location = rule.att[Atts.LOCATION] + 92 except KeyError: + 93 _LOGGER.warning(f'Skipping rule with no location information {str(rule.body):.100}...<truncated>') + 94 rule_source = None + 95 rule_location = None + 96 continue + 97 if rule_source and rule_location: + 98 rule_dict[f'{rule_source}:{_location_tuple_to_str(rule_location)}'] = rule + 99 else: +100 raise ValueError(str(rule.body)) +101 +102 return rule_dict
+ +103 +104 +105def _location_tuple_to_str(location: tuple[int, int, int, int]) -> str: +106 start_line, start_col, end_line, end_col = location +107 return f'{start_line}:{start_col}-{end_line}:{end_col}' +
+ +
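A short sketch of how the functions above might be driven (the log file path is a placeholder):

from pathlib import Path

from pyk.kore_exec_covr.kore_exec_covr import HaskellLogEntry, parse_rule_applications

counts = parse_rule_applications(Path('haskell-backend.oneline.log'))
rewrites = counts[HaskellLogEntry.DEBUG_APPLIED_REWRITE_RULES]
for location, hits in sorted(rewrites.items()):
    print(f'{location}: applied {hits} time(s)')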
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/krepl/repl.html b/pyk/_modules/pyk/krepl/repl.html new file mode 100644 index 00000000000..c97aa34f904 --- /dev/null +++ b/pyk/_modules/pyk/krepl/repl.html @@ -0,0 +1,326 @@ + + + + + + + + pyk.krepl.repl — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.krepl.repl

+  1from __future__ import annotations
+  2
+  3from abc import ABC, abstractmethod
+  4from argparse import ArgumentParser
+  5from dataclasses import dataclass
+  6from functools import cached_property
+  7from subprocess import CalledProcessError
+  8from typing import TYPE_CHECKING, Generic, TypeVar, final
+  9
+ 10from cmd2 import Cmd, with_argparser, with_category
+ 11
+ 12from ..cli.utils import check_dir_path, check_file_path, file_path
+ 13from ..kore.parser import KoreParser
+ 14from ..ktool.kprint import KPrint
+ 15from ..ktool.krun import KRun, KRunOutput, _krun
+ 16
+ 17if TYPE_CHECKING:
+ 18    from argparse import Namespace
+ 19    from collections.abc import Iterator
+ 20    from pathlib import Path
+ 21    from typing import Any, Final
+ 22
+ 23    from ..kore.syntax import Pattern
+ 24
+ 25T = TypeVar('T')
+ 26
+ 27
+
+[docs] + 28class Interpreter(Generic[T], ABC): + 29 def __iter__(self) -> Iterator[T]: + 30 state = self.init_state() + 31 while True: + 32 yield state + 33 state = self.next_state(state) + 34 +
+[docs] + 35 @abstractmethod + 36 def init_state(self) -> T: ...
+ + 37 +
+[docs] + 38 @abstractmethod + 39 def next_state(self, state: T, steps: int | None = None) -> T: ...
+
+ + 40 + 41 +
+[docs] + 42@final + 43@dataclass(frozen=True) + 44class KState: + 45 definition_dir: Path + 46 pattern: Pattern + 47 + 48 def __init__(self, definition_dir: Path, pattern: Pattern): + 49 definition_dir = definition_dir.resolve() + 50 check_dir_path(definition_dir) + 51 object.__setattr__(self, 'pattern', pattern) + 52 object.__setattr__(self, 'definition_dir', definition_dir) + 53 + 54 @cached_property + 55 def pretty(self) -> str: + 56 return KPrint(self.definition_dir).kore_to_pretty(self.pattern) + 57 + 58 def __str__(self) -> str: + 59 return self.pretty
+ + 60 + 61 +
+[docs] + 62class KInterpreter(Interpreter[KState]): + 63 definition_dir: Path + 64 program_file: Path + 65 + 66 def __init__(self, definition_dir: Path, program_file: Path) -> None: + 67 check_dir_path(definition_dir) + 68 check_file_path(program_file) + 69 self.definition_dir = definition_dir + 70 self.program_file = program_file + 71 +
+[docs] + 72 def init_state(self) -> KState: + 73 try: + 74 proc_res = _krun( + 75 input_file=self.program_file, + 76 definition_dir=self.definition_dir, + 77 output=KRunOutput.KORE, + 78 depth=0, + 79 ) + 80 except CalledProcessError as err: + 81 raise ReplError('Failed to load program') from err + 82 + 83 pattern = KoreParser(proc_res.stdout).pattern() + 84 return KState(self.definition_dir, pattern)
+ + 85 +
+[docs] + 86 def next_state(self, state: KState, steps: int | None = None) -> KState: + 87 pattern = KRun(self.definition_dir).run_pattern(state.pattern, depth=steps) + 88 return KState(self.definition_dir, pattern)
+
+ + 89 + 90 + 91def _step_parser() -> ArgumentParser: + 92 parser = ArgumentParser(description='Execute steps in the program') + 93 parser.add_argument('steps', type=int, nargs='?', default=1, metavar='STEPS', help='number of steps to take') + 94 return parser + 95 + 96 + 97def _show_parser() -> ArgumentParser: + 98 return ArgumentParser(description='Show the current configuration') + 99 +100 +
+[docs] +101class ReplError(Exception): ...
+ +102 +103 +
+[docs] +104class BaseRepl(Cmd, Generic[T], ABC): +105 CAT_DEBUG: Final = 'Debugger Commands' +106 CAT_BUILTIN: Final = 'Built-in Commands' +107 +108 prompt = '> ' +109 +110 interpreter: Interpreter[T] | None +111 state: T | None +112 +113 def __init__(self) -> None: +114 super().__init__(allow_cli_args=False) +115 self.default_category = self.CAT_BUILTIN +116 +117 self.interpreter = None +118 self.state = None +119 +
+[docs] +120 @abstractmethod +121 def do_load(self, args: Any) -> bool | None: # Leaky abstraction - make extension mechanism more robust +122 """Set up the interpreter. +123 +124 Subclasses are expected to +125 +126 - Decorate the method with `with_argparser` to ensure the right set of arguments is parsed. +127 - Instantiate an `Interpreter[T]` based on `args`, then set `self.interpreter`. +128 - Set `self.state` to `self.interpreter.init_state()`. +129 """ +130 ...
+ +131 +
+[docs] +132 @with_argparser(_step_parser()) +133 @with_category(CAT_DEBUG) +134 def do_step(self, args: Namespace) -> None: +135 try: +136 interpreter, state = self._check_state() +137 self._check_steps(args.steps) +138 self.state = interpreter.next_state(state, args.steps) +139 except ReplError as err: +140 self.poutput(err)
+ +141 +
+[docs] +142 @with_argparser(_show_parser()) +143 @with_category(CAT_DEBUG) +144 def do_show(self, args: Namespace) -> None: +145 try: +146 _, state = self._check_state() +147 self.poutput(state) +148 except ReplError as err: +149 self.poutput(err)
+ +150 +151 def _check_state(self) -> tuple[Interpreter, T]: +152 if self.interpreter is None: +153 raise ReplError('No program is loaded') +154 assert self.state is not None +155 return self.interpreter, self.state +156 +157 def _check_steps(self, steps: int | None = None) -> None: +158 if steps and steps < 0: +159 raise ReplError('Depth should be non-negative')
+ +160 +161 +162def _load_parser() -> ArgumentParser: +163 parser = ArgumentParser(description='Load a program') +164 parser.add_argument('program', type=file_path, metavar='PROGRAM', help='program to load') +165 return parser +166 +167 +
+[docs] +168class KRepl(BaseRepl[KState]): +169 intro = 'K-REPL Shell\nType "help" or "?" for more information.' +170 +171 def __init__(self, definition_dir: Path): +172 check_dir_path(definition_dir) +173 super().__init__() +174 self.definition_dir = definition_dir +175 +
+[docs] +176 @with_argparser(_load_parser()) +177 @with_category(BaseRepl.CAT_DEBUG) +178 def do_load(self, args: Namespace) -> None: +179 try: +180 self.interpreter = KInterpreter(self.definition_dir, args.program) +181 self.state = self.interpreter.init_state() +182 except ReplError as err: +183 self.poutput(err)
+
+ +
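A sketch of starting the REPL defined above; the definition directory is a placeholder, and cmdloop() comes from the cmd2 base class:

from pathlib import Path

from pyk.krepl.repl import KRepl

KRepl(Path('imp-kompiled')).cmdloop()
# at the prompt: `load <program>`, then `step [N]` and `show`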
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/ktool/claim_index.html b/pyk/_modules/pyk/ktool/claim_index.html new file mode 100644 index 00000000000..5c602f1a670 --- /dev/null +++ b/pyk/_modules/pyk/ktool/claim_index.html @@ -0,0 +1,304 @@ + + + + + + + + pyk.ktool.claim_index — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.ktool.claim_index

+  1from __future__ import annotations
+  2
+  3from collections.abc import Mapping
+  4from dataclasses import dataclass
+  5from functools import partial
+  6from graphlib import TopologicalSorter
+  7from typing import TYPE_CHECKING
+  8
+  9from ..kast import Atts
+ 10from ..kast.outer import KClaim
+ 11from ..utils import FrozenDict, unique
+ 12
+ 13if TYPE_CHECKING:
+ 14    from collections.abc import Container, Iterable, Iterator
+ 15
+ 16    from ..kast.outer import KFlatModule, KFlatModuleList
+ 17
+ 18
+
+[docs] + 19@dataclass(frozen=True) + 20class ClaimIndex(Mapping[str, KClaim]): + 21 claims: FrozenDict[str, KClaim] + 22 main_module_name: str | None + 23 + 24 def __init__( + 25 self, + 26 claims: Mapping[str, KClaim], + 27 main_module_name: str | None = None, + 28 ): + 29 self._validate(claims) + 30 object.__setattr__(self, 'claims', FrozenDict(claims)) + 31 object.__setattr__(self, 'main_module_name', main_module_name) + 32 +
+[docs] + 33 @staticmethod + 34 def from_module_list(module_list: KFlatModuleList) -> ClaimIndex: + 35 module_list = ClaimIndex._resolve_depends(module_list) + 36 return ClaimIndex( + 37 claims={claim.label: claim for module in module_list.modules for claim in module.claims}, + 38 main_module_name=module_list.main_module, + 39 )
+ + 40 + 41 @staticmethod + 42 def _validate(claims: Mapping[str, KClaim]) -> None: + 43 for label, claim in claims.items(): + 44 if claim.label != label: + 45 raise ValueError(f'Claim label mismatch, expected: {label}, found: {claim.label}') + 46 + 47 for depend in claim.dependencies: + 48 if depend not in claims: + 49 raise ValueError(f'Invalid dependency label: {depend}') + 50 + 51 @staticmethod + 52 def _resolve_depends(module_list: KFlatModuleList) -> KFlatModuleList: + 53 """Resolve each depends value relative to the module the claim belongs to. + 54 + 55 Example: + 56 ``` + 57 module THIS-MODULE + 58 claim ... [depends(foo,OTHER-MODULE.bar)] + 59 endmodule + 60 ``` + 61 + 62 becomes + 63 + 64 ``` + 65 module THIS-MODULE + 66 claim ... [depends(THIS-MODULE.foo,OTHER-MODULE.bar)] + 67 endmodule + 68 ``` + 69 """ + 70 labels = {claim.label for module in module_list.modules for claim in module.claims} + 71 + 72 def resolve_claim_depends(module_name: str, claim: KClaim) -> KClaim: + 73 depends = claim.dependencies + 74 if not depends: + 75 return claim + 76 + 77 resolve = partial(ClaimIndex._resolve_claim_label, labels, module_name) + 78 resolved = [resolve(label) for label in depends] + 79 return claim.let(att=claim.att.update([Atts.DEPENDS(','.join(resolved))])) + 80 + 81 modules: list[KFlatModule] = [] + 82 for module in module_list.modules: + 83 resolve_depends = partial(resolve_claim_depends, module.name) + 84 module = module.map_sentences(resolve_depends, of_type=KClaim) + 85 modules.append(module) + 86 + 87 return module_list.let(modules=modules) + 88 + 89 @staticmethod + 90 def _resolve_claim_label(labels: Container[str], module_name: str | None, label: str) -> str: + 91 """Resolve `label` to a valid label in `labels`, or raise. + 92 + 93 If a `label` is not found and `module_name` is set, the label is tried after qualifying. + 94 """ + 95 if label in labels: + 96 return label + 97 + 98 if module_name is not None: + 99 qualified = f'{module_name}.{label}' +100 if qualified in labels: +101 return qualified +102 +103 raise ValueError(f'Claim label not found: {label}') +104 +105 def __iter__(self) -> Iterator[str]: +106 return iter(self.claims) +107 +108 def __len__(self) -> int: +109 return len(self.claims) +110 +111 def __getitem__(self, label: str) -> KClaim: +112 try: +113 label = self.resolve(label) +114 except ValueError: +115 raise KeyError(f'Claim not found: {label}') from None +116 return self.claims[label] +117 +
+[docs] +118 def resolve(self, label: str) -> str: +119 return self._resolve_claim_label(self.claims, self.main_module_name, label)
+ +120 +
+[docs] +121 def resolve_all(self, labels: Iterable[str]) -> list[str]: +122 return [self.resolve(label) for label in unique(labels)]
+ +123 +
+[docs] +124 def labels( +125 self, +126 *, +127 include: Iterable[str] | None = None, +128 exclude: Iterable[str] | None = None, +129 with_depends: bool = True, +130 ordered: bool = False, +131 ) -> list[str]: +132 """Return a list of labels from the index. +133 +134 Args: +135 include: Labels to include in the result. If `None`, all labels are included. +136 exclude: Labels to exclude from the result. If `None`, no labels are excluded. +137 Takes precedence over `include`. +138 with_depends: If `True`, the result is transitively closed w.r.t. the dependency relation. +139 Labels in `exclude` are pruned, and their dependencies are not considered on the given path. +140 ordered: If `True`, the result is topologically sorted w.r.t. the dependency relation. +141 +142 Returns: +143 A list of labels from the index. +144 +145 Raises: +146 ValueError: If an item in `include` or `exclude` cannot be resolved to a valid label. +147 """ +148 include = self.resolve_all(include) if include is not None else self.claims +149 exclude = self.resolve_all(exclude) if exclude is not None else [] +150 +151 labels: list[str] +152 +153 if with_depends: +154 labels = self._close_dependencies(labels=include, prune=exclude) +155 else: +156 labels = [label for label in include if label not in set(exclude)] +157 +158 if ordered: +159 return self._sort_topologically(labels) +160 +161 return labels
+ +162 +163 def _close_dependencies(self, labels: Iterable[str], prune: Iterable[str]) -> list[str]: +164 res: list[str] = [] +165 +166 pending = list(labels) +167 done = set(prune) +168 +169 while pending: +170 label = pending.pop(0) # BFS +171 +172 if label in done: +173 continue +174 +175 res.append(label) +176 pending += self.claims[label].dependencies +177 done.add(label) +178 +179 return res +180 +181 def _sort_topologically(self, labels: list[str]) -> list[str]: +182 label_set = set(labels) +183 graph = { +184 label: [dep for dep in claim.dependencies if dep in label_set] +185 for label, claim in self.claims.items() +186 if label in labels +187 } +188 return list(TopologicalSorter(graph).static_order())
+ +
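A sketch of typical ClaimIndex use, assuming a KFlatModuleList has already been parsed (for example via KProve.parse_modules); the claim label is illustrative:

from pyk.ktool.claim_index import ClaimIndex

index = ClaimIndex.from_module_list(module_list)   # module_list: a parsed KFlatModuleList
labels = index.labels(include=['SPEC.main-claim'], with_depends=True, ordered=True)
claims = [index[label] for label in labels]        # claims in dependency order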
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/ktool/claim_loader.html b/pyk/_modules/pyk/ktool/claim_loader.html new file mode 100644 index 00000000000..1da876d1178 --- /dev/null +++ b/pyk/_modules/pyk/ktool/claim_loader.html @@ -0,0 +1,228 @@ + + + + + + + + pyk.ktool.claim_loader — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.ktool.claim_loader

+  1from __future__ import annotations
+  2
+  3import json
+  4import logging
+  5from typing import TYPE_CHECKING, NamedTuple
+  6
+  7from ..kast.outer import KFlatModuleList
+  8from ..kast.utils import slurp_definitions
+  9from ..utils import hash_str
+ 10from .claim_index import ClaimIndex
+ 11
+ 12if TYPE_CHECKING:
+ 13    from collections.abc import Iterable, Mapping
+ 14    from pathlib import Path
+ 15    from typing import Any, Final
+ 16
+ 17    from ..kast.outer import KClaim
+ 18    from . import TypeInferenceMode
+ 19    from .kprove import KProve
+ 20
+ 21
+ 22_LOGGER: Final = logging.getLogger(__name__)
+ 23
+ 24
+
+[docs] + 25class ClaimLoader: + 26 """Load and cache spec files as JSON.""" + 27 + 28 _kprove: KProve + 29 + 30 def __init__(self, kprove: KProve): + 31 self._kprove = kprove + 32 +
+[docs] + 33 def load_claims( + 34 self, + 35 spec_file: Path, + 36 *, + 37 spec_module_name: str | None = None, + 38 include_dirs: Iterable[Path] = (), + 39 md_selector: str | None = None, + 40 claim_labels: Iterable[str] | None = None, + 41 exclude_claim_labels: Iterable[str] | None = None, + 42 include_dependencies: bool = True, + 43 type_inference_mode: TypeInferenceMode | None = None, + 44 ) -> list[KClaim]: + 45 """Attempt to load a spec from JSON, write file on cache miss. + 46 + 47 Args: + 48 spec_file: Spec file to load. + 49 spec_module_name (optional): Spec module to load. + 50 include_dirs (optional): Includes. + 51 md_selector (optional): Selector expression for Markdown tags. + 52 claim_labels (optional): Claim labels to include in the result. + 53 exclude_claim_labels (optional): Claim labels to exclude from the result. + 54 include_dependencies (optional): If ``True``, claim dependencies are transitively included. + 55 type_inference_mode (optional): Type inference mode. + 56 """ + 57 _LOGGER.info(f'Loading spec file: {spec_file}') + 58 + 59 digest = self._digest(spec_file, include_dirs=include_dirs, md_selector=md_selector) + 60 _LOGGER.info(f'Calculated digest: {digest}') + 61 + 62 claim_file = spec_file.with_suffix('.json') + 63 + 64 cache_hit = False + 65 if claim_file.exists(): + 66 _LOGGER.info(f'Loading claim file: {claim_file}') + 67 module_list, loaded_digest = _ClaimModuleList.from_dict(json.loads(claim_file.read_text())) + 68 cache_hit = digest == loaded_digest + 69 + 70 if not cache_hit: + 71 _LOGGER.info('Generating claim modules') + 72 module_list = self._kprove.parse_modules( + 73 file_path=spec_file, + 74 module_name=spec_module_name, + 75 include_dirs=include_dirs, + 76 md_selector=md_selector, + 77 type_inference_mode=type_inference_mode, + 78 ) + 79 claim_module_list = _ClaimModuleList(module_list=module_list, digest=digest) + 80 _LOGGER.info(f'Writing claim file: {claim_file}') + 81 claim_file.write_text(json.dumps(claim_module_list.to_dict())) + 82 + 83 claim_index = ClaimIndex.from_module_list(module_list) + 84 + 85 labels = claim_index.labels( + 86 include=claim_labels, + 87 exclude=exclude_claim_labels, + 88 with_depends=include_dependencies, + 89 ) + 90 + 91 return [claim_index[label] for label in labels]
+ + 92 + 93 @staticmethod + 94 def _digest(spec_file: Path, *, include_dirs: Iterable[Path], md_selector: str | None) -> str: + 95 from .utils import K_DISTRIBUTION + 96 + 97 definitions = slurp_definitions( + 98 spec_file, + 99 include_dirs=list(include_dirs) + ([K_DISTRIBUTION.builtin_dir] if K_DISTRIBUTION else []), +100 md_selector=md_selector, +101 ) +102 definitions = {key: definitions[key] for key in sorted(definitions)} +103 return hash_str(definitions)
+ +104 +105 +106class _ClaimModuleList(NamedTuple): +107 module_list: KFlatModuleList +108 digest: str +109 +110 @staticmethod +111 def from_dict(dct: Mapping[str, Any]) -> _ClaimModuleList: +112 return _ClaimModuleList( +113 module_list=KFlatModuleList.from_dict(dct['moduleList']), +114 digest=dct['digest'], +115 ) +116 +117 def to_dict(self) -> dict[str, Any]: +118 return { +119 'moduleList': self.module_list.to_dict(), +120 'digest': self.digest, +121 } +
+ +
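A sketch of driving ClaimLoader, assuming an existing KProve instance; the spec path and claim label are placeholders:

from pathlib import Path

from pyk.ktool.claim_loader import ClaimLoader

claims = ClaimLoader(kprove).load_claims(          # kprove: an existing KProve instance
    Path('tests/specs/sum-spec.k'),
    claim_labels=['SUM-SPEC.sum'],
    include_dependencies=True,
)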
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/ktool/kfuzz.html b/pyk/_modules/pyk/ktool/kfuzz.html new file mode 100644 index 00000000000..d10f65645ff --- /dev/null +++ b/pyk/_modules/pyk/ktool/kfuzz.html @@ -0,0 +1,317 @@ + + + + + + + + pyk.ktool.kfuzz — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.ktool.kfuzz

+  1from __future__ import annotations
+  2
+  3from abc import ABC, abstractmethod
+  4from typing import TYPE_CHECKING
+  5
+  6from hypothesis import Phase, given, settings
+  7from hypothesis.strategies import fixed_dictionaries, integers
+  8
+  9from ..kore.parser import KoreParser
+ 10from ..kore.prelude import inj
+ 11from ..kore.syntax import DV, EVar, SortApp, String
+ 12from .krun import llvm_interpret_raw
+ 13
+ 14if TYPE_CHECKING:
+ 15    from collections.abc import Callable, Mapping
+ 16    from pathlib import Path
+ 17    from typing import Any
+ 18
+ 19    from hypothesis.strategies import SearchStrategy
+ 20
+ 21    from ..kore.syntax import Pattern
+ 22
+ 23
+
+[docs] + 24class KFuzzHandler(ABC): + 25    """Allows custom behavior (e.g. printing) during fuzzing for each test case and on a test failure. + 26 + 27    Can be passed to the `KFuzz` constructor or to :any:`fuzz` with the `handler` keyword argument. + 28    """ + 29 +
+[docs] + 30 @abstractmethod + 31 def handle_test(self, args: Mapping[EVar, Pattern]) -> None: + 32 """Handle each test case with the variable substitutions that are being used.""" + 33 ...
+ + 34 +
+[docs] + 35 @abstractmethod + 36 def handle_failure(self, args: Mapping[EVar, Pattern]) -> None: + 37 """Handle a test case failure, before the `AssertionError` is raised.""" + 38 ...
+
+ + 39 + 40 + 41class _KFuzzNullHandler(KFuzzHandler): + 42 def handle_test(self, args: Mapping[EVar, Pattern]) -> None: + 43 pass + 44 + 45 def handle_failure(self, args: Mapping[EVar, Pattern]) -> None: + 46 pass + 47 + 48 + 49_DEFAULT_HANDLER = _KFuzzNullHandler() + 50 + 51 +
+[docs] + 52class KFuzz: + 53 """Interface for fuzzing over property tests in K.""" + 54 + 55 definition_dir: Path + 56 handler: KFuzzHandler + 57 + 58 def __init__(self, definition_dir: Path, handler: KFuzzHandler = _DEFAULT_HANDLER) -> None: + 59 self.definition_dir = definition_dir + 60 self.handler = handler + 61 +
+[docs] + 62 def fuzz_with_check( + 63 self, + 64 template: Pattern, + 65 subst_strategy: dict[EVar, SearchStrategy[Pattern]], + 66 check_func: Callable[[Pattern], Any], + 67 **hypothesis_args: Any, + 68 ) -> None: + 69 """Fuzz over a property test using check_func to check for a passing test. + 70 + 71 See :any:`fuzz` for info on the parameters. + 72 """ + 73 fuzz( + 74 self.definition_dir, + 75 template, + 76 subst_strategy, + 77 check_func=check_func, + 78 handler=self.handler, + 79 **hypothesis_args, + 80 )
+ + 81 +
+[docs] + 82 def fuzz_with_exit_code( + 83 self, + 84 template: Pattern, + 85 subst_strategy: dict[EVar, SearchStrategy[Pattern]], + 86 **hypothesis_args: Any, + 87 ) -> None: + 88 """Fuzz over a property test using the exit code from the interpreter to check for a passing test. + 89 + 90 See :any:`fuzz` for info on the parameters. + 91 """ + 92 fuzz( + 93 self.definition_dir, + 94 template, + 95 subst_strategy, + 96 check_exit_code=True, + 97 handler=self.handler, + 98 **hypothesis_args, + 99 )
+
+ +100 +101 +
+[docs] +102def kintegers( +103 *, +104 min_value: int | None = None, +105 max_value: int | None = None, +106 with_inj: str | None = None, +107) -> SearchStrategy[Pattern]: +108 """Return a search strategy for K integers. +109 +110 Args: +111 min_value: Minimum value for the generated integers +112 max_value: Maximum value for the generated integers +113 with_inj: Return the integer as an injection into this sort +114 +115 Returns: +116 A strategy which generates integer domain values. +117 """ +118 +119 def int_dv(value: int) -> Pattern: +120 res: Pattern = DV(SortApp('SortInt'), value=String(str(value))) +121 if with_inj is not None: +122 res = inj(SortApp('SortInt'), SortApp(f'Sort{with_inj}'), res) +123 return res +124 +125 return integers(min_value=min_value, max_value=max_value).map(int_dv)
+ +126 +127 +
+[docs] +128def fuzz( +129 definition_dir: str | Path, +130 template: Pattern, +131 subst_strategy: dict[EVar, SearchStrategy[Pattern]], +132 *, +133 check_func: Callable[[Pattern], Any] | None = None, +134 check_exit_code: bool = False, +135 handler: KFuzzHandler = _DEFAULT_HANDLER, +136 **hypothesis_args: Any, +137) -> None: +138 """Fuzz a property test with concrete execution over a K term. +139 +140 Args: +141 definition_dir: The location of the K definition to run the interpreter for. +142 template: The term which will be sent to the interpreter after randomizing inputs. It should contain at least one variable which will be substituted for a value. +143 subst_strategy: Should have each variable in the template term mapped to a strategy for generating values for it. +144 check_func: Will be called on the kore output from the interpreter. +145 Should throw an AssertionError if it determines that the output indicates a test failure. +146 A RuntimeError will be thrown if this is passed as an argument and check_exit_code is True. +147 check_exit_code: Check the exit code of the interpreter for a test failure instead of using check_func. +148 An exit code of 0 indicates a passing test. +149 A RuntimeError will be thrown if this is True and check_func is also passed as an argument. +150 handler: An instance of a `KFuzzHandler` implementing custom behavior while fuzzing. +151 hypothesis_args: Keyword arguments that will be passed as settings for the hypothesis test. Defaults: +152 +153 deadline: 5000 +154 +155 phases: (Phase.explicit, Phase.reuse, Phase.generate) +156 +157 +158 Raises: +159 RuntimeError: If check_func exists and check_exit_code is set, or check_func doesn't exist and check_exit_code is cleared. +160 """ +161 if bool(check_func) == check_exit_code: +162 raise RuntimeError('Must pass one of check_func or check_exit_code, and not both!') +163 +164 def test(subst_case: Mapping[EVar, Pattern]) -> None: +165 def sub(p: Pattern) -> Pattern: +166 if isinstance(p, EVar) and p in subst_case: +167 return subst_case[p] +168 else: +169 return p +170 +171 handler.handle_test(subst_case) +172 test_pattern = template.top_down(sub) +173 res = llvm_interpret_raw(definition_dir, test_pattern.text, check=False) +174 +175 try: +176 if check_exit_code: +177 assert res.returncode == 0 +178 else: +179 assert check_func +180 res_pattern = KoreParser(res.stdout).pattern() +181 check_func(res_pattern) +182 except AssertionError: +183 handler.handle_failure(subst_case) +184 raise +185 +186 strat: SearchStrategy = fixed_dictionaries(subst_strategy) +187 +188 # Default settings for hypothesis +189 hypothesis_args.setdefault('deadline', 5000) +190 hypothesis_args.setdefault('phases', (Phase.explicit, Phase.reuse, Phase.generate)) +191 +192 given(strat)(settings(**hypothesis_args)(test))()
+ +
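A sketch of a fuzzing session with the API above; the definition directory, template pattern, and variable name are assumptions for illustration, and max_examples is forwarded to the Hypothesis settings:

from pyk.kore.syntax import EVar, SortApp
from pyk.ktool.kfuzz import KFuzz, kintegers

var_n = EVar('VarN', SortApp('SortInt'))
fuzzer = KFuzz(definition_dir)                     # definition_dir: Path to an LLVM-kompiled definition
fuzzer.fuzz_with_exit_code(
    template,                                      # a Pattern containing var_n, e.g. an initial configuration
    {var_n: kintegers(min_value=0, max_value=100)},
    max_examples=50,                               # forwarded to hypothesis settings
)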
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/ktool/kompile.html b/pyk/_modules/pyk/ktool/kompile.html new file mode 100644 index 00000000000..c4d1c48fe59 --- /dev/null +++ b/pyk/_modules/pyk/ktool/kompile.html @@ -0,0 +1,769 @@ + + + + + + + + pyk.ktool.kompile — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.ktool.kompile

+  1from __future__ import annotations
+  2
+  3__all__ = ['PykBackend', 'kompile']
+  4
+  5import concurrent.futures
+  6import dataclasses
+  7import logging
+  8import shlex
+  9import shutil
+ 10from abc import ABC, abstractmethod
+ 11from dataclasses import dataclass
+ 12from enum import Enum
+ 13from functools import cached_property
+ 14from pathlib import Path
+ 15from typing import TYPE_CHECKING, final
+ 16
+ 17from ..utils import abs_or_rel_to, check_dir_path, check_file_path, run_process_2, single
+ 18from . import TypeInferenceMode
+ 19
+ 20if TYPE_CHECKING:
+ 21    from collections.abc import Iterable, Mapping
+ 22    from fractions import Fraction
+ 23    from typing import Any, Final, Literal
+ 24
+ 25    from ..utils import BugReport
+ 26
+ 27_LOGGER: Final = logging.getLogger(__name__)
+ 28
+ 29
+ 30class KompileNotFoundError(RuntimeError):
+ 31    def __init__(self, kompile_command: str):
+ 32        super().__init__(f'Kompile command not found: {kompile_command}')
+ 33
+ 34
+
+[docs] + 35class PykBackend(Enum): + 36 LLVM = 'llvm' + 37 HASKELL = 'haskell' + 38 KORE = 'kore' + 39 MAUDE = 'maude' + 40 BOOSTER = 'booster'
+ + 41 + 42 + 43class Warnings(Enum): + 44 ALL = 'all' + 45 NORMAL = 'normal' + 46 NONE = 'none' + 47 + 48 +
+[docs] + 49def kompile( + 50 main_file: str | Path, + 51 *, + 52 backend: str | PykBackend | None = None, + 53 # --- + 54 command: Iterable[str] = ('kompile',), + 55 output_dir: str | Path | None = None, + 56 temp_dir: str | Path | None = None, + 57 type_inference_mode: str | TypeInferenceMode | None = None, + 58 warnings: str | Warnings | None = None, + 59 warnings_to_errors: bool = False, + 60 ignore_warnings: Iterable[str] = (), + 61 no_exc_wrap: bool = False, + 62 # --- + 63 debug: bool = False, + 64 verbose: bool = False, + 65 cwd: Path | None = None, + 66 check: bool = True, + 67 # --- + 68 **kwargs: Any, + 69) -> Path: + 70 kwargs['main_file'] = main_file + 71 + 72 pyk_backend = PykBackend(backend) if backend else None + 73 if pyk_backend is PykBackend.BOOSTER: + 74 return _booster_kompile( + 75 command=command, + 76 output_dir=output_dir, + 77 temp_dir=temp_dir, + 78 type_inference_mode=type_inference_mode, + 79 warnings=warnings, + 80 warnings_to_errors=warnings_to_errors, + 81 ignore_warnings=ignore_warnings, + 82 no_exc_wrap=no_exc_wrap, + 83 debug=debug, + 84 verbose=verbose, + 85 cwd=cwd, + 86 check=check, + 87 kwargs=kwargs, + 88 ) + 89 + 90 kwargs['backend'] = KompileBackend(pyk_backend.value) if pyk_backend else None + 91 + 92 kompiler = Kompile.from_dict(kwargs) + 93 return kompiler( + 94 command=command, + 95 output_dir=output_dir, + 96 temp_dir=temp_dir, + 97 type_inference_mode=type_inference_mode, + 98 warnings=warnings, + 99 warnings_to_errors=warnings_to_errors, +100 ignore_warnings=ignore_warnings, +101 no_exc_wrap=no_exc_wrap, +102 debug=debug, +103 verbose=verbose, +104 cwd=cwd, +105 check=check, +106 )
+ +107 +108 +109def _booster_kompile( +110 command: Iterable[str], +111 output_dir: str | Path | None, +112 temp_dir: str | Path | None, +113 type_inference_mode: str | TypeInferenceMode | None, +114 warnings: str | Warnings | None, +115 warnings_to_errors: bool, +116 ignore_warnings: Iterable[str], +117 no_exc_wrap: bool, +118 # --- +119 debug: bool, +120 verbose: bool, +121 cwd: Path | None, +122 check: bool, +123 # --- +124 kwargs: Mapping[str, Any], +125) -> Path: +126 llvm_kt = kwargs.get('llvm_kompile_type') +127 llvm_kt = LLVMKompileType(llvm_kt) if llvm_kt else None +128 if llvm_kt and llvm_kt is not LLVMKompileType.C: +129 raise ValueError(f'Unsupported argument value for Booster kompilation: llvm_kompile_type: {llvm_kt.value}') +130 +131 llvm_args, haskell_args = _group_args(kwargs) +132 +133 llvm_args['backend'] = KompileBackend.LLVM +134 llvm_args['llvm_kompile_type'] = LLVMKompileType.C +135 llvm_kompile = LLVMKompile.from_dict(llvm_args) +136 +137 haskell_args['backend'] = KompileBackend.HASKELL +138 haskell_kompile = HaskellKompile.from_dict(haskell_args) +139 +140 main_file = Path(kwargs['main_file']) +141 output_dir = Path(output_dir) if output_dir else _default_output_dir(main_file) +142 temp_dir = Path(temp_dir) if temp_dir else None +143 +144 def kompile_llvm() -> None: +145 llvm_kompile( +146 command=command, +147 output_dir=output_dir / 'llvm-library', +148 temp_dir=temp_dir / 'llvm-library' if temp_dir else None, +149 type_inference_mode=type_inference_mode, +150 warnings=warnings, +151 warnings_to_errors=warnings_to_errors, +152 ignore_warnings=ignore_warnings, +153 no_exc_wrap=no_exc_wrap, +154 debug=debug, +155 verbose=verbose, +156 cwd=cwd, +157 check=check, +158 ) +159 +160 def kompile_haskell() -> None: +161 haskell_kompile( +162 command=command, +163 output_dir=output_dir, +164 temp_dir=temp_dir, +165 type_inference_mode=type_inference_mode, +166 warnings=warnings, +167 warnings_to_errors=warnings_to_errors, +168 ignore_warnings=ignore_warnings, +169 no_exc_wrap=no_exc_wrap, +170 debug=debug, +171 verbose=verbose, +172 cwd=cwd, +173 check=check, +174 ) +175 +176 with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: +177 futures = [executor.submit(f) for f in [kompile_llvm, kompile_haskell]] +178 for future in concurrent.futures.as_completed(futures): +179 future.result() +180 +181 assert output_dir.is_dir() +182 return output_dir +183 +184 +185def _group_args(args: Mapping[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]: +186 llvm_args = {} +187 haskell_args = {} +188 +189 for arg, value in args.items(): +190 if arg in COMMON_ARGS: +191 llvm_args[arg] = value +192 haskell_args[arg] = value +193 elif arg in KompileBackend.LLVM.args: +194 llvm_args[arg] = value +195 elif arg in KompileBackend.HASKELL.args: +196 haskell_args[arg] = value +197 +198 return llvm_args, haskell_args +199 +200 +201# ----------- +202# kompile CLI +203# ----------- +204 +205 +206class KompileBackend(Enum): +207 LLVM = 'llvm' +208 HASKELL = 'haskell' +209 KORE = 'kore' +210 MAUDE = 'maude' +211 +212 @cached_property +213 def args(self) -> frozenset[str]: +214 match self: +215 case KompileBackend.LLVM: +216 return frozenset(field.name for field in dataclasses.fields(LLVMKompile) if field.name != 'base_args') +217 case KompileBackend.HASKELL: +218 return frozenset( +219 field.name for field in dataclasses.fields(HaskellKompile) if field.name != 'base_args' +220 ) +221 case _: +222 raise ValueError(f'Method not supported for backend: {self.value}') +223 +224 +225class 
Kompile(ABC): +226 base_args: KompileArgs +227 +228 @staticmethod +229 def default_directory() -> Path: +230 try: +231 return single(Path().glob('*-kompiled')) +232 except ValueError as err: +233 if len(err.args) == 1: +234 raise ValueError('Could not find `*-kompiled` directory, use --definition to specify one.') from err +235 else: +236 _, fst, snd = err.args +237 raise ValueError( +238 f'More than one `*-kompiled` directory found ({fst}, {snd}, ...), use `--definition` to specify one.' +239 ) from err +240 +241 @staticmethod +242 def from_dict(dct: Mapping[str, Any]) -> Kompile: +243 backend = KompileBackend(dct.get('backend') or 'llvm') +244 +245 common_args: dict[str, Any] = {} +246 backend_args: dict[str, Any] = {} +247 for key, value in dct.items(): +248 if key == 'backend': +249 continue +250 elif key in COMMON_ARGS: +251 common_args[key] = value +252 elif key in backend.args: +253 backend_args[key] = value +254 else: +255 raise ValueError(f'Unexpected argument for backend: {backend.value}: {key}={value!r}') +256 +257 base_args = KompileArgs(**common_args) +258 match backend: +259 case KompileBackend.HASKELL: +260 return HaskellKompile(base_args, **backend_args) +261 case KompileBackend.LLVM: +262 return LLVMKompile(base_args, **backend_args) +263 case KompileBackend.MAUDE: +264 return MaudeKompile(base_args, **backend_args) +265 case _: +266 raise ValueError(f'Unsupported backend: {backend.value}') +267 +268 @property +269 @abstractmethod +270 def backend(self) -> KompileBackend: ... +271 +272 def __call__( +273 self, +274 command: Iterable[str] | None = None, +275 *, +276 output_dir: str | Path | None = None, +277 temp_dir: str | Path | None = None, +278 type_inference_mode: str | TypeInferenceMode | None = None, +279 warnings: str | Warnings | None = None, +280 warnings_to_errors: bool = False, +281 ignore_warnings: Iterable[str] = (), +282 no_exc_wrap: bool = False, +283 debug: bool = False, +284 verbose: bool = False, +285 # --- +286 cwd: Path | None = None, +287 check: bool = True, +288 tool_mode: bool = False, +289 bug_report: BugReport | None = None, +290 outer_parsed_json: bool = False, +291 ) -> Path: +292 check_file_path(abs_or_rel_to(self.base_args.main_file, cwd or Path())) +293 for include_dir in self.base_args.include_dirs: +294 check_dir_path(abs_or_rel_to(include_dir, cwd or Path())) +295 +296 command = list(command) if command is not None else ['kompile'] +297 if not shutil.which(command[0]): +298 raise KompileNotFoundError(command[0]) +299 args = command + self.args() +300 +301 if output_dir is not None: +302 output_dir = Path(output_dir) +303 args += ['--output-definition', str(output_dir)] +304 +305 if temp_dir is not None: +306 temp_dir = Path(temp_dir) +307 args += ['--temp-dir', str(temp_dir)] +308 +309 if type_inference_mode is not None: +310 type_inference_mode = TypeInferenceMode(type_inference_mode) +311 args += ['--type-inference-mode', type_inference_mode.value] +312 +313 if warnings is not None: +314 warnings = Warnings(warnings) +315 args += ['--warnings', warnings.value] +316 +317 if warnings_to_errors: +318 args += ['--warnings-to-errors'] +319 +320 if no_exc_wrap: +321 args += ['--no-exc-wrap'] +322 +323 if debug: +324 args += ['--debug'] +325 +326 if verbose: +327 args += ['--verbose'] +328 +329 if outer_parsed_json: +330 args += ['--outer-parsed-json'] +331 +332 if ignore_warnings: +333 args += ['-Wno', ','.join(ignore_warnings)] +334 +335 proc_res = run_process_2( +336 args, +337 write_stdout=tool_mode, +338 write_stderr=tool_mode, +339 
logger=_LOGGER, +340 cwd=cwd, +341 check=check, +342 ) +343 +344 if bug_report and proc_res.stdout: +345 bug_report.add_file_contents(proc_res.stdout.rstrip(), Path('kompile.log')) +346 +347 definition_dir = output_dir if output_dir else _default_output_dir(self.base_args.main_file) +348 assert definition_dir.is_dir() +349 +350 return definition_dir +351 +352 @abstractmethod +353 def args(self) -> list[str]: ... +354 +355 +356def _default_output_dir(main_file: Path) -> Path: +357 return Path(main_file.stem + '-kompiled') +358 +359 +360@final +361@dataclass(frozen=True) +362class HaskellKompile(Kompile): +363 base_args: KompileArgs +364 concrete_rules: tuple[str, ...] +365 haskell_binary: bool +366 +367 def __init__(self, base_args: KompileArgs, *, concrete_rules: Iterable[str] = (), haskell_binary: bool = True): +368 concrete_rules = tuple(concrete_rules) +369 object.__setattr__(self, 'base_args', base_args) +370 object.__setattr__(self, 'concrete_rules', concrete_rules) +371 object.__setattr__(self, 'haskell_binary', haskell_binary) +372 +373 @property +374 def backend(self) -> Literal[KompileBackend.HASKELL]: +375 return KompileBackend.HASKELL +376 +377 def args(self) -> list[str]: +378 args = self.base_args.args() +379 args += ['--backend', 'haskell'] +380 +381 if self.concrete_rules: +382 args += ['--concrete-rules', ','.join(self.concrete_rules)] +383 +384 if not self.haskell_binary: +385 args += ['--no-haskell-binary'] +386 +387 return args +388 +389 +390@final +391@dataclass(frozen=True) +392class MaudeKompile(Kompile): +393 base_args: KompileArgs +394 +395 def __init__(self, base_args: KompileArgs): +396 object.__setattr__(self, 'base_args', base_args) +397 +398 @property +399 def backend(self) -> Literal[KompileBackend.MAUDE]: +400 return KompileBackend.MAUDE +401 +402 def args(self) -> list[str]: +403 args = self.base_args.args() +404 args += ['--backend', 'maude'] +405 +406 return args +407 +408 +409class LLVMKompileType(Enum): +410 MAIN = 'main' +411 SEARCH = 'search' +412 LIBRARY = 'library' +413 STATIC = 'static' +414 PYTHON = 'python' +415 C = 'c' +416 +417 +418@final +419@dataclass(frozen=True) +420class LLVMKompile(Kompile): +421 base_args: KompileArgs +422 llvm_kompile_type: LLVMKompileType | None +423 llvm_kompile_output: Path | None +424 opt_level: int +425 ccopts: tuple[str, ...] 
+426 no_llvm_kompile: bool +427 enable_search: bool +428 enable_llvm_debug: bool +429 llvm_proof_hint_instrumentation: bool +430 llvm_proof_hint_debugging: bool +431 llvm_hidden_visibility: bool +432 llvm_mutable_bytes: bool +433 iterated_threshold: Fraction | None +434 heuristic: str | None +435 +436 def __init__( +437 self, +438 base_args: KompileArgs, +439 *, +440 llvm_kompile_type: str | LLVMKompileType | None = None, +441 llvm_kompile_output: str | Path | None = None, +442 opt_level: int | None = None, +443 ccopts: Iterable[str] = (), +444 no_llvm_kompile: bool = False, +445 enable_search: bool = False, +446 enable_llvm_debug: bool = False, +447 llvm_proof_hint_instrumentation: bool = False, +448 llvm_proof_hint_debugging: bool = False, +449 llvm_hidden_visibility: bool = False, +450 llvm_mutable_bytes: bool = False, +451 iterated_threshold: Fraction | None = None, +452 heuristic: str | None = None, +453 ): +454 llvm_kompile_type = LLVMKompileType(llvm_kompile_type) if llvm_kompile_type is not None else None +455 llvm_kompile_output = Path(llvm_kompile_output) if llvm_kompile_output is not None else None +456 +457 opt_level = opt_level or 0 +458 if not (0 <= opt_level <= 3): +459 raise ValueError('Invalid optimization level: {opt_level}') +460 +461 ccopts = tuple(ccopts) +462 +463 object.__setattr__(self, 'base_args', base_args) +464 object.__setattr__(self, 'llvm_kompile_type', llvm_kompile_type) +465 object.__setattr__(self, 'llvm_kompile_output', llvm_kompile_output) +466 object.__setattr__(self, 'opt_level', opt_level) +467 object.__setattr__(self, 'ccopts', ccopts) +468 object.__setattr__(self, 'no_llvm_kompile', no_llvm_kompile) +469 object.__setattr__(self, 'enable_search', enable_search) +470 object.__setattr__(self, 'enable_llvm_debug', enable_llvm_debug) +471 object.__setattr__(self, 'llvm_proof_hint_instrumentation', llvm_proof_hint_instrumentation) +472 object.__setattr__(self, 'llvm_proof_hint_debugging', llvm_proof_hint_debugging) +473 object.__setattr__(self, 'llvm_hidden_visibility', llvm_hidden_visibility) +474 object.__setattr__(self, 'llvm_mutable_bytes', llvm_mutable_bytes) +475 object.__setattr__(self, 'iterated_threshold', iterated_threshold) +476 object.__setattr__(self, 'heuristic', heuristic) +477 +478 @property +479 def backend(self) -> Literal[KompileBackend.LLVM]: +480 return KompileBackend.LLVM +481 +482 def args(self) -> list[str]: +483 args = self.base_args.args() +484 args += ['--backend', 'llvm'] +485 +486 if self.llvm_kompile_type: +487 args += ['--llvm-kompile-type', self.llvm_kompile_type.value] +488 +489 if self.llvm_kompile_output is not None: +490 args += ['--llvm-kompile-output', str(self.llvm_kompile_output)] +491 +492 if self.opt_level: +493 args += [f'-O{self.opt_level}'] +494 +495 for ccopt in self.ccopts: +496 args += ['-ccopt', ccopt] +497 +498 if self.no_llvm_kompile: +499 args += ['--no-llvm-kompile'] +500 +501 if self.enable_search: +502 args += ['--enable-search'] +503 +504 if self.enable_llvm_debug: +505 args += ['--enable-llvm-debug'] +506 +507 if self.llvm_proof_hint_instrumentation: +508 args += ['--llvm-proof-hint-instrumentation'] +509 +510 if self.llvm_proof_hint_debugging: +511 args += ['--llvm-proof-hint-debugging'] +512 +513 if self.llvm_hidden_visibility: +514 args += ['--llvm-hidden-visibility'] +515 +516 if self.llvm_mutable_bytes: +517 args += ['--llvm-mutable-bytes'] +518 +519 if self.iterated_threshold: +520 args += ['--iterated-threshold', str(self.iterated_threshold)] +521 +522 if self.heuristic: +523 args += 
['--heuristic', self.heuristic] +524 +525 return args +526 +527 +528@final +529@dataclass(frozen=True) +530class KompileArgs: +531 main_file: Path +532 main_module: str | None +533 syntax_module: str | None +534 include_dirs: tuple[Path, ...] +535 md_selector: str | None +536 hook_namespaces: tuple[str, ...] +537 emit_json: bool +538 gen_bison_parser: bool +539 gen_glr_bison_parser: bool +540 bison_parser_library: bool +541 post_process: str | None +542 read_only: bool +543 coverage: bool +544 bison_lists: bool +545 outer_parsed_json: bool +546 +547 def __init__( +548 self, +549 main_file: str | Path, +550 *, +551 main_module: str | None = None, +552 syntax_module: str | None = None, +553 include_dirs: Iterable[str | Path] = (), +554 md_selector: str | None = None, +555 hook_namespaces: Iterable[str] = (), +556 emit_json: bool = True, +557 gen_bison_parser: bool = False, +558 gen_glr_bison_parser: bool = False, +559 bison_parser_library: bool = False, +560 post_process: str | None = None, +561 read_only: bool = False, +562 coverage: bool = False, +563 bison_lists: bool = False, +564 outer_parsed_json: bool = False, +565 ): +566 main_file = Path(main_file) +567 include_dirs = tuple(sorted(Path(include_dir) for include_dir in include_dirs)) +568 hook_namespaces = tuple(hook_namespaces) +569 +570 object.__setattr__(self, 'main_file', main_file) +571 object.__setattr__(self, 'main_module', main_module) +572 object.__setattr__(self, 'syntax_module', syntax_module) +573 object.__setattr__(self, 'include_dirs', include_dirs) +574 object.__setattr__(self, 'md_selector', md_selector) +575 object.__setattr__(self, 'hook_namespaces', hook_namespaces) +576 object.__setattr__(self, 'emit_json', emit_json) +577 object.__setattr__(self, 'gen_bison_parser', gen_bison_parser) +578 object.__setattr__(self, 'gen_glr_bison_parser', gen_glr_bison_parser) +579 object.__setattr__(self, 'bison_parser_library', bison_parser_library) +580 object.__setattr__(self, 'post_process', post_process) +581 object.__setattr__(self, 'read_only', read_only) +582 object.__setattr__(self, 'coverage', coverage) +583 object.__setattr__(self, 'bison_lists', bison_lists) +584 object.__setattr__(self, 'outer_parsed_json', outer_parsed_json) +585 +586 def args(self) -> list[str]: +587 args = [str(self.main_file)] +588 +589 if self.main_module: +590 args += ['--main-module', self.main_module] +591 +592 if self.syntax_module: +593 args += ['--syntax-module', self.syntax_module] +594 +595 for include_dir in self.include_dirs: +596 args += ['-I', str(include_dir)] +597 +598 if self.md_selector: +599 args += ['--md-selector', self.md_selector] +600 +601 if self.hook_namespaces: +602 args += ['--hook-namespaces', ' '.join(self.hook_namespaces)] +603 +604 if self.emit_json: +605 args += ['--emit-json'] +606 +607 if self.gen_bison_parser: +608 args += ['--gen-bison-parser'] +609 +610 if self.gen_glr_bison_parser: +611 args += ['--gen-glr-bison-parser'] +612 +613 if self.bison_parser_library: +614 args += ['--bison-parser-library'] +615 +616 if self.post_process: +617 args += ['--post-process', shlex.quote(self.post_process)] +618 +619 if self.read_only: +620 args += ['--read-only-kompiled-directory'] +621 +622 if self.coverage: +623 args += ['--coverage'] +624 +625 if self.bison_lists: +626 args += ['--bison-lists'] +627 +628 if self.outer_parsed_json: +629 args += ['--outer-parsed-json'] +630 +631 return args +632 +633 +634COMMON_ARGS: Final = frozenset(field.name for field in dataclasses.fields(KompileArgs)) +635 +636 +637@final 
+638@dataclass(frozen=True) +639class DefinitionInfo: +640 path: Path +641 +642 def __init__(self, path: str | Path): +643 path = Path(path) +644 check_dir_path(path) +645 object.__setattr__(self, 'path', path) +646 +647 @cached_property +648 def backend(self) -> KompileBackend: +649 backend = (self.path / 'backend.txt').read_text() +650 return KompileBackend(backend) +651 +652 @cached_property +653 def main_module_name(self) -> str: +654 return (self.path / 'mainModule.txt').read_text() +655 +656 @cached_property +657 def syntax_module_name(self) -> str: +658 return (self.path / 'mainSyntaxModule.txt').read_text() +659 +660 @cached_property +661 def timestamp(self) -> int: +662 return (self.path / 'timestamp').stat().st_mtime_ns +
+ +
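Usage note (illustrative sketch, not part of the module source): the kompile() entry point above forwards backend-specific keyword arguments through Kompile.from_dict, so one call covers every backend. The definition file imp.k, the module names IMP and IMP-SYNTAX, and the output directory below are hypothetical placeholders.

from pathlib import Path

from pyk.ktool.kompile import DefinitionInfo, kompile

# Hypothetical K definition; adjust paths and module names to your project.
definition_dir = kompile(
    Path('imp.k'),
    backend='llvm',              # 'haskell', 'maude', or 'booster' are dispatched as shown above
    main_module='IMP',
    syntax_module='IMP-SYNTAX',
    output_dir=Path('imp-kompiled'),
)

# DefinitionInfo reads the metadata files that kompile writes into the output directory.
info = DefinitionInfo(definition_dir)
print(info.backend.value, info.main_module_name, info.syntax_module_name)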
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/ktool/kprint.html b/pyk/_modules/pyk/ktool/kprint.html new file mode 100644 index 00000000000..a5dc8cea37b --- /dev/null +++ b/pyk/_modules/pyk/ktool/kprint.html @@ -0,0 +1,463 @@ + + + + + + + + pyk.ktool.kprint — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.ktool.kprint

+  1from __future__ import annotations
+  2
+  3import json
+  4import logging
+  5from contextlib import contextmanager
+  6from enum import Enum
+  7from functools import cached_property
+  8from pathlib import Path
+  9from tempfile import NamedTemporaryFile
+ 10from typing import TYPE_CHECKING
+ 11
+ 12from ..cli.utils import check_dir_path, check_file_path
+ 13from ..kast import KAst, kast_term
+ 14from ..kast.inner import KInner
+ 15from ..kast.outer import read_kast_definition
+ 16from ..kast.pretty import PrettyPrinter
+ 17from ..konvert import kast_to_kore, kore_to_kast
+ 18from ..kore.parser import KoreParser
+ 19from ..kore.syntax import App, SortApp
+ 20from ..kore.tools import PrintOutput, kore_print
+ 21from ..utils import run_process_2
+ 22from .kompile import DefinitionInfo
+ 23
+ 24if TYPE_CHECKING:
+ 25    from collections.abc import Callable, Iterable, Iterator
+ 26    from subprocess import CompletedProcess
+ 27    from tempfile import _TemporaryFileWrapper
+ 28    from typing import Final
+ 29
+ 30    from ..kast.inner import KSort, KToken
+ 31    from ..kast.outer import KDefinition, KFlatModule
+ 32    from ..kast.pretty import SymbolTable
+ 33    from ..kore.syntax import Pattern
+ 34    from ..utils import BugReport
+ 35
+ 36_LOGGER: Final = logging.getLogger(__name__)
+ 37
+ 38
+
+[docs] + 39class KAstInput(Enum): + 40 PROGRAM = 'program' + 41 BINARY = 'binary' + 42 JSON = 'json' + 43 KAST = 'kast' + 44 KORE = 'kore' + 45 RULE = 'rule'
+ + 46 + 47 +
+[docs] + 48class KAstOutput(Enum): + 49 PRETTY = 'pretty' + 50 PROGRAM = 'program' + 51 KAST = 'kast' + 52 BINARY = 'binary' + 53 JSON = 'json' + 54 LATEX = 'latex' + 55 KORE = 'kore' + 56 NONE = 'none'
+ + 57 + 58 + 59def _kast( + 60 file: str | Path | None = None, + 61 *, + 62 command: str | None = None, + 63 definition_dir: str | Path | None = None, + 64 input: str | KAstInput | None = None, + 65 output: str | KAstOutput | None = None, + 66 expression: str | None = None, + 67 module: str | None = None, + 68 sort: str | None = None, + 69 temp_dir: str | Path | None = None, + 70 gen_glr_parser: bool = False, + 71 # --- + 72 check: bool = True, + 73) -> CompletedProcess: + 74 if file is not None: + 75 file = Path(file) + 76 + 77 if file and not gen_glr_parser: + 78 check_file_path(file) + 79 + 80 if not file and gen_glr_parser: + 81 raise ValueError('No output file specified for --gen-glr-parser') + 82 + 83 if definition_dir is not None: + 84 definition_dir = Path(definition_dir) + 85 check_dir_path(definition_dir) + 86 + 87 if temp_dir is not None: + 88 temp_dir = Path(temp_dir) + 89 + 90 if input is not None: + 91 input = KAstInput(input) + 92 + 93 if output is not None: + 94 output = KAstOutput(output) + 95 + 96 args = _build_arg_list( + 97 file=file, + 98 command=command, + 99 definition_dir=definition_dir, +100 input=input, +101 output=output, +102 expression=expression, +103 module=module, +104 sort=sort, +105 temp_dir=temp_dir, +106 gen_glr_parser=gen_glr_parser, +107 ) +108 +109 return run_process_2(args, logger=_LOGGER, check=check) +110 +111 +
+[docs] +112def gen_glr_parser( +113 parser_file: str | Path, +114 *, +115 command: str | None = None, +116 definition_dir: str | Path | None = None, +117 module: str | None = None, +118 sort: str | None = None, +119 temp_dir: str | Path | None = None, +120) -> Path: +121 parser_file = Path(parser_file) +122 _kast( +123 file=parser_file, +124 command=command, +125 definition_dir=definition_dir, +126 module=module, +127 sort=sort, +128 temp_dir=temp_dir, +129 gen_glr_parser=True, +130 check=True, +131 ) +132 assert parser_file.is_file() +133 return parser_file
+ +134 +135 +136def _build_arg_list( +137 *, +138 file: Path | None, +139 command: str | None, +140 definition_dir: Path | None, +141 input: KAstInput | None, +142 output: KAstOutput | None, +143 expression: str | None, +144 module: str | None, +145 sort: str | None, +146 temp_dir: Path | None, +147 gen_glr_parser: bool, +148) -> list[str]: +149 args = [command if command is not None else 'kast'] +150 if file: +151 args += [str(file)] +152 if definition_dir: +153 args += ['--definition', str(definition_dir)] +154 if input: +155 args += ['--input', input.value] +156 if output: +157 args += ['--output', output.value] +158 if expression: +159 args += ['--expression', expression] +160 if module: +161 args += ['--module', module] +162 if sort: +163 args += ['--sort', sort] +164 if temp_dir: +165 args += ['--temp-dir', str(temp_dir)] +166 if gen_glr_parser: +167 args += ['--gen-glr-parser'] +168 return args +169 +170 +
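Usage note (illustrative only): gen_glr_parser above shells out to kast --gen-glr-parser and returns the path of the generated parser binary once it exists. The kompiled directory, module name, and sort below are assumptions made for the example.

from pyk.ktool.kprint import gen_glr_parser

# Generate an ahead-of-time Bison GLR parser for the (hypothetical) sort Pgm.
parser = gen_glr_parser(
    'parser_PGM',                   # where the parser binary is written
    definition_dir='imp-kompiled',  # hypothetical kompiled definition
    module='IMP-SYNTAX',
    sort='Pgm',
)
print(parser)  # Path('parser_PGM')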
+[docs] +171class KPrint: +172 definition_dir: Path +173 use_directory: Path | None +174 main_module: str +175 backend: str +176 _extra_unparsing_modules: Iterable[KFlatModule] +177 _patch_symbol_table: Callable[[SymbolTable], None] | None +178 +179 _bug_report: BugReport | None +180 +181 def __init__( +182 self, +183 definition_dir: Path, +184 use_directory: Path | None = None, +185 bug_report: BugReport | None = None, +186 extra_unparsing_modules: Iterable[KFlatModule] = (), +187 patch_symbol_table: Callable[[SymbolTable], None] | None = None, +188 ) -> None: +189 self.definition_dir = definition_dir +190 +191 if use_directory: +192 check_dir_path(use_directory) +193 +194 self.use_directory = use_directory +195 self._definition = None +196 self._symbol_table = None +197 +198 info = DefinitionInfo(self.definition_dir) +199 self.main_module = info.main_module_name +200 self.backend = info.backend.value +201 +202 self._extra_unparsing_modules = extra_unparsing_modules +203 self._patch_symbol_table = patch_symbol_table +204 self._bug_report = bug_report +205 +206 @contextmanager +207 def _temp_file(self, prefix: str | None = None, suffix: str | None = None) -> Iterator[_TemporaryFileWrapper]: +208 with NamedTemporaryFile( +209 'w', +210 dir=self.use_directory, +211 delete=not self.use_directory, +212 prefix=prefix, +213 suffix=suffix, +214 ) as ntf: +215 _LOGGER.info(f'Created temporary file: {ntf.name}') +216 yield ntf +217 +218 @cached_property +219 def definition(self) -> KDefinition: +220 return read_kast_definition(self.definition_dir / 'compiled.json') +221 +222 @property +223 def definition_hash(self) -> str: +224 return self.definition.hash +225 +
+[docs] +226 def parse_token(self, ktoken: KToken, *, as_rule: bool = False) -> KInner: +227 input = KAstInput('rule' if as_rule else 'program') +228 proc_res = self._expression_kast( +229 ktoken.token, +230 input=input, +231 output=KAstOutput.JSON, +232 sort=ktoken.sort.name, +233 ) +234 return KInner.from_dict(kast_term(json.loads(proc_res.stdout)))
+ +235 +
+[docs] +236 def kore_to_pretty(self, pattern: Pattern) -> str: +237 proc_res = self._expression_kast( +238 pattern.text, +239 input=KAstInput.KORE, +240 output=KAstOutput.PRETTY, +241 ) +242 return proc_res.stdout
+ +243 +
+[docs] +244 def kore_to_kast(self, kore: Pattern) -> KInner: +245 try: +246 _LOGGER.info('Invoking kore_to_kast') +247 return kore_to_kast(self.definition, kore) +248 except ValueError as err: +249 _LOGGER.warning(err) +250 +251 _LOGGER.warning(f'Falling back to using `kore-print` for Kore -> Kast: {kore.text}') +252 return KInner.from_dict( +253 kast_term(json.loads(kore_print(kore, definition_dir=self.definition_dir, output=PrintOutput.JSON))) +254 )
+ +255 +
+[docs] +256 def kast_to_kore(self, kast: KInner, sort: KSort | None = None, *, force_kast: bool = False) -> Pattern: +257 if not force_kast: +258 try: +259 _LOGGER.info('Invoking kast_to_kore') +260 return kast_to_kore(self.definition, kast, sort) +261 except ValueError as ve: +262 _LOGGER.warning(ve) +263 +264 _LOGGER.warning(f'Falling back to using `kast` for KAst -> Kore: {kast}') +265 kast_json = {'format': 'KAST', 'version': KAst.version(), 'term': kast.to_dict()} +266 proc_res = self._expression_kast( +267 json.dumps(kast_json), +268 input=KAstInput.JSON, +269 output=KAstOutput.KORE, +270 sort=sort.name if sort is not None else None, +271 ) +272 return KoreParser(proc_res.stdout).pattern()
+ +273 +274 def _add_sort_injection(self, pat: Pattern, isort: KSort, osort: KSort) -> Pattern: +275 if isort == osort: +276 return pat +277 if isort not in self.definition.subsorts(osort): +278 raise ValueError( +279 f'Could not find injection from subsort to supersort {isort} -> {osort} for pattern: {pat}' +280 ) +281 return App('inj', [SortApp('Sort' + isort.name), SortApp('Sort' + osort.name)], [pat]) +282 +
+[docs] +283 def pretty_print( +284 self, kast: KAst, *, in_module: str | None = None, unalias: bool = True, sort_collections: bool = False +285 ) -> str: +286 defn = self.definition.let(main_module_name=in_module) +287 +288 return PrettyPrinter( +289 defn, +290 extra_unparsing_modules=self._extra_unparsing_modules, +291 patch_symbol_table=self._patch_symbol_table, +292 unalias=unalias, +293 sort_collections=sort_collections, +294 ).print(kast)
+ +295 +296 def _expression_kast( +297 self, +298 expression: str, +299 *, +300 command: str | None = None, +301 input: str | KAstInput | None = None, +302 output: str | KAstOutput | None = None, +303 module: str | None = None, +304 sort: str | None = None, +305 # --- +306 check: bool = True, +307 ) -> CompletedProcess: +308 if len(expression) < 128 * 1024: +309 return _kast( +310 expression=expression, +311 command=command, +312 definition_dir=self.definition_dir, +313 input=input, +314 output=output, +315 module=module, +316 sort=sort, +317 temp_dir=self.use_directory, +318 check=check, +319 ) +320 +321 with self._temp_file() as ntf: +322 ntf.write(expression) +323 ntf.flush() +324 +325 return _kast( +326 ntf.name, +327 command=command, +328 definition_dir=self.definition_dir, +329 input=input, +330 output=output, +331 module=module, +332 sort=sort, +333 temp_dir=self.use_directory, +334 check=check, +335 )
+ +
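Usage note (illustrative only): KPrint wraps a kompiled definition and converts between textual programs, KAst terms, and KORE patterns. In the sketch below the kompiled directory imp-kompiled, the program text, and the sort Pgm are placeholders.

from pathlib import Path

from pyk.kast.inner import KSort, KToken
from pyk.ktool.kprint import KPrint

kprint = KPrint(Path('imp-kompiled'))  # hypothetical kompiled definition

# parse_token runs the `kast` frontend under the hood and returns a KInner term.
pgm = kprint.parse_token(KToken('int x; x = 3;', KSort('Pgm')))
print(kprint.pretty_print(pgm))

# kast_to_kore / kore_to_kast try the pure-Python converters first and fall back
# to the external `kast` / `kore-print` tools when the conversion fails.
kore = kprint.kast_to_kore(pgm, KSort('Pgm'))
print(kprint.kore_to_pretty(kore))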
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/ktool/kprove.html b/pyk/_modules/pyk/ktool/kprove.html new file mode 100644 index 00000000000..48c402018c5 --- /dev/null +++ b/pyk/_modules/pyk/ktool/kprove.html @@ -0,0 +1,514 @@ + + + + + + + + pyk.ktool.kprove — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.ktool.kprove

+  1from __future__ import annotations
+  2
+  3import json
+  4import logging
+  5import os
+  6import re
+  7from contextlib import contextmanager
+  8from enum import Enum
+  9from itertools import chain
+ 10from pathlib import Path
+ 11from typing import TYPE_CHECKING
+ 12
+ 13from ..cli.utils import check_dir_path, check_file_path
+ 14from ..cterm import CTerm
+ 15from ..kast import kast_term
+ 16from ..kast.inner import KInner
+ 17from ..kast.manip import flatten_label
+ 18from ..kast.outer import KDefinition, KFlatModule, KFlatModuleList, KImport, KRequire
+ 19from ..kore.rpc import KoreExecLogFormat
+ 20from ..prelude.ml import is_top
+ 21from ..utils import gen_file_timestamp, run_process_2
+ 22from . import TypeInferenceMode
+ 23from .claim_index import ClaimIndex
+ 24from .kprint import KPrint
+ 25
+ 26if TYPE_CHECKING:
+ 27    from collections.abc import Callable, Iterable, Iterator, Mapping
+ 28    from subprocess import CompletedProcess
+ 29    from typing import Final
+ 30
+ 31    from ..kast.outer import KClaim, KRule, KRuleLike
+ 32    from ..kast.pretty import SymbolTable
+ 33    from ..kcfg import KCFGExplore
+ 34    from ..utils import BugReport
+ 35
+ 36_LOGGER: Final = logging.getLogger(__name__)
+ 37
+ 38
+
+[docs] + 39class KProveOutput(Enum): + 40 PRETTY = 'pretty' + 41 PROGAM = 'program' + 42 KAST = 'KAST' + 43 BINARY = 'binary' + 44 JSON = 'json' + 45 LATEX = 'latex' + 46 KORE = 'kore' + 47 NONE = 'none'
+ + 48 + 49 + 50def _kprove( + 51 spec_file: Path, + 52 *, + 53 command: Iterable[str] = ('kprove',), + 54 kompiled_dir: Path | None = None, + 55 spec_module_name: str | None = None, + 56 md_selector: str | None = None, + 57 include_dirs: Iterable[Path] = (), + 58 emit_json_spec: Path | None = None, + 59 output: KProveOutput | None = None, + 60 depth: int | None = None, + 61 claims: Iterable[str] = (), + 62 type_inference_mode: str | TypeInferenceMode | None = None, + 63 temp_dir: Path | None = None, + 64 haskell_backend_command: str | None = None, + 65 dry_run: bool = False, + 66 # -- + 67 args: Iterable[str] = (), + 68 # -- + 69 env: Mapping[str, str] | None = None, + 70 check: bool = True, + 71) -> CompletedProcess: + 72 check_file_path(spec_file) + 73 + 74 for include_dir in include_dirs: + 75 check_dir_path(include_dir) + 76 + 77 if depth is not None and depth < 0: + 78 raise ValueError(f'Argument "depth" must be non-negative, got: {depth}') + 79 + 80 if type_inference_mode is not None: + 81 type_inference_mode = TypeInferenceMode(type_inference_mode) + 82 + 83 typed_args = _build_arg_list( + 84 kompiled_dir=kompiled_dir, + 85 spec_module_name=spec_module_name, + 86 md_selector=md_selector, + 87 include_dirs=include_dirs, + 88 emit_json_spec=emit_json_spec, + 89 output=output, + 90 depth=depth, + 91 claims=claims, + 92 type_inference_mode=type_inference_mode, + 93 temp_dir=temp_dir, + 94 haskell_backend_command=haskell_backend_command, + 95 dry_run=dry_run, + 96 ) + 97 + 98 run_args = tuple(chain(command, [str(spec_file)], typed_args, args)) + 99 +100 return run_process_2(run_args, logger=_LOGGER, env=env, check=check) +101 +102 +103def _build_arg_list( +104 *, +105 kompiled_dir: Path | None, +106 spec_module_name: str | None, +107 md_selector: str | None, +108 include_dirs: Iterable[Path], +109 emit_json_spec: Path | None, +110 output: KProveOutput | None, +111 depth: int | None, +112 claims: Iterable[str], +113 type_inference_mode: TypeInferenceMode | None, +114 temp_dir: Path | None, +115 haskell_backend_command: str | None, +116 dry_run: bool, +117) -> list[str]: +118 args = [] +119 +120 if kompiled_dir: +121 args += ['--definition', str(kompiled_dir)] +122 +123 if spec_module_name: +124 args += ['--spec-module', spec_module_name] +125 +126 if md_selector: +127 args += ['--md-selector', md_selector] +128 +129 for include_dir in include_dirs: +130 args += ['-I', str(include_dir)] +131 +132 if emit_json_spec: +133 args += ['--emit-json-spec', str(emit_json_spec)] +134 +135 if output: +136 args += ['--output', output.value] +137 +138 if claims: +139 args += ['--claims', ','.join(claims)] +140 +141 if type_inference_mode: +142 args += ['--type-inference-mode', type_inference_mode.value] +143 +144 if temp_dir: +145 args += ['--temp-dir', str(temp_dir)] +146 +147 if haskell_backend_command: +148 args += ['--haskell-backend-command', haskell_backend_command] +149 +150 if depth: +151 args += ['--depth', str(depth)] +152 +153 if dry_run: +154 args.append('--dry-run') +155 +156 return args +157 +158 +
+[docs] +159class KProve(KPrint): +160 main_file: Path | None +161 prover: list[str] +162 prover_args: list[str] +163 _kcfg_explore: KCFGExplore | None +164 +165 def __init__( +166 self, +167 definition_dir: Path, +168 main_file: Path | None = None, +169 use_directory: Path | None = None, +170 command: str = 'kprove', +171 bug_report: BugReport | None = None, +172 extra_unparsing_modules: Iterable[KFlatModule] = (), +173 patch_symbol_table: Callable[[SymbolTable], None] | None = None, +174 ): +175 super().__init__( +176 definition_dir, +177 use_directory=use_directory, +178 bug_report=bug_report, +179 extra_unparsing_modules=extra_unparsing_modules, +180 patch_symbol_table=patch_symbol_table, +181 ) +182 # TODO: we should not have to supply main_file, it should be read +183 self.main_file = main_file +184 self.prover = [command] +185 self.prover_args = [] +186 self._kcfg_explore = None +187 +
+[docs] +188 def prove( +189 self, +190 spec_file: Path, +191 spec_module_name: str | None = None, +192 args: Iterable[str] = (), +193 include_dirs: Iterable[Path] = (), +194 md_selector: str | None = None, +195 haskell_args: Iterable[str] = (), +196 depth: int | None = None, +197 ) -> list[CTerm]: +198 log_file = spec_file.with_suffix('.debug-log') +199 if log_file.exists(): +200 log_file.unlink() +201 haskell_log_args = [ +202 '--log', +203 str(log_file), +204 '--log-format', +205 KoreExecLogFormat.ONELINE.value, +206 '--log-entries', +207 'DebugTransition', +208 ] +209 +210 env = os.environ.copy() +211 existing_opts = os.getenv('KORE_EXEC_OPTS') +212 kore_exec_opts = ' '.join(list(haskell_args) + haskell_log_args + ([existing_opts] if existing_opts else [])) +213 _LOGGER.debug(f'export KORE_EXEC_OPTS={kore_exec_opts!r}') +214 env['KORE_EXEC_OPTS'] = kore_exec_opts +215 +216 proc_result = _kprove( +217 spec_file=spec_file, +218 command=self.prover, +219 kompiled_dir=self.definition_dir, +220 spec_module_name=spec_module_name, +221 include_dirs=include_dirs, +222 md_selector=md_selector, +223 output=KProveOutput.JSON, +224 temp_dir=self.use_directory, +225 args=self.prover_args + list(args), +226 env=env, +227 check=False, +228 depth=depth, +229 ) +230 +231 if proc_result.returncode not in (0, 1): +232 raise RuntimeError('kprove failed!') +233 +234 debug_log = _get_rule_log(log_file) +235 final_state = KInner.from_dict(kast_term(json.loads(proc_result.stdout))) +236 if is_top(final_state) and len(debug_log) == 0: +237 raise ValueError(f'Proof took zero steps, likely the LHS is invalid: {spec_file}') +238 return [CTerm.from_kast(disjunct) for disjunct in flatten_label('#Or', final_state)]
+ +239 +
+[docs] +240 def prove_claim( +241 self, +242 claim: KClaim, +243 claim_id: str, +244 lemmas: Iterable[KRule] = (), +245 args: Iterable[str] = (), +246 haskell_args: Iterable[str] = (), +247 depth: int | None = None, +248 ) -> list[CTerm]: +249 with self._tmp_claim_definition(claim, claim_id, lemmas=lemmas) as (claim_path, claim_module_name): +250 return self.prove( +251 claim_path, +252 spec_module_name=claim_module_name, +253 args=args, +254 haskell_args=haskell_args, +255 depth=depth, +256 )
+ +257 +
+[docs] +258 def parse_modules( +259 self, +260 file_path: Path, +261 module_name: str | None = None, +262 include_dirs: Iterable[Path] = (), +263 md_selector: str | None = None, +264 type_inference_mode: TypeInferenceMode | None = None, +265 ) -> KFlatModuleList: +266 with self._temp_file(prefix=f'{file_path.name}.parsed.json.') as ntf: +267 _kprove( +268 spec_file=file_path, +269 kompiled_dir=self.definition_dir, +270 spec_module_name=module_name, +271 include_dirs=include_dirs, +272 md_selector=md_selector, +273 output=KProveOutput.JSON, +274 temp_dir=self.use_directory, +275 dry_run=True, +276 type_inference_mode=type_inference_mode, +277 args=['--emit-json-spec', ntf.name], +278 ) +279 json_data = json.loads(Path(ntf.name).read_text()) +280 +281 return KFlatModuleList.from_dict(kast_term(json_data))
+ +282 +
+[docs] +283 def get_claim_index( +284 self, +285 spec_file: Path, +286 spec_module_name: str | None = None, +287 include_dirs: Iterable[Path] = (), +288 md_selector: str | None = None, +289 type_inference_mode: TypeInferenceMode | None = None, +290 ) -> ClaimIndex: +291 module_list = self.parse_modules( +292 file_path=spec_file, +293 module_name=spec_module_name, +294 include_dirs=include_dirs, +295 md_selector=md_selector, +296 type_inference_mode=type_inference_mode, +297 ) +298 return ClaimIndex.from_module_list(module_list)
+ +299 +
+[docs] +300 def get_claims( +301 self, +302 spec_file: Path, +303 spec_module_name: str | None = None, +304 include_dirs: Iterable[Path] = (), +305 md_selector: str | None = None, +306 claim_labels: Iterable[str] | None = None, +307 exclude_claim_labels: Iterable[str] | None = None, +308 include_dependencies: bool = True, +309 type_inference_mode: TypeInferenceMode | None = None, +310 ) -> list[KClaim]: +311 claim_index = self.get_claim_index( +312 spec_file=spec_file, +313 spec_module_name=spec_module_name, +314 include_dirs=include_dirs, +315 md_selector=md_selector, +316 type_inference_mode=type_inference_mode, +317 ) +318 +319 labels = claim_index.labels( +320 include=claim_labels, +321 exclude=exclude_claim_labels, +322 with_depends=include_dependencies, +323 ) +324 +325 return [claim_index[label] for label in labels]
+ +326 +327 @contextmanager +328 def _tmp_claim_definition( +329 self, +330 claim: KClaim, +331 claim_id: str, +332 lemmas: Iterable[KRule] = (), +333 ) -> Iterator[tuple[Path, str]]: +334 with self._temp_file(suffix='-spec.k') as ntf: +335 tmp_claim_file = Path(ntf.name) +336 tmp_module_name = tmp_claim_file.stem.removesuffix('-spec').rstrip('_').replace('_', '-').upper() + '-SPEC' +337 tmp_module_name = re.sub(r'-+', '-', tmp_module_name) +338 +339 sentences: list[KRuleLike] = [] +340 sentences += lemmas +341 sentences += [claim] +342 +343 claim_module = KFlatModule(tmp_module_name, sentences, imports=[KImport(self.main_module, True)]) +344 requires = [KRequire(str(self.main_file))] if self.main_file is not None else [] +345 claim_definition = KDefinition(tmp_module_name, [claim_module], requires=requires) +346 +347 ntf.write(gen_file_timestamp() + '\n') +348 ntf.write(self.pretty_print(claim_definition) + '\n\n') +349 ntf.flush() +350 +351 _LOGGER.info(f'Wrote claim file: {tmp_claim_file}') +352 yield tmp_claim_file, tmp_module_name
+ +353 +354 +355def _get_rule_log(debug_log_file: Path) -> list[list[tuple[str, bool, int]]]: +356 # rule_loc, is_success, ellapsed_time_since_start +357 def _get_rule_line(_line: str) -> tuple[str, bool, int] | None: +358 if _line.startswith('kore-exec: ['): +359 time = int(_line.split('[')[1].split(']')[0]) +360 if _line.find('(DebugTransition): after apply axioms: ') > 0: +361 rule_name = ':'.join(_line.split(':')[-4:]).strip() +362 return (rule_name, True, time) +363 elif _line.find('(DebugAttemptedRewriteRules): ') > 0: +364 rule_name = ':'.join(_line.split(':')[-4:]).strip() +365 return (rule_name, False, time) +366 return None +367 +368 log_lines: list[tuple[str, bool, int]] = [] +369 with open(debug_log_file) as log_file: +370 for line in log_file.read().split('\n'): +371 if processed_line := _get_rule_line(line): +372 log_lines.append(processed_line) +373 +374 # rule_loc, is_success, time_delta +375 axioms: list[list[tuple[str, bool, int]]] = [[]] +376 just_applied = True +377 prev_time = 0 +378 for rule_name, is_application, rule_time in log_lines: +379 rtime = rule_time - prev_time +380 prev_time = rule_time +381 if not is_application: +382 if just_applied: +383 axioms.append([]) +384 just_applied = False +385 else: +386 just_applied = True +387 axioms[-1].append((rule_name, is_application, rtime)) +388 +389 if len(axioms[-1]) == 0: +390 axioms.pop(-1) +391 +392 return axioms +
+ +
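Usage note (illustrative only): a typical KProve workflow reads the claims from a spec file and then runs kprove over it. Every path and module name below is a placeholder.

from pathlib import Path

from pyk.ktool.kprove import KProve

kprove = KProve(Path('imp-kompiled'), main_file=Path('imp.k'))

# Collect the claims of the spec module (dependencies included by default).
claims = kprove.get_claims(Path('sum-spec.k'), spec_module_name='SUM-SPEC')
print(f'{len(claims)} claim(s) found')

# Run kprove; the result is the list of remaining proof branches, so a lone
# #Top constrained term means every claim was discharged.
final_states = kprove.prove(Path('sum-spec.k'), spec_module_name='SUM-SPEC')
print(f'{len(final_states)} branch(es) left after kprove')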
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/ktool/krun.html b/pyk/_modules/pyk/ktool/krun.html new file mode 100644 index 00000000000..4f46a878cdf --- /dev/null +++ b/pyk/_modules/pyk/ktool/krun.html @@ -0,0 +1,581 @@ + + + + + + + + pyk.ktool.krun — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.ktool.krun

+  1from __future__ import annotations
+  2
+  3import logging
+  4from enum import Enum
+  5from os import execvp
+  6from pathlib import Path
+  7from shlex import join, split
+  8from subprocess import PIPE, CalledProcessError, run
+  9from sys import stderr, stdout
+ 10from time import time
+ 11from typing import TYPE_CHECKING
+ 12
+ 13from ..cli.utils import check_dir_path, check_file_path
+ 14from ..kore.parser import KoreParser
+ 15from ..kore.tools import PrintOutput, kore_print
+ 16from ..utils import run_process, run_process_2
+ 17from .kprint import KPrint
+ 18
+ 19if TYPE_CHECKING:
+ 20    from collections.abc import Callable, Iterable, Mapping
+ 21    from logging import Logger
+ 22    from subprocess import CompletedProcess
+ 23    from typing import Final
+ 24
+ 25    from ..kast.inner import KInner
+ 26    from ..kast.outer import KFlatModule
+ 27    from ..kast.pretty import SymbolTable
+ 28    from ..kore.syntax import Pattern
+ 29    from ..utils import BugReport
+ 30
+ 31_LOGGER: Final = logging.getLogger(__name__)
+ 32
+ 33
+
+[docs] + 34class KRunOutput(Enum): + 35 PRETTY = 'pretty' + 36 PROGRAM = 'program' + 37 KAST = 'kast' + 38 BINARY = 'binary' + 39 JSON = 'json' + 40 LATEX = 'latex' + 41 KORE = 'kore' + 42 NONE = 'none'
+ + 43 + 44 +
+[docs] + 45class KRun(KPrint): + 46 command: str + 47 + 48 def __init__( + 49 self, + 50 definition_dir: Path, + 51 use_directory: Path | None = None, + 52 command: str = 'krun', + 53 bug_report: BugReport | None = None, + 54 extra_unparsing_modules: Iterable[KFlatModule] = (), + 55 patch_symbol_table: Callable[[SymbolTable], None] | None = None, + 56 ) -> None: + 57 super().__init__( + 58 definition_dir, + 59 use_directory=use_directory, + 60 bug_report=bug_report, + 61 extra_unparsing_modules=extra_unparsing_modules, + 62 patch_symbol_table=patch_symbol_table, + 63 ) + 64 self.command = command + 65 +
+[docs] + 66 def run_process( + 67 self, + 68 pgm: Pattern, + 69 *, + 70 cmap: Mapping[str, str] | None = None, + 71 pmap: Mapping[str, str] | None = None, + 72 term: bool = False, + 73 depth: int | None = None, + 74 expand_macros: bool = True, + 75 search_final: bool = False, + 76 no_pattern: bool = False, + 77 output: KRunOutput | None = KRunOutput.PRETTY, + 78 pipe_stderr: bool = True, + 79 bug_report: BugReport | None = None, + 80 debugger: bool = False, + 81 ) -> CompletedProcess: + 82 with self._temp_file() as ntf: + 83 pgm.write(ntf) + 84 ntf.flush() + 85 + 86 return _krun( + 87 command=self.command, + 88 input_file=Path(ntf.name), + 89 definition_dir=self.definition_dir, + 90 output=KRunOutput.KORE, + 91 depth=depth, + 92 parser='cat', + 93 cmap=cmap, + 94 pmap=pmap, + 95 term=term, + 96 temp_dir=self.use_directory, + 97 no_expand_macros=not expand_macros, + 98 search_final=search_final, + 99 no_pattern=no_pattern, +100 bug_report=self._bug_report, +101 check=False, +102 pipe_stderr=pipe_stderr, +103 debugger=debugger, +104 )
+ +105 +
+[docs] +106 def run_proof_hint( +107 self, +108 pgm: Pattern, +109 *, +110 cmap: Mapping[str, str] | None = None, +111 pmap: Mapping[str, str] | None = None, +112 output: KRunOutput | None = None, +113 parser: str | None = None, +114 term: bool = False, +115 temp_dir: Path | None = None, +116 depth: int | None = None, +117 expand_macros: bool = True, +118 search_final: bool = False, +119 no_pattern: bool = False, +120 check: bool = False, +121 pipe_stderr: bool = True, +122 debugger: bool = False, +123 proof_hint: bool = False, +124 ) -> bytes: +125 with self._temp_file() as ntf: +126 pgm.write(ntf) +127 ntf.flush() +128 +129 args = _build_arg_list( +130 command='krun', +131 input_file=Path(ntf.name), +132 definition_dir=self.definition_dir, +133 output=output, +134 parser=parser, +135 depth=depth, +136 pmap=pmap, +137 cmap=cmap, +138 term=term, +139 temp_dir=temp_dir, +140 no_expand_macros=not expand_macros, +141 search_final=search_final, +142 no_pattern=no_pattern, +143 debugger=debugger, +144 proof_hint=proof_hint, +145 ) +146 +147 hints_bytes = self.__run_proof_hint_process( +148 args=args, check=check, pipe_stderr=pipe_stderr, logger=_LOGGER, exec_process=debugger +149 ) +150 +151 return hints_bytes.stdout
+ +152 +153 def __run_proof_hint_process( +154 self, +155 args: str | Iterable[str], +156 *, +157 check: bool = True, +158 input: str | None = None, +159 pipe_stdout: bool = True, +160 pipe_stderr: bool = False, +161 cwd: str | Path | None = None, +162 logger: Logger | None = None, +163 exec_process: bool = False, +164 ) -> CompletedProcess: +165 +166 if cwd is not None: +167 cwd = Path(cwd) +168 check_dir_path(cwd) +169 +170 if type(args) is str: +171 command = args +172 else: +173 args = tuple(args) +174 command = join(args) +175 +176 if not logger: +177 logger = _LOGGER +178 +179 proc_stdout = PIPE if pipe_stdout else None +180 proc_stderr = PIPE if pipe_stderr else None +181 +182 logger.info(f'Running: {command}') +183 +184 if exec_process: +185 stdout.flush() +186 stderr.flush() +187 if type(args) is str: +188 args = split(args) +189 argslist = list(args) +190 execvp(argslist[0], argslist) +191 +192 start_time = time() +193 +194 res = run(args, input=input, cwd=cwd, stdout=proc_stdout, stderr=proc_stderr, text=False) +195 +196 delta_time = time() - start_time +197 logger.info(f'Completed in {delta_time:.3f}s with status {res.returncode}: {command}') +198 +199 if check: +200 res.check_returncode() +201 +202 return res +203 +
+[docs] +204 def run( +205 self, +206 pgm: Pattern, +207 *, +208 cmap: Mapping[str, str] | None = None, +209 pmap: Mapping[str, str] | None = None, +210 term: bool = False, +211 depth: int | None = None, +212 expand_macros: bool = True, +213 search_final: bool = False, +214 no_pattern: bool = False, +215 output: KRunOutput | None = KRunOutput.PRETTY, +216 check: bool = False, +217 pipe_stderr: bool = True, +218 bug_report: BugReport | None = None, +219 debugger: bool = False, +220 ) -> None: +221 result = self.run_process( +222 pgm, +223 cmap=cmap, +224 pmap=pmap, +225 term=term, +226 depth=depth, +227 expand_macros=expand_macros, +228 search_final=search_final, +229 no_pattern=no_pattern, +230 output=output, +231 pipe_stderr=pipe_stderr, +232 bug_report=bug_report, +233 debugger=debugger, +234 ) +235 +236 if output != KRunOutput.NONE: +237 output_kore = KoreParser(result.stdout).pattern() +238 match output: +239 case KRunOutput.JSON: +240 print(self.kore_to_kast(output_kore).to_json()) +241 case KRunOutput.KORE: +242 print(output_kore.text) +243 case KRunOutput.PRETTY | KRunOutput.PROGRAM | KRunOutput.KAST | KRunOutput.BINARY | KRunOutput.LATEX: +244 print(kore_print(output_kore, definition_dir=self.definition_dir, output=PrintOutput(output.value))) +245 case KRunOutput.NONE: +246 raise AssertionError() +247 +248 if check: +249 result.check_returncode()
+ +250 +
+[docs] +251 def run_pattern( +252 self, +253 pattern: Pattern, +254 *, +255 depth: int | None = None, +256 expand_macros: bool = False, +257 search_final: bool = False, +258 no_pattern: bool = False, +259 pipe_stderr: bool = True, +260 check: bool = False, +261 bug_report: BugReport | None = None, +262 debugger: bool = False, +263 ) -> Pattern: +264 proc_res = self.run_process( +265 pattern, +266 depth=depth, +267 term=True, +268 expand_macros=expand_macros, +269 search_final=search_final, +270 no_pattern=no_pattern, +271 output=KRunOutput.NONE, +272 pipe_stderr=pipe_stderr, +273 bug_report=bug_report, +274 debugger=debugger, +275 ) +276 +277 if check: +278 proc_res.check_returncode() +279 +280 parser = KoreParser(proc_res.stdout) +281 res = parser.pattern() +282 assert parser.eof +283 return res
+ +284 +
+[docs] +285 def krun(self, input_file: Path) -> tuple[int, KInner]: +286 result = _krun(input_file=input_file, definition_dir=self.definition_dir, output=KRunOutput.KORE) +287 kore = KoreParser(result.stdout).pattern() +288 kast = self.kore_to_kast(kore) +289 return (result.returncode, kast)
+
+ +290 +291 +292def _krun( +293 command: str = 'krun', +294 *, +295 input_file: Path | None = None, +296 definition_dir: Path | None = None, +297 output: KRunOutput | None = None, +298 parser: str | None = None, +299 depth: int | None = None, +300 cmap: Mapping[str, str] | None = None, +301 pmap: Mapping[str, str] | None = None, +302 term: bool = False, +303 temp_dir: Path | None = None, +304 no_expand_macros: bool = False, +305 search_final: bool = False, +306 no_pattern: bool = False, +307 # --- +308 check: bool = True, +309 pipe_stderr: bool = True, +310 logger: Logger | None = None, +311 bug_report: BugReport | None = None, +312 debugger: bool = False, +313) -> CompletedProcess: +314 if input_file: +315 check_file_path(input_file) +316 +317 if definition_dir: +318 check_dir_path(definition_dir) +319 +320 if depth and depth < 0: +321 raise ValueError(f'Expected non-negative depth, got: {depth}') +322 +323 if term and (cmap is not None or pmap is not None): +324 raise ValueError('Cannot supply both term and cmap/pmap') +325 +326 args = _build_arg_list( +327 command=command, +328 input_file=input_file, +329 definition_dir=definition_dir, +330 output=output, +331 parser=parser, +332 depth=depth, +333 pmap=pmap, +334 cmap=cmap, +335 term=term, +336 temp_dir=temp_dir, +337 no_expand_macros=no_expand_macros, +338 search_final=search_final, +339 no_pattern=no_pattern, +340 debugger=debugger, +341 proof_hint=False, +342 ) +343 +344 if bug_report is not None: +345 if input_file is not None: +346 new_input_file = Path(f'krun_inputs/{input_file}') +347 bug_report.add_file(input_file, new_input_file) +348 bug_report.add_command([a if a != str(input_file) else str(new_input_file) for a in args]) +349 else: +350 bug_report.add_command(args) +351 +352 return run_process(args, check=check, pipe_stderr=pipe_stderr, logger=logger or _LOGGER, exec_process=debugger) +353 +354 +355def _build_arg_list( +356 *, +357 command: str, +358 input_file: Path | None, +359 definition_dir: Path | None, +360 output: KRunOutput | None, +361 parser: str | None, +362 depth: int | None, +363 pmap: Mapping[str, str] | None, +364 cmap: Mapping[str, str] | None, +365 term: bool, +366 temp_dir: Path | None, +367 no_expand_macros: bool, +368 search_final: bool, +369 no_pattern: bool, +370 debugger: bool, +371 proof_hint: bool, +372) -> list[str]: +373 args = [command] +374 if input_file: +375 args += [str(input_file)] +376 if definition_dir: +377 args += ['--definition', str(definition_dir)] +378 if output: +379 args += ['--output', output.value] +380 if parser: +381 args += ['--parser', parser] +382 if depth is not None: +383 args += ['--depth', str(depth)] +384 for name, value in (pmap or {}).items(): +385 args += [f'-p{name}={value}'] +386 for name, value in (cmap or {}).items(): +387 args += [f'-c{name}={value}'] +388 if term: +389 args += ['--term'] +390 if temp_dir: +391 args += ['--temp-dir', str(temp_dir)] +392 if no_expand_macros: +393 args += ['--no-expand-macros'] +394 if search_final: +395 args += ['--search-final'] +396 if no_pattern: +397 args += ['--no-pattern'] +398 if debugger: +399 args += ['--debugger'] +400 if proof_hint: +401 args += ['--proof-hint'] +402 return args +403 +404 +
+[docs] +405def llvm_interpret( +406 definition_dir: str | Path, pattern: Pattern, *, depth: int | None = None, check: bool = True +407) -> Pattern: +408 """Execute the `interpreter` binary generated by the LLVM Backend. +409 +410 Args: +411 definition_dir: Path to the kompiled definition directory. +412 pattern: KORE pattern to start rewriting from. +413 depth: Maximal number of rewrite steps to take. +414 +415 Returns: +416 The pattern resulting from the rewrites. +417 +418 Raises: +419 RuntimeError: If ``check`` and the interpreter fails. +420 """ +421 try: +422 res = llvm_interpret_raw(definition_dir, pattern.text, depth=depth, check=check) +423 except CalledProcessError as err: +424 raise RuntimeError(f'Interpreter failed with status {err.returncode}: {err.stderr}') from err +425 +426 return KoreParser(res.stdout).pattern()
+ +427 +428 +
+[docs] +429def llvm_interpret_raw( +430    definition_dir: str | Path, kore: str, *, depth: int | None = None, check: bool = True +431) -> CompletedProcess: +432    """Execute the `interpreter` binary generated by the LLVM Backend, with no processing of input/output. +433 +434    Args: +435        definition_dir: Path to the kompiled definition directory. +436        kore: KORE string to start rewriting from. +437        depth: Maximal number of rewrite steps to take. +438        check: check the return code of the CompletedProcess +439 +440    Returns: +441        The CompletedProcess of the interpreter. +442 +443    Raises: +444        CalledProcessError: If ``check`` and the interpreter fails. +445    """ +446    definition_dir = Path(definition_dir) +447    interpreter_file = definition_dir / 'interpreter' +448    check_file_path(interpreter_file) +449 +450    depth = depth if depth is not None else -1 +451    args = [str(interpreter_file), '/dev/stdin', str(depth), '/dev/stdout'] +452 +453    return run_process_2(args, input=kore, logger=_LOGGER, loglevel=logging.DEBUG, check=check)
+ +
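Usage note (illustrative only): KRun parses a program, plugs it into the initial configuration via krun, and prints the final configuration. The paths, the program text, and the sort Pgm below are placeholders.

from pathlib import Path

from pyk.kast.inner import KSort, KToken
from pyk.ktool.krun import KRun

krun = KRun(Path('imp-kompiled'))  # hypothetical kompiled definition

# Parse the program and convert it to KORE; run() hands the already-parsed pattern
# to krun (parser='cat' internally) and pretty-prints the final configuration.
pgm_kore = krun.kast_to_kore(krun.parse_token(KToken('int x; x = 3;', KSort('Pgm'))), KSort('Pgm'))
krun.run(pgm_kore, depth=1000)

# For raw pattern-in / pattern-out execution, run_pattern returns the final KORE pattern,
# but it expects a complete initial configuration (krun --term):
# final = krun.run_pattern(full_initial_config, depth=1000)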
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/ktool/prove_rpc.html b/pyk/_modules/pyk/ktool/prove_rpc.html new file mode 100644 index 00000000000..339f94bdb95 --- /dev/null +++ b/pyk/_modules/pyk/ktool/prove_rpc.html @@ -0,0 +1,209 @@ + + + + + + + + pyk.ktool.prove_rpc — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.ktool.prove_rpc

+  1from __future__ import annotations
+  2
+  3import logging
+  4from typing import TYPE_CHECKING
+  5
+  6from ..kast.manip import extract_lhs
+  7from ..kast.outer import KApply
+  8from ..proof import APRProof, APRProver, EqualityProof, ImpliesProver
+  9
+ 10if TYPE_CHECKING:
+ 11    from collections.abc import Callable
+ 12    from pathlib import Path
+ 13    from typing import ContextManager, Final
+ 14
+ 15    from ..cli.pyk import ProveOptions
+ 16    from ..kast.outer import KClaim
+ 17    from ..kcfg import KCFGExplore
+ 18    from ..proof import Proof, Prover
+ 19    from .kprove import KProve
+ 20
+ 21_LOGGER: Final = logging.getLogger(__name__)
+ 22
+ 23
+
+[docs] + 24class ProveRpc: + 25 _kprove: KProve + 26 _explore_context: Callable[[], ContextManager[KCFGExplore]] + 27 + 28 def __init__( + 29 self, + 30 kprove: KProve, + 31 explore_context: Callable[[], ContextManager[KCFGExplore]], + 32 ): + 33 self._kprove = kprove + 34 self._explore_context = explore_context + 35 +
+[docs] + 36 def prove_rpc(self, options: ProveOptions) -> list[Proof]: + 37 all_claims = self._kprove.get_claims( + 38 options.spec_file, + 39 spec_module_name=options.spec_module, + 40 claim_labels=options.claim_labels, + 41 exclude_claim_labels=options.exclude_claim_labels, + 42 type_inference_mode=options.type_inference_mode, + 43 ) + 44 + 45 if all_claims is None: + 46 raise ValueError(f'No claims found in file: {options.spec_file}') + 47 + 48 return [ + 49 self._prove_claim_rpc( + 50 claim, + 51 assume_defined=options.assume_defined, + 52 max_depth=options.max_depth, + 53 save_directory=options.save_directory, + 54 max_iterations=options.max_iterations, + 55 ) + 56 for claim in all_claims + 57 ]
+ + 58 + 59 def _prove_claim_rpc( + 60 self, + 61 claim: KClaim, + 62 assume_defined: bool, + 63 max_depth: int | None = None, + 64 save_directory: Path | None = None, + 65 max_iterations: int | None = None, + 66 ) -> Proof: + 67 definition = self._kprove.definition + 68 + 69 proof: Proof + 70 prover: Prover + 71 lhs_top = extract_lhs(claim.body) + 72 is_functional_claim = type(lhs_top) is KApply and definition.symbols[lhs_top.label.name] in definition.functions + 73 + 74 if is_functional_claim: + 75 proof = EqualityProof.from_claim(claim, definition, proof_dir=save_directory) + 76 if save_directory is not None and EqualityProof.proof_data_exists(proof.id, save_directory): + 77 _LOGGER.info(f'Reloading from disk {proof.id}: {save_directory}') + 78 proof = EqualityProof.read_proof_data(save_directory, proof.id) + 79 + 80 else: + 81 proof = APRProof.from_claim(definition, claim, {}, proof_dir=save_directory) + 82 if save_directory is not None and APRProof.proof_data_exists(proof.id, save_directory): + 83 _LOGGER.info(f'Reloading from disk {proof.id}: {save_directory}') + 84 proof = APRProof.read_proof_data(save_directory, proof.id) + 85 + 86 if not proof.passed and (max_iterations is None or max_iterations > 0): + 87 with self._explore_context() as kcfg_explore: + 88 if is_functional_claim: + 89 assert type(proof) is EqualityProof + 90 prover = ImpliesProver(proof, kcfg_explore, assume_defined=assume_defined) + 91 else: + 92 assert type(proof) is APRProof + 93 prover = APRProver(kcfg_explore, execute_depth=max_depth, assume_defined=assume_defined) + 94 prover.advance_proof(proof, max_iterations=max_iterations) + 95 + 96 if proof.passed: + 97 _LOGGER.info(f'Proof passed: {proof.id}') + 98 elif proof.failed: + 99 _LOGGER.info(f'Proof failed: {proof.id}') +100 else: +101 _LOGGER.info(f'Proof pending: {proof.id}') +102 return proof
+ +
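Usage note (schematic): ProveRpc needs a KProve instance plus a callable returning a context manager over a KCFGExplore session. Building that session (kore-rpc server, symbolic execution setup) is outside this module, so the sketch below only shows the wiring and leaves the session construction to the caller.

from __future__ import annotations

from contextlib import contextmanager
from typing import TYPE_CHECKING

from pyk.ktool.prove_rpc import ProveRpc

if TYPE_CHECKING:
    from collections.abc import Iterator
    from pyk.cli.pyk import ProveOptions
    from pyk.kcfg import KCFGExplore
    from pyk.ktool.kprove import KProve
    from pyk.proof import Proof


def prove_with_rpc(kprove: KProve, explore: KCFGExplore, options: ProveOptions) -> list[Proof]:
    # ProveRpc re-enters this context manager for each claim that is not already proven.
    @contextmanager
    def explore_context() -> Iterator[KCFGExplore]:
        yield explore

    prove_rpc = ProveRpc(kprove, explore_context)
    return prove_rpc.prove_rpc(options)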
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/ktool/utils.html b/pyk/_modules/pyk/ktool/utils.html new file mode 100644 index 00000000000..9019b77f032 --- /dev/null +++ b/pyk/_modules/pyk/ktool/utils.html @@ -0,0 +1,154 @@ + + + + + + + + pyk.ktool.utils — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.ktool.utils

+ 1from __future__ import annotations
+ 2
+ 3from dataclasses import dataclass
+ 4from pathlib import Path
+ 5from typing import TYPE_CHECKING, final
+ 6
+ 7from ..utils import run_process_2
+ 8
+ 9if TYPE_CHECKING:
+10    from typing import Final
+11
+12
+
+[docs] +13@final +14@dataclass(frozen=True) +15class KDistribution: +16 """Represent the path to the K distribution. +17 +18 Attributes: +19 path: Path to the K distribution. +20 """ +21 +22 path: Path +23 +24 @property +25 def builtin_dir(self) -> Path: +26 """The path to the `builtin` directory.""" +27 return self.path / 'include/kframework/builtin' +28 +
+[docs] +29 @staticmethod +30 def create() -> KDistribution | None: +31 """Instantiate the class based on the path to the `kompile` binary.""" +32 kompile_bin = KDistribution._which_kompile() +33 if kompile_bin is None: +34 return None +35 return KDistribution(kompile_bin.parents[1])
+ +36 +37 @staticmethod +38 def _which_kompile() -> Path | None: +39 proc_res = run_process_2(['which', 'kompile']) +40 if proc_res.returncode: +41 return None +42 res = Path(proc_res.stdout.rstrip()) +43 assert res.is_file() +44 return res
+ +45 +46 +47K_DISTRIBUTION: Final = KDistribution.create() +
+ +
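Usage note: K_DISTRIBUTION is resolved once at import time by locating the kompile executable; it is None when kompile is not on PATH. For example:

from pyk.ktool.utils import K_DISTRIBUTION

if K_DISTRIBUTION is None:
    print('kompile not found on PATH')
else:
    print(K_DISTRIBUTION.path)         # root of the K installation (two levels above the kompile binary)
    print(K_DISTRIBUTION.builtin_dir)  # <path>/include/kframework/builtin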
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/prelude/bytes.html b/pyk/_modules/pyk/prelude/bytes.html new file mode 100644 index 00000000000..3b069fdaa88 --- /dev/null +++ b/pyk/_modules/pyk/prelude/bytes.html @@ -0,0 +1,139 @@ + + + + + + + + pyk.prelude.bytes — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.prelude.bytes

+ 1from typing import Final
+ 2
+ 3from ..dequote import bytes_decode, bytes_encode, dequote_bytes, enquote_bytes
+ 4from ..kast.inner import KSort, KToken
+ 5
+ 6BYTES: Final = KSort('Bytes')
+ 7
+ 8
+
+[docs] + 9def bytesToken_from_str(pretty: str) -> KToken: # noqa: N802 +10 return KToken(f'b"{enquote_bytes(pretty)}"', BYTES)
+ +11 +12 +
+[docs] +13def bytesToken(b: bytes) -> KToken: # noqa: N802 +14 return bytesToken_from_str(bytes_decode(b))
+ +15 +16 +
+[docs] +17def pretty_bytes_str(token: KToken) -> str: +18 if token.sort != BYTES: +19 raise ValueError(f'Expected Bytes token, got: {token}') +20 assert token.token[0:2] == 'b"' +21 assert token.token[-1] == '"' +22 return dequote_bytes(token.token[2:-1])
+ +23 +24 +
+[docs] +25def pretty_bytes(token: KToken) -> bytes: +26 return bytes_encode(pretty_bytes_str(token))
+ +
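These helpers convert between Python bytes and Bytes-sorted KTokens; bytesToken and pretty_bytes are inverses of each other. A small round-trip sketch:

from pyk.prelude.bytes import bytesToken, pretty_bytes

tok = bytesToken(b'\x00\xff')        # KToken of sort Bytes; text is the quoted form b"..."
assert tok.sort.name == 'Bytes'
assert pretty_bytes(tok) == b'\x00\xff'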
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/prelude/collections.html b/pyk/_modules/pyk/prelude/collections.html new file mode 100644 index 00000000000..fddd6455634 --- /dev/null +++ b/pyk/_modules/pyk/prelude/collections.html @@ -0,0 +1,204 @@ + + + + + + + + pyk.prelude.collections — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.prelude.collections

+ 1from __future__ import annotations
+ 2
+ 3from typing import TYPE_CHECKING
+ 4
+ 5from ..kast.inner import KApply, KLabel, KSort, build_assoc
+ 6
+ 7if TYPE_CHECKING:
+ 8    from collections.abc import Iterable
+ 9    from typing import Final
+10
+11    from ..kast import KInner
+12
+13SET: Final = KSort('Set')
+14LIST: Final = KSort('List')
+15MAP: Final = KSort('Map')
+16RANGEMAP: Final = KSort('RangeMap')
+17BAG: Final = KSort('Bag')
+18
+19
+
+[docs] +20def set_empty() -> KInner: +21 return KApply('.Set')
+ +22 +23 +
+[docs] +24def set_item(k: KInner) -> KInner: +25 return KApply('SetItem', [k])
+ +26 +27 +
+[docs] +28def set_of(ks: Iterable[KInner]) -> KInner: +29 return build_assoc(set_empty(), KLabel('_Set_'), map(set_item, ks))
+ +30 +31 +
+[docs] +32def list_empty() -> KInner: +33 return KApply('.List')
+ +34 +35 +
+[docs] +36def list_item(k: KInner) -> KInner: +37 return KApply('ListItem', [k])
+ +38 +39 +
+[docs] +40def list_of(ks: Iterable[KInner]) -> KInner: +41 return build_assoc(list_empty(), KLabel('_List_'), map(list_item, ks))
+ +42 +43 +
+[docs] +44def map_empty() -> KInner: +45 return KApply('.Map')
+ +46 +47 +
+[docs] +48def map_item(k: KInner, v: KInner) -> KInner: +49 return KApply('_|->_', [k, v])
+ +50 +51 +
+[docs] +52def map_of(ks: dict[KInner, KInner] | Iterable[tuple[KInner, KInner]]) -> KInner: +53 ks = dict(ks) +54 return build_assoc(map_empty(), KLabel('_Map_'), (map_item(k, v) for k, v in ks.items()))
+ +55 +56 +
+[docs] +57def rangemap_empty() -> KInner: +58 return KApply('.RangeMap')
+ +59 +60 +
+[docs] +61def rangemap_item(k: tuple[KInner, KInner], v: KInner) -> KInner: +62 return KApply('_r|->_', (KApply('RangeMap:Range', k), v))
+ +63 +64 +
+[docs] +65def rangemap_of(ks: dict[tuple[KInner, KInner], KInner] | Iterable[tuple[tuple[KInner, KInner], KInner]]) -> KInner: +66 ks_dict: dict[tuple[KInner, KInner], KInner] = dict(ks) +67 return build_assoc(rangemap_empty(), KLabel('_RangeMap_'), (rangemap_item(k, v) for k, v in ks_dict.items()))
+ +
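Each *_of helper folds the given items with the corresponding associative constructor, returning the unit (.Set, .List, .Map, ...) for an empty collection. For example:

from pyk.prelude.collections import list_of, map_of, set_of
from pyk.prelude.kint import intToken

one, two = intToken(1), intToken(2)
s = set_of([one, two])          # _Set_(SetItem(1), SetItem(2))
l = list_of([one, two])         # _List_(ListItem(1), ListItem(2))
m = map_of({one: two})          # _|->_(1, 2)
empty = map_of([])              # .Map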
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/prelude/k.html b/pyk/_modules/pyk/prelude/k.html new file mode 100644 index 00000000000..b0b77ae6805 --- /dev/null +++ b/pyk/_modules/pyk/prelude/k.html @@ -0,0 +1,125 @@ + + + + + + + + pyk.prelude.k — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.prelude.k

+ 1from __future__ import annotations
+ 2
+ 3from typing import TYPE_CHECKING
+ 4
+ 5from ..kast.inner import KApply, KLabel, KSort, KToken
+ 6
+ 7if TYPE_CHECKING:
+ 8    from typing import Final
+ 9
+10    from ..kast import KInner
+11
+12
+13K: Final = KSort('K')
+14K_ITEM: Final = KSort('KItem')
+15GENERATED_TOP_CELL: Final = KSort('GeneratedTopCell')
+16
+17DOTS: Final = KToken('...', K)
+18
+19
+
+[docs] +20def inj(from_sort: KSort, to_sort: KSort, term: KInner) -> KInner: +21 return KApply(KLabel('inj', (from_sort, to_sort)), (term,))
+ +
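inj wraps a term in K's sort-injection label, which is how a term of one sort is used where a supersort (for example KItem) is expected. A small sketch:

from pyk.kast.inner import KVariable
from pyk.prelude.k import K_ITEM, inj
from pyk.prelude.kint import INT

# inj{Int, KItem}(N:Int)
item = inj(INT, K_ITEM, KVariable('N', sort=INT))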
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/prelude/kbool.html b/pyk/_modules/pyk/prelude/kbool.html new file mode 100644 index 00000000000..123402919b9 --- /dev/null +++ b/pyk/_modules/pyk/prelude/kbool.html @@ -0,0 +1,152 @@ + + + + + + + + pyk.prelude.kbool — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.prelude.kbool

+ 1from __future__ import annotations
+ 2
+ 3from typing import TYPE_CHECKING
+ 4
+ 5from ..kast.inner import KApply, KLabel, KSort, KToken, build_assoc
+ 6from ..utils import unique
+ 7
+ 8if TYPE_CHECKING:
+ 9    from collections.abc import Iterable
+10    from typing import Final
+11
+12    from ..kast import KInner
+13
+14BOOL: Final = KSort('Bool')
+15TRUE: Final = KToken('true', BOOL)
+16FALSE: Final = KToken('false', BOOL)
+17
+18
+
+[docs] +19def boolToken(b: bool) -> KToken: # noqa: N802 +20 return TRUE if b else FALSE
+ +21 +22 +
+[docs] +23def andBool(items: Iterable[KInner]) -> KInner: # noqa: N802 +24 return build_assoc(TRUE, KLabel('_andBool_'), unique(items))
+ +25 +26 +
+[docs] +27def orBool(items: Iterable[KInner]) -> KInner: # noqa: N802 +28 return build_assoc(FALSE, KLabel('_orBool_'), unique(items))
+ +29 +30 +
+[docs] +31def notBool(item: KInner) -> KApply: # noqa: N802 +32 return KApply(KLabel('notBool_'), [item])
+ +33 +34 +
+[docs] +35def impliesBool(antecedent: KInner, consequent: KInner) -> KApply: # noqa: N802 +36 return KApply(KLabel('_impliesBool_'), [antecedent, consequent])
+ +
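The andBool/orBool builders fold their arguments with the Boolean connectives, dropping duplicates via unique and collapsing an empty argument list to the unit (true for andBool, false for orBool). For example:

from pyk.kast.inner import KVariable
from pyk.prelude.kbool import TRUE, andBool, notBool

x, y = KVariable('X'), KVariable('Y')
cond = andBool([x, notBool(y), x])   # duplicate X removed: _andBool_(X, notBool_(Y))
assert andBool([]) == TRUE           # empty conjunction is the unit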
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/prelude/kint.html b/pyk/_modules/pyk/prelude/kint.html new file mode 100644 index 00000000000..8e32a78f97a --- /dev/null +++ b/pyk/_modules/pyk/prelude/kint.html @@ -0,0 +1,511 @@ + + + + + + + + pyk.prelude.kint — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.prelude.kint

+  1from __future__ import annotations
+  2
+  3from typing import TYPE_CHECKING
+  4
+  5from ..kast.inner import KApply, KSort, KToken
+  6
+  7if TYPE_CHECKING:
+  8    from typing import Final
+  9
+ 10    from ..kast import KInner
+ 11
+ 12INT: Final = KSort('Int')
+ 13
+ 14
+
+[docs] + 15def intToken(i: int) -> KToken: # noqa: N802 + 16 r"""Instantiate the KAST term ``#token(i, "Int")``. + 17 + 18 Args: + 19 i: The integer literal. + 20 + 21 Returns: + 22 The KAST term ``#token(i, "Int")``. + 23 """ + 24 return KToken(str(i), INT)
+ + 25 + 26 +
+[docs] + 27def ltInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 + 28 r"""Instantiate the KAST term ```_<Int_`(i1, i2)``. + 29 + 30 Args: + 31 i1: The left operand. + 32 i2: The right operand. + 33 + 34 Returns: + 35 The KAST term ```_<Int_`(i1, i2)``. + 36 """ + 37 return KApply('_<Int_', i1, i2)
+ + 38 + 39 +
+[docs] + 40def leInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 + 41 r"""Instantiate the KAST term ```_<=Int_`(i1, i2)``. + 42 + 43 Args: + 44 i1: The left operand. + 45 i2: The right operand. + 46 + 47 Returns: + 48 The KAST term ```_<=Int_`(i1, i2)``. + 49 """ + 50 return KApply('_<=Int_', i1, i2)
+ + 51 + 52 +
+[docs] + 53def gtInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 + 54 r"""Instantiate the KAST term ```_>Int_`(i1, i2)``. + 55 + 56 Args: + 57 i1: The left operand. + 58 i2: The right operand. + 59 + 60 Returns: + 61 The KAST term ```_>Int_`(i1, i2)``. + 62 """ + 63 return KApply('_>Int_', i1, i2)
+ + 64 + 65 +
+[docs] + 66def geInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 + 67 r"""Instantiate the KAST term ```_>=Int_`(i1, i2)``. + 68 + 69 Args: + 70 i1: The left operand. + 71 i2: The right operand. + 72 + 73 Returns: + 74 The KAST term ```_>=Int_`(i1, i2)``. + 75 """ + 76 return KApply('_>=Int_', i1, i2)
+ + 77 + 78 +
+[docs] + 79def eqInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 + 80 r"""Instantiate the KAST term ```_==Int_`(i1, i2)``. + 81 + 82 Args: + 83 i1: The left operand. + 84 i2: The right operand. + 85 + 86 Returns: + 87 The KAST term ```_==Int_`(i1, i2)``. + 88 """ + 89 return KApply('_==Int_', i1, i2)
+ + 90 + 91 +
+[docs] + 92def neqInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 + 93 r"""Instantiate the KAST term ```_=/=Int_`(i1, i2)``. + 94 + 95 Args: + 96 i1: The left operand. + 97 i2: The right operand. + 98 + 99 Returns: +100 The KAST term ```_=/=Int_`(i1, i2)``. +101 """ +102 return KApply('_=/=Int_', i1, i2)
+ +103 +104 +
+[docs] +105def notInt(i: KInner) -> KApply: # noqa: N802 +106 r"""Instantiate the KAST term ```~Int_`(i)``. +107 +108 Args: +109 i: The integer operand. +110 +111 Returns: +112 The KAST term ```~Int_`(i)``. +113 """ +114 return KApply('~Int_', i)
+ +115 +116 +
+[docs] +117def expInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +118 r"""Instantiate the KAST term ```_^Int_`(i1, i2)``. +119 +120 Args: +121 i1: The base. +122 i2: The exponent. +123 +124 Returns: +125 The KAST term ```_^Int_`(i1, i2)``. +126 """ +127 return KApply('_^Int_', i1, i2)
+ +128 +129 +
+[docs] +130def expModInt(i1: KInner, i2: KInner, i3: KInner) -> KApply: # noqa: N802 +131 r"""Instantiate the KAST term ```_^%Int__`(i1, i2, i3)``. +132 +133 Args: +134 i1: The base. +135 i2: The exponent. +136 i3: The modulus. +137 +138 Returns: +139 The KAST term ```_^%Int__`(i1, i2, i3)``. +140 """ +141 return KApply('_^%Int__', i1, i2, i3)
+ +142 +143 +
+[docs] +144def mulInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +145 r"""Instantiate the KAST term ```_*Int_`(i1, i2)``. +146 +147 Args: +148 i1: The left operand. +149 i2: The right operand. +150 +151 Returns: +152 The KAST term ```_*Int_`(i1, i2)``. +153 """ +154 return KApply('_*Int_', i1, i2)
+ +155 +156 +
+[docs] +157def divInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +158 r"""Instantiate the KAST term ```_/Int_`(i1, i2)``. +159 +160 Args: +161 i1: The dividend. +162 i2: The divisor. +163 +164 Returns: +165 The KAST term ```_/Int_`(i1, i2)``. +166 """ +167 return KApply('_/Int_', i1, i2)
+ +168 +169 +
+[docs] +170def modInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +171 r"""Instantiate the KAST term ```_%Int_`(i1, i2)``. +172 +173 Args: +174 i1: The dividend. +175 i2: The divisor. +176 +177 Returns: +178 The KAST term ```_%Int_`(i1, i2)``. +179 """ +180 return KApply('_%Int_', i1, i2)
+ +181 +182 +
+[docs] +183def euclidDivInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +184 r"""Instantiate the KAST term ```_divInt_`(i1, i2)``. +185 +186 Args: +187 i1: The dividend. +188 i2: The divisor. +189 +190 Returns: +191 The KAST term ```_divInt_`(i1, i2)``. +192 """ +193 return KApply('_divInt_', i1, i2)
+ +194 +195 +
+[docs] +196def euclidModInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +197 r"""Instantiate the KAST term ```_modInt_`(i1, i2)``. +198 +199 Args: +200 i1: The dividend. +201 i2: The divisor. +202 +203 Returns: +204 The KAST term ```_modInt_`(i1, i2)``. +205 """ +206 return KApply('_modInt_', i1, i2)
+ +207 +208 +
+[docs] +209def addInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +210 r"""Instantiate the KAST term ```_+Int_`(i1, i2)``. +211 +212 Args: +213 i1: The left operand. +214 i2: The right operand. +215 +216 Returns: +217 The KAST term ```_+Int_`(i1, i2)``. +218 """ +219 return KApply('_+Int_', i1, i2)
+ +220 +221 +
+[docs] +222def subInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +223 r"""Instantiate the KAST term ```_-Int_`(i1, i2)``. +224 +225 Args: +226 i1: The left operand. +227 i2: The right operand. +228 +229 Returns: +230 The KAST term ```_-Int_`(i1, i2)``. +231 """ +232 return KApply('_-Int_', i1, i2)
+ +233 +234 +
+[docs] +235def rshiftInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +236 r"""Instantiate the KAST term ```_>>Int_`(i1, i2)``. +237 +238 Args: +239 i1: The left operand. +240 i2: The right operand. +241 +242 Returns: +243 The KAST term ```_>>Int_`(i1, i2)``. +244 """ +245 return KApply('_>>Int_', i1, i2)
+ +246 +247 +
+[docs] +248def lshiftInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +249 r"""Instantiate the KAST term ```_<<Int_`(i1, i2)``. +250 +251 Args: +252 i1: The left operand. +253 i2: The right operand. +254 +255 Returns: +256 The KAST term ```_<<Int_`(i1, i2)``. +257 """ +258 return KApply('_<<Int_', i1, i2)
+ +259 +260 +
+[docs] +261def andInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +262 r"""Instantiate the KAST term ```_&Int_`(i1, i2)``. +263 +264 Args: +265 i1: The left operand. +266 i2: The right operand. +267 +268 Returns: +269 The KAST term ```_&Int_`(i1, i2)``. +270 """ +271 return KApply('_&Int_', i1, i2)
+ +272 +273 +
+[docs] +274def xorInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +275 r"""Instantiate the KAST term ```_xorInt_`(i1, i2)``. +276 +277 Args: +278 i1: The left operand. +279 i2: The right operand. +280 +281 Returns: +282 The KAST term ```_xorInt_`(i1, i2)``. +283 """ +284 return KApply('_xorInt_', i1, i2)
+ +285 +286 +
+[docs] +287def orInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +288 r"""Instantiate the KAST term ```_|Int_`(i1, i2)``. +289 +290 Args: +291 i1: The left operand. +292 i2: The right operand. +293 +294 Returns: +295 The KAST term ```_|Int_`(i1, i2)``. +296 """ +297 return KApply('_|Int_', i1, i2)
+ +298 +299 +
+[docs] +300def minInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +301 r"""Instantiate the KAST term ```minInt`(i1, i2)``. +302 +303 Args: +304 i1: The left operand. +305 i2: The right operand. +306 +307 Returns: +308 The KAST term ```minInt`(i1, i2)``. +309 """ +310 return KApply('minInt', i1, i2)
+ +311 +312 +
+[docs] +313def maxInt(i1: KInner, i2: KInner) -> KApply: # noqa: N802 +314 r"""Instantiate the KAST term ```maxInt`(i1, i2)``. +315 +316 Args: +317 i1: The left operand. +318 i2: The right operand. +319 +320 Returns: +321 The KAST term ```maxInt`(i1, i2)``. +322 """ +323 return KApply('maxInt', i1, i2)
+ +324 +325 +
+[docs] +326def absInt(i: KInner) -> KApply: # noqa: N802 +327 r"""Instantiate the KAST term ```absInt`(i)``. +328 +329 Args: +330 i: The integer operand. +331 +332 Returns: +333 The KAST term ```absInt`(i)``. +334 """ +335 return KApply('absInt', i)
+ +
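Each helper here simply applies the corresponding Int hook label to its arguments, so larger arithmetic terms are built by nesting the calls. For example:

from pyk.kast.inner import KVariable
from pyk.prelude.kint import INT, addInt, intToken, leInt

n = KVariable('N', sort=INT)
# `_<=Int_`(`_+Int_`(N, 1), 256)
bound = leInt(addInt(n, intToken(1)), intToken(256))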
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/prelude/ml.html b/pyk/_modules/pyk/prelude/ml.html new file mode 100644 index 00000000000..fb7276e69ed --- /dev/null +++ b/pyk/_modules/pyk/prelude/ml.html @@ -0,0 +1,250 @@ + + + + + + + + pyk.prelude.ml — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.prelude.ml

+  1from __future__ import annotations
+  2
+  3from typing import TYPE_CHECKING
+  4
+  5from pyk.utils import single
+  6
+  7from ..kast.inner import KApply, KLabel, build_assoc, flatten_label
+  8from .k import GENERATED_TOP_CELL, K_ITEM, K
+  9from .kbool import BOOL, FALSE, TRUE
+ 10
+ 11if TYPE_CHECKING:
+ 12    from collections.abc import Iterable
+ 13    from typing import Final
+ 14
+ 15    from ..kast import KInner
+ 16    from ..kast.inner import KSort, KVariable
+ 17
+ 18
+ 19ML_QUANTIFIERS: Final = {
+ 20    '#Exists',
+ 21    '#Forall',
+ 22}
+ 23
+ 24
+ 25def _is_top(term: KInner) -> bool:
+ 26    return isinstance(term, KApply) and term.label.name == '#Top'
+ 27
+ 28
+
+[docs] + 29def is_top(term: KInner, *, weak: bool = False) -> bool: + 30 if _is_top(term): + 31 return True + 32 if not weak: + 33 return False + 34 flat = flatten_label('#And', term) + 35 if len(flat) == 1: + 36 return is_top(single(flat)) + 37 return all(is_top(term, weak=True) for term in flat)
+ + 38 + 39 + 40def _is_bottom(term: KInner) -> bool: + 41 return isinstance(term, KApply) and term.label.name == '#Bottom' + 42 + 43 +
+[docs] + 44def is_bottom(term: KInner, *, weak: bool = False) -> bool: + 45 if _is_bottom(term): + 46 return True + 47 if not weak: + 48 return False + 49 flat = flatten_label('#And', term) + 50 if len(flat) == 1: + 51 return is_bottom(single(flat)) + 52 return any(is_bottom(term, weak=True) for term in flat)
+ + 53 + 54 +
+[docs] + 55def mlEquals( # noqa: N802 + 56 term1: KInner, + 57 term2: KInner, + 58 arg_sort: str | KSort = K, + 59 sort: str | KSort = GENERATED_TOP_CELL, + 60) -> KApply: + 61 return KLabel('#Equals', arg_sort, sort)(term1, term2)
+ + 62 + 63 +
+[docs] + 64def mlEqualsTrue(term: KInner, sort: str | KSort = GENERATED_TOP_CELL) -> KApply: # noqa: N802 + 65 return mlEquals(TRUE, term, arg_sort=BOOL, sort=sort)
+ + 66 + 67 +
+[docs] + 68def mlEqualsFalse(term: KInner, sort: str | KSort = GENERATED_TOP_CELL) -> KApply: # noqa: N802 + 69 return mlEquals(FALSE, term, arg_sort=BOOL, sort=sort)
+ + 70 + 71 +
+[docs] + 72def mlTop(sort: str | KSort = GENERATED_TOP_CELL) -> KApply: # noqa: N802 + 73 return KLabel('#Top', sort)()
+ + 74 + 75 +
+[docs] + 76def mlBottom(sort: str | KSort = GENERATED_TOP_CELL) -> KApply: # noqa: N802 + 77 return KLabel('#Bottom', sort)()
+ + 78 + 79 +
+[docs] + 80def mlNot(term: KInner, sort: str | KSort = GENERATED_TOP_CELL) -> KApply: # noqa: N802 + 81 return KLabel('#Not', sort)(term)
+ + 82 + 83 +
+[docs] + 84def mlAnd(conjuncts: Iterable[KInner], sort: str | KSort = GENERATED_TOP_CELL) -> KInner: # noqa: N802 + 85 return build_assoc(mlTop(sort), KLabel('#And', sort), filter(lambda x: not is_top(x), conjuncts))
+ + 86 + 87 +
+[docs] + 88def mlOr(disjuncts: Iterable[KInner], sort: str | KSort = GENERATED_TOP_CELL) -> KInner: # noqa: N802 + 89 return build_assoc(mlBottom(sort), KLabel('#Or', sort), filter(lambda x: not is_bottom(x), disjuncts))
+ + 90 + 91 +
+[docs] + 92def mlImplies(antecedent: KInner, consequent: KInner, sort: str | KSort = GENERATED_TOP_CELL) -> KApply: # noqa: N802 + 93 return KLabel('#Implies', sort)(antecedent, consequent)
+ + 94 + 95 +
+[docs] + 96def mlExists( # noqa: N802 + 97 var: KVariable, + 98 body: KInner, + 99 sort1: str | KSort = K_ITEM, +100 sort2: str | KSort = GENERATED_TOP_CELL, +101) -> KApply: +102 return KLabel('#Exists', sort1, sort2)(var, body)
+ +103 +104 +
+[docs] +105def mlCeil( # noqa: N802 +106 term: KInner, +107 arg_sort: str | KSort = GENERATED_TOP_CELL, +108 sort: str | KSort = GENERATED_TOP_CELL, +109) -> KApply: +110 return KLabel('#Ceil', arg_sort, sort)(term)
+ +
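These builders construct matching-logic predicates over Boolean side conditions; mlAnd and mlOr drop neutral elements and fall back to #Top and #Bottom for empty inputs. A short sketch:

from pyk.kast.inner import KVariable
from pyk.prelude.kint import INT, geInt, intToken
from pyk.prelude.ml import is_top, mlAnd, mlEqualsTrue

n = KVariable('N', sort=INT)
constraint = mlEqualsTrue(geInt(n, intToken(0)))   # {true #Equals N >=Int 0}
assert is_top(mlAnd([]))                           # empty conjunction is #Top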
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/prelude/string.html b/pyk/_modules/pyk/prelude/string.html new file mode 100644 index 00000000000..cbecceafde4 --- /dev/null +++ b/pyk/_modules/pyk/prelude/string.html @@ -0,0 +1,124 @@ + + + + + + + + pyk.prelude.string — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.prelude.string

+ 1from typing import Final
+ 2
+ 3from ..dequote import dequote_string, enquote_string
+ 4from ..kast.inner import KSort, KToken
+ 5
+ 6STRING: Final = KSort('String')
+ 7
+ 8
+
+[docs] + 9def stringToken(pretty: str) -> KToken: # noqa: N802 +10 return KToken(f'"{enquote_string(pretty)}"', STRING)
+ +11 +12 +
+[docs] +13def pretty_string(token: KToken) -> str: +14 if token.sort != STRING: +15 raise ValueError(f'Expected String token, got: {token}') +16 assert token.token[0] == '"' == token.token[-1] +17 return dequote_string(token.token[1:-1])
+ +
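stringToken and pretty_string are inverses: one quotes a Python string into a String-sorted token, the other strips and dequotes it. For example:

from pyk.prelude.string import pretty_string, stringToken

tok = stringToken('say "hi"\n')          # token text is the quoted form "say \"hi\"\n"
assert pretty_string(tok) == 'say "hi"\n'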
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/prelude/utils.html b/pyk/_modules/pyk/prelude/utils.html new file mode 100644 index 00000000000..67c78189d36 --- /dev/null +++ b/pyk/_modules/pyk/prelude/utils.html @@ -0,0 +1,127 @@ + + + + + + + + pyk.prelude.utils — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.prelude.utils

+ 1from __future__ import annotations
+ 2
+ 3from typing import TYPE_CHECKING
+ 4
+ 5from .bytes import bytesToken
+ 6from .kbool import boolToken
+ 7from .kint import intToken
+ 8from .string import stringToken
+ 9
+10if TYPE_CHECKING:
+11    from ..kast.inner import KToken
+12
+13
+
+[docs] +14def token(x: bool | int | str | bytes) -> KToken: +15 if type(x) is bool: +16 return boolToken(x) +17 if type(x) is int: +18 return intToken(x) +19 if type(x) is str: +20 return stringToken(x) +21 if type(x) is bytes: +22 return bytesToken(x) +23 raise AssertionError()
+ +
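token dispatches on the exact runtime type of its argument (using `type(x) is ...`, so a bool is never mistaken for an int). For example:

from pyk.prelude.utils import token

token(True)      # KToken('true', Bool)
token(42)        # KToken('42', Int)
token('hello')   # KToken('"hello"', String)
token(b'\x01')   # Bytes token via bytesToken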
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/proof/implies.html b/pyk/_modules/pyk/proof/implies.html new file mode 100644 index 00000000000..ac608e4dad3 --- /dev/null +++ b/pyk/_modules/pyk/proof/implies.html @@ -0,0 +1,646 @@ + + + + + + + + pyk.proof.implies — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.proof.implies

+  1from __future__ import annotations
+  2
+  3import json
+  4import logging
+  5from dataclasses import dataclass
+  6from typing import TYPE_CHECKING, Any, Final
+  7
+  8from ..cterm import CSubst, CTerm, build_claim
+  9from ..kast.inner import KApply, KInner, Subst
+ 10from ..kast.manip import extract_lhs, extract_rhs, flatten_label
+ 11from ..prelude.k import GENERATED_TOP_CELL
+ 12from ..prelude.kbool import BOOL, FALSE, TRUE
+ 13from ..prelude.ml import is_bottom, is_top, mlAnd, mlEquals, mlEqualsFalse, mlEqualsTrue
+ 14from ..utils import ensure_dir_path
+ 15from .proof import FailureInfo, Proof, ProofStatus, ProofSummary, Prover
+ 16
+ 17if TYPE_CHECKING:
+ 18    from collections.abc import Iterable, Mapping
+ 19    from pathlib import Path
+ 20
+ 21    from ..kast.inner import KSort
+ 22    from ..kast.outer import KClaim, KDefinition
+ 23    from ..kcfg import KCFGExplore
+ 24    from ..ktool.kprint import KPrint
+ 25
+ 26_LOGGER: Final = logging.getLogger(__name__)
+ 27
+ 28
+
+[docs] + 29@dataclass(frozen=True) + 30class ImpliesProofStep: + 31 proof: ImpliesProof
+ + 32 + 33 +
+[docs] + 34@dataclass + 35class ImpliesProofResult: + 36 csubst: CSubst | None + 37 simplified_antecedent: KInner | None + 38 simplified_consequent: KInner | None
+ + 39 + 40 +
+[docs] + 41class ImpliesProof(Proof[ImpliesProofStep, ImpliesProofResult]): + 42 antecedent: KInner + 43 consequent: KInner + 44 bind_universally: bool + 45 simplified_antecedent: KInner | None + 46 simplified_consequent: KInner | None + 47 csubst: CSubst | None + 48 + 49 def __init__( + 50 self, + 51 id: str, + 52 antecedent: KInner, + 53 consequent: KInner, + 54 bind_universally: bool = False, + 55 simplified_antecedent: KInner | None = None, + 56 simplified_consequent: KInner | None = None, + 57 csubst: CSubst | None = None, + 58 proof_dir: Path | None = None, + 59 subproof_ids: Iterable[str] = (), + 60 admitted: bool = False, + 61 ): + 62 super().__init__(id=id, proof_dir=proof_dir, subproof_ids=subproof_ids, admitted=admitted) + 63 self.antecedent = antecedent + 64 self.consequent = consequent + 65 self.bind_universally = bind_universally + 66 self.simplified_antecedent = simplified_antecedent + 67 self.simplified_consequent = simplified_consequent + 68 self.csubst = csubst + 69 +
+[docs] + 70 def get_steps(self) -> list[ImpliesProofStep]: + 71 if not self.can_progress: + 72 return [] + 73 return [ImpliesProofStep(self)]
+ + 74 +
+[docs] + 75 def commit(self, result: ImpliesProofResult) -> None: + 76 proof_type = type(self).__name__ + 77 if isinstance(result, ImpliesProofResult): + 78 self.csubst = result.csubst + 79 self.simplified_antecedent = result.simplified_antecedent + 80 self.simplified_consequent = result.simplified_consequent + 81 _LOGGER.info(f'{proof_type} finished {self.id}: {self.status}') + 82 else: + 83 raise ValueError(f'Incorrect result type, expected ImpliesProofResult: {result}')
+ + 84 + 85 @property + 86 def own_status(self) -> ProofStatus: + 87 if self.admitted: + 88 return ProofStatus.PASSED + 89 if self.simplified_antecedent is None or self.simplified_consequent is None: + 90 return ProofStatus.PENDING + 91 if self.csubst is None: + 92 return ProofStatus.FAILED + 93 return ProofStatus.PASSED + 94 + 95 @property + 96 def can_progress(self) -> bool: + 97 return self.simplified_antecedent is None or self.simplified_consequent is None + 98 +
+[docs] + 99 def write_proof_data(self, subproofs: bool = False) -> None: +100 super().write_proof_data() +101 if not self.proof_dir: +102 return +103 ensure_dir_path(self.proof_dir) +104 ensure_dir_path(self.proof_dir / self.id) +105 proof_path = self.proof_dir / self.id / 'proof.json' +106 if not self.up_to_date: +107 proof_json = json.dumps(self.dict) +108 proof_path.write_text(proof_json) +109 _LOGGER.info(f'Updated proof file {self.id}: {proof_path}')
+ +110 +
+[docs] +111 @classmethod +112 def from_dict(cls: type[ImpliesProof], dct: Mapping[str, Any], proof_dir: Path | None = None) -> ImpliesProof: +113 id = dct['id'] +114 antecedent = KInner.from_dict(dct['antecedent']) +115 consequent = KInner.from_dict(dct['consequent']) +116 simplified_antecedent = ( +117 KInner.from_dict(dct['simplified_antecedent']) if 'simplified_antecedent' in dct else None +118 ) +119 simplified_consequent = ( +120 KInner.from_dict(dct['simplified_consequent']) if 'simplified_consequent' in dct else None +121 ) +122 csubst = CSubst.from_dict(dct['csubst']) if 'csubst' in dct else None +123 subproof_ids = dct['subproof_ids'] +124 admitted = dct.get('admitted', False) +125 return ImpliesProof( +126 id, +127 antecedent, +128 consequent, +129 simplified_antecedent=simplified_antecedent, +130 simplified_consequent=simplified_consequent, +131 csubst=csubst, +132 admitted=admitted, +133 subproof_ids=subproof_ids, +134 proof_dir=proof_dir, +135 )
+ +136 +137 @property +138 def dict(self) -> dict[str, Any]: +139 dct = super().dict +140 dct['type'] = 'ImpliesProof' +141 dct['antecedent'] = self.antecedent.to_dict() +142 dct['consequent'] = self.consequent.to_dict() +143 if self.simplified_antecedent is not None: +144 dct['simplified_antecedent'] = self.simplified_antecedent.to_dict() +145 if self.simplified_consequent is not None: +146 dct['simplified_consequent'] = self.simplified_consequent.to_dict() +147 if self.csubst is not None: +148 dct['csubst'] = self.csubst.to_dict() +149 return dct
+ +150 +151 +
+[docs] +152class EqualityProof(ImpliesProof): +153 def __init__( +154 self, +155 id: str, +156 lhs_body: KInner, +157 rhs_body: KInner, +158 sort: KSort, +159 constraints: Iterable[KInner] = (), +160 simplified_constraints: KInner | None = None, +161 simplified_equality: KInner | None = None, +162 csubst: CSubst | None = None, +163 proof_dir: Path | None = None, +164 subproof_ids: Iterable[str] = (), +165 admitted: bool = False, +166 ): +167 antecedent = mlAnd(constraints) +168 consequent = mlEquals(lhs_body, rhs_body, arg_sort=sort, sort=GENERATED_TOP_CELL) +169 super().__init__( +170 id, +171 antecedent, +172 consequent, +173 bind_universally=True, +174 simplified_antecedent=simplified_constraints, +175 simplified_consequent=simplified_equality, +176 csubst=csubst, +177 proof_dir=proof_dir, +178 subproof_ids=subproof_ids, +179 admitted=admitted, +180 ) +181 _LOGGER.warning( +182 'Building an EqualityProof that has known soundness issues: See https://github.com/runtimeverification/haskell-backend/issues/3605.' +183 ) +184 +
+[docs] +185 @staticmethod +186 def read_proof_data(proof_dir: Path, id: str) -> EqualityProof: +187 proof_path = proof_dir / id / 'proof.json' +188 if Proof.proof_data_exists(id, proof_dir): +189 proof_dict = json.loads(proof_path.read_text()) +190 return EqualityProof.from_dict(proof_dict, proof_dir) +191 +192 raise ValueError(f'Could not load Proof from file {id}: {proof_path}')
+ +193 +
+[docs] +194 @staticmethod +195 def from_claim(claim: KClaim, defn: KDefinition, proof_dir: Path | None = None) -> EqualityProof: +196 claim_body = defn.add_sort_params(claim.body) +197 sort = defn.sort_strict(claim_body) +198 lhs_body = extract_lhs(claim_body) +199 rhs_body = extract_rhs(claim_body) +200 if not (claim.ensures is None or claim.ensures == TRUE): +201 raise ValueError(f'Cannot convert claim to EqualityProof due to non-trivial ensures clause {claim.ensures}') +202 constraints = [mlEquals(TRUE, c, arg_sort=BOOL) for c in flatten_label('_andBool_', claim.requires)] +203 return EqualityProof(claim.label, lhs_body, rhs_body, sort, constraints=constraints, proof_dir=proof_dir)
+ +204 +205 @property +206 def equality(self) -> KApply: +207 assert type(self.consequent) is KApply +208 return self.consequent +209 +210 @property +211 def lhs_body(self) -> KInner: +212 return self.equality.args[0] +213 +214 @property +215 def rhs_body(self) -> KInner: +216 return self.equality.args[1] +217 +218 @property +219 def sort(self) -> KSort: +220 return self.equality.label.params[0] +221 +222 @property +223 def constraint(self) -> KInner: +224 return self.antecedent +225 +226 @property +227 def constraints(self) -> list[KInner]: +228 return flatten_label('#And', self.constraint) +229 +230 @property +231 def simplified_constraints(self) -> KInner | None: +232 return self.simplified_antecedent +233 +234 @property +235 def simplified_equality(self) -> KInner | None: +236 return self.simplified_consequent +237 +
+[docs] +238 @classmethod +239 def from_dict(cls: type[EqualityProof], dct: Mapping[str, Any], proof_dir: Path | None = None) -> EqualityProof: +240 implies_proof = ImpliesProof.from_dict(dct, proof_dir=proof_dir) +241 assert type(implies_proof.consequent) is KApply +242 return EqualityProof( +243 id=implies_proof.id, +244 lhs_body=implies_proof.consequent.args[0], +245 rhs_body=implies_proof.consequent.args[1], +246 sort=implies_proof.consequent.label.params[0], +247 constraints=flatten_label('#And', implies_proof.antecedent), +248 simplified_constraints=implies_proof.simplified_antecedent, +249 simplified_equality=implies_proof.simplified_consequent, +250 csubst=implies_proof.csubst, +251 proof_dir=implies_proof.proof_dir, +252 subproof_ids=implies_proof.subproof_ids, +253 admitted=implies_proof.admitted, +254 )
+ +255 +256 @property +257 def dict(self) -> dict[str, Any]: +258 dct = super().dict +259 dct['type'] = 'EqualityProof' +260 return dct +261 +
+[docs] +262 def pretty(self, kprint: KPrint) -> Iterable[str]: +263 lines = [ +264 f'LHS: {kprint.pretty_print(self.lhs_body)}', +265 f'RHS: {kprint.pretty_print(self.rhs_body)}', +266 f'Constraints: {kprint.pretty_print(mlAnd(self.constraints))}', +267 f'Equality: {kprint.pretty_print(self.equality)}', +268 ] +269 if self.simplified_constraints: +270 lines.append(f'Simplified constraints: {kprint.pretty_print(self.simplified_constraints)}') +271 if self.simplified_equality: +272 lines.append(f'Simplified equality: {kprint.pretty_print(self.simplified_equality)}') +273 if self.csubst is not None: +274 lines.append(f'Implication csubst: {self.csubst}') +275 lines.append(f'Status: {self.status}') +276 return lines
+ +277 +278 @property +279 def summary(self) -> EqualitySummary: +280 return EqualitySummary(self.id, self.status, self.admitted)
+ +281 +282 +
+[docs] +283@dataclass(frozen=True) +284class EqualitySummary(ProofSummary): +285 id: str +286 status: ProofStatus +287 admitted: bool +288 +289 @property +290 def lines(self) -> list[str]: +291 return [ +292 f'EqualityProof: {self.id}', +293 f' status: {self.status}', +294 f' admitted: {self.admitted}', +295 ]
+ +296 +297 +
+[docs] +298class RefutationProof(ImpliesProof): +299 def __init__( +300 self, +301 id: str, +302 pre_constraints: Iterable[KInner], +303 last_constraint: KInner, +304 simplified_antecedent: KInner | None = None, +305 simplified_consequent: KInner | None = None, +306 csubst: CSubst | None = None, +307 proof_dir: Path | None = None, +308 subproof_ids: Iterable[str] = (), +309 admitted: bool = False, +310 ): +311 antecedent = mlAnd(mlEqualsTrue(c) for c in pre_constraints) +312 consequent = mlEqualsFalse(last_constraint) +313 super().__init__( +314 id, +315 antecedent, +316 consequent, +317 bind_universally=True, +318 simplified_antecedent=simplified_antecedent, +319 simplified_consequent=simplified_consequent, +320 csubst=csubst, +321 subproof_ids=subproof_ids, +322 proof_dir=proof_dir, +323 admitted=admitted, +324 ) +325 _LOGGER.warning( +326 'Building a RefutationProof that has known soundness issues: See https://github.com/runtimeverification/haskell-backend/issues/3605.' +327 ) +328 +
+[docs] +329 @staticmethod +330 def read_proof_data(proof_dir: Path, id: str) -> RefutationProof: +331 proof_path = proof_dir / id / 'proof.json' +332 if Proof.proof_data_exists(id, proof_dir): +333 proof_dict = json.loads(proof_path.read_text()) +334 return RefutationProof.from_dict(proof_dict, proof_dir) +335 +336 raise ValueError(f'Could not load Proof from file {id}: {proof_path}')
+ +337 +338 @property +339 def pre_constraints(self) -> list[KInner]: +340 return flatten_label('#And', self.antecedent) +341 +342 @property +343 def last_constraint(self) -> KInner: +344 assert type(self.consequent) is KApply +345 return self.consequent.args[1] +346 +347 @property +348 def simplified_constraints(self) -> KInner | None: +349 return self.simplified_antecedent +350 +
+[docs] +351 @classmethod +352 def from_dict(cls: type[RefutationProof], dct: Mapping[str, Any], proof_dir: Path | None = None) -> RefutationProof: +353 implies_proof = ImpliesProof.from_dict(dct, proof_dir=proof_dir) +354 assert type(implies_proof.consequent) is KApply +355 return RefutationProof( +356 id=implies_proof.id, +357 pre_constraints=flatten_label('#And', implies_proof.antecedent), +358 last_constraint=implies_proof.consequent.args[1], +359 simplified_antecedent=implies_proof.simplified_antecedent, +360 simplified_consequent=implies_proof.simplified_consequent, +361 csubst=implies_proof.csubst, +362 proof_dir=implies_proof.proof_dir, +363 subproof_ids=implies_proof.subproof_ids, +364 admitted=implies_proof.admitted, +365 )
+ +366 +367 @property +368 def dict(self) -> dict[str, Any]: +369 dct = super().dict +370 dct['type'] = 'RefutationProof' +371 return dct +372 +373 @property +374 def summary(self) -> RefutationSummary: +375 return RefutationSummary(self.id, self.status) +376 +
+[docs] +377 def pretty(self, kprint: KPrint) -> Iterable[str]: +378 lines = [ +379 f'Constraints: {kprint.pretty_print(mlAnd(self.pre_constraints))}', +380 f'Last constraint: {kprint.pretty_print(self.last_constraint)}', +381 ] +382 if self.csubst is not None: +383 lines.append(f'Implication csubst: {self.csubst}') +384 lines.append(f'Status: {self.status}') +385 return lines
+ +386 +
+[docs] +387 def to_claim(self, claim_id: str) -> tuple[KClaim, Subst]: +388 return build_claim( +389 claim_id, +390 init_config=self.last_constraint, +391 final_config=FALSE, +392 init_constraints=self.pre_constraints, +393 final_constraints=[], +394 )
+
+ +395 +396 +
+[docs] +397@dataclass(frozen=True) +398class RefutationSummary(ProofSummary): +399 id: str +400 status: ProofStatus +401 +402 @property +403 def lines(self) -> list[str]: +404 return [ +405 f'RefutationProof: {self.id}', +406 f' status: {self.status}', +407 ]
+ +408 +409 +
+[docs] +410class ImpliesProver(Prover[ImpliesProof, ImpliesProofStep, ImpliesProofResult]): +411 proof: ImpliesProof +412 kcfg_explore: KCFGExplore +413 assume_defined: bool +414 +
+[docs] +415 def close(self) -> None: +416 self.kcfg_explore.cterm_symbolic._kore_client.close()
+ +417 +418 def __init__(self, proof: ImpliesProof, kcfg_explore: KCFGExplore, assume_defined: bool = False): +419 self.kcfg_explore = kcfg_explore +420 self.proof = proof +421 self.assume_defined = assume_defined +422 +
+[docs] +423 def step_proof(self, step: ImpliesProofStep) -> list[ImpliesProofResult]: +424 proof_type = type(step.proof).__name__ +425 _LOGGER.info(f'Attempting {proof_type} {step.proof.id}') +426 +427 if step.proof.status is not ProofStatus.PENDING: +428 _LOGGER.info(f'{proof_type} finished {step.proof.id}: {step.proof.status}') +429 return [] +430 +431 # to prove the equality, we check the implication of the form `constraints #Implies LHS #Equals RHS`, i.e. +432 # "LHS equals RHS under these constraints" +433 simplified_antecedent, _ = self.kcfg_explore.cterm_symbolic.kast_simplify(step.proof.antecedent) +434 simplified_consequent, _ = self.kcfg_explore.cterm_symbolic.kast_simplify(step.proof.consequent) +435 _LOGGER.debug(f'Simplified antecedent: {self.kcfg_explore.pretty_print(simplified_antecedent)}') +436 _LOGGER.debug(f'Simplified consequent: {self.kcfg_explore.pretty_print(simplified_consequent)}') +437 +438 csubst: CSubst | None = None +439 +440 if is_bottom(simplified_antecedent): +441 _LOGGER.warning(f'Antecedent of implication (proof constraints) simplifies to #Bottom {step.proof.id}') +442 csubst = CSubst(Subst({}), ()) +443 +444 elif is_top(simplified_consequent): +445 _LOGGER.warning(f'Consequent of implication (proof equality) simplifies to #Top {step.proof.id}') +446 csubst = CSubst(Subst({}), ()) +447 +448 else: +449 # TODO: we should not be forced to include the dummy configuration in the antecedent and consequent +450 dummy_config = self.kcfg_explore.cterm_symbolic._definition.empty_config(sort=GENERATED_TOP_CELL) +451 _result = self.kcfg_explore.cterm_symbolic.implies( +452 antecedent=CTerm(config=dummy_config, constraints=[simplified_antecedent]), +453 consequent=CTerm(config=dummy_config, constraints=[simplified_consequent]), +454 bind_universally=step.proof.bind_universally, +455 assume_defined=self.assume_defined, +456 ) +457 result = _result.csubst +458 if result is not None: +459 csubst = result +460 +461 _LOGGER.info(f'{proof_type} finished {step.proof.id}: {step.proof.status}') +462 return [ +463 ImpliesProofResult( +464 csubst=csubst, simplified_antecedent=simplified_antecedent, simplified_consequent=simplified_consequent +465 ) +466 ]
+ +467 +
+[docs] +468 def init_proof(self, proof: ImpliesProof) -> None: +469 pass
+ +470 +
+[docs] +471 def failure_info(self, proof: ImpliesProof) -> FailureInfo: +472 # TODO add implementation +473 return FailureInfo()
+
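A rough sketch of how these pieces fit together, assuming a KClaim, a KDefinition, a proof directory and a connected KCFGExplore already exist (none of them are constructed here):

from pyk.proof.implies import EqualityProof, ImpliesProver

# `claim`, `definition`, `proof_dir` and `kcfg_explore` are assumed to be available
proof = EqualityProof.from_claim(claim, definition, proof_dir=proof_dir)
prover = ImpliesProver(proof, kcfg_explore, assume_defined=True)
prover.advance_proof(proof)      # get_steps -> step_proof -> commit loop, see pyk.proof.proof
print(proof.status)              # PASSED iff the implication check produced a csubst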
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/proof/proof.html b/pyk/_modules/pyk/proof/proof.html new file mode 100644 index 00000000000..504c57f8dd3 --- /dev/null +++ b/pyk/_modules/pyk/proof/proof.html @@ -0,0 +1,731 @@ + + + + + + + + pyk.proof.proof — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pyk.proof.proof

+  1from __future__ import annotations
+  2
+  3import json
+  4import logging
+  5from abc import ABC, abstractmethod
+  6from concurrent.futures import ThreadPoolExecutor, wait
+  7from dataclasses import dataclass
+  8from enum import Enum
+  9from itertools import chain
+ 10from threading import current_thread
+ 11from typing import TYPE_CHECKING, ContextManager, Generic, TypeVar
+ 12
+ 13from ..utils import ensure_dir_path, hash_file, hash_str
+ 14
+ 15if TYPE_CHECKING:
+ 16    from collections.abc import Callable, Hashable, Iterable, Mapping
+ 17    from concurrent.futures import Executor, Future
+ 18    from pathlib import Path
+ 19    from typing import Any, Final
+ 20
+ 21    T = TypeVar('T', bound='Proof')
+ 22
+ 23P = TypeVar('P', bound='Proof')
+ 24PS = TypeVar('PS', bound='Hashable')
+ 25SR = TypeVar('SR')
+ 26
+ 27_LOGGER: Final = logging.getLogger(__name__)
+ 28
+ 29
+
+[docs] + 30class ProofStatus(Enum): + 31 PASSED = 'passed' + 32 FAILED = 'failed' + 33 PENDING = 'pending'
+ + 34 + 35 +
+[docs] + 36class Proof(Generic[PS, SR]): + 37 """Abstract representation of a proof that can be executed in one or more discrete steps. + 38 + 39 Generic type variables: + 40 + 41 - PS: Proof step: data required to perform a step of the proof. + 42 - SR: Step result: data produced by executing a PS with ``Prover.step_proof`` used to update the `Proof`. + 43 """ + 44 + 45 _PROOF_TYPES: Final = {'APRProof', 'EqualityProof', 'RefutationProof'} + 46 + 47 id: str + 48 proof_dir: Path | None + 49 _subproofs: dict[str, Proof] + 50 admitted: bool + 51 failure_info: FailureInfo | None + 52 + 53 @property + 54 def proof_subdir(self) -> Path | None: + 55 if self.proof_dir is None: + 56 return None + 57 return self.proof_dir / self.id + 58 + 59 def __init__( + 60 self, + 61 id: str, + 62 proof_dir: Path | None = None, + 63 subproof_ids: Iterable[str] = (), + 64 admitted: bool = False, + 65 ) -> None: + 66 self.id = id + 67 self.admitted = admitted + 68 self.proof_dir = proof_dir + 69 self._subproofs = {} + 70 if self.proof_dir is None and len(list(subproof_ids)) > 0: + 71 raise ValueError(f'Cannot read subproofs {subproof_ids} of proof {self.id} with no proof_dir') + 72 if len(list(subproof_ids)) > 0: + 73 for proof_id in subproof_ids: + 74 self.fetch_subproof_data(proof_id, force_reread=True) + 75 if proof_dir is not None: + 76 ensure_dir_path(proof_dir) + 77 if self.proof_dir is not None: + 78 ensure_dir_path(self.proof_dir) + 79 +
+[docs] + 80 @abstractmethod + 81 def commit(self, result: SR) -> None: + 82 """Apply the step result of type `SR` to `self`, modifying `self`.""" + 83 ...
+ + 84 +
+[docs] + 85 def admit(self) -> None: + 86 self.admitted = True
+ + 87 + 88 @property + 89 def subproof_ids(self) -> list[str]: + 90 return [sp.id for sp in self._subproofs.values()] + 91 +
+[docs] + 92 def write_proof(self, subproofs: bool = False) -> None: + 93 if not self.proof_dir: + 94 return + 95 proof_path = self.proof_dir / f'{hash_str(self.id)}.json' + 96 if not self.up_to_date: + 97 proof_json = json.dumps(self.dict) + 98 proof_path.write_text(proof_json) + 99 _LOGGER.info(f'Updated proof file {self.id}: {proof_path}') +100 if subproofs: +101 for sp in self.subproofs: +102 sp.write_proof(subproofs=subproofs)
+ +103 +
+[docs] +104 @staticmethod +105 def proof_exists(id: str, proof_dir: Path) -> bool: +106 proof_path = proof_dir / f'{hash_str(id)}.json' +107 return proof_path.exists() and proof_path.is_file()
+ +108 +
+[docs] +109 @staticmethod +110 def proof_data_exists(id: str, proof_dir: Path) -> bool: +111 proof_path = proof_dir / id / 'proof.json' +112 return proof_path.exists() and proof_path.is_file()
+ +113 +114 @property +115 def digest(self) -> str: +116 return hash_str(json.dumps(self.dict)) +117 +118 @property +119 def up_to_date(self) -> bool: +120 """Check that the proof's representation on disk is up-to-date.""" +121 if self.proof_dir is None: +122 raise ValueError(f'Cannot check if proof {self.id} with no proof_dir is up-to-date') +123 proof_path = self.proof_dir / f'{hash_str(self.id)}.json' +124 if proof_path.exists() and proof_path.is_file(): +125 return self.digest == hash_file(proof_path) +126 else: +127 return False +128 +
+[docs] +129 def read_subproof(self, proof_id: str) -> None: +130 if self.proof_dir is None: +131 raise ValueError(f'Cannot add subproof to the proof {self.id} with no proof_dir') +132 assert self.proof_dir +133 if not Proof.proof_exists(proof_id, self.proof_dir): +134 raise ValueError(f"Cannot find subproof {proof_id} in parent proof's {self.id} proof_dir {self.proof_dir}") +135 self._subproofs[proof_id] = self.fetch_subproof(proof_id, force_reread=True)
+ +136 +
+[docs] +137 def read_subproof_data(self, proof_id: str) -> None: +138 if self.proof_dir is None: +139 raise ValueError(f'Cannot add subproof to the proof {self.id} with no proof_dir') +140 assert self.proof_dir +141 if not Proof.proof_data_exists(proof_id, self.proof_dir): +142 raise ValueError(f"Cannot find subproof {proof_id} in parent proof's {self.id} proof_dir {self.proof_dir}") +143 self._subproofs[proof_id] = self.fetch_subproof_data(proof_id, force_reread=True)
+ +144 +
+[docs] +145 def add_subproof(self, proof: Proof) -> None: +146 self._subproofs[proof.id] = proof
+ +147 +
+[docs] +148 def remove_subproof(self, proof_id: str) -> None: +149 del self._subproofs[proof_id]
+ +150 +
+[docs] +151 def fetch_subproof( +152 self, proof_id: str, force_reread: bool = False, uptodate_check_method: str = 'timestamp' +153 ) -> Proof: +154 """Get a subproof, re-reading from disk if it's not up-to-date.""" +155 if self.proof_dir is not None and (force_reread or not self._subproofs[proof_id].up_to_date): +156 updated_subproof = Proof.read_proof(proof_id, self.proof_dir) +157 self._subproofs[proof_id] = updated_subproof +158 return updated_subproof +159 else: +160 return self._subproofs[proof_id]
+ +161 +
+[docs] +162 def fetch_subproof_data( +163 self, proof_id: str, force_reread: bool = False, uptodate_check_method: str = 'timestamp' +164 ) -> Proof: +165 """Get a subproof, re-reading from disk if it's not up-to-date.""" +166 if self.proof_dir is not None and (force_reread or not self._subproofs[proof_id].up_to_date): +167 updated_subproof = Proof.read_proof_data(self.proof_dir, proof_id) +168 self._subproofs[proof_id] = updated_subproof +169 return updated_subproof +170 else: +171 return self._subproofs[proof_id]
+ +172 +173 @property +174 def subproofs(self) -> Iterable[Proof]: +175 """Return the subproofs, re-reading from disk the ones that changed.""" +176 return self._subproofs.values() +177 +178 @property +179 def subproofs_status(self) -> ProofStatus: +180 if any(p.failed for p in self.subproofs): +181 return ProofStatus.FAILED +182 elif all(p.passed for p in self.subproofs): +183 return ProofStatus.PASSED +184 else: +185 return ProofStatus.PENDING +186 +187 @property +188 @abstractmethod +189 def own_status(self) -> ProofStatus: ... +190 +191 @property +192 def status(self) -> ProofStatus: +193 if self.admitted: +194 return ProofStatus.PASSED +195 if self.own_status == ProofStatus.FAILED or self.subproofs_status == ProofStatus.FAILED: +196 return ProofStatus.FAILED +197 if self.own_status == ProofStatus.PENDING or self.subproofs_status == ProofStatus.PENDING: +198 return ProofStatus.PENDING +199 return ProofStatus.PASSED +200 +201 @property +202 @abstractmethod +203 def can_progress(self) -> bool: ... +204 +205 @property +206 def failed(self) -> bool: +207 return self.status == ProofStatus.FAILED +208 +209 @property +210 def passed(self) -> bool: +211 return self.status == ProofStatus.PASSED +212 +213 @property +214 def dict(self) -> dict[str, Any]: +215 return { +216 'id': self.id, +217 'subproof_ids': self.subproof_ids, +218 'admitted': self.admitted, +219 } +220 +
+[docs] +221 @classmethod +222 @abstractmethod +223 def from_dict(cls: type[Proof], dct: Mapping[str, Any], proof_dir: Path | None = None) -> Proof: ...
+ +224 +
+[docs] +225 @classmethod +226 def read_proof(cls: type[Proof], id: str, proof_dir: Path) -> Proof: +227 # these local imports allow us to call .to_dict() based on the proof type we read from JSON +228 from .implies import EqualityProof, RefutationProof # noqa +229 from .reachability import APRProof # noqa +230 +231 proof_path = proof_dir / f'{hash_str(id)}.json' +232 if Proof.proof_exists(id, proof_dir): +233 proof_dict = json.loads(proof_path.read_text()) +234 proof_type = proof_dict['type'] +235 admitted = proof_dict.get('admitted', False) +236 _LOGGER.info(f'Reading {proof_type} from file {id}: {proof_path}') +237 if proof_type in Proof._PROOF_TYPES: +238 return locals()[proof_type].from_dict(proof_dict, proof_dir) +239 +240 raise ValueError(f'Could not load Proof from file {id}: {proof_path}')
+ +241 +
+[docs] +242 @staticmethod +243 def read_proof_data(proof_dir: Path, id: str) -> Proof: +244 # these local imports allow us to call .to_dict() based on the proof type we read from JSON +245 from .implies import EqualityProof, RefutationProof # noqa +246 from .reachability import APRProof # noqa +247 +248 proof_path = proof_dir / id / 'proof.json' +249 if Proof.proof_data_exists(id, proof_dir): +250 proof_dict = json.loads(proof_path.read_text()) +251 proof_type = proof_dict['type'] +252 admitted = proof_dict.get('admitted', False) +253 _LOGGER.info(f'Reading {proof_type} from file {id}: {proof_path}') +254 if proof_type in Proof._PROOF_TYPES: +255 return locals()[proof_type].read_proof_data(proof_dir, id) +256 +257 raise ValueError(f'Could not load Proof from file {id}: {proof_path}')
+ +258 +
+[docs] +259 @abstractmethod +260 def write_proof_data(self) -> None: +261 for sp in self.subproofs: +262 sp.write_proof_data()
+ +263 +264 @property +265 def json(self) -> str: +266 return json.dumps(self.dict) +267 +268 @property +269 def summary(self) -> ProofSummary: +270 @dataclass +271 class BaseSummary(ProofSummary): +272 id: str +273 status: ProofStatus +274 +275 @property +276 def lines(self) -> list[str]: +277 return [f'Proof: {self.id}', f' status: {self.status}'] +278 +279 subproofs_summaries = [subproof.summary for subproof in self.subproofs] +280 return CompositeSummary([BaseSummary(self.id, self.status), *subproofs_summaries]) +281 +282 @property +283 def one_line_summary(self) -> str: +284 return self.status.name +285 +
+[docs] +286 @abstractmethod +287 def get_steps(self) -> Iterable[PS]: +288 """Return all currently available steps associated with this Proof. Should not modify `self`.""" +289 ...
+
+ +290 +291 +
+[docs] +292class ProofSummary(ABC): +293 id: str +294 status: ProofStatus +295 +296 @property +297 @abstractmethod +298 def lines(self) -> list[str]: ... +299 +300 def __str__(self) -> str: +301 return '\n'.join(self.lines)
+ +302 +303 +
+[docs] +304@dataclass +305class CompositeSummary(ProofSummary): +306 summaries: tuple[ProofSummary, ...] +307 +308 def __init__(self, _summaries: Iterable[ProofSummary]): +309 self.summaries = tuple(chain(_summaries)) +310 +311 def __str__(self) -> str: +312 return '\n'.join(str(summary) for summary in self.summaries) +313 +314 @property +315 def lines(self) -> list[str]: +316 return [line for lines in (summary.lines for summary in self.summaries) for line in lines]
+ +317 +318 +
+[docs] +319class FailureInfo: ...
+ +320 +321 +
+[docs] +322def parallel_advance_proof( +323 proof: P, +324 create_prover: Callable[[], Prover[P, PS, SR]], +325 max_iterations: int | None = None, +326 fail_fast: bool = False, +327 max_workers: int = 1, +328 callback: Callable[[P], None] = (lambda x: None), +329 maintenance_rate: int = 1, +330) -> None: +331 """Advance proof with multithreaded strategy. +332 +333 `Prover.step_proof()` to a worker thread pool for each step as available, +334 and `Proof.commit()` results as they become available, +335 and get new steps with `Proof.get_steps()` and submit to thread pool. +336 +337 Generic type variables: +338 +339 - P: Type of proof to be advanced in parallel. +340 - PS: Proof step: data required to perform a step of the proof. +341 - SR: Step result: data produced by executing a PS with `Prover.step_proof` used to update the `Proof`. +342 +343 Args: +344 proof: The proof to advance. +345 create_prover: Function which creates a new `Prover`. These provers must not reference any shared +346 data to be written during `parallel_advance_proof`, to avoid race conditions. +347 max_iterations: Maximum number of steps to take. +348 fail_fast: If the proof is failing after finishing a step, +349 halt execution even if there are still available steps. +350 max_workers: Maximum number of worker threads the pool can spawn. +351 callback: Callable to run during proof maintenance, useful for getting real-time information about the proof. +352 maintenance_rate: Number of iterations between proof maintenance (writing to disk and executing callback). +353 """ +354 pending: set[Future[Any]] = set() +355 explored: set[PS] = set() +356 iterations = 0 +357 +358 with create_prover() as main_prover: +359 main_prover.init_proof(proof) +360 +361 with _ProverPool[P, PS, SR](create_prover=create_prover, max_workers=max_workers) as pool: +362 +363 def submit_steps(_steps: Iterable[PS]) -> None: +364 for step in _steps: +365 if step in explored: +366 continue +367 explored.add(step) +368 future: Future[Any] = pool.submit(step) # <-- schedule steps for execution +369 pending.add(future) +370 +371 submit_steps(proof.get_steps()) +372 +373 while True: +374 if not pending: +375 break +376 done, _ = wait(pending, return_when='FIRST_COMPLETED') +377 future = done.pop() +378 proof_results = future.result() +379 for result in proof_results: +380 proof.commit(result) +381 iterations += 1 +382 if iterations % maintenance_rate == 0: +383 proof.write_proof_data() +384 callback(proof) +385 if max_iterations is not None and max_iterations <= iterations: +386 break +387 if fail_fast and proof.failed: +388 _LOGGER.warning(f'Terminating proof early because fail_fast is set: {proof.id}') +389 break +390 submit_steps(proof.get_steps()) +391 pending.remove(future) +392 +393 if proof.failed: +394 proof.failure_info = main_prover.failure_info(proof) +395 proof.write_proof_data()
+ +396 +397 +398class _ProverPool(ContextManager['_ProverPool'], Generic[P, PS, SR]): +399 """Wrapper for `ThreadPoolExecutor` which spawns one `Prover` for each worker thread. +400 +401 Generic type variables: +402 +403 - P: Type of proof to be advanced in parallel. +404 - PS: Proof step: data required to perform a step of the proof. +405 - SR: Step result: data produced by executing a PS with `Prover.step_proof` used to update the `Proof`. +406 """ +407 +408 _create_prover: Callable[[], Prover[P, PS, SR]] +409 _provers: dict[str, Prover[P, PS, SR]] +410 _executor: Executor +411 _closed: bool +412 +413 def __init__( +414 self, +415 create_prover: Callable[[], Prover[P, PS, SR]], +416 *, +417 max_workers: int | None = None, +418 ) -> None: +419 """Initialize an instance. +420 +421 Args: +422 create_prover: Function which creates a new `Prover`. These provers must not reference any shared +423 data to be written during `parallel_advance_proof`, to avoid race conditions. +424 max_workers (optional): Maximum number of worker threads the pool can spawn. +425 """ +426 self._create_prover = create_prover +427 self._provers = {} +428 self._executor = ThreadPoolExecutor(max_workers) +429 self._closed = False +430 +431 def __enter__(self) -> _ProverPool[P, PS, SR]: +432 self._executor.__enter__() +433 return self +434 +435 def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: +436 self._executor.__exit__(exc_type, exc_val, exc_tb) +437 self.close() +438 +439 def close(self) -> None: +440 self._closed = True +441 for prover in self._provers.values(): +442 prover.close() +443 +444 def submit(self, proof_step: PS) -> Future[Iterable[SR]]: +445 if self._closed: +446 raise ValueError('ProverPool has been closed') +447 return self._executor.submit(self._with_prover(proof_step)) +448 +449 def _with_prover(self, proof_step: PS) -> Callable[[], Iterable[SR]]: +450 +451 def step() -> Iterable[SR]: +452 thread_name = current_thread().name +453 prover: Prover[P, PS, SR] | None = self._provers.get(thread_name) +454 if prover is None: +455 prover = self._create_prover() +456 self._provers[thread_name] = prover +457 return prover.step_proof(proof_step) +458 +459 return step +460 +461 +
+[docs] +462class Prover(ContextManager['Prover'], Generic[P, PS, SR]): +463 """Abstract class which advances `Proof`s with `init_proof()` and `step_proof()`. +464 +465 Generic type variables: +466 +467 - P: Type of proof this `Prover` operates on. +468 - PS: Proof step: data required to perform a step of the proof. +469 - SR: Step result: data produced by executing a PS with `Prover.step_proof` used to update the `Proof`. +470 """ +471 +472 def __enter__(self) -> Prover[P, PS, SR]: +473 return self +474 +475 def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: +476 self.close() +477 +
+[docs] +478 @abstractmethod +479 def close(self) -> None: ...
+ +480 +
+[docs] +481 @abstractmethod +482 def failure_info(self, proof: P) -> FailureInfo: ...
+ +483 +
+[docs] +484 @abstractmethod +485 def step_proof(self, step: PS) -> Iterable[SR]: +486 """Do the work associated with a `PS`, a proof step. +487 +488 Should not modify a `Proof` or `self`, but may read from `self` as long as +489 those fields are not being modified during `step_proof()`, `get_steps()`, and `commit()`. +490 """ +491 ...
+ +492 +
+[docs] +493 @abstractmethod +494 def init_proof(self, proof: P) -> None: +495 """Perform any initialization steps needed at the beginning of proof execution. +496 +497 For example, for `APRProver`, upload circularity and depends module of the proof +498 to the `KoreServer` via `add_module`. +499 """ +500 ...
+ +501 +
+[docs] +502 def advance_proof( +503 self, +504 proof: P, +505 max_iterations: int | None = None, +506 fail_fast: bool = False, +507 callback: Callable[[P], None] = (lambda x: None), +508 maintenance_rate: int = 1, +509 ) -> None: +510 """Advance a proof. +511 +512 Performs loop `Proof.get_steps()` -> `Prover.step_proof()` -> `Proof.commit()`. +513 +514 Args: +515 proof: proof to advance. +516 max_iterations (optional): Maximum number of steps to take. +517 fail_fast: If the proof is failing after finishing a step, +518 halt execution even if there are still available steps. +519 callback: Callable to run in between each completed step, useful for getting real-time information about the proof. +520 maintenance_rate: Number of iterations between proof maintenance (writing to disk and executing callback). +521 """ +522 iterations = 0 +523 _LOGGER.info(f'Initializing proof: {proof.id}') +524 self.init_proof(proof) +525 while True: +526 steps = list(proof.get_steps()) +527 _LOGGER.info(f'Found {len(steps)} next steps for proof: {proof.id}') +528 if len(steps) == 0: +529 break +530 for step in steps: +531 if fail_fast and proof.failed: +532 _LOGGER.warning(f'Terminating proof early because fail_fast is set: {proof.id}') +533 proof.failure_info = self.failure_info(proof) +534 return +535 if max_iterations is not None and max_iterations <= iterations: +536 return +537 iterations += 1 +538 results = self.step_proof(step) +539 for result in results: +540 proof.commit(result) +541 if iterations % maintenance_rate == 0: +542 proof.write_proof_data() +543 callback(proof) +544 +545 if proof.failed: +546 proof.failure_info = self.failure_info(proof)
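A sketch of calling `advance_proof` directly, assuming `prover` is a concrete `Prover` (for example an `APRProver`) and `proof` a matching proof object; because `Prover` is a context manager, `close()` runs automatically on exit.

with prover:                      # __exit__ calls prover.close()
    prover.advance_proof(
        proof,
        max_iterations=100,       # take at most 100 steps in this session
        fail_fast=True,
        maintenance_rate=5,       # write proof data (and run the callback) every 5 iterations
    )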
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/proof/reachability.html b/pyk/_modules/pyk/proof/reachability.html new file mode 100644 index 00000000000..4302b5367cc --- /dev/null +++ b/pyk/_modules/pyk/proof/reachability.html @@ -0,0 +1,1294 @@ + + + + + + + + pyk.proof.reachability — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +

Source code for pyk.proof.reachability

+   1from __future__ import annotations
+   2
+   3import json
+   4import logging
+   5import re
+   6from dataclasses import dataclass, field
+   7from typing import TYPE_CHECKING
+   8
+   9from pyk.kore.rpc import LogEntry
+  10
+  11from ..cterm.cterm import remove_useless_constraints
+  12from ..kast.inner import KInner, Subst
+  13from ..kast.manip import flatten_label, free_vars, ml_pred_to_bool
+  14from ..kast.outer import KFlatModule, KImport, KRule
+  15from ..kcfg import KCFG, KCFGStore
+  16from ..kcfg.exploration import KCFGExploration
+  17from ..konvert import kflatmodule_to_kore
+  18from ..ktool.claim_index import ClaimIndex
+  19from ..prelude.ml import mlAnd, mlTop
+  20from ..utils import FrozenDict, ensure_dir_path, hash_str, shorten_hashes, single
+  21from .implies import ProofSummary, Prover, RefutationProof
+  22from .proof import CompositeSummary, FailureInfo, Proof, ProofStatus
+  23
+  24if TYPE_CHECKING:
+  25    from collections.abc import Iterable, Mapping
+  26    from pathlib import Path
+  27    from typing import Any, Final, TypeVar
+  28
+  29    from ..kast.outer import KClaim, KDefinition, KFlatModuleList, KRuleLike
+  30    from ..kcfg import KCFGExplore
+  31    from ..kcfg.explore import KCFGExtendResult
+  32    from ..kcfg.kcfg import CSubst, NodeIdLike
+  33
+  34    T = TypeVar('T', bound='Proof')
+  35
+  36_LOGGER: Final = logging.getLogger(__name__)
+  37
+  38
+
+[docs] + 39@dataclass + 40class APRProofResult: + 41 node_id: int + 42 prior_loops_cache_update: tuple[int, ...] + 43 optimize_kcfg: bool
+ + 44 + 45 +
+[docs] + 46@dataclass + 47class APRProofExtendResult(APRProofResult): + 48 """Proof extension to be applied.""" + 49 + 50 extension_to_apply: KCFGExtendResult
+ + 51 + 52 +
+[docs] + 53@dataclass + 54class APRProofExtendAndCacheResult(APRProofExtendResult): + 55 """Proof extension to be cached.""" + 56 + 57 extension_to_cache: KCFGExtendResult
+ + 58 + 59 +
+[docs] + 60@dataclass + 61class APRProofUseCacheResult(APRProofResult): + 62 """Proof extension to be applied using the extension cache.""" + 63 + 64 cached_node_id: NodeIdLike
+ + 65 + 66 +
+[docs] + 67@dataclass + 68class APRProofSubsumeResult(APRProofResult): + 69 csubst: CSubst
+ + 70 + 71 +
+[docs] + 72@dataclass + 73class APRProofTerminalResult(APRProofResult): ...
+ + 74 + 75 +
+[docs] + 76@dataclass + 77class APRProofBoundedResult(APRProofResult): ...
+ + 78 + 79 +
+[docs] + 80@dataclass(frozen=True) + 81class APRProofStep: + 82 node: KCFG.Node + 83 target: KCFG.Node + 84 proof_id: str + 85 bmc_depth: int | None + 86 use_cache: NodeIdLike | None + 87 module_name: str + 88 shortest_path_to_node: tuple[KCFG.Node, ...] + 89 prior_loops_cache: FrozenDict[int, tuple[int, ...]] = field(compare=False) + 90 circularity: bool + 91 nonzero_depth: bool + 92 circularity_rule_id: str
+ + 93 + 94 +
+[docs] + 95class APRProof(Proof[APRProofStep, APRProofResult], KCFGExploration): + 96 """Represent an all-path reachability proof. + 97 + 98 APRProof and APRProver implement all-path reachability logic, + 99 as introduced by A. Stefanescu and others in their paper 'All-Path Reachability Logic': + 100 https://doi.org/10.23638/LMCS-15(2:5)2019 + 101 + 102 Note that reachability logic formula `phi =>A psi` has *not* the same meaning + 103 as CTL/CTL*'s `phi -> AF psi`, since reachability logic ignores infinite traces. + 104 This implementation extends the above with bounded model checking, allowing the user + 105 to specify an optional loop iteration bound for each loop in execution. + 106 """ + 107 + 108 node_refutations: dict[int, RefutationProof] # TODO _node_refutatations + 109 init: int + 110 target: int + 111 bmc_depth: int | None + 112 _bounded: set[int] + 113 logs: dict[int, tuple[LogEntry, ...]] + 114 circularity: bool + 115 _exec_time: float + 116 error_info: Exception | None + 117 prior_loops_cache: dict[int, tuple[int, ...]] + 118 + 119 _checked_for_bounded: set[int] + 120 _next_steps: dict[NodeIdLike, KCFGExtendResult] + 121 + 122 def __init__( + 123 self, + 124 id: str, + 125 kcfg: KCFG, + 126 terminal: Iterable[int], + 127 init: NodeIdLike, + 128 target: NodeIdLike, + 129 logs: dict[int, tuple[LogEntry, ...]], + 130 bmc_depth: int | None = None, + 131 bounded: Iterable[int] | None = None, + 132 proof_dir: Path | None = None, + 133 node_refutations: dict[int, str] | None = None, + 134 subproof_ids: Iterable[str] = (), + 135 circularity: bool = False, + 136 admitted: bool = False, + 137 _exec_time: float = 0, + 138 error_info: Exception | None = None, + 139 prior_loops_cache: dict[int, Iterable[int]] | None = None, + 140 ): + 141 Proof.__init__(self, id, proof_dir=proof_dir, subproof_ids=subproof_ids, admitted=admitted) + 142 KCFGExploration.__init__(self, kcfg, terminal) + 143 + 144 self.failure_info = None + 145 self.init = kcfg._resolve(init) + 146 self.target = kcfg._resolve(target) + 147 self.bmc_depth = bmc_depth + 148 self._bounded = set(bounded) if bounded is not None else set() + 149 self.logs = logs + 150 self.circularity = circularity + 151 self.node_refutations = {} + 152 self.prior_loops_cache = ( + 153 {int(k): tuple(v) for k, v in prior_loops_cache.items()} if prior_loops_cache is not None else {} + 154 ) + 155 self.kcfg._kcfg_store = KCFGStore(self.proof_subdir / 'kcfg') if self.proof_subdir else None + 156 self._exec_time = _exec_time + 157 self.error_info = error_info + 158 + 159 self._checked_for_bounded = set() + 160 self._next_steps = {} + 161 + 162 if self.proof_dir is not None and self.proof_subdir is not None: + 163 ensure_dir_path(self.proof_dir) + 164 ensure_dir_path(self.proof_subdir) + 165 + 166 if node_refutations is not None: + 167 refutations_not_in_subprroofs = set(node_refutations.values()).difference( + 168 set(subproof_ids if subproof_ids else []) + 169 ) + 170 if refutations_not_in_subprroofs: + 171 raise ValueError( + 172 f'All node refutations must be included in subproofs, violators are {refutations_not_in_subprroofs}' + 173 ) + 174 for node_id, proof_id in node_refutations.items(): + 175 subproof = self._subproofs[proof_id] + 176 assert type(subproof) is RefutationProof + 177 self.node_refutations[node_id] = subproof + 178 +
+[docs] + 179 def get_steps(self) -> list[APRProofStep]: + 180 steps: list[APRProofStep] = [] + 181 for node in self.pending: + 182 + 183 shortest_path: list[KCFG.Node] = [] + 184 if self.bmc_depth is not None: + 185 shortest_path = [] + 186 for succ in reversed(self.shortest_path_to(node.id)): + 187 if self.kcfg.zero_depth_between(succ.source.id, node.id): + 188 ... + 189 else: + 190 shortest_path.append(succ.source) + 191 + 192 nonzero_depth = self.nonzero_depth(node) + 193 module_name = self.circularities_module_name if nonzero_depth else self.dependencies_module_name + 194 + 195 predecessor_edges = self.kcfg.edges(target_id=node.id) + 196 predecessor_node_id: NodeIdLike | None = ( + 197 single(predecessor_edges).source.id if predecessor_edges != [] else None + 198 ) + 199 + 200 steps.append( + 201 APRProofStep( + 202 bmc_depth=self.bmc_depth, + 203 module_name=module_name, + 204 node=node, + 205 use_cache=predecessor_node_id if predecessor_node_id in self._next_steps else None, + 206 proof_id=self.id, + 207 target=self.kcfg.node(self.target), + 208 shortest_path_to_node=tuple(shortest_path), + 209 prior_loops_cache=FrozenDict(self.prior_loops_cache), + 210 circularity=self.circularity, + 211 nonzero_depth=nonzero_depth, + 212 circularity_rule_id=f'{self.rule_id}-{self.init}-TO-{self.target}', + 213 ) + 214 ) + 215 return steps
+ + 216 +
+[docs] + 217 def commit(self, result: APRProofResult) -> None: + 218 self.prior_loops_cache[result.node_id] = result.prior_loops_cache_update + 219 # Result has been cached, use the cache + 220 if isinstance(result, APRProofUseCacheResult): + 221 assert result.cached_node_id in self._next_steps + 222 self.kcfg.extend( + 223 extend_result=self._next_steps.pop(result.cached_node_id), + 224 optimize_kcfg=result.optimize_kcfg, + 225 node=self.kcfg.node(result.node_id), + 226 logs=self.logs, + 227 ) + 228 elif isinstance(result, APRProofExtendResult): + 229 # Result contains two steps, one to be applied, one to be cached + 230 if isinstance(result, APRProofExtendAndCacheResult): + 231 assert result.node_id not in self._next_steps + 232 self._next_steps[result.node_id] = result.extension_to_cache + 233 self.kcfg.extend( + 234 extend_result=result.extension_to_apply, + 235 optimize_kcfg=result.optimize_kcfg, + 236 node=self.kcfg.node(result.node_id), + 237 logs=self.logs, + 238 ) + 239 elif isinstance(result, APRProofSubsumeResult): + 240 self.kcfg.create_cover(result.node_id, self.target, csubst=result.csubst) + 241 elif isinstance(result, APRProofTerminalResult): + 242 self.add_terminal(result.node_id) + 243 elif isinstance(result, APRProofBoundedResult): + 244 self.add_bounded(result.node_id) + 245 else: + 246 raise ValueError(f'Incorrect result type, expected APRProofResult: {result}')
+ + 247 +
+[docs] + 248 def nonzero_depth(self, node: KCFG.Node) -> bool: + 249 return not self.kcfg.zero_depth_between(self.init, node.id)
+ + 250 + 251 @property + 252 def rule_id(self) -> str: + 253 return f'APRPROOF-{self.id.upper()}' + 254 + 255 @property + 256 def module_name(self) -> str: + 257 return self._make_module_name(self.id) + 258 + 259 @property + 260 def pending(self) -> list[KCFG.Node]: + 261 return [node for node in self.explorable if self.is_pending(node.id)] + 262 + 263 @property + 264 def failing(self) -> list[KCFG.Node]: + 265 return [nd for nd in self.kcfg.leaves if self.is_failing(nd.id)] + 266 + 267 @property + 268 def bounded(self) -> list[KCFG.Node]: + 269 return [nd for nd in self.kcfg.leaves if self.is_bounded(nd.id)] + 270 +
+[docs] + 271 def is_refuted(self, node_id: NodeIdLike) -> bool: + 272 return self.kcfg._resolve(node_id) in self.node_refutations.keys()
+ + 273 +
+[docs] + 274 def is_pending(self, node_id: NodeIdLike) -> bool: + 275 return ( + 276 self.is_explorable(node_id) + 277 and not self.is_target(node_id) + 278 and not self.is_refuted(node_id) + 279 and not self.is_bounded(node_id) + 280 )
+ + 281 + 282 @property + 283 def circularities_module_name(self) -> str: + 284 return self.module_name + '-CIRCULARITIES-MODULE' + 285 + 286 @property + 287 def dependencies_module_name(self) -> str: + 288 return self.module_name + '-DEPENDS-MODULE' + 289 +
+[docs] + 290 def is_init(self, node_id: NodeIdLike) -> bool: + 291 return self.kcfg._resolve(node_id) == self.kcfg._resolve(self.init)
+ + 292 +
+[docs] + 293 def is_target(self, node_id: NodeIdLike) -> bool: + 294 return self.kcfg._resolve(node_id) == self.kcfg._resolve(self.target)
+ + 295 +
+[docs] + 296 def is_failing(self, node_id: NodeIdLike) -> bool: + 297 return ( + 298 self.kcfg.is_leaf(node_id) + 299 and not self.is_explorable(node_id) + 300 and not self.is_target(node_id) + 301 and not self.is_refuted(node_id) + 302 and not self.kcfg.is_vacuous(node_id) + 303 and not self.is_bounded(node_id) + 304 )
+ + 305 +
+[docs] + 306 def is_bounded(self, node_id: NodeIdLike) -> bool: + 307 return self.kcfg._resolve(node_id) in self._bounded
+ + 308 +
+[docs] + 309 def add_bounded(self, nid: NodeIdLike) -> None: + 310 self._bounded.add(self.kcfg._resolve(nid))
+ + 311 +
+[docs] + 312 def shortest_path_to(self, node_id: NodeIdLike) -> tuple[KCFG.Successor, ...]: + 313 spb = self.kcfg.shortest_path_between(self.init, node_id) + 314 assert spb is not None + 315 return spb
+ + 316 +
+[docs] + 317 def prune(self, node_id: NodeIdLike, keep_nodes: Iterable[NodeIdLike] = ()) -> list[int]: + 318 pruned_nodes = super().prune(node_id, keep_nodes=list(keep_nodes) + [self.init, self.target]) + 319 for nid in pruned_nodes: + 320 self._bounded.discard(nid) + 321 self.prior_loops_cache = {k: v for (k, v) in self.prior_loops_cache.items() if k != nid} + 322 for k, v in self.prior_loops_cache.items(): + 323 if nid in v: + 324 self.prior_loops_cache[k] = tuple(_nid for _nid in self.prior_loops_cache[k] if _nid != nid) + 325 + 326 return pruned_nodes
+ + 327 + 328 @property + 329 def exec_time(self) -> float: + 330 return self._exec_time + 331 +
+[docs] + 332 def add_exec_time(self, exec_time: float) -> None: + 333 self._exec_time += exec_time
+ + 334 +
+[docs] + 335 def set_exec_time(self, exec_time: float) -> None: + 336 self._exec_time = exec_time
+ + 337 +
+[docs] + 338 def formatted_exec_time(self) -> str: + 339 exec_time = round(self.exec_time) + 340 h, remainder = divmod(exec_time, 3600) + 341 m, s = divmod(remainder, 60) + 342 formatted = [] + 343 if h: + 344 formatted.append(f'{h}h') + 345 if m or h: + 346 formatted.append(f'{m}m') + 347 formatted.append(f'{s}s') + 348 return ' '.join(formatted)
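As a worked example of the formatting above (assuming `proof` is an `APRProof` instance): 3750.4 seconds rounds to 3750 s, which `divmod` splits into 1 h, 2 min and 30 s.

proof.set_exec_time(3750.4)
assert proof.formatted_exec_time() == '1h 2m 30s'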
+ + 349 + 350 @staticmethod + 351 def _make_module_name(proof_id: str) -> str: + 352 return 'M-' + re.sub( + 353 r'[\[\]]|[_%().:,@]+', lambda match: 'bkt' if match.group(0) in ['[', ']'] else '-', proof_id.upper() + 354 ) + 355 +
+[docs] + 356 @staticmethod + 357 def read_proof(id: str, proof_dir: Path) -> APRProof: + 358 proof_path = proof_dir / f'{hash_str(id)}.json' + 359 if APRProof.proof_exists(id, proof_dir): + 360 proof_dict = json.loads(proof_path.read_text()) + 361 _LOGGER.info(f'Reading APRProof from file {id}: {proof_path}') + 362 return APRProof.from_dict(proof_dict, proof_dir=proof_dir) + 363 raise ValueError(f'Could not load APRProof from file {id}: {proof_path}')
+ + 364 + 365 @property + 366 def own_status(self) -> ProofStatus: + 367 if self.admitted: + 368 return ProofStatus.PASSED + 369 if len(self.failing) > 0: + 370 return ProofStatus.FAILED + 371 if len(self.pending) > 0: + 372 return ProofStatus.PENDING + 373 return ProofStatus.PASSED + 374 + 375 @property + 376 def can_progress(self) -> bool: + 377 return len(self.pending) > 0 + 378 +
+[docs] + 379 @classmethod + 380 def from_dict(cls: type[APRProof], dct: Mapping[str, Any], proof_dir: Path | None = None) -> APRProof: + 381 kcfg = KCFG.from_dict(dct['kcfg']) + 382 terminal = dct['terminal'] + 383 init_node = dct['init'] + 384 target_node = dct['target'] + 385 id = dct['id'] + 386 circularity = dct.get('circularity', False) + 387 admitted = dct.get('admitted', False) + 388 subproof_ids = dct['subproof_ids'] if 'subproof_ids' in dct else [] + 389 node_refutations: dict[int, str] = {} + 390 if 'node_refutation' in dct: + 391 node_refutations = { + 392 kcfg._resolve(int(node_id)): proof_id for node_id, proof_id in dct['node_refutations'].items() + 393 } + 394 if 'logs' in dct: + 395 logs = {int(k): tuple(LogEntry.from_dict(l) for l in ls) for k, ls in dct['logs'].items()} + 396 else: + 397 logs = {} + 398 + 399 bounded = dct['bounded'] + 400 bmc_depth = dct['bmc_depth'] if 'bmc_depth' in dct else None + 401 + 402 return APRProof( + 403 id, + 404 kcfg, + 405 terminal, + 406 init_node, + 407 target_node, + 408 logs=logs, + 409 bmc_depth=bmc_depth, + 410 bounded=bounded, + 411 circularity=circularity, + 412 admitted=admitted, + 413 proof_dir=proof_dir, + 414 subproof_ids=subproof_ids, + 415 node_refutations=node_refutations, + 416 )
+ + 417 +
+[docs] + 418 @staticmethod + 419 def from_claim( + 420 defn: KDefinition, + 421 claim: KClaim, + 422 logs: dict[int, tuple[LogEntry, ...]], + 423 proof_dir: Path | None = None, + 424 bmc_depth: int | None = None, + 425 **kwargs: Any, + 426 ) -> APRProof: + 427 kcfg_dir = proof_dir / claim.label / 'kcfg' if proof_dir is not None else None + 428 + 429 kcfg, init_node, target_node = KCFG.from_claim(defn, claim, cfg_dir=kcfg_dir) + 430 return APRProof( + 431 claim.label, + 432 kcfg, + 433 [], + 434 init=init_node, + 435 target=target_node, + 436 logs=logs, + 437 bmc_depth=bmc_depth, + 438 proof_dir=proof_dir, + 439 circularity=claim.is_circularity, + 440 admitted=claim.is_trusted, + 441 subproof_ids=claim.dependencies, + 442 **kwargs, + 443 )
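A sketch of building a proof from a claim, assuming `defn` is a `KDefinition` and `claim` a `KClaim` parsed elsewhere; the proof directory and the bound are illustrative.

from pathlib import Path

proof = APRProof.from_claim(
    defn,
    claim,
    logs={},                      # no RPC log entries yet
    proof_dir=Path('proofs'),     # enables write_proof_data / read_proof_data below
    bmc_depth=3,                  # optional: bound each loop to at most 3 unrollings
)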
+ + 444 +
+[docs] + 445 def as_rules(self, priority: int = 20, direct_rule: bool = False) -> list[KRule]: + 446 if ( + 447 self.circularity + 448 or (self.passed and direct_rule) + 449 or (self.admitted and not self.kcfg.predecessors(self.target)) + 450 ): + 451 return [self.as_rule(priority=priority)] + 452 + 453 def _return_rule(r: KRuleLike) -> KRule: + 454 assert isinstance(r, KRule) + 455 return r + 456 + 457 return [_return_rule(rule) for rule in self.kcfg.to_rules(self.rule_id, priority=priority)]
+ + 458 +
+[docs] + 459 def as_rule(self, priority: int = 20) -> KRule: + 460 _edge = KCFG.Edge(self.kcfg.node(self.init), self.kcfg.node(self.target), depth=0, rules=()) + 461 _rule = _edge.to_rule(self.rule_id, priority=priority) + 462 assert type(_rule) is KRule + 463 return _rule
+ + 464 +
+[docs] + 465 @staticmethod + 466 def from_spec_modules( + 467 defn: KDefinition, + 468 spec_modules: KFlatModuleList, + 469 logs: dict[int, tuple[LogEntry, ...]], + 470 proof_dir: Path | None = None, + 471 spec_labels: Iterable[str] | None = None, + 472 ) -> list[APRProof]: + 473 claim_index = ClaimIndex.from_module_list(spec_modules) + 474 spec_labels = claim_index.labels(include=spec_labels, with_depends=True, ordered=True) + 475 + 476 res: list[APRProof] = [] + 477 + 478 for label in spec_labels: + 479 if proof_dir is not None and Proof.proof_data_exists(label, proof_dir): + 480 apr_proof = APRProof.read_proof_data(proof_dir, label) + 481 else: + 482 _LOGGER.info(f'Building APRProof for claim: {label}') + 483 claim = claim_index[label] + 484 apr_proof = APRProof.from_claim( + 485 defn, + 486 claim, + 487 logs=logs, + 488 proof_dir=proof_dir, + 489 ) + 490 apr_proof.write_proof_data() + 491 res.append(apr_proof) + 492 + 493 return res
+ + 494 +
+[docs] + 495 def path_constraints(self, final_node_id: NodeIdLike, sort_with: KDefinition | None = None) -> KInner: + 496 path = self.shortest_path_to(final_node_id) + 497 curr_constraint: KInner = mlTop() + 498 for edge in reversed(path): + 499 if type(edge) is KCFG.Split: + 500 assert len(edge.targets) == 1 + 501 csubst = edge.splits[edge.targets[0].id] + 502 curr_constraint = mlAnd( + 503 [csubst.pred(sort_with=sort_with, constraints=False), csubst.constraint, curr_constraint] + 504 ) + 505 if type(edge) is KCFG.Cover: + 506 curr_constraint = mlAnd([edge.csubst.constraint, edge.csubst.subst.apply(curr_constraint)]) + 507 return mlAnd(flatten_label('#And', curr_constraint))
+ + 508 + 509 @property + 510 def dict(self) -> dict[str, Any]: + 511 # Note: We are relying on the order of inheritance to + 512 # access `dict` of `Proof`, since mypy is having issues + 513 # with the two correct solutions. + 514 dct = super().dict + 515 dct['type'] = 'APRProof' + 516 dct['kcfg'] = self.kcfg.to_dict() + 517 dct['terminal'] = sorted(node.id for node in self.kcfg.nodes if self.is_terminal(node.id)) + 518 dct['init'] = self.init + 519 dct['target'] = self.target + 520 dct['bounded'] = list(self._bounded) + 521 if self.bmc_depth is not None: + 522 dct['bmc_depth'] = self.bmc_depth + 523 dct['node_refutations'] = {node_id: proof.id for (node_id, proof) in self.node_refutations.items()} + 524 dct['circularity'] = self.circularity + 525 logs = {int(k): [l.to_dict() for l in ls] for k, ls in self.logs.items()} + 526 dct['logs'] = logs + 527 return dct + 528 + 529 @property + 530 def summary(self) -> CompositeSummary: + 531 subproofs_summaries = [subproof.summary for subproof in self.subproofs] + 532 return CompositeSummary( + 533 [ + 534 APRSummary( + 535 self.id, + 536 self.status, + 537 self.admitted, + 538 len(self.kcfg.nodes), + 539 len(self.pending), + 540 len(self.failing), + 541 len(self.kcfg.vacuous), + 542 len(self.kcfg.stuck), + 543 len([node for node in self.kcfg.nodes if self.is_terminal(node.id)]), + 544 len(self.node_refutations), + 545 self.bmc_depth, + 546 len(self._bounded), + 547 len(self.subproof_ids), + 548 self.formatted_exec_time(), + 549 ), + 550 *subproofs_summaries, + 551 ] + 552 ) + 553 + 554 @property + 555 def one_line_summary(self) -> str: + 556 nodes = len(self.kcfg.nodes) + 557 pending = len(self.pending) + 558 failing = len(self.failing) + 559 vacuous = len(self.kcfg.vacuous) + 560 stuck = len(self.kcfg.stuck) + 561 passed = len([cover for cover in self.kcfg.covers() if cover.target.id == self.target]) + 562 refuted = len(self.node_refutations) + 563 return ( + 564 super().one_line_summary + 565 + f': {nodes} nodes: {pending} pending|{passed} passed|{failing} failing|{vacuous} vacuous|{refuted} refuted|{stuck} stuck' + 566 ) + 567 +
+[docs] + 568 def get_refutation_id(self, node_id: int) -> str: + 569 return f'{self.id}.node-infeasible-{node_id}'
+ + 570 +
+[docs] + 571 @staticmethod + 572 def read_proof_data(proof_dir: Path, id: str) -> APRProof: + 573 proof_subdir = proof_dir / id + 574 proof_json = proof_subdir / 'proof.json' + 575 proof_dict = json.loads(proof_json.read_text()) + 576 cfg_dir = proof_subdir / 'kcfg' + 577 kcfg = KCFG.read_cfg_data(cfg_dir) + 578 init = int(proof_dict['init']) + 579 target = int(proof_dict['target']) + 580 bounded = proof_dict['bounded'] + 581 bmc_depth = int(proof_dict['bmc_depth']) if 'bmc_depth' in proof_dict else None + 582 circularity = bool(proof_dict['circularity']) + 583 admitted = bool(proof_dict['admitted']) + 584 exec_time = float(proof_dict['execution_time']) if 'execution_time' in proof_dict else 0.0 + 585 terminal = proof_dict['terminal'] + 586 logs = {int(k): tuple(LogEntry.from_dict(l) for l in ls) for k, ls in proof_dict['logs'].items()} + 587 subproof_ids = proof_dict['subproof_ids'] + 588 node_refutations = { + 589 kcfg._resolve(int(node_id)): proof_id for node_id, proof_id in proof_dict['node_refutations'].items() + 590 } + 591 + 592 prior_loops_cache = {int(k): v for k, v in proof_dict.get('loops_cache', {}).items()} + 593 + 594 return APRProof( + 595 id=id, + 596 kcfg=kcfg, + 597 terminal=terminal, + 598 init=init, + 599 target=target, + 600 bounded=bounded, + 601 bmc_depth=bmc_depth, + 602 logs=logs, + 603 circularity=circularity, + 604 admitted=admitted, + 605 proof_dir=proof_dir, + 606 subproof_ids=subproof_ids, + 607 node_refutations=node_refutations, + 608 prior_loops_cache=prior_loops_cache, + 609 _exec_time=exec_time, + 610 )
+ + 611 +
+[docs] + 612 def write_proof_data(self) -> None: + 613 if self.proof_dir is None or self.proof_subdir is None: + 614 _LOGGER.info(f'Skipped saving proof {self.id} since no save dir was specified.') + 615 return + 616 ensure_dir_path(self.proof_dir) + 617 ensure_dir_path(self.proof_subdir) + 618 proof_json = self.proof_subdir / 'proof.json' + 619 dct: dict[str, Any] = {} + 620 + 621 dct['id'] = self.id + 622 dct['subproof_ids'] = self.subproof_ids + 623 dct['admitted'] = self.admitted + 624 dct['execution_time'] = self._exec_time + 625 dct['type'] = 'APRProof' + 626 dct['init'] = self.kcfg._resolve(self.init) + 627 dct['target'] = self.kcfg._resolve(self.target) + 628 dct['terminal'] = sorted(node.id for node in self.kcfg.nodes if self.is_terminal(node.id)) + 629 dct['node_refutations'] = { + 630 self.kcfg._resolve(node_id): proof.id for (node_id, proof) in self.node_refutations.items() + 631 } + 632 dct['circularity'] = self.circularity + 633 logs = {int(k): [l.to_dict() for l in ls] for k, ls in self.logs.items()} + 634 dct['logs'] = logs + 635 + 636 dct['bounded'] = sorted(self._bounded) + 637 if self.bmc_depth is not None: + 638 dct['bmc_depth'] = self.bmc_depth + 639 + 640 dct['loops_cache'] = self.prior_loops_cache + 641 + 642 proof_json.write_text(json.dumps(dct)) + 643 _LOGGER.info(f'Wrote proof data for {self.id}: {proof_json}') + 644 self.kcfg.write_cfg_data()
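Persistence is a pair of calls: `write_proof_data` stores `proof.json` plus the KCFG under the proof's subdirectory, and `read_proof_data` reconstructs the proof from them. A sketch, reusing the illustrative directory from the `from_claim` example above:

proof.write_proof_data()
reloaded = APRProof.read_proof_data(Path('proofs'), proof.id)
print(reloaded.status, reloaded.formatted_exec_time())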
+ + 645 +
+[docs] + 646 def refute_node(self, node: KCFG.Node) -> RefutationProof | None: + 647 _LOGGER.info(f'Attempting to refute node {node.id}') + 648 refutation = self.construct_node_refutation(node) + 649 if refutation is None: + 650 _LOGGER.error(f'Failed to refute node {node.id}') + 651 return None + 652 refutation.write_proof_data() + 653 + 654 self.node_refutations[node.id] = refutation + 655 + 656 self.write_proof_data() + 657 + 658 return refutation
+ + 659 +
+[docs] + 660 def unrefute_node(self, node: KCFG.Node) -> None: + 661 self.remove_subproof(self.get_refutation_id(node.id)) + 662 del self.node_refutations[node.id] + 663 self.write_proof_data() + 664 _LOGGER.info(f'Disabled refutation of node {node.id}.')
+ + 665 +
+[docs] + 666 def construct_node_refutation(self, node: KCFG.Node) -> RefutationProof | None: # TODO put into prover class + 667 if len(self.kcfg.successors(node.id)) > 0: + 668 _LOGGER.error(f'Cannot refute node {node.id} that already has successors') + 669 return None + 670 + 671 path = single(self.kcfg.paths_between(source_id=self.init, target_id=node.id)) + 672 branches_on_path = list(filter(lambda x: type(x) is KCFG.Split or type(x) is KCFG.NDBranch, reversed(path))) + 673 if len(branches_on_path) == 0: + 674 _LOGGER.error(f'Cannot refute node {node.id} in linear KCFG') + 675 return None + 676 closest_branch = branches_on_path[0] + 677 if type(closest_branch) is KCFG.NDBranch: + 678 _LOGGER.error(f'Cannot refute node {node.id} following a non-deterministic branch: not yet implemented') + 679 return None + 680 + 681 assert type(closest_branch) is KCFG.Split + 682 refuted_branch_root = closest_branch.targets[0] + 683 csubst = closest_branch.splits[refuted_branch_root.id] + 684 if not (csubst.subst.is_identity): + 685 _LOGGER.error( + 686 f'Cannot refute node {node.id}: unexpected non-identity substitution {csubst.subst} in Split from {closest_branch.source.id}' + 687 ) + 688 return None + 689 + 690 last_constraint = ml_pred_to_bool(csubst.constraint) + 691 relevant_vars = free_vars(last_constraint) + 692 pre_split_constraints = [ + 693 ml_pred_to_bool(c) + 694 for c in remove_useless_constraints(closest_branch.source.cterm.constraints, relevant_vars) + 695 ] + 696 + 697 refutation_id = self.get_refutation_id(node.id) + 698 _LOGGER.info(f'Adding refutation proof {refutation_id} as subproof of {self.id}') + 699 refutation = RefutationProof( + 700 id=refutation_id, + 701 pre_constraints=pre_split_constraints, + 702 last_constraint=last_constraint, + 703 proof_dir=self.proof_dir, + 704 ) + 705 + 706 self.add_subproof(refutation) + 707 return refutation
+
+ + 708 + 709 +
+[docs] + 710class APRProver(Prover[APRProof, APRProofStep, APRProofResult]): + 711 main_module_name: str + 712 execute_depth: int | None + 713 cut_point_rules: Iterable[str] + 714 terminal_rules: Iterable[str] + 715 counterexample_info: bool + 716 fast_check_subsumption: bool + 717 direct_subproof_rules: bool + 718 assume_defined: bool + 719 kcfg_explore: KCFGExplore + 720 extra_module: KFlatModule | None + 721 optimize_kcfg: bool + 722 + 723 def __init__( + 724 self, + 725 kcfg_explore: KCFGExplore, + 726 execute_depth: int | None = None, + 727 cut_point_rules: Iterable[str] = (), + 728 terminal_rules: Iterable[str] = (), + 729 counterexample_info: bool = False, + 730 fast_check_subsumption: bool = False, + 731 direct_subproof_rules: bool = False, + 732 assume_defined: bool = False, + 733 extra_module: KFlatModule | None = None, + 734 optimize_kcfg: bool = False, + 735 ) -> None: + 736 + 737 self.kcfg_explore = kcfg_explore + 738 self.main_module_name = self.kcfg_explore.cterm_symbolic._definition.main_module_name + 739 self.execute_depth = execute_depth + 740 self.cut_point_rules = cut_point_rules + 741 self.terminal_rules = terminal_rules + 742 self.counterexample_info = counterexample_info + 743 self.fast_check_subsumption = fast_check_subsumption + 744 self.direct_subproof_rules = direct_subproof_rules + 745 self.assume_defined = assume_defined + 746 self.extra_module = extra_module + 747 self.optimize_kcfg = optimize_kcfg + 748 +
+[docs] + 749 def close(self) -> None: + 750 self.kcfg_explore.cterm_symbolic._kore_client.close()
+ + 751 +
+[docs] + 752 def init_proof(self, proof: APRProof) -> None: + 753 main_module_name = self.main_module_name + 754 if self.extra_module: + 755 _kore_module = kflatmodule_to_kore(self.kcfg_explore.cterm_symbolic._definition, self.extra_module) + 756 _LOGGER.warning(f'_kore_module: {_kore_module.text}') + 757 self.kcfg_explore.cterm_symbolic._kore_client.add_module(_kore_module, name_as_id=True) + 758 main_module_name = self.extra_module.name + 759 + 760 def _inject_module(module_name: str, import_name: str, sentences: list[KRule]) -> None: + 761 _module = KFlatModule(module_name, sentences, [KImport(import_name)]) + 762 _kore_module = kflatmodule_to_kore(self.kcfg_explore.cterm_symbolic._definition, _module) + 763 self.kcfg_explore.cterm_symbolic._kore_client.add_module(_kore_module, name_as_id=True) + 764 + 765 subproofs: list[Proof] = ( + 766 [Proof.read_proof_data(proof.proof_dir, i) for i in proof.subproof_ids] + 767 if proof.proof_dir is not None + 768 else [] + 769 ) + 770 dependencies_as_rules = [ + 771 rule + 772 for subproof in subproofs + 773 if isinstance(subproof, APRProof) + 774 for rule in subproof.as_rules(priority=20, direct_rule=self.direct_subproof_rules) + 775 ] + 776 circularity_rule = proof.as_rule(priority=20) + 777 + 778 _inject_module(proof.dependencies_module_name, main_module_name, dependencies_as_rules) + 779 _inject_module(proof.circularities_module_name, proof.dependencies_module_name, [circularity_rule]) + 780 + 781 for node_id in [proof.init, proof.target]: + 782 if self.kcfg_explore.kcfg_semantics.is_terminal(proof.kcfg.node(node_id).cterm): + 783 proof.add_terminal(node_id)
+ + 784 + 785 def _may_subsume(self, node: KCFG.Node, target_node: KCFG.Node) -> bool: + 786 node_k_cell = node.cterm.try_cell('K_CELL') + 787 target_k_cell = target_node.cterm.try_cell('K_CELL') + 788 if node_k_cell and target_k_cell and not target_k_cell.match(node_k_cell): + 789 return False + 790 return True + 791 + 792 def _check_subsume(self, node: KCFG.Node, target_node: KCFG.Node, proof_id: str) -> CSubst | None: + 793 target_cterm = target_node.cterm + 794 _LOGGER.debug(f'Checking subsumption into target state {proof_id}: {shorten_hashes((node.id, target_cterm))}') + 795 if self.fast_check_subsumption and not self._may_subsume(node, target_node): + 796 _LOGGER.info(f'Skipping full subsumption check because of fast may subsume check {proof_id}: {node.id}') + 797 return None + 798 _csubst = self.kcfg_explore.cterm_symbolic.implies(node.cterm, target_cterm, assume_defined=self.assume_defined) + 799 csubst = _csubst.csubst + 800 if csubst is not None: + 801 _LOGGER.info(f'Subsumed into target node {proof_id}: {shorten_hashes((node.id, target_node.id))}') + 802 return csubst + 803 +
+[docs] + 804 def step_proof(self, step: APRProofStep) -> list[APRProofResult]: + 805 # Check if the current node should be bounded + 806 prior_loops: tuple[int, ...] = () + 807 if step.bmc_depth is not None and self.kcfg_explore.kcfg_semantics.is_loop(step.node.cterm): + 808 for node in step.shortest_path_to_node: + 809 if self.kcfg_explore.kcfg_semantics.same_loop(node.cterm, step.node.cterm): + 810 if node.id in step.prior_loops_cache: + 811 prior_loops = step.prior_loops_cache[node.id] + (node.id,) + 812 break + 813 + 814 _LOGGER.info(f'Prior loop heads for node {step.node.id}: {(step.node.id, prior_loops)}') + 815 if len(prior_loops) > step.bmc_depth: + 816 _LOGGER.warning(f'Bounded node {step.proof_id}: {step.node.id} at bmc depth {step.bmc_depth}') + 817 return [ + 818 APRProofBoundedResult( + 819 node_id=step.node.id, optimize_kcfg=self.optimize_kcfg, prior_loops_cache_update=prior_loops + 820 ) + 821 ] + 822 + 823 # Check if the current node and target are terminal + 824 is_terminal = self.kcfg_explore.kcfg_semantics.is_terminal(step.node.cterm) + 825 target_is_terminal = self.kcfg_explore.kcfg_semantics.is_terminal(step.target.cterm) + 826 + 827 terminal_result: list[APRProofResult] = ( + 828 [ + 829 APRProofTerminalResult( + 830 node_id=step.node.id, optimize_kcfg=self.optimize_kcfg, prior_loops_cache_update=prior_loops + 831 ) + 832 ] + 833 if is_terminal + 834 else [] + 835 ) + 836 + 837 # Subsumption is checked if and only if the target node + 838 # and the current node are either both terminal or both not terminal + 839 if is_terminal == target_is_terminal: + 840 csubst = self._check_subsume(step.node, step.target, proof_id=step.proof_id) + 841 if csubst is not None: + 842 # Information about the subsumed node being terminal must be returned + 843 # so that the set of terminal nodes is correctly updated + 844 return terminal_result + [ + 845 APRProofSubsumeResult( + 846 csubst=csubst, + 847 optimize_kcfg=self.optimize_kcfg, + 848 node_id=step.node.id, + 849 prior_loops_cache_update=prior_loops, + 850 ) + 851 ] + 852 + 853 if is_terminal: + 854 return terminal_result + 855 + 856 # Ensure that we cut at applications of circularity, so that subsumption into target state will be checked + 857 cut_rules = list(self.cut_point_rules) + 858 if step.circularity and step.nonzero_depth: + 859 cut_rules.append(step.circularity_rule_id) + 860 + 861 # Ensure that we record progress ASAP for circularities, so the circularity rule will be included for execution as soon as possible + 862 execute_depth = self.execute_depth + 863 if step.circularity and not step.nonzero_depth: + 864 execute_depth = 1 + 865 + 866 # If the step has already been cached, do not invoke the backend and only send a signal back to the proof to use the cache + 867 if step.use_cache is not None: + 868 _LOGGER.info(f'Using cached step for edge {step.use_cache} --> {step.node.id}') + 869 return [ + 870 APRProofUseCacheResult( + 871 node_id=step.node.id, + 872 cached_node_id=step.use_cache, + 873 optimize_kcfg=self.optimize_kcfg, + 874 prior_loops_cache_update=prior_loops, + 875 ) + 876 ] + 877 # Invoke the backend to obtain the next KCFG extension + 878 else: + 879 extend_results = self.kcfg_explore.extend_cterm( + 880 step.node.cterm, + 881 execute_depth=execute_depth, + 882 cut_point_rules=cut_rules, + 883 terminal_rules=self.terminal_rules, + 884 module_name=step.module_name, + 885 node_id=step.node.id, + 886 ) + 887 + 888 # We can obtain two results at most + 889 assert len(extend_results) <= 2 + 890 # We have 
obtained two results: first is to be applied, second to be cached potentially + 891 if len(extend_results) == 2: + 892 # Cache only if the current node is at non-zero depth + 893 if step.nonzero_depth: + 894 _LOGGER.info(f'Caching next step for edge starting from {step.node.id}') + 895 return [ + 896 APRProofExtendAndCacheResult( + 897 node_id=step.node.id, + 898 extension_to_apply=extend_results[0], + 899 extension_to_cache=extend_results[1], + 900 prior_loops_cache_update=prior_loops, + 901 optimize_kcfg=self.optimize_kcfg, + 902 ) + 903 ] + 904 + 905 # Otherwise, discard the second result + 906 return [ + 907 APRProofExtendResult( + 908 node_id=step.node.id, + 909 extension_to_apply=extend_results[0], + 910 prior_loops_cache_update=prior_loops, + 911 optimize_kcfg=self.optimize_kcfg, + 912 ) + 913 ]
+ + 914 +
+[docs] + 915 def failure_info(self, proof: APRProof) -> FailureInfo: + 916 return APRFailureInfo.from_proof( + 917 proof, self.kcfg_explore, counterexample_info=self.counterexample_info, assume_defined=self.assume_defined + 918 )
+
+ + 919 + 920 +
+[docs] + 921@dataclass(frozen=True) + 922class APRSummary(ProofSummary): + 923 id: str + 924 status: ProofStatus + 925 admitted: bool + 926 nodes: int + 927 pending: int + 928 failing: int + 929 vacuous: int + 930 stuck: int + 931 terminal: int + 932 refuted: int + 933 bmc_depth: int | None + 934 bounded: int + 935 subproofs: int + 936 formatted_exec_time: str + 937 + 938 @property + 939 def lines(self) -> list[str]: + 940 _lines = [ + 941 f'APRProof: {self.id}', + 942 f' status: {self.status}', + 943 f' admitted: {self.admitted}', + 944 f' nodes: {self.nodes}', + 945 f' pending: {self.pending}', + 946 f' failing: {self.failing}', + 947 f' vacuous: {self.vacuous}', + 948 f' stuck: {self.stuck}', + 949 f' terminal: {self.terminal}', + 950 f' refuted: {self.refuted}', + 951 f' bounded: {self.bounded}', + 952 f' execution time: {self.formatted_exec_time}', + 953 ] + 954 if self.bmc_depth is not None: + 955 _lines.append(f' bmc depth: {self.bmc_depth}') + 956 _lines.append(f'Subproofs: {self.subproofs}') + 957 return _lines
+ + 958 + 959 +
+[docs] + 960@dataclass(frozen=True) + 961class APRFailureInfo(FailureInfo): + 962 pending_nodes: frozenset[int] + 963 failing_nodes: frozenset[int] + 964 path_conditions: FrozenDict[int, str] + 965 failure_reasons: FrozenDict[int, str] + 966 models: FrozenDict[int, frozenset[tuple[str, str]]] + 967 + 968 def __init__( + 969 self, + 970 failing_nodes: Iterable[int], + 971 pending_nodes: Iterable[int], + 972 path_conditions: Mapping[int, str], + 973 failure_reasons: Mapping[int, str], + 974 models: Mapping[int, Iterable[tuple[str, str]]], + 975 ): + 976 object.__setattr__(self, 'failing_nodes', frozenset(failing_nodes)) + 977 object.__setattr__(self, 'pending_nodes', frozenset(pending_nodes)) + 978 object.__setattr__(self, 'path_conditions', FrozenDict(path_conditions)) + 979 object.__setattr__(self, 'failure_reasons', FrozenDict(failure_reasons)) + 980 object.__setattr__( + 981 self, 'models', FrozenDict({node_id: frozenset(model) for (node_id, model) in models.items()}) + 982 ) + 983 +
+[docs] + 984 @staticmethod + 985 def from_proof( + 986 proof: APRProof, kcfg_explore: KCFGExplore, counterexample_info: bool = False, assume_defined: bool = False + 987 ) -> APRFailureInfo: + 988 target = proof.kcfg.node(proof.target) + 989 pending_nodes = {node.id for node in proof.pending} + 990 failing_nodes = {node.id for node in proof.failing} + 991 path_conditions = {} + 992 failure_reasons = {} + 993 models = {} + 994 for node in proof.failing: + 995 node_cterm, _ = kcfg_explore.cterm_symbolic.simplify(node.cterm) + 996 target_cterm, _ = kcfg_explore.cterm_symbolic.simplify(target.cterm) + 997 _, reason = kcfg_explore.implication_failure_reason(node_cterm, target_cterm, assume_defined=assume_defined) + 998 path_condition = kcfg_explore.pretty_print(proof.path_constraints(node.id)) + 999 failure_reasons[node.id] = reason +1000 path_conditions[node.id] = path_condition +1001 if counterexample_info: +1002 model_subst = kcfg_explore.cterm_symbolic.get_model(node.cterm) +1003 if type(model_subst) is Subst: +1004 model: list[tuple[str, str]] = [] +1005 for var, term in model_subst.to_dict().items(): +1006 term_kast = KInner.from_dict(term) +1007 term_pretty = kcfg_explore.pretty_print(term_kast) +1008 model.append((var, term_pretty)) +1009 models[node.id] = model +1010 return APRFailureInfo( +1011 failing_nodes=failing_nodes, +1012 pending_nodes=pending_nodes, +1013 path_conditions=path_conditions, +1014 failure_reasons=failure_reasons, +1015 models=models, +1016 )
+ +1017 +
+[docs] +1018 def print(self) -> list[str]: +1019 res_lines: list[str] = [] +1020 +1021 num_pending = len(self.pending_nodes) +1022 num_failing = len(self.failing_nodes) +1023 res_lines.append( +1024 f'{num_pending + num_failing} Failure nodes. ({num_pending} pending and {num_failing} failing)' +1025 ) +1026 +1027 if num_pending > 0: +1028 res_lines.append('') +1029 res_lines.append(f'Pending nodes: {sorted(self.pending_nodes)}') +1030 +1031 if num_failing > 0: +1032 res_lines.append('') +1033 res_lines.append('Failing nodes:') +1034 for node_id in self.failing_nodes: +1035 reason = self.failure_reasons[node_id] +1036 path_condition = self.path_conditions[node_id] +1037 res_lines.append('') +1038 res_lines.append(f' Node id: {str(node_id)}') +1039 +1040 res_lines.append(' Failure reason:') +1041 res_lines += [f' {line}' for line in reason.split('\n')] +1042 +1043 res_lines.append(' Path condition:') +1044 res_lines += [f' {path_condition}'] +1045 +1046 if node_id in self.models: +1047 res_lines.append(' Model:') +1048 for var, term in self.models[node_id]: +1049 res_lines.append(f' {var} = {term}') +1050 else: +1051 res_lines.append(' Failed to generate a model.') +1052 +1053 res_lines.append('') +1054 res_lines.append('Join the Runtime Verification Discord server for support: https://discord.gg/CurfmXNtbN') +1055 return res_lines
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/proof/show.html b/pyk/_modules/pyk/proof/show.html new file mode 100644 index 00000000000..96c21c30b5e --- /dev/null +++ b/pyk/_modules/pyk/proof/show.html @@ -0,0 +1,238 @@ + + + + + + + + pyk.proof.show — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +

Source code for pyk.proof.show

+  1from __future__ import annotations
+  2
+  3import logging
+  4from typing import TYPE_CHECKING
+  5
+  6from ..kcfg.show import KCFGShow, NodePrinter
+  7from ..utils import ensure_dir_path
+  8
+  9if TYPE_CHECKING:
+ 10    from collections.abc import Iterable
+ 11    from pathlib import Path
+ 12    from typing import Final
+ 13
+ 14    from graphviz import Digraph
+ 15
+ 16    from ..kcfg import KCFG
+ 17    from ..kcfg.kcfg import NodeIdLike
+ 18    from ..ktool.kprint import KPrint
+ 19    from .reachability import APRProof
+ 20
+ 21_LOGGER: Final = logging.getLogger(__name__)
+ 22
+ 23
+
+[docs] + 24class APRProofNodePrinter(NodePrinter): + 25 proof: APRProof + 26 + 27 def __init__(self, proof: APRProof, kprint: KPrint, full_printer: bool = False, minimize: bool = False): + 28 super().__init__(kprint, full_printer=full_printer, minimize=minimize) + 29 self.proof = proof + 30 +
+[docs] + 31 def node_attrs(self, kcfg: KCFG, node: KCFG.Node) -> list[str]: + 32 attrs = super().node_attrs(kcfg, node) + 33 if self.proof.is_init(node.id): + 34 attrs.append('init') + 35 if self.proof.is_target(node.id): + 36 attrs.append('target') + 37 if self.proof.is_pending(node.id): + 38 attrs.append('pending') + 39 if self.proof.is_refuted(node.id): + 40 attrs.append('refuted') + 41 if self.proof.is_terminal(node.id): + 42 attrs.append('terminal') + 43 if 'stuck' in attrs: + 44 attrs.remove('stuck') + 45 if self.proof.is_bounded(node.id): + 46 attrs.append('bounded') + 47 if 'stuck' in attrs: + 48 attrs.remove('stuck') + 49 return attrs
+
+ + 50 + 51 +
+[docs] + 52class APRProofShow: + 53 kcfg_show: KCFGShow + 54 + 55 def __init__(self, kprint: KPrint, node_printer: NodePrinter | None = None): + 56 self.kcfg_show = KCFGShow(kprint, node_printer=node_printer) + 57 +
+[docs] + 58 def pretty_segments(self, proof: APRProof, minimize: bool = True) -> Iterable[tuple[str, Iterable[str]]]: + 59 ret_lines = list(self.kcfg_show.pretty_segments(proof.kcfg, minimize=minimize)) + 60 if len(proof.pending) > 0: + 61 target_node_lines = ['', 'Target Node:'] + 62 target_node_lines += self.kcfg_show.node_printer.print_node(proof.kcfg, proof.kcfg.node(proof.target)) + 63 ret_lines.append((f'node_{proof.target}', target_node_lines)) + 64 return KCFGShow.make_unique_segments(ret_lines)
+ + 65 +
+[docs] + 66 def pretty(self, proof: APRProof, minimize: bool = True) -> Iterable[str]: + 67 return (line for _, seg_lines in self.pretty_segments(proof, minimize=minimize) for line in seg_lines)
+ + 68 +
+[docs] + 69 def show( + 70 self, + 71 proof: APRProof, + 72 nodes: Iterable[NodeIdLike] = (), + 73 node_deltas: Iterable[tuple[NodeIdLike, NodeIdLike]] = (), + 74 to_module: bool = False, + 75 minimize: bool = True, + 76 sort_collections: bool = False, + 77 omit_cells: Iterable[str] = (), + 78 ) -> list[str]: + 79 res_lines = self.kcfg_show.show( + 80 proof.kcfg, + 81 nodes=nodes, + 82 node_deltas=node_deltas, + 83 to_module=to_module, + 84 minimize=minimize, + 85 sort_collections=sort_collections, + 86 omit_cells=omit_cells, + 87 module_name=f'SUMMARY-{proof.id.upper().replace("_", "-")}', + 88 ) + 89 return res_lines
+ + 90 +
+[docs] + 91 def dot(self, proof: APRProof) -> Digraph: + 92 graph = self.kcfg_show.dot(proof.kcfg) + 93 attrs = {'class': 'target', 'style': 'solid'} + 94 for node in proof.pending: + 95 graph.edge(tail_name=node.id, head_name=proof.target, label=' ???', **attrs) + 96 for node in proof.kcfg.stuck: + 97 graph.edge(tail_name=node.id, head_name=proof.target, label=' false', **attrs) + 98 return graph
+ + 99 +
+[docs] +100 def dump(self, proof: APRProof, dump_dir: Path, dot: bool = False) -> None: +101 ensure_dir_path(dump_dir) +102 +103 proof_file = dump_dir / f'{proof.id}.json' +104 proof_file.write_text(proof.json) +105 _LOGGER.info(f'Wrote CFG file {proof.id}: {proof_file}') +106 +107 if dot: +108 proof_dot = self.dot(proof) +109 dot_file = dump_dir / f'{proof.id}.dot' +110 dot_file.write_text(proof_dot.source) +111 _LOGGER.info(f'Wrote DOT file {proof.id}: {dot_file}') +112 +113 self.kcfg_show.dump(f'{proof.id}_cfg', proof.kcfg, dump_dir, dot=False)
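A sketch of rendering and dumping a proof, assuming `kprint` is a `KPrint` for the same definition and `proof` an `APRProof`; the output directory is illustrative.

from pathlib import Path

shower = APRProofShow(kprint, node_printer=APRProofNodePrinter(proof, kprint))
for line in shower.pretty(proof):
    print(line)                                  # textual rendering of the proof's KCFG
shower.dump(proof, Path('graphs'), dot=True)     # writes <id>.json, <id>.dot and the KCFG data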
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/proof/tui.html b/pyk/_modules/pyk/proof/tui.html new file mode 100644 index 00000000000..9b5e4ad4777 --- /dev/null +++ b/pyk/_modules/pyk/proof/tui.html @@ -0,0 +1,202 @@ + + + + + + + + pyk.proof.tui — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +

Source code for pyk.proof.tui

+ 1from __future__ import annotations
+ 2
+ 3from typing import TYPE_CHECKING
+ 4
+ 5from textual.containers import Horizontal, ScrollableContainer, Vertical
+ 6from textual.widgets import Footer
+ 7
+ 8from ..kcfg.tui import GraphChunk, KCFGViewer, NodeView
+ 9from .show import APRProofShow
+10
+11if TYPE_CHECKING:
+12    from collections.abc import Callable, Iterable
+13
+14    from textual.app import ComposeResult
+15
+16    from ..kcfg.show import NodePrinter
+17    from ..kcfg.tui import KCFGElem
+18    from ..ktool.kprint import KPrint
+19    from .reachability import APRProof
+20
+21
+
+[docs] +22class APRProofBehaviorView(ScrollableContainer, can_focus=True): +23 _proof: APRProof +24 _kprint: KPrint +25 _minimize: bool +26 _node_printer: NodePrinter | None +27 _proof_nodes: Iterable[GraphChunk] +28 +29 def __init__( +30 self, +31 proof: APRProof, +32 kprint: KPrint, +33 minimize: bool = True, +34 node_printer: NodePrinter | None = None, +35 id: str = '', +36 ): +37 super().__init__(id=id) +38 self._proof = proof +39 self._kprint = kprint +40 self._minimize = minimize +41 self._node_printer = node_printer +42 self._proof_nodes = [] +43 proof_show = APRProofShow(kprint, node_printer=node_printer) +44 for lseg_id, node_lines in proof_show.pretty_segments(self._proof, minimize=self._minimize): +45 self._proof_nodes.append(GraphChunk(lseg_id, node_lines)) +46 +
+[docs] +47 def compose(self) -> ComposeResult: +48 return self._proof_nodes
+
+ +49 +50 +
+[docs] +51class APRProofViewer(KCFGViewer): +52 _proof: APRProof +53 +54 def __init__( +55 self, +56 proof: APRProof, +57 kprint: KPrint, +58 node_printer: NodePrinter | None = None, +59 custom_view: Callable[[KCFGElem], Iterable[str]] | None = None, +60 minimize: bool = True, +61 ) -> None: +62 super().__init__(proof.kcfg, kprint, node_printer=node_printer, custom_view=custom_view, minimize=minimize) +63 self._proof = proof +64 +
+[docs] +65 def on_mount(self) -> None: +66 self.query_one('#behavior', APRProofBehaviorView).focus(scroll_visible=False)
+ +67 +
+[docs] +68 def compose(self) -> ComposeResult: +69 yield Horizontal( +70 Vertical( +71 APRProofBehaviorView(self._proof, self._kprint, node_printer=self._node_printer, id='behavior'), +72 id='navigation', +73 ), +74 Vertical( +75 NodeView( +76 self._kprint, +77 custom_view=self._custom_view, +78 proof_id=self._proof.id, +79 proof_status=self._proof.status.value, +80 exec_time=self._proof.exec_time, +81 id='node-view', +82 ), +83 id='display', +84 ), +85 ) +86 yield Footer()
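Assuming `KCFGViewer` (and hence `APRProofViewer`) is a Textual `App`, which the `compose`/`on_mount` hooks above suggest, launching the interactive viewer is a single call; `kprint` and `proof` are as in the previous pages.

APRProofViewer(proof, kprint).run()   # assumption: the standard Textual App.run() entry point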
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/testing/plugin.html b/pyk/_modules/pyk/testing/plugin.html new file mode 100644 index 00000000000..b42da2fa36f --- /dev/null +++ b/pyk/_modules/pyk/testing/plugin.html @@ -0,0 +1,178 @@ + + + + + + + + pyk.testing.plugin — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +

Source code for pyk.testing.plugin

+ 1from __future__ import annotations
+ 2
+ 3from pathlib import Path
+ 4from typing import TYPE_CHECKING
+ 5
+ 6import pytest
+ 7
+ 8from ..utils import BugReport, ensure_dir_path
+ 9from ._kompiler import Kompiler, UseServer
+10from ._profiler import Profiler
+11
+12if TYPE_CHECKING:
+13    from pytest import FixtureRequest, Parser, TempPathFactory
+14
+15
+
+[docs] +16def pytest_addoption(parser: Parser) -> None: +17 parser.addoption( +18 '--bug-report', +19 action='store_true', +20 default=False, +21 help='Generate bug reports', +22 ) +23 parser.addoption( +24 '--bug-report-dir', +25 type=ensure_dir_path, +26 help='Directory to store bug reports', +27 ) +28 parser.addoption( +29 '--use-server', +30 type=UseServer, +31 default=UseServer.BOTH, +32 help='KORE RPC server to use for tests', +33 )
+ +34 +35 +
+[docs] +36@pytest.fixture +37def bug_report(request: FixtureRequest, tmp_path: Path) -> BugReport | None: +38 bug_report = request.config.getoption('--bug-report') +39 if not bug_report: +40 return None +41 bug_report_dir = request.config.getoption('--bug-report-dir') +42 if not bug_report_dir: +43 bug_report_dir = tmp_path +44 br_name = request.node.name.replace('[', '/') +45 br_path = Path(bug_report_dir / br_name) +46 ensure_dir_path(br_path.parent) +47 return BugReport(br_path)
+ +48 +49 +
+[docs] +50@pytest.fixture(scope='session') +51def use_server(request: FixtureRequest) -> UseServer: +52 return request.config.getoption('--use-server')
+ +53 +54 +
+[docs] +55@pytest.fixture +56def profile(tmp_path: Path) -> Profiler: +57 return Profiler(tmp_path)
+ +58 +59 +
+[docs] +60@pytest.fixture(scope='session') +61def kompile(tmp_path_factory: TempPathFactory) -> Kompiler: +62 return Kompiler(tmp_path_factory)
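Tests consume these fixtures by name; a sketch of a test signature that opts into them (type annotations omitted, the body is a placeholder):

def test_my_semantics(bug_report, use_server, kompile):
    # bug_report is a BugReport when pytest runs with --bug-report, otherwise None;
    # use_server reflects --use-server (default UseServer.BOTH);
    # kompile is the session-scoped Kompiler helper defined above.
    assert use_server is not None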
+ + + + \ No newline at end of file diff --git a/pyk/_modules/pyk/utils.html b/pyk/_modules/pyk/utils.html new file mode 100644 index 00000000000..21918a19660 --- /dev/null +++ b/pyk/_modules/pyk/utils.html @@ -0,0 +1,976 @@ + + + + + + + + pyk.utils — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +

Source code for pyk.utils

+  1from __future__ import annotations
+  2
+  3import hashlib
+  4import logging
+  5import os
+  6import shlex
+  7import string
+  8import subprocess
+  9import sys
+ 10import tarfile
+ 11import threading
+ 12import time
+ 13from collections.abc import Hashable, Mapping
+ 14from dataclasses import dataclass
+ 15from datetime import datetime
+ 16from pathlib import Path
+ 17from subprocess import CompletedProcess, Popen
+ 18from tempfile import NamedTemporaryFile
+ 19from typing import TYPE_CHECKING, Generic, TypeVar, cast, final, overload
+ 20
+ 21if TYPE_CHECKING:
+ 22    from collections.abc import Callable, Iterable, Iterator
+ 23    from logging import Logger
+ 24    from subprocess import CalledProcessError
+ 25    from typing import IO, Any, Final
+ 26
+ 27    P1 = TypeVar('P1')
+ 28    P2 = TypeVar('P2')
+ 29    P3 = TypeVar('P3')
+ 30    P4 = TypeVar('P4')
+ 31    Q = TypeVar('Q')
+ 32    R1 = TypeVar('R1')
+ 33    R2 = TypeVar('R2')
+ 34    R3 = TypeVar('R3')
+ 35    R4 = TypeVar('R4')
+ 36    T = TypeVar('T')
+ 37    S = TypeVar('S')
+ 38
+ 39P = TypeVar('P')
+ 40R = TypeVar('R')
+ 41H = TypeVar('H', bound=Hashable)
+ 42K = TypeVar('K', bound=Hashable)
+ 43V = TypeVar('V')
+ 44
+ 45_LOGGER: Final = logging.getLogger(__name__)
+ 46
+ 47ROOT: Final = Path(os.path.dirname(os.path.abspath(__file__)))
+ 48
+ 49
+ 50# Based on: https://stackoverflow.com/a/2704866
+ 51# Perhaps one day: https://peps.python.org/pep-0603/
+
+[docs] + 52class FrozenDict(Mapping[K, V]): + 53 _dict: dict[K, V] + 54 _hash: int | None + 55 + 56 # TODO overload + 57 # TODO try __init__(self: FrozenDict[str, V], **kwargs: V) + 58 def __init__(self, *args: Any, **kwargs: Any): + 59 self._dict = dict(*args, **kwargs) + 60 self._hash = None + 61 + 62 def __iter__(self) -> Iterator[K]: + 63 return iter(self._dict) + 64 + 65 def __len__(self) -> int: + 66 return len(self._dict) + 67 + 68 def __getitem__(self, key: K) -> V: + 69 return self._dict[key] + 70 + 71 def __hash__(self) -> int: + 72 if self._hash is None: + 73 h = 0 + 74 for pair in self.items(): + 75 h ^= hash(pair) + 76 self._hash = h + 77 return self._hash + 78 + 79 def __str__(self) -> str: + 80 return f'FrozenDict({str(self._dict)})' + 81 + 82 def __repr__(self) -> str: + 83 return f'FrozenDict({repr(self._dict)})'
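Because `FrozenDict` implements `Mapping` and caches its hash, it can appear where a plain `dict` cannot, e.g. as a dictionary key or inside a `frozenset`; it is only shallowly immutable, so its values must themselves be hashable for `hash()` to succeed.

config = FrozenDict({'depth': 100, 'fail_fast': True})
cache: dict[FrozenDict, str] = {config: 'cached result'}   # usable as a key, unlike dict
assert config['depth'] == 100 and len(config) == 2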
+ + 84 + 85 + 86EMPTY_FROZEN_DICT: Final[FrozenDict] = FrozenDict() + 87 + 88 +
+[docs] + 89@final + 90@dataclass(frozen=True) + 91class POSet(Generic[H]): + 92 image: FrozenDict[H, frozenset[H]] + 93 + 94 def __init__(self, relation: Iterable[tuple[H, H]]): + 95 _image = self._compute_image(relation) + 96 image: FrozenDict[H, frozenset[H]] = FrozenDict({x: frozenset(y) for x, y in _image.items()}) + 97 object.__setattr__(self, 'image', image) + 98 + 99 @staticmethod +100 def _compute_image(relation: Iterable[tuple[H, H]]) -> dict[H, set[H]]: +101 image: dict[H, set[H]] = {} +102 +103 for x, y in relation: +104 image.setdefault(x, set()).add(y) +105 +106 domain = set(image) +107 for k in domain: +108 for i in domain: +109 if k not in image[i]: +110 continue +111 for j in image[k]: +112 image[i].add(j) +113 +114 return image
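`_compute_image` is a Warshall-style pass: for every `k`, everything reachable from `k` is added to every element that already reaches `k`, so `image` ends up as the transitive closure of the input relation. A small worked example:

poset = POSet([('a', 'b'), ('b', 'c')])
assert poset.image == {'a': frozenset({'b', 'c'}),   # 'a' reaches 'c' through 'b'
                       'b': frozenset({'c'})}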
+ +115 +116 +
+[docs] +117def check_type(x: Any, typ: type[T]) -> T: +118 if not isinstance(x, typ): +119 raise ValueError(f'Expected object of type {typ.__name__}, got: {x}') +120 return x
+ +121 +122 +
+[docs] +123def raised(f: Callable, *args: Any, **kwargs: Any) -> BaseException | None: +124 try: +125 f(*args, **kwargs) +126 except Exception as e: +127 return e +128 +129 return None
+ +130 +131 +
+[docs] +132def merge_with(f: Callable[[V, V], V], d1: Mapping[K, V], d2: Mapping[K, V]) -> dict[K, V]: +133 res = dict(d1) +134 for k, v2 in d2.items(): +135 if k in d1: +136 v1 = d1[k] +137 res[k] = f(v1, v2) +138 else: +139 res[k] = v2 +140 return res
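For instance, merging two count maps while summing the values of keys present in both:

assert merge_with(lambda a, b: a + b, {'x': 1, 'y': 2}, {'y': 10, 'z': 3}) == {'x': 1, 'y': 12, 'z': 3}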
+ +141 +142 +
+[docs] +143def not_none(x: T | None) -> T: +144 if x is None: +145 raise ValueError('Expected value other than None') +146 return x
+ +147 +148 +
+[docs] +149def filter_none(mapping: Mapping[K, V]) -> dict[K, V]: +150 return {k: v for k, v in mapping.items() if v is not None}
+ +151 +152 +153# Higher-order functions +154 +155 +
+class Chainable(Generic[P, R]):
+    _f: Callable[[P], R]
+
+    def __init__(self, f: Callable[[P], R]):
+        self._f = f
+
+    def __call__(self, p: P) -> R:
+        return self._f(p)
+
+    def __rshift__(self, other: Callable[[R], Q]) -> Chainable[P, Q]:
+        return Chainable(lambda p: other(self(p)))
+
+
+chain: Final[Chainable[Any, Any]] = Chainable(lambda x: x)
+
+
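Chainable wraps a unary function so that >> composes left to right, and chain is the identity seed for such pipelines. An illustrative sketch (assumed usage, not part of the listing):

from pyk.utils import chain

parse_and_double = chain >> int >> (lambda n: n * 2)
assert parse_and_double('21') == 42  # int('21') is applied first, then the lambda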
+def none(x: Any) -> None:
+    pass
+
+
+def maybe(f: Callable[[P], R]) -> Callable[[P | None], R | None]:
+    def res(p: P | None) -> R | None:
+        return f(p) if p is not None else None
+
+    return res
+ +181 +182 +183@overload +184def tuple_of() -> Callable[[tuple[()]], tuple[()]]: ... +185 +186 +187@overload +188def tuple_of( +189 f1: Callable[[P1], R1], +190 /, +191) -> Callable[[tuple[P1]], tuple[R1]]: ... +192 +193 +194@overload +195def tuple_of( +196 f1: Callable[[P1], R1], +197 f2: Callable[[P2], R2], +198 /, +199) -> Callable[[tuple[P1, P2]], tuple[R1, R2]]: ... +200 +201 +202@overload +203def tuple_of( +204 f1: Callable[[P1], R1], +205 f2: Callable[[P2], R2], +206 f3: Callable[[P3], R3], +207 /, +208) -> Callable[[tuple[P1, P2, P3]], tuple[R1, R2, R3]]: ... +209 +210 +211@overload +212def tuple_of( +213 f1: Callable[[P1], R1], +214 f2: Callable[[P2], R2], +215 f3: Callable[[P3], R3], +216 f4: Callable[[P4], R4], +217 /, +218) -> Callable[[tuple[P1, P2, P3, P4]], tuple[R1, R2, R3, R4]]: ... +219 +220 +
+[docs] +221def tuple_of(*args: Callable) -> Callable: +222 def res(t: tuple) -> tuple: +223 return tuple(f(x) for f, x in zip(args, t, strict=True)) +224 +225 return res
+ +226 +227 +
+[docs] +228def case( +229 cases: Iterable[tuple[Callable[[P], bool], Callable[[P], R]]], +230 default: Callable[[P], R] | None = None, +231) -> Callable[[P], R]: +232 def res(p: P) -> R: +233 for cond, then in cases: # noqa: B905 +234 if cond(p): +235 return then(p) +236 +237 if default is not None: +238 return default(p) +239 +240 raise ValueError(f'No match found for: {p}') +241 +242 return res
+ +243 +244 +245# Iterables +246 +247 +
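case builds a function from (condition, handler) pairs, falling back to default and raising ValueError when nothing matches. A usage sketch under those assumptions (the sign example is illustrative only):

from pyk.utils import case

sign = case(
    cases=[
        (lambda n: n < 0, lambda n: 'negative'),
        (lambda n: n == 0, lambda n: 'zero'),
    ],
    default=lambda n: 'positive',
)
assert sign(-5) == 'negative'
assert sign(0) == 'zero'
assert sign(7) == 'positive'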
+[docs] +248def find_common_items(l1: Iterable[T], l2: Iterable[T]) -> tuple[list[T], list[T], list[T]]: +249 common = [] +250 for i in l1: +251 if i in l2: +252 common.append(i) +253 new_l1 = [] +254 new_l2 = [] +255 for i in l1: +256 if i not in common: +257 new_l1.append(i) +258 for i in l2: +259 if i not in common: +260 new_l2.append(i) +261 return (common, new_l1, new_l2)
+ +262 +263 +
+[docs] +264def intersperse(iterable: Iterable[T], delimiter: T) -> Iterator[T]: +265 it = iter(iterable) +266 +267 try: +268 yield next(it) +269 except StopIteration: +270 return +271 +272 for x in it: +273 yield delimiter +274 yield x
+ +275 +276 +
+[docs] +277def unique(iterable: Iterable[H]) -> Iterator[H]: +278 elems = set() +279 for elem in iterable: +280 if elem in elems: +281 continue +282 else: +283 elems.add(elem) +284 yield elem
+ +285 +286 +
+[docs] +287def single(iterable: Iterable[T]) -> T: +288 it = iter(iterable) +289 sentinel = object() +290 +291 fst = next(it, sentinel) +292 if fst is sentinel: +293 raise ValueError('Expected a single element, found none') +294 fst = cast('T', fst) +295 +296 snd = next(it, sentinel) +297 if snd is not sentinel: +298 raise ValueError('Expected a single element, found more', fst, snd) +299 +300 return fst
+ +301 +302 +
+[docs] +303def some(iterable: Iterable[T]) -> T | None: +304 return next(iter(iterable), None)
+ +305 +306 +
+[docs] +307def repeat_last(iterable: Iterable[T]) -> Iterator[T]: +308 it = iter(iterable) +309 last: T | None = None +310 while True: +311 try: +312 last = next(it) +313 yield last +314 +315 except StopIteration: +316 if last is None: +317 return +318 +319 yield last
+ +320 +321 +
+def partition(iterable: Iterable[T], pred: Callable[[T, T], bool]) -> list[list[T]]:
+    """Partition the iterable into sublists based on the given predicate.
+
+    predicate pred(_, _) should satisfy:
+    - pred(x, x)
+    - if pred(x, y) and pred(y, z) then pred(x, z);
+    - if pred(x, y) then pred(y, x);
+    """
+    groups: list[list[T]] = []
+    for item in iterable:
+        found = False
+        for group in groups:
+            group_matches = []
+            for group_item in group:
+                group_match = pred(group_item, item)
+                if group_match != pred(item, group_item):
+                    raise ValueError(f'Partitioning failed, predicate commutativity failed on: {(item, group_item)}')
+                group_matches.append(group_match)
+            if found and any(group_matches):
+                raise ValueError(f'Partitioning failed, item matched multiple groups: {item}')
+            if all(group_matches):
+                found = True
+                group.append(item)
+            elif any(group_matches):
+                raise ValueError(f'Partitioning failed, item matched only some elements of group: {(item, group)}')
+        if not found:
+            groups.append([item])
+    return groups
+
+
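partition groups items by an equivalence-like predicate (reflexive, symmetric, transitive), as the docstring above requires. A small sketch, assuming pyk.utils is importable:

from pyk.utils import partition

groups = partition([1, 2, 3, 4, 5], lambda x, y: x % 2 == y % 2)
assert groups == [[1, 3, 5], [2, 4]]  # grouped by parity, input order preserved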
+def nonempty_str(x: Any) -> str:
+    if x is None:
+        raise ValueError('Expected nonempty string, found: null.')
+    if type(x) is not str:
+        raise TypeError(f'Expected nonempty string, found: {type(x)}')
+    if x == '':
+        raise ValueError("Expected nonempty string, found: ''")
+    return x
+
+
+def add_indent(indent: str, lines: Iterable[str]) -> list[str]:
+    return [indent + line for line in lines]
+
+
+def is_hexstring(x: str) -> bool:
+    return all(c in string.hexdigits for c in x)
+
+
+# Hashes
+
+
+[docs] +373def hash_str(x: object) -> str: +374 hash = hashlib.sha256() +375 hash.update(str(x).encode('utf-8')) +376 return str(hash.hexdigest())
+ +377 +378 +
+[docs] +379def hash_file(file: Path, chunk_num_blocks: int = 128) -> str: +380 h = hashlib.sha256() +381 with open(str(file), 'rb') as f: +382 while chunk := f.read(chunk_num_blocks * h.block_size): +383 h.update(chunk) +384 return str(h.hexdigest())
+ +385 +386 +
+[docs] +387def is_hash(x: Any) -> bool: +388 # NB! currently only sha256 in hexdec form is detected +389 # 2b9e b7c5 441e 9f7e 97f9 a4e5 fc04 a0f7 9f62 c8e9 605a ad1e 02db e8de 3c21 0422 +390 # 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 +391 return type(x) is str and len(x) == 64 and is_hexstring(x)
+ +392 +393 +
+[docs] +394def shorten_hash(h: str, left_chars: int = 6, right_chars: int = 6) -> str: +395 left = h[0:left_chars] if left_chars > 0 else '' +396 right = h[-right_chars:] if right_chars > 0 else '' +397 return left + '..' + right
+ +398 +399 +
+[docs] +400def shorten_hashes(value: Any, left_chars: int = 6, right_chars: int = 6) -> Any: +401 result: Any = None +402 if is_hash(value): +403 result = shorten_hash(value, left_chars, right_chars) +404 elif type(value) is tuple: +405 result = tuple([shorten_hashes(item) for item in value]) +406 elif type(value) is list: +407 result = [shorten_hashes(v) for v in value] +408 elif type(value) is dict: +409 result = {} +410 for k, v in value.items(): +411 result[shorten_hashes(k)] = shorten_hashes(v) +412 elif type(value) is set: +413 result = set() +414 for item in value: +415 result.add(shorten_hashes(item)) +416 else: +417 result = value +418 return result
+ +419 +420 +
+[docs] +421def deconstruct_short_hash(h: str) -> tuple[str, str]: +422 x = h.lower() +423 if is_hash(x): +424 return (x, x) +425 (l, sep, r) = x.partition('..') +426 if sep == '..' and is_hexstring(l) and is_hexstring(r): +427 return (l, r) +428 raise ValueError(f'Bad short hash: {h}')
+ +429 +430 +
+[docs] +431def compare_short_hashes(lhs: str, rhs: str) -> bool: +432 (l0, l1) = deconstruct_short_hash(lhs) +433 (r0, r1) = deconstruct_short_hash(rhs) +434 return (l0.startswith(r0) or r0.startswith(l0)) and (l1.endswith(r1) or r1.endswith(l1))
+ +435 +436 +
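The hash helpers above shorten 64-character hex digests and compare possibly shortened forms against full ones. A sketch of the expected behaviour (the input string is arbitrary):

from pyk.utils import compare_short_hashes, hash_str, shorten_hash

digest = hash_str('hello')    # 64 hex characters (sha256 of the string form)
short = shorten_hash(digest)  # first 6 and last 6 characters joined by '..'
assert compare_short_hashes(short, digest)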
+[docs] +437def run_process( +438 args: str | Iterable[str], +439 *, +440 check: bool = True, +441 input: str | None = None, +442 pipe_stdout: bool = True, +443 pipe_stderr: bool = False, +444 cwd: str | Path | None = None, +445 env: Mapping[str, str] | None = None, +446 logger: Logger | None = None, +447 exec_process: bool = False, +448) -> CompletedProcess: +449 if cwd is not None: +450 cwd = Path(cwd) +451 check_dir_path(cwd) +452 +453 if type(args) is str: +454 command = args +455 else: +456 args = tuple(args) +457 command = shlex.join(args) +458 +459 if not logger: +460 logger = _LOGGER +461 +462 stdout = subprocess.PIPE if pipe_stdout else None +463 stderr = subprocess.PIPE if pipe_stderr else None +464 +465 logger.info(f'Running: {command}') +466 +467 if exec_process: +468 sys.stdout.flush() +469 sys.stderr.flush() +470 if type(args) is str: +471 args = shlex.split(args) +472 argslist = list(args) +473 os.execvp(argslist[0], argslist) +474 +475 start_time = time.time() +476 +477 res = subprocess.run(args, input=input, cwd=cwd, env=env, stdout=stdout, stderr=stderr, text=True) +478 +479 delta_time = time.time() - start_time +480 logger.info(f'Completed in {delta_time:.3f}s with status {res.returncode}: {command}') +481 +482 if check: +483 res.check_returncode() +484 +485 return res
+ +486 +487 +
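run_process is a logging wrapper around subprocess.run that captures stdout as text by default and raises on a non-zero exit when check=True. A minimal sketch, assuming a POSIX echo executable is on PATH:

from pyk.utils import run_process

res = run_process(['echo', 'hello'])  # command is logged, stdout captured as text
assert res.returncode == 0
assert res.stdout.strip() == 'hello'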
+[docs] +488def run_process_2( +489 args: str | Iterable[str], +490 *, +491 input: str | None = None, +492 write_stdout: bool = False, +493 write_stderr: bool = False, +494 cwd: str | Path | None = None, +495 env: Mapping[str, str] | None = None, +496 logger: Logger | None = None, +497 loglevel: int | None = None, +498 check: bool = True, +499) -> CompletedProcess: +500 if type(args) is str: +501 args = (args,) +502 else: +503 args = tuple(args) +504 +505 if cwd is not None: +506 cwd = Path(cwd) +507 check_dir_path(cwd) +508 +509 if not logger: +510 logger = _LOGGER +511 +512 if loglevel is None: +513 loglevel = logging.INFO +514 +515 res = _subprocess_run( +516 args, +517 input=input, +518 write_stdout=write_stdout, +519 write_stderr=write_stderr, +520 cwd=cwd, +521 env=env, +522 logger=logger, +523 loglevel=loglevel, +524 ) +525 +526 if check: +527 res.check_returncode() +528 +529 return res
+ +530 +531 +532def _subprocess_run( +533 args: tuple[Any, ...], +534 *, +535 input: str | None, +536 write_stdout: bool, +537 write_stderr: bool, +538 env: Mapping[str, str] | None, +539 cwd: Path | None, +540 logger: Logger, +541 loglevel: int, +542) -> CompletedProcess: +543 with Popen( +544 args, +545 stdin=subprocess.PIPE if input is not None else subprocess.DEVNULL, +546 stdout=subprocess.PIPE, +547 stderr=subprocess.PIPE, +548 env=env, +549 cwd=cwd, +550 text=True, +551 ) as popen: +552 log_prefix = f'[PID={popen.pid}]' +553 +554 command = shlex.join(args) +555 for line in command.split('\n'): +556 logger.log(loglevel, f'{log_prefix}[exec] {line}') +557 +558 start_time = time.time() +559 +560 try: +561 returncode, stdout, stderr = _subprocess_communicate( +562 popen, +563 input=input, +564 write_stdout=write_stdout, +565 write_stderr=write_stderr, +566 logger=logger, +567 loglevel=loglevel, +568 ) +569 except BaseException: +570 popen.kill() +571 delta_time = time.time() - start_time +572 logger.log(loglevel, f'{log_prefix}[fail] time={delta_time:.3f}s') +573 raise +574 +575 delta_time = time.time() - start_time +576 logger.log(loglevel, f'{log_prefix}[done] status={returncode} time={delta_time:.3f}s') +577 +578 return CompletedProcess(popen.args, returncode, stdout, stderr) +579 +580 +581def _subprocess_communicate( +582 popen: Popen, +583 *, +584 input: str | None, +585 write_stdout: bool, +586 write_stderr: bool, +587 logger: Logger, +588 loglevel: int, +589) -> tuple[int, str, str]: +590 assert popen.stdout is not None +591 assert popen.stderr is not None +592 +593 log_prefix = f'[PID={popen.pid}]' +594 +595 def readerthread( +596 input_fh: IO[str], +597 buffer: list[str], +598 stream_prefix: str, +599 output_fh: IO[str] | None, +600 ) -> None: +601 for line in input_fh: +602 buffer.append(line) +603 logger.log(loglevel, f'{log_prefix}{stream_prefix} {line.rstrip()}') +604 if output_fh: +605 output_fh.write(line) +606 output_fh.flush() +607 input_fh.close() +608 +609 stdout_buff: list[str] = [] +610 stdout_prefix = '[stdo]' +611 stdout_fh = sys.stdout if write_stdout else None +612 stdout_thread = threading.Thread(target=readerthread, args=(popen.stdout, stdout_buff, stdout_prefix, stdout_fh)) +613 stdout_thread.daemon = True +614 stdout_thread.start() +615 +616 stderr_buff: list[str] = [] +617 stderr_prefix = '[stde]' +618 stderr_fh = sys.stderr if write_stderr else None +619 stderr_thread = threading.Thread(target=readerthread, args=(popen.stderr, stderr_buff, stderr_prefix, stderr_fh)) +620 stderr_thread.daemon = True +621 stderr_thread.start() +622 +623 if input is not None: +624 assert popen.stdin is not None +625 for line in input.split('\n'): +626 logger.log(loglevel, f'{log_prefix}[stdi] {line}') +627 # Note: popen.stdin.write does not work for llvm_interpret_raw +628 popen._stdin_write(input) # type: ignore [attr-defined] +629 +630 stdout_thread.join() +631 stderr_thread.join() +632 +633 # Should be closed by readerthread at this point +634 # popen.stdout.close() +635 # popen.stderr.close() +636 +637 returncode = popen.wait() +638 stdout = ''.join(stdout_buff) +639 stderr = ''.join(stderr_buff) +640 +641 return returncode, stdout, stderr +642 +643 +
+[docs] +644def exit_with_process_error(err: CalledProcessError) -> None: +645 sys.stderr.write(f'[ERROR] Running process failed with returncode {err.returncode}:\n {shlex.join(err.cmd)}\n') +646 sys.stderr.flush() +647 sys.exit(err.returncode)
+ +648 +649 +
+[docs] +650def gen_file_timestamp(comment: str = '//') -> str: +651 return comment + ' This file generated by: ' + sys.argv[0] + '\n' + comment + ' ' + str(datetime.now()) + '\n'
+ +652 +653 +
+[docs] +654def check_dir_path(path: Path) -> None: +655 path = path.resolve() +656 if not path.exists(): +657 raise ValueError(f'Directory does not exist: {path}') +658 if not path.is_dir(): +659 raise ValueError(f'Path is not a directory: {path}')
+ +660 +661 +
+[docs] +662def check_file_path(path: Path) -> None: +663 path = path.resolve() +664 if not path.exists(): +665 raise ValueError(f'File does not exist: {path}') +666 if not path.is_file(): +667 raise ValueError(f'Path is not a file: {path}')
+ +668 +669 +
+[docs] +670def check_absolute_path(path: Path) -> None: +671 if not path.is_absolute(): +672 raise ValueError(f'Path is not absolute: {path}')
+ +673 +674 +
+[docs] +675def check_relative_path(path: Path) -> None: +676 if path.is_absolute(): +677 raise ValueError(f'Path is not relative: {path}')
+ +678 +679 +
+[docs] +680def ensure_dir_path(path: str | Path) -> Path: +681 path = Path(path) +682 if not path.exists(): +683 _LOGGER.info(f'Making directory: {path}') +684 path.mkdir(parents=True, exist_ok=True) +685 else: +686 check_dir_path(path) +687 return path
+ +688 +689 +
+[docs] +690def abs_or_rel_to(path: Path, base: Path) -> Path: +691 if path.is_absolute(): +692 return path +693 return base / path
+ +694 +695 +
+[docs] +696class BugReport: +697 _bug_report: Path +698 _command_id: int +699 _defn_id: int +700 _file_remap: dict[str, str] +701 +702 def __init__(self, bug_report: Path) -> None: +703 self._bug_report = bug_report.with_suffix('.tar') +704 self._command_id = 0 +705 self._defn_id = 0 +706 self._file_remap = {} +707 if self._bug_report.exists(): +708 _LOGGER.warning(f'Bug report exists, removing: {self._bug_report}') +709 self._bug_report.unlink() +710 +
+[docs] +711 def add_file(self, finput: Path, arcname: Path) -> None: +712 if str(finput) not in self._file_remap: +713 self._file_remap[str(finput)] = str(arcname) +714 with tarfile.open(self._bug_report, 'a', format=tarfile.GNU_FORMAT) as tar: +715 tar.add(finput, arcname=arcname) +716 _LOGGER.info(f'Added file to bug report {self._bug_report}:{arcname}: {finput}')
+ +717 +
+[docs] +718 def add_file_contents(self, input: str, arcname: Path) -> None: +719 with NamedTemporaryFile('w') as ntf: +720 ntf.write(input) +721 ntf.flush() +722 self.add_file(Path(ntf.name), arcname)
+ +723 +
+[docs] +724 def add_request(self, req_name: str) -> None: +725 self.add_file_contents(req_name, Path(f'sequence/{self._command_id:03}')) +726 self._command_id += 1
+ +727 +
+[docs] +728 def add_command(self, args: Iterable[str]) -> None: +729 def _remap_arg(_a: str) -> str: +730 if _a in self._file_remap: +731 return self._file_remap[_a] +732 _a_path = Path(_a) +733 for _f in self._file_remap: +734 _f_path = Path(_f) +735 if _a_path.is_relative_to(_f_path): +736 return str(Path(self._file_remap[_f]) / _a_path.relative_to(_f_path)) +737 return _a +738 +739 remapped_args = [_remap_arg(a) for a in args] +740 arcname = Path(f'commands/{self._command_id:03}.sh') +741 shebang = '#!/usr/bin/env bash\nset -euxo pipefail\n' +742 self.add_file_contents(shebang + ' '.join(remapped_args) + '\n', arcname) +743 self._command_id += 1
+
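BugReport incrementally packs inputs and the commands that consumed them into a .tar archive. A hypothetical usage sketch (the file name and kprove command are illustrative only):

from pathlib import Path

from pyk.utils import BugReport

br = BugReport(Path('my-bug'))  # archive is written to my-bug.tar
br.add_file_contents('some input text\n', Path('inputs/spec.k'))
br.add_command(['kprove', 'inputs/spec.k'])  # recorded as a shell script under commands/ in the archive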
+ + + + \ No newline at end of file diff --git a/pyk/_sources/api/modules.rst.txt b/pyk/_sources/api/modules.rst.txt new file mode 100644 index 00000000000..e727e2e97e6 --- /dev/null +++ b/pyk/_sources/api/modules.rst.txt @@ -0,0 +1,7 @@ +pyk +=== + +.. toctree:: + :maxdepth: 4 + + pyk diff --git a/pyk/_sources/api/pyk.coverage.rst.txt b/pyk/_sources/api/pyk.coverage.rst.txt new file mode 100644 index 00000000000..e6f12eb5f8d --- /dev/null +++ b/pyk/_sources/api/pyk.coverage.rst.txt @@ -0,0 +1,7 @@ +pyk.coverage module +=================== + +.. automodule:: pyk.coverage + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.cterm.cterm.rst.txt b/pyk/_sources/api/pyk.cterm.cterm.rst.txt new file mode 100644 index 00000000000..731bf24559c --- /dev/null +++ b/pyk/_sources/api/pyk.cterm.cterm.rst.txt @@ -0,0 +1,7 @@ +pyk.cterm.cterm module +====================== + +.. automodule:: pyk.cterm.cterm + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.cterm.rst.txt b/pyk/_sources/api/pyk.cterm.rst.txt new file mode 100644 index 00000000000..7291410bce7 --- /dev/null +++ b/pyk/_sources/api/pyk.cterm.rst.txt @@ -0,0 +1,16 @@ +pyk.cterm package +================= + +.. automodule:: pyk.cterm + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.cterm.cterm + pyk.cterm.symbolic diff --git a/pyk/_sources/api/pyk.cterm.symbolic.rst.txt b/pyk/_sources/api/pyk.cterm.symbolic.rst.txt new file mode 100644 index 00000000000..9fb05271bda --- /dev/null +++ b/pyk/_sources/api/pyk.cterm.symbolic.rst.txt @@ -0,0 +1,7 @@ +pyk.cterm.symbolic module +========================= + +.. automodule:: pyk.cterm.symbolic + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.dequote.rst.txt b/pyk/_sources/api/pyk.dequote.rst.txt new file mode 100644 index 00000000000..219814217f3 --- /dev/null +++ b/pyk/_sources/api/pyk.dequote.rst.txt @@ -0,0 +1,7 @@ +pyk.dequote module +================== + +.. automodule:: pyk.dequote + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.att.rst.txt b/pyk/_sources/api/pyk.kast.att.rst.txt new file mode 100644 index 00000000000..10f1f9aa8b9 --- /dev/null +++ b/pyk/_sources/api/pyk.kast.att.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.att module +=================== + +.. automodule:: pyk.kast.att + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.color.rst.txt b/pyk/_sources/api/pyk.kast.color.rst.txt new file mode 100644 index 00000000000..f26198ed5ce --- /dev/null +++ b/pyk/_sources/api/pyk.kast.color.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.color module +===================== + +.. automodule:: pyk.kast.color + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.formatter.rst.txt b/pyk/_sources/api/pyk.kast.formatter.rst.txt new file mode 100644 index 00000000000..690c716cc16 --- /dev/null +++ b/pyk/_sources/api/pyk.kast.formatter.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.formatter module +========================= + +.. automodule:: pyk.kast.formatter + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.inner.rst.txt b/pyk/_sources/api/pyk.kast.inner.rst.txt new file mode 100644 index 00000000000..aab0b2bba87 --- /dev/null +++ b/pyk/_sources/api/pyk.kast.inner.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.inner module +===================== + +.. 
automodule:: pyk.kast.inner + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.kast.rst.txt b/pyk/_sources/api/pyk.kast.kast.rst.txt new file mode 100644 index 00000000000..00e9dd22d18 --- /dev/null +++ b/pyk/_sources/api/pyk.kast.kast.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.kast module +==================== + +.. automodule:: pyk.kast.kast + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.lexer.rst.txt b/pyk/_sources/api/pyk.kast.lexer.rst.txt new file mode 100644 index 00000000000..70157b5c785 --- /dev/null +++ b/pyk/_sources/api/pyk.kast.lexer.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.lexer module +===================== + +.. automodule:: pyk.kast.lexer + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.manip.rst.txt b/pyk/_sources/api/pyk.kast.manip.rst.txt new file mode 100644 index 00000000000..551a11a01fe --- /dev/null +++ b/pyk/_sources/api/pyk.kast.manip.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.manip module +===================== + +.. automodule:: pyk.kast.manip + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.markdown.rst.txt b/pyk/_sources/api/pyk.kast.markdown.rst.txt new file mode 100644 index 00000000000..7126277a7f8 --- /dev/null +++ b/pyk/_sources/api/pyk.kast.markdown.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.markdown module +======================== + +.. automodule:: pyk.kast.markdown + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.outer.rst.txt b/pyk/_sources/api/pyk.kast.outer.rst.txt new file mode 100644 index 00000000000..34fc8e653f3 --- /dev/null +++ b/pyk/_sources/api/pyk.kast.outer.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.outer module +===================== + +.. automodule:: pyk.kast.outer + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.outer_lexer.rst.txt b/pyk/_sources/api/pyk.kast.outer_lexer.rst.txt new file mode 100644 index 00000000000..14625abce5a --- /dev/null +++ b/pyk/_sources/api/pyk.kast.outer_lexer.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.outer\_lexer module +============================ + +.. automodule:: pyk.kast.outer_lexer + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.outer_parser.rst.txt b/pyk/_sources/api/pyk.kast.outer_parser.rst.txt new file mode 100644 index 00000000000..bf0ee150db8 --- /dev/null +++ b/pyk/_sources/api/pyk.kast.outer_parser.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.outer\_parser module +============================= + +.. automodule:: pyk.kast.outer_parser + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.outer_syntax.rst.txt b/pyk/_sources/api/pyk.kast.outer_syntax.rst.txt new file mode 100644 index 00000000000..2566aea3953 --- /dev/null +++ b/pyk/_sources/api/pyk.kast.outer_syntax.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.outer\_syntax module +============================= + +.. automodule:: pyk.kast.outer_syntax + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.parser.rst.txt b/pyk/_sources/api/pyk.kast.parser.rst.txt new file mode 100644 index 00000000000..46dd70ed074 --- /dev/null +++ b/pyk/_sources/api/pyk.kast.parser.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.parser module +====================== + +.. 
automodule:: pyk.kast.parser + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.pretty.rst.txt b/pyk/_sources/api/pyk.kast.pretty.rst.txt new file mode 100644 index 00000000000..6e520450980 --- /dev/null +++ b/pyk/_sources/api/pyk.kast.pretty.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.pretty module +====================== + +.. automodule:: pyk.kast.pretty + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.rewrite.rst.txt b/pyk/_sources/api/pyk.kast.rewrite.rst.txt new file mode 100644 index 00000000000..2fa7c1cd3f6 --- /dev/null +++ b/pyk/_sources/api/pyk.kast.rewrite.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.rewrite module +======================= + +.. automodule:: pyk.kast.rewrite + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kast.rst.txt b/pyk/_sources/api/pyk.kast.rst.txt new file mode 100644 index 00000000000..c215d8768bb --- /dev/null +++ b/pyk/_sources/api/pyk.kast.rst.txt @@ -0,0 +1,30 @@ +pyk.kast package +================ + +.. automodule:: pyk.kast + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.kast.att + pyk.kast.color + pyk.kast.formatter + pyk.kast.inner + pyk.kast.kast + pyk.kast.lexer + pyk.kast.manip + pyk.kast.markdown + pyk.kast.outer + pyk.kast.outer_lexer + pyk.kast.outer_parser + pyk.kast.outer_syntax + pyk.kast.parser + pyk.kast.pretty + pyk.kast.rewrite + pyk.kast.utils diff --git a/pyk/_sources/api/pyk.kast.utils.rst.txt b/pyk/_sources/api/pyk.kast.utils.rst.txt new file mode 100644 index 00000000000..6dc4febf240 --- /dev/null +++ b/pyk/_sources/api/pyk.kast.utils.rst.txt @@ -0,0 +1,7 @@ +pyk.kast.utils module +===================== + +.. automodule:: pyk.kast.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kbuild.config.rst.txt b/pyk/_sources/api/pyk.kbuild.config.rst.txt new file mode 100644 index 00000000000..91c648a7bd0 --- /dev/null +++ b/pyk/_sources/api/pyk.kbuild.config.rst.txt @@ -0,0 +1,7 @@ +pyk.kbuild.config module +======================== + +.. automodule:: pyk.kbuild.config + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kbuild.kbuild.rst.txt b/pyk/_sources/api/pyk.kbuild.kbuild.rst.txt new file mode 100644 index 00000000000..31a96259502 --- /dev/null +++ b/pyk/_sources/api/pyk.kbuild.kbuild.rst.txt @@ -0,0 +1,7 @@ +pyk.kbuild.kbuild module +======================== + +.. automodule:: pyk.kbuild.kbuild + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kbuild.project.rst.txt b/pyk/_sources/api/pyk.kbuild.project.rst.txt new file mode 100644 index 00000000000..48e1ed372be --- /dev/null +++ b/pyk/_sources/api/pyk.kbuild.project.rst.txt @@ -0,0 +1,7 @@ +pyk.kbuild.project module +========================= + +.. automodule:: pyk.kbuild.project + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kbuild.rst.txt b/pyk/_sources/api/pyk.kbuild.rst.txt new file mode 100644 index 00000000000..99bcfa60b2a --- /dev/null +++ b/pyk/_sources/api/pyk.kbuild.rst.txt @@ -0,0 +1,18 @@ +pyk.kbuild package +================== + +.. automodule:: pyk.kbuild + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +.. 
toctree:: + :maxdepth: 4 + + pyk.kbuild.config + pyk.kbuild.kbuild + pyk.kbuild.project + pyk.kbuild.utils diff --git a/pyk/_sources/api/pyk.kbuild.utils.rst.txt b/pyk/_sources/api/pyk.kbuild.utils.rst.txt new file mode 100644 index 00000000000..d54121dfaf1 --- /dev/null +++ b/pyk/_sources/api/pyk.kbuild.utils.rst.txt @@ -0,0 +1,7 @@ +pyk.kbuild.utils module +======================= + +.. automodule:: pyk.kbuild.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kcfg.exploration.rst.txt b/pyk/_sources/api/pyk.kcfg.exploration.rst.txt new file mode 100644 index 00000000000..5f5f4d50c86 --- /dev/null +++ b/pyk/_sources/api/pyk.kcfg.exploration.rst.txt @@ -0,0 +1,7 @@ +pyk.kcfg.exploration module +=========================== + +.. automodule:: pyk.kcfg.exploration + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kcfg.explore.rst.txt b/pyk/_sources/api/pyk.kcfg.explore.rst.txt new file mode 100644 index 00000000000..1c5c5b9ca2f --- /dev/null +++ b/pyk/_sources/api/pyk.kcfg.explore.rst.txt @@ -0,0 +1,7 @@ +pyk.kcfg.explore module +======================= + +.. automodule:: pyk.kcfg.explore + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kcfg.kcfg.rst.txt b/pyk/_sources/api/pyk.kcfg.kcfg.rst.txt new file mode 100644 index 00000000000..4b4c87b5fbb --- /dev/null +++ b/pyk/_sources/api/pyk.kcfg.kcfg.rst.txt @@ -0,0 +1,7 @@ +pyk.kcfg.kcfg module +==================== + +.. automodule:: pyk.kcfg.kcfg + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kcfg.minimize.rst.txt b/pyk/_sources/api/pyk.kcfg.minimize.rst.txt new file mode 100644 index 00000000000..ca6986e9d70 --- /dev/null +++ b/pyk/_sources/api/pyk.kcfg.minimize.rst.txt @@ -0,0 +1,7 @@ +pyk.kcfg.minimize module +======================== + +.. automodule:: pyk.kcfg.minimize + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kcfg.rst.txt b/pyk/_sources/api/pyk.kcfg.rst.txt new file mode 100644 index 00000000000..217e6c7d1c5 --- /dev/null +++ b/pyk/_sources/api/pyk.kcfg.rst.txt @@ -0,0 +1,22 @@ +pyk.kcfg package +================ + +.. automodule:: pyk.kcfg + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.kcfg.exploration + pyk.kcfg.explore + pyk.kcfg.kcfg + pyk.kcfg.minimize + pyk.kcfg.semantics + pyk.kcfg.show + pyk.kcfg.store + pyk.kcfg.tui diff --git a/pyk/_sources/api/pyk.kcfg.semantics.rst.txt b/pyk/_sources/api/pyk.kcfg.semantics.rst.txt new file mode 100644 index 00000000000..baa1b1cb7f6 --- /dev/null +++ b/pyk/_sources/api/pyk.kcfg.semantics.rst.txt @@ -0,0 +1,7 @@ +pyk.kcfg.semantics module +========================= + +.. automodule:: pyk.kcfg.semantics + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kcfg.show.rst.txt b/pyk/_sources/api/pyk.kcfg.show.rst.txt new file mode 100644 index 00000000000..d06b0936fbf --- /dev/null +++ b/pyk/_sources/api/pyk.kcfg.show.rst.txt @@ -0,0 +1,7 @@ +pyk.kcfg.show module +==================== + +.. automodule:: pyk.kcfg.show + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kcfg.store.rst.txt b/pyk/_sources/api/pyk.kcfg.store.rst.txt new file mode 100644 index 00000000000..99b591c378b --- /dev/null +++ b/pyk/_sources/api/pyk.kcfg.store.rst.txt @@ -0,0 +1,7 @@ +pyk.kcfg.store module +===================== + +.. 
automodule:: pyk.kcfg.store + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kcfg.tui.rst.txt b/pyk/_sources/api/pyk.kcfg.tui.rst.txt new file mode 100644 index 00000000000..8cefe671bd5 --- /dev/null +++ b/pyk/_sources/api/pyk.kcfg.tui.rst.txt @@ -0,0 +1,7 @@ +pyk.kcfg.tui module +=================== + +.. automodule:: pyk.kcfg.tui + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kcovr.rst.txt b/pyk/_sources/api/pyk.kcovr.rst.txt new file mode 100644 index 00000000000..41af42d2e83 --- /dev/null +++ b/pyk/_sources/api/pyk.kcovr.rst.txt @@ -0,0 +1,7 @@ +pyk.kcovr module +================ + +.. automodule:: pyk.kcovr + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kdist.api.rst.txt b/pyk/_sources/api/pyk.kdist.api.rst.txt new file mode 100644 index 00000000000..fe283c0f4ef --- /dev/null +++ b/pyk/_sources/api/pyk.kdist.api.rst.txt @@ -0,0 +1,7 @@ +pyk.kdist.api module +==================== + +.. automodule:: pyk.kdist.api + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kdist.rst.txt b/pyk/_sources/api/pyk.kdist.rst.txt new file mode 100644 index 00000000000..b5ddaf70eef --- /dev/null +++ b/pyk/_sources/api/pyk.kdist.rst.txt @@ -0,0 +1,16 @@ +pyk.kdist package +================= + +.. automodule:: pyk.kdist + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.kdist.api + pyk.kdist.utils diff --git a/pyk/_sources/api/pyk.kdist.utils.rst.txt b/pyk/_sources/api/pyk.kdist.utils.rst.txt new file mode 100644 index 00000000000..a39e63139b8 --- /dev/null +++ b/pyk/_sources/api/pyk.kdist.utils.rst.txt @@ -0,0 +1,7 @@ +pyk.kdist.utils module +====================== + +.. automodule:: pyk.kdist.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kllvm.ast.rst.txt b/pyk/_sources/api/pyk.kllvm.ast.rst.txt new file mode 100644 index 00000000000..9d40238c850 --- /dev/null +++ b/pyk/_sources/api/pyk.kllvm.ast.rst.txt @@ -0,0 +1,7 @@ +pyk.kllvm.ast module +==================== + +.. automodule:: pyk.kllvm.ast + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kllvm.compiler.rst.txt b/pyk/_sources/api/pyk.kllvm.compiler.rst.txt new file mode 100644 index 00000000000..f6071320fd8 --- /dev/null +++ b/pyk/_sources/api/pyk.kllvm.compiler.rst.txt @@ -0,0 +1,7 @@ +pyk.kllvm.compiler module +========================= + +.. automodule:: pyk.kllvm.compiler + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kllvm.convert.rst.txt b/pyk/_sources/api/pyk.kllvm.convert.rst.txt new file mode 100644 index 00000000000..d59c2f7029d --- /dev/null +++ b/pyk/_sources/api/pyk.kllvm.convert.rst.txt @@ -0,0 +1,7 @@ +pyk.kllvm.convert module +======================== + +.. automodule:: pyk.kllvm.convert + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kllvm.hints.prooftrace.rst.txt b/pyk/_sources/api/pyk.kllvm.hints.prooftrace.rst.txt new file mode 100644 index 00000000000..b061b4e19b3 --- /dev/null +++ b/pyk/_sources/api/pyk.kllvm.hints.prooftrace.rst.txt @@ -0,0 +1,7 @@ +pyk.kllvm.hints.prooftrace module +================================= + +.. 
automodule:: pyk.kllvm.hints.prooftrace + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kllvm.hints.rst.txt b/pyk/_sources/api/pyk.kllvm.hints.rst.txt new file mode 100644 index 00000000000..7466db95bb5 --- /dev/null +++ b/pyk/_sources/api/pyk.kllvm.hints.rst.txt @@ -0,0 +1,15 @@ +pyk.kllvm.hints package +======================= + +.. automodule:: pyk.kllvm.hints + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.kllvm.hints.prooftrace diff --git a/pyk/_sources/api/pyk.kllvm.importer.rst.txt b/pyk/_sources/api/pyk.kllvm.importer.rst.txt new file mode 100644 index 00000000000..26a7dc42f46 --- /dev/null +++ b/pyk/_sources/api/pyk.kllvm.importer.rst.txt @@ -0,0 +1,7 @@ +pyk.kllvm.importer module +========================= + +.. automodule:: pyk.kllvm.importer + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kllvm.load.rst.txt b/pyk/_sources/api/pyk.kllvm.load.rst.txt new file mode 100644 index 00000000000..0c0127600c8 --- /dev/null +++ b/pyk/_sources/api/pyk.kllvm.load.rst.txt @@ -0,0 +1,7 @@ +pyk.kllvm.load module +===================== + +.. automodule:: pyk.kllvm.load + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kllvm.load_static.rst.txt b/pyk/_sources/api/pyk.kllvm.load_static.rst.txt new file mode 100644 index 00000000000..a22597334bd --- /dev/null +++ b/pyk/_sources/api/pyk.kllvm.load_static.rst.txt @@ -0,0 +1,7 @@ +pyk.kllvm.load\_static module +============================= + +.. automodule:: pyk.kllvm.load_static + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kllvm.parser.rst.txt b/pyk/_sources/api/pyk.kllvm.parser.rst.txt new file mode 100644 index 00000000000..31c1f4f2d73 --- /dev/null +++ b/pyk/_sources/api/pyk.kllvm.parser.rst.txt @@ -0,0 +1,7 @@ +pyk.kllvm.parser module +======================= + +.. automodule:: pyk.kllvm.parser + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kllvm.rst.txt b/pyk/_sources/api/pyk.kllvm.rst.txt new file mode 100644 index 00000000000..e36a045e0fa --- /dev/null +++ b/pyk/_sources/api/pyk.kllvm.rst.txt @@ -0,0 +1,31 @@ +pyk.kllvm package +================= + +.. automodule:: pyk.kllvm + :members: + :undoc-members: + :show-inheritance: + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + pyk.kllvm.hints + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.kllvm.ast + pyk.kllvm.compiler + pyk.kllvm.convert + pyk.kllvm.importer + pyk.kllvm.load + pyk.kllvm.load_static + pyk.kllvm.parser + pyk.kllvm.runtime + pyk.kllvm.utils diff --git a/pyk/_sources/api/pyk.kllvm.runtime.rst.txt b/pyk/_sources/api/pyk.kllvm.runtime.rst.txt new file mode 100644 index 00000000000..4f810b2e220 --- /dev/null +++ b/pyk/_sources/api/pyk.kllvm.runtime.rst.txt @@ -0,0 +1,7 @@ +pyk.kllvm.runtime module +======================== + +.. automodule:: pyk.kllvm.runtime + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kllvm.utils.rst.txt b/pyk/_sources/api/pyk.kllvm.utils.rst.txt new file mode 100644 index 00000000000..d7b82fc143a --- /dev/null +++ b/pyk/_sources/api/pyk.kllvm.utils.rst.txt @@ -0,0 +1,7 @@ +pyk.kllvm.utils module +====================== + +.. 
automodule:: pyk.kllvm.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.konvert.rst.txt b/pyk/_sources/api/pyk.konvert.rst.txt new file mode 100644 index 00000000000..f3ec1f33014 --- /dev/null +++ b/pyk/_sources/api/pyk.konvert.rst.txt @@ -0,0 +1,7 @@ +pyk.konvert package +=================== + +.. automodule:: pyk.konvert + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kore.kompiled.rst.txt b/pyk/_sources/api/pyk.kore.kompiled.rst.txt new file mode 100644 index 00000000000..0cfcf395d16 --- /dev/null +++ b/pyk/_sources/api/pyk.kore.kompiled.rst.txt @@ -0,0 +1,7 @@ +pyk.kore.kompiled module +======================== + +.. automodule:: pyk.kore.kompiled + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kore.lexer.rst.txt b/pyk/_sources/api/pyk.kore.lexer.rst.txt new file mode 100644 index 00000000000..52c7d18965d --- /dev/null +++ b/pyk/_sources/api/pyk.kore.lexer.rst.txt @@ -0,0 +1,7 @@ +pyk.kore.lexer module +===================== + +.. automodule:: pyk.kore.lexer + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kore.manip.rst.txt b/pyk/_sources/api/pyk.kore.manip.rst.txt new file mode 100644 index 00000000000..7be74c43cf4 --- /dev/null +++ b/pyk/_sources/api/pyk.kore.manip.rst.txt @@ -0,0 +1,7 @@ +pyk.kore.manip module +===================== + +.. automodule:: pyk.kore.manip + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kore.match.rst.txt b/pyk/_sources/api/pyk.kore.match.rst.txt new file mode 100644 index 00000000000..d9990385bf6 --- /dev/null +++ b/pyk/_sources/api/pyk.kore.match.rst.txt @@ -0,0 +1,7 @@ +pyk.kore.match module +===================== + +.. automodule:: pyk.kore.match + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kore.parser.rst.txt b/pyk/_sources/api/pyk.kore.parser.rst.txt new file mode 100644 index 00000000000..86c138813f9 --- /dev/null +++ b/pyk/_sources/api/pyk.kore.parser.rst.txt @@ -0,0 +1,7 @@ +pyk.kore.parser module +====================== + +.. automodule:: pyk.kore.parser + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kore.pool.rst.txt b/pyk/_sources/api/pyk.kore.pool.rst.txt new file mode 100644 index 00000000000..ae3e738076d --- /dev/null +++ b/pyk/_sources/api/pyk.kore.pool.rst.txt @@ -0,0 +1,7 @@ +pyk.kore.pool module +==================== + +.. automodule:: pyk.kore.pool + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kore.prelude.rst.txt b/pyk/_sources/api/pyk.kore.prelude.rst.txt new file mode 100644 index 00000000000..145900acd9b --- /dev/null +++ b/pyk/_sources/api/pyk.kore.prelude.rst.txt @@ -0,0 +1,7 @@ +pyk.kore.prelude module +======================= + +.. automodule:: pyk.kore.prelude + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kore.rpc.rst.txt b/pyk/_sources/api/pyk.kore.rpc.rst.txt new file mode 100644 index 00000000000..12bba66893f --- /dev/null +++ b/pyk/_sources/api/pyk.kore.rpc.rst.txt @@ -0,0 +1,7 @@ +pyk.kore.rpc module +=================== + +.. automodule:: pyk.kore.rpc + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kore.rst.txt b/pyk/_sources/api/pyk.kore.rst.txt new file mode 100644 index 00000000000..a31c2cbd611 --- /dev/null +++ b/pyk/_sources/api/pyk.kore.rst.txt @@ -0,0 +1,25 @@ +pyk.kore package +================ + +.. 
automodule:: pyk.kore + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.kore.kompiled + pyk.kore.lexer + pyk.kore.manip + pyk.kore.match + pyk.kore.parser + pyk.kore.pool + pyk.kore.prelude + pyk.kore.rpc + pyk.kore.rule + pyk.kore.syntax + pyk.kore.tools diff --git a/pyk/_sources/api/pyk.kore.rule.rst.txt b/pyk/_sources/api/pyk.kore.rule.rst.txt new file mode 100644 index 00000000000..bfc648f39b5 --- /dev/null +++ b/pyk/_sources/api/pyk.kore.rule.rst.txt @@ -0,0 +1,7 @@ +pyk.kore.rule module +==================== + +.. automodule:: pyk.kore.rule + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kore.syntax.rst.txt b/pyk/_sources/api/pyk.kore.syntax.rst.txt new file mode 100644 index 00000000000..16b997c93c1 --- /dev/null +++ b/pyk/_sources/api/pyk.kore.syntax.rst.txt @@ -0,0 +1,7 @@ +pyk.kore.syntax module +====================== + +.. automodule:: pyk.kore.syntax + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kore.tools.rst.txt b/pyk/_sources/api/pyk.kore.tools.rst.txt new file mode 100644 index 00000000000..ee01c338556 --- /dev/null +++ b/pyk/_sources/api/pyk.kore.tools.rst.txt @@ -0,0 +1,7 @@ +pyk.kore.tools module +===================== + +.. automodule:: pyk.kore.tools + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kore_exec_covr.kore_exec_covr.rst.txt b/pyk/_sources/api/pyk.kore_exec_covr.kore_exec_covr.rst.txt new file mode 100644 index 00000000000..799a41e4ec1 --- /dev/null +++ b/pyk/_sources/api/pyk.kore_exec_covr.kore_exec_covr.rst.txt @@ -0,0 +1,7 @@ +pyk.kore\_exec\_covr.kore\_exec\_covr module +============================================ + +.. automodule:: pyk.kore_exec_covr.kore_exec_covr + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.kore_exec_covr.rst.txt b/pyk/_sources/api/pyk.kore_exec_covr.rst.txt new file mode 100644 index 00000000000..bcf8a9d7698 --- /dev/null +++ b/pyk/_sources/api/pyk.kore_exec_covr.rst.txt @@ -0,0 +1,15 @@ +pyk.kore\_exec\_covr package +============================ + +.. automodule:: pyk.kore_exec_covr + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.kore_exec_covr.kore_exec_covr diff --git a/pyk/_sources/api/pyk.krepl.repl.rst.txt b/pyk/_sources/api/pyk.krepl.repl.rst.txt new file mode 100644 index 00000000000..cb90b48612c --- /dev/null +++ b/pyk/_sources/api/pyk.krepl.repl.rst.txt @@ -0,0 +1,7 @@ +pyk.krepl.repl module +===================== + +.. automodule:: pyk.krepl.repl + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.krepl.rst.txt b/pyk/_sources/api/pyk.krepl.rst.txt new file mode 100644 index 00000000000..a03344a4cda --- /dev/null +++ b/pyk/_sources/api/pyk.krepl.rst.txt @@ -0,0 +1,15 @@ +pyk.krepl package +================= + +.. automodule:: pyk.krepl + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.krepl.repl diff --git a/pyk/_sources/api/pyk.ktool.claim_index.rst.txt b/pyk/_sources/api/pyk.ktool.claim_index.rst.txt new file mode 100644 index 00000000000..a4c0ff1ad38 --- /dev/null +++ b/pyk/_sources/api/pyk.ktool.claim_index.rst.txt @@ -0,0 +1,7 @@ +pyk.ktool.claim\_index module +============================= + +.. 
automodule:: pyk.ktool.claim_index + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.ktool.claim_loader.rst.txt b/pyk/_sources/api/pyk.ktool.claim_loader.rst.txt new file mode 100644 index 00000000000..ff711e27751 --- /dev/null +++ b/pyk/_sources/api/pyk.ktool.claim_loader.rst.txt @@ -0,0 +1,7 @@ +pyk.ktool.claim\_loader module +============================== + +.. automodule:: pyk.ktool.claim_loader + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.ktool.kfuzz.rst.txt b/pyk/_sources/api/pyk.ktool.kfuzz.rst.txt new file mode 100644 index 00000000000..6e602ec46f0 --- /dev/null +++ b/pyk/_sources/api/pyk.ktool.kfuzz.rst.txt @@ -0,0 +1,7 @@ +pyk.ktool.kfuzz module +====================== + +.. automodule:: pyk.ktool.kfuzz + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.ktool.kompile.rst.txt b/pyk/_sources/api/pyk.ktool.kompile.rst.txt new file mode 100644 index 00000000000..bf4e5653a7e --- /dev/null +++ b/pyk/_sources/api/pyk.ktool.kompile.rst.txt @@ -0,0 +1,7 @@ +pyk.ktool.kompile module +======================== + +.. automodule:: pyk.ktool.kompile + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.ktool.kprint.rst.txt b/pyk/_sources/api/pyk.ktool.kprint.rst.txt new file mode 100644 index 00000000000..737151764fb --- /dev/null +++ b/pyk/_sources/api/pyk.ktool.kprint.rst.txt @@ -0,0 +1,7 @@ +pyk.ktool.kprint module +======================= + +.. automodule:: pyk.ktool.kprint + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.ktool.kprove.rst.txt b/pyk/_sources/api/pyk.ktool.kprove.rst.txt new file mode 100644 index 00000000000..4379d5b9a3d --- /dev/null +++ b/pyk/_sources/api/pyk.ktool.kprove.rst.txt @@ -0,0 +1,7 @@ +pyk.ktool.kprove module +======================= + +.. automodule:: pyk.ktool.kprove + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.ktool.krun.rst.txt b/pyk/_sources/api/pyk.ktool.krun.rst.txt new file mode 100644 index 00000000000..45fbf59b405 --- /dev/null +++ b/pyk/_sources/api/pyk.ktool.krun.rst.txt @@ -0,0 +1,7 @@ +pyk.ktool.krun module +===================== + +.. automodule:: pyk.ktool.krun + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.ktool.prove_rpc.rst.txt b/pyk/_sources/api/pyk.ktool.prove_rpc.rst.txt new file mode 100644 index 00000000000..b030a136561 --- /dev/null +++ b/pyk/_sources/api/pyk.ktool.prove_rpc.rst.txt @@ -0,0 +1,7 @@ +pyk.ktool.prove\_rpc module +=========================== + +.. automodule:: pyk.ktool.prove_rpc + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.ktool.rst.txt b/pyk/_sources/api/pyk.ktool.rst.txt new file mode 100644 index 00000000000..f1faaa428bd --- /dev/null +++ b/pyk/_sources/api/pyk.ktool.rst.txt @@ -0,0 +1,23 @@ +pyk.ktool package +================= + +.. automodule:: pyk.ktool + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.ktool.claim_index + pyk.ktool.claim_loader + pyk.ktool.kfuzz + pyk.ktool.kompile + pyk.ktool.kprint + pyk.ktool.kprove + pyk.ktool.krun + pyk.ktool.prove_rpc + pyk.ktool.utils diff --git a/pyk/_sources/api/pyk.ktool.utils.rst.txt b/pyk/_sources/api/pyk.ktool.utils.rst.txt new file mode 100644 index 00000000000..722e3c184d5 --- /dev/null +++ b/pyk/_sources/api/pyk.ktool.utils.rst.txt @@ -0,0 +1,7 @@ +pyk.ktool.utils module +====================== + +.. 
automodule:: pyk.ktool.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.prelude.bytes.rst.txt b/pyk/_sources/api/pyk.prelude.bytes.rst.txt new file mode 100644 index 00000000000..61300d8e0ec --- /dev/null +++ b/pyk/_sources/api/pyk.prelude.bytes.rst.txt @@ -0,0 +1,7 @@ +pyk.prelude.bytes module +======================== + +.. automodule:: pyk.prelude.bytes + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.prelude.collections.rst.txt b/pyk/_sources/api/pyk.prelude.collections.rst.txt new file mode 100644 index 00000000000..d49515e6811 --- /dev/null +++ b/pyk/_sources/api/pyk.prelude.collections.rst.txt @@ -0,0 +1,7 @@ +pyk.prelude.collections module +============================== + +.. automodule:: pyk.prelude.collections + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.prelude.k.rst.txt b/pyk/_sources/api/pyk.prelude.k.rst.txt new file mode 100644 index 00000000000..21b42fa13c0 --- /dev/null +++ b/pyk/_sources/api/pyk.prelude.k.rst.txt @@ -0,0 +1,7 @@ +pyk.prelude.k module +==================== + +.. automodule:: pyk.prelude.k + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.prelude.kbool.rst.txt b/pyk/_sources/api/pyk.prelude.kbool.rst.txt new file mode 100644 index 00000000000..977bf4a269f --- /dev/null +++ b/pyk/_sources/api/pyk.prelude.kbool.rst.txt @@ -0,0 +1,7 @@ +pyk.prelude.kbool module +======================== + +.. automodule:: pyk.prelude.kbool + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.prelude.kint.rst.txt b/pyk/_sources/api/pyk.prelude.kint.rst.txt new file mode 100644 index 00000000000..7e50e380c29 --- /dev/null +++ b/pyk/_sources/api/pyk.prelude.kint.rst.txt @@ -0,0 +1,7 @@ +pyk.prelude.kint module +======================= + +.. automodule:: pyk.prelude.kint + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.prelude.ml.rst.txt b/pyk/_sources/api/pyk.prelude.ml.rst.txt new file mode 100644 index 00000000000..3a8afb8503e --- /dev/null +++ b/pyk/_sources/api/pyk.prelude.ml.rst.txt @@ -0,0 +1,7 @@ +pyk.prelude.ml module +===================== + +.. automodule:: pyk.prelude.ml + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.prelude.rst.txt b/pyk/_sources/api/pyk.prelude.rst.txt new file mode 100644 index 00000000000..60bc8be23fe --- /dev/null +++ b/pyk/_sources/api/pyk.prelude.rst.txt @@ -0,0 +1,22 @@ +pyk.prelude package +=================== + +.. automodule:: pyk.prelude + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.prelude.bytes + pyk.prelude.collections + pyk.prelude.k + pyk.prelude.kbool + pyk.prelude.kint + pyk.prelude.ml + pyk.prelude.string + pyk.prelude.utils diff --git a/pyk/_sources/api/pyk.prelude.string.rst.txt b/pyk/_sources/api/pyk.prelude.string.rst.txt new file mode 100644 index 00000000000..4257f7596a0 --- /dev/null +++ b/pyk/_sources/api/pyk.prelude.string.rst.txt @@ -0,0 +1,7 @@ +pyk.prelude.string module +========================= + +.. automodule:: pyk.prelude.string + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.prelude.utils.rst.txt b/pyk/_sources/api/pyk.prelude.utils.rst.txt new file mode 100644 index 00000000000..fd592e6640b --- /dev/null +++ b/pyk/_sources/api/pyk.prelude.utils.rst.txt @@ -0,0 +1,7 @@ +pyk.prelude.utils module +======================== + +.. 
automodule:: pyk.prelude.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.proof.implies.rst.txt b/pyk/_sources/api/pyk.proof.implies.rst.txt new file mode 100644 index 00000000000..a7e0aeee18a --- /dev/null +++ b/pyk/_sources/api/pyk.proof.implies.rst.txt @@ -0,0 +1,7 @@ +pyk.proof.implies module +======================== + +.. automodule:: pyk.proof.implies + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.proof.proof.rst.txt b/pyk/_sources/api/pyk.proof.proof.rst.txt new file mode 100644 index 00000000000..0eb6a54342b --- /dev/null +++ b/pyk/_sources/api/pyk.proof.proof.rst.txt @@ -0,0 +1,7 @@ +pyk.proof.proof module +====================== + +.. automodule:: pyk.proof.proof + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.proof.reachability.rst.txt b/pyk/_sources/api/pyk.proof.reachability.rst.txt new file mode 100644 index 00000000000..cf9942d34cb --- /dev/null +++ b/pyk/_sources/api/pyk.proof.reachability.rst.txt @@ -0,0 +1,7 @@ +pyk.proof.reachability module +============================= + +.. automodule:: pyk.proof.reachability + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.proof.rst.txt b/pyk/_sources/api/pyk.proof.rst.txt new file mode 100644 index 00000000000..a29b9b8e00a --- /dev/null +++ b/pyk/_sources/api/pyk.proof.rst.txt @@ -0,0 +1,19 @@ +pyk.proof package +================= + +.. automodule:: pyk.proof + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.proof.implies + pyk.proof.proof + pyk.proof.reachability + pyk.proof.show + pyk.proof.tui diff --git a/pyk/_sources/api/pyk.proof.show.rst.txt b/pyk/_sources/api/pyk.proof.show.rst.txt new file mode 100644 index 00000000000..ab669d317eb --- /dev/null +++ b/pyk/_sources/api/pyk.proof.show.rst.txt @@ -0,0 +1,7 @@ +pyk.proof.show module +===================== + +.. automodule:: pyk.proof.show + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.proof.tui.rst.txt b/pyk/_sources/api/pyk.proof.tui.rst.txt new file mode 100644 index 00000000000..6c2e8ce107f --- /dev/null +++ b/pyk/_sources/api/pyk.proof.tui.rst.txt @@ -0,0 +1,7 @@ +pyk.proof.tui module +==================== + +.. automodule:: pyk.proof.tui + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.rst.txt b/pyk/_sources/api/pyk.rst.txt new file mode 100644 index 00000000000..2e3c94a524b --- /dev/null +++ b/pyk/_sources/api/pyk.rst.txt @@ -0,0 +1,39 @@ +pyk package +=========== + +.. automodule:: pyk + :members: + :undoc-members: + :show-inheritance: + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + pyk.cterm + pyk.kast + pyk.kbuild + pyk.kcfg + pyk.kdist + pyk.kllvm + pyk.konvert + pyk.kore + pyk.kore_exec_covr + pyk.krepl + pyk.ktool + pyk.prelude + pyk.proof + pyk.testing + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.coverage + pyk.dequote + pyk.kcovr + pyk.utils diff --git a/pyk/_sources/api/pyk.testing.plugin.rst.txt b/pyk/_sources/api/pyk.testing.plugin.rst.txt new file mode 100644 index 00000000000..9337aa9dbb9 --- /dev/null +++ b/pyk/_sources/api/pyk.testing.plugin.rst.txt @@ -0,0 +1,7 @@ +pyk.testing.plugin module +========================= + +.. 
automodule:: pyk.testing.plugin + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/api/pyk.testing.rst.txt b/pyk/_sources/api/pyk.testing.rst.txt new file mode 100644 index 00000000000..f657432fbdc --- /dev/null +++ b/pyk/_sources/api/pyk.testing.rst.txt @@ -0,0 +1,15 @@ +pyk.testing package +=================== + +.. automodule:: pyk.testing + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + pyk.testing.plugin diff --git a/pyk/_sources/api/pyk.utils.rst.txt b/pyk/_sources/api/pyk.utils.rst.txt new file mode 100644 index 00000000000..a3d145093fe --- /dev/null +++ b/pyk/_sources/api/pyk.utils.rst.txt @@ -0,0 +1,7 @@ +pyk.utils module +================ + +.. automodule:: pyk.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/pyk/_sources/index.rst.txt b/pyk/_sources/index.rst.txt new file mode 100644 index 00000000000..7392a2a50b3 --- /dev/null +++ b/pyk/_sources/index.rst.txt @@ -0,0 +1,21 @@ +.. pyk documentation master file, created by + sphinx-quickstart on Fri Jan 12 09:29:35 2024. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to pyk's documentation! +=============================== + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + api/modules + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/pyk/_static/_sphinx_javascript_frameworks_compat.js b/pyk/_static/_sphinx_javascript_frameworks_compat.js new file mode 100644 index 00000000000..81415803ec2 --- /dev/null +++ b/pyk/_static/_sphinx_javascript_frameworks_compat.js @@ -0,0 +1,123 @@ +/* Compatability shim for jQuery and underscores.js. + * + * Copyright Sphinx contributors + * Released under the two clause BSD licence + */ + +/** + * small helper function to urldecode strings + * + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL + */ +jQuery.urldecode = function(x) { + if (!x) { + return x + } + return decodeURIComponent(x.replace(/\+/g, ' ')); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} diff --git a/pyk/_static/basic.css b/pyk/_static/basic.css new file mode 100644 index 00000000000..f316efcb47b --- /dev/null +++ b/pyk/_static/basic.css @@ -0,0 +1,925 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + 
+div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 
8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + 
+dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} 
+ +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/pyk/_static/css/badge_only.css b/pyk/_static/css/badge_only.css new file mode 100644 index 00000000000..88ba55b965c --- /dev/null +++ b/pyk/_static/css/badge_only.css @@ -0,0 +1 @@ +.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions .rst-other-versions 
.rtd-current-item{font-weight:700}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}#flyout-search-form{padding:6px} \ No newline at end of file diff --git a/pyk/_static/css/fonts/Roboto-Slab-Bold.woff b/pyk/_static/css/fonts/Roboto-Slab-Bold.woff new file mode 100644 index 00000000000..6cb60000181 Binary files /dev/null and b/pyk/_static/css/fonts/Roboto-Slab-Bold.woff differ diff --git a/pyk/_static/css/fonts/Roboto-Slab-Bold.woff2 b/pyk/_static/css/fonts/Roboto-Slab-Bold.woff2 new file mode 100644 index 00000000000..7059e23142a Binary files /dev/null and b/pyk/_static/css/fonts/Roboto-Slab-Bold.woff2 differ diff --git a/pyk/_static/css/fonts/Roboto-Slab-Regular.woff b/pyk/_static/css/fonts/Roboto-Slab-Regular.woff new file mode 100644 index 00000000000..f815f63f99d Binary files /dev/null and b/pyk/_static/css/fonts/Roboto-Slab-Regular.woff differ diff --git a/pyk/_static/css/fonts/Roboto-Slab-Regular.woff2 b/pyk/_static/css/fonts/Roboto-Slab-Regular.woff2 new file mode 100644 index 00000000000..f2c76e5bda1 Binary files /dev/null and b/pyk/_static/css/fonts/Roboto-Slab-Regular.woff2 differ diff --git a/pyk/_static/css/fonts/fontawesome-webfont.eot b/pyk/_static/css/fonts/fontawesome-webfont.eot new file mode 100644 index 00000000000..e9f60ca953f Binary files /dev/null and b/pyk/_static/css/fonts/fontawesome-webfont.eot differ diff --git a/pyk/_static/css/fonts/fontawesome-webfont.svg b/pyk/_static/css/fonts/fontawesome-webfont.svg new file mode 100644 index 00000000000..855c845e538 --- /dev/null +++ b/pyk/_static/css/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + + +Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 + By ,,, +Copyright Dave Gandy 2016. All rights reserved. 
+ [fontawesome-webfont.svg: 2,671 lines of Font Awesome SVG glyph outline data, omitted] diff --git a/pyk/_static/css/fonts/fontawesome-webfont.ttf b/pyk/_static/css/fonts/fontawesome-webfont.ttf new file mode 100644 index 00000000000..35acda2fa11 Binary files /dev/null and b/pyk/_static/css/fonts/fontawesome-webfont.ttf differ diff --git a/pyk/_static/css/fonts/fontawesome-webfont.woff b/pyk/_static/css/fonts/fontawesome-webfont.woff new file mode 100644 index 00000000000..400014a4b06 Binary files /dev/null and b/pyk/_static/css/fonts/fontawesome-webfont.woff differ diff --git a/pyk/_static/css/fonts/fontawesome-webfont.woff2 b/pyk/_static/css/fonts/fontawesome-webfont.woff2 new file mode 100644 index 00000000000..4d13fc60404 Binary files /dev/null and b/pyk/_static/css/fonts/fontawesome-webfont.woff2 differ diff --git a/pyk/_static/css/fonts/lato-bold-italic.woff b/pyk/_static/css/fonts/lato-bold-italic.woff new file mode 100644 index 00000000000..88ad05b9ff4 Binary files /dev/null and b/pyk/_static/css/fonts/lato-bold-italic.woff differ diff --git a/pyk/_static/css/fonts/lato-bold-italic.woff2 b/pyk/_static/css/fonts/lato-bold-italic.woff2 new file mode 100644 index 00000000000..c4e3d804b57 Binary files /dev/null and b/pyk/_static/css/fonts/lato-bold-italic.woff2 differ diff --git a/pyk/_static/css/fonts/lato-bold.woff b/pyk/_static/css/fonts/lato-bold.woff new file mode 100644 index 00000000000..c6dff51f063 Binary files /dev/null and b/pyk/_static/css/fonts/lato-bold.woff differ diff --git a/pyk/_static/css/fonts/lato-bold.woff2 b/pyk/_static/css/fonts/lato-bold.woff2 new file mode 100644 index 00000000000..bb195043cfc Binary files /dev/null and b/pyk/_static/css/fonts/lato-bold.woff2 differ diff --git a/pyk/_static/css/fonts/lato-normal-italic.woff b/pyk/_static/css/fonts/lato-normal-italic.woff new file mode 100644 index 00000000000..76114bc0336 Binary files /dev/null and b/pyk/_static/css/fonts/lato-normal-italic.woff differ diff --git a/pyk/_static/css/fonts/lato-normal-italic.woff2 b/pyk/_static/css/fonts/lato-normal-italic.woff2 new file mode 100644 index 00000000000..3404f37e2e3 Binary files /dev/null and b/pyk/_static/css/fonts/lato-normal-italic.woff2 differ diff
--git a/pyk/_static/css/fonts/lato-normal.woff b/pyk/_static/css/fonts/lato-normal.woff new file mode 100644 index 00000000000..ae1307ff5f4 Binary files /dev/null and b/pyk/_static/css/fonts/lato-normal.woff differ diff --git a/pyk/_static/css/fonts/lato-normal.woff2 b/pyk/_static/css/fonts/lato-normal.woff2 new file mode 100644 index 00000000000..3bf9843328a Binary files /dev/null and b/pyk/_static/css/fonts/lato-normal.woff2 differ diff --git a/pyk/_static/css/theme.css b/pyk/_static/css/theme.css new file mode 100644 index 00000000000..0f14f106460 --- /dev/null +++ b/pyk/_static/css/theme.css @@ -0,0 +1,4 @@ +html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content 
.toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .eqno .headerlink:before,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li button.toctree-expand:before,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! 
+ * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .eqno .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a button.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-left.toctree-expand,.wy-menu-vertical li button.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content .eqno .fa-pull-right.headerlink,.rst-content .fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 
.fa-pull-right.headerlink,.rst-content p .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a button.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-right.toctree-expand,.wy-menu-vertical li button.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .eqno .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a button.pull-left.toctree-expand,.wy-menu-vertical li.on a button.pull-left.toctree-expand,.wy-menu-vertical li button.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .eqno .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a button.pull-right.toctree-expand,.wy-menu-vertical li.on a button.pull-right.toctree-expand,.wy-menu-vertical li button.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root 
.fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success 
.wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa
-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown .caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:
before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-
vimeo-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li button.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-b
ell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-register
ed:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-television:before,.fa-tv:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:""}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-sign-language:before,.fa-signing:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-address-card:before,.fa-vcard:before{content:""}.fa-address-card-o:before,.fa-vcard-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{c
ontent:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption .headerlink:before,.rst-content .eqno .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li 
button.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content .eqno a .headerlink,.rst-content a .admonition-title,.rst-content code.download a span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a .headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content p a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li a button.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content .eqno .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a .rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content p .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li button.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content .eqno .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a button.toctree-expand,.btn .wy-menu-vertical li.on a button.toctree-expand,.btn .wy-menu-vertical li button.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content .eqno .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a button.toctree-expand,.nav .wy-menu-vertical li.on a button.toctree-expand,.nav .wy-menu-vertical li button.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .eqno .btn .headerlink,.rst-content .eqno .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn 
.headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p .btn .headerlink,.rst-content p .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content table>caption .nav .headerlink,.rst-content tt.download .btn span:first-child,.rst-content tt.download .nav span:first-child,.wy-menu-vertical li .btn button.toctree-expand,.wy-menu-vertical li.current>a .btn button.toctree-expand,.wy-menu-vertical li.current>a .nav button.toctree-expand,.wy-menu-vertical li .nav button.toctree-expand,.wy-menu-vertical li.on a .btn button.toctree-expand,.wy-menu-vertical li.on a .nav button.toctree-expand{display:inline}.btn .fa-large.icon,.btn .fa.fa-large,.btn .rst-content .code-block-caption .fa-large.headerlink,.btn .rst-content .eqno .fa-large.headerlink,.btn .rst-content .fa-large.admonition-title,.btn .rst-content code.download span.fa-large:first-child,.btn .rst-content dl dt .fa-large.headerlink,.btn .rst-content h1 .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.btn .rst-content p .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.btn .wy-menu-vertical li button.fa-large.toctree-expand,.nav .fa-large.icon,.nav .fa.fa-large,.nav .rst-content .code-block-caption .fa-large.headerlink,.nav .rst-content .eqno .fa-large.headerlink,.nav .rst-content .fa-large.admonition-title,.nav .rst-content code.download span.fa-large:first-child,.nav .rst-content dl dt .fa-large.headerlink,.nav .rst-content h1 .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.nav .rst-content p .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.nav .wy-menu-vertical li button.fa-large.toctree-expand,.rst-content .btn .fa-large.admonition-title,.rst-content .code-block-caption .btn .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.rst-content .eqno .btn .fa-large.headerlink,.rst-content .eqno .nav .fa-large.headerlink,.rst-content .nav .fa-large.admonition-title,.rst-content code.download .btn span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.rst-content dl dt .btn .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.rst-content p .btn .fa-large.headerlink,.rst-content p .nav .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.rst-content tt.download .btn span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.wy-menu-vertical li .btn 
button.fa-large.toctree-expand,.wy-menu-vertical li .nav button.fa-large.toctree-expand{line-height:.9em}.btn .fa-spin.icon,.btn .fa.fa-spin,.btn .rst-content .code-block-caption .fa-spin.headerlink,.btn .rst-content .eqno .fa-spin.headerlink,.btn .rst-content .fa-spin.admonition-title,.btn .rst-content code.download span.fa-spin:first-child,.btn .rst-content dl dt .fa-spin.headerlink,.btn .rst-content h1 .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.btn .rst-content p .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.btn .wy-menu-vertical li button.fa-spin.toctree-expand,.nav .fa-spin.icon,.nav .fa.fa-spin,.nav .rst-content .code-block-caption .fa-spin.headerlink,.nav .rst-content .eqno .fa-spin.headerlink,.nav .rst-content .fa-spin.admonition-title,.nav .rst-content code.download span.fa-spin:first-child,.nav .rst-content dl dt .fa-spin.headerlink,.nav .rst-content h1 .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.nav .rst-content p .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.nav .wy-menu-vertical li button.fa-spin.toctree-expand,.rst-content .btn .fa-spin.admonition-title,.rst-content .code-block-caption .btn .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.rst-content .eqno .btn .fa-spin.headerlink,.rst-content .eqno .nav .fa-spin.headerlink,.rst-content .nav .fa-spin.admonition-title,.rst-content code.download .btn span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.rst-content dl dt .btn .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.rst-content p .btn .fa-spin.headerlink,.rst-content p .nav .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.rst-content tt.download .btn span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.wy-menu-vertical li .btn button.fa-spin.toctree-expand,.wy-menu-vertical li .nav button.fa-spin.toctree-expand{display:inline-block}.btn.fa:before,.btn.icon:before,.rst-content .btn.admonition-title:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content .eqno .btn.headerlink:before,.rst-content code.download span.btn:first-child:before,.rst-content dl dt .btn.headerlink:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content p .btn.headerlink:before,.rst-content table>caption 
.btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.wy-menu-vertical li button.btn.toctree-expand:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.btn.icon:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content .eqno .btn.headerlink:hover:before,.rst-content code.download span.btn:first-child:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content p .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.wy-menu-vertical li button.btn.toctree-expand:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .icon:before,.btn-mini .rst-content .admonition-title:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.btn-mini .rst-content .eqno .headerlink:before,.btn-mini .rst-content code.download span:first-child:before,.btn-mini .rst-content dl dt .headerlink:before,.btn-mini .rst-content h1 .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.btn-mini .rst-content p .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.btn-mini .wy-menu-vertical li button.toctree-expand:before,.rst-content .btn-mini .admonition-title:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.rst-content .eqno .btn-mini .headerlink:before,.rst-content code.download .btn-mini span:first-child:before,.rst-content dl dt .btn-mini .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.rst-content p .btn-mini .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.rst-content tt.download .btn-mini span:first-child:before,.wy-menu-vertical li .btn-mini button.toctree-expand:before{font-size:14px;vertical-align:-15%}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.wy-alert{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.rst-content .admonition-title,.wy-alert-title{font-weight:700;display:block;color:#fff;background:#6ab0de;padding:6px 12px;margin:-12px -12px 12px}.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.admonition,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.tip,.rst-content 
.wy-alert-danger.warning,.wy-alert.wy-alert-danger{background:#fdf3f2}.rst-content .danger .admonition-title,.rst-content .danger .wy-alert-title,.rst-content .error .admonition-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .admonition-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.wy-alert.wy-alert-danger .wy-alert-title{background:#f29f97}.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .warning,.rst-content .wy-alert-warning.admonition,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.note,.rst-content .wy-alert-warning.seealso,.rst-content .wy-alert-warning.tip,.wy-alert.wy-alert-warning{background:#ffedcc}.rst-content .admonition-todo .admonition-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .attention .admonition-title,.rst-content .attention .wy-alert-title,.rst-content .caution .admonition-title,.rst-content .caution .wy-alert-title,.rst-content .warning .admonition-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.admonition .admonition-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.wy-alert.wy-alert-warning .wy-alert-title{background:#f0b37e}.rst-content .note,.rst-content .seealso,.rst-content .wy-alert-info.admonition,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content 
.wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.wy-alert.wy-alert-info{background:#e7f2fa}.rst-content .note .admonition-title,.rst-content .note .wy-alert-title,.rst-content .seealso .admonition-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .admonition-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.wy-alert.wy-alert-info .wy-alert-title{background:#6ab0de}.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.admonition,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.warning,.wy-alert.wy-alert-success{background:#dbfaf4}.rst-content .hint .admonition-title,.rst-content .hint .wy-alert-title,.rst-content .important .admonition-title,.rst-content .important .wy-alert-title,.rst-content .tip .admonition-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .admonition-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.wy-alert.wy-alert-success .wy-alert-title{background:#1abc9c}.rst-content 
.wy-alert-neutral.admonition,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.wy-alert.wy-alert-neutral{background:#f3f6f6}.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .admonition-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.wy-alert.wy-alert-neutral .wy-alert-title{color:#404040;background:#e1e4e5}.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.wy-alert.wy-alert-neutral a{color:#2980b9}.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .note p:last-child,.rst-content .seealso p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.wy-alert p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27ae60}.wy-tray-container 
li.wy-tray-item-info{background:#2980b9}.wy-tray-container li.wy-tray-item-warning{background:#e67e22}.wy-tray-container li.wy-tray-item-danger{background:#e74c3c}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width:768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px;color:#fff;border:1px solid rgba(0,0,0,.1);background-color:#27ae60;text-decoration:none;font-weight:400;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 2px -1px hsla(0,0%,100%,.5),inset 0 -2px 0 0 rgba(0,0,0,.1);outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:inset 0 -1px 0 0 rgba(0,0,0,.05),inset 0 2px 0 0 rgba(0,0,0,.1);padding:8px 12px 6px}.btn:visited{color:#fff}.btn-disabled,.btn-disabled:active,.btn-disabled:focus,.btn-disabled:hover,.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980b9!important}.btn-info:hover{background-color:#2e8ece!important}.btn-neutral{background-color:#f3f6f6!important;color:#404040!important}.btn-neutral:hover{background-color:#e5ebeb!important;color:#404040}.btn-neutral:visited{color:#404040!important}.btn-success{background-color:#27ae60!important}.btn-success:hover{background-color:#295!important}.btn-danger{background-color:#e74c3c!important}.btn-danger:hover{background-color:#ea6153!important}.btn-warning{background-color:#e67e22!important}.btn-warning:hover{background-color:#e98b39!important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f!important}.btn-link{background-color:transparent!important;color:#2980b9;box-shadow:none;border-color:transparent!important}.btn-link:active,.btn-link:hover{background-color:transparent!important;color:#409ad5!important;box-shadow:none}.btn-link:visited{color:#9b59b6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:after,.wy-btn-group:before{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:1px solid #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980b9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:1px solid #cfd7dd;margin:6px 
0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980b9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 .3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves 
input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group .wy-form-halves input[type=url],.wy-control-group .wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group .wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border 
.3s linear}input[type=datetime-local]{padding:.34375em .625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto #129fea}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch 
span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27ae60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27ae60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980b9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form 
input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td{background-color:#f3f6f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive 
table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980b9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#2980b9!important}a.wy-text-info:hover{color:#409ad5!important}.wy-text-success{color:#27ae60!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.rst-content section ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.rst-content section ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.rst-content section ul li p:last-child,.rst-content section ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.rst-content section ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.rst-content section ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.rst-content section ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content .section ol.arabic,.rst-content .toctree-wrapper ol,.rst-content .toctree-wrapper ol.arabic,.rst-content section ol,.rst-content section ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol.arabic li,.rst-content .section ol li,.rst-content .toctree-wrapper ol.arabic li,.rst-content .toctree-wrapper ol li,.rst-content section ol.arabic li,.rst-content section ol li,.wy-plain-list-decimal li,article ol 
li{list-style:decimal;margin-left:24px}.rst-content .section ol.arabic li ul,.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content .toctree-wrapper ol.arabic li ul,.rst-content .toctree-wrapper ol li p:last-child,.rst-content .toctree-wrapper ol li ul,.rst-content section ol.arabic li ul,.rst-content section ol li p:last-child,.rst-content section ol li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol.arabic li ul li,.rst-content .section ol li ul li,.rst-content .toctree-wrapper ol.arabic li ul li,.rst-content .toctree-wrapper ol li ul li,.rst-content section ol.arabic li ul li,.rst-content section ol li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs>li{display:inline-block;padding-top:5px}.wy-breadcrumbs>li.wy-breadcrumbs-aside{float:right}.rst-content .wy-breadcrumbs>li code,.rst-content .wy-breadcrumbs>li tt,.wy-breadcrumbs>li .rst-content tt,.wy-breadcrumbs>li code{all:inherit;color:inherit}.breadcrumb-item:before{content:"/";color:#bbb;font-size:13px;padding:0 6px 0 3px}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#55a5d9;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li button.toctree-expand{display:block;float:left;margin-left:-1.2em;line-height:18px;color:#4d4d4d;border:none;background:none;padding:0}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#fcfcfc;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#fcfcfc}.wy-menu-vertical li.current>a:hover button.toctree-expand,.wy-menu-vertical li.on a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a 
button.toctree-expand{display:block;line-height:18px;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{padding:.4045em 1.618em .4045em 4.045em}.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{padding:.4045em 1.618em .4045em 5.663em}.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a{padding:.4045em 1.618em .4045em 7.281em}.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a{padding:.4045em 1.618em .4045em 8.899em}.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 
8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a{padding:.4045em 1.618em .4045em 10.517em}.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a{padding:.4045em 1.618em .4045em 12.135em}.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a{padding:.4045em 1.618em .4045em 13.753em}.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a{padding:.4045em 1.618em .4045em 15.371em}.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 1.618em .4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 button.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#bdbdbd}.wy-menu-vertical li.toctree-l3 button.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover button.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980b9;cursor:pointer;color:#fff}.wy-menu-vertical a:active button.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980b9;text-align:center;color:#fcfcfc}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#fcfcfc;font-size:100%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em;max-width:100%}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search .wy-dropdown>aactive,.wy-side-nav-search .wy-dropdown>afocus,.wy-side-nav-search>a:hover,.wy-side-nav-search>aactive,.wy-side-nav-search>afocus{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon,.wy-side-nav-search>a.icon{display:block}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.switch-menus{position:relative;display:block;margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-side-nav-search>div.switch-menus>div.language-switch,.wy-side-nav-search>div.switch-menus>div.version-switch{display:inline-block;padding:.2em}.wy-side-nav-search>div.switch-menus>div.language-switch select,.wy-side-nav-search>div.switch-menus>div.version-switch select{display:inline-block;margin-right:-2rem;padding-right:2rem;max-width:240px;text-align-last:center;background:none;border:none;border-radius:0;box-shadow:none;font-family:Lato,proxima-nova,Helvetica 
Neue,Arial,sans-serif;font-size:1em;font-weight:400;color:hsla(0,0%,100%,.3);cursor:pointer;appearance:none;-webkit-appearance:none;-moz-appearance:none}.wy-side-nav-search>div.switch-menus>div.language-switch select:active,.wy-side-nav-search>div.switch-menus>div.language-switch select:focus,.wy-side-nav-search>div.switch-menus>div.language-switch select:hover,.wy-side-nav-search>div.switch-menus>div.version-switch select:active,.wy-side-nav-search>div.switch-menus>div.version-switch select:focus,.wy-side-nav-search>div.switch-menus>div.version-switch select:hover{background:hsla(0,0%,100%,.1);color:hsla(0,0%,100%,.5)}.wy-side-nav-search>div.switch-menus>div.language-switch select option,.wy-side-nav-search>div.switch-menus>div.version-switch select option{color:#000}.wy-side-nav-search>div.switch-menus>div.language-switch:has(>select):after,.wy-side-nav-search>div.switch-menus>div.version-switch:has(>select):after{display:inline-block;width:1.5em;height:100%;padding:.1em;content:"\f0d7";font-size:1em;line-height:1.2em;font-family:FontAwesome;text-align:center;pointer-events:none;box-sizing:border-box}.wy-nav .wy-menu-vertical header{color:#2980b9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980b9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980b9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid 
#e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .eqno .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content .eqno .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li button.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version 
button.toctree-expand{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions .rst-other-versions .rtd-current-item{font-weight:700}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}#flyout-search-form{padding:6px}.rst-content .toctree-wrapper>p.caption,.rst-content h1,.rst-content h2,.rst-content h3,.rst-content h4,.rst-content h5,.rst-content h6{margin-bottom:24px}.rst-content img{max-width:100%;height:auto}.rst-content div.figure,.rst-content figure{margin-bottom:24px}.rst-content div.figure .caption-text,.rst-content figure .caption-text{font-style:italic}.rst-content div.figure p:last-child.caption,.rst-content figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center,.rst-content figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img,.rst-content section>a>img,.rst-content section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content 
pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp,.rst-content div.highlight span.linenos{user-select:none;pointer-events:none}.rst-content div.highlight span.linenos{display:inline-block;padding-left:0;padding-right:12px;margin-right:12px;border-right:1px solid #e6e9ea}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso .last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li,.rst-content .toctree-wrapper ol.loweralpha,.rst-content .toctree-wrapper ol.loweralpha>li,.rst-content section ol.loweralpha,.rst-content section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li,.rst-content .toctree-wrapper ol.upperalpha,.rst-content .toctree-wrapper ol.upperalpha>li,.rst-content section ol.upperalpha,.rst-content section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*,.rst-content .toctree-wrapper ol li>*,.rst-content .toctree-wrapper ul li>*,.rst-content section ol li>*,.rst-content section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child,.rst-content .toctree-wrapper ol li>:first-child,.rst-content .toctree-wrapper ul li>:first-child,.rst-content section ol li>:first-child,.rst-content section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child,.rst-content .toctree-wrapper ol li>p,.rst-content .toctree-wrapper ol li>p:last-child,.rst-content .toctree-wrapper ul li>p,.rst-content .toctree-wrapper ul li>p:last-child,.rst-content section ol li>p,.rst-content section ol li>p:last-child,.rst-content section ul li>p,.rst-content section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul 
li>p:only-child,.rst-content .section ul li>p:only-child:last-child,.rst-content .toctree-wrapper ol li>p:only-child,.rst-content .toctree-wrapper ol li>p:only-child:last-child,.rst-content .toctree-wrapper ul li>p:only-child,.rst-content .toctree-wrapper ul li>p:only-child:last-child,.rst-content section ol li>p:only-child,.rst-content section ol li>p:only-child:last-child,.rst-content section ul li>p:only-child,.rst-content section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul,.rst-content .toctree-wrapper ol li>ol,.rst-content .toctree-wrapper ol li>ul,.rst-content .toctree-wrapper ul li>ol,.rst-content .toctree-wrapper ul li>ul,.rst-content section ol li>ol,.rst-content section ol li>ul,.rst-content section ul li>ol,.rst-content section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul,.rst-content .toctree-wrapper ol.simple li>*,.rst-content .toctree-wrapper ol.simple li ol,.rst-content .toctree-wrapper ol.simple li ul,.rst-content .toctree-wrapper ul.simple li>*,.rst-content .toctree-wrapper ul.simple li ol,.rst-content .toctree-wrapper ul.simple li ul,.rst-content section ol.simple li>*,.rst-content section ol.simple li ol,.rst-content section ol.simple li ul,.rst-content section ul.simple li>*,.rst-content section ul.simple li ol,.rst-content section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink{opacity:0;font-size:14px;font-family:FontAwesome;margin-left:.5em}.rst-content .code-block-caption .headerlink:focus,.rst-content .code-block-caption:hover .headerlink,.rst-content .eqno .headerlink:focus,.rst-content .eqno:hover .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink:focus,.rst-content .toctree-wrapper>p.caption:hover .headerlink,.rst-content dl dt .headerlink:focus,.rst-content dl dt:hover .headerlink,.rst-content h1 .headerlink:focus,.rst-content h1:hover .headerlink,.rst-content h2 .headerlink:focus,.rst-content h2:hover .headerlink,.rst-content h3 .headerlink:focus,.rst-content h3:hover .headerlink,.rst-content h4 .headerlink:focus,.rst-content h4:hover .headerlink,.rst-content h5 .headerlink:focus,.rst-content h5:hover .headerlink,.rst-content h6 .headerlink:focus,.rst-content h6:hover .headerlink,.rst-content p.caption .headerlink:focus,.rst-content p.caption:hover .headerlink,.rst-content p .headerlink:focus,.rst-content p:hover .headerlink,.rst-content table>caption 
.headerlink:focus,.rst-content table>caption:hover .headerlink{opacity:1}.rst-content p a{overflow-wrap:anywhere}.rst-content .wy-table td p,.rst-content .wy-table td ul,.rst-content .wy-table th p,.rst-content .wy-table th ul,.rst-content table.docutils td p,.rst-content table.docutils td ul,.rst-content table.docutils th p,.rst-content table.docutils th ul,.rst-content table.field-list td p,.rst-content table.field-list td ul,.rst-content table.field-list th p,.rst-content table.field-list th ul{font-size:inherit}.rst-content .btn:focus{outline:2px solid}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .citation-reference>span.fn-bracket,.rst-content .footnote-reference>span.fn-bracket{display:none}.rst-content .hlist{width:100%}.rst-content dl dt span.classifier:before{content:" : "}.rst-content dl dt span.classifier-delimiter{display:none!important}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:auto minmax(80%,95%)}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{display:inline-grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{display:grid;grid-template-columns:auto auto minmax(.65rem,auto) minmax(40%,95%)}html.writer-html5 .rst-content aside.citation>span.label,html.writer-html5 .rst-content aside.footnote>span.label,html.writer-html5 .rst-content div.citation>span.label{grid-column-start:1;grid-column-end:2}html.writer-html5 .rst-content aside.citation>span.backrefs,html.writer-html5 .rst-content aside.footnote>span.backrefs,html.writer-html5 .rst-content div.citation>span.backrefs{grid-column-start:2;grid-column-end:3;grid-row-start:1;grid-row-end:3}html.writer-html5 .rst-content aside.citation>p,html.writer-html5 .rst-content aside.footnote>p,html.writer-html5 .rst-content 
div.citation>p{grid-column-start:4;grid-column-end:5}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{margin-bottom:24px}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.citation>dt>span.brackets:before,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.citation>dt>span.brackets:after,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a{word-break:keep-all}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a:not(:first-child):before,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.citation>dd p,html.writer-html5 .rst-content dl.footnote>dd p{font-size:.9rem}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{padding-left:1rem;padding-right:1rem;font-size:.9rem;line-height:1.2rem}html.writer-html5 .rst-content aside.citation p,html.writer-html5 .rst-content aside.footnote p,html.writer-html5 .rst-content div.citation p{font-size:.9rem;line-height:1.2rem;margin-bottom:12px}html.writer-html5 .rst-content aside.citation span.backrefs,html.writer-html5 .rst-content aside.footnote span.backrefs,html.writer-html5 .rst-content div.citation span.backrefs{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content aside.citation span.backrefs>a,html.writer-html5 .rst-content aside.footnote span.backrefs>a,html.writer-html5 .rst-content div.citation span.backrefs>a{word-break:keep-all}html.writer-html5 .rst-content aside.citation span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content aside.footnote span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content div.citation span.backrefs>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content aside.citation span.label,html.writer-html5 .rst-content aside.footnote span.label,html.writer-html5 .rst-content div.citation span.label{line-height:1.2rem}html.writer-html5 .rst-content aside.citation-list,html.writer-html5 .rst-content aside.footnote-list,html.writer-html5 .rst-content 
div.citation-list{margin-bottom:24px}html.writer-html5 .rst-content dl.option-list kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content aside.footnote-list aside.footnote,html.writer-html5 .rst-content div.citation-list>div.citation,html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content aside.footnote-list aside.footnote code,html.writer-html5 .rst-content aside.footnote-list aside.footnote tt,html.writer-html5 .rst-content aside.footnote code,html.writer-html5 .rst-content aside.footnote tt,html.writer-html5 .rst-content div.citation-list>div.citation code,html.writer-html5 .rst-content div.citation-list>div.citation tt,html.writer-html5 .rst-content dl.citation code,html.writer-html5 .rst-content dl.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c;white-space:normal}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040;overflow-wrap:normal}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl dd>ol:last-child,.rst-content dl dd>p:last-child,.rst-content dl dd>table:last-child,.rst-content dl dd>ul:last-child{margin-bottom:0}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px;max-width:100%}html.writer-html4 .rst-content dl:not(.docutils) .k,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .k{font-style:italic}html.writer-html4 .rst-content dl:not(.docutils) .descclassname,html.writer-html4 .rst-content dl:not(.docutils) .descname,html.writer-html4 .rst-content dl:not(.docutils) .sig-name,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .sig-name{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#000}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel,.rst-content .menuselection{font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .guilabel,.rst-content .menuselection{border:1px solid #7fbbe3;background:#e7f2fa}.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>.kbd,.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>kbd{color:inherit;font-size:80%;background-color:#fff;border:1px solid #a6a6a6;border-radius:4px;box-shadow:0 2px grey;padding:2.4px 6px;margin:auto 0}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) 
format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} \ No newline at end of file diff --git a/pyk/_static/doctools.js b/pyk/_static/doctools.js new file mode 100644 index 00000000000..4d67807d17d --- /dev/null +++ b/pyk/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? 
singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/pyk/_static/documentation_options.js b/pyk/_static/documentation_options.js new file mode 100644 index 00000000000..61b20d0317f --- /dev/null +++ b/pyk/_static/documentation_options.js @@ -0,0 +1,13 @@ +const DOCUMENTATION_OPTIONS = { + VERSION: '7.1.191', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/pyk/_static/file.png b/pyk/_static/file.png new file mode 100644 index 00000000000..a858a410e4f Binary files /dev/null and b/pyk/_static/file.png differ diff --git a/pyk/_static/fonts/Lato/lato-bold.eot b/pyk/_static/fonts/Lato/lato-bold.eot new file mode 100644 index 00000000000..3361183a419 Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-bold.eot differ 
diff --git a/pyk/_static/fonts/Lato/lato-bold.ttf b/pyk/_static/fonts/Lato/lato-bold.ttf new file mode 100644 index 00000000000..29f691d5ed0 Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-bold.ttf differ diff --git a/pyk/_static/fonts/Lato/lato-bold.woff b/pyk/_static/fonts/Lato/lato-bold.woff new file mode 100644 index 00000000000..c6dff51f063 Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-bold.woff differ diff --git a/pyk/_static/fonts/Lato/lato-bold.woff2 b/pyk/_static/fonts/Lato/lato-bold.woff2 new file mode 100644 index 00000000000..bb195043cfc Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-bold.woff2 differ diff --git a/pyk/_static/fonts/Lato/lato-bolditalic.eot b/pyk/_static/fonts/Lato/lato-bolditalic.eot new file mode 100644 index 00000000000..3d4154936b4 Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-bolditalic.eot differ diff --git a/pyk/_static/fonts/Lato/lato-bolditalic.ttf b/pyk/_static/fonts/Lato/lato-bolditalic.ttf new file mode 100644 index 00000000000..f402040b3e5 Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-bolditalic.ttf differ diff --git a/pyk/_static/fonts/Lato/lato-bolditalic.woff b/pyk/_static/fonts/Lato/lato-bolditalic.woff new file mode 100644 index 00000000000..88ad05b9ff4 Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-bolditalic.woff differ diff --git a/pyk/_static/fonts/Lato/lato-bolditalic.woff2 b/pyk/_static/fonts/Lato/lato-bolditalic.woff2 new file mode 100644 index 00000000000..c4e3d804b57 Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-bolditalic.woff2 differ diff --git a/pyk/_static/fonts/Lato/lato-italic.eot b/pyk/_static/fonts/Lato/lato-italic.eot new file mode 100644 index 00000000000..3f826421a1d Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-italic.eot differ diff --git a/pyk/_static/fonts/Lato/lato-italic.ttf b/pyk/_static/fonts/Lato/lato-italic.ttf new file mode 100644 index 00000000000..b4bfc9b24aa Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-italic.ttf differ diff --git a/pyk/_static/fonts/Lato/lato-italic.woff b/pyk/_static/fonts/Lato/lato-italic.woff new file mode 100644 index 00000000000..76114bc0336 Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-italic.woff differ diff --git a/pyk/_static/fonts/Lato/lato-italic.woff2 b/pyk/_static/fonts/Lato/lato-italic.woff2 new file mode 100644 index 00000000000..3404f37e2e3 Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-italic.woff2 differ diff --git a/pyk/_static/fonts/Lato/lato-regular.eot b/pyk/_static/fonts/Lato/lato-regular.eot new file mode 100644 index 00000000000..11e3f2a5f0f Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-regular.eot differ diff --git a/pyk/_static/fonts/Lato/lato-regular.ttf b/pyk/_static/fonts/Lato/lato-regular.ttf new file mode 100644 index 00000000000..74decd9ebb8 Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-regular.ttf differ diff --git a/pyk/_static/fonts/Lato/lato-regular.woff b/pyk/_static/fonts/Lato/lato-regular.woff new file mode 100644 index 00000000000..ae1307ff5f4 Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-regular.woff differ diff --git a/pyk/_static/fonts/Lato/lato-regular.woff2 b/pyk/_static/fonts/Lato/lato-regular.woff2 new file mode 100644 index 00000000000..3bf9843328a Binary files /dev/null and b/pyk/_static/fonts/Lato/lato-regular.woff2 differ diff --git a/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot new file mode 100644 index 00000000000..79dc8efed34 
Binary files /dev/null and b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot differ diff --git a/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf new file mode 100644 index 00000000000..df5d1df2730 Binary files /dev/null and b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf differ diff --git a/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff new file mode 100644 index 00000000000..6cb60000181 Binary files /dev/null and b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff differ diff --git a/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 new file mode 100644 index 00000000000..7059e23142a Binary files /dev/null and b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 differ diff --git a/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot new file mode 100644 index 00000000000..2f7ca78a1eb Binary files /dev/null and b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot differ diff --git a/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf new file mode 100644 index 00000000000..eb52a790736 Binary files /dev/null and b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf differ diff --git a/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff new file mode 100644 index 00000000000..f815f63f99d Binary files /dev/null and b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff differ diff --git a/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 new file mode 100644 index 00000000000..f2c76e5bda1 Binary files /dev/null and b/pyk/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 differ diff --git a/pyk/_static/jquery.js b/pyk/_static/jquery.js new file mode 100644 index 00000000000..c4c6022f298 --- /dev/null +++ b/pyk/_static/jquery.js @@ -0,0 +1,2 @@ +/*! 
jQuery v3.6.0 | (c) OpenJS Foundation and other contributors | jquery.org/license */ +!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],r=Object.getPrototypeOf,s=t.slice,g=t.flat?function(e){return t.flat.call(e)}:function(e){return t.concat.apply([],e)},u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType&&"function"!=typeof e.item},x=function(e){return null!=e&&e===e.window},E=C.document,c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.6.0",S=function(e,t){return new S.fn.init(e,t)};function p(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return 
H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e&&e.namespaceURI,n=e&&(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var 
t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},j=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof 
e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||D,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,D=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function je(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function qe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var _t,zt=[],Ut=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=zt.pop()||S.expando+"_"+wt.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Ut.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Ut.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Ut,"$1"+r):!1!==e.jsonp&&(e.url+=(Tt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,zt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((_t=E.implementation.createHTMLDocument("").body).innerHTML="
",2===_t.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Fe(y.pixelPosition,function(e,t){if(t)return t=We(e,n),Pe.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return 
this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t a.language.name.localeCompare(b.language.name)); + + const languagesHTML = ` +
+
Languages
+ ${languages + .map( + (translation) => ` +
+ ${translation.language.code} +
+ `, + ) + .join("\n")} +
+ `; + return languagesHTML; + } + + function renderVersions(config) { + if (!config.versions.active.length) { + return ""; + } + const versionsHTML = ` +
+
Versions
+ ${config.versions.active + .map( + (version) => ` +
+ ${version.slug} +
+ `, + ) + .join("\n")} +
+ `; + return versionsHTML; + } + + function renderDownloads(config) { + if (!Object.keys(config.versions.current.downloads).length) { + return ""; + } + const downloadsNameDisplay = { + pdf: "PDF", + epub: "Epub", + htmlzip: "HTML", + }; + + const downloadsHTML = ` +
+
Downloads
+ ${Object.entries(config.versions.current.downloads) + .map( + ([name, url]) => ` +
+ ${downloadsNameDisplay[name]} +
+ `, + ) + .join("\n")} +
+ `; + return downloadsHTML; + } + + document.addEventListener("readthedocs-addons-data-ready", function (event) { + const config = event.detail.data(); + + const flyout = ` +
+ + Read the Docs + v: ${config.versions.current.slug} + + +
+
+ ${renderLanguages(config)} + ${renderVersions(config)} + ${renderDownloads(config)} +
+
On Read the Docs
+
+ Project Home +
+
+ Builds +
+
+ Downloads +
+
+
+
Search
+
+
+ +
+
+
+
+ + Hosted by Read the Docs + +
+
+ `; + + // Inject the generated flyout into the body HTML element. + document.body.insertAdjacentHTML("beforeend", flyout); + + // Trigger the Read the Docs Addons Search modal when clicking on the "Search docs" input from inside the flyout. + document + .querySelector("#flyout-search-form") + .addEventListener("focusin", () => { + const event = new CustomEvent("readthedocs-search-show"); + document.dispatchEvent(event); + }); + }) +} + +if (themeLanguageSelector || themeVersionSelector) { + function onSelectorSwitch(event) { + const option = event.target.selectedIndex; + const item = event.target.options[option]; + window.location.href = item.dataset.url; + } + + document.addEventListener("readthedocs-addons-data-ready", function (event) { + const config = event.detail.data(); + + const versionSwitch = document.querySelector( + "div.switch-menus > div.version-switch", + ); + if (themeVersionSelector) { + let versions = config.versions.active; + if (config.versions.current.hidden || config.versions.current.type === "external") { + versions.unshift(config.versions.current); + } + const versionSelect = ` + + `; + + versionSwitch.innerHTML = versionSelect; + versionSwitch.firstElementChild.addEventListener("change", onSelectorSwitch); + } + + const languageSwitch = document.querySelector( + "div.switch-menus > div.language-switch", + ); + + if (themeLanguageSelector) { + if (config.projects.translations.length) { + // Add the current language to the options on the selector + let languages = config.projects.translations.concat( + config.projects.current, + ); + languages = languages.sort((a, b) => + a.language.name.localeCompare(b.language.name), + ); + + const languageSelect = ` + + `; + + languageSwitch.innerHTML = languageSelect; + languageSwitch.firstElementChild.addEventListener("change", onSelectorSwitch); + } + else { + languageSwitch.remove(); + } + } + }); +} + +document.addEventListener("readthedocs-addons-data-ready", function (event) { + // Trigger the Read the Docs Addons Search modal when clicking on "Search docs" input from the topnav. + document + .querySelector("[role='search'] input") + .addEventListener("focusin", () => { + const event = new CustomEvent("readthedocs-search-show"); + document.dispatchEvent(event); + }); +}); \ No newline at end of file diff --git a/pyk/_static/language_data.js b/pyk/_static/language_data.js new file mode 100644 index 00000000000..367b8ed81b4 --- /dev/null +++ b/pyk/_static/language_data.js @@ -0,0 +1,199 @@ +/* + * language_data.js + * ~~~~~~~~~~~~~~~~ + * + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. + * + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, if available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" + v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = 
new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/pyk/_static/minus.png b/pyk/_static/minus.png new file mode 100644 index 00000000000..d96755fdaf8 Binary files /dev/null and b/pyk/_static/minus.png differ diff --git a/pyk/_static/plus.png b/pyk/_static/plus.png new file mode 100644 index 00000000000..7107cec93a9 Binary files /dev/null and b/pyk/_static/plus.png differ diff --git a/pyk/_static/pygments.css b/pyk/_static/pygments.css new file mode 100644 index 00000000000..84ab3030a93 --- /dev/null +++ b/pyk/_static/pygments.css @@ -0,0 +1,75 @@ +pre { line-height: 125%; } +td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #ffffcc } +.highlight { background: #f8f8f8; } +.highlight .c { color: #3D7B7B; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #008000; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #9C6500 } /* Comment.Preproc */ +.highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */ +.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +.highlight .gr { color: #E40000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #008400 } /* Generic.Inserted */ +.highlight .go { color: #717171 } /* Generic.Output */ +.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0044DD } /* Generic.Traceback */ +.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #008000 } /* Keyword.Pseudo */ +.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #B00040 } /* Keyword.Type */ +.highlight .m { color: #666666 } /* Literal.Number */ +.highlight .s { color: #BA2121 } /* Literal.String */ +.highlight .na { color: #687822 } /* Name.Attribute */ +.highlight .nb { color: #008000 } /* Name.Builtin */ +.highlight .nc { color: #0000FF; font-weight: bold } /* 
Name.Class */ +.highlight .no { color: #880000 } /* Name.Constant */ +.highlight .nd { color: #AA22FF } /* Name.Decorator */ +.highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #0000FF } /* Name.Function */ +.highlight .nl { color: #767600 } /* Name.Label */ +.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #19177C } /* Name.Variable */ +.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mb { color: #666666 } /* Literal.Number.Bin */ +.highlight .mf { color: #666666 } /* Literal.Number.Float */ +.highlight .mh { color: #666666 } /* Literal.Number.Hex */ +.highlight .mi { color: #666666 } /* Literal.Number.Integer */ +.highlight .mo { color: #666666 } /* Literal.Number.Oct */ +.highlight .sa { color: #BA2121 } /* Literal.String.Affix */ +.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */ +.highlight .sc { color: #BA2121 } /* Literal.String.Char */ +.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */ +.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #BA2121 } /* Literal.String.Double */ +.highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */ +.highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */ +.highlight .sx { color: #008000 } /* Literal.String.Other */ +.highlight .sr { color: #A45A77 } /* Literal.String.Regex */ +.highlight .s1 { color: #BA2121 } /* Literal.String.Single */ +.highlight .ss { color: #19177C } /* Literal.String.Symbol */ +.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #0000FF } /* Name.Function.Magic */ +.highlight .vc { color: #19177C } /* Name.Variable.Class */ +.highlight .vg { color: #19177C } /* Name.Variable.Global */ +.highlight .vi { color: #19177C } /* Name.Variable.Instance */ +.highlight .vm { color: #19177C } /* Name.Variable.Magic */ +.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/pyk/_static/searchtools.js b/pyk/_static/searchtools.js new file mode 100644 index 00000000000..92da3f8b22c --- /dev/null +++ b/pyk/_static/searchtools.js @@ -0,0 +1,619 @@ +/* + * searchtools.js + * ~~~~~~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for the full-text search. + * + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. 
+ /* + score: result => { + const [docname, title, anchor, descr, score, filename] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. + objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms, highlightTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + const contentRoot = document.documentElement.dataset.content_root; + + const [docName, title, anchor, descr, score, _filename] = item; + + let listItem = document.createElement("li"); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = contentRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = contentRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) { + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + // highlight search terms in the description + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + } + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms, anchor) + ); + // highlight search terms in the summary + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." + ); + else + Search.status.innerText = _( + "Search finished, found ${resultCount} page(s) matching the search query." 
+ ).replace('${resultCount}', resultCount); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms, + highlightTerms, +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms, highlightTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; +// Helper function used by query() to order search results. +// Each input is an array of [docname, title, anchor, descr, score, filename]. +// Order the results by score (in opposite order of appearance, since the +// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically. +const _orderResultsByScoreThenName = (a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 1 : -1; +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. + * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. + */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString, anchor) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + for (const removalQuery of [".headerlinks", "script", "style"]) { + htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() }); + } + if (anchor) { + const anchorContent = htmlElement.querySelector(`[role="main"] ${anchor}`); + if (anchorContent) return anchorContent.textContent; + + console.warn( + `Anchored content block not found. Sphinx search tries to obtain it via DOM query '[role=main] ${anchor}'. Check your theme or template.` + ); + } + + // if anchor not specified or not found, fall back to main content + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent) return docContent.textContent; + + console.warn( + "Content block not found. Sphinx search tries to obtain it via DOM query '[role=main]'. Check your theme or template." 
+ ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + Search.startPulse(); + + // index already loaded, the browser was quick! 
+ if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + _parseQuery: (query) => { + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + return [query, searchTerms, excludedTerms, highlightTerms, objectTerms]; + }, + + /** + * execute search (requires search index to be loaded) + */ + _performSearch: (query, searchTerms, excludedTerms, highlightTerms, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // Collect multiple result groups to be sorted separately and then ordered. + // Each is an array of [docname, title, anchor, descr, score, filename]. + const normalResults = []; + const nonMainIndexResults = []; + + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase().trim(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + let score = Math.round(100 * queryLower.length / title.length) + normalResults.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id, isMain] of foundEntries) { + const score = Math.round(100 * queryLower.length / entry.length); + const result = [ + docNames[file], + titles[file], + id ? 
"#" + id : "", + null, + score, + filenames[file], + ]; + if (isMain) { + normalResults.push(result); + } else { + nonMainIndexResults.push(result); + } + } + } + } + + // lookup as object + objectTerms.forEach((term) => + normalResults.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + normalResults.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) { + normalResults.forEach((item) => (item[4] = Scorer.score(item))); + nonMainIndexResults.forEach((item) => (item[4] = Scorer.score(item))); + } + + // Sort each group of results by score and then alphabetically by name. + normalResults.sort(_orderResultsByScoreThenName); + nonMainIndexResults.sort(_orderResultsByScoreThenName); + + // Combine the result groups in (reverse) order. + // Non-main index entries are typically arbitrary cross-references, + // so display them after other results. + let results = [...nonMainIndexResults, ...normalResults]; + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + return results.reverse(); + }, + + query: (query) => { + const [searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms] = Search._parseQuery(query); + const results = Search._performSearch(searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms, highlightTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. 
last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + const arr = [ + { files: terms[word], score: Scorer.term }, + { files: titleTerms[word], score: Scorer.title }, + ]; + // add support for partial matches + if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + if (!terms.hasOwnProperty(word)) { + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord)) + arr.push({ files: terms[term], score: Scorer.partialTerm }); + }); + } + if (!titleTerms.hasOwnProperty(word)) { + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord)) + arr.push({ files: titleTerms[term], score: Scorer.partialTitle }); + }); + } + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, {}); + scoreMap.get(file)[word] = record.score; + }); + }); + + // create the mapping + files.forEach((file) => { + if (!fileMap.has(file)) fileMap.set(file, [word]); + else if (fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + 
wordList.length !== filteredTermCount + ) + continue; + + // ensure that none of the excluded terms is in the search result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. + const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords, anchor) => { + const text = Search.htmlToText(htmlText, anchor); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/pyk/_static/sphinx_highlight.js b/pyk/_static/sphinx_highlight.js new file mode 100644 index 00000000000..8a96c69a194 --- /dev/null +++ b/pyk/_static/sphinx_highlight.js @@ -0,0 +1,154 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. + */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); + parent.insertBefore( + span, + parent.insertBefore( + rest, + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. 
+ */ + _highlight(rest, addItems, text, className); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + localStorage.removeItem("sphinx_highlight_terms") + }, + + initEscapeListener: () => { + // only install a listener if it is really needed + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; + if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { + SphinxHighlight.hideSearchWords(); + event.preventDefault(); + } + }); + }, +}; + +_ready(() => { + /* Do not call highlightSearchWords() when we are on the search page. + * It will highlight words from the *previous* search query. 
+ */ + if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); + SphinxHighlight.initEscapeListener(); +}); diff --git a/pyk/api/modules.html b/pyk/api/modules.html new file mode 100644 index 00000000000..49b0273d924 --- /dev/null +++ b/pyk/api/modules.html @@ -0,0 +1,256 @@ + + + + + + + + + pyk — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk

+
+ +
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.coverage.html b/pyk/api/pyk.coverage.html new file mode 100644 index 00000000000..ad80cc9813d --- /dev/null +++ b/pyk/api/pyk.coverage.html @@ -0,0 +1,183 @@ + + + + + + + + + pyk.coverage module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.coverage module

+
+
+get_rule_by_id(definition: KDefinition, rule_id: str) KRule[source]
+

Get a rule from the definition by coverage rule id.

+
+
Parameters:
+
    +
  • definition – JSON-encoded definition.

  • +
  • rule_id – String of the unique rule identifier generated by kompile --coverage.

  • +
+
+
Returns:
+

JSON-encoded rule with identifier rule_id.
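As a quick, non-authoritative usage sketch (not part of the generated page): it assumes a KDefinition has already been loaded from a kompiled directory and that the rule id comes from one line of a kompile --coverage log; the loading step is left abstract because the exact helper varies between pyk versions.

# Hedged sketch: `definition` is assumed to be a KDefinition loaded elsewhere,
# and `coverage_line` a single line from a kompile --coverage log.
from pyk.coverage import get_rule_by_id

def rule_for_coverage_line(definition, coverage_line: str):
    rule_id = coverage_line.strip()
    # Look the identifier up in the definition and return the matching KRule.
    return get_rule_by_id(definition, rule_id)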

+
+
+
+ +
+
+strip_coverage_logger(rule: KRule) KRule[source]
+
+ +
+
+translate_coverage(src_all_rules: Iterable[str], dst_all_rules: Iterable[str], dst_definition: KDefinition, src_rules_list: Iterable[str]) list[str][source]
+

Translate the coverage data from one kompiled definition to another.

+
+
Parameters:
+
    +
  • src_all_rules – Contents of allRules.txt for the definition that coverage was generated for.

  • +
  • dst_all_rules – Contents of allRules.txt for the definition that you want coverage for.

  • +
  • dst_definition – JSON-encoded definition of the dst kompiled definition.

  • +
  • src_rules_list – Actual coverage data produced.

  • +
+
+
Returns:
+

List of non-functional rules applied in the dst definition, translated from the src definition.

+
+
+
+ +
+
+translate_coverage_from_paths(src_kompiled_dir: str, dst_kompiled_dir: str, src_rules_file: PathLike) list[str][source]
+

Translate coverage information, given paths to the needed files.

+
+
Parameters:
+
    +
  • src_kompiled_dir – Path to kompiled directory of source.

  • +
  • dst_kompiled_dir – Path to kompiled directory of destination.

  • +
  • src_rules_file – Path to generated rules coverage file.

  • +
+
+
Returns:
+

Translated list of rules with non-semantic rules stripped out.
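As a rough illustration of this path-based entry point, the sketch below assumes two kompiled directories for the same semantics and a coverage file produced while running the source definition; all three paths are placeholders, not names used by pyk itself.

# Hedged sketch: the directory and file names are made up for illustration.
from pyk.coverage import translate_coverage_from_paths

translated = translate_coverage_from_paths(
    'src-kompiled',   # kompiled directory the coverage was generated against
    'dst-kompiled',   # kompiled directory you want coverage reported for
    'coverage.txt',   # rule coverage file produced by the src run
)
# `translated` is a list of rule identifiers valid for the dst definition.
for rule_id in translated:
    print(rule_id)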

+
+
+
+ +
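A minimal usage sketch of the function documented above (not part of the generated page; only the signature shown above is assumed, and the directory and file names are hypothetical):

```python
from pyk.coverage import translate_coverage_from_paths

# Hypothetical inputs: two kompiled directories for the same semantics and a
# coverage log produced by running the source definition with `--coverage`.
src_dir = 'semantics-v1-kompiled'
dst_dir = 'semantics-v2-kompiled'
coverage_file = 'coverage.txt'

# Map the recorded rule applications from the source definition onto the
# corresponding rules of the destination definition.
for rule_id in translate_coverage_from_paths(src_dir, dst_dir, coverage_file):
    print(rule_id)
```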
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.cterm.cterm.html b/pyk/api/pyk.cterm.cterm.html new file mode 100644 index 00000000000..583e6be076f --- /dev/null +++ b/pyk/api/pyk.cterm.cterm.html @@ -0,0 +1,482 @@ + + + + + + + + + pyk.cterm.cterm module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.cterm.cterm module

+
+
+class CSubst(subst: Subst | None = None, constraints: Iterable[KInner] = ())[source]
+

Bases: object

+

Store information about instantiation of a symbolic state (CTerm) to a more specific one.

+

Contains the data: +- subst: assignment to apply to free variables in the state to achieve a more specific one +- constraints: additional constraints over the free variables of the original state and the subst to add to the new state

+
+
+__call__(cterm: CTerm) CTerm[source]
+

Overload for CSubst.apply.

+
+ +
+
+__init__(subst: Subst | None = None, constraints: Iterable[KInner] = ()) None[source]
+

Construct a new CSubst given a Subst and set of constraints as KInner, performing basic sanity checks.

+
+ +
+
+__iter__() Iterator[Subst | KInner][source]
+

Return an iterator with the head being the subst and the tail being the constraints.

+
+ +
+
+add_constraint(constraint: KInner) CSubst[source]
+

Return this CSubst with an additional constraint added.

+
+ +
+
+apply(cterm: CTerm) CTerm[source]
+

Apply this CSubst to the given CTerm (instantiating the free variables, and adding the constraints).

+
+ +
+
+property constraint: KInner
+

Return the set of constraints as a single flattened constraint using mlAnd.

+
+ +
+
+constraints: tuple[KInner, ...]
+
+ +
+
+static from_dict(dct: dict[str, Any]) CSubst[source]
+

Deserialize CSubst from a dictionary representation.

+
+ +
+
+static from_pred(pred: KInner) CSubst[source]
+

Extract a CSubst from a boolean predicate.

+
+ +
+
+pred(sort_with: KDefinition | None = None, subst: bool = True, constraints: bool = True) KInner[source]
+

Return an ML predicate representing this substitution.

+
+ +
+
+subst: Subst
+
+ +
+
+to_dict() dict[str, Any][source]
+

Serialize CSubst to dictionary representation.

+
+ +
+ +
+
+class CTerm(config: KInner, constraints: Iterable[KInner] = ())[source]
+

Bases: object

+

Represent a symbolic program state, obtained and manipulated using symbolic execution.

+

Contains the data: +- config: the _configuration_ (structural component of the state, potentially containing free variables) +- constraints: conditions which limit/constrain the free variables from the config

+
+
+__init__(config: KInner, constraints: Iterable[KInner] = ()) None[source]
+

Instantiate a given CTerm, performing basic sanity checks on the config and constraints.

+
+ +
+
+__iter__() Iterator[KInner][source]
+

Return an iterator with the head being the config and the tail being the constraints.

+
+ +
+
+add_constraint(new_constraint: KInner) CTerm[source]
+

Return a new CTerm with the additional constraints.

+
+ +
+
+anti_unify(other: CTerm, keep_values: bool = False, kdef: KDefinition | None = None) tuple[CTerm, CSubst, CSubst][source]
+

Given two CTerm instances, find a more general CTerm which can instantiate to both.

+
+
Parameters:
+
    +
  • other – other CTerm to consider for finding a more general CTerm with this one.

  • +
  • keep_values – do not discard information about abstracted variables in returned result.

  • +
  • kdef (optional) – KDefinition to make analysis more precise.

  • +
+
+
Returns:
+

A tuple (cterm, csubst1, csubst2) where

+
    +
  • cterm: More general CTerm than either self or other.

  • +
  • csubst1: Constrained substitution to apply to cterm to obtain self.

  • +
  • csubst2: Constrained substitution to apply to cterm to obtain other.

  • +
+

+
+
+
+ +
+
+static bottom() CTerm[source]
+

Construct a CTerm representing no possible states.

+
+ +
+
+cell(cell: str) KInner[source]
+

Access the contents of a named cell in the config, die on failure.

+
+ +
+
+property cells: Subst
+

Return key-value store of the contents of each cell in the config.

+
+ +
+
+config: KInner
+
+ +
+
+constraints: tuple[KInner, ...]
+
+ +
+
+property free_vars: frozenset[str]
+

Return the set of free variable names contained in this CTerm.

+
+ +
+
+static from_dict(dct: dict[str, Any]) CTerm[source]
+

Deserialize a CTerm from its dictionary representation.

+
+ +
+
+static from_kast(kast: KInner) CTerm[source]
+

Interpret a given KInner as a CTerm by splitting the config and constraints (see CTerm.kast).

+
+ +
+
+property hash: str
+

Unique hash representing the contents of this CTerm.

+
+ +
+
+property is_bottom: bool
+

Check if a given CTerm is trivially empty.

+
+ +
+
+property kast: KInner
+

Return the unstructured bare KInner representation of a CTerm (see CTerm.from_kast).

+
+ +
+
+match(cterm: CTerm) Subst | None[source]
+

Find Subst instantiating this CTerm to the other, return None if no such Subst exists.

+
+ +
+
+match_with_constraint(cterm: CTerm) CSubst | None[source]
+

Find CSubst instantiating this CTerm to the other, return None if no such CSubst exists.

+
+ +
+
+remove_useless_constraints(keep_vars: Iterable[str] = ()) CTerm[source]
+

Return a new CTerm with constraints over unbound variables removed.

+
+
Parameters:
+

keep_vars – List of variables to keep constraints for even if unbound in the CTerm.

+
+
Returns:
+

A CTerm with the constraints over unbound variables removed.

+
+
+
+ +
+
+to_dict() dict[str, Any][source]
+

Serialize a CTerm to dictionary representation.

+
+ +
+
+static top() CTerm[source]
+

Construct a CTerm representing all possible states.

+
+ +
+
+try_cell(cell: str) KInner | None[source]
+

Access the contents of a named cell in the config, return None on failure.

+
+ +
+ +
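A minimal sketch of constructing and instantiating a CTerm (not part of the generated page). It assumes CTerm and CSubst are importable from pyk.cterm.cterm as documented above and that a hand-built single-cell configuration passes the constructor's sanity checks; the `<k>` cell label is hypothetical:

```python
from pyk.cterm.cterm import CSubst, CTerm
from pyk.kast.inner import KApply, KToken, KVariable, Subst

# A one-cell symbolic configuration with a single free variable X.
config = KApply('<k>', [KVariable('X', 'Int')])
cterm = CTerm(config)
print(cterm.free_vars)  # expected: frozenset({'X'})

# Instantiate X to the integer token 1 via a constrained substitution.
csubst = CSubst(Subst({'X': KToken('1', 'Int')}))
concrete = csubst(cterm)  # same as csubst.apply(cterm)
print(concrete.config)
```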
+
+anti_unify(state1: KInner, state2: KInner, kdef: KDefinition | None = None) tuple[KInner, Subst, Subst][source]
+

Return a generalized state over the two input states.

+
+
Parameters:
+
    +
  • state1 – State to generalize over, represented as bare KInner.

  • +
  • state2 – State to generalize over, represented as bare KInner.

  • +
  • kdef (optional) – KDefinition to make the analysis more precise.

  • +
+
+
+
+

Note

+

Both state1 and state2 are expected to be bare configurations with no constraints attached.

+
+
+
Returns:
+

A tuple (state, subst1, subst2) such that

+
    +
  • state: A symbolic state represented as KInner which is more general than state1 or state2.

  • +
  • subst1: A Subst which, when applied to state, recovers state1.

  • +
  • subst2: A Subst which, when applied to state, recovers state2.

  • +
+

+
+
+
+ +
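A sketch of anti-unifying two bare states with the function above (not part of the generated page; the cell label is hypothetical and only the documented signature is assumed):

```python
from pyk.cterm.cterm import anti_unify
from pyk.kast.inner import KApply, KToken

# Two bare configurations that differ only in the <k> cell contents.
state1 = KApply('<k>', [KToken('1', 'Int')])
state2 = KApply('<k>', [KToken('2', 'Int')])

general, subst1, subst2 = anti_unify(state1, state2)
print(general)          # the <k> cell now holds an abstracting variable
print(subst1(general))  # expected to recover state1
print(subst2(general))  # expected to recover state2
```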
+
+cterm_build_claim(claim_id: str, init_cterm: CTerm, final_cterm: CTerm, keep_vars: Iterable[str] = ()) tuple[KClaim, Subst][source]
+

Return a KClaim between the supplied initial and final states.

+
+
Parameters:
+
    +
  • claim_id – Label to give the claim.

  • +
  • init_cterm – State to put on LHS of the rule (constraints interpreted as requires clause).

  • +
  • final_cterm – State to put on RHS of the rule (constraints interpreted as ensures clause).

  • +
  • keep_vars – Variables to leave in the side-conditions even if not bound in the configuration.

  • +
+
+
Returns:
+

A tuple (claim, var_map) where

+
    +
  • claim: A KClaim with variable naming conventions applied +so that it should be parseable by the K Frontend.

  • +
  • var_map: The variable renamings applied to make the claim parseable by the K Frontend +(which can be undone to recover original variables).

  • +
+

+
+
+
+ +
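A sketch of turning two states into a claim with the function above (not part of the generated page; the claim label and cell label are hypothetical):

```python
from pyk.cterm.cterm import CTerm, cterm_build_claim
from pyk.kast.inner import KApply, KToken, KVariable

init_cterm = CTerm(KApply('<k>', [KVariable('X', 'Int')]))
final_cterm = CTerm(KApply('<k>', [KToken('0', 'Int')]))

claim, var_map = cterm_build_claim('x-goes-to-zero', init_cterm, final_cterm)
print(claim.body)  # rewrite from the initial to the final configuration
print(var_map)     # renamings applied to make the claim parseable
```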
+
+cterm_build_rule(rule_id: str, init_cterm: CTerm, final_cterm: CTerm, priority: int | None = None, keep_vars: Iterable[str] = (), defunc_with: KDefinition | None = None) tuple[KRule, Subst][source]
+

Return a KRule between the supplied initial and final states.

+
+
Parameters:
+
    +
  • rule_id – Label to give the rule.

  • +
  • init_cterm – State to put on LHS of the rule (constraints interpreted as requires clause).

  • +
  • final_cterm – State to put on RHS of the rule (constraints interpreted as ensures clause).

  • +
  • keep_vars – Variables to leave in the side-conditions even if not bound in the configuration.

  • +
  • priority – Priority index to use for generated rules.

  • +
  • defunc_with (optional) – KDefinition to be able to defunctionalize LHS appropriately.

  • +
+
+
Returns:
+

A tuple (rule, var_map) where

+
    +
  • rule: A KRule with variable naming conventions applied +so that it should be parseable by the K Frontend.

  • +
  • var_map: The variable renamings applied to make the rule parseable by the K Frontend +(which can be undone to recover original variables).

  • +
+

+
+
+
+ +
+
+cterms_anti_unify(cterms: Iterable[CTerm], keep_values: bool = False, kdef: KDefinition | None = None) tuple[CTerm, list[CSubst]][source]
+

Given many CTerm instances, find a more general CTerm which can instantiate to all.

+
+
Parameters:
+
    +
  • cterms – `CTerm`s to consider for finding a more general `CTerm` with this one.

  • +
  • keep_values – do not discard information about abstracted variables in returned result.

  • +
  • kdef (optional) – KDefinition to make analysis more precise.

  • +
+
+
Returns:
+

A tuple (cterm, csubsts) where

+
    +
  • cterm: More general CTerm than any of the input `CTerm`s.

  • +
  • csubsts: List of CSubst which, when applied to cterm, yield the input `CTerm`s.

  • +
+

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.cterm.html b/pyk/api/pyk.cterm.html new file mode 100644 index 00000000000..52b771f698a --- /dev/null +++ b/pyk/api/pyk.cterm.html @@ -0,0 +1,223 @@ + + + + + + + + + pyk.cterm package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.cterm.symbolic.html b/pyk/api/pyk.cterm.symbolic.html new file mode 100644 index 00000000000..1d909bfe48f --- /dev/null +++ b/pyk/api/pyk.cterm.symbolic.html @@ -0,0 +1,276 @@ + + + + + + + + + pyk.cterm.symbolic module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.cterm.symbolic module

+
+
+class CTermExecute(state, next_states, depth, vacuous, logs)[source]
+

Bases: NamedTuple

+
+
+depth: int
+

Alias for field number 2

+
+ +
+
+logs: tuple[LogEntry, ...]
+

Alias for field number 4

+
+ +
+
+next_states: tuple[NextState, ...]
+

Alias for field number 1

+
+ +
+
+state: CTerm
+

Alias for field number 0

+
+ +
+
+vacuous: bool
+

Alias for field number 3

+
+ +
+ +
+
+class CTermImplies(csubst, failing_cells, remaining_implication, logs)[source]
+

Bases: NamedTuple

+
+
+csubst: CSubst | None
+

Alias for field number 0

+
+ +
+
+failing_cells: tuple[tuple[str, KInner], ...]
+

Alias for field number 1

+
+ +
+
+logs: tuple[LogEntry, ...]
+

Alias for field number 3

+
+ +
+
+remaining_implication: KInner | None
+

Alias for field number 2

+
+ +
+ +
+
+final exception CTermSMTError(message: 'str')[source]
+

Bases: Exception

+
+ +
+
+class CTermSymbolic(kore_client: KoreClient, definition: KDefinition, *, log_succ_rewrites: bool = True, log_fail_rewrites: bool = False)[source]
+

Bases: object

+
+
+assume_defined(cterm: CTerm, module_name: str | None = None) CTerm[source]
+
+ +
+
+execute(cterm: CTerm, depth: int | None = None, cut_point_rules: Iterable[str] | None = None, terminal_rules: Iterable[str] | None = None, module_name: str | None = None) CTermExecute[source]
+
+ +
+
+get_model(cterm: CTerm, module_name: str | None = None) Subst | None[source]
+
+ +
+
+implies(antecedent: CTerm, consequent: CTerm, bind_universally: bool = False, failure_reason: bool = False, module_name: str | None = None, assume_defined: bool = False) CTermImplies[source]
+
+ +
+
+kast_simplify(kast: KInner, module_name: str | None = None) tuple[KInner, tuple[LogEntry, ...]][source]
+
+ +
+
+kast_to_kore(kinner: KInner) Pattern[source]
+
+ +
+
+kore_to_kast(pattern: Pattern) KInner[source]
+
+ +
+
+simplify(cterm: CTerm, module_name: str | None = None) tuple[CTerm, tuple[LogEntry, ...]][source]
+
+ +
+ +
+
+class NextState(state, condition)[source]
+

Bases: NamedTuple

+
+
+condition: KInner | None
+

Alias for field number 1

+
+ +
+
+state: CTerm
+

Alias for field number 0

+
+ +
+ +
+
+cterm_symbolic(definition: KDefinition, definition_dir: Path, *, id: str | None = None, port: int | None = None, kore_rpc_command: str | Iterable[str] | None = None, llvm_definition_dir: Path | None = None, smt_timeout: int | None = None, smt_retry_limit: int | None = None, smt_tactic: str | None = None, bug_report: BugReport | None = None, haskell_log_format: KoreExecLogFormat = KoreExecLogFormat.ONELINE, haskell_log_entries: Iterable[str] = (), log_axioms_file: Path | None = None, log_succ_rewrites: bool = True, log_fail_rewrites: bool = False, start_server: bool = True, maude_port: int | None = None, fallback_on: Iterable[FallbackReason] | None = None, interim_simplification: int | None = None, no_post_exec_simplify: bool = False) Iterator[CTermSymbolic][source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.dequote.html b/pyk/api/pyk.dequote.html new file mode 100644 index 00000000000..5ea4864bc80 --- /dev/null +++ b/pyk/api/pyk.dequote.html @@ -0,0 +1,164 @@ + + + + + + + + + pyk.dequote module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.dequote module

+
+
+bytes_decode(b: bytes) str[source]
+
+ +
+
+bytes_encode(s: str) bytes[source]
+
+ +
+
+dequote_bytes(s: str) str[source]
+
+ +
+
+dequote_string(s: str) str[source]
+
+ +
+
+dequoted(it: Iterable[str], *, allow_unicode: bool = True) Iterator[str][source]
+
+ +
+
+enquote_bytes(s: str) str[source]
+
+ +
+
+enquote_string(s: str) str[source]
+
+ +
+
+enquoted(it: Iterable[str], *, allow_unicode: bool = True) Iterator[str][source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.html b/pyk/api/pyk.html new file mode 100644 index 00000000000..2bee0385f14 --- /dev/null +++ b/pyk/api/pyk.html @@ -0,0 +1,1125 @@ + + + + + + + + + pyk package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk package

+
+

Subpackages

+
+ +
+
+
+

Submodules

+ +
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.att.html b/pyk/api/pyk.kast.att.html new file mode 100644 index 00000000000..f9395f1e58f --- /dev/null +++ b/pyk/api/pyk.kast.att.html @@ -0,0 +1,944 @@ + + + + + + + + + pyk.kast.att module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.att module

+
+
+class AnyType[source]
+

Bases: AttType[Any]

+
+
+from_dict(obj: Any) Any[source]
+
+ +
+
+parse(text: str) Any[source]
+
+ +
+
+to_dict(value: Any) Any[source]
+
+ +
+
+unparse(value: Any) str[source]
+
+ +
+ +
+
+final class AttEntry(key: 'AttKey[T]', value: 'T')[source]
+

Bases: Generic[T]

+
+
+key: AttKey[T]
+
+ +
+
+value: T
+
+ +
+ +
+
+final class AttKey(name: 'str', *, type: 'AttType[T]')[source]
+

Bases: Generic[T]

+
+
+name: str
+
+ +
+
+type: AttType[T]
+
+ +
+ +
+
+class AttType[source]
+

Bases: Generic[T], ABC

+
+
+abstract from_dict(obj: Any) T[source]
+
+ +
+
+abstract parse(text: str) T[source]
+
+ +
+
+abstract to_dict(value: T) Any[source]
+
+ +
+
+abstract unparse(value: T) str | None[source]
+
+ +
+ +
+
+class Atts[source]
+

Bases: object

+
+
+ALIAS: Final = AttKey(name='alias')
+
+ +
+
+ALIAS_REC: Final = AttKey(name='alias-rec')
+
+ +
+
+ANYWHERE: Final = AttKey(name='anywhere')
+
+ +
+
+ASSOC: Final = AttKey(name='assoc')
+
+ +
+
+AVOID: Final = AttKey(name='avoid')
+
+ +
+
+BRACKET: Final = AttKey(name='bracket')
+
+ +
+
+BRACKET_LABEL: Final = AttKey(name='bracketLabel')
+
+ +
+
+CELL: Final = AttKey(name='cell')
+
+ +
+
+CELL_COLLECTION: Final = AttKey(name='cellCollection')
+
+ +
+
+CELL_FRAGMENT: Final = AttKey(name='cellFragment')
+
+ +
+
+CELL_NAME: Final = AttKey(name='cellName')
+
+ +
+
+CELL_OPT_ABSENT: Final = AttKey(name='cellOptAbsent')
+
+ +
+
+CIRCULARITY: Final = AttKey(name='circularity')
+
+ +
+
+COLOR: Final = AttKey(name='color')
+
+ +
+
+COLORS: Final = AttKey(name='colors')
+
+ +
+
+COMM: Final = AttKey(name='comm')
+
+ +
+
+CONCAT: Final = AttKey(name='concat')
+
+ +
+
+CONCRETE: Final = AttKey(name='concrete')
+
+ +
+
+CONSTRUCTOR: Final = AttKey(name='constructor')
+
+ +
+
+DEPENDS: Final = AttKey(name='depends')
+
+ +
+
+DIGEST: Final = AttKey(name='digest')
+
+ +
+
+ELEMENT: Final = AttKey(name='element')
+
+ +
+
+EXIT: Final = AttKey(name='exit')
+
+ +
+
+FORMAT: Final = AttKey(name='format')
+
+ +
+
+FRESH_GENERATOR: Final = AttKey(name='freshGenerator')
+
+ +
+
+FUNCTION: Final = AttKey(name='function')
+
+ +
+
+FUNCTIONAL: Final = AttKey(name='functional')
+
+ +
+
+GROUP: Final = AttKey(name='group')
+
+ +
+
+HAS_DOMAIN_VALUES: Final = AttKey(name='hasDomainValues')
+
+ +
+
+HOOK: Final = AttKey(name='hook')
+
+ +
+
+IDEM: Final = AttKey(name='idem')
+
+ +
+
+IMPURE: Final = AttKey(name='impure')
+
+ +
+
+INDEX: Final = AttKey(name='index')
+
+ +
+
+INITIALIZER: Final = AttKey(name='initializer')
+
+ +
+
+INJECTIVE: Final = AttKey(name='injective')
+
+ +
+
+LABEL: Final = AttKey(name='label')
+
+ +
+
+LEFT: Final = AttKey(name='left')
+
+ +
+
+LOCATION: Final = AttKey(name='org.kframework.attributes.Location')
+
+ +
+
+MACRO: Final = AttKey(name='macro')
+
+ +
+
+MACRO_REC: Final = AttKey(name='macro-rec')
+
+ +
+
+MAINCELL: Final = AttKey(name='maincell')
+
+ +
+
+MULTIPLICITY: Final = AttKey(name='multiplicity')
+
+ +
+
+NO_EVALUATORS: Final = AttKey(name='no-evaluators')
+
+ +
+
+OVERLOAD: Final = AttKey(name='overload')
+
+ +
+
+OWISE: Final = AttKey(name='owise')
+
+ +
+
+PREDICATE: Final = AttKey(name='predicate')
+
+ +
+
+PREFER: Final = AttKey(name='prefer')
+
+ +
+
+PRIORITIES: Final = AttKey(name='priorities')
+
+ +
+
+PRIORITY: Final = AttKey(name='priority')
+
+ +
+
+PRIVATE: Final = AttKey(name='private')
+
+ +
+
+PRODUCTION: Final = AttKey(name='org.kframework.definition.Production')
+
+ +
+
+PROJECTION: Final = AttKey(name='projection')
+
+ +
+
+RETURNS_UNIT: Final = AttKey(name='returnsUnit')
+
+ +
+
+RIGHT: Final = AttKey(name='right')
+
+ +
+
+SEQSTRICT: Final = AttKey(name='seqstrict')
+
+ +
+
+SIMPLIFICATION: Final = AttKey(name='simplification')
+
+ +
+
+SMTLEMMA: Final = AttKey(name='smt-lemma')
+
+ +
+
+SORT: Final = AttKey(name='org.kframework.kore.Sort')
+
+ +
+
+SOURCE: Final = AttKey(name='org.kframework.attributes.Source')
+
+ +
+
+STRICT: Final = AttKey(name='strict')
+
+ +
+
+SYMBOL: Final = AttKey(name='symbol')
+
+ +
+
+SYMBOLIC: Final = AttKey(name='symbolic')
+
+ +
+
+SYNTAX_MODULE: Final = AttKey(name='syntaxModule')
+
+ +
+
+TERMINALS: Final = AttKey(name='terminals')
+
+ +
+
+TERMINATOR_SYMBOL: Final = AttKey(name='terminator-symbol')
+
+ +
+
+TOKEN: Final = AttKey(name='token')
+
+ +
+
+TOTAL: Final = AttKey(name='total')
+
+ +
+
+TRUSTED: Final = AttKey(name='trusted')
+
+ +
+
+TYPE: Final = AttKey(name='type')
+
+ +
+
+UNIQUE_ID: Final = AttKey(name='UNIQUE_ID')
+
+ +
+
+UNIT: Final = AttKey(name='unit')
+
+ +
+
+UNPARSE_AVOID: Final = AttKey(name='unparseAvoid')
+
+ +
+
+UPDATE: Final = AttKey(name='update')
+
+ +
+
+USER_LIST: Final = AttKey(name='userList')
+
+ +
+
+WRAP_ELEMENT: Final = AttKey(name='wrapElement')
+
+ +
+
+classmethod keys() FrozenDict[str, AttKey][source]
+
+ +
+ +
+
+class ColorType[source]
+

Bases: AttType[Color]

+
+
+from_dict(obj: Any) Color[source]
+
+ +
+
+parse(text: str) Color[source]
+
+ +
+
+to_dict(value: Color) str[source]
+
+ +
+
+unparse(value: Color) str[source]
+
+ +
+ +
+
+class ColorsType[source]
+

Bases: AttType[tuple[Color, …]]

+
+
+from_dict(obj: Any) tuple[Color, ...][source]
+
+ +
+
+parse(text: str) tuple[Color, ...][source]
+
+ +
+
+to_dict(value: tuple[Color, ...]) str[source]
+
+ +
+
+unparse(value: tuple[Color, ...]) str[source]
+
+ +
+ +
+
+final class Format(tokens: 'Iterable[str]' = ())[source]
+

Bases: object

+
+
+classmethod parse(s: str) Format[source]
+
+ +
+
+tokens: tuple[str, ...]
+
+ +
+
+unparse() str[source]
+
+ +
+ +
+
+class FormatType[source]
+

Bases: AttType[Format]

+
+
+from_dict(obj: Any) Format[source]
+
+ +
+
+parse(text: str) Format[source]
+
+ +
+
+to_dict(value: Format) Any[source]
+
+ +
+
+unparse(value: Format) str[source]
+
+ +
+ +
+
+class IntType[source]
+

Bases: AttType[int]

+
+
+from_dict(obj: Any) int[source]
+
+ +
+
+parse(text: str) int[source]
+
+ +
+
+to_dict(value: int) str[source]
+
+ +
+
+unparse(value: int) str[source]
+
+ +
+ +
+
+final class KAtt(entries: 'Iterable[AttEntry]' = ())[source]
+

Bases: KAst, Mapping[AttKey, Any]

+
+
+atts: FrozenDict[AttKey, Any]
+
+ +
+
+discard(keys: Container[AttKey]) KAtt[source]
+
+ +
+
+drop_source() KAtt[source]
+
+ +
+
+entries() Iterator[AttEntry][source]
+
+ +
+
+classmethod from_dict(d: Mapping[str, Any]) KAtt[source]
+
+ +
+
+get(key: AttKey[T], /) T | None[source]
+
+get(key: AttKey[T], /, default: U) T | U
+
+ +
+
+classmethod parse(d: Mapping[str, str]) KAtt[source]
+
+ +
+
+property pretty: str
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+
+update(entries: Iterable[AttEntry]) KAtt[source]
+
+ +
+ +
+
+class LocationType[source]
+

Bases: AttType[tuple[int, int, int, int]]

+
+
+from_dict(obj: Any) tuple[int, int, int, int][source]
+
+ +
+
+parse(text: str) tuple[int, int, int, int][source]
+
+ +
+
+to_dict(value: tuple[int, int, int, int]) Any[source]
+
+ +
+
+unparse(value: tuple[int, int, int, int]) str[source]
+
+ +
+ +
+
+class NoneType[source]
+

Bases: AttType[None]

+
+
+from_dict(obj: Any) None[source]
+
+ +
+
+parse(text: str) None[source]
+
+ +
+
+to_dict(value: None) Any[source]
+
+ +
+
+unparse(value: None) None[source]
+
+ +
+ +
+
+class OptionalType(value_type: AttType[T])[source]
+

Bases: Generic[T], AttType[T | None]

+
+
+from_dict(obj: Any) T | None[source]
+
+ +
+
+parse(text: str) T | None[source]
+
+ +
+
+to_dict(value: T | None) Any[source]
+
+ +
+
+unparse(value: T | None) str | None[source]
+
+ +
+ +
+
+class PathType[source]
+

Bases: AttType[Path]

+
+
+from_dict(obj: Any) Path[source]
+
+ +
+
+parse(text: str) Path[source]
+
+ +
+
+to_dict(value: Path) Any[source]
+
+ +
+
+unparse(value: Path) str[source]
+
+ +
+ +
+
+class StrType[source]
+

Bases: AttType[str]

+
+
+from_dict(obj: Any) str[source]
+
+ +
+
+parse(text: str) str[source]
+
+ +
+
+to_dict(value: str) Any[source]
+
+ +
+
+unparse(value: str) str[source]
+
+ +
+ +
+
+class WithKAtt[source]
+

Bases: ABC

+
+
+att: KAtt
+
+ +
+
+abstract let_att(att: KAtt) W[source]
+
+ +
+
+map_att(f: Callable[[KAtt], KAtt]) W[source]
+
+ +
+
+update_atts(entries: Iterable[AttEntry]) W[source]
+
+ +
+ +
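A sketch of building and querying an attribute map (not part of the generated page); it assumes the names are importable from pyk.kast.att as documented above and that Atts.LABEL stores a plain string:

```python
from pyk.kast.att import AttEntry, Atts, KAtt

att = KAtt(entries=[AttEntry(Atts.LABEL, 'my-rule')])
print(att.get(Atts.LABEL))  # expected: 'my-rule'
print(att.pretty)           # human-readable rendering of the attributes

# All attribute keys known to pyk, indexed by name.
print(sorted(Atts.keys()))
```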
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.color.html b/pyk/api/pyk.kast.color.html new file mode 100644 index 00000000000..da5396345f2 --- /dev/null +++ b/pyk/api/pyk.kast.color.html @@ -0,0 +1,1067 @@ + + + + + + + + + pyk.kast.color module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.color module

+
+
+class Color(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+ALICE_BLUE = 'AliceBlue'
+
+ +
+
+ANTIQUE_WHITE = 'AntiqueWhite'
+
+ +
+
+APRICOT = 'Apricot'
+
+ +
+
+AQUA = 'Aqua'
+
+ +
+
+AQUAMARINE = 'Aquamarine'
+
+ +
+
+AZURE = 'Azure'
+
+ +
+
+BEIGE = 'Beige'
+
+ +
+
+BISQUE = 'Bisque'
+
+ +
+
+BITTERSWEET = 'Bittersweet'
+
+ +
+
+BLACK = 'black'
+
+ +
+
+BLANCHED_ALMOND = 'BlanchedAlmond'
+
+ +
+
+BLUE = 'blue'
+
+ +
+
+BLUE_GREEN = 'BlueGreen'
+
+ +
+
+BLUE_VIOLET = 'BlueViolet'
+
+ +
+
+BRICK_RED = 'BrickRed'
+
+ +
+
+BROWN = 'brown'
+
+ +
+
+BURLY_WOOD = 'BurlyWood'
+
+ +
+
+BURNT_ORANGE = 'BurntOrange'
+
+ +
+
+CADET_BLUE = 'CadetBlue'
+
+ +
+
+CARNATION_PINK = 'CarnationPink'
+
+ +
+
+CERULEAN = 'Cerulean'
+
+ +
+
+CHARTREUSE = 'Chartreuse'
+
+ +
+
+CHOCOLATE = 'Chocolate'
+
+ +
+
+CORAL = 'Coral'
+
+ +
+
+CORNFLOWER_BLUE = 'CornflowerBlue'
+
+ +
+
+CORNSILK = 'Cornsilk'
+
+ +
+
+CRIMSON = 'Crimson'
+
+ +
+
+CYAN = 'cyan'
+
+ +
+
+DANDELION = 'Dandelion'
+
+ +
+
+DARKGRAY = 'darkgray'
+
+ +
+
+DARK_BLUE = 'DarkBlue'
+
+ +
+
+DARK_CYAN = 'DarkCyan'
+
+ +
+
+DARK_GOLDENROD = 'DarkGoldenrod'
+
+ +
+
+DARK_GRAY = 'DarkGray'
+
+ +
+
+DARK_GREEN = 'DarkGreen'
+
+ +
+
+DARK_GREY = 'DarkGrey'
+
+ +
+
+DARK_KHAKI = 'DarkKhaki'
+
+ +
+
+DARK_MAGENTA = 'DarkMagenta'
+
+ +
+
+DARK_OLIVE_GREEN = 'DarkOliveGreen'
+
+ +
+
+DARK_ORANGE = 'DarkOrange'
+
+ +
+
+DARK_ORCHID = 'DarkOrchid'
+
+ +
+
+DARK_RED = 'DarkRed'
+
+ +
+
+DARK_SALMON = 'DarkSalmon'
+
+ +
+
+DARK_SEA_GREEN = 'DarkSeaGreen'
+
+ +
+
+DARK_SLATE_BLUE = 'DarkSlateBlue'
+
+ +
+
+DARK_SLATE_GRAY = 'DarkSlateGray'
+
+ +
+
+DARK_SLATE_GREY = 'DarkSlateGrey'
+
+ +
+
+DARK_TURQUOISE = 'DarkTurquoise'
+
+ +
+
+DARK_VIOLET = 'DarkViolet'
+
+ +
+
+DEEP_PINK = 'DeepPink'
+
+ +
+
+DEEP_SKY_BLUE = 'DeepSkyBlue'
+
+ +
+
+DIM_GRAY = 'DimGray'
+
+ +
+
+DIM_GREY = 'DimGrey'
+
+ +
+
+DODGER_BLUE = 'DodgerBlue'
+
+ +
+
+EMERALD = 'Emerald'
+
+ +
+
+FIRE_BRICK = 'FireBrick'
+
+ +
+
+FLORAL_WHITE = 'FloralWhite'
+
+ +
+
+FOREST_GREEN = 'ForestGreen'
+
+ +
+
+FUCHSIA = 'Fuchsia'
+
+ +
+
+GAINSBORO = 'Gainsboro'
+
+ +
+
+GHOST_WHITE = 'GhostWhite'
+
+ +
+
+GOLD = 'Gold'
+
+ +
+
+GOLDENROD = 'Goldenrod'
+
+ +
+
+GRAY = 'gray'
+
+ +
+
+GREEN = 'green'
+
+ +
+
+GREEN_YELLOW = 'GreenYellow'
+
+ +
+
+GREY = 'Grey'
+
+ +
+
+HONEYDEW = 'Honeydew'
+
+ +
+
+HOT_PINK = 'HotPink'
+
+ +
+
+INDIAN_RED = 'IndianRed'
+
+ +
+
+INDIGO = 'Indigo'
+
+ +
+
+IVORY = 'Ivory'
+
+ +
+
+JUNGLE_GREEN = 'JungleGreen'
+
+ +
+
+KHAKI = 'Khaki'
+
+ +
+
+LAVENDER = 'Lavender'
+
+ +
+
+LAVENDER_BLUSH = 'LavenderBlush'
+
+ +
+
+LAWN_GREEN = 'LawnGreen'
+
+ +
+
+LEMON_CHIFFON = 'LemonChiffon'
+
+ +
+
+LIGHTGRAY = 'lightgray'
+
+ +
+
+LIGHT_BLUE = 'LightBlue'
+
+ +
+
+LIGHT_CORAL = 'LightCoral'
+
+ +
+
+LIGHT_CYAN = 'LightCyan'
+
+ +
+
+LIGHT_GOLDENROD = 'LightGoldenrod'
+
+ +
+
+LIGHT_GOLDENROD_YELLOW = 'LightGoldenrodYellow'
+
+ +
+
+LIGHT_GRAY = 'LightGray'
+
+ +
+
+LIGHT_GREEN = 'LightGreen'
+
+ +
+
+LIGHT_GREY = 'LightGrey'
+
+ +
+
+LIGHT_PINK = 'LightPink'
+
+ +
+
+LIGHT_SALMON = 'LightSalmon'
+
+ +
+
+LIGHT_SEA_GREEN = 'LightSeaGreen'
+
+ +
+
+LIGHT_SKY_BLUE = 'LightSkyBlue'
+
+ +
+
+LIGHT_SLATE_BLUE = 'LightSlateBlue'
+
+ +
+
+LIGHT_SLATE_GRAY = 'LightSlateGray'
+
+ +
+
+LIGHT_SLATE_GREY = 'LightSlateGrey'
+
+ +
+
+LIGHT_STEEL_BLUE = 'LightSteelBlue'
+
+ +
+
+LIGHT_YELLOW = 'LightYellow'
+
+ +
+
+LIME = 'lime'
+
+ +
+
+LIME_GREEN = 'LimeGreen'
+
+ +
+
+LINEN = 'Linen'
+
+ +
+
+MAGENTA = 'magenta'
+
+ +
+
+MAHOGANY = 'Mahogany'
+
+ +
+
+MAROON = 'Maroon'
+
+ +
+
+MEDIUM_AQUAMARINE = 'MediumAquamarine'
+
+ +
+
+MEDIUM_BLUE = 'MediumBlue'
+
+ +
+
+MEDIUM_ORCHID = 'MediumOrchid'
+
+ +
+
+MEDIUM_PURPLE = 'MediumPurple'
+
+ +
+
+MEDIUM_SEA_GREEN = 'MediumSeaGreen'
+
+ +
+
+MEDIUM_SLATE_BLUE = 'MediumSlateBlue'
+
+ +
+
+MEDIUM_SPRING_GREEN = 'MediumSpringGreen'
+
+ +
+
+MEDIUM_TURQUOISE = 'MediumTurquoise'
+
+ +
+
+MEDIUM_VIOLET_RED = 'MediumVioletRed'
+
+ +
+
+MELON = 'Melon'
+
+ +
+
+MIDNIGHT_BLUE = 'MidnightBlue'
+
+ +
+
+MINT_CREAM = 'MintCream'
+
+ +
+
+MISTY_ROSE = 'MistyRose'
+
+ +
+
+MOCCASIN = 'Moccasin'
+
+ +
+
+MULBERRY = 'Mulberry'
+
+ +
+
+NAVAJO_WHITE = 'NavajoWhite'
+
+ +
+
+NAVY = 'Navy'
+
+ +
+
+NAVY_BLUE = 'NavyBlue'
+
+ +
+
+OLD_LACE = 'OldLace'
+
+ +
+
+OLIVE = 'olive'
+
+ +
+
+OLIVE_DRAB = 'OliveDrab'
+
+ +
+
+OLIVE_GREEN = 'OliveGreen'
+
+ +
+
+ORANGE = 'orange'
+
+ +
+
+ORANGE_RED = 'OrangeRed'
+
+ +
+
+ORCHID = 'Orchid'
+
+ +
+
+PALE_GOLDENROD = 'PaleGoldenrod'
+
+ +
+
+PALE_GREEN = 'PaleGreen'
+
+ +
+
+PALE_TURQUOISE = 'PaleTurquoise'
+
+ +
+
+PALE_VIOLET_RED = 'PaleVioletRed'
+
+ +
+
+PAPAYA_WHIP = 'PapayaWhip'
+
+ +
+
+PEACH = 'Peach'
+
+ +
+
+PEACH_PUFF = 'PeachPuff'
+
+ +
+
+PERIWINKLE = 'Periwinkle'
+
+ +
+
+PERU = 'Peru'
+
+ +
+
+PINE_GREEN = 'PineGreen'
+
+ +
+
+PINK = 'pink'
+
+ +
+
+PLUM = 'Plum'
+
+ +
+
+POWDER_BLUE = 'PowderBlue'
+
+ +
+
+PROCESS_BLUE = 'ProcessBlue'
+
+ +
+
+PURPLE = 'purple'
+
+ +
+
+RAW_SIENNA = 'RawSienna'
+
+ +
+
+RED = 'red'
+
+ +
+
+RED_ORANGE = 'RedOrange'
+
+ +
+
+RED_VIOLET = 'RedViolet'
+
+ +
+
+RHODAMINE = 'Rhodamine'
+
+ +
+
+ROSY_BROWN = 'RosyBrown'
+
+ +
+
+ROYAL_BLUE = 'RoyalBlue'
+
+ +
+
+ROYAL_PURPLE = 'RoyalPurple'
+
+ +
+
+RUBINE_RED = 'RubineRed'
+
+ +
+
+SADDLE_BROWN = 'SaddleBrown'
+
+ +
+
+SALMON = 'Salmon'
+
+ +
+
+SANDY_BROWN = 'SandyBrown'
+
+ +
+
+SEASHELL = 'Seashell'
+
+ +
+
+SEA_GREEN = 'SeaGreen'
+
+ +
+
+SEPIA = 'Sepia'
+
+ +
+
+SIENNA = 'Sienna'
+
+ +
+
+SILVER = 'Silver'
+
+ +
+
+SKY_BLUE = 'SkyBlue'
+
+ +
+
+SLATE_BLUE = 'SlateBlue'
+
+ +
+
+SLATE_GRAY = 'SlateGray'
+
+ +
+
+SLATE_GREY = 'SlateGrey'
+
+ +
+
+SNOW = 'Snow'
+
+ +
+
+SPRING_GREEN = 'SpringGreen'
+
+ +
+
+STEEL_BLUE = 'SteelBlue'
+
+ +
+
+TAN = 'Tan'
+
+ +
+
+TEAL = 'teal'
+
+ +
+
+TEAL_BLUE = 'TealBlue'
+
+ +
+
+THISTLE = 'Thistle'
+
+ +
+
+TOMATO = 'Tomato'
+
+ +
+
+TURQUOISE = 'Turquoise'
+
+ +
+
+VIOLET = 'violet'
+
+ +
+
+VIOLET_RED = 'VioletRed'
+
+ +
+
+WHEAT = 'Wheat'
+
+ +
+
+WHITE = 'white'
+
+ +
+
+WHITE_SMOKE = 'WhiteSmoke'
+
+ +
+
+WILD_STRAWBERRY = 'WildStrawberry'
+
+ +
+
+YELLOW = 'yellow'
+
+ +
+
+YELLOW_GREEN = 'YellowGreen'
+
+ +
+
+YELLOW_ORANGE = 'YellowOrange'
+
+ +
+
+property ansi_code: str
+
+ +
+
+static reset(*, file: IO[str] = <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>) None[source]
+
+ +
+
+static reset_code() str[source]
+
+ +
+
+set(*, file: IO[str] = <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>) None[source]
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.formatter.html b/pyk/api/pyk.kast.formatter.html new file mode 100644 index 00000000000..bbb2651f617 --- /dev/null +++ b/pyk/api/pyk.kast.formatter.html @@ -0,0 +1,156 @@ + + + + + + + + + pyk.kast.formatter module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.formatter module

+
+
+class Formatter(definition: KDefinition, *, indent: int = 0, brackets: bool = True)[source]
+

Bases: object

+
+
+definition: KDefinition
+
+ +
+
+format(term: KInner) str[source]
+
+ +
+ +
+
+add_brackets(definition: KDefinition, term: KInner) KInner[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.html b/pyk/api/pyk.kast.html new file mode 100644 index 00000000000..5539e9125c4 --- /dev/null +++ b/pyk/api/pyk.kast.html @@ -0,0 +1,1317 @@ + + + + + + + + + pyk.kast package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast package

+
+

Submodules

+
+ +
+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.inner.html b/pyk/api/pyk.kast.inner.html new file mode 100644 index 00000000000..45bf4d52a49 --- /dev/null +++ b/pyk/api/pyk.kast.inner.html @@ -0,0 +1,860 @@ + + + + + + + + + pyk.kast.inner module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.inner module

+
+
+final class KApply(label: str | KLabel, args: Iterable[KInner])[source]
+
+final class KApply(label: str | KLabel, *args: KInner)
+

Bases: KInner

+

Represent the application of a KLabel in a K AST to arguments.

+
+
+__init__(label: str | KLabel, args: Iterable[KInner])[source]
+
+__init__(label: str | KLabel, *args: KInner)
+

Construct a new KApply given the input KLabel or str, applied to arguments.

+
+ +
+
+args: tuple[KInner, ...]
+
+ +
+
+property arity: int
+

Return the count of the arguments.

+
+ +
+
+property is_cell: bool
+

Return whether this is a cell-label application (based on a heuristic about label names).

+
+ +
+
+label: KLabel
+
+ +
+
+let(*, label: str | KLabel | None = None, args: Iterable[KInner] | None = None) KApply[source]
+

Return a copy of this KApply with either the label or the arguments updated.

+
+ +
+
+let_terms(terms: Iterable[KInner]) KApply[source]
+
+ +
+
+match(term: KInner) Subst | None[source]
+
+ +
+
+property terms: tuple[KInner, ...]
+
+ +
+ +
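A sketch of constructing a term with the classes on this page (not part of the generated page); `_+Int_` and the sorts used are ordinary K names, treated here as hypothetical:

```python
from pyk.kast.inner import KApply, KToken, KVariable

# X +Int 1, built directly as a K AST term.
term = KApply('_+Int_', [KVariable('X', 'Int'), KToken('1', 'Int')])
print(term.arity)      # 2
print(term.label)      # the KLabel wrapping '_+Int_'
print(term.to_dict())  # JSON-ready dictionary representation
```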
+
+final class KAs(pattern: KInner, alias: KInner)[source]
+

Bases: KInner

+

Represent a K #as pattern in the K AST format, with the original pattern and the variable alias.

+
+
+__init__(pattern: KInner, alias: KInner)[source]
+

Construct a new KAs given the original pattern and the alias.

+
+ +
+
+alias: KInner
+
+ +
+
+let(*, pattern: KInner | None = None, alias: KInner | None = None) KAs[source]
+

Return a copy of this KAs with potentially the pattern or alias updated.

+
+ +
+
+let_terms(terms: Iterable[KInner]) KAs[source]
+
+ +
+
+match(term: KInner) Subst | None[source]
+
+ +
+
+pattern: KInner
+
+ +
+
+property terms: tuple[KInner, KInner]
+
+ +
+ +
+
+class KInner[source]
+

Bases: KAst

+

Represent the AST of a given K inner term.

+

This class represents the AST of a given term. +The nodes in the AST should be coming from a given KDefinition, so that they can be checked for well-typedness.

+
+
+static from_dict(dct: Mapping[str, Any]) KInner[source]
+

Deserialize a given KInner into a more specific type from a dictionary.

+
+ +
+
+static from_json(s: str) KInner[source]
+
+ +
+
+abstract let_terms(terms: Iterable[KInner]) KI[source]
+

Set children of this given KInner.

+
+ +
+
+final map_inner(f: Callable[[KInner], KInner]) KI[source]
+

Apply a transformation to all children of this given KInner.

+
+ +
+
+abstract match(term: KInner) Subst | None[source]
+

Perform syntactic pattern matching and return the substitution.

+
+
Parameters:
+

term – Term to match.

+
+
Returns:
+

A substitution instantiating self to term if one exists, None otherwise.

+
+
+
+ +
+
+abstract property terms: tuple[KInner, ...]
+

Returns the children of this given KInner.

+
+ +
+
+final to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KLabel(name: str, params: Iterable[str | KSort])[source]
+
+final class KLabel(name: str, *params: str | KSort)
+

Bases: KAst

+

Represents a symbol that can be applied in a K AST, potentially with sort parameters.

+
+
+__init__(name: str, params: Iterable[str | KSort])[source]
+
+__init__(name: str, *params: str | KSort)
+

Construct a new KLabel, with optional sort parameters.

+
+ +
+
+__iter__() Iterator[str | KSort][source]
+

Return this symbol as iterator with the name as the head and the parameters as the tail.

+
+ +
+
+apply(args: Iterable[KInner]) KApply[source]
+
+apply(*args: KInner) KApply
+

Construct a KApply with this KLabel as the AST head and the supplied parameters as the arguments.

+
+ +
+
+static from_dict(d: Mapping[str, Any]) KLabel[source]
+
+ +
+
+let(*, name: str | None = None, params: Iterable[str | KSort] | None = None) KLabel[source]
+

Return a copy of this KLabel with potentially the name or sort parameters updated.

+
+ +
+
+name: str
+
+ +
+
+params: tuple[KSort, ...]
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KRewrite(lhs: KInner, rhs: KInner)[source]
+

Bases: KInner

+

Represent a K rewrite in the K AST.

+
+
+__init__(lhs: KInner, rhs: KInner)[source]
+

Construct a KRewrite given the LHS (left-hand-side) and RHS (right-hand-side) to use.

+
+ +
+
+__iter__() Iterator[KInner][source]
+

Return a two-element iterator with the LHS first and RHS second.

+
+ +
+
+apply(term: KInner) KInner[source]
+

Attempt rewriting once at every position in a term bottom-up.

+
+
Parameters:
+

term – Term to rewrite.

+
+
Returns:
+

The term with rewrites applied at every node once starting from the bottom.

+
+
+
+ +
+
+apply_top(term: KInner) KInner[source]
+

Rewrite a given term at the top.

+
+
Parameters:
+

term – Term to rewrite.

+
+
Returns:
+

The term with the rewrite applied once at the top.

+
+
+
+ +
+
+let(*, lhs: KInner | None = None, rhs: KInner | None = None) KRewrite[source]
+

Return a copy of this KRewrite with potentially the LHS or RHS updated.

+
+ +
+
+let_terms(terms: Iterable[KInner]) KRewrite[source]
+
+ +
+
+lhs: KInner
+
+ +
+
+match(term: KInner) Subst | None[source]
+
+ +
+
+replace(term: KInner) KInner[source]
+

Similar to apply but using exact syntactic matching instead of pattern matching.

+
+ +
+
+replace_top(term: KInner) KInner[source]
+

Similar to apply_top but using exact syntactic matching instead of pattern matching.

+
+ +
+
+rhs: KInner
+
+ +
+
+property terms: tuple[KInner, KInner]
+
+ +
+ +
+
+final class KSequence(items: Iterable[KInner])[source]
+
+final class KSequence(*items: KInner)
+

Bases: KInner, Sequence[KInner]

+

Represent an associative list of K as a cons-list of KItem for sequencing computation in K AST format.

+
+
+__init__(items: Iterable[KInner])[source]
+
+__init__(*items: KInner)
+

Construct a new KSequence given the arguments.

+
+ +
+
+property arity: int
+

Return the count of KSequence items.

+
+ +
+
+items: tuple[KInner, ...]
+
+ +
+
+let(*, items: Iterable[KInner] | None = None) KSequence[source]
+

Return a copy of this KSequence with the items potentially updated.

+
+ +
+
+let_terms(terms: Iterable[KInner]) KSequence[source]
+
+ +
+
+match(term: KInner) Subst | None[source]
+
+ +
+
+property terms: tuple[KInner, ...]
+
+ +
+ +
+
+final class KSort(name: str)[source]
+

Bases: KAst

+

Store a simple sort name.

+
+
+__init__(name: str)[source]
+

Construct a new sort given the name.

+
+ +
+
+static from_dict(d: Mapping[str, Any]) KSort[source]
+
+ +
+
+let(*, name: str | None = None) KSort[source]
+

Return a new KSort with the name potentially updated.

+
+ +
+
+name: str
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KToken(token: str, sort: str | KSort)[source]
+

Bases: KInner

+

Represent a domain-value in K AST.

+
+
+__init__(token: str, sort: str | KSort)[source]
+

Construct a new KToken with a given string representation in the supplied sort.

+
+ +
+
+let(*, token: str | None = None, sort: str | KSort | None = None) KToken[source]
+

Return a copy of the KToken with the token or sort potentially updated.

+
+ +
+
+let_terms(terms: Iterable[KInner]) KToken[source]
+
+ +
+
+match(term: KInner) Subst | None[source]
+
+ +
+
+sort: KSort
+
+ +
+
+property terms: tuple[()]
+
+ +
+
+token: str
+
+ +
+ +
+
+final class KVariable(name: str, sort: str | KSort | None = None)[source]
+

Bases: KInner

+

Represent a logical variable in a K AST, with a name and optionally a sort.

+
+
+__init__(name: str, sort: str | KSort | None = None)[source]
+

Construct a new KVariable with a given name and optional sort.

+
+ +
+
+__lt__(other: Any) bool[source]
+

Lexicographic comparison of KVariable based on name for sorting.

+
+ +
+
+let(*, name: str | None = None, sort: str | KSort | None = None) KVariable[source]
+

Return a copy of this KVariable with potentially the name or sort updated.

+
+ +
+
+let_sort(sort: KSort | None) KVariable[source]
+

Return a copy of this KVariable with just the sort updated.

+
+ +
+
+let_terms(terms: Iterable[KInner]) KVariable[source]
+
+ +
+
+match(term: KInner) Subst[source]
+
+ +
+
+name: str
+
+ +
+
+sort: KSort | None
+
+ +
+
+property terms: tuple[()]
+
+ +
+ +
+
+class Subst(subst: Mapping[str, KInner] = FrozenDict({}))[source]
+

Bases: Mapping[str, KInner]

+

Represents a substitution, which is a binding of variables to values of KInner.

+
+
+__call__(term: KInner) KInner[source]
+

Overload for Subst.apply.

+
+ +
+
+__getitem__(key: str) KInner[source]
+

Get the KInner associated with the given variable name from the underlying Subst mapping.

+
+ +
+
+__init__(subst: Mapping[str, KInner] = FrozenDict({}))[source]
+

Construct a new Subst given a mapping of variable names to KInner.

+
+ +
+
+__iter__() Iterator[str][source]
+

Return the underlying Subst mapping as an iterator.

+
+ +
+
+__len__() int[source]
+

Return the length of the underlying Subst mapping.

+
+ +
+
+__mul__(other: Subst) Subst[source]
+

Overload for Subst.compose.

+
+ +
+
+apply(term: KInner) KInner[source]
+

Apply the given substitution to KInner, replacing free variable occurrences with their valuations defined in this Subst.

+
+ +
+
+compose(other: Subst) Subst[source]
+

Union two substitutions together, preferring the assignments in self if present in both.

+
+ +
+
+static from_dict(d: Mapping[str, Any]) Subst[source]
+

Deserialize a Subst from a given dictionary representing it.

+
+ +
+
+static from_pred(pred: KInner) Subst[source]
+

Given a generic matching logic predicate, attempt to extract a Subst from it.

+
+ +
+
+property is_identity: bool
+
+ +
+
+minimize() Subst[source]
+

Return a new substitution with any identity items removed.

+
+ +
+
+property pred: KInner
+

Turn this Subst into a boolean predicate using _==K_ operator.

+
+ +
+
+to_dict() dict[str, Any][source]
+

Serialize a Subst to a dictionary representation.

+
+ +
+
+unapply(term: KInner) KInner[source]
+

Replace occurrences of valuations from this Subst with the variables that they are assigned to.

+
+ +
+
+union(other: Subst) Subst | None[source]
+

Union two substitutions together, failing with None if there are conflicting assignments.

+
+ +
+ +
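A sketch of substitutions and pattern matching (not part of the generated page); the label 'foo' is hypothetical:

```python
from pyk.kast.inner import KApply, KToken, KVariable

pattern = KApply('foo', [KVariable('X'), KToken('2', 'Int')])
target = KApply('foo', [KToken('1', 'Int'), KToken('2', 'Int')])

# Matching instantiates the pattern's variables to subterms of the target.
subst = pattern.match(target)
print(subst)           # expected: a Subst binding X to the token 1
print(subst(pattern))  # applying the substitution reproduces the target
```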
+
+bottom_up(f: Callable[[KInner], KInner], kinner: KInner) KInner[source]
+

Transform a term from the bottom moving upward.

+
+
Parameters:
+
    +
  • f – Function to apply to each node in the term.

  • +
  • kinner – Original term to transform.

  • +
+
+
Returns:
+

The transformed term.

+
+
+
+ +
+
+bottom_up_with_summary(f: Callable[[KInner, list[A]], tuple[KInner, A]], kinner: KInner) tuple[KInner, A][source]
+

Traverse a term from the bottom moving upward, collecting information about it.

+
+
Parameters:
+
    +
  • f – Function to apply at each AST node to transform it and collect summary.

  • +
  • kinner – Term to apply this transformation to.

  • +
+
+
Returns:
+

A tuple of the transformed term and the summarized results.

+
+
+
+ +
+
+build_assoc(unit: KInner, label: str | KLabel, terms: Iterable[KInner]) KInner[source]
+

Build an associative list.

+
+
Parameters:
+
    +
  • unit – The empty variant of the given list type.

  • +
  • label – The associative list join operator.

  • +
  • terms – List (potentially empty) of terms to join in an associative list.

  • +
+
+
Returns:
+

The list of terms joined using the supplied label, or the unit element in the case of no terms.

+
+
+
+ +
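A sketch of building an associative join with the function above (not part of the generated page); the unit and join labels mimic K's List labels but are treated as hypothetical here:

```python
from pyk.kast.inner import KApply, KToken, build_assoc

unit = KApply('.List')
items = [KToken(str(i), 'Int') for i in range(3)]

# Joins the three tokens with '_List_'; with an empty list it returns the unit.
assoc = build_assoc(unit, '_List_', items)
print(assoc)
print(build_assoc(unit, '_List_', []))  # just the unit element
```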
+
+build_cons(unit: KInner, label: str | KLabel, terms: Iterable[KInner]) KInner[source]
+

Build a cons list.

+
+
Parameters:
+
    +
  • unit – The empty variant of the given list type.

  • +
  • label – The associative list join operator.

  • +
  • terms – List (potentially empty) of terms to join in an associative list.

  • +
+
+
Returns:
+

The list of terms joined using the supplied label, terminated with the unit element.

+
+
+
+ +
+
+collect(callback: Callable[[KInner], None], kinner: KInner) None[source]
+

Collect information about a given term traversing it top-down using a function with side effects.

+
+
Parameters:
+
    +
  • callback – Function with the side effect of collecting desired information at each AST node.

  • +
  • kinner – The term to traverse.

  • +
+
+
+
+ +
+
+flatten_label(label: str, kast: KInner) list[KInner][source]
+

Given a cons list, return a flat Python list of the elements.

+
+
Parameters:
+
    +
  • label – The cons operator.

  • +
  • kast – The cons list to flatten.

  • +
+
+
Returns:
+

Items of cons list.

+
+
+
+ +
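A sketch of round-tripping between a cons list and a flat Python list (not part of the generated page); the labels are hypothetical, as in the previous sketch:

```python
from pyk.kast.inner import KApply, KToken, build_cons, flatten_label

unit = KApply('.List')
items = [KToken(str(i), 'Int') for i in range(3)]

cons = build_cons(unit, '_List_', items)  # nested '_List_' terminated by the unit
flat = flatten_label('_List_', cons)      # flat list of the leaves of the cons list
print(flat)
```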
+
+top_down(f: Callable[[KInner], KInner], kinner: KInner) KInner[source]
+

Transform a term from the top moving downward.

+
+
Parameters:
+
    +
  • f – Function to apply to each node in the term.

  • +
  • kinner – Original term to transform.

  • +
+
+
Returns:
+

The transformed term.

+
+
+
+ +
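A sketch of a whole-term transformation using the traversals above (not part of the generated page); the labels are hypothetical:

```python
from pyk.kast.inner import KApply, KInner, KVariable, bottom_up, top_down

term = KApply('foo', [KVariable('X'), KApply('bar', [KVariable('Y')])])

def rename(t: KInner) -> KInner:
    # Rename every variable, leaving all other nodes untouched.
    return KVariable(t.name + '_RENAMED', t.sort) if isinstance(t, KVariable) else t

print(bottom_up(rename, term))
print(top_down(rename, term))  # same result here; only the traversal order differs
```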
+
+var_occurrences(term: KInner) dict[str, list[KVariable]][source]
+

Collect the list of occurrences of each variable in a given term.

+
+
Parameters:
+

term – Term to collect variables from.

+
+
Returns:
+

A dictionary with variable names as keys and the list of all occurrences of the variable as values.

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.kast.html b/pyk/api/pyk.kast.kast.html new file mode 100644 index 00000000000..b75d2c463cc --- /dev/null +++ b/pyk/api/pyk.kast.kast.html @@ -0,0 +1,166 @@ + + + + + + + + + pyk.kast.kast module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.kast module

+
+
+class KAst[source]
+

Bases: ABC

+
+
+property hash: str
+
+ +
+
+abstract to_dict() dict[str, Any][source]
+
+ +
+
+final to_json() str[source]
+
+ +
+
+static version() int[source]
+
+ +
+ +
+
+kast_term(dct: Mapping[str, Any]) Mapping[str, Any][source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.lexer.html b/pyk/api/pyk.kast.lexer.html new file mode 100644 index 00000000000..be76289d346 --- /dev/null +++ b/pyk/api/pyk.kast.lexer.html @@ -0,0 +1,252 @@ + + + + + + + + + pyk.kast.lexer module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.lexer module

+
+
+class State(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+DEFAULT = 1
+
+ +
+
+SORT = 2
+
+ +
+ +
+
+class Token(text, type)[source]
+

Bases: NamedTuple

+
+
+text: str
+

Alias for field number 0

+
+ +
+
+type: TokenType
+

Alias for field number 1

+
+ +
+ +
+
+class TokenType(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+COLON = 5
+
+ +
+
+COMMA = 4
+
+ +
+
+DOTK = 7
+
+ +
+
+DOTKLIST = 8
+
+ +
+
+EOF = 1
+
+ +
+
+ID = 10
+
+ +
+
+KLABEL = 13
+
+ +
+
+KSEQ = 6
+
+ +
+
+LPAREN = 2
+
+ +
+
+RPAREN = 3
+
+ +
+
+SORT = 12
+
+ +
+
+STRING = 14
+
+ +
+
+TOKEN = 9
+
+ +
+
+VARIABLE = 11
+
+ +
+ +
+
+lexer(text: Iterable[str]) Iterator[Token][source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.manip.html b/pyk/api/pyk.kast.manip.html new file mode 100644 index 00000000000..1f340219f26 --- /dev/null +++ b/pyk/api/pyk.kast.manip.html @@ -0,0 +1,539 @@ + + + + + + + + + pyk.kast.manip module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.manip module

+
+
+abstract_term_safely(kast: KInner, base_name: str = 'V', sort: KSort | None = None, existing_var_names: set[str] | None = None) KVariable[source]
+
+ +
+
+apply_existential_substitutions(state: KInner, constraints: Iterable[KInner]) tuple[KInner, Iterable[KInner]][source]
+
+ +
+
+bool_to_ml_pred(kast: KInner, sort: str | KSort = KSort(name='GeneratedTopCell')) KInner[source]
+
+ +
+
+build_claim(claim_id: str, init_config: KInner, final_config: KInner, init_constraints: Iterable[KInner] = (), final_constraints: Iterable[KInner] = (), keep_vars: Iterable[str] = ()) tuple[KClaim, Subst][source]
+

Return a KClaim between the supplied initial and final states.

+
+
Parameters:
+
    +
  • claim_id – Label to give the claim.

  • +
  • init_config – State to put on LHS of the rule.

  • +
  • final_config – State to put on RHS of the rule.

  • +
  • init_constraints – Constraints to use as requires clause.

  • +
  • final_constraints – Constraints to use as ensures clause.

  • +
  • keep_vars – Variables to leave in the side-conditions even if not bound in the configuration.

  • +
+
+
Returns:
+

A tuple (claim, var_map) where

+
    +
  • claim: A KClaim with variable naming conventions applied +so that it should be parseable by the K Frontend.

  • +
  • var_map: The variable renamings applied to make the claim parseable by the K Frontend +(which can be undone to recover the original variables).

  • +
+

+
+
+
+ +
+
+build_rule(rule_id: str, init_config: KInner, final_config: KInner, init_constraints: Iterable[KInner] = (), final_constraints: Iterable[KInner] = (), priority: int | None = None, keep_vars: Iterable[str] = (), defunc_with: KDefinition | None = None) tuple[KRule, Subst][source]
+

Return a KRule between the supplied initial and final states.

+
+
Parameters:
+
    +
  • rule_id – Label to give the rule.

  • +
  • init_config – State to put on LHS of the rule.

  • +
  • final_config – State to put on RHS of the rule.

  • +
  • init_constraints – Constraints to use as requires clause.

  • +
  • final_constraints – Constraints to use as ensures clause.

  • +
  • priority – Priority index to assign to generated rules.

  • +
  • keep_vars – Variables to leave in the side-conditions even if not bound in the configuration.

  • +
  • defunc_with – KDefinition for filtering out function symbols on LHS of rules.

  • +
+
+
Returns:
+

A tuple (rule, var_map) where

+
    +
  • rule: A KRule with variable naming conventions applied +so that it should be parseable by the K Frontend.

  • +
  • var_map: The variable renamings applied to make the rule parseable by the K Frontend +(which can be undone to recover the original variables).

  • +
+

+
+
+
+ +
+
+cell_label_to_var_name(label: str) str[source]
+

Return a variable name based on a cell label.

+
+ +
+
+collapse_dots(kast: KInner) KInner[source]
+

Given a configuration with structural frames, minimize the structural frames needed.

+
+
Parameters:
+

kast – A configuration, potentially with structural frames.

+
+
Returns:
+

The same configuration, with the amount of structural framing minimized.

+
+
+
+ +
+
+count_vars(term: KInner) Counter[str][source]
+
+ +
+
+defunctionalize(defn: KDefinition, kinner: KInner) tuple[KInner, list[KInner]][source]
+

Turn non-constructor arguments into side-conditions so that a term is only constructor-like.

+
+
Parameters:
+
    +
  • defn – The definition to pull function label information from.

  • +
  • kinner – The term to defunctionalize.

  • +
+
+
Returns:
+

A tuple of the defunctionalized term and the list of constraints generated.

+
+
+
+ +
+
+extract_cells(kast: KInner, keep_cells: Collection[str]) KInner[source]
+
+ +
+
+extract_lhs(term: KInner) KInner[source]
+
+ +
+
+extract_rhs(term: KInner) KInner[source]
+
+ +
+
+extract_subst(term: KInner) tuple[Subst, KInner][source]
+
+ +
+
+free_vars(kast: KInner) frozenset[str][source]
+
+ +
+
+if_ktype(ktype: type[KI], then: Callable[[KI], KInner]) Callable[[KInner], KInner][source]
+
+ +
+
+inline_cell_maps(kast: KInner) KInner[source]
+

Ensure that cell map collections are printed nicely, not as Maps.

+
+
Parameters:
+

kast – A KAST term.

+
+
Returns:
+

The KAST term with cell maps inlined.

+
+
+
+ +
+
+is_anon_var(kast: KInner) bool[source]
+
+ +
+
+is_spurious_constraint(term: KInner) bool[source]
+
+ +
+
+is_term_like(kast: KInner) bool[source]
+
+ +
+
+labels_to_dots(kast: KInner, labels: Collection[str]) KInner[source]
+

Abstract specific labels for printing.

+
+
Parameters:
+
    +
  • kast – A term.

  • +
  • labels – List of labels to abstract.

  • +
+
+
+
+
Returns

The term with labels abstracted.

+
+
+
+ +
+
+minimize_rule_like(rule: RL, keep_vars: Iterable[str] = ()) RL[source]
+

Minimize a K rule or claim for pretty-printing.

+
    +
  • Variables only used once will be removed.

  • +
  • Unused cells will be abstracted.

  • +
  • Useless side-conditions will be attempted to be removed.

  • +
+
+
Parameters:
+

rule – A K rule or claim.

+
+
Returns:
+

The rule or claim, minimized.

+
+
+
+ +
+
+minimize_term(term: KInner, keep_vars: Iterable[str] = (), abstract_labels: Collection[str] = (), keep_cells: Collection[str] = ()) KInner[source]
+

Minimize a K term for pretty-printing.

+
    +
  • Variables only used once will be removed.

  • +
  • Unused cells will be abstracted.

  • +
  • Useless conditions will be attempted to be removed.

  • +
+
+
Parameters:
+

kast – A term.

+
+
Returns:
+

The term, minimized.

+
+
+
+ +
+
+ml_pred_to_bool(kast: KInner, unsafe: bool = False) KInner[source]
+
+ +
+
+no_cell_rewrite_to_dots(term: KInner) KInner[source]
+

Transform a given term by replacing the contents of each cell with dots if the LHS and RHS are the same.

+

This function recursively traverses the cells in a term. +When it finds a cell whose left-hand side (LHS) is identical to its right-hand side (RHS), +it replaces the cell’s contents with a predefined DOTS.

+
+
Parameters:
+

term – The term to be transformed.

+
+
Returns:
+

The transformed term, where specific cell contents have been replaced with dots.

+
+
+
+ +
+
+normalize_constraints(constraints: Iterable[KInner]) tuple[KInner, ...][source]
+
+ +
+
+normalize_ml_pred(pred: KInner) KInner[source]
+
+ +
+
+on_attributes(kast: W, f: Callable[[KAtt], KAtt]) W[source]
+
+ +
+
+propagate_up_constraints(k: KInner) KInner[source]
+
+ +
+
+push_down_rewrites(kast: KInner) KInner[source]
+
+ +
+
+remove_attrs(term: KInner) KInner[source]
+
+ +
+
+remove_generated_cells(term: KInner) KInner[source]
+

Remove <generatedTop> and <generatedCounter> from a configuration.

+
+
Parameters:
+

term – A term.

+
+
Returns:
+

The term with those cells removed.

+
+
+
+ +
+
+remove_semantic_casts(kast: KInner) KInner[source]
+

Remove injected #SemanticCast* nodes in AST.

+
+
Parameters:
+

kast – A term (possibly) containing automatically injected #SemanticCast* KApply nodes.

+
+
Returns:
+

The term without the #SemanticCast* nodes.

+
+
+
+ +
+
+remove_source_map(definition: KDefinition) KDefinition[source]
+
+ +
+
+remove_useless_constraints(constraints: Iterable[KInner], initial_vars: Iterable[str]) list[KInner][source]
+

Remove constraints that do not depend on a given iterable of variables (directly or indirectly).

+
+
Parameters:
+
    +
  • constraints – Iterable of constraints to filter.

  • +
  • initial_vars – Initial iterable of variables to keep constraints for.

  • +
+
+
Returns:
+

A list of constraints with only those constraints that contain the initial variables, +or variables that depend on those through other constraints in the list.

+
+
+
+ +
+
+rename_generated_vars(term: KInner) KInner[source]
+
+ +
+
+replace_rewrites_with_implies(kast: KInner) KInner[source]
+
+ +
+
+set_cell(constrained_term: KInner, cell_variable: str, cell_value: KInner) KInner[source]
+
+ +
+
+simplify_bool(k: KInner) KInner[source]
+
+ +
+
+sort_ac_collections(kast: KInner) KInner[source]
+
+ +
+
+sort_assoc_label(label: str, kast: KInner) KInner[source]
+
+ +
+
+split_config_and_constraints(kast: KInner) tuple[KInner, KInner][source]
+
+ +
+
+split_config_from(configuration: KInner) tuple[KInner, dict[str, KInner]][source]
+

Split the substitution from a given configuration.

+

Given an input configuration config, will return a tuple (symbolic_config, subst), where:

+
+
    +
  1. config == substitute(symbolic_config, subst)

  2. +
  3. symbolic_config is the same configuration structure, but where the contents of each leaf cell are replaced with a fresh KVariable.

  4. +
  5. subst is the substitution for the generated KVariables back to the original configuration contents.

  6. +
+
+
+ +
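A sketch of splitting a configuration with the function above (not part of the generated page); real configurations come from a kompiled definition, so the cell labels below are hypothetical:

```python
from pyk.kast.inner import KApply, KToken, KVariable
from pyk.kast.manip import split_config_from

config = KApply('<T>', [
    KApply('<k>', [KToken('1', 'Int')]),
    KApply('<state>', [KVariable('RHO')]),
])

symbolic_config, subst = split_config_from(config)
print(symbolic_config)  # leaf cell contents replaced by fresh variables
print(subst)            # maps those variables back to the original contents
```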
+
+undo_aliases(definition: KDefinition, kast: KInner) KInner[source]
+
+ +
+
+useless_vars_to_dots(kast: KInner, keep_vars: Iterable[str] = ()) KInner[source]
+

Structurally abstract away useless variables.

+
+
Parameters:
+
    +
  • kast – A term.

  • +
  • keep_vars – Iterable of variables to keep.

  • +
+
+
Returns:
+

The term with the useless variables structurally abstracted.

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.markdown.html b/pyk/api/pyk.kast.markdown.html new file mode 100644 index 00000000000..93bb45f897b --- /dev/null +++ b/pyk/api/pyk.kast.markdown.html @@ -0,0 +1,259 @@ + + + + + + + + + pyk.kast.markdown module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.markdown module

+
+
+final class And(ops: 'tuple[Selector, ...]')[source]
+

Bases: Selector

+
+
+eval(atoms: Container[str]) bool[source]
+
+ +
+
+ops: tuple[Selector, ...]
+
+ +
+ +
+
+final class Atom(name: 'str')[source]
+

Bases: Selector

+
+
+eval(atoms: Container[str]) bool[source]
+
+ +
+
+name: str
+
+ +
+ +
+
+class CodeBlock(info, code)[source]
+

Bases: NamedTuple

+
+
+code: str
+

Alias for field number 1

+
+ +
+
+info: str
+

Alias for field number 0

+
+ +
+ +
+
+final class Not(op: 'Selector')[source]
+

Bases: Selector

+
+
+eval(atoms: Container[str]) bool[source]
+
+ +
+
+op: Selector
+
+ +
+ +
+
+final class Or(ops: 'tuple[Selector, ...]')[source]
+

Bases: Selector

+
+
+eval(atoms: Container[str]) bool[source]
+
+ +
+
+ops: tuple[Selector, ...]
+
+ +
+ +
+
+class Selector[source]
+

Bases: ABC

+
+
+abstract eval(atoms: Container[str]) bool[source]
+
+ +
+ +
+
+class SelectorParser(selector: str)[source]
+

Bases: object

+
+
+parse() Selector[source]
+
+ +
+ +
+
+code_blocks(text: str) Iterator[CodeBlock][source]
+
+ +
+
+parse_tags(text: str) set[str][source]
+
+ +
+
+select_code_blocks(text: str, selector: str | None = None) str[source]
+
+ +
+
+selector_lexer(it: Iterable[str]) Iterator[str][source]
+
+ +
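A small sketch of how these helpers are typically combined to pull tagged K code out of a markdown file (the document text below is made up):

    from pyk.kast.markdown import code_blocks, select_code_blocks

    doc = '\n'.join([
        '# Example definition',
        '',
        '```k',
        'module EXAMPLE',
        'endmodule',
        '```',
        '',
        '```{.k .symbolic}',
        'rule foo => bar',
        '```',
        '',
    ])

    # Inspect every fenced block together with its info string.
    for block in code_blocks(doc):
        print(block.info, len(block.code))

    # Keep only the blocks whose tags satisfy the selector, here plain 'k'.
    print(select_code_blocks(doc, 'k'))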
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.outer.html b/pyk/api/pyk.kast.outer.html new file mode 100644 index 00000000000..de69f8fc66f --- /dev/null +++ b/pyk/api/pyk.kast.outer.html @@ -0,0 +1,1241 @@ + + + + + + + + + pyk.kast.outer module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.outer module

+
+
+class KAssoc(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+LEFT = 'Left'
+
+ +
+
+NON_ASSOC = 'NonAssoc'
+
+ +
+
+RIGHT = 'Right'
+
+ +
+ +
+
+final class KBubble(sentence_type: str, contents: str, att: KAtt = KAtt(atts=FrozenDict({})))[source]
+

Bases: KSentence

+

Represents an unparsed chunk of AST in user-defined syntax.

+
+
+att: KAtt
+
+ +
+
+contents: str
+
+ +
+
+let(*, sentence_type: str | None = None, contents: str | None = None, att: KAtt | None = None) KBubble[source]
+
+ +
+
+let_att(att: KAtt) KBubble[source]
+
+ +
+
+sentence_type: str
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KClaim(body: KInner, requires: KInner = KToken(token='true', sort=KSort(name='Bool')), ensures: KInner = KToken(token='true', sort=KSort(name='Bool')), att: KAtt = KAtt(atts=FrozenDict({})))[source]
+

Bases: KRuleLike

+

Represents a K claim, typically a transition with pre/post-conditions.

+
+
+att: KAtt
+
+ +
+
+body: KInner
+
+ +
+
+property dependencies: list[str]
+

Return the dependencies of this claim (list of other claims needed to help prove this one or speed up this one's proof).

+
+ +
+
+ensures: KInner
+
+ +
+
+property is_circularity: bool
+

Return whether this claim is a circularity (must be used coinductively to prove itself).

+
+ +
+
+property is_trusted: bool
+

Return whether this claim is trusted (does not need to be proven to be considered true).

+
+ +
+
+let(*, body: KInner | None = None, requires: KInner | None = None, ensures: KInner | None = None, att: KAtt | None = None) KClaim[source]
+
+ +
+
+let_att(att: KAtt) KClaim[source]
+
+ +
+
+requires: KInner
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KContext(body: KInner, requires: KInner = KToken(token='true', sort=KSort(name='Bool')), att: KAtt = KAtt(atts=FrozenDict({})))[source]
+

Bases: KSentence

+

Represents a K evaluation context, used for isolating chunks of computation and focusing on them.

+
+
+att: KAtt
+
+ +
+
+body: KInner
+
+ +
+
+let(*, body: KInner | None = None, requires: KInner | None = None, att: KAtt | None = None) KContext[source]
+
+ +
+
+let_att(att: KAtt) KContext[source]
+
+ +
+
+requires: KInner
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KDefinition(main_module_name: str, all_modules: Iterable[KFlatModule], requires: Iterable[KRequire] = (), att: KAtt = KAtt(atts=FrozenDict({})))[source]
+

Bases: KOuter, WithKAtt, Iterable[KFlatModule]

+

Represents an entire K definition, with file imports and modules in place, and a given module called out as main module.

+
+
+add_cell_map_items(kast: KInner) KInner[source]
+

Wrap cell-map items in the syntactical wrapper that the frontend generates for them (see KDefinition.remove_cell_map_items).

+
+ +
+
+add_ksequence_under_k_productions(kast: KInner) KInner[source]
+

Inject a KSequence under the given term if it’s a subsort of K and is being used somewhere that sort K is expected (determined by inspecting the definition).

+
+ +
+
+add_sort_params(kast: KInner) KInner[source]
+

Return a given term with the sort parameters on the KLabel filled in (which may be missing because of how the frontend works), best effort.

+
+ +
+
+property alias_rules: tuple[KRule, ...]
+

Returns the KRule sentences marked as alias that are transitively imported by the main module of this definition.

+
+ +
+
+property all_module_names: tuple[str, ...]
+

Return the name of all modules in this KDefinition.

+
+ +
+
+all_modules: tuple[KFlatModule, ...]
+
+ +
+
+property all_modules_dict: dict[str, KFlatModule]
+

Returns a dictionary of all the modules (with names as keys) defined in this definition.

+
+ +
+
+att: KAtt
+
+ +
+
+property brackets: FrozenDict[KSort, KProduction]
+
+ +
+
+property cell_collection_productions: tuple[KProduction, ...]
+

Returns the KProduction sentences which are cell-collection declarations, transitively imported by the main module of this definition.

+
+ +
+
+property constructors: tuple[KProduction, ...]
+

Returns the KProduction sentences which are constructor declarations, transitively imported by the main module of this definition.

+
+ +
+
+empty_config(sort: KSort) KInner[source]
+

Given a cell-sort, compute an “empty” configuration for it (all the constructor structure of the configuration in place, but variables in cell positions).

+
+ +
+
+static from_dict(d: Mapping[str, Any]) KDefinition[source]
+
+ +
+
+property function_labels: tuple[str, ...]
+

Returns the label names of all the KProduction sentences which are function symbols, for all modules in this definition.

+
+ +
+
+property functions: tuple[KProduction, ...]
+

Returns the KProduction sentences which are function declarations, transitively imported by the main module of this definition.

+
+ +
+
+greatest_common_subsort(sort1: KSort, sort2: KSort) KSort | None[source]
+

Compute the greatest lower bound of two sorts in the sort lattice using a very simple approach, returning None on failure (which does not necessarily mean there isn't a glb).

+
+ +
+
+init_config(sort: KSort) KInner[source]
+

Return an initialized configuration as the user declares it in the semantics, complete with configuration variables in place.

+
+ +
+
+instantiate_cell_vars(term: KInner) KInner[source]
+

Given a partially-complete configuration, find positions where there could be more cell structure but instead there are variables and instantiate more cell structure.

+
+ +
+
+least_common_supersort(sort1: KSort, sort2: KSort) KSort | None[source]
+

Compute the least upper bound of two sorts in the sort lattice using a very simple approach, returning None on failure (which does not necessarily mean there isn't a lub).

+
+ +
+
+property left_assocs: FrozenDict[str, frozenset[str]]
+
+ +
+
+let(*, main_module_name: str | None = None, all_modules: Iterable[KFlatModule] | None = None, requires: Iterable[KRequire] | None = None, att: KAtt | None = None) KDefinition[source]
+
+ +
+
+let_att(att: KAtt) KDefinition[source]
+
+ +
+
+property macro_rules: tuple[KRule, ...]
+

Returns the KRule sentences marked as alias or macro that are transitively imported by the main module of this definition.

+
+ +
+
+main_module: InitVar[KFlatModule]
+
+ +
+
+main_module_name: str
+
+ +
+
+module(name: str) KFlatModule[source]
+

Return the module associated with a given name.

+
+ +
+
+property module_names: tuple[str, ...]
+

Return the list of module names transitively imported by the main module of this definition.

+
+ +
+
+property modules: tuple[KFlatModule, ...]
+

Returns the list of modules transitively imported by the main module of this definition.

+
+ +
+
+property overloads: FrozenDict[str, frozenset[str]]
+

Return a mapping from symbols to the sets of symbols that overload them.

+
+ +
+
+property priorities: FrozenDict[str, frozenset[str]]
+

Return a mapping from symbols to the sets of symbols with lower priority.

+
+ +
+
+production_for_cell_sort(sort: KSort) KProduction[source]
+

Return the production for a given cell-declaration syntax from the cell’s declared sort.

+
+ +
+
+property productions: tuple[KProduction, ...]
+

Returns the KProduction sentences transitively imported by the main module of this definition.

+
+ +
+
+remove_cell_map_items(kast: KInner) KInner[source]
+

Remove cell-map syntactical wrapper items that the frontend generates (see KDefinition.add_cell_map_items).

+
+ +
+
+requires: tuple[KRequire, ...]
+
+ +
+
+resolve_sorts(label: KLabel) tuple[KSort, tuple[KSort, ...]][source]
+

Compute the result and argument sorts for a given production based on a KLabel.

+
+ +
+
+property right_assocs: FrozenDict[str, frozenset[str]]
+
+ +
+
+property rules: tuple[KRule, ...]
+

Returns the KRule sentences transitively imported by the main module of this definition.

+
+ +
+
+property semantic_rules: tuple[KRule, ...]
+

Returns the KRule sentences which are topmost rewrite rules, transitively imported by the main module of this definition.

+
+ +
+
+property sentence_by_unique_id: dict[str, KSentence]
+
+ +
+
+sort(kast: KInner) KSort | None[source]
+

Compute the sort of a given term using a best-effort simple sorting algorithm, returning None on failure.

+
+ +
+
+sort_strict(kast: KInner) KSort[source]
+

Compute the sort of a given term using a best-effort simple sorting algorithm, raising an error on failure.

+
+ +
+
+sort_vars(kast: KInner, sort: KSort | None = None) KInner[source]
+

Return the original term with the sorts of all variables added or specialized, failing if conflicting sorts are received for a given variable.

+
+ +
+
+property subsort_table: FrozenDict[KSort, frozenset[KSort]]
+

Return a mapping from sorts to all their proper subsorts.

+
+ +
+
+subsorts(sort: KSort) frozenset[KSort][source]
+

Return all subsorts of a given KSort by inspecting the definition.

+
+ +
+
+property symbols: FrozenDict[str, KProduction]
+
+ +
+
+property syntax_productions: tuple[KProduction, ...]
+

Returns the KProduction sentences which are syntax declarations, transitively imported by the main module of this definition.

+
+ +
+
+property syntax_symbols: FrozenDict[str, KProduction]
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KFlatModule(name: str, sentences: Iterable[KSentence] = (), imports: Iterable[KImport] = (), att: KAtt = KAtt(atts=FrozenDict({})))[source]
+

Bases: KOuter, WithKAtt, Iterable[KSentence]

+

Represents a K module, with a name, list of imports, and list of sentences.

+
+
+att: KAtt
+
+ +
+
+property cell_collection_productions: tuple[KProduction, ...]
+

Return all the KProduction sentences from this module that are cell collection declarations.

+
+ +
+
+property claims: tuple[KClaim, ...]
+

Return all the KClaim declared in this module.

+
+ +
+
+property constructors: tuple[KProduction, ...]
+

Return all the KProduction sentences from this module that are constructor declarations.

+
+ +
+
+static from_dict(d: Mapping[str, Any]) KFlatModule[source]
+
+ +
+
+property functions: tuple[KProduction, ...]
+

Return all the KProduction sentences from this module that are function declarations.

+
+ +
+
+imports: tuple[KImport, ...]
+
+ +
+
+let(*, name: str | None = None, sentences: Iterable[KSentence] | None = None, imports: Iterable[KImport] | None = None, att: KAtt | None = None) KFlatModule[source]
+
+ +
+
+let_att(att: KAtt) KFlatModule[source]
+
+ +
+
+map_sentences(f: Callable[[S], S], *, of_type: type[S]) KFlatModule[source]
+
+map_sentences(f: Callable[[KSentence], KSentence], *, of_type: None = None) KFlatModule
+
+ +
+
+name: str
+
+ +
+
+property productions: tuple[KProduction, ...]
+

Return all the KProduction sentences from this module.

+
+ +
+
+property rules: tuple[KRule, ...]
+

Return all the KRule declared in this module.

+
+ +
+
+property sentence_by_unique_id: dict[str, KSentence]
+
+ +
+
+sentences: tuple[KSentence, ...]
+
+ +
+
+property syntax_productions: tuple[KProduction, ...]
+

Return all the KProduction sentences from this module that contain KLabel (are EBNF syntax declarations).

+
+ +
+
+property syntax_sorts: tuple[KSyntaxSort, ...]
+

Return all the KSyntaxSort sentences from this module.

+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KFlatModuleList(main_module: str, modules: Iterable[KFlatModule])[source]
+

Bases: KOuter

+

Represents a list of K modules, as returned by the prover parser for example, with a given module called out as the main module.

+
+
+static from_dict(d: Mapping[str, Any]) KFlatModuleList[source]
+
+ +
+
+let(*, main_module: str | None = None, modules: Iterable[KFlatModule] | None = None) KFlatModuleList[source]
+
+ +
+
+main_module: str
+
+ +
+
+modules: tuple[KFlatModule, ...]
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KImport(name: str, public: bool = True)[source]
+

Bases: KOuter

+

Represents a K module import, used for inheriting all the sentences of the imported module into this one.

+
+
+static from_dict(d: Mapping[str, Any]) KImport[source]
+
+ +
+
+let(*, name: str | None = None, public: bool | None = None) KImport[source]
+
+ +
+
+name: str
+
+ +
+
+public: bool
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KNonTerminal(sort: KSort, name: str | None = None)[source]
+

Bases: KProductionItem

+

Represents a non-terminal of a given sort in EBNF productions, for defining arguments to the production.

+
+
+let(*, sort: KSort | None = None, name: str | None = None) KNonTerminal[source]
+
+ +
+
+name: str | None
+
+ +
+
+sort: KSort
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+class KOuter[source]
+

Bases: KAst

+

Represents K definitions in KAST format.

+

Outer syntax comprises the K-specific data structures, including modules, definitions, imports, user-syntax declarations, rules, contexts, and claims.

+
+ +
+
+final class KProduction(sort: str | KSort, items: Iterable[KProductionItem] = (), params: Iterable[str | KSort] = (), klabel: str | KLabel | None = None, att: KAtt = KAtt(atts=FrozenDict({})))[source]
+

Bases: KSentence

+

Represents a production in K’s EBNF grammar definitions, as a sequence of ProductionItem.

+
+
+property argument_sorts: list[KSort]
+

Return the sorts of the non-terminal positions of the productions.

+
+ +
+
+property as_subsort: tuple[KSort, KSort] | None
+

Return a pair (supersort, subsort) if self is a subsort production, and None otherwise.

+
+ +
+
+att: KAtt
+
+ +
+
+property default_format: Format
+
+ +
+
+property is_prefix: bool
+

The production is of the form t* "(" (n ("," n)*)? ")".

+

Here, t is a terminal other than "(", "," or ")", and n a non-terminal.

+

Example: syntax Int ::= "mul" "(" Int "," Int ")"

+
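For instance, the production from the example above can be built directly with the constructors documented in this module (a sketch):

    from pyk.kast.inner import KSort
    from pyk.kast.outer import KNonTerminal, KProduction, KTerminal

    # syntax Int ::= "mul" "(" Int "," Int ")"
    mul = KProduction(
        KSort('Int'),
        [
            KTerminal('mul'),
            KTerminal('('),
            KNonTerminal(KSort('Int')),
            KTerminal(','),
            KNonTerminal(KSort('Int')),
            KTerminal(')'),
        ],
    )
    assert mul.is_prefix
    assert not mul.is_record  # the non-terminals are unnamed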
+ +
+
+property is_record: bool
+

The production is prefix with labelled nonterminals.

+
+ +
+
+items: tuple[KProductionItem, ...]
+
+ +
+
+klabel: KLabel | None
+
+ +
+
+let(*, sort: str | KSort | None = None, items: Iterable[KProductionItem] | None = None, params: Iterable[str | KSort] | None = None, klabel: str | KLabel | None = None, att: KAtt | None = None) KProduction[source]
+
+ +
+
+let_att(att: KAtt) KProduction[source]
+
+ +
+
+property non_terminals: tuple[KNonTerminal, ...]
+

Return the non-terminals of the production.

+
+ +
+
+params: tuple[KSort, ...]
+
+ +
+
+sort: KSort
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+class KProductionItem[source]
+

Bases: KOuter

+

Represents the elements used to declare components of productions in EBNF style.

+
+
+static from_dict(d: Mapping[str, Any]) KProductionItem[source]
+
+ +
+ +
+
+final class KRegexTerminal(regex: str)[source]
+

Bases: KProductionItem

+

Represents a regular-expression terminal in EBNF production, to be matched against input text.

+
+
+let(*, regex: str | None = None) KRegexTerminal[source]
+
+ +
+
+regex: str
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KRequire(require: str)[source]
+

Bases: KOuter

+

Represents a K file import of another file.

+
+
+static from_dict(d: Mapping[str, Any]) KRequire[source]
+
+ +
+
+let(*, require: str | None = None) KRequire[source]
+
+ +
+
+require: str
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KRule(body: KInner, requires: KInner = KToken(token='true', sort=KSort(name='Bool')), ensures: KInner = KToken(token='true', sort=KSort(name='Bool')), att: KAtt = KAtt(atts=FrozenDict({})))[source]
+

Bases: KRuleLike

+

Represents a K rule definition, typically a conditional rewrite/transition.

+
+
+att: KAtt
+
+ +
+
+body: KInner
+
+ +
+
+ensures: KInner
+
+ +
+
+let(*, body: KInner | None = None, requires: KInner | None = None, ensures: KInner | None = None, att: KAtt | None = None) KRule[source]
+
+ +
+
+let_att(att: KAtt) KRule[source]
+
+ +
+
+property priority: int
+
+ +
+
+requires: KInner
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+class KRuleLike[source]
+

Bases: KSentence

+

Represents something with rule-like structure (with body, requires, and ensures clauses).

+
+
+body: KInner
+
+ +
+
+ensures: KInner
+
+ +
+
+abstract let(*, body: KInner | None = None, requires: KInner | None = None, ensures: KInner | None = None, att: KAtt | None = None) RL[source]
+
+ +
+
+requires: KInner
+
+ +
+ +
+
+class KSentence[source]
+

Bases: KOuter, WithKAtt

+

Represents an individual declaration in a K module.

+
+
+static from_dict(d: Mapping[str, Any]) KSentence[source]
+
+ +
+
+property label: str
+

Return a (hopefully) unique label associated with the given KSentence.

+
+
Returns:
+

Unique label for the given sentence, either (in order):
  • the user-supplied label attribute (or the label supplied in the rule), or
  • the unique identifier computed and inserted by the frontend.

+
+
+
+ +
+
+property source: str | None
+

Return the source assigned to this sentence, or None.

+
+ +
+
+property unique_id: str | None
+

Return the unique ID assigned to this sentence, or None.

+
+ +
+ +
+
+final class KSortSynonym(new_sort: KSort, old_sort: KSort, att: KAtt = KAtt(atts=FrozenDict({})))[source]
+

Bases: KSentence

+

Represents a sort synonym, allowing a new name to be declared for a given sort.

+
+
+att: KAtt
+
+ +
+
+let(*, old_sort: KSort | None = None, new_sort: KSort | None = None, att: KAtt | None = None) KSortSynonym[source]
+
+ +
+
+let_att(att: KAtt) KSortSynonym[source]
+
+ +
+
+new_sort: KSort
+
+ +
+
+old_sort: KSort
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KSyntaxAssociativity(assoc: KAssoc, tags: Iterable[str] = frozenset({}), att: KAtt = KAtt(atts=FrozenDict({})))[source]
+

Bases: KSentence

+

Represents a standalone declaration of operator associativity for tagged productions.

+
+
+assoc: KAssoc
+
+ +
+
+att: KAtt
+
+ +
+
+let(*, assoc: KAssoc | None = None, tags: Iterable[str] | None = None, att: KAtt | None = None) KSyntaxAssociativity[source]
+
+ +
+
+let_att(att: KAtt) KSyntaxAssociativity[source]
+
+ +
+
+tags: frozenset[str]
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KSyntaxLexical(name: str, regex: str, att: KAtt = KAtt(atts=FrozenDict({})))[source]
+

Bases: KSentence

+

Represents a named piece of lexical syntax, definable as a regular expression.

+
+
+att: KAtt
+
+ +
+
+let(*, name: str | None = None, regex: str | None = None, att: KAtt | None = None) KSyntaxLexical[source]
+
+ +
+
+let_att(att: KAtt) KSyntaxLexical[source]
+
+ +
+
+name: str
+
+ +
+
+regex: str
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KSyntaxPriority(priorities: Iterable[Iterable[str]] = (), att: KAtt = KAtt(atts=FrozenDict({})))[source]
+

Bases: KSentence

+

Represents a standalone declaration of syntax priorities, using production tags.

+
+
+att: KAtt
+
+ +
+
+let(*, priorities: Iterable[Iterable[str]] | None = None, att: KAtt | None = None) KSyntaxPriority[source]
+
+ +
+
+let_att(att: KAtt) KSyntaxPriority[source]
+
+ +
+
+priorities: tuple[frozenset[str], ...]
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KSyntaxSort(sort: KSort, params: Iterable[str | KSort] = (), att: KAtt = KAtt(atts=FrozenDict({})))[source]
+

Bases: KSentence

+

Represents a sort declaration, potentially parametric.

+
+
+att: KAtt
+
+ +
+
+let(*, sort: KSort | None = None, params: Iterable[str | KSort] | None = None, att: KAtt | None = None) KSyntaxSort[source]
+
+ +
+
+let_att(att: KAtt) KSyntaxSort[source]
+
+ +
+
+params: tuple[KSort, ...]
+
+ +
+
+sort: KSort
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class KTerminal(value: str)[source]
+

Bases: KProductionItem

+

Represents a string literal component of a production in EBNF grammar.

+
+
+let(*, value: str | None = None) KTerminal[source]
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+
+value: str
+
+ +
+ +
+
+read_kast_definition(path: str | PathLike) KDefinition[source]
+

Read a KDefinition from disk, failing if it’s not actually a KDefinition.

+
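A sketch of typical usage (the path is hypothetical; kompile --emit-json writes a compiled.json next to the other kompiled artifacts):

    from pyk.kast.outer import read_kast_definition

    # Hypothetical output directory of `kompile --emit-json imp.k`.
    definition = read_kast_definition('imp-kompiled/compiled.json')

    print(definition.main_module_name)
    print(definition.module_names)      # modules transitively imported by the main module
    print(definition.all_module_names)  # every module in the definition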
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.outer_lexer.html b/pyk/api/pyk.kast.outer_lexer.html new file mode 100644 index 00000000000..eb95db7ee61 --- /dev/null +++ b/pyk/api/pyk.kast.outer_lexer.html @@ -0,0 +1,477 @@ + + + + + + + + + pyk.kast.outer_lexer module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.outer_lexer module

+
+
+class Loc(line, col)[source]
+

Bases: NamedTuple

+
+
+col: int
+

Alias for field number 1

+
+ +
+
+line: int
+

Alias for field number 0

+
+ +
+ +
+
+class LocationIterator(text: Iterable[str], line: int = 1, col: int = 0)[source]
+

Bases: Iterator[str]

+

A string iterator which tracks the line and column information of the characters in the string.

+
+
+property loc: Loc
+

Return the (line, column) of the last character returned by the iterator.

+

If no character has been returned yet, it will be the location that this iterator was initialized with. The default is (1, 0), which is the only time the column will be 0.

+
+ +
+ +
+
+class State(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+ATTR = 6
+
+ +
+
+BUBBLE = 4
+
+ +
+
+CONTEXT = 5
+
+ +
+
+DEFAULT = 1
+
+ +
+
+KLABEL = 3
+
+ +
+
+MODNAME = 7
+
+ +
+
+SYNTAX = 2
+
+ +
+ +
+
+class Token(text, type, loc)[source]
+

Bases: NamedTuple

+
+
+let(*, text: str | None = None, type: TokenType | None = None, loc: Loc | None = None) Token[source]
+
+ +
+
+loc: Loc
+

Alias for field number 2

+
+ +
+
+text: str
+

Alias for field number 0

+
+ +
+
+type: TokenType
+

Alias for field number 1

+
+ +
+ +
+
+class TokenType(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+ATTR_CONTENT = 43
+
+ +
+
+ATTR_KEY = 42
+
+ +
+
+BUBBLE = 44
+
+ +
+
+COLON = 15
+
+ +
+
+COMMA = 1
+
+ +
+
+DCOLONEQ = 16
+
+ +
+
+EOF = 0
+
+ +
+
+EQ = 9
+
+ +
+
+GT = 10
+
+ +
+
+ID_LOWER = 37
+
+ +
+
+ID_UPPER = 38
+
+ +
+
+KLABEL = 40
+
+ +
+
+KW_ALIAS = 17
+
+ +
+
+KW_CLAIM = 18
+
+ +
+
+KW_CONFIG = 19
+
+ +
+
+KW_CONTEXT = 20
+
+ +
+
+KW_ENDMODULE = 21
+
+ +
+
+KW_IMPORTS = 22
+
+ +
+
+KW_LEFT = 23
+
+ +
+
+KW_LEXICAL = 24
+
+ +
+
+KW_MODULE = 25
+
+ +
+
+KW_NONASSOC = 26
+
+ +
+
+KW_PRIORITY = 27
+
+ +
+
+KW_PRIVATE = 28
+
+ +
+
+KW_PUBLIC = 29
+
+ +
+
+KW_REQUIRES = 30
+
+ +
+
+KW_RIGHT = 31
+
+ +
+
+KW_RULE = 32
+
+ +
+
+KW_SYNTAX = 33
+
+ +
+
+LBRACE = 4
+
+ +
+
+LBRACK = 6
+
+ +
+
+LPAREN = 2
+
+ +
+
+MODNAME = 39
+
+ +
+
+NAT = 34
+
+ +
+
+PLUS = 11
+
+ +
+
+QUESTION = 13
+
+ +
+
+RBRACE = 5
+
+ +
+
+RBRACK = 7
+
+ +
+
+REGEX = 36
+
+ +
+
+RPAREN = 3
+
+ +
+
+RULE_LABEL = 41
+
+ +
+
+STRING = 35
+
+ +
+
+TILDE = 14
+
+ +
+
+TIMES = 12
+
+ +
+
+VBAR = 8
+
+ +
+ +
+
+outer_lexer(it: Iterable[str]) Iterator[Token][source]
+
+ +
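A small sketch of tokenizing a module header with this lexer (the token stream shown in the comment is indicative only):

    from pyk.kast.outer_lexer import outer_lexer

    tokens = list(outer_lexer('module FOO imports BAR endmodule'))
    print([(tok.type.name, tok.text) for tok in tokens])
    # Indicatively: KW_MODULE 'module', MODNAME 'FOO', KW_IMPORTS 'imports',
    # MODNAME 'BAR', KW_ENDMODULE 'endmodule', followed by EOF.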
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.outer_parser.html b/pyk/api/pyk.kast.outer_parser.html new file mode 100644 index 00000000000..aec6124bdbd --- /dev/null +++ b/pyk/api/pyk.kast.outer_parser.html @@ -0,0 +1,176 @@ + + + + + + + + + pyk.kast.outer_parser module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.outer_parser module

+
+
+class OuterParser(it: Iterable[str], source: Path | None = None)[source]
+

Bases: object

+
+
+definition() Definition[source]
+
+ +
+
+importt() Import[source]
+
+ +
+
+module() Module[source]
+
+ +
+
+require() Require[source]
+
+ +
+
+sentence() Sentence[source]
+
+ +
+
+string_sentence() StringSentence[source]
+
+ +
+
+syntax_sentence() SyntaxSentence[source]
+
+ +
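As a sketch, the parser can be driven directly over a definition given as a string; it produces the lightweight AST from pyk.kast.outer_syntax (the module below is made up):

    from pyk.kast.outer_parser import OuterParser

    text = '''
    module EXAMPLE
      imports BOOL
      syntax Foo ::= "foo" | "bar"
    endmodule
    '''

    parser = OuterParser(text)
    definition = parser.definition()  # an outer_syntax.Definition
    print([module.name for module in definition.modules])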
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.outer_syntax.html b/pyk/api/pyk.kast.outer_syntax.html new file mode 100644 index 00000000000..e7b009b9501 --- /dev/null +++ b/pyk/api/pyk.kast.outer_syntax.html @@ -0,0 +1,637 @@ + + + + + + + + + pyk.kast.outer_syntax module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.outer_syntax module

+
+
+class AST(*, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: ABC

+
+
+location: tuple[int, int, int, int] | None = None
+
+ +
+
+source: Path | None = None
+
+ +
+ +
+
+final class Alias(bubble: 'str', label: 'str' = '', att: 'Att' = Att(source=None, location=None, items=()), *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: StringSentence

+
+
+att: Att = Att(source=None, location=None, items=())
+
+ +
+
+bubble: str
+
+ +
+
+label: str = ''
+
+ +
+ +
+
+class Assoc(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+LEFT = 'left'
+
+ +
+
+NON_ASSOC = 'non-assoc'
+
+ +
+
+RIGHT = 'right'
+
+ +
+ +
+
+final class Att(items: 'Iterable[tuple[str, str]]' = ())[source]
+

Bases: AST, Sequence[tuple[str, str]]

+
+
+items: tuple[tuple[str, str], ...]
+
+ +
+ +
+
+final class Claim(bubble: 'str', label: 'str' = '', att: 'Att' = Att(source=None, location=None, items=()), *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: StringSentence

+
+
+att: Att = Att(source=None, location=None, items=())
+
+ +
+
+bubble: str
+
+ +
+
+label: str = ''
+
+ +
+ +
+
+final class Config(bubble: 'str', label: 'str' = '', att: 'Att' = Att(source=None, location=None, items=()), *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: StringSentence

+
+
+att: Att = Att(source=None, location=None, items=())
+
+ +
+
+bubble: str
+
+ +
+
+label: str = ''
+
+ +
+ +
+
+final class Context(bubble: 'str', label: 'str' = '', att: 'Att' = Att(source=None, location=None, items=()), *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: StringSentence

+
+
+att: Att = Att(source=None, location=None, items=())
+
+ +
+
+bubble: str
+
+ +
+
+label: str = ''
+
+ +
+ +
+
+final class Definition(modules: 'Iterable[Module]' = (), requires: 'Iterable[Require]' = ())[source]
+

Bases: AST

+
+
+modules: tuple[Module, ...]
+
+ +
+
+requires: tuple[Require, ...]
+
+ +
+ +
+
+final class Import(module_name: 'str', *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None, public: 'bool' = True)[source]
+

Bases: AST

+
+
+module_name: str
+
+ +
+
+public: bool = True
+
+ +
+ +
+
+final class Lexical(regex: 'str', *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: ProductionItem

+
+
+regex: str
+
+ +
+ +
+
+final class Module(name: 'str', sentences: 'Iterable[Sentence]' = (), imports: 'Iterable[Import]' = (), att: 'Att' = Att(source=None, location=None, items=()), source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: AST

+
+
+att: Att
+
+ +
+
+imports: tuple[Import, ...]
+
+ +
+
+name: str
+
+ +
+
+sentences: tuple[Sentence, ...]
+
+ +
+ +
+
+final class NonTerminal(sort: 'Sort', name: 'str' = '', *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: ProductionItem

+
+
+name: str = ''
+
+ +
+
+sort: Sort
+
+ +
+ +
+
+final class PriorityBlock(productions: 'Iterable[ProductionLike]', assoc: 'Assoc | None' = None)[source]
+

Bases: AST

+
+
+assoc: Assoc | None
+
+ +
+
+productions: tuple[ProductionLike, ...]
+
+ +
+ +
+
+final class Production(items: 'Iterable[ProductionItem]', att: 'Att' = Att(source=None, location=None, items=()))[source]
+

Bases: ProductionLike

+
+
+att: Att = Att(source=None, location=None, items=())
+
+ +
+
+items: tuple[ProductionItem, ...]
+
+ +
+ +
+
+class ProductionItem(*, source: Path | None = None, location: tuple[int, int, int, int] | None = None)[source]
+

Bases: AST, ABC

+
+ +
+
+class ProductionLike(*, source: Path | None = None, location: tuple[int, int, int, int] | None = None)[source]
+

Bases: AST, ABC

+
+
+att: Att
+
+ +
+ +
+
+final class Require(path: 'str', *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: AST

+
+
+path: str
+
+ +
+ +
+
+final class Rule(bubble: 'str', label: 'str' = '', att: 'Att' = Att(source=None, location=None, items=()), *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: StringSentence

+
+
+att: Att = Att(source=None, location=None, items=())
+
+ +
+
+bubble: str
+
+ +
+
+label: str = ''
+
+ +
+ +
+
+class Sentence(*, source: Path | None = None, location: tuple[int, int, int, int] | None = None)[source]
+

Bases: AST, ABC

+
+ +
+
+final class Sort(name: 'str', args: 'Iterable[int | str]' = ())[source]
+

Bases: AST

+
+
+args: tuple[int | str, ...]
+
+ +
+
+name: str
+
+ +
+ +
+
+final class SortDecl(name: 'str', params: 'Iterable[str]' = (), args: 'Iterable[str]' = ())[source]
+

Bases: AST

+
+
+args: tuple[str, ...]
+
+ +
+
+name: str
+
+ +
+
+params: tuple[str, ...]
+
+ +
+ +
+
+class StringSentence(*, source: Path | None = None, location: tuple[int, int, int, int] | None = None)[source]
+

Bases: Sentence, ABC

+
+
+att: Att
+
+ +
+
+bubble: str
+
+ +
+
+label: str
+
+ +
+ +
+
+final class SyntaxAssoc(assoc: 'Assoc', klabels: 'Iterable[str]')[source]
+

Bases: SyntaxSentence

+
+
+assoc: Assoc
+
+ +
+
+klabels: tuple[str, ...]
+
+ +
+ +
+
+final class SyntaxDecl(decl: 'SortDecl', att: 'Att' = Att(source=None, location=None, items=()), *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: SyntaxSentence

+
+
+att: Att = Att(source=None, location=None, items=())
+
+ +
+
+decl: SortDecl
+
+ +
+ +
+
+final class SyntaxDefn(decl: 'SortDecl', blocks: 'Iterable[PriorityBlock]' = ())[source]
+

Bases: SyntaxSentence

+
+
+blocks: tuple[PriorityBlock, ...]
+
+ +
+
+decl: SortDecl
+
+ +
+ +
+
+final class SyntaxLexical(name: 'str', regex: 'str', *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: SyntaxSentence

+
+
+name: str
+
+ +
+
+regex: str
+
+ +
+ +
+
+final class SyntaxPriority(groups: 'Iterable[Iterable[str]]')[source]
+

Bases: SyntaxSentence

+
+
+groups: tuple[tuple[str, ...], ...]
+
+ +
+ +
+
+class SyntaxSentence(*, source: Path | None = None, location: tuple[int, int, int, int] | None = None)[source]
+

Bases: Sentence, ABC

+
+ +
+
+final class SyntaxSynonym(new: 'SortDecl', old: 'Sort', att: 'Att' = Att(source=None, location=None, items=()), *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: SyntaxSentence

+
+
+att: Att = Att(source=None, location=None, items=())
+
+ +
+
+new: SortDecl
+
+ +
+
+old: Sort
+
+ +
+ +
+
+final class Terminal(value: 'str', *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: ProductionItem

+
+
+value: str
+
+ +
+ +
+
+final class UserList(sort: 'str', sep: 'str', non_empty: 'bool' = False, att: 'Att' = Att(source=None, location=None, items=()), *, source: 'Path | None' = None, location: 'tuple[int, int, int, int] | None' = None)[source]
+

Bases: ProductionLike

+
+
+att: Att = Att(source=None, location=None, items=())
+
+ +
+
+non_empty: bool = False
+
+ +
+
+sep: str
+
+ +
+
+sort: str
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.parser.html b/pyk/api/pyk.kast.parser.html new file mode 100644 index 00000000000..e939abafd61 --- /dev/null +++ b/pyk/api/pyk.kast.parser.html @@ -0,0 +1,166 @@ + + + + + + + + + pyk.kast.parser module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.parser module

+
+
+class KAstParser(it: Iterable[str])[source]
+

Bases: object

+
+
+eof() bool[source]
+
+ +
+
+k() KInner[source]
+
+ +
+
+kitem() KInner[source]
+
+ +
+
+klabel() KLabel[source]
+
+ +
+
+klist() list[KInner][source]
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.pretty.html b/pyk/api/pyk.kast.pretty.html new file mode 100644 index 00000000000..f2b87ce1e4a --- /dev/null +++ b/pyk/api/pyk.kast.pretty.html @@ -0,0 +1,199 @@ + + + + + + + + + pyk.kast.pretty module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.pretty module

+
+
+class PrettyPrinter(definition: KDefinition, extra_unparsing_modules: Iterable[KFlatModule] = (), patch_symbol_table: Callable[[SymbolTable], None] | None = None, unalias: bool = True, sort_collections: bool = False)[source]
+

Bases: object

+
+
+definition: KDefinition
+
+ +
+
+print(kast: KAst) str[source]
+

Print out KAST terms/outer syntax.

+
+
Parameters:
+

kast – KAST term to print.

+
+
Returns:
+

Best-effort string representation of KAST term.

+
+
+
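A sketch of unparsing a term against a kompiled definition (the path and the _+Int_ label are illustrative assumptions):

    from pyk.kast.inner import KApply, KToken
    from pyk.kast.outer import read_kast_definition
    from pyk.kast.pretty import PrettyPrinter

    definition = read_kast_definition('imp-kompiled/compiled.json')  # hypothetical path
    printer = PrettyPrinter(definition)

    term = KApply('_+Int_', [KToken('1', 'Int'), KToken('2', 'Int')])
    print(printer.print(term))  # something like '1 +Int 2'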
+ +
+
+property symbol_table: dict[str, Callable[[...], str]]
+
+ +
+ +
+
+assoc_with_unit(assoc_join: str, unit: str) Callable[[...], str][source]
+
+ +
+
+build_symbol_table(definition: KDefinition, extra_modules: Iterable[KFlatModule] = (), opinionated: bool = False) SymbolTable[source]
+

Build the unparsing symbol table given a JSON encoded definition.

+
+
Parameters:
+

definition – JSON encoded K definition.

+
+
Returns:
+

Python dictionary mapping klabels to automatically generated unparsers.

+
+
+
+ +
+
+indent(text: str, size: int = 2) str[source]
+
+ +
+
+paren(printer: Callable[[...], str]) Callable[[...], str][source]
+
+ +
+
+unparser_for_production(prod: KProduction) Callable[[...], str][source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.rewrite.html b/pyk/api/pyk.kast.rewrite.html new file mode 100644 index 00000000000..6100336bee9 --- /dev/null +++ b/pyk/api/pyk.kast.rewrite.html @@ -0,0 +1,140 @@ + + + + + + + + + pyk.kast.rewrite module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.rewrite module

+
+
+indexed_rewrite(kast: KInner, rewrites: Iterable[KRewrite]) KInner[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kast.utils.html b/pyk/api/pyk.kast.utils.html new file mode 100644 index 00000000000..2d8e2827373 --- /dev/null +++ b/pyk/api/pyk.kast.utils.html @@ -0,0 +1,145 @@ + + + + + + + + + pyk.kast.utils module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kast.utils module

+
+
+parse_outer(definition_file: str | Path, main_module: str, *, include_dirs: Iterable[str | Path] = (), md_selector: str = 'k', include_source: bool = True) KDefinition[source]
+
+ +
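A sketch of reading a definition from its source files rather than from a kompiled JSON (the file names and module name are hypothetical):

    from pyk.kast.utils import parse_outer

    definition = parse_outer(
        'imp.k',               # hypothetical main definition file
        'IMP',                 # hypothetical main module
        include_dirs=['include'],
        md_selector='k',
    )
    print(definition.main_module_name)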
+
+slurp_definitions(main_file: str | Path, *, include_dirs: Iterable[str | Path] = (), md_selector: str | None = None, include_source: bool = True) dict[Path, Definition][source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kbuild.config.html b/pyk/api/pyk.kbuild.config.html new file mode 100644 index 00000000000..03a7b9687d6 --- /dev/null +++ b/pyk/api/pyk.kbuild.config.html @@ -0,0 +1,135 @@ + + + + + + + + + pyk.kbuild.config module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kbuild.config module

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kbuild.html b/pyk/api/pyk.kbuild.html new file mode 100644 index 00000000000..7a61e8badc0 --- /dev/null +++ b/pyk/api/pyk.kbuild.html @@ -0,0 +1,226 @@ + + + + + + + + + pyk.kbuild package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kbuild.kbuild.html b/pyk/api/pyk.kbuild.kbuild.html new file mode 100644 index 00000000000..523d6ef55f9 --- /dev/null +++ b/pyk/api/pyk.kbuild.kbuild.html @@ -0,0 +1,197 @@ + + + + + + + + + pyk.kbuild.kbuild module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kbuild.kbuild module

+
+
+final class KBuild(kdist_dir: 'str | Path')[source]
+

Bases: object

+
+
+definition_dir(project: Project, target_name: str) Path[source]
+
+ +
+
+property k_version: str
+
+ +
+
+kdist_dir: Path
+
+ +
+
+kompile(project: Project, target_name: str, *, debug: bool = False) Path[source]
+
+ +
+
+up_to_date(project: Project, target_name: str) bool[source]
+
+ +
+ +
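A sketch of driving a build through this class (the cache directory, project layout and target name are all hypothetical):

    from pyk.kbuild.kbuild import KBuild
    from pyk.kbuild.project import Project

    kbuild = KBuild('.kbuild')            # hypothetical kdist/cache directory
    project = Project.load_from_dir('.')  # hypothetical project root
    if not kbuild.up_to_date(project, 'llvm'):
        definition_dir = kbuild.kompile(project, 'llvm')
        print(definition_dir)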
+
+final class KBuildEnv(project: 'Project', path: 'str | Path')[source]
+

Bases: object

+
+
+static create_temp(project: Project) Iterator[KBuildEnv][source]
+
+ +
+
+kompile(target_name: str, output_dir: Path, *, debug: bool = False) None[source]
+
+ +
+
+path: Path
+
+ +
+
+project: Project
+
+ +
+
+sync() None[source]
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kbuild.project.html b/pyk/api/pyk.kbuild.project.html new file mode 100644 index 00000000000..8fcc33348bc --- /dev/null +++ b/pyk/api/pyk.kbuild.project.html @@ -0,0 +1,290 @@ + + + + + + + + + pyk.kbuild.project module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kbuild.project module

+
+
+final class PackageSource(package: 'str')[source]
+

Bases: Source

+
+
+package: str
+
+ +
+
+resolve(project_path: Path) Path[source]
+
+ +
+ +
+
+final class PathSource(path: 'Path')[source]
+

Bases: Source

+
+
+path: Path
+
+ +
+
+resolve(project_path: Path) Path[source]
+
+ +
+ +
+
+final class Project(*, path: 'str | Path', name: 'str', version: 'str', source_dir: 'str | Path', resources: 'Mapping[str, str | Path] | None' = None, dependencies: 'Iterable[Project]' = (), targets: 'Iterable[Target]' = ())[source]
+

Bases: object

+
+
+property all_files: list[Path]
+
+ +
+
+dependencies: tuple[Project, ...]
+
+ +
+
+get_target(target_name: str) Target[source]
+
+ +
+
+static load(project_file: str | Path) Project[source]
+
+ +
+
+static load_from_dir(project_dir: str | Path) Project[source]
+
+ +
+
+name: str
+
+ +
+
+path: Path
+
+ +
+
+property project_file: Path
+
+ +
+
+property resource_file_names: dict[str, list[str]]
+
+ +
+
+property resource_files: dict[str, list[Path]]
+
+ +
+
+resources: FrozenDict[str, Path]
+
+ +
+
+source_dir: Path
+
+ +
+
+property source_file_names: list[str]
+
+ +
+
+property source_files: list[Path]
+
+ +
+
+property sub_projects: tuple[Project, ...]
+
+ +
+
+targets: tuple[Target, ...]
+
+ +
+
+version: str
+
+ +
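A sketch of loading and inspecting a project description (the kbuild.toml path is hypothetical):

    from pyk.kbuild.project import Project

    project = Project.load('kbuild.toml')  # hypothetical project file
    print(project.name, project.version)
    for target in project.targets:
        print(target.name, target.args)
    print(project.source_file_names)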
+ +
+
+class Source[source]
+

Bases: ABC

+
+
+static from_dict(dct: Mapping[str, Any]) Source[source]
+
+ +
+
+abstract resolve(project_path: Path) Path[source]
+
+ +
+ +
+
+final class Target(*, name: 'str', args: 'Mapping[str, Any]')[source]
+

Bases: object

+
+
+args: dict[str, Any]
+
+ +
+
+name: str
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kbuild.utils.html b/pyk/api/pyk.kbuild.utils.html new file mode 100644 index 00000000000..2f840ed1d53 --- /dev/null +++ b/pyk/api/pyk.kbuild.utils.html @@ -0,0 +1,212 @@ + + + + + + + + + pyk.kbuild.utils module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kbuild.utils module

+
+
+final class KVersion(major: 'int', minor: 'int', patch: 'int', git: 'Git | None')[source]
+

Bases: object

+
+
+final class Git(ahead: 'int', rev: 'str', dirty: 'bool')[source]
+

Bases: object

+
+
+ahead: int
+
+ +
+
+dirty: bool
+
+ +
+
+rev: str
+
+ +
+ +
+
+PATTERN: ClassVar = re.compile('v(?P<major>[1-9]+)\\.(?P<minor>[0-9]+)\\.(?P<patch>[0-9]+)(?P<git>-(?P<ahead>[0-9]+)-g(?P<rev>[0-9a-f]{10})(?P<dirty>-dirty)?)?')
+
+ +
+
+git: Git | None
+
+ +
+
+major: int
+
+ +
+
+minor: int
+
+ +
+
+static parse(text: str) KVersion[source]
+
+ +
+
+patch: int
+
+ +
+
+property text: str
+
+ +
+ +
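A sketch of parsing version strings with the PATTERN shown below (the version numbers are arbitrary):

    from pyk.kbuild.utils import KVersion

    plain = KVersion.parse('v7.1.191')
    assert (plain.major, plain.minor, plain.patch) == (7, 1, 191)
    assert plain.git is None

    dev = KVersion.parse('v7.1.191-12-g0123456789-dirty')
    assert dev.git is not None
    assert dev.git.ahead == 12 and dev.git.dirty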
+
+find_file_upwards(file_name: str, start_dir: Path) Path[source]
+
+ +
+
+k_version() KVersion[source]
+
+ +
+
+sync_files(source_dir: Path, target_dir: Path, file_names: Iterable[str]) list[Path][source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kcfg.exploration.html b/pyk/api/pyk.kcfg.exploration.html new file mode 100644 index 00000000000..777d150706a --- /dev/null +++ b/pyk/api/pyk.kcfg.exploration.html @@ -0,0 +1,217 @@ + + + + + + + + + pyk.kcfg.exploration module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kcfg.exploration module

+
+
+class KCFGExploration(kcfg: KCFG, terminal: Iterable[NodeIdLike] | None = None)[source]
+

Bases: object

+
+
+add_terminal(node_id: NodeIdLike) None[source]
+
+ +
+
+property explorable: list[Node]
+
+ +
+
+static from_dict(dct: Mapping[str, Any]) KCFGExploration[source]
+
+ +
+
+is_explorable(node_id: NodeIdLike) bool[source]
+
+ +
+
+is_terminal(node_id: NodeIdLike) bool[source]
+
+ +
+
+kcfg: KCFG
+
+ +
+
+minimize_kcfg(heuristics: KCFGSemantics | None = None, merge: bool = False) None[source]
+
+ +
+
+prune(node_id: NodeIdLike, keep_nodes: Iterable[NodeIdLike] = ()) list[int][source]
+
+ +
+
+remove_node(node_id: NodeIdLike) None[source]
+
+ +
+
+remove_terminal(node_id: int) None[source]
+
+ +
+
+property terminal: list[Node]
+
+ +
+
+property terminal_ids: set[int]
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+class KCFGExplorationNodeAttr(value: str)[source]
+

Bases: NodeAttr

+
+
+TERMINAL = NodeAttr(value='terminal')
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kcfg.explore.html b/pyk/api/pyk.kcfg.explore.html new file mode 100644 index 00000000000..533d3b6e1ee --- /dev/null +++ b/pyk/api/pyk.kcfg.explore.html @@ -0,0 +1,191 @@ + + + + + + + + + pyk.kcfg.explore module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kcfg.explore module

+
+
+class KCFGExplore(cterm_symbolic: CTermSymbolic, *, kcfg_semantics: KCFGSemantics | None = None, id: str | None = None)[source]
+

Bases: object

+
+
+check_extendable(kcfg_exploration: KCFGExploration, node: KCFG.Node) None[source]
+
+ +
+
+cterm_symbolic: CTermSymbolic
+
+ +
+
+extend_cterm(_cterm: CTerm, node_id: int, *, execute_depth: int | None = None, cut_point_rules: Iterable[str] = (), terminal_rules: Iterable[str] = (), module_name: str | None = None) list[KCFGExtendResult][source]
+
+ +
+
+id: str
+
+ +
+
+implication_failure_reason(antecedent: CTerm, consequent: CTerm, assume_defined: bool = False) tuple[bool, str][source]
+
+ +
+
+kcfg_semantics: KCFGSemantics
+
+ +
+
+pretty_print(kinner: KInner) str[source]
+
+ +
+
+section_edge(cfg: KCFG, source_id: NodeIdLike, target_id: NodeIdLike, logs: dict[int, tuple[LogEntry, ...]], sections: int = 2) tuple[int, ...][source]
+
+ +
+
+simplify(cfg: KCFG, logs: dict[int, tuple[LogEntry, ...]]) None[source]
+
+ +
+
+step(cfg: KCFG, node_id: NodeIdLike, logs: dict[int, tuple[LogEntry, ...]], depth: int = 1, module_name: str | None = None) int[source]
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kcfg.html b/pyk/api/pyk.kcfg.html new file mode 100644 index 00000000000..406d7dab02a --- /dev/null +++ b/pyk/api/pyk.kcfg.html @@ -0,0 +1,586 @@ + + + + + + + + + pyk.kcfg package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kcfg package

+
+

Submodules

+
+ +
+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kcfg.kcfg.html b/pyk/api/pyk.kcfg.kcfg.html new file mode 100644 index 00000000000..3a681013d0a --- /dev/null +++ b/pyk/api/pyk.kcfg.kcfg.html @@ -0,0 +1,1112 @@ + + + + + + + + + pyk.kcfg.kcfg module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kcfg.kcfg module

+
+
+final class Abstract(cterm: 'CTerm')[source]
+

Bases: KCFGExtendResult

+
+
+cterm: CTerm
+
+ +
+ +
+
+final class Branch(constraints: 'Iterable[KInner]', *, heuristic: 'bool' = False, info: 'str' = '')[source]
+

Bases: KCFGExtendResult

+
+
+constraints: tuple[KInner, ...]
+
+ +
+
+heuristic: bool
+
+ +
+
+info: str = ''
+
+ +
+ +
+
+class KCFG(cfg_dir: Path | None = None, optimize_memory: bool = True)[source]
+

Bases: Container[KCFG.Node | KCFG.Successor]

+
+
+final class Cover(source: 'KCFG.Node', target: 'KCFG.Node', csubst: 'CSubst')[source]
+

Bases: EdgeLike

+
+
+csubst: CSubst
+
+ +
+
+static from_dict(dct: dict[str, Any], nodes: Mapping[int, KCFG.Node]) KCFG.Cover[source]
+
+ +
+
+replace_source(node: Node) Cover[source]
+
+ +
+
+replace_target(node: Node) Cover[source]
+
+ +
+
+source: Node
+
+ +
+
+target: Node
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class Edge(source: 'KCFG.Node', target: 'KCFG.Node', depth: 'int', rules: 'tuple[str, ...]')[source]
+

Bases: EdgeLike

+
+
+depth: int
+
+ +
+
+static from_dict(dct: dict[str, Any], nodes: Mapping[int, KCFG.Node]) KCFG.Edge[source]
+
+ +
+
+replace_source(node: Node) Edge[source]
+
+ +
+
+replace_target(node: Node) Edge[source]
+
+ +
+
+rules: tuple[str, ...]
+
+ +
+
+source: Node
+
+ +
+
+target: Node
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+
+to_rule(label: str, claim: bool = False, priority: int | None = None, defunc_with: KDefinition | None = None, minimize: bool = False) KRuleLike[source]
+
+ +
+ +
+
+class EdgeLike[source]
+

Bases: Successor

+
+
+source: Node
+
+ +
+
+target: Node
+
+ +
+
+property targets: tuple[Node, ...]
+
+ +
+ +
+
+final class MergedEdge(source: Node, target: Node, edges: tuple[Edge, ...])[source]
+

Bases: EdgeLike

+

Merged edge is a collection of edges that have been merged into a single edge.

+
+
+edges: tuple[Edge, ...]
+
+ +
+
+static from_dict(dct: dict[str, Any], nodes: Mapping[int, KCFG.Node]) KCFG.Successor[source]
+
+ +
+
+replace_source(node: Node) Successor[source]
+
+ +
+
+replace_target(node: Node) Successor[source]
+
+ +
+
+source: Node
+
+ +
+
+target: Node
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+
+to_rule(label: str, claim: bool = False, priority: int | None = None) KRuleLike[source]
+
+ +
+ +
+
+class MultiEdge(source: 'KCFG.Node')[source]
+

Bases: Successor

+
+
+source: Node
+
+ +
+
+abstract with_single_target(target: Node) MultiEdge[source]
+
+ +
+ +
+
+final class NDBranch(source: 'KCFG.Node', _targets: 'Iterable[KCFG.Node]', rules: 'tuple[str, ...]')[source]
+

Bases: MultiEdge

+
+
+property edges: tuple[Edge, ...]
+
+ +
+
+static from_dict(dct: dict[str, Any], nodes: Mapping[int, KCFG.Node]) KCFG.NDBranch[source]
+
+ +
+
+replace_source(node: Node) NDBranch[source]
+
+ +
+
+replace_target(node: Node) NDBranch[source]
+
+ +
+
+rules: tuple[str, ...]
+
+ +
+
+source: Node
+
+ +
+
+property targets: tuple[Node, ...]
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+
+with_single_target(target: Node) NDBranch[source]
+
+ +
+ +
+
+final class Node(id: 'int', cterm: 'CTerm', attrs: 'Iterable[NodeAttr]' = ())[source]
+

Bases: object

+
+
+add_attr(attr: NodeAttr) Node[source]
+
+ +
+
+attrs: frozenset[NodeAttr]
+
+ +
+
+cterm: CTerm
+
+ +
+
+discard_attr(attr: NodeAttr) Node[source]
+
+ +
+
+property free_vars: frozenset[str]
+
+ +
+
+static from_dict(dct: dict[str, Any]) KCFG.Node[source]
+
+ +
+
+id: int
+
+ +
+
+let(cterm: CTerm | None = None, attrs: Iterable[KCFGNodeAttr] | None = None) KCFG.Node[source]
+
+ +
+
+remove_attr(attr: NodeAttr) Node[source]
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class Split(source: 'KCFG.Node', _targets: 'Iterable[tuple[KCFG.Node, CSubst]]')[source]
+

Bases: MultiEdge

+
+
+property covers: tuple[Cover, ...]
+
+ +
+
+static from_dict(dct: dict[str, Any], nodes: Mapping[int, KCFG.Node]) KCFG.Split[source]
+
+ +
+
+replace_source(node: Node) Split[source]
+
+ +
+
+replace_target(node: Node) Split[source]
+
+ +
+
+source: Node
+
+ +
+
+property splits: dict[int, CSubst]
+
+ +
+
+property targets: tuple[Node, ...]
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+
+with_single_target(target: Node) Split[source]
+
+ +
+ +
+
+class Successor[source]
+

Bases: ABC

+
+
+abstract static from_dict(dct: dict[str, Any], nodes: Mapping[int, KCFG.Node]) KCFG.Successor[source]
+
+ +
+
+abstract replace_source(node: Node) Successor[source]
+
+ +
+
+abstract replace_target(node: Node) Successor[source]
+
+ +
+
+source: Node
+
+ +
+
+property source_vars: frozenset[str]
+
+ +
+
+property target_ids: list[int]
+
+ +
+
+property target_vars: frozenset[str]
+
+ +
+
+abstract property targets: tuple[Node, ...]
+
+ +
+
+abstract to_dict() dict[str, Any][source]
+
+ +
+ +
+
+add_alias(alias: str, node_id: int | str) None[source]
+
+ +
+
+add_attr(node_id: int | str, attr: NodeAttr) None[source]
+
+ +
+
+add_node(node: Node) None[source]
+
+ +
+
+add_stuck(node_id: int | str) None[source]
+
+ +
+
+add_successor(succ: Successor) None[source]
+
+ +
+
+add_vacuous(node_id: int | str) None[source]
+
+ +
+
+aliases(node_id: int | str) list[str][source]
+
+ +
+
+contains_cover(cover: Cover) bool[source]
+
+ +
+
+contains_edge(edge: Edge) bool[source]
+
+ +
+
+contains_merged_edge(edge: MergedEdge) bool[source]
+
+ +
+
+contains_ndbranch(ndbranch: NDBranch) bool[source]
+
+ +
+
+contains_node(node: Node) bool[source]
+
+ +
+
+contains_split(split: Split) bool[source]
+
+ +
+
+cover(source_id: NodeIdLike, target_id: NodeIdLike) Cover | None[source]
+
+ +
+
+property covered: list[Node]
+
+ +
+
+covers(*, source_id: NodeIdLike | None = None, target_id: NodeIdLike | None = None) list[Cover][source]
+
+ +
+
+create_cover(source_id: NodeIdLike, target_id: NodeIdLike, csubst: CSubst | None = None) Cover[source]
+
+ +
+
+create_edge(source_id: NodeIdLike, target_id: NodeIdLike, depth: int, rules: Iterable[str] = ()) Edge[source]
+
+ +
+
+create_merged_edge(source_id: NodeIdLike, target_id: NodeIdLike, edges: Iterable[Edge | MergedEdge]) MergedEdge[source]
+
+ +
+
+create_ndbranch(source_id: NodeIdLike, ndbranches: Iterable[NodeIdLike], rules: Iterable[str] = ()) KCFG.NDBranch[source]
+
+ +
+
+create_node(cterm: CTerm) Node[source]
+
+ +
+
+create_split(source_id: NodeIdLike, splits: Iterable[tuple[NodeIdLike, CSubst]]) KCFG.Split[source]
+
+ +
+
+create_split_by_nodes(source_id: NodeIdLike, target_ids: Iterable[NodeIdLike]) KCFG.Split | None[source]
+

Create a split without crafting a CSubst.

+
+ +
+
+discard_attr(node_id: int | str, attr: NodeAttr) None[source]
+
+ +
+
+discard_stuck(node_id: int | str) None[source]
+
+ +
+
+discard_vacuous(node_id: int | str) None[source]
+
+ +
+
+edge(source_id: NodeIdLike, target_id: NodeIdLike) Edge | None[source]
+
+ +
+
+edge_likes(*, source_id: NodeIdLike | None = None, target_id: NodeIdLike | None = None) list[EdgeLike][source]
+
+ +
+
+edges(*, source_id: NodeIdLike | None = None, target_id: NodeIdLike | None = None) list[Edge][source]
+
+ +
+
+extend(extend_result: KCFGExtendResult, node: KCFG.Node, logs: dict[int, tuple[LogEntry, ...]], *, optimize_kcfg: bool) None[source]
+
+ +
+
+static from_claim(defn: KDefinition, claim: KClaim, cfg_dir: Path | None = None, optimize_memory: bool = True) tuple[KCFG, NodeIdLike, NodeIdLike][source]
+
+ +
+
+static from_dict(dct: Mapping[str, Any], optimize_memory: bool = True) KCFG[source]
+
+ +
+
+static from_json(s: str, optimize_memory: bool = True) KCFG[source]
+
+ +
+
+general_edges(*, source_id: NodeIdLike | None = None, target_id: NodeIdLike | None = None) list[Edge | MergedEdge][source]
+
+ +
+
+get_node(node_id: int | str) Node | None[source]
+
+ +
+
+is_covered(node_id: int | str) bool[source]
+
+ +
+
+is_leaf(node_id: int | str) bool[source]
+
+ +
+
+is_ndbranch(node_id: int | str) bool[source]
+
+ +
+
+is_root(node_id: int | str) bool[source]
+
+ +
+
+is_split(node_id: int | str) bool[source]
+
+ +
+
+is_stuck(node_id: int | str) bool[source]
+
+ +
+
+is_vacuous(node_id: int | str) bool[source]
+
+ +
+
+property leaves: list[Node]
+
+ +
+
+let_node(node_id: NodeIdLike, cterm: CTerm | None = None, attrs: Iterable[KCFGNodeAttr] | None = None) None[source]
+
+ +
+
+merged_edge(source_id: NodeIdLike, target_id: NodeIdLike) MergedEdge | None[source]
+
+ +
+
+merged_edges(*, source_id: NodeIdLike | None = None, target_id: NodeIdLike | None = None) list[MergedEdge][source]
+
+ +
+
+ndbranches(*, source_id: int | str | None = None, target_id: int | str | None = None) list[NDBranch][source]
+
+ +
+
+node(node_id: int | str) Node[source]
+
+ +
+
+property nodes: list[Node]
+
+ +
+
+static path_length(_path: Iterable[KCFG.Successor]) int[source]
+
+ +
+
+paths_between(source_id: NodeIdLike, target_id: NodeIdLike) list[tuple[Successor, ...]][source]
+
+ +
+
+predecessors(target_id: NodeIdLike) list[Successor][source]
+
+ +
+
+prune(node_id: NodeIdLike, keep_nodes: Iterable[NodeIdLike] = ()) list[int][source]
+
+ +
+
+reachable_nodes(source_id: int | str, *, reverse: bool = False) set[Node][source]
+
+ +
+
+static read_cfg_data(cfg_dir: Path) KCFG[source]
+
+ +
+
+static read_node_data(cfg_dir: Path, node_id: int) KCFG.Node[source]
+
+ +
+
+remove_alias(alias: str) None[source]
+
+ +
+
+remove_attr(node_id: int | str, attr: NodeAttr) None[source]
+
+ +
+
+remove_cover(source_id: int | str, target_id: int | str) None[source]
+
+ +
+
+remove_edge(source_id: int | str, target_id: int | str) None[source]
+
+ +
+
+remove_edges_around(node_id: int | str) None[source]
+
+ +
+
+remove_merged_edge(source_id: int | str, target_id: int | str) None[source]
+
+ +
+
+remove_node(node_id: int | str) None[source]
+
+ +
+
+remove_stuck(node_id: int | str) None[source]
+
+ +
+
+remove_vacuous(node_id: int | str) None[source]
+
+ +
+
+replace_node(node: Node) None[source]
+
+ +
+
+property root: list[Node]
+
+ +
+
+shortest_distance_between(node_1_id: int | str, node_2_id: int | str) int | None[source]
+
+ +
+
+shortest_path_between(source_node_id: NodeIdLike, target_node_id: NodeIdLike) tuple[Successor, ...] | None[source]
+
+ +
+
+split_on_constraints(source_id: NodeIdLike, constraints: Iterable[KInner]) list[int][source]
+
+ +
+
+splits(*, source_id: NodeIdLike | None = None, target_id: NodeIdLike | None = None) list[Split][source]
+
+ +
+
+property stuck: list[Node]
+
+ +
+
+successors(source_id: NodeIdLike) list[Successor][source]
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+
+to_dict_no_nodes() dict[str, Any][source]
+
+ +
+
+to_json() str[source]
+
+ +
+
+to_module(module_name: str | None = None, imports: Iterable[KImport] = (), priority: int = 20, att: KAtt = KAtt(atts=FrozenDict({}))) KFlatModule[source]
+
+ +
+
+to_rules(_id: str | None = None, priority: int = 20) list[KRuleLike][source]
+
+ +
+
+property uncovered: list[Node]
+
+ +
+
+property vacuous: list[Node]
+
+ +
+
+write_cfg_data() None[source]
+
+ +
+
+zero_depth_between(node_1_id: int | str, node_2_id: int | str) bool[source]
+
+ +
+ +
+
+class KCFGExtendResult[source]
+

Bases: ABC

+
+ +
+
+class KCFGNodeAttr(value: str)[source]
+

Bases: NodeAttr

+
+
+STUCK = NodeAttr(value='stuck')
+
+ +
+
+VACUOUS = NodeAttr(value='vacuous')
+
+ +
+ +
+
+class KCFGStore(store_path: Path)[source]
+

Bases: object

+
+
+property kcfg_json_path: Path
+
+ +
+
+property kcfg_node_dir: Path
+
+ +
+
+kcfg_node_path(node_id: int) Path[source]
+
+ +
+
+read_cfg_data() dict[str, Any][source]
+
+ +
+
+read_node_data(node_id: int) dict[str, Any][source]
+
+ +
+
+store_path: Path
+
+ +
+
+write_cfg_data(kcfg: KCFG, dct: dict[str, Any], deleted_nodes: Iterable[int] = (), created_nodes: Iterable[int] = ()) None[source]
+
+ +
+ +
+
+final class NDBranch(cterms: 'Iterable[CTerm]', logs: 'Iterable[LogEntry,]', rule_labels: 'Iterable[str]')[source]
+

Bases: KCFGExtendResult

+
+
+cterms: tuple[CTerm, ...]
+
+ +
+
+logs: tuple[LogEntry, ...]
+
+ +
+
+rule_labels: tuple[str, ...]
+
+ +
+ +
+
+class NodeAttr(value: 'str')[source]
+

Bases: object

+
+
+value: str
+
+ +
+ +
+
+final class Step(cterm: 'CTerm', depth: 'int', logs: 'tuple[LogEntry, ...]', rule_labels: 'list[str]', cut: 'bool' = False, info: 'str' = '')[source]
+

Bases: KCFGExtendResult

+
+
+cterm: CTerm
+
+ +
+
+cut: bool = False
+
+ +
+
+depth: int
+
+ +
+
+info: str = ''
+
+ +
+
+logs: tuple[LogEntry, ...]
+
+ +
+
+rule_labels: list[str]
+
+ +
+ +
+
+final class Stuck[source]
+

Bases: KCFGExtendResult

+
+ +
+
+final class Vacuous[source]
+

Bases: KCFGExtendResult

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kcfg.minimize.html b/pyk/api/pyk.kcfg.minimize.html new file mode 100644 index 00000000000..b73946d0263 --- /dev/null +++ b/pyk/api/pyk.kcfg.minimize.html @@ -0,0 +1,278 @@ + + + + + + + + + pyk.kcfg.minimize module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kcfg.minimize module

+
+
+class KCFGMinimizer(kcfg: KCFG, heuristics: KCFGSemantics | None = None, kdef: KDefinition | None = None)[source]
+

Bases: object

+
+
+kcfg: KCFG
+
+ +
+
+kdef: KDefinition | None
+
+ +
+
+lift_edge(b_id: NodeIdLike) None[source]
+

Lift an edge up another edge directly preceding it.

+

A –M steps–> B –N steps–> C becomes A –(M + N) steps–> C. Node B is removed.

+
+
Parameters:
+

b_id – the identifier of the central node B of a sequence of edges A –> B –> C.

+
+
Raises:
+

AssertionError – If the edges in question are not in place.

+
+
+
+ +
+
+lift_edges() bool[source]
+

Perform all possible edge lifts across the KCFG.

+

The KCFG is transformed to an equivalent in which no further edge lifts are possible.

+

Given the KCFG design, it is not possible for one edge lift to either disallow another or allow another that was not previously possible. Therefore, this function is guaranteed to lift all possible edges without having to loop.

+
+
Returns:
+

An indicator of whether or not at least one edge lift was performed.

+
+
+
+ +
+
+lift_split_edge(b_id: NodeIdLike) None[source]
+

Lift a split up an edge directly preceding it.

+

A –M steps–> B –[cond_1, …, cond_N]–> [C_1, …, C_N] becomes A –[cond_1, …, cond_N]–> [A #And cond_1 –M steps–> C_1, …, A #And cond_N –M steps–> C_N]. Node B is removed.

+
+
Parameters:
+

b_id – The identifier of the central node B of the structure A –> B –> [C_1, …, C_N].

+
+
Raises:
+
  • AssertionError – If the structure in question is not in place.
  • AssertionError – If any of the cond_i contain variables not present in A.
+
+
+
+ +
+
+lift_split_split(b_id: NodeIdLike) None[source]
+

Lift a split up a split directly preceding it, joining them into a single split.

+

A –[…, cond_B, …]–> […, B, …] with B –[cond_1, …, cond_N]–> [C_1, …, C_N] becomes A –[…, cond_B #And cond_1, …, cond_B #And cond_N, …]–> […, C_1, …, C_N, …]. Node B is removed.

+
+
Parameters:
+

b_id – the identifier of the node B of the structure A –[…, cond_B, …]–> […, B, …] with B –[cond_1, …, cond_N]–> [C_1, …, C_N].

+
+
Raises:
+

AssertionError – If the structure in question is not in place.

+
+
+
+ +
+
+lift_splits() bool[source]
+

Perform all possible split liftings.

+

The KCFG is transformed to an equivalent in which no further split lifts are possible.

+
+
Returns:
+

An indicator of whether or not at least one split lift was performed.

+
+
+
+ +
+
+merge_nodes() bool[source]
+

Merge the targets of a Split to cut down the number of branches, using the heuristic KCFGSemantics.is_mergeable.

+
+
Side Effect: The KCFG is rewritten by the following rewrite pattern.
  • Match: A -|Split|-> A_i -|Edge|-> B_i
  • Rewrite:
    • if B_x, B_y, …, B_z are not mergeable, then unchanged
    • if B_x, B_y, …, B_z are mergeable, then
      • A -|Split|-> A_x or A_y or … or A_z
      • A_x or A_y or … or A_z -|Edge|-> B_x or B_y or … or B_z
      • B_x or B_y or … or B_z -|Split|-> B_x, B_y, …, B_z

Specifically, when B_merge = B_x or B_y or … or B_z:
  • or: fresh variables in the places where the configurations differ
  • Edge in A_merged -|Edge|-> B_merge: the list of merged edges comes from A_i -|Edge|-> B_i
  • Split in B_merge -|Split|-> B_x, B_y, …, B_z: its substitution comes from A -|Split|-> A_1, A_2, …, A_n

Parameters:

semantics – provides the is_mergeable heuristic.

Returns:

Whether at least one merge was performed.

+
+ +
+
+minimize(merge: bool = False) None[source]
+

Minimize KCFG by repeatedly performing the lifting transformations.

+

The KCFG is transformed to an equivalent in which no further lifting transformations are possible. The loop is designed so that each transformation is performed once in each iteration.

+
+ +
+
+semantics: KCFGSemantics
+
+ +
+ +
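
The entries above describe the transformations individually; the following is a minimal sketch of driving them end to end, assuming an existing KCFG instance named kcfg and that the class is importable from pyk.kcfg.minimize:

    from pyk.kcfg.minimize import KCFGMinimizer  # import path assumed from the module name

    def minimize_kcfg(kcfg):
        # Repeatedly lift edges and splits until no transformation applies;
        # pass merge=True to also merge split targets via KCFGSemantics.is_mergeable.
        KCFGMinimizer(kcfg).minimize(merge=False)
        return kcfg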
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kcfg.semantics.html b/pyk/api/pyk.kcfg.semantics.html new file mode 100644 index 00000000000..3608d70612e --- /dev/null +++ b/pyk/api/pyk.kcfg.semantics.html @@ -0,0 +1,217 @@ + + + + + + + + + pyk.kcfg.semantics module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kcfg.semantics module

+
+
+class DefaultSemantics[source]
+

Bases: KCFGSemantics

+
+
+abstract_node(c: CTerm) CTerm[source]
+
+ +
+
+can_make_custom_step(c: CTerm) bool[source]
+
+ +
+
+custom_step(c: CTerm, cs: CTermSymbolic) KCFGExtendResult | None[source]
+
+ +
+
+is_loop(c: CTerm) bool[source]
+
+ +
+
+is_mergeable(c1: CTerm, c2: CTerm) bool[source]
+
+ +
+
+is_terminal(c: CTerm) bool[source]
+
+ +
+
+same_loop(c1: CTerm, c2: CTerm) bool[source]
+
+ +
+ +
+
+class KCFGSemantics[source]
+

Bases: ABC

+
+
+abstract abstract_node(c: CTerm) CTerm[source]
+
+ +
+
+abstract can_make_custom_step(c: CTerm) bool[source]
+
+ +
+
+abstract custom_step(c: CTerm, cs: CTermSymbolic) KCFGExtendResult | None[source]
+
+ +
+
+abstract is_loop(c: CTerm) bool[source]
+
+ +
+
+abstract is_mergeable(c1: CTerm, c2: CTerm) bool[source]
+
+ +
+
+abstract is_terminal(c: CTerm) bool[source]
+
+ +
+
+abstract same_loop(c1: CTerm, c2: CTerm) bool[source]
+
+ +
+ +
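
A minimal sketch of a concrete semantics follows, assuming the base class is importable from pyk.kcfg.semantics; the bodies below are placeholder heuristics, not the library's defaults:

    from pyk.kcfg.semantics import KCFGSemantics  # import path assumed from the module name

    class TrivialSemantics(KCFGSemantics):
        def abstract_node(self, c):
            return c                      # no abstraction

        def can_make_custom_step(self, c):
            return False                  # never take a custom step

        def custom_step(self, c, cs):
            return None                   # no custom step result

        def is_loop(self, c):
            return False                  # no loop detection

        def is_mergeable(self, c1, c2):
            return False                  # never merge branches

        def is_terminal(self, c):
            return False                  # never cut execution short

        def same_loop(self, c1, c2):
            return False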
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kcfg.show.html b/pyk/api/pyk.kcfg.show.html new file mode 100644 index 00000000000..a81fd372040 --- /dev/null +++ b/pyk/api/pyk.kcfg.show.html @@ -0,0 +1,236 @@ + + + + + + + + + pyk.kcfg.show module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kcfg.show module

+
+
+class KCFGShow(kprint: KPrint, node_printer: NodePrinter | None = None)[source]
+

Bases: object

+
+
+dot(kcfg: KCFG) Digraph[source]
+
+ +
+
+dump(cfgid: str, cfg: KCFG, dump_dir: Path, dot: bool = False) None[source]
+
+ +
+
+static hide_cells(term: KInner, omit_cells: Iterable[str]) KInner[source]
+
+ +
+
+kprint: KPrint
+
+ +
+
+static make_unique_segments(segments: Iterable[tuple[str, Iterable[str]]]) Iterable[tuple[str, Iterable[str]]][source]
+
+ +
+
+node_printer: NodePrinter
+
+ +
+
+node_short_info(kcfg: KCFG, node: Node) list[str][source]
+
+ +
+
+pretty(kcfg: KCFG, minimize: bool = True) Iterable[str][source]
+
+ +
+
+pretty_segments(kcfg: KCFG, minimize: bool = True) Iterable[tuple[str, Iterable[str]]][source]
+

Return a pretty version of the KCFG in segments.

+

Each segment is a tuple of an identifier and a list of lines to be printed for that segment (tuple[str, Iterable[str]]). The identifier tells you whether that segment is for a given node, edge, or just pretty spacing (‘unknown’). This is useful for applications that want to pretty-print in chunks, so that they know which printed region corresponds to each node or edge.

+
+ +
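
A short sketch of consuming the segments, assuming a KPrint instance kprint and a KCFG instance kcfg are already available and that the class is importable from pyk.kcfg.show:

    from pyk.kcfg.show import KCFGShow  # import path assumed from the module name

    def print_segments(kprint, kcfg):
        shower = KCFGShow(kprint)
        for ident, lines in shower.pretty_segments(kcfg, minimize=True):
            # ident names the node/edge the chunk belongs to, or 'unknown' for spacing
            for line in lines:
                print(f'{ident}: {line}')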
+
+show(cfg: KCFG, nodes: Iterable[NodeIdLike] = (), node_deltas: Iterable[tuple[NodeIdLike, NodeIdLike]] = (), to_module: bool = False, minimize: bool = True, sort_collections: bool = False, omit_cells: Iterable[str] = (), module_name: str | None = None) list[str][source]
+
+ +
+
+static simplify_config(config: KInner, omit_cells: Iterable[str]) KInner[source]
+
+ +
+
+to_module(cfg: KCFG, module_name: str | None = None, omit_cells: Iterable[str] = (), parseable_output: bool = True) KFlatModule[source]
+
+ +
+ +
+
+class NodePrinter(kprint: KPrint, full_printer: bool = False, minimize: bool = False)[source]
+

Bases: object

+
+
+full_printer: bool
+
+ +
+
+kprint: KPrint
+
+ +
+
+minimize: bool
+
+ +
+
+node_attrs(kcfg: KCFG, node: Node) list[str][source]
+
+ +
+
+print_node(kcfg: KCFG, node: Node) list[str][source]
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kcfg.store.html b/pyk/api/pyk.kcfg.store.html new file mode 100644 index 00000000000..0add0ad609b --- /dev/null +++ b/pyk/api/pyk.kcfg.store.html @@ -0,0 +1,141 @@ + + + + + + + + + pyk.kcfg.store module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kcfg.store module

+
+
+class OptimizedNodeStore[source]
+

Bases: MutableMapping[int, Node]

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kcfg.tui.html b/pyk/api/pyk.kcfg.tui.html new file mode 100644 index 00000000000..33e40e0b502 --- /dev/null +++ b/pyk/api/pyk.kcfg.tui.html @@ -0,0 +1,607 @@ + + + + + + + + + pyk.kcfg.tui module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kcfg.tui module

+
+
+class BehaviorView(kcfg: KCFG, kprint: KPrint, minimize: bool = True, node_printer: NodePrinter | None = None, id: str = '')[source]
+

Bases: ScrollableContainer

+
+
+class Selected[source]
+

Bases: Message

+
+
+bubble: ClassVar[bool] = True
+
+ +
+
+handler_name: ClassVar[str] = 'on_behavior_view_selected'
+

Name of the default message handler.

+
+ +
+
+no_dispatch: ClassVar[bool] = False
+
+ +
+
+time
+
+ +
+
+verbose: ClassVar[bool] = False
+
+ +
+ +
+
+can_focus: bool = True
+

Widget may receive focus.

+
+ +
+
+can_focus_children: bool = True
+

Widget’s children may receive focus.

+
+ +
+
+compose() ComposeResult[source]
+
+ +
+
+on_click(click: Click) None[source]
+
+ +
+ +
+
+class Constraint[source]
+

Bases: NavWidget

+
+
+can_focus: bool = True
+

Widget may receive focus.

+
+ +
+
+can_focus_children: bool = True
+

Widget’s children may receive focus.

+
+ +
+
+on_click(click: Click) None[source]
+
+ +
+ +
+
+class Custom[source]
+

Bases: NavWidget

+
+
+can_focus: bool = True
+

Widget may receive focus.

+
+ +
+
+can_focus_children: bool = True
+

Widget’s children may receive focus.

+
+ +
+
+on_click(click: Click) None[source]
+
+ +
+ +
+
+class GraphChunk(id: str, node_text: Iterable[str] = ())[source]
+

Bases: Static

+
+
+class Selected(chunk_id: str)[source]
+

Bases: Message

+
+
+bubble: ClassVar[bool] = True
+
+ +
+
+chunk_id: str
+
+ +
+
+handler_name: ClassVar[str] = 'on_graph_chunk_selected'
+

Name of the default message handler.

+
+ +
+
+no_dispatch: ClassVar[bool] = False
+
+ +
+
+time
+
+ +
+
+verbose: ClassVar[bool] = False
+
+ +
+ +
+
+can_focus: bool = False
+

Widget may receive focus.

+
+ +
+
+can_focus_children: bool = True
+

Widget’s children may receive focus.

+
+ +
+
+on_click(click: Click) None[source]
+
+ +
+
+on_enter() None[source]
+
+ +
+
+on_leave() None[source]
+
+ +
+ +
+
+class Info[source]
+

Bases: Widget

+
+
+can_focus: bool = False
+

Widget may receive focus.

+
+ +
+
+can_focus_children: bool = True
+

Widget’s children may receive focus.

+
+ +
+
+compose() ComposeResult[source]
+
+ +
+
+text: reactive[str]
+

Create a reactive attribute.

+
+
Parameters:
+
  • default – A default value or callable that returns a default.
  • layout – Perform a layout on change.
  • repaint – Perform a repaint on change.
  • init – Call watchers on initialize (post mount).
  • always_update – Call watchers even when the new value equals the old value.
+
+
+
+ +
+
+update(text: str) None[source]
+
+ +
+
+watch_text() None[source]
+
+ +
+ +
+
+class KCFGViewer(kcfg: KCFG, kprint: KPrint, node_printer: NodePrinter | None = None, custom_view: Callable[[KCFGElem], Iterable[str]] | None = None, minimize: bool = True)[source]
+

Bases: App

+
+
+BINDINGS: ClassVar[list[BindingType]] = [('h', 'keystroke("h")', 'Hide selected node.'), ('H', 'keystroke("H")', 'Unhide all nodes.'), ('t', 'keystroke("term")', 'Toggle term.'), ('c', 'keystroke("constraint")', 'Toggle constraint.'), ('m', 'keystroke("minimize")', 'Toggle minimization.'), ('s', 'keystroke("status")', 'Toggle status.'), Binding(key='q', action='quit', description='', show=True, key_display=None, priority=True)]
+
+ +
+
+CSS_PATH: ClassVar[CSSPathType | None] = PosixPath('/home/user/src/pyk/kcfg/style.css')
+

File paths to load CSS from.

+
+ +
+
+action_keystroke(key: str) None[source]
+
+ +
+
+compose() ComposeResult[source]
+
+ +
+
+on_graph_chunk_selected(message: Selected) None[source]
+
+ +
+ +
+
+class NavWidget(id: str)[source]
+

Bases: ScrollableContainer

+
+
+BINDINGS: ClassVar[list[BindingType]] = [('g', 'scroll_home', 'Go to vert start'), ('G', 'scroll_end', 'Go to vert end')]
+

Keyboard bindings for scrollable containers.

+
+
Key(s) | Description |
:- | :- |
up | Scroll up, if vertical scrolling is available. |
down | Scroll down, if vertical scrolling is available. |
left | Scroll left, if horizontal scrolling is available. |
right | Scroll right, if horizontal scrolling is available. |
home | Scroll to the home position, if scrolling is available. |
end | Scroll to the end position, if scrolling is available. |
pageup | Scroll up one page, if vertical scrolling is available. |
pagedown | Scroll down one page, if vertical scrolling is available. |
+
+
+ +
+
+class Selected[source]
+

Bases: Message

+
+
+bubble: ClassVar[bool] = True
+
+ +
+
+handler_name: ClassVar[str] = 'on_nav_widget_selected'
+

Name of the default message handler.

+
+ +
+
+no_dispatch: ClassVar[bool] = False
+
+ +
+
+time
+
+ +
+
+verbose: ClassVar[bool] = False
+
+ +
+ +
+
+can_focus: bool = True
+

Widget may receive focus.

+
+ +
+
+can_focus_children: bool = True
+

Widget’s children may receive focus.

+
+ +
+
+compose() ComposeResult[source]
+
+ +
+
+text: reactive[str]
+

Create a reactive attribute.

+
+
Parameters:
+
  • default – A default value or callable that returns a default.
  • layout – Perform a layout on change.
  • repaint – Perform a repaint on change.
  • init – Call watchers on initialize (post mount).
  • always_update – Call watchers even when the new value equals the old value.
+
+
+
+ +
+
+update(text: str) None[source]
+
+ +
+
+watch_text() None[source]
+
+ +
+ +
+
+class NodeView(kprint: KPrint, id: str = '', minimize: bool = True, term_on: bool = True, constraint_on: bool = True, custom_on: bool = False, status_on: bool = True, custom_view: Callable[[KCFGElem], Iterable[str]] | None = None, proof_status: str = '', proof_id: str = '', exec_time: float = 0)[source]
+

Bases: Widget

+
+
+can_focus: bool = False
+

Widget may receive focus.

+
+ +
+
+can_focus_children: bool = True
+

Widget’s children may receive focus.

+
+ +
+
+compose() ComposeResult[source]
+
+ +
+
+on_behavior_view_selected() None[source]
+
+ +
+
+on_constraint_selected() None[source]
+
+ +
+
+on_custom_selected() None[source]
+
+ +
+
+on_mount() None[source]
+
+ +
+
+on_status_selected() None[source]
+
+ +
+
+on_term_selected() None[source]
+
+ +
+
+toggle_option(field: str) bool[source]
+
+ +
+
+toggle_view(field: str) None[source]
+
+ +
+
+update(element: Node | Successor) None[source]
+
+ +
+ +
+
+class Status[source]
+

Bases: NavWidget

+
+
+can_focus: bool = True
+

Widget may receive focus.

+
+ +
+
+can_focus_children: bool = True
+

Widget’s children may receive focus.

+
+ +
+
+on_click(click: Click) None[source]
+
+ +
+ +
+
+class Term[source]
+

Bases: NavWidget

+
+
+can_focus: bool = True
+

Widget may receive focus.

+
+ +
+
+can_focus_children: bool = True
+

Widget’s children may receive focus.

+
+ +
+
+on_click(click: Click) None[source]
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kcovr.html b/pyk/api/pyk.kcovr.html new file mode 100644 index 00000000000..54497140e3e --- /dev/null +++ b/pyk/api/pyk.kcovr.html @@ -0,0 +1,189 @@ + + + + + + + + + pyk.kcovr module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kcovr module

+
+
+count_lines_covered(rule_map: Mapping[str, tuple[str, int, int]], cover_map: Mapping[str, int]) int[source]
+
+ +
+
+count_lines_file(rule_map_file: Mapping[str, tuple[int, int]]) int[source]
+
+ +
+
+count_lines_global(rule_map: Mapping[str, tuple[str, int, int]]) int[source]
+
+ +
+
+count_rules_covered(cover_map: Mapping[str, int]) int[source]
+
+ +
+
+create_cover_map(definition_dirs: Iterable[Path]) dict[str, int][source]
+
+ +
+
+create_rule_map(definition_dirs: Iterable[Path]) dict[str, tuple[str, int, int]][source]
+
+ +
+
+create_rule_map_by_file(rule_map: Mapping[str, tuple[str, int, int]]) dict[str, dict[str, tuple[int, int]]][source]
+
+ +
+
+create_rule_map_by_line(rule_map_file: Mapping[str, tuple[int, int]]) dict[int, list[str]][source]
+
+ +
+
+main() None[source]
+
+ +
+
+parse_args() tuple[tuple[Path, ...], tuple[Path, ...]][source]
+
+ +
+
+render_classes(rule_map: Mapping[str, tuple[str, int, int]], cover_map: Mapping[str, int], source_files: Iterable[Path], source_dir: Path) list[str][source]
+
+ +
+
+render_coverage_xml(definition_dirs: Iterable[Path], source_files: Iterable[Path]) str[source]
+
+ +
+
+render_lines(rule_map_file: Mapping[str, tuple[int, int]], cover_map_file: Mapping[str, int]) list[str][source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kdist.api.html b/pyk/api/pyk.kdist.api.html new file mode 100644 index 00000000000..6dd1885aecc --- /dev/null +++ b/pyk/api/pyk.kdist.api.html @@ -0,0 +1,197 @@ + + + + + + + + + pyk.kdist.api module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kdist.api module

+
+
+class Target[source]
+

Bases: ABC

+
+
+abstract build(output_dir: Path, deps: dict[str, Path], args: dict[str, Any], verbose: bool) None[source]
+
+ +
+
+context() Mapping[str, str][source]
+
+ +
+
+deps() Iterable[str][source]
+
+ +
+
+final manifest() dict[str, Any][source]
+
+ +
+
+source() Iterable[str | Path][source]
+
+ +
+ +
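
A minimal sketch of a concrete target, assuming Target is importable from pyk.kdist.api; the artifact written by build is purely illustrative:

    from pathlib import Path
    from typing import Any

    from pyk.kdist.api import Target  # import path assumed from the module name

    class HelloTarget(Target):
        def build(self, output_dir: Path, deps: dict[str, Path], args: dict[str, Any], verbose: bool) -> None:
            # Write the target's artifacts into output_dir; deps maps dependency
            # target names to their build directories.
            (output_dir / 'hello.txt').write_text('hello')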
+
+final class TargetId(plugin_name: 'str', target_name: 'str')[source]
+

Bases: object

+
+
+property full_name: str
+
+ +
+
+static parse(fqn: str) TargetId[source]
+
+ +
+
+plugin_name: str
+
+ +
+
+target_name: str
+
+ +
+ +
+
+valid_id(s: str) bool[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kdist.html b/pyk/api/pyk.kdist.html new file mode 100644 index 00000000000..a7e9c8e4e01 --- /dev/null +++ b/pyk/api/pyk.kdist.html @@ -0,0 +1,167 @@ + + + + + + + + + pyk.kdist package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/pyk/api/pyk.kdist.utils.html b/pyk/api/pyk.kdist.utils.html new file mode 100644 index 00000000000..f790c33fe0b --- /dev/null +++ b/pyk/api/pyk.kdist.utils.html @@ -0,0 +1,155 @@ + + + + + + + + + pyk.kdist.utils module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kdist.utils module

+
+
+cwd(path: Path) Iterator[None][source]
+
+ +
+
+files_for_path(path: str | Path) list[Path][source]
+
+ +
+
+package_path(obj: Any) Path[source]
+
+ +
+
+timestamp(path: Path) int[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kllvm.ast.html b/pyk/api/pyk.kllvm.ast.html new file mode 100644 index 00000000000..3d7af1750e8 --- /dev/null +++ b/pyk/api/pyk.kllvm.ast.html @@ -0,0 +1,135 @@ + + + + + + + + + pyk.kllvm.ast module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kllvm.ast module

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kllvm.compiler.html b/pyk/api/pyk.kllvm.compiler.html new file mode 100644 index 00000000000..4987931002d --- /dev/null +++ b/pyk/api/pyk.kllvm.compiler.html @@ -0,0 +1,150 @@ + + + + + + + + + pyk.kllvm.compiler module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kllvm.compiler module

+
+
+compile_kllvm(target_dir: str | Path, *, verbose: bool = False) Path[source]
+
+ +
+
+compile_runtime(definition_dir: str | Path, target_dir: str | Path | None = None, *, ccopts: Iterable[str] = (), verbose: bool = False) Path[source]
+
+ +
+
+generate_hints(definition_dir: str | Path, input_kore_file: str | Path, target_dir: str | Path | None = None, hints_file_name: str = 'hints.bin') Path[source]
+
+ +
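
A hedged sketch of building and then loading an LLVM runtime; import_runtime comes from pyk.kllvm.importer (documented separately), and the directory paths and their pairing are placeholders, not a prescribed workflow:

    from pathlib import Path

    from pyk.kllvm.compiler import compile_runtime  # import path assumed from the module name
    from pyk.kllvm.importer import import_runtime   # import path assumed from the module name

    definition_dir = Path('path/to/definition-kompiled')  # placeholder
    target_dir = Path('path/to/build')                    # placeholder
    compile_runtime(definition_dir, target_dir)           # compile the runtime module into target_dir
    runtime = import_runtime(target_dir)                  # load it as a Runtime object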
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kllvm.convert.html b/pyk/api/pyk.kllvm.convert.html new file mode 100644 index 00000000000..54aa30fb11b --- /dev/null +++ b/pyk/api/pyk.kllvm.convert.html @@ -0,0 +1,190 @@ + + + + + + + + + pyk.kllvm.convert module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kllvm.convert module

+
+
+definition_to_llvm(definition: Definition) Definition[source]
+
+ +
+
+llvm_to_definition(definition: Definition) Definition[source]
+
+ +
+
+llvm_to_module(module: Module) Module[source]
+
+ +
+
+llvm_to_pattern(pattern: kllvm.Pattern) Pattern[source]
+
+ +
+
+llvm_to_sentence(decl: kllvm.Declaration) Sentence[source]
+
+ +
+
+llvm_to_sort(sort: kllvm.Sort) Sort[source]
+
+ +
+
+llvm_to_sort_var(var: SortVariable) SortVar[source]
+
+ +
+
+module_to_llvm(module: Module) Module[source]
+
+ +
+
+pattern_to_llvm(pattern: Pattern) kllvm.Pattern[source]
+
+ +
+
+sentence_to_llvm(sentence: Sentence) kllvm.Declaration[source]
+
+ +
+
+sort_to_llvm(sort: Sort) kllvm.Sort[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kllvm.hints.html b/pyk/api/pyk.kllvm.hints.html new file mode 100644 index 00000000000..611cf25b871 --- /dev/null +++ b/pyk/api/pyk.kllvm.hints.html @@ -0,0 +1,253 @@ + + + + + + + + + pyk.kllvm.hints package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kllvm.hints package

+
+

Submodules

+
+ +
+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kllvm.hints.prooftrace.html b/pyk/api/pyk.kllvm.hints.prooftrace.html new file mode 100644 index 00000000000..9f70bd318bc --- /dev/null +++ b/pyk/api/pyk.kllvm.hints.prooftrace.html @@ -0,0 +1,848 @@ + + + + + + + + + pyk.kllvm.hints.prooftrace module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kllvm.hints.prooftrace module

+
+
+class KoreHeader(kore_header: kore_header)[source]
+

Bases: object

+

Represents the Kore header.

+

The Kore header is a file that contains the version of the Binary KORE format used to serialize/deserialize the Proof Trace, together with all the additional information needed to make processing the Proof Trace faster.

+
+
+_kore_header
+

The underlying KORE Header object.

+
+
Type:
+

kore_header

+
+
+
+ +
+
+__init__(kore_header: kore_header) None[source]
+

Initialize a new instance of the KoreHeader class.

+
+
Parameters:
+

kore_header (kore_header) – The KORE Header object.

+
+
+
+ +
+
+static create(header_path: Path) KoreHeader[source]
+

Create a new KoreHeader object from the given header file path.

+
+ +
+ +
+
+final class LLVMArgument(argument: Argument)[source]
+

Bases: object

+

Represents an LLVM argument.

+
+
+_argument
+

The underlying Argument object. An argument is a wrapper object containing either a step event or a KORE pattern.

+
+
Type:
+

Argument

+
+
+
+ +
+
+__init__(argument: Argument) None[source]
+

Initialize the LLVMArgument object.

+
+
Parameters:
+

argument (Argument) – The Argument object.

+
+
+
+ +
+
+__repr__() str[source]
+

Return a string representation of the object.

+
+
Returns:
+

Returns a string representation of the LLVMArgument object using the AST printing method.

+
+
+
+ +
+
+is_kore_pattern() bool[source]
+

Check if the argument is a KORE Pattern.

+
+ +
+
+is_step_event() bool[source]
+

Check if the argument is a step event.

+
+ +
+
+property kore_pattern: Pattern
+

Return the KORE Pattern associated with the argument if any.

+
+ +
+
+property step_event: LLVMStepEvent
+

Returns the LLVMStepEvent associated with the argument if any.

+
+ +
+ +
+
+class LLVMEventAnnotated(annotated_llvm_event: annotated_llvm_event)[source]
+

Bases: object

+

Represents an annotated LLVM event.

+

This class wraps an llvm_event together with its corresponding event type, and is used when iterating over the LLVM rewrite trace events.

+
+
+_annotated_llvm_event
+

The underlying annotated LLVM event object.

+
+
Type:
+

annotated_llvm_event

+
+
+
+ +
+
+__init__(annotated_llvm_event: annotated_llvm_event) None[source]
+

Initialize a new instance of the LLVMEventAnnotated class.

+
+
Parameters:
+

annotated_llvm_event (annotated_llvm_event) – The annotated LLVM event object.

+
+
+
+ +
+
+property event: LLVMArgument
+

Returns the LLVM event as an LLVMArgument object.

+
+ +
+
+property type: LLVMEventType
+

Returns the LLVM event type.

+
+ +
+ +
+
+class LLVMEventType(event_type: EventType)[source]
+

Bases: object

+

Represents an LLVM event type.

+

This works as a wrapper around the EventType enum. It also provides properties to check the type of the event.

+
+
+_event_type
+

The underlying EventType object.

+
+
Type:
+

EventType

+
+
+
+ +
+
+__init__(event_type: EventType) None[source]
+

Initialize a new instance of the LLVMEventType class.

+
+
Parameters:
+

event_type (EventType) – The EventType object.

+
+
+
+ +
+
+property is_initial_config: bool
+

Checks if the event type is an initial configuration event.

+
+ +
+
+property is_pre_trace: bool
+

Checks if the event type is a pre-trace event.

+
+ +
+
+property is_trace: bool
+

Checks if the event type is a trace event.

+
+ +
+ +
+
+final class LLVMFunctionEvent(function_event: llvm_function_event)[source]
+

Bases: LLVMStepEvent

+

Represent an LLVM function event in a proof trace.

+
+
+_function_event
+

The underlying LLVM function event object.

+
+
Type:
+

llvm_function_event

+
+
+
+ +
+
+__init__(function_event: llvm_function_event) None[source]
+

Initialize a new instance of the LLVMFunctionEvent class.

+
+
Parameters:
+

function_event (llvm_function_event) – The LLVM function event object.

+
+
+
+ +
+
+__repr__() str[source]
+

Return a string representation of the object.

+
+
Returns:
+

A string representation of the LLVMFunctionEvent object using the AST printing method.

+
+
+
+ +
+
+property args: list[LLVMArgument]
+

Return a list of LLVMArgument objects representing the arguments of the LLVM function.

+
+ +
+
+property name: str
+

Return the name of the LLVM function as a KORE Symbol Name.

+
+ +
+
+property relative_position: str
+

Return the relative position of the LLVM function event in the proof trace.

+
+ +
+ +
+
+final class LLVMHookEvent(hook_event: llvm_hook_event)[source]
+

Bases: LLVMStepEvent

+

Represents a hook event in LLVM execution.

+
+
+_hook_event
+

The underlying hook event object.

+
+
Type:
+

llvm_hook_event

+
+
+
+ +
+
+__init__(hook_event: llvm_hook_event) None[source]
+

Initialize a new instance of the LLVMHookEvent class.

+
+
Parameters:
+

hook_event (llvm_hook_event) – The LLVM hook event object.

+
+
+
+ +
+
+__repr__() str[source]
+

Return a string representation of the object.

+
+
Returns:
+

A string representation of the LLVMHookEvent object using the AST printing method.

+
+
+
+ +
+
+property args: list[LLVMArgument]
+

Return a list of LLVMArgument objects representing the arguments of the hook event.

+
+ +
+
+property name: str
+

Return the attribute name of the hook event, e.g. “INT.add”.

+
+
+
+ +
+
+property relative_position: str
+

Return the relative position of the hook event in the proof trace.

+
+ +
+
+property result: Pattern
+

Return the result pattern of the hook event evaluation.

+
+ +
+ +
+
+final class LLVMPatternMatchingFailureEvent(pattern_matching_failure_event: llvm_pattern_matching_failure_event)[source]
+

Bases: LLVMStepEvent

+

Represents an LLVM pattern matching failure event.

+

This event is used to indicate that the pattern matching failed during the rewriting process.

+
+
+_pattern_matching_failure_event
+

The underlying pattern matching failure event.

+
+
Type:
+

llvm_pattern_matching_failure_event

+
+
+
+ +
+
+__init__(pattern_matching_failure_event: llvm_pattern_matching_failure_event) None[source]
+

Initialize a new instance of the LLVMPatternMatchingFailureEvent class.

+
+
Parameters:
+

pattern_matching_failure_event (llvm_pattern_matching_failure_event) – The LLVM pattern matching failure event object.

+
+
+
+ +
+
+__repr__() str[source]
+

Return a string representation of the object.

+
+
Returns:
+

A string representation of the LLVMPatternMatchingFailureEvent object using the AST printing method.

+
+
+
+ +
+
+property function_name: str
+

Return the name of the function that failed to match the pattern.

+
+ +
+ +
+
+class LLVMRewriteEvent[source]
+

Bases: LLVMStepEvent

+

Represents LLVM rewrite event.

+
+
+abstract property rule_ordinal: int
+

Return the axiom ordinal number of the rewrite rule.

+

The rule ordinal represents the nth axiom in the kore definition.

+
+ +
+
+abstract property substitution: dict[str, Pattern]
+

Returns the substitution dictionary used to perform the rewrite represented by this event.

+
+ +
+ +
+
+final class LLVMRewriteTrace(rewrite_trace: llvm_rewrite_trace)[source]
+

Bases: object

+

Represents an LLVM rewrite trace.

+
+
+_rewrite_trace
+

The underlying LLVM rewrite trace object.

+
+
Type:
+

llvm_rewrite_trace

+
+
+
+ +
+
+__init__(rewrite_trace: llvm_rewrite_trace) None[source]
+

Initialize a new instance of the LLVMRewriteTrace class.

+
+
Parameters:
+

rewrite_trace (llvm_rewrite_trace) – The LLVM rewrite trace object.

+
+
+
+ +
+
+__repr__() str[source]
+

Return a string representation of the object.

+
+
Returns:
+

A string representation of the LLVMRewriteTrace object using the AST printing method.

+
+
+
+ +
+
+property initial_config: LLVMArgument
+

Returns the initial configuration as an LLVMArgument object.

+
+ +
+
+static parse(trace: bytes, header: KoreHeader) LLVMRewriteTrace[source]
+

Parse the given proof hints byte string using the given kore_header object.

+
+ +
+
+property pre_trace: list[LLVMArgument]
+

Returns a list of events that occurred before the initial configuration was constructed.

+
+ +
+
+property trace: list[LLVMArgument]
+

Returns the trace.

+

The trace is the list of events that occurred after the initial configuration was constructed, up to the end of the proof trace when the final configuration is reached.

+
+ +
+
+property version: int
+

Returns the version of the binary hints format used by this trace.

+
+ +
+ +
+
+class LLVMRewriteTraceIterator(rewrite_trace_iterator: llvm_rewrite_trace_iterator)[source]
+

Bases: object

+

Represents an LLVM rewrite trace iterator.

+

This class is used to iterate over the LLVM rewrite trace events in the stream parser.

+
+
+_rewrite_trace_iterator
+

The underlying LLVM rewrite trace iterator object.

+
+
Type:
+

llvm_rewrite_trace_iterator

+
+
+
+ +
+
+__init__(rewrite_trace_iterator: llvm_rewrite_trace_iterator) None[source]
+

Initialize a new instance of the LLVMRewriteTraceIterator class.

+
+
Parameters:
+

rewrite_trace_iterator (llvm_rewrite_trace_iterator) – The LLVM rewrite trace iterator object.

+
+
+
+ +
+
+__iter__() Generator[LLVMEventAnnotated, None, None][source]
+

Yield LLVMEventAnnotated objects.

+

This method is an iterator that yields LLVMEventAnnotated objects. It iterates over the events in the trace and returns the next event as an LLVMEventAnnotated object.

+
+
Yields:
+

LLVMEventAnnotated – The next LLVMEventAnnotated option.

+
+
+
+ +
+
+__next__() LLVMEventAnnotated[source]
+

Yield the next LLVMEventAnnotated object from the iterator.

+
+
Returns:
+

The next LLVMEventAnnotated object.

+
+
Return type:
+

LLVMEventAnnotated

+
+
Raises:
+

StopIteration – If there are no more events in the iterator.

+
+
+
+ +
+
+__repr__() str[source]
+

Return a string representation of the object.

+
+
Returns:
+

A string representation of the LLVMRewriteTraceIterator object using the AST printing method.

+
+
+
+ +
+
+static from_file(trace_path: Path, header: KoreHeader) LLVMRewriteTraceIterator[source]
+

Create a new LLVMRewriteTraceIterator object from the given trace and header file paths.

+
+ +
+
+property version: int
+

Return the version of the HINTS format.

+
+ +
+ +
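
A hedged sketch of streaming proof hints from disk, assuming the classes are importable from pyk.kllvm.hints.prooftrace and that header.bin and hints.bin are placeholder file names produced by the LLVM backend:

    from pathlib import Path

    from pyk.kllvm.hints.prooftrace import KoreHeader, LLVMRewriteTraceIterator  # import path assumed

    header = KoreHeader.create(Path('header.bin'))                      # placeholder path
    it = LLVMRewriteTraceIterator.from_file(Path('hints.bin'), header)  # placeholder path
    for event in it:
        # Each item is an LLVMEventAnnotated; its type separates pre-trace from trace events.
        if event.type.is_trace and event.event.is_step_event():
            print(event.event.step_event)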
+
+final class LLVMRuleEvent(rule_event: llvm_rule_event)[source]
+

Bases: LLVMRewriteEvent

+

Represents an LLVM rule event.

+
+
+_rule_event
+

The underlying LLVM rule event.

+
+
Type:
+

llvm_rule_event

+
+
+
+ +
+
+__init__(rule_event: llvm_rule_event) None[source]
+

Initialize a new instance of the LLVMRuleEvent class.

+
+
Parameters:
+

rule_event (llvm_rule_event) – The LLVM rule event object.

+
+
+
+ +
+
+__repr__() str[source]
+

Return a string representation of the object.

+
+
Returns:
+

A string representation of the LLVMRuleEvent object using the AST printing method.

+
+
+
+ +
+
+property rule_ordinal: int
+

Returns the axiom ordinal number of the rule event.

+
+ +
+
+property substitution: dict[str, Pattern]
+

Returns the substitution dictionary used to perform the rewrite represented by this rule event.

+
+ +
+ +
+
+final class LLVMSideConditionEventEnter(side_condition_event: llvm_side_condition_event)[source]
+

Bases: LLVMRewriteEvent

+

Represents an event that enters a side condition in LLVM rewriting.

+

This event is used to check the side condition of a rule. Mostly used in ensures/requires clauses.

+
+
+_side_condition_event
+

The underlying side condition event.

+
+
Type:
+

llvm_side_condition_event

+
+
+
+ +
+
+__init__(side_condition_event: llvm_side_condition_event) None[source]
+

Initialize a new instance of the LLVMSideConditionEventEnter class.

+
+
Parameters:
+

side_condition_event (llvm_side_condition_event) – The LLVM side condition event object.

+
+
+
+ +
+
+__repr__() str[source]
+

Return a string representation of the object.

+
+
Returns:
+

A string representation of the LLVMSideConditionEventEnter object using the AST printing method.

+
+
+
+ +
+
+property rule_ordinal: int
+

Returns the axiom ordinal number associated with the side condition event.

+
+ +
+
+property substitution: dict[str, Pattern]
+

Returns the substitution dictionary used to perform the rewrite represented by this side condition event.

+
+ +
+ +
+
+final class LLVMSideConditionEventExit(side_condition_end_event: llvm_side_condition_end_event)[source]
+

Bases: LLVMStepEvent

+

Represents an LLVM side condition event indicating the exit of a side condition.

+

This event contains the result of the side condition evaluation.

+
+
+_side_condition_end_event
+

The underlying side condition end event.

+
+
Type:
+

llvm_side_condition_end_event

+
+
+
+ +
+
+__init__(side_condition_end_event: llvm_side_condition_end_event) None[source]
+

Initialize a new instance of the LLVMSideConditionEventExit class.

+
+
Parameters:
+

side_condition_end_event (llvm_side_condition_end_event) – The LLVM side condition end event object.

+
+
+
+ +
+
+__repr__() str[source]
+

Return a string representation of the object.

+
+
Returns:
+

A string representation of the LLVMSideConditionEventExit object using the AST printing method.

+
+
+
+ +
+
+property check_result: bool
+

Return the boolean result of the evaluation of the side condition that corresponds to this event.

+
+ +
+
+property rule_ordinal: int
+

Return the axiom ordinal number associated with the side condition event.

+
+ +
+ +
+
+class LLVMStepEvent[source]
+

Bases: ABC

+

Abstract base class representing an LLVM step event.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kllvm.html b/pyk/api/pyk.kllvm.html new file mode 100644 index 00000000000..7df5e125b99 --- /dev/null +++ b/pyk/api/pyk.kllvm.html @@ -0,0 +1,237 @@ + + + + + + + + + pyk.kllvm package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kllvm.importer.html b/pyk/api/pyk.kllvm.importer.html new file mode 100644 index 00000000000..591b7ec9ce8 --- /dev/null +++ b/pyk/api/pyk.kllvm.importer.html @@ -0,0 +1,155 @@ + + + + + + + + + pyk.kllvm.importer module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kllvm.importer module

+
+
+import_from_file(module_name: str, module_file: str | Path) ModuleType[source]
+
+ +
+
+import_kllvm(target_dir: str | Path) ModuleType[source]
+
+ +
+
+import_runtime(target_dir: str | Path) Runtime[source]
+
+ +
+
+rtld_local() Iterator[None][source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kllvm.load.html b/pyk/api/pyk.kllvm.load.html new file mode 100644 index 00000000000..c27ebfd5397 --- /dev/null +++ b/pyk/api/pyk.kllvm.load.html @@ -0,0 +1,135 @@ + + + + + + + + + pyk.kllvm.load module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kllvm.load module

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kllvm.load_static.html b/pyk/api/pyk.kllvm.load_static.html new file mode 100644 index 00000000000..70b3fe2f079 --- /dev/null +++ b/pyk/api/pyk.kllvm.load_static.html @@ -0,0 +1,140 @@ + + + + + + + + + pyk.kllvm.load_static module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kllvm.load_static module

+
+
+get_kllvm() Path[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kllvm.parser.html b/pyk/api/pyk.kllvm.parser.html new file mode 100644 index 00000000000..407bb07acb0 --- /dev/null +++ b/pyk/api/pyk.kllvm.parser.html @@ -0,0 +1,165 @@ + + + + + + + + + pyk.kllvm.parser module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kllvm.parser module

+
+
+parse_definition(text: str) Definition[source]
+
+ +
+
+parse_definition_file(path: str | Path) Definition[source]
+
+ +
+
+parse_pattern(text: str) Pattern[source]
+
+ +
+
+parse_pattern_file(path: str | Path) Pattern[source]
+
+ +
+
+parse_sort(text: str) Sort[source]
+
+ +
+
+parse_sort_file(path: str | Path) Pattern[source]
+
+ +
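
A small sketch, assuming the functions are importable from pyk.kllvm.parser and that the KORE text is purely illustrative:

    from pyk.kllvm.parser import parse_pattern, parse_sort  # import path assumed from the module name

    pattern = parse_pattern(r'\dv{SortInt{}}("42")')
    sort = parse_sort('SortInt{}')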
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kllvm.runtime.html b/pyk/api/pyk.kllvm.runtime.html new file mode 100644 index 00000000000..aea6b564dc8 --- /dev/null +++ b/pyk/api/pyk.kllvm.runtime.html @@ -0,0 +1,202 @@ + + + + + + + + + pyk.kllvm.runtime module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kllvm.runtime module

+
+
+class Runtime(module: ModuleType)[source]
+

Bases: object

+
+
+deserialize(bs: bytes) Term | None[source]
+
+ +
+
+evaluate(pattern: CompositePattern) Pattern[source]
+
+ +
+
+run(pattern: Pattern) Pattern[source]
+
+ +
+
+simplify(pattern: Pattern, sort: Sort) Pattern[source]
+
+ +
+
+simplify_bool(pattern: Pattern) bool[source]
+
+ +
+
+step(pattern: Pattern, depth: int | None = 1) Pattern[source]
+
+ +
+
+term(pattern: Pattern) Term[source]
+
+ +
+ +
+
+class Term(block: Any)[source]
+

Bases: object

+
+
+property pattern: Pattern
+
+ +
+
+run() None[source]
+
+ +
+
+serialize() bytes[source]
+
+ +
+
+step(depth: int | None = 1) None[source]
+
+ +
+ +
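
A hedged sketch of rewriting with a loaded runtime, assuming runtime is a Runtime instance (for example, obtained via pyk.kllvm.importer.import_runtime) and pattern is a parsed KORE Pattern:

    def step_once(runtime, pattern):
        # Take a single rewrite step; depth=None would rewrite to completion.
        stepped = runtime.step(pattern, depth=1)

        # Alternatively, wrap the pattern in a Term to step it in place and serialize it.
        term = runtime.term(pattern)
        term.step(depth=1)
        return stepped, term.serialize()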
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kllvm.utils.html b/pyk/api/pyk.kllvm.utils.html new file mode 100644 index 00000000000..8eb4cbd7e96 --- /dev/null +++ b/pyk/api/pyk.kllvm.utils.html @@ -0,0 +1,140 @@ + + + + + + + + + pyk.kllvm.utils module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kllvm.utils module

+
+
+get_requires(axiom: Axiom) Pattern | None[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.konvert.html b/pyk/api/pyk.konvert.html new file mode 100644 index 00000000000..af0f99d81e0 --- /dev/null +++ b/pyk/api/pyk.konvert.html @@ -0,0 +1,134 @@ + + + + + + + + + pyk.konvert package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.konvert package

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore.html b/pyk/api/pyk.kore.html new file mode 100644 index 00000000000..043e834ee6c --- /dev/null +++ b/pyk/api/pyk.kore.html @@ -0,0 +1,1270 @@ + + + + + + + + + pyk.kore package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kore package

+
+

Submodules

+
+ +
+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore.kompiled.html b/pyk/api/pyk.kore.kompiled.html new file mode 100644 index 00000000000..00a551f585d --- /dev/null +++ b/pyk/api/pyk.kore.kompiled.html @@ -0,0 +1,238 @@ + + + + + + + + + pyk.kore.kompiled module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kore.kompiled module

+
+
+final class KompiledKore(sort_table: 'KoreSortTable', symbol_table: 'KoreSymbolTable')[source]
+

Bases: object

+
+
+add_injections(pattern: Pattern, sort: Sort | None = None) Pattern[source]
+
+ +
+
+static for_definition(definition: Definition) KompiledKore[source]
+
+ +
+
+static from_dict(dct: dict[str, Any]) KompiledKore[source]
+
+ +
+
+static load(definition_dir: str | Path) KompiledKore[source]
+
+ +
+
+static load_from_json(json_file: str | Path) KompiledKore[source]
+
+ +
+
+static load_from_kore(kore_file: str | Path) KompiledKore[source]
+
+ +
+
+sort_table: KoreSortTable
+
+ +
+
+symbol_table: KoreSymbolTable
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+
+write(definition_dir: str | Path) None[source]
+
+ +
+ +
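
A brief sketch of loading the compiled KORE tables and decorating a pattern with sort injections, assuming definition_dir points at a kompiled directory and that the class is importable from pyk.kore.kompiled:

    from pyk.kore.kompiled import KompiledKore  # import path assumed from the module name

    def with_injections(definition_dir, pattern):
        kompiled_kore = KompiledKore.load(definition_dir)
        return kompiled_kore.add_injections(pattern)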
+
+final class KoreSortTable(subsorts: 'Iterable[tuple[Sort, Sort]]')[source]
+

Bases: object

+
+
+static for_definition(definition: Definition) KoreSortTable[source]
+
+ +
+
+is_subsort(sort1: Sort, sort2: Sort) bool[source]
+
+ +
+
+meet(sort1: Sort, sort2: Sort) Sort[source]
+
+ +
+ +
+
+final class KoreSymbolTable(symbol_decls: 'Iterable[SymbolDecl]' = ())[source]
+

Bases: object

+
+
+static for_definition(definition: Definition, *, with_ml_symbols: bool = True) KoreSymbolTable[source]
+
+ +
+
+infer_sort(pattern: Pattern) Sort[source]
+
+ +
+
+pattern_sorts(pattern: Pattern) tuple[Sort, ...][source]
+
+ +
+
+resolve(symbol_id: str, sorts: Iterable[Sort] = ()) tuple[Sort, tuple[Sort, ...]][source]
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore.lexer.html b/pyk/api/pyk.kore.lexer.html new file mode 100644 index 00000000000..92cd48923c2 --- /dev/null +++ b/pyk/api/pyk.kore.lexer.html @@ -0,0 +1,390 @@ + + + + + + + + + pyk.kore.lexer module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kore.lexer module

+
+
+class KoreToken(text, type)[source]
+

Bases: NamedTuple

+
+
+text: str
+

Alias for field number 0

+
+ +
+
+type: TokenType
+

Alias for field number 1

+
+ +
+ +
+
+class TokenType(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+COLON = 2
+
+ +
+
+COMMA = 1
+
+ +
+
+EOF = 0
+
+ +
+
+ID = 11
+
+ +
+
+KW_ALIAS = 43
+
+ +
+
+KW_AXIOM = 41
+
+ +
+
+KW_CLAIM = 42
+
+ +
+
+KW_ENDMODULE = 35
+
+ +
+
+KW_HOOKED_SORT = 38
+
+ +
+
+KW_HOOKED_SYMBOL = 40
+
+ +
+
+KW_IMPORT = 36
+
+ +
+
+KW_MODULE = 34
+
+ +
+
+KW_SORT = 37
+
+ +
+
+KW_SYMBOL = 39
+
+ +
+
+KW_WHERE = 44
+
+ +
+
+LBRACE = 6
+
+ +
+
+LBRACK = 8
+
+ +
+
+LPAREN = 4
+
+ +
+
+ML_AND = 17
+
+ +
+
+ML_BOTTOM = 15
+
+ +
+
+ML_CEIL = 25
+
+ +
+
+ML_DV = 31
+
+ +
+
+ML_EQUALS = 27
+
+ +
+
+ML_EXISTS = 21
+
+ +
+
+ML_FLOOR = 26
+
+ +
+
+ML_FORALL = 22
+
+ +
+
+ML_IFF = 20
+
+ +
+
+ML_IMPLIES = 19
+
+ +
+
+ML_IN = 28
+
+ +
+
+ML_LEFT_ASSOC = 32
+
+ +
+
+ML_MU = 23
+
+ +
+
+ML_NEXT = 29
+
+ +
+
+ML_NOT = 16
+
+ +
+
+ML_NU = 24
+
+ +
+
+ML_OR = 18
+
+ +
+
+ML_REWRITES = 30
+
+ +
+
+ML_RIGHT_ASSOC = 33
+
+ +
+
+ML_TOP = 14
+
+ +
+
+RBRACE = 7
+
+ +
+
+RBRACK = 9
+
+ +
+
+RPAREN = 5
+
+ +
+
+SET_VAR_ID = 13
+
+ +
+
+STRING = 10
+
+ +
+
+SYMBOL_ID = 12
+
+ +
+
+WALRUS = 3
+
+ +
+ +
+
+kore_lexer(it: Iterable[str]) Iterator[KoreToken][source]
+
+ +
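
A small sketch of tokenizing a KORE string, assuming kore_lexer is importable from pyk.kore.lexer; the input text is illustrative:

    from pyk.kore.lexer import kore_lexer  # import path assumed from the module name

    for token in kore_lexer('SortInt{}'):
        # Each KoreToken is a named tuple of (text, type).
        print(token.text, token.type)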
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore.manip.html b/pyk/api/pyk.kore.manip.html new file mode 100644 index 00000000000..417f092534c --- /dev/null +++ b/pyk/api/pyk.kore.manip.html @@ -0,0 +1,145 @@ + + + + + + + + + pyk.kore.manip module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kore.manip module

+
+
+conjuncts(pattern: Pattern) tuple[Pattern, ...][source]
+
+ +
+
+free_occs(pattern: Pattern, *, bound_vars: Collection[str] = ()) dict[str, list[EVar]][source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore.match.html b/pyk/api/pyk.kore.match.html new file mode 100644 index 00000000000..7315690eb1a --- /dev/null +++ b/pyk/api/pyk.kore.match.html @@ -0,0 +1,272 @@ + + + + + + + + + pyk.kore.match module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kore.match module

+
+
+app(symbol: str | None = None) Callable[[Pattern], App][source]
+
+ +
+
+arg(n: int, /) Callable[[App], Pattern][source]
+
+arg(symbol: str, /) Callable[[App], App]
+
+ +
+
+args() Callable[[App], tuple[()]][source]
+
+args(n1: int, /) Callable[[App], tuple[Pattern]]
+
+args(n1: int, n2: int, /) Callable[[App], tuple[Pattern, Pattern]]
+
+args(n1: int, n2: int, n3: int, /) Callable[[App], tuple[Pattern, Pattern, Pattern]]
+
+args(n1: int, n2: int, n3: int, n4: int, /) Callable[[App], tuple[Pattern, Pattern, Pattern, Pattern]]
+
+args(*ns: int) Callable[[App], tuple[Pattern, ...]]
+
+args(s1: str, /) Callable[[App], tuple[App]]
+
+args(s1: str, s2: str, /) Callable[[App], tuple[App, App]]
+
+args(s1: str, s2: str, s3: str, /) Callable[[App], tuple[App, App, App]]
+
+args(s1: str, s2: str, s3: str, s4: str, /) Callable[[App], tuple[App, App, App, App]]
+
+args(*ss: str) Callable[[App], tuple[App, ...]]
+
+ +
+
+case_symbol(*cases: tuple[str, Callable[[App], T]], default: Callable[[App], T] | None = None) Callable[[Pattern], T][source]
+
+ +
+
+inj(pattern: Pattern) Pattern[source]
+
+ +
+
+kore_bool(pattern: Pattern) bool[source]
+
+ +
+
+kore_bytes(pattern: Pattern) bytes[source]
+
+ +
+
+kore_id(pattern: Pattern) str[source]
+
+ +
+
+kore_int(pattern: Pattern) int[source]
+
+ +
+
+kore_list_of(item: Callable[[Pattern], T]) Callable[[Pattern], tuple[T, ...]][source]
+
+ +
+
+kore_map_of(key: Callable[[Pattern], K], value: Callable[[Pattern], V], *, cell: str | None = None) Callable[[Pattern], tuple[tuple[K, V], ...]][source]
+
+ +
+
+kore_rangemap_of(key: Callable[[Pattern], K], value: Callable[[Pattern], V]) Callable[[Pattern], tuple[tuple[tuple[K, K], V], ...]][source]
+
+ +
+
+kore_set_of(item: Callable[[Pattern], T]) Callable[[Pattern], tuple[T, ...]][source]
+
+ +
+
+kore_str(pattern: Pattern) str[source]
+
+ +
+
+match_app(pattern: Pattern, symbol: str | None = None) App[source]
+
+ +
+
+match_dv(pattern: Pattern, sort: Sort | None = None) DV[source]
+
+ +
+
+match_inj(pattern: Pattern) App[source]
+
+ +
+
+match_left_assoc(pattern: Pattern, symbol: str | None = None) LeftAssoc[source]
+
+ +
+
+match_list(pattern: Pattern) tuple[Pattern, ...][source]
+
+ +
+
+match_map(pattern: Pattern, *, cell: str | None = None) tuple[tuple[Pattern, Pattern], ...][source]
+
+ +
+
+match_rangemap(pattern: Pattern) tuple[tuple[tuple[Pattern, Pattern], Pattern], ...][source]
+
+ +
+
+match_set(pattern: Pattern) tuple[Pattern, ...][source]
+
+ +
+
+match_symbol(actual: str, expected: str) None[source]
+
+ +
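
A brief sketch of composing the matcher combinators, assuming they are importable from pyk.kore.match; the symbol name used below is a hypothetical example, not one defined by the library:

    from pyk.kore.match import app, arg, kore_int  # import path assumed from the module name

    # Check that a pattern is an application of the (hypothetical) symbol,
    # then read its first argument as a Python int.
    match_add = app("Lbl'UndsPlus'Int'Unds'")  # Pattern -> App, checking the symbol
    first_arg = arg(0)                         # App -> Pattern, by positional index

    def lhs_as_int(pattern):
        return kore_int(first_arg(match_add(pattern)))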
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore.parser.html b/pyk/api/pyk.kore.parser.html new file mode 100644 index 00000000000..85bdc4ee662 --- /dev/null +++ b/pyk/api/pyk.kore.parser.html @@ -0,0 +1,376 @@ + + + + + + + + + pyk.kore.parser module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kore.parser module

+
+
+class KoreParser(text: str)[source]
+

Bases: object

+
+
+alias_decl() AliasDecl[source]
+
+ +
+
+andd() And[source]
+
+ +
+
+app() App[source]
+
+ +
+
+axiom() Axiom[source]
+
+ +
+
+bottom() Bottom[source]
+
+ +
+
+ceil() Ceil[source]
+
+ +
+
+claim() Claim[source]
+
+ +
+
+definition() Definition[source]
+
+ +
+
+dv() DV[source]
+
+ +
+
+elem_var() EVar[source]
+
+ +
+
+property eof: bool
+
+ +
+
+equals() Equals[source]
+
+ +
+
+exists() Exists[source]
+
+ +
+
+floor() Floor[source]
+
+ +
+
+forall() Forall[source]
+
+ +
+
+hooked_sort_decl() SortDecl[source]
+
+ +
+
+hooked_symbol_decl() SymbolDecl[source]
+
+ +
+
+id() str[source]
+
+ +
+
+iff() Iff[source]
+
+ +
+
+implies() Implies[source]
+
+ +
+
+importt() Import[source]
+
+ +
+
+inn() In[source]
+
+ +
+
+left_assoc() LeftAssoc[source]
+
+ +
+
+ml_pattern() MLPattern[source]
+
+ +
+
+module() Module[source]
+
+ +
+
+mu() Mu[source]
+
+ +
+
+multi_or() list[Pattern][source]
+
+ +
+
+next() Next[source]
+
+ +
+
+nott() Not[source]
+
+ +
+
+nu() Nu[source]
+
+ +
+
+orr() Or[source]
+
+ +
+
+pattern() Pattern[source]
+
+ +
+
+rewrites() Rewrites[source]
+
+ +
+
+right_assoc() RightAssoc[source]
+
+ +
+
+sentence() Sentence[source]
+
+ +
+
+set_var() SVar[source]
+
+ +
+
+set_var_id() str[source]
+
+ +
+
+sort() Sort[source]
+
+ +
+
+sort_app() SortApp[source]
+
+ +
+
+sort_decl() SortDecl[source]
+
+ +
+
+sort_var() SortVar[source]
+
+ +
+
+string() String[source]
+
+ +
+
+symbol() Symbol[source]
+
+ +
+
+symbol_decl() SymbolDecl[source]
+
+ +
+
+symbol_id() str[source]
+
+ +
+
+top() Top[source]
+
+ +
+
+var_pattern() VarPattern[source]
+
+ +
+ +
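
A small sketch of parsing a single KORE pattern from text, assuming the class is importable from pyk.kore.parser; the pattern string is illustrative:

    from pyk.kore.parser import KoreParser  # import path assumed from the module name

    parser = KoreParser(r'\dv{SortInt{}}("42")')
    pattern = parser.pattern()  # parse one pattern from the input
    assert parser.eof           # the whole input has been consumed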
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore.pool.html b/pyk/api/pyk.kore.pool.html new file mode 100644 index 00000000000..62ba0a10e34 --- /dev/null +++ b/pyk/api/pyk.kore.pool.html @@ -0,0 +1,151 @@ + + + + + + + + + pyk.kore.pool module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kore.pool module

+
+
+class KoreServerPool(create_server: Callable[[], KoreServer], *, max_workers: int | None = None)[source]
+

Bases: ContextManager[KoreServerPool]

+
+
+close() None[source]
+
+ +
+
+submit(fn: Callable[Concatenate[int, P], T], /, *args: P.args, **kwargs: P.kwargs) Future[T][source]
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore.prelude.html b/pyk/api/pyk.kore.prelude.html new file mode 100644 index 00000000000..6f7d0cf2b80 --- /dev/null +++ b/pyk/api/pyk.kore.prelude.html @@ -0,0 +1,330 @@ + + + + + + + + + pyk.kore.prelude module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kore.prelude module

+
+
+and_bool(left: Pattern, right: Pattern) Pattern[source]
+
+ +
+
+bool_dv(val: bool) DV[source]
+
+ +
+
+bytes_dv(val: bytes) DV[source]
+
+ +
+
+dv(val: bool | int | bytes | str) DV[source]
+
+ +
+
+eq_bool(left: Pattern, right: Pattern) Pattern[source]
+
+ +
+
+eq_int(left: Pattern, right: Pattern) Pattern[source]
+
+ +
+
+ge_int(left: Pattern, right: Pattern) Pattern[source]
+
+ +
+
+generated_counter(pattern: Pattern) App[source]
+
+ +
+
+generated_top(patterns: Iterable[Pattern]) App[source]
+
+ +
+
+gt_int(left: Pattern, right: Pattern) Pattern[source]
+
+ +
+
+implies_bool(left: Pattern, right: Pattern) Pattern[source]
+
+ +
+
+init_generated_top_cell(pattern: Pattern) App[source]
+
+ +
+
+inj(sort1: Sort, sort2: Sort, pattern: Pattern) App[source]
+
+ +
+
+int_dv(val: int) DV[source]
+
+ +
+
+json2string(pattern: Pattern) App[source]
+
+ +
+
+json_entry(key: Pattern, value: Pattern) App[source]
+
+ +
+
+json_key(key: str) App[source]
+
+ +
+
+json_list(pattern: Pattern) App[source]
+
+ +
+
+json_object(pattern: Pattern) App[source]
+
+ +
+
+json_to_kore(data: Any) Pattern[source]
+
+ +
+
+jsons(patterns: Iterable[Pattern]) RightAssoc[source]
+
+ +
+
+k(pattern: Pattern) App[source]
+
+ +
+
+k_config_var(var: str) DV[source]
+
+ +
+
+kore_to_json(pattern: Pattern) Any[source]
+
+ +
+
+kseq(kitems: Iterable[Pattern], *, dotvar: EVar | None = None) Pattern[source]
+
+ +
+
+le_int(left: Pattern, right: Pattern) Pattern[source]
+
+ +
+
+list_pattern(*args: Pattern) Pattern[source]
+
+ +
+
+lt_int(left: Pattern, right: Pattern) Pattern[source]
+
+ +
+
+map_pattern(*args: tuple[Pattern, Pattern], cell: str | None = None) Pattern[source]
+
+ +
+
+ne_bool(left: Pattern, right: Pattern) Pattern[source]
+
+ +
+
+ne_int(left: Pattern, right: Pattern) Pattern[source]
+
+ +
+
+not_bool(pattern: Pattern) Pattern[source]
+
+ +
+
+or_bool(left: Pattern, right: Pattern) Pattern[source]
+
+ +
+
+rangemap_pattern(*args: tuple[tuple[Pattern, Pattern], Pattern]) Pattern[source]
+
+ +
+
+set_pattern(*args: Pattern) Pattern[source]
+
+ +
+
+str_dv(val: str) DV[source]
+
+ +
+
+string2json(pattern: Pattern) App[source]
+
+ +
+
+top_cell_initializer(config: Mapping[str, Pattern]) App[source]
+
+ +
+
+xor_bool(left: Pattern, right: Pattern) Pattern[source]
+
+ +
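
A hedged sketch of assembling a small initial configuration with the prelude helpers; the '$PGM' configuration variable name is illustrative, and the exact shape a given definition expects may differ:

    from pyk.kore.prelude import int_dv, top_cell_initializer  # import path assumed from the module name

    # Map the '$PGM' configuration variable to the integer domain value 42
    # and wrap it in a generated-top-cell initializer application.
    config = top_cell_initializer({'$PGM': int_dv(42)})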
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore.rpc.html b/pyk/api/pyk.kore.rpc.html new file mode 100644 index 00000000000..44fd8057ef9 --- /dev/null +++ b/pyk/api/pyk.kore.rpc.html @@ -0,0 +1,1361 @@ + + + + + + + + + pyk.kore.rpc module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kore.rpc module

+
+
+final class AbortedResult(state: 'State', depth: 'int', unknown_predicate: 'Pattern | None', logs: 'tuple[LogEntry, ...]')[source]
+

Bases: ExecuteResult

+
+
+depth: int
+
+ +
+
+classmethod from_dict(dct: Mapping[str, Any]) AbortedResult[source]
+
+ +
+
+logs: tuple[LogEntry, ...]
+
+ +
+
+next_states: tuple[State, ...] | None = None
+
+ +
+
+reason: ClassVar[StopReason] = 'aborted'
+
+ +
+
+rule: str | None = None
+
+ +
+
+state: State
+
+ +
+
+unknown_predicate: Pattern | None
+
+ +
+ +
+
+class BoosterServer(args: BoosterServerArgs)[source]
+

Bases: KoreServer

+
+ +
+
+class BoosterServerArgs[source]
+

Bases: dict

+
+
+bug_report: BugReport | None
+
+ +
+
+command: str | Iterable[str] | None
+
+ +
+
+fallback_on: Iterable[str | FallbackReason] | None
+
+ +
+
+haskell_log_entries: Iterable[str] | None
+
+ +
+
+haskell_log_format: KoreExecLogFormat | None
+
+ +
+
+haskell_threads: int | None
+
+ +
+
+interim_simplification: int | None
+
+ +
+
+kompiled_dir: Required[str | Path]
+
+ +
+
+llvm_kompiled_dir: Required[str | Path]
+
+ +
+
+log_axioms_file: Path | None
+
+ +
+
+log_context: Iterable[str] | None
+
+ +
+
+module_name: Required[str]
+
+ +
+
+no_post_exec_simplify: bool | None
+
+ +
+
+not_log_context: Iterable[str] | None
+
+ +
+
+port: int | None
+
+ +
+
+smt_reset_interval: int | None
+
+ +
+
+smt_retry_limit: int | None
+
+ +
+
+smt_tactic: str | None
+
+ +
+
+smt_timeout: int | None
+
+ +
+ +
+
+final class BranchingResult(state: 'State', depth: 'int', next_states: 'tuple[State, ...]', logs: 'tuple[LogEntry, ...]')[source]
+

Bases: ExecuteResult

+
+
+depth: int
+
+ +
+
+classmethod from_dict(dct: Mapping[str, Any]) BranchingResult[source]
+
+ +
+
+logs: tuple[LogEntry, ...]
+
+ +
+
+next_states: tuple[State, ...]
+
+ +
+
+reason: ClassVar[StopReason] = 'branching'
+
+ +
+
+rule: str | None = None
+
+ +
+
+state: State
+
+ +
+ +
+
+final class CutPointResult(state: 'State', depth: 'int', next_states: 'tuple[State, ...]', rule: 'str', logs: 'tuple[LogEntry, ...]')[source]
+

Bases: ExecuteResult

+
+
+depth: int
+
+ +
+
+classmethod from_dict(dct: Mapping[str, Any]) CutPointResult[source]
+
+ +
+
+logs: tuple[LogEntry, ...]
+
+ +
+
+next_states: tuple[State, ...]
+
+ +
+
+reason: ClassVar[StopReason] = 'cut-point-rule'
+
+ +
+
+rule: str
+
+ +
+
+state: State
+
+ +
+ +
+
+final exception DefaultError(message: 'str', code: 'int', data: 'Any' = None)[source]
+

Bases: KoreClientError

+
+
+code: int
+
+ +
+
+data: Any
+
+ +
+
+message: str
+
+ +
+ +
+
+final class DepthBoundResult(state: 'State', depth: 'int', logs: 'tuple[LogEntry, ...]')[source]
+

Bases: ExecuteResult

+
+
+depth: int
+
+ +
+
+classmethod from_dict(dct: Mapping[str, Any]) DepthBoundResult[source]
+
+ +
+
+logs: tuple[LogEntry, ...]
+
+ +
+
+next_states: tuple[State, ...] | None = None
+
+ +
+
+reason: ClassVar[StopReason] = 'depth-bound'
+
+ +
+
+rule: str | None = None
+
+ +
+
+state: State
+
+ +
+ +
+
+final exception DuplicateModuleError(module_name: 'str')[source]
+

Bases: KoreClientError

+
+
+module_name: str
+
+ +
+ +
+
+class ExecuteResult[source]
+

Bases: ABC

+
+
+depth: int
+
+ +
+
+classmethod from_dict(dct: Mapping[str, Any]) ER[source]
+
+ +
+
+logs: tuple[LogEntry, ...]
+
+ +
+
+next_states: tuple[State, ...] | None
+
+ +
+
+reason: ClassVar[StopReason]
+
+ +
+
+rule: str | None
+
+ +
+
+state: State
+
+ +
+ +
+
+class FallbackReason(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+ABORTED = 'Aborted'
+
+ +
+
+BRANCHING = 'Branching'
+
+ +
+
+STUCK = 'Stuck'
+
+ +
+ +
+
+class GetModelResult[source]
+

Bases: ABC

+
+
+static from_dict(dct: Mapping[str, Any]) GetModelResult[source]
+
+ +
+ +
+
+final class HttpTransport(host: str, port: int, *, timeout: int | None = None)[source]
+

Bases: Transport

+
+
+close() None[source]
+
+ +
+ +
+
+final exception ImplicationError(error: 'str', context: 'Iterable[str]')[source]
+

Bases: KoreClientError

+
+
+context: tuple[str, ...]
+
+ +
+
+error: str
+
+ +
+ +
+
+final class ImpliesResult(valid: 'bool', implication: 'Pattern', substitution: 'Pattern | None', predicate: 'Pattern | None', logs: 'tuple[LogEntry, ...]')[source]
+

Bases: object

+
+
+static from_dict(dct: Mapping[str, Any]) ImpliesResult[source]
+
+ +
+
+implication: Pattern
+
+ +
+
+logs: tuple[LogEntry, ...]
+
+ +
+
+predicate: Pattern | None
+
+ +
+
+substitution: Pattern | None
+
+ +
+
+valid: bool
+
+ +
+ +
+
+final exception InvalidModuleError(error: 'str', context: 'Iterable[str] | None')[source]
+

Bases: KoreClientError

+
+
+context: tuple[str, ...] | None
+
+ +
+
+error: str
+
+ +
+ +
+
+class JsonRpcClient(host: str, port: int, *, timeout: int | None = None, bug_report: BugReport | None = None, bug_report_id: str | None = None, transport: TransportType = TransportType.SINGLE_SOCKET)[source]
+

Bases: ContextManager[JsonRpcClient]

+
+
+close() None[source]
+
+ +
+
+request(method: str, **params: Any) dict[str, Any][source]
+
+ +
+ +
+
+class JsonRpcClientFacade(default_host: str, default_port: int, default_transport: TransportType, dispatch: dict[str, list[tuple[str, int, TransportType]]], *, timeout: int | None = None, bug_report: BugReport | None = None, bug_report_id: str | None = None)[source]
+

Bases: ContextManager[JsonRpcClientFacade]

+
+
+close() None[source]
+
+ +
+
+request(method: str, **params: Any) dict[str, Any][source]
+
+ +
+ +
+
+final exception JsonRpcError(message: 'str', code: 'int', data: 'Any' = None)[source]
+

Bases: Exception

+
+ +
+
+class KoreClient(host: str, port: int, *, timeout: int | None = None, bug_report: BugReport | None = None, bug_report_id: str | None = None, transport: TransportType = TransportType.SINGLE_SOCKET, dispatch: dict[str, list[tuple[str, int, TransportType]]] | None = None)[source]
+

Bases: ContextManager[KoreClient]

+
+
+add_module(module: Module, *, name_as_id: bool | None = None) str[source]
+
+ +
+
+close() None[source]
+
+ +
+
+execute(pattern: Pattern, *, max_depth: int | None = None, assume_state_defined: bool | None = None, cut_point_rules: Iterable[str] | None = None, terminal_rules: Iterable[str] | None = None, moving_average_step_timeout: bool | None = None, step_timeout: int | None = None, module_name: str | None = None, log_successful_rewrites: bool | None = None, log_failed_rewrites: bool | None = None) ExecuteResult[source]
+
+ +
+
+get_model(pattern: Pattern, module_name: str | None = None) GetModelResult[source]
+
+ +
+
+implies(antecedent: Pattern, consequent: Pattern, *, module_name: str | None = None, assume_defined: bool = False) ImpliesResult[source]
+
+ +
+
+port: int
+
+ +
+
+simplify(pattern: Pattern, *, module_name: str | None = None) tuple[Pattern, tuple[LogEntry, ...]][source]
+
+ +
+ +
+
+exception KoreClientError(message: str)[source]
+

Bases: Exception, ABC

+
+ +
+
+class KoreExecLogFormat(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+ONELINE = 'oneline'
+
+ +
+
+STANDARD = 'standard'
+
+ +
+ +
+
+class KoreServer(args: KoreServerArgs)[source]
+

Bases: ContextManager[KoreServer]

+
+
+close() None[source]
+
+ +
+
+property host: str
+
+ +
+
+property pid: int
+
+ +
+
+property port: int
+
+ +
+
+start() None[source]
+
+ +
+ +
+
+class KoreServerArgs[source]
+

Bases: TypedDict

+
+
+bug_report: BugReport | None
+
+ +
+
+command: str | Iterable[str] | None
+
+ +
+
+haskell_log_entries: Iterable[str] | None
+
+ +
+
+haskell_log_format: KoreExecLogFormat | None
+
+ +
+
+haskell_threads: int | None
+
+ +
+
+kompiled_dir: Required[str | Path]
+
+ +
+
+log_axioms_file: Path | None
+
+ +
+
+module_name: Required[str]
+
+ +
+
+port: int | None
+
+ +
+
+smt_reset_interval: int | None
+
+ +
+
+smt_retry_limit: int | None
+
+ +
+
+smt_tactic: str | None
+
+ +
+
+smt_timeout: int | None
+
+ +
+ +
+
+class KoreServerInfo(pid, host, port)[source]
+

Bases: NamedTuple

+
+
+host: str
+

Alias for field number 1

+
+ +
+
+pid: int
+

Alias for field number 0

+
+ +
+
+port: int
+

Alias for field number 2

+
+ +
+ +
+
+class LogEntry[source]
+

Bases: ABC

+
+
+classmethod from_dict(dct: Mapping[str, Any]) LE[source]
+
+ +
+
+abstract to_dict() dict[str, Any][source]
+
+ +
+ +
+
+class LogOrigin(value)[source]
+

Bases: str, Enum

+

An enumeration.

+
+
+BOOSTER = 'booster'
+
+ +
+
+KORE_RPC = 'kore-rpc'
+
+ +
+
+LLVM = 'llvm'
+
+ +
+
+PROXY = 'proxy'
+
+ +
+
+__format__(format_spec)
+

Returns format using actual value type unless __str__ has been overridden.

+
+ +
+ +
+
+final class LogRewrite(origin: 'LogOrigin', result: 'RewriteResult')[source]
+

Bases: LogEntry

+
+
+classmethod from_dict(dct: Mapping[str, Any]) LogRewrite[source]
+
+ +
+
+origin: LogOrigin
+
+ +
+
+result: RewriteResult
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final exception ParseError(error: 'str')[source]
+

Bases: KoreClientError

+
+
+error: str
+
+ +
+ +
+
+final exception PatternError(error: 'str', context: 'Iterable[str]')[source]
+

Bases: KoreClientError

+
+
+context: tuple[str, ...]
+
+ +
+
+error: str
+
+ +
+ +
+
+final class RewriteFailure(rule_id: 'str | None', reason: 'str')[source]
+

Bases: RewriteResult

+
+
+classmethod from_dict(dct: Mapping[str, Any]) RewriteFailure[source]
+
+ +
+
+reason: str
+
+ +
+
+rule_id: str | None
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+class RewriteResult[source]
+

Bases: ABC

+
+
+classmethod from_dict(dct: Mapping[str, Any]) RR[source]
+
+ +
+
+rule_id: str | None
+
+ +
+
+abstract to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class RewriteSuccess(rule_id: 'str', rewritten_term: 'Pattern | None' = None)[source]
+

Bases: RewriteResult

+
+
+classmethod from_dict(dct: Mapping[str, Any]) RewriteSuccess[source]
+
+ +
+
+rewritten_term: Pattern | None = None
+
+ +
+
+rule_id: str
+
+ +
+
+to_dict() dict[str, Any][source]
+
+ +
+ +
+
+final class SatResult(model: 'Pattern | None')[source]
+

Bases: GetModelResult

+
+
+model: Pattern | None
+
+ +
+ +
+
+final class SingleSocketTransport(host: str, port: int, *, timeout: int | None = None)[source]
+

Bases: Transport

+
+
+close() None[source]
+
+ +
+ +
+
+final exception SmtSolverError(error: 'str', pattern: 'Pattern')[source]
+

Bases: KoreClientError

+
+
+error: str
+
+ +
+
+pattern: Pattern
+
+ +
+ +
+
+final class State(term: 'Pattern', *, substitution: 'Mapping[EVar, Pattern] | None' = None, predicate: 'Pattern | None' = None, rule_id: 'str | None' = None, rule_substitution: 'Mapping[EVar, Pattern] | None' = None, rule_predicate: 'Pattern | None' = None)[source]
+

Bases: object

+
+
+static from_dict(dct: Mapping[str, Any]) State[source]
+
+ +
+
+property kore: Pattern
+
+ +
+
+predicate: Pattern | None
+
+ +
+
+rule_id: str | None
+
+ +
+
+rule_predicate: Pattern | None
+
+ +
+
+rule_substitution: FrozenDict[EVar, Pattern] | None
+
+ +
+
+substitution: FrozenDict[EVar, Pattern] | None
+
+ +
+
+term: Pattern
+
+ +
+ +
+
+class StopReason(value)[source]
+

Bases: str, Enum

+

An enumeration.

+
+
+ABORTED = 'aborted'
+
+ +
+
+BRANCHING = 'branching'
+
+ +
+
+CUT_POINT_RULE = 'cut-point-rule'
+
+ +
+
+DEPTH_BOUND = 'depth-bound'
+
+ +
+
+STUCK = 'stuck'
+
+ +
+
+TERMINAL_RULE = 'terminal-rule'
+
+ +
+
+TIMEOUT = 'timeout'
+
+ +
+
+VACUOUS = 'vacuous'
+
+ +
+
+__format__(format_spec)
+

Returns format using actual value type unless __str__ has been overridden.

+
+ +
+ +
+
+final class StuckResult(state: 'State', depth: 'int', logs: 'tuple[LogEntry, ...]')[source]
+

Bases: ExecuteResult

+
+
+depth: int
+
+ +
+
+classmethod from_dict(dct: Mapping[str, Any]) StuckResult[source]
+
+ +
+
+logs: tuple[LogEntry, ...]
+
+ +
+
+next_states: tuple[State, ...] | None = None
+
+ +
+
+reason: ClassVar[StopReason] = 'stuck'
+
+ +
+
+rule: str | None = None
+
+ +
+
+state: State
+
+ +
+ +
+
+final class TerminalResult(state: 'State', depth: 'int', rule: 'str', logs: 'tuple[LogEntry, ...]')[source]
+

Bases: ExecuteResult

+
+
+depth: int
+
+ +
+
+classmethod from_dict(dct: Mapping[str, Any]) TerminalResult[source]
+
+ +
+
+logs: tuple[LogEntry, ...]
+
+ +
+
+next_states: tuple[State, ...] | None = None
+
+ +
+
+reason: ClassVar[StopReason] = 'terminal-rule'
+
+ +
+
+rule: str
+
+ +
+
+state: State
+
+ +
+ +
+
+final class TimeoutResult(state: 'State', depth: 'int', logs: 'tuple[LogEntry, ...]')[source]
+

Bases: ExecuteResult

+
+
+depth: int
+
+ +
+
+classmethod from_dict(dct: Mapping[str, Any]) TimeoutResult[source]
+
+ +
+
+logs: tuple[LogEntry, ...]
+
+ +
+
+next_states: tuple[State, ...] | None = None
+
+ +
+
+reason: ClassVar[StopReason] = 'timeout'
+
+ +
+
+rule: str | None = None
+
+ +
+
+state: State
+
+ +
+ +
+
+class Transport[source]
+

Bases: ContextManager[Transport], ABC

+
+
+abstract close() None[source]
+
+ +
+
+request(req: str, request_id: str, method_name: str) str[source]
+
+ +
+ +
+
+class TransportType(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+HTTP = 2
+
+ +
+
+SINGLE_SOCKET = 1
+
+ +
+ +
+
+final exception UnknownModuleError(module_name: 'str')[source]
+

Bases: KoreClientError

+
+
+module_name: str
+
+ +
+ +
+
+final class UnknownResult[source]
+

Bases: GetModelResult

+
+ +
+
+final class UnsatResult[source]
+

Bases: GetModelResult

+
+ +
+
+final class VacuousResult(state: 'State', depth: 'int', logs: 'tuple[LogEntry, ...]')[source]
+

Bases: ExecuteResult

+
+
+depth: int
+
+ +
+
+classmethod from_dict(dct: Mapping[str, Any]) VacuousResult[source]
+
+ +
+
+logs: tuple[LogEntry, ...]
+
+ +
+
+next_states: tuple[State, ...] | None = None
+
+ +
+
+reason: ClassVar[StopReason] = 'vacuous'
+
+ +
+
+rule: str | None = None
+
+ +
+
+state: State
+
+ +
+ +
+
+kore_server(definition_dir: str | Path, module_name: str, *, port: int | None = None, command: str | Iterable[str] | None = None, smt_timeout: int | None = None, smt_retry_limit: int | None = None, smt_tactic: str | None = None, log_axioms_file: Path | None = None, haskell_log_format: KoreExecLogFormat | None = None, haskell_log_entries: Iterable[str] | None = None, haskell_threads: int | None = None, llvm_definition_dir: Path | None = None, fallback_on: Iterable[str | FallbackReason] | None = None, interim_simplification: int | None = None, no_post_exec_simplify: bool | None = None, bug_report: BugReport | None = None) KoreServer[source]
+
+ +
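The listing above documents the JSON-RPC client and server wrappers for the Haskell backend. The sketch below shows one way to tie kore_server and KoreClient together; the definition directory, module name 'IMP', and host 'localhost' are placeholder assumptions, and error handling is omitted.

```python
from pathlib import Path

from pyk.kore.rpc import KoreClient, StuckResult, kore_server
from pyk.kore.syntax import Pattern


def run_to_stop(init: Pattern) -> None:
    # Placeholder assumptions: a Haskell-backend kompiled definition and its main module.
    defn_dir = Path('path/to/definition-kompiled')
    with kore_server(defn_dir, 'IMP') as server:
        # Connect to the freshly started server on its dynamically chosen port.
        with KoreClient('localhost', server.port) as client:
            result = client.execute(init, max_depth=1000)
            # `result` is an ExecuteResult subclass; `reason` is a StopReason value.
            if isinstance(result, StuckResult):
                print(f'stuck after {result.depth} steps')
            print(result.state.kore.text)
```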
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore.rule.html b/pyk/api/pyk.kore.rule.html new file mode 100644 index 00000000000..d60d816a8a1 --- /dev/null +++ b/pyk/api/pyk.kore.rule.html @@ -0,0 +1,452 @@ + + + + + + + + + pyk.kore.rule module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kore.rule module

+
+
+final class AppRule(lhs: 'App', rhs: 'Pattern', req: 'Pattern | None', ens: 'Pattern | None', sort: 'Sort', priority: 'int')[source]
+

Bases: SimpliRule[App]

+
+
+ens: Pattern | None
+
+ +
+
+static from_axiom(axiom: Axiom) AppRule[source]
+
+ +
+
+lhs: App
+
+ +
+
+priority: int
+
+ +
+
+req: Pattern | None
+
+ +
+
+rhs: Pattern
+
+ +
+
+sort: Sort
+
+ +
+ +
+
+final class CeilRule(lhs: 'Ceil', rhs: 'Pattern', req: 'Pattern | None', ens: 'Pattern | None', sort: 'Sort', priority: 'int')[source]
+

Bases: SimpliRule[Ceil]

+
+
+ens: Pattern | None
+
+ +
+
+static from_axiom(axiom: Axiom) CeilRule[source]
+
+ +
+
+lhs: Ceil
+
+ +
+
+priority: int
+
+ +
+
+req: Pattern | None
+
+ +
+
+rhs: Pattern
+
+ +
+
+sort: Sort
+
+ +
+ +
+
+final class EqualsRule(lhs: 'Equals', rhs: 'Pattern', req: 'Pattern | None', ens: 'Pattern | None', sort: 'Sort', priority: 'int')[source]
+

Bases: SimpliRule[Equals]

+
+
+ens: Pattern | None
+
+ +
+
+static from_axiom(axiom: Axiom) EqualsRule[source]
+
+ +
+
+lhs: Equals
+
+ +
+
+priority: int
+
+ +
+
+req: Pattern | None
+
+ +
+
+rhs: Pattern
+
+ +
+
+sort: Sort
+
+ +
+ +
+
+final class FunctionRule(lhs: 'App', rhs: 'Pattern', req: 'Pattern | None', ens: 'Pattern | None', sort: 'Sort', arg_sorts: 'tuple[Sort, ...]', anti_left: 'Pattern | None', priority: 'int')[source]
+

Bases: Rule

+
+
+anti_left: Pattern | None
+
+ +
+
+arg_sorts: tuple[Sort, ...]
+
+ +
+
+ens: Pattern | None
+
+ +
+
+static from_axiom(axiom: Axiom) FunctionRule[source]
+
+ +
+
+lhs: App
+
+ +
+
+priority: int
+
+ +
+
+req: Pattern | None
+
+ +
+
+rhs: Pattern
+
+ +
+
+sort: Sort
+
+ +
+
+to_axiom() Axiom[source]
+
+ +
+ +
+
+final class RewriteRule(lhs: 'App', rhs: 'App', req: 'Pattern | None', ens: 'Pattern | None', ctx: 'EVar | None', priority: 'int', uid: 'str', label: 'str | None')[source]
+

Bases: Rule

+
+
+ctx: EVar | None
+
+ +
+
+ens: Pattern | None
+
+ +
+
+static from_axiom(axiom: Axiom) RewriteRule[source]
+
+ +
+
+label: str | None
+
+ +
+
+lhs: App
+
+ +
+
+priority: int
+
+ +
+
+req: Pattern | None
+
+ +
+
+rhs: App
+
+ +
+
+sort: Sort = SortApp(name='SortGeneratedTopCell', sorts=())
+
+ +
+
+to_axiom() Axiom[source]
+
+ +
+
+uid: str
+
+ +
+ +
+
+class Rule[source]
+

Bases: ABC

+
+
+ens: Pattern | None
+
+ +
+
+static extract_all(defn: Definition) list[Rule][source]
+
+ +
+
+static from_axiom(axiom: Axiom) Rule[source]
+
+ +
+
+static is_rule(axiom: Axiom) bool[source]
+
+ +
+
+lhs: Pattern
+
+ +
+
+priority: int
+
+ +
+
+req: Pattern | None
+
+ +
+
+rhs: Pattern
+
+ +
+
+sort: Sort
+
+ +
+
+abstract to_axiom() Axiom[source]
+
+ +
+ +
+
+class SimpliRule[source]
+

Bases: Rule, Generic[P], ABC

+
+
+lhs: P
+
+ +
+
+sort: Sort
+
+ +
+
+to_axiom() Axiom[source]
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore.syntax.html b/pyk/api/pyk.kore.syntax.html new file mode 100644 index 00000000000..df26c20dba7 --- /dev/null +++ b/pyk/api/pyk.kore.syntax.html @@ -0,0 +1,2081 @@ + + + + + + + + + pyk.kore.syntax module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kore.syntax module

+
+
+final class AliasDecl(alias: 'Symbol', param_sorts: 'Iterable[Sort]', sort: 'Sort', left: 'App', right: 'Pattern', attrs: 'Iterable[App]' = ())[source]
+

Bases: Sentence

+
+
+alias: Symbol
+
+ +
+
+attrs: tuple[App, ...]
+
+ +
+
+left: App
+
+ +
+
+let(*, alias: Symbol | None = None, param_sorts: Iterable[Sort] | None = None, sort: Sort | None = None, left: App | None = None, right: Pattern | None = None, attrs: Iterable[App] | None = None) AliasDecl[source]
+
+ +
+
+let_attrs(attrs: Iterable[App]) AliasDecl[source]
+
+ +
+
+param_sorts: tuple[Sort, ...]
+
+ +
+
+right: Pattern
+
+ +
+
+sort: Sort
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+final class And(sort: 'Sort', ops: 'Iterable[Pattern]' = ())[source]
+

Bases: MultiaryConn

+
+
+let(*, sort: Sort | None = None, ops: Iterable[Pattern] | None = None) And[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) And[source]
+
+ +
+
+let_sort(sort: Sort) And[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) And[source]
+
+ +
+
+ops: tuple[Pattern, ...]
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+ +
+
+final class App(symbol: 'str | SymbolId', sorts: 'Iterable[Sort]' = (), args: 'Iterable[Pattern]' = ())[source]
+

Bases: Pattern

+
+
+args: tuple[Pattern, ...]
+
+ +
+
+let(*, symbol: str | SymbolId | None = None, sorts: Iterable | None = None, args: Iterable | None = None) App[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) App[source]
+
+ +
+
+property patterns: tuple[Pattern, ...]
+
+ +
+
+sorts: tuple[Sort, ...]
+
+ +
+
+symbol: str
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+class Assoc[source]
+

Bases: Pattern

+
+
+property app: App
+
+ +
+
+args: tuple[Pattern, ...]
+
+ +
+
+abstract classmethod kore_symbol() str[source]
+
+ +
+
+abstract property pattern: Pattern
+
+ +
+
+property patterns: tuple[Pattern, ...]
+
+ +
+
+sorts: tuple[Sort, ...]
+
+ +
+
+symbol: str
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+final class Axiom(vars: 'Iterable[SortVar]', pattern: 'Pattern', attrs: 'Iterable[App]' = ())[source]
+

Bases: AxiomLike

+
+
+attrs: tuple[App, ...]
+
+ +
+
+let(*, vars: Iterable[SortVar] | None = None, pattern: Pattern | None = None, attrs: Iterable[App] | None = None) Axiom[source]
+
+ +
+
+let_attrs(attrs: Iterable[App]) Axiom[source]
+
+ +
+
+pattern: Pattern
+
+ +
+
+vars: tuple[SortVar, ...]
+
+ +
+ +
+
+class AxiomLike[source]
+

Bases: Sentence

+
+
+pattern: Pattern
+
+ +
+
+vars: tuple[SortVar, ...]
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+class BinaryConn[source]
+

Bases: MLConn

+
+
+left: Pattern
+
+ +
+
+property patterns: tuple[Pattern, Pattern]
+
+ +
+
+right: Pattern
+
+ +
+ +
+
+class BinaryPred[source]
+

Bases: MLPred

+
+
+left: Pattern
+
+ +
+
+property patterns: tuple[Pattern, Pattern]
+
+ +
+
+right: Pattern
+
+ +
+
+property sorts: tuple[Sort, Sort]
+
+ +
+ +
+
+final class Bottom(sort: 'Sort')[source]
+

Bases: NullaryConn

+
+
+let(*, sort: Sort | None = None) Bottom[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Bottom[source]
+
+ +
+
+let_sort(sort: Sort) Bottom[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Bottom[source]
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+ +
+
+final class Ceil(op_sort: 'Sort', sort: 'Sort', pattern: 'Pattern')[source]
+

Bases: RoundPred

+
+
+let(*, op_sort: Sort | None = None, sort: Sort | None = None, pattern: Pattern | None = None) Ceil[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Ceil[source]
+
+ +
+
+let_sort(sort: Sort) Ceil[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Ceil[source]
+
+ +
+
+op_sort: Sort
+
+ +
+
+pattern: Pattern
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+ +
+
+final class Claim(vars: 'Iterable[SortVar]', pattern: 'Pattern', attrs: 'Iterable[App]' = ())[source]
+

Bases: AxiomLike

+
+
+attrs: tuple[App, ...]
+
+ +
+
+let(*, vars: Iterable[SortVar] | None = None, pattern: Pattern | None = None, attrs: Iterable[App] | None = None) Claim[source]
+
+ +
+
+let_attrs(attrs: Iterable[App]) Claim[source]
+
+ +
+
+pattern: Pattern
+
+ +
+
+vars: tuple[SortVar, ...]
+
+ +
+ +
+
+final class DV(sort: 'Sort', value: 'String')[source]
+

Bases: MLPattern, WithSort

+
+
+property ctor_patterns: tuple[String]
+
+ +
+
+let(*, sort: Sort | None = None, value: String | None = None) DV[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) DV[source]
+
+ +
+
+let_sort(sort: Sort) DV[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) DV[source]
+
+ +
+
+property patterns: tuple[()]
+
+ +
+
+sort: Sort
+
+ +
+
+property sorts: tuple[Sort]
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+
+value: String
+
+ +
+ +
+
+final class Definition(modules: 'Iterable[Module]' = (), attrs: 'Iterable[App]' = ())[source]
+

Bases: Kore, WithAttrs, Iterable[Module]

+
+
+attrs: tuple[App, ...]
+
+ +
+
+property axioms: tuple[Axiom, ...]
+
+ +
+
+compute_ordinals() Definition[source]
+
+ +
+
+get_axiom_by_ordinal(ordinal: int) Axiom[source]
+
+ +
+
+let(*, modules: Iterable[Module] | None = None, attrs: Iterable[App] | None = None) Definition[source]
+
+ +
+
+let_attrs(attrs: Iterable[App]) Definition[source]
+
+ +
+
+modules: tuple[Module, ...]
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+final class EVar(name: 'str | Id', sort: 'Sort')[source]
+

Bases: VarPattern

+
+
+let(*, name: str | Id | None = None, sort: Sort | None = None) EVar[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) EVar[source]
+
+ +
+
+let_sort(sort: Sort) EVar[source]
+
+ +
+
+name: str
+
+ +
+
+sort: Sort
+
+ +
+ +
+
+final class Equals(op_sort: 'Sort', sort: 'Sort', left: 'Pattern', right: 'Pattern')[source]
+

Bases: BinaryPred

+
+
+left: Pattern
+
+ +
+
+let(*, op_sort: Sort | None = None, sort: Sort | None = None, left: Pattern | None = None, right: Pattern | None = None) Equals[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Equals[source]
+
+ +
+
+let_sort(sort: Sort) Equals[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Equals[source]
+
+ +
+
+op_sort: Sort
+
+ +
+
+right: Pattern
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+ +
+
+final class Exists(sort: 'Sort', var: 'EVar', pattern: 'Pattern')[source]
+

Bases: MLQuant

+
+
+let(*, sort: Sort | None = None, var: EVar | None = None, pattern: Pattern | None = None) Exists[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Exists[source]
+
+ +
+
+let_sort(sort: Sort) Exists[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Exists[source]
+
+ +
+
+pattern: Pattern
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+
+var: EVar
+
+ +
+ +
+
+final class Floor(op_sort: 'Sort', sort: 'Sort', pattern: 'Pattern')[source]
+

Bases: RoundPred

+
+
+let(*, op_sort: Sort | None = None, sort: Sort | None = None, pattern: Pattern | None = None) Floor[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Floor[source]
+
+ +
+
+let_sort(sort: Sort) Floor[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Floor[source]
+
+ +
+
+op_sort: Sort
+
+ +
+
+pattern: Pattern
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+ +
+
+final class Forall(sort: 'Sort', var: 'EVar', pattern: 'Pattern')[source]
+

Bases: MLQuant

+
+
+let(*, sort: Sort | None = None, var: EVar | None = None, pattern: Pattern | None = None) Forall[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Forall[source]
+
+ +
+
+let_sort(sort: Sort) Forall[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Forall[source]
+
+ +
+
+pattern: Pattern
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+
+var: EVar
+
+ +
+ +
+
+final class Id(value: 'str')[source]
+

Bases: object

+
+
+value: str
+
+ +
+ +
+
+final class Iff(sort: 'Sort', left: 'Pattern', right: 'Pattern')[source]
+

Bases: BinaryConn

+
+
+left: Pattern
+
+ +
+
+let(*, sort: Sort | None = None, left: Pattern | None = None, right: Pattern | None = None) Iff[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Iff[source]
+
+ +
+
+let_sort(sort: Sort) Iff[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Iff[source]
+
+ +
+
+right: Pattern
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+ +
+
+final class Implies(sort: 'Sort', left: 'Pattern', right: 'Pattern')[source]
+

Bases: BinaryConn

+
+
+left: Pattern
+
+ +
+
+let(*, sort: Sort | None = None, left: Pattern | None = None, right: Pattern | None = None) Implies[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Implies[source]
+
+ +
+
+let_sort(sort: Sort) Implies[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Implies[source]
+
+ +
+
+right: Pattern
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+ +
+
+final class Import(module_name: 'str | Id', attrs: 'Iterable[App]' = ())[source]
+

Bases: Sentence

+
+
+attrs: tuple[App, ...]
+
+ +
+
+let(*, module_name: str | Id | None = None, attrs: Iterable[App] | None = None) Import[source]
+
+ +
+
+let_attrs(attrs: Iterable[App]) Import[source]
+
+ +
+
+module_name: str
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+final class In(op_sort: 'Sort', sort: 'Sort', left: 'Pattern', right: 'Pattern')[source]
+

Bases: BinaryPred

+
+
+left: Pattern
+
+ +
+
+let(*, op_sort: Sort | None = None, sort: Sort | None = None, left: Pattern | None = None, right: Pattern | None = None) In[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) In[source]
+
+ +
+
+let_sort(sort: Sort) In[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) In[source]
+
+ +
+
+op_sort: Sort
+
+ +
+
+right: Pattern
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+ +
+
+class Kore[source]
+

Bases: ABC

+
+
+property text: str
+
+ +
+
+abstract write(output: IO[str]) None[source]
+
+ +
+ +
+
+final class LeftAssoc(symbol: 'str | SymbolId', sorts: 'Iterable[Sort]' = (), args: 'Iterable[Pattern]' = ())[source]
+

Bases: Assoc

+
+
+args: tuple[Pattern, ...]
+
+ +
+
+classmethod kore_symbol() str[source]
+
+ +
+
+let(*, symbol: str | SymbolId | None = None, sorts: Iterable | None = None, args: Iterable | None = None) LeftAssoc[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) LeftAssoc[source]
+
+ +
+
+property pattern: Pattern
+
+ +
+
+sorts: tuple[Sort, ...]
+
+ +
+
+symbol: str
+
+ +
+ +
+
+class MLConn[source]
+

Bases: MLPattern, WithSort

+
+
+property sorts: tuple[Sort]
+
+ +
+ +
+
+class MLFixpoint[source]
+

Bases: MLPattern

+
+
+property ctor_patterns: tuple[SVar, Pattern]
+
+ +
+
+pattern: Pattern
+
+ +
+
+property patterns: tuple[Pattern]
+
+ +
+
+property sorts: tuple[()]
+
+ +
+
+var: SVar
+
+ +
+ +
+
+class MLPattern[source]
+

Bases: Pattern

+
+
+property ctor_patterns: tuple[Pattern, ...]
+

Return patterns used to construct the term with of.

+

Except for DV, MLFixpoint and MLQuant this coincides with patterns.

+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) ML[source]
+
+ +
+
+abstract property sorts: tuple[Sort, ...]
+
+ +
+
+abstract classmethod symbol() str[source]
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+class MLPred[source]
+

Bases: MLPattern, WithSort

+
+
+op_sort: Sort
+
+ +
+ +
+
+class MLQuant[source]
+

Bases: MLPattern, WithSort

+
+
+property ctor_patterns: tuple[EVar, Pattern]
+
+ +
+
+pattern: Pattern
+
+ +
+
+property patterns: tuple[Pattern]
+
+ +
+
+sort: Sort
+
+ +
+
+property sorts: tuple[Sort]
+
+ +
+
+var: EVar
+
+ +
+ +
+
+class MLRewrite[source]
+

Bases: MLPattern, WithSort

+
+
+property sorts: tuple[Sort]
+
+ +
+ +
+
+final class Module(name: 'str | Id', sentences: 'Iterable[Sentence]' = (), attrs: 'Iterable[App]' = ())[source]
+

Bases: Kore, WithAttrs, Iterable[Sentence]

+
+
+attrs: tuple[App, ...]
+
+ +
+
+property axioms: tuple[Axiom, ...]
+
+ +
+
+let(*, name: str | Id | None = None, sentences: Iterable[Sentence] | None = None, attrs: Iterable[App] | None = None) Module[source]
+
+ +
+
+let_attrs(attrs: Iterable[App]) Module[source]
+
+ +
+
+name: str
+
+ +
+
+sentences: tuple[Sentence, ...]
+
+ +
+
+property symbol_decls: tuple[SymbolDecl, ...]
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+final class Mu(var: 'SVar', pattern: 'Pattern')[source]
+

Bases: MLFixpoint

+
+
+let(*, var: SVar | None = None, pattern: Pattern | None = None) Mu[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Mu[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Mu[source]
+
+ +
+
+pattern: Pattern
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+
+var: SVar
+
+ +
+ +
+
+class MultiaryConn[source]
+

Bases: MLConn

+
+
+ops: tuple[Pattern, ...]
+
+ +
+
+property patterns: tuple[Pattern, ...]
+
+ +
+ +
+
+final class Next(sort: 'Sort', pattern: 'Pattern')[source]
+

Bases: MLRewrite

+
+
+let(*, sort: Sort | None = None, pattern: Pattern | None = None) Next[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Next[source]
+
+ +
+
+let_sort(sort: Sort) Next[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Next[source]
+
+ +
+
+pattern: Pattern
+
+ +
+
+property patterns: tuple[Pattern]
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+ +
+
+final class Not(sort: 'Sort', pattern: 'Pattern')[source]
+

Bases: UnaryConn

+
+
+let(*, sort: Sort | None = None, pattern: Pattern | None = None) Not[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Not[source]
+
+ +
+
+let_sort(sort: Sort) Not[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Not[source]
+
+ +
+
+pattern: Pattern
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+ +
+
+final class Nu(var: 'SVar', pattern: 'Pattern')[source]
+

Bases: MLFixpoint

+
+
+let(*, var: SVar | None = None, pattern: Pattern | None = None) Nu[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Nu[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Nu[source]
+
+ +
+
+pattern: Pattern
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+
+var: SVar
+
+ +
+ +
+
+class NullaryConn[source]
+

Bases: MLConn

+
+
+property patterns: tuple[()]
+
+ +
+ +
+
+final class Or(sort: 'Sort', ops: 'Iterable[Pattern]' = ())[source]
+

Bases: MultiaryConn

+
+
+let(*, sort: Sort | None = None, ops: Iterable[Pattern] | None = None) Or[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Or[source]
+
+ +
+
+let_sort(sort: Sort) Or[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Or[source]
+
+ +
+
+ops: tuple[Pattern, ...]
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+ +
+
+class Pattern[source]
+

Bases: Kore

+
+
+bottom_up(f: Callable[[Pattern], Pattern]) Pattern[source]
+
+ +
+
+property dict: dict[str, Any]
+
+ +
+
+static from_dict(dct: Mapping[str, Any]) Pattern[source]
+
+ +
+
+static from_json(s: str) Pattern[source]
+
+ +
+
+property json: str
+
+ +
+
+abstract let_patterns(patterns: Iterable[Pattern]) P[source]
+
+ +
+
+map_patterns(f: Callable[[Pattern], Pattern]) P[source]
+
+ +
+
+abstract property patterns: tuple[Pattern, ...]
+
+ +
+
+top_down(f: Callable[[Pattern], Pattern]) Pattern[source]
+
+ +
+ +
+
+final class Rewrites(sort: 'Sort', left: 'Pattern', right: 'Pattern')[source]
+

Bases: MLRewrite

+
+
+left: Pattern
+
+ +
+
+let(*, sort: Sort | None = None, left: Pattern | None = None, right: Pattern | None = None) Rewrites[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Rewrites[source]
+
+ +
+
+let_sort(sort: Sort) Rewrites[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Rewrites[source]
+
+ +
+
+property patterns: tuple[Pattern, Pattern]
+
+ +
+
+right: Pattern
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+ +
+
+final class RightAssoc(symbol: 'str | SymbolId', sorts: 'Iterable[Sort]' = (), args: 'Iterable[Pattern]' = ())[source]
+

Bases: Assoc

+
+
+args: tuple[Pattern, ...]
+
+ +
+
+classmethod kore_symbol() str[source]
+
+ +
+
+let(*, symbol: str | SymbolId | None = None, sorts: Iterable | None = None, args: Iterable | None = None) RightAssoc[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) RightAssoc[source]
+
+ +
+
+property pattern: Pattern
+
+ +
+
+sorts: tuple[Sort, ...]
+
+ +
+
+symbol: str
+
+ +
+ +
+
+class RoundPred[source]
+

Bases: MLPred

+
+
+pattern: Pattern
+
+ +
+
+property patterns: tuple[Pattern]
+
+ +
+
+property sorts: tuple[Sort, Sort]
+
+ +
+ +
+
+final class SVar(name: 'str | SetVarId', sort: 'Sort')[source]
+

Bases: VarPattern

+
+
+let(*, name: str | SetVarId | None = None, sort: Sort | None = None) SVar[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) SVar[source]
+
+ +
+
+let_sort(sort: Sort) SVar[source]
+
+ +
+
+name: str
+
+ +
+
+sort: Sort
+
+ +
+ +
+
+class Sentence[source]
+

Bases: Kore, WithAttrs

+
+ +
+
+final class SetVarId(value: 'str')[source]
+

Bases: object

+
+
+value: str
+
+ +
+ +
+
+class Sort[source]
+

Bases: Kore

+
+
+abstract property dict: dict[str, Any]
+
+ +
+
+static from_dict(dct: Mapping[str, Any]) Sort[source]
+
+ +
+
+static from_json(s: str) Sort[source]
+
+ +
+
+property json: str
+
+ +
+
+name: str
+
+ +
+ +
+
+final class SortApp(name: 'str | Id', sorts: 'Iterable[Sort]' = ())[source]
+

Bases: Sort

+
+
+property dict: dict[str, Any]
+
+ +
+
+let(*, name: str | Id | None = None, sorts: Iterable[Sort] | None = None) SortApp[source]
+
+ +
+
+name: str
+
+ +
+
+sorts: tuple[Sort, ...]
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+final class SortDecl(name: 'str | Id', vars: 'Iterable[SortVar]', attrs: 'Iterable[App]' = (), *, hooked: 'bool' = False)[source]
+

Bases: Sentence

+
+
+attrs: tuple[App, ...]
+
+ +
+
+hooked: bool
+
+ +
+
+let(*, name: str | Id | None = None, vars: Iterable[SortVar] | None = None, attrs: Iterable[App] | None = None, hooked: bool | None = None) SortDecl[source]
+
+ +
+
+let_attrs(attrs: Iterable[App]) SortDecl[source]
+
+ +
+
+name: str
+
+ +
+
+vars: tuple[SortVar, ...]
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+final class SortVar(name: 'str | Id')[source]
+

Bases: Sort

+
+
+property dict: dict[str, Any]
+
+ +
+
+let(*, name: str | Id | None = None) SortVar[source]
+
+ +
+
+name: str
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+final class String(value: 'str')[source]
+

Bases: Pattern

+
+
+let(*, value: str | None = None) String[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) String[source]
+
+ +
+
+property patterns: tuple[()]
+
+ +
+
+value: str
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+final class Symbol(name: 'str | SymbolId', vars: 'Iterable[SortVar]' = ())[source]
+

Bases: Kore

+
+
+let(*, name: str | SymbolId | None = None, vars: Iterable[SortVar] | None = None) Symbol[source]
+
+ +
+
+name: str
+
+ +
+
+vars: tuple[SortVar, ...]
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+final class SymbolDecl(symbol: 'Symbol', param_sorts: 'Iterable[Sort]', sort: 'Sort', attrs: 'Iterable[App]' = (), *, hooked: 'bool' = False)[source]
+

Bases: Sentence

+
+
+attrs: tuple[App, ...]
+
+ +
+
+hooked: bool
+
+ +
+
+let(*, symbol: Symbol | None = None, param_sorts: Iterable[Sort] | None = None, sort: Sort | None = None, attrs: Iterable[App] | None = None, hooked: bool | None = None) SymbolDecl[source]
+
+ +
+
+let_attrs(attrs: Iterable[App]) SymbolDecl[source]
+
+ +
+
+param_sorts: tuple[Sort, ...]
+
+ +
+
+sort: Sort
+
+ +
+
+symbol: Symbol
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+final class SymbolId(value: 'str')[source]
+

Bases: object

+
+
+value: str
+
+ +
+ +
+
+final class Top(sort: 'Sort')[source]
+

Bases: NullaryConn

+
+
+let(*, sort: Sort | None = None) Top[source]
+
+ +
+
+let_patterns(patterns: Iterable[Pattern]) Top[source]
+
+ +
+
+let_sort(sort: Sort) Top[source]
+
+ +
+
+classmethod of(symbol: str, sorts: Iterable[Sort] = (), patterns: Iterable[Pattern] = ()) Top[source]
+
+ +
+
+sort: Sort
+
+ +
+
+classmethod symbol() str[source]
+
+ +
+ +
+
+class UnaryConn[source]
+

Bases: MLConn

+
+
+pattern: Pattern
+
+ +
+
+property patterns: tuple[Pattern]
+
+ +
+ +
+
+class VarPattern[source]
+

Bases: Pattern, WithSort

+
+
+name: str
+
+ +
+
+property patterns: tuple[()]
+
+ +
+
+sort: Sort
+
+ +
+
+write(output: IO[str]) None[source]
+
+ +
+ +
+
+class WithAttrs[source]
+

Bases: ABC

+
+
+attrs: tuple[App, ...]
+
+ +
+
+property attrs_by_key: FrozenDict[str, App]
+
+ +
+
+abstract let_attrs(attrs: Iterable[App]) WA[source]
+
+ +
+
+map_attrs(f: Callable[[tuple[App, ...]], Iterable[App]]) WA[source]
+
+ +
+ +
+
+class WithSort[source]
+

Bases: ABC

+
+
+abstract let_sort(sort: Sort) WS[source]
+
+ +
+
+map_sort(f: Callable[[Sort], Sort]) WS[source]
+
+ +
+
+sort: Sort
+
+ +
+ +
+
+kore_term(dct: Mapping[str, Any]) Pattern[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore.tools.html b/pyk/api/pyk.kore.tools.html new file mode 100644 index 00000000000..b52e7c3d596 --- /dev/null +++ b/pyk/api/pyk.kore.tools.html @@ -0,0 +1,187 @@ + + + + + + + + + pyk.kore.tools module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kore.tools module

+
+
+class PrintOutput(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+BINARY = 'binary'
+
+ +
+
+JSON = 'json'
+
+ +
+
+KAST = 'kast'
+
+ +
+
+KORE = 'kore'
+
+ +
+
+LATEX = 'latex'
+
+ +
+
+NONE = 'none'
+
+ +
+
+PRETTY = 'pretty'
+
+ +
+
+PROGRAM = 'program'
+
+ +
+ +
+
+kore_print(pattern: str | Pattern, *, definition_dir: str | Path | None = None, output_file: str | Path | None = None, output: str | PrintOutput | None = None, color: bool | None = None) str[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore_exec_covr.html b/pyk/api/pyk.kore_exec_covr.html new file mode 100644 index 00000000000..c19235f7ea9 --- /dev/null +++ b/pyk/api/pyk.kore_exec_covr.html @@ -0,0 +1,151 @@ + + + + + + + + + pyk.kore_exec_covr package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.kore_exec_covr.kore_exec_covr.html b/pyk/api/pyk.kore_exec_covr.kore_exec_covr.html new file mode 100644 index 00000000000..daa91fd7853 --- /dev/null +++ b/pyk/api/pyk.kore_exec_covr.kore_exec_covr.html @@ -0,0 +1,175 @@ + + + + + + + + + pyk.kore_exec_covr.kore_exec_covr module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.kore_exec_covr.kore_exec_covr module

+
+
+class HaskellLogEntry(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+DEBUG_APPLIED_REWRITE_RULES = 'DebugAppliedRewriteRules'
+
+ +
+
+DEBUG_APPLY_EQUATION = 'DebugApplyEquation'
+
+ +
+ +
+
+build_rule_dict(definition: KDefinition, *, skip_projections: bool = True, skip_initializers: bool = True) dict[str, KRule][source]
+

Traverse the kompiled definition and build a dictionary mapping str(file:location) to KRule.

+
+ +
+
+parse_rule_applications(haskell_backend_oneline_log_file: Path) dict[HaskellLogEntry, dict[str, int]][source]
+

Process a one-line log file produced by K’s Haskell backend.

+

Extracts information about:

+
    +
  • Applied rewrites (DebugAppliedRewriteRules).

  • +
  • Applied simplifications (DebugApplyEquation).

  • +
+
+

Note

+

Haskell backend logs often contain rule applications with empty locations. +It seems likely that those are generated projection rules. +We report their applications in bulk with UNKNOWN location.

+
+
+ +
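The two functions above pair naturally: build_rule_dict indexes the kompiled definition by source location, and parse_rule_applications tallies how often each location fired in a Haskell backend log. A minimal sketch follows; the definition directory and log file path are placeholder assumptions, and KPrint (documented in pyk.ktool.kprint) is used here only to obtain a KDefinition.

```python
from pathlib import Path

from pyk.kore_exec_covr.kore_exec_covr import (
    HaskellLogEntry,
    build_rule_dict,
    parse_rule_applications,
)
from pyk.ktool.kprint import KPrint

# Placeholder assumptions: a kompiled definition and a one-line Haskell backend log.
definition = KPrint(Path('path/to/definition-kompiled')).definition
rule_dict = build_rule_dict(definition)

applications = parse_rule_applications(Path('path/to/haskell-backend.log'))
rewrites = applications[HaskellLogEntry.DEBUG_APPLIED_REWRITE_RULES]

# Print rule application counts, most frequent first, flagging unknown locations.
for location, count in sorted(rewrites.items(), key=lambda item: item[1], reverse=True):
    flag = '' if location in rule_dict else ' (no matching rule)'
    print(f'{count:6d}  {location}{flag}')
```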
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.krepl.html b/pyk/api/pyk.krepl.html new file mode 100644 index 00000000000..248d5a34398 --- /dev/null +++ b/pyk/api/pyk.krepl.html @@ -0,0 +1,179 @@ + + + + + + + + + pyk.krepl package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/pyk/api/pyk.krepl.repl.html b/pyk/api/pyk.krepl.repl.html new file mode 100644 index 00000000000..8afe5d25c5d --- /dev/null +++ b/pyk/api/pyk.krepl.repl.html @@ -0,0 +1,273 @@ + + + + + + + + + pyk.krepl.repl module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.krepl.repl module

+
+
+class BaseRepl[source]
+

Bases: Cmd, Generic[T], ABC

+
+
+CAT_BUILTIN: Final = 'Built-in Commands'
+
+ +
+
+CAT_DEBUG: Final = 'Debugger Commands'
+
+ +
+
+abstract do_load(args: Any) bool | None[source]
+

Set up the interpreter.

+

Subclasses are expected to

+
    +
  • Decorate the method with with_argparser to ensure the right set of arguments is parsed.

  • +
  • Instantiate an Interpreter[T] based on args, then set self.interpreter.

  • +
  • Set self.state to self.interpreter.init_state().

  • +
+
+ +
+
+do_show(args: Namespace) None[source]
+
+ +
+
+do_step(args: Namespace) None[source]
+
+ +
+
+interpreter: Interpreter[T] | None
+
+ +
+
+prompt = '> '
+
+ +
+
+state: T | None
+
+ +
+ +
+
+class Interpreter[source]
+

Bases: Generic[T], ABC

+
+
+abstract init_state() T[source]
+
+ +
+
+abstract next_state(state: T, steps: int | None = None) T[source]
+
+ +
+ +
+
+class KInterpreter(definition_dir: Path, program_file: Path)[source]
+

Bases: Interpreter[KState]

+
+
+definition_dir: Path
+
+ +
+
+init_state() KState[source]
+
+ +
+
+next_state(state: KState, steps: int | None = None) KState[source]
+
+ +
+
+program_file: Path
+
+ +
+ +
+
+class KRepl(definition_dir: Path)[source]
+

Bases: BaseRepl[KState]

+
+
+do_load(args: Namespace) None[source]
+
+ +
+
+intro = 'K-REPL Shell\nType "help" or "?" for more information.'
+
+ +
+ +
+
+final class KState(definition_dir: 'Path', pattern: 'Pattern')[source]
+

Bases: object

+
+
+definition_dir: Path
+
+ +
+
+pattern: Pattern
+
+ +
+
+property pretty: str
+
+ +
+ +
+
+exception ReplError[source]
+

Bases: Exception

+
+ +
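The do_load contract described above is easiest to see in a small subclass. The sketch below assumes that BaseRepl needs no constructor arguments and that cmd2's Cmd2ArgumentParser and with_argparser are available (BaseRepl derives from Cmd); the argument name program is illustrative. For most uses the ready-made KRepl(definition_dir) subclass documented above already provides this behavior.

```python
from pathlib import Path

from cmd2 import Cmd2ArgumentParser, with_argparser

from pyk.krepl.repl import BaseRepl, KInterpreter, KState

_load_parser = Cmd2ArgumentParser()
_load_parser.add_argument('program', type=Path, help='program file to load')


class MyRepl(BaseRepl[KState]):
    def __init__(self, definition_dir: Path) -> None:
        super().__init__()
        self.definition_dir = definition_dir

    @with_argparser(_load_parser)
    def do_load(self, args) -> None:
        # Instantiate an Interpreter[T] based on args, then set the initial state.
        self.interpreter = KInterpreter(self.definition_dir, args.program)
        self.state = self.interpreter.init_state()
```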
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.ktool.claim_index.html b/pyk/api/pyk.ktool.claim_index.html new file mode 100644 index 00000000000..56685abf6a0 --- /dev/null +++ b/pyk/api/pyk.ktool.claim_index.html @@ -0,0 +1,190 @@ + + + + + + + + + pyk.ktool.claim_index module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.ktool.claim_index module

+
+
+class ClaimIndex(claims: 'Mapping[str, KClaim]', main_module_name: 'str | None' = None)[source]
+

Bases: Mapping[str, KClaim]

+
+
+claims: FrozenDict[str, KClaim]
+
+ +
+
+static from_module_list(module_list: KFlatModuleList) ClaimIndex[source]
+
+ +
+
+labels(*, include: Iterable[str] | None = None, exclude: Iterable[str] | None = None, with_depends: bool = True, ordered: bool = False) list[str][source]
+

Return a list of labels from the index.

+
+
Parameters:
+
    +
  • include – Labels to include in the result. If None, all labels are included.

  • +
  • exclude – Labels to exclude from the result. If None, no labels are excluded. +Takes precedence over include.

  • +
  • with_depends – If True, the result is transitively closed w.r.t. the dependency relation. +Labels in exclude are pruned, and their dependencies are not considered on the given path.

  • +
  • ordered – If True, the result is topologically sorted w.r.t. the dependency relation.

  • +
+
+
Returns:
+

A list of labels from the index.

+
+
Raises:
+

ValueError – If an item in include or exclude cannot be resolved to a valid label.

+
+
+
+ +
+
+main_module_name: str | None
+
+ +
+
+resolve(label: str) str[source]
+
+ +
+
+resolve_all(labels: Iterable[str]) list[str][source]
+
+ +
+ +
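A short usage sketch for labels, assuming a ClaimIndex obtained via KProve.get_claim_index (documented in pyk.ktool.kprove); the paths, module name, and claim labels below are placeholders.

```python
from pathlib import Path

from pyk.ktool.kprove import KProve

# Placeholder assumptions: a kompiled definition and a spec file containing claims.
kprove = KProve(Path('path/to/definition-kompiled'))
claim_index = kprove.get_claim_index(Path('spec.k'), spec_module_name='SPEC')

# Select one claim plus its transitive dependencies, topologically ordered,
# while pruning an excluded helper claim and anything reachable only through it.
labels = claim_index.labels(
    include=['SPEC.main-claim'],
    exclude=['SPEC.helper-claim'],
    with_depends=True,
    ordered=True,
)
for label in labels:
    print(label, type(claim_index[label]).__name__)  # ClaimIndex is a Mapping[str, KClaim]
```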
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.ktool.claim_loader.html b/pyk/api/pyk.ktool.claim_loader.html new file mode 100644 index 00000000000..cc6c2dfccc6 --- /dev/null +++ b/pyk/api/pyk.ktool.claim_loader.html @@ -0,0 +1,162 @@ + + + + + + + + + pyk.ktool.claim_loader module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.ktool.claim_loader module

+
+
+class ClaimLoader(kprove: KProve)[source]
+

Bases: object

+

Load and cache spec files as JSON.

+
+
+load_claims(spec_file: Path, *, spec_module_name: str | None = None, include_dirs: Iterable[Path] = (), md_selector: str | None = None, claim_labels: Iterable[str] | None = None, exclude_claim_labels: Iterable[str] | None = None, include_dependencies: bool = True, type_inference_mode: TypeInferenceMode | None = None) list[KClaim][source]
+

Attempt to load a spec from JSON, writing the cache file on a cache miss.

+
+
Parameters:
+
    +
  • spec_file – Spec file to load.

  • +
  • spec_module_name (optional) – Spec module to load.

  • +
  • include_dirs (optional) – Includes.

  • +
  • md_selector (optional) – Selector expression for Markdown tags.

  • +
  • claim_labels (optional) – Claim labels to include in the result.

  • +
  • exclude_claim_labels (optional) – Claim labels to exclude from the result.

  • +
  • include_dependencies (optional) – If True, claim dependencies are transitively included.

  • +
  • type_inference_mode (optional) – Type inference mode.

  • +
+
+
+
+ +
+ +
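A sketch of the cached-loading workflow; the KProve instance, spec path, module name, and claim label below are placeholder assumptions.

```python
from pathlib import Path

from pyk.ktool.claim_loader import ClaimLoader
from pyk.ktool.kprove import KProve

# Placeholder assumptions: a kompiled definition and a spec file with claims.
kprove = KProve(Path('path/to/definition-kompiled'))
loader = ClaimLoader(kprove)

# The first call parses the spec and writes the JSON cache file;
# later calls reuse that cache, as described above.
claims = loader.load_claims(
    Path('spec.k'),
    spec_module_name='SPEC',
    claim_labels=['SPEC.main-claim'],
    include_dependencies=True,
)
print(f'{len(claims)} claim(s) loaded')
```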
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.ktool.html b/pyk/api/pyk.ktool.html new file mode 100644 index 00000000000..ec3d4ec30d0 --- /dev/null +++ b/pyk/api/pyk.ktool.html @@ -0,0 +1,294 @@ + + + + + + + + + pyk.ktool package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.ktool package

+
+

Submodules

+
+ +
+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.ktool.kfuzz.html b/pyk/api/pyk.ktool.kfuzz.html new file mode 100644 index 00000000000..4457b4e45ea --- /dev/null +++ b/pyk/api/pyk.ktool.kfuzz.html @@ -0,0 +1,233 @@ + + + + + + + + + pyk.ktool.kfuzz module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.ktool.kfuzz module

+
+
+class KFuzz(definition_dir: Path, handler: KFuzzHandler = <pyk.ktool.kfuzz._KFuzzNullHandler object>)[source]
+

Bases: object

+

Interface for fuzzing over property tests in K.

+
+
+definition_dir: Path
+
+ +
+
+fuzz_with_check(template: Pattern, subst_strategy: dict[EVar, SearchStrategy[Pattern]], check_func: Callable[[Pattern], Any], **hypothesis_args: Any) None[source]
+

Fuzz over a property test using check_func to check for a passing test.

+

See fuzz for info on the parameters.

+
+ +
+
+fuzz_with_exit_code(template: Pattern, subst_strategy: dict[EVar, SearchStrategy[Pattern]], **hypothesis_args: Any) None[source]
+

Fuzz over a property test using the exit code from the interpreter to check for a passing test.

+

See fuzz for info on the parameters.

+
+ +
+
+handler: KFuzzHandler
+
+ +
+ +
+
+class KFuzzHandler[source]
+

Bases: ABC

+

Allows custom behavior (e.g. printing) during fuzzing for each test case and on a test failure.

+

Can be passed to the KFuzz constructor or to fuzz with the handler keyword argument.

+
+
+abstract handle_failure(args: Mapping[EVar, Pattern]) None[source]
+

Handle a test case failure, before the AssertionError is raised.

+
+ +
+
+abstract handle_test(args: Mapping[EVar, Pattern]) None[source]
+

Handle each test case with the variable substitutions that are being used.

+
+ +
+ +
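A minimal handler sketch, assuming only the abstract interface shown above: it prints each generated substitution and flags failures. Pass an instance as the handler keyword argument to fuzz, or to the KFuzz constructor.

```python
from collections.abc import Mapping

from pyk.kore.syntax import EVar, Pattern
from pyk.ktool.kfuzz import KFuzzHandler


class PrintingHandler(KFuzzHandler):
    """Log every test case and report the substitution that caused a failure."""

    def handle_test(self, args: Mapping[EVar, Pattern]) -> None:
        # Called once per generated test case, before it is executed.
        print('test case:', {var.name: pattern.text for var, pattern in args.items()})

    def handle_failure(self, args: Mapping[EVar, Pattern]) -> None:
        # Called on a failing test case, before the AssertionError is raised.
        print('FAILED with:', {var.name: pattern.text for var, pattern in args.items()})
```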
+
+fuzz(definition_dir: str | Path, template: Pattern, subst_strategy: dict[EVar, SearchStrategy[Pattern]], *, check_func: Callable[[Pattern], Any] | None = None, check_exit_code: bool = False, handler: KFuzzHandler = <pyk.ktool.kfuzz._KFuzzNullHandler object>, **hypothesis_args: Any) None[source]
+

Fuzz a property test with concrete execution over a K term.

+
+
Parameters:
+
    +
  • definition_dir – The location of the K definition to run the interpreter for.

  • +
  • template – The term which will be sent to the interpreter after randomizing inputs. It should contain at least one variable which will be substituted for a value.

  • +
  • subst_strategy – Should have each variable in the template term mapped to a strategy for generating values for it.

  • +
  • check_func – Will be called on the kore output from the interpreter. +Should throw an AssertionError if it determines that the output indicates a test failure. +A RuntimeError will be thrown if this is passed as an argument and check_exit_code is True.

  • +
  • check_exit_code – Check the exit code of the interpreter for a test failure instead of using check_func. +An exit code of 0 indicates a passing test. +A RuntimeError will be thrown if this is True and check_func is also passed as an argument.

  • +
  • handler – An instance of a KFuzzHandler implementing custom behavior while fuzzing.

  • +
  • hypothesis_args

    Keyword arguments that will be passed as settings for the hypothesis test. Defaults:

    +

    deadline: 5000

    +

    phases: (Phase.explicit, Phase.reuse, Phase.generate)

    +

  • +
+
+
Raises:
+

RuntimeError – If check_func exists and check_exit_code is set, or check_func doesn’t exist and check_exit_code is cleared.

+
+
+
+ +
+
+kintegers(*, min_value: int | None = None, max_value: int | None = None, with_inj: str | None = None) SearchStrategy[Pattern][source]
+

Return a search strategy for K integers.

+
+
Parameters:
+
    +
  • min_value – Minimum value for the generated integers

  • +
  • max_value – Maximum value for the generated integers

  • +
  • with_inj – Return the integer as an injection into this sort

  • +
+
+
Returns:
+

A strategy which generates integer domain values.

+
+
+
+ +
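Putting fuzz and kintegers together: the sketch below wraps the call in a helper, because the template configuration is definition-specific. The definition directory, the variable name VarX, and the sort names are placeholder assumptions.

```python
from pathlib import Path

from pyk.kore.syntax import EVar, Pattern, SortApp
from pyk.ktool.kfuzz import fuzz, kintegers

DEFN_DIR = Path('path/to/llvm-definition-kompiled')  # placeholder


def fuzz_template(template: Pattern) -> None:
    """Fuzz `template`, which is assumed to contain the variable VarX."""
    var_x = EVar('VarX', SortApp('SortKItem'))
    fuzz(
        DEFN_DIR,
        template,
        # Draw integers in [0, 100] for VarX, injected into sort KItem.
        {var_x: kintegers(min_value=0, max_value=100, with_inj='KItem')},
        check_exit_code=True,  # exit code 0 from the interpreter means the test passed
        deadline=None,         # forwarded to hypothesis as a settings override
    )
```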
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.ktool.kompile.html b/pyk/api/pyk.ktool.kompile.html new file mode 100644 index 00000000000..0f0b26c02ba --- /dev/null +++ b/pyk/api/pyk.ktool.kompile.html @@ -0,0 +1,172 @@ + + + + + + + + + pyk.ktool.kompile module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.ktool.kompile module

+
+
+class PykBackend(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+BOOSTER = 'booster'
+
+ +
+
+HASKELL = 'haskell'
+
+ +
+
+KORE = 'kore'
+
+ +
+
+LLVM = 'llvm'
+
+ +
+
+MAUDE = 'maude'
+
+ +
+ +
+
+kompile(main_file: str | Path, *, backend: str | PykBackend | None = None, command: Iterable[str] = ('kompile',), output_dir: str | Path | None = None, temp_dir: str | Path | None = None, type_inference_mode: str | TypeInferenceMode | None = None, warnings: str | Warnings | None = None, warnings_to_errors: bool = False, ignore_warnings: Iterable[str] = (), no_exc_wrap: bool = False, debug: bool = False, verbose: bool = False, cwd: Path | None = None, check: bool = True, **kwargs: Any) Path[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.ktool.kprint.html b/pyk/api/pyk.ktool.kprint.html new file mode 100644 index 00000000000..8fe3451ccec --- /dev/null +++ b/pyk/api/pyk.ktool.kprint.html @@ -0,0 +1,285 @@ + + + + + + + + + pyk.ktool.kprint module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.ktool.kprint module

+
+
+class KAstInput(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+BINARY = 'binary'
+
+ +
+
+JSON = 'json'
+
+ +
+
+KAST = 'kast'
+
+ +
+
+KORE = 'kore'
+
+ +
+
+PROGRAM = 'program'
+
+ +
+
+RULE = 'rule'
+
+ +
+ +
+
+class KAstOutput(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+BINARY = 'binary'
+
+ +
+
+JSON = 'json'
+
+ +
+
+KAST = 'kast'
+
+ +
+
+KORE = 'kore'
+
+ +
+
+LATEX = 'latex'
+
+ +
+
+NONE = 'none'
+
+ +
+
+PRETTY = 'pretty'
+
+ +
+
+PROGRAM = 'program'
+
+ +
+ +
+
+class KPrint(definition_dir: Path, use_directory: Path | None = None, bug_report: BugReport | None = None, extra_unparsing_modules: Iterable[KFlatModule] = (), patch_symbol_table: Callable[[SymbolTable], None] | None = None)[source]
+

Bases: object

+
+
+backend: str
+
+ +
+
+property definition: KDefinition
+
+ +
+
+definition_dir: Path
+
+ +
+
+property definition_hash: str
+
+ +
+
+kast_to_kore(kast: KInner, sort: KSort | None = None, *, force_kast: bool = False) Pattern[source]
+
+ +
+
+kore_to_kast(kore: Pattern) KInner[source]
+
+ +
+
+kore_to_pretty(pattern: Pattern) str[source]
+
+ +
+
+main_module: str
+
+ +
+
+parse_token(ktoken: KToken, *, as_rule: bool = False) KInner[source]
+
+ +
+
+pretty_print(kast: KAst, *, in_module: str | None = None, unalias: bool = True, sort_collections: bool = False) str[source]
+
+ +
+
+use_directory: Path | None
+
+ +
+ +
+
+gen_glr_parser(parser_file: str | Path, *, command: str | None = None, definition_dir: str | Path | None = None, module: str | None = None, sort: str | None = None, temp_dir: str | Path | None = None) Path[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.ktool.kprove.html b/pyk/api/pyk.ktool.kprove.html new file mode 100644 index 00000000000..8d80904ede1 --- /dev/null +++ b/pyk/api/pyk.ktool.kprove.html @@ -0,0 +1,228 @@ + + + + + + + + + pyk.ktool.kprove module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.ktool.kprove module

+
+
+class KProve(definition_dir: Path, main_file: Path | None = None, use_directory: Path | None = None, command: str = 'kprove', bug_report: BugReport | None = None, extra_unparsing_modules: Iterable[KFlatModule] = (), patch_symbol_table: Callable[[SymbolTable], None] | None = None)[source]
+

Bases: KPrint

+
+
+get_claim_index(spec_file: Path, spec_module_name: str | None = None, include_dirs: Iterable[Path] = (), md_selector: str | None = None, type_inference_mode: TypeInferenceMode | None = None) ClaimIndex[source]
+
+ +
+
+get_claims(spec_file: Path, spec_module_name: str | None = None, include_dirs: Iterable[Path] = (), md_selector: str | None = None, claim_labels: Iterable[str] | None = None, exclude_claim_labels: Iterable[str] | None = None, include_dependencies: bool = True, type_inference_mode: TypeInferenceMode | None = None) list[KClaim][source]
+
+ +
+
+main_file: Path | None
+
+ +
+
+parse_modules(file_path: Path, module_name: str | None = None, include_dirs: Iterable[Path] = (), md_selector: str | None = None, type_inference_mode: TypeInferenceMode | None = None) KFlatModuleList[source]
+
+ +
+
+prove(spec_file: Path, spec_module_name: str | None = None, args: Iterable[str] = (), include_dirs: Iterable[Path] = (), md_selector: str | None = None, haskell_args: Iterable[str] = (), depth: int | None = None) list[CTerm][source]
+
+ +
+
+prove_claim(claim: KClaim, claim_id: str, lemmas: Iterable[KRule] = (), args: Iterable[str] = (), haskell_args: Iterable[str] = (), depth: int | None = None) list[CTerm][source]
+
+ +
+
+prover: list[str]
+
+ +
+
+prover_args: list[str]
+
+ +
+ +
+
+class KProveOutput(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+BINARY = 'binary'
+
+ +
+
+JSON = 'json'
+
+ +
+
+KAST = 'KAST'
+
+ +
+
+KORE = 'kore'
+
+ +
+
+LATEX = 'latex'
+
+ +
+
+NONE = 'none'
+
+ +
+
+PRETTY = 'pretty'
+
+ +
+
+PROGAM = 'program'
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.ktool.krun.html b/pyk/api/pyk.ktool.krun.html new file mode 100644 index 00000000000..8f99d55432f --- /dev/null +++ b/pyk/api/pyk.ktool.krun.html @@ -0,0 +1,261 @@ + + + + + + + + + pyk.ktool.krun module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.ktool.krun module

+
+
+class KRun(definition_dir: Path, use_directory: Path | None = None, command: str = 'krun', bug_report: BugReport | None = None, extra_unparsing_modules: Iterable[KFlatModule] = (), patch_symbol_table: Callable[[SymbolTable], None] | None = None)[source]
+

Bases: KPrint

+
+
+command: str
+
+ +
+
+krun(input_file: Path) tuple[int, KInner][source]
+
+ +
+
+run(pgm: Pattern, *, cmap: Mapping[str, str] | None = None, pmap: Mapping[str, str] | None = None, term: bool = False, depth: int | None = None, expand_macros: bool = True, search_final: bool = False, no_pattern: bool = False, output: KRunOutput | None = KRunOutput.PRETTY, check: bool = False, pipe_stderr: bool = True, bug_report: BugReport | None = None, debugger: bool = False) None[source]
+
+ +
+
+run_pattern(pattern: Pattern, *, depth: int | None = None, expand_macros: bool = False, search_final: bool = False, no_pattern: bool = False, pipe_stderr: bool = True, check: bool = False, bug_report: BugReport | None = None, debugger: bool = False) Pattern[source]
+
+ +
+
+run_process(pgm: Pattern, *, cmap: Mapping[str, str] | None = None, pmap: Mapping[str, str] | None = None, term: bool = False, depth: int | None = None, expand_macros: bool = True, search_final: bool = False, no_pattern: bool = False, output: KRunOutput | None = KRunOutput.PRETTY, pipe_stderr: bool = True, bug_report: BugReport | None = None, debugger: bool = False) CompletedProcess[source]
+
+ +
+
+run_proof_hint(pgm: Pattern, *, cmap: Mapping[str, str] | None = None, pmap: Mapping[str, str] | None = None, output: KRunOutput | None = None, parser: str | None = None, term: bool = False, temp_dir: Path | None = None, depth: int | None = None, expand_macros: bool = True, search_final: bool = False, no_pattern: bool = False, check: bool = False, pipe_stderr: bool = True, debugger: bool = False, proof_hint: bool = False) bytes[source]
+
+ +
+ +
+
+class KRunOutput(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+BINARY = 'binary'
+
+ +
+
+JSON = 'json'
+
+ +
+
+KAST = 'kast'
+
+ +
+
+KORE = 'kore'
+
+ +
+
+LATEX = 'latex'
+
+ +
+
+NONE = 'none'
+
+ +
+
+PRETTY = 'pretty'
+
+ +
+
+PROGRAM = 'program'
+
+ +
+ +
+
+llvm_interpret(definition_dir: str | Path, pattern: Pattern, *, depth: int | None = None, check: bool = True) Pattern[source]
+

Execute the interpreter binary generated by the LLVM Backend.

+
+
Parameters:
+
    +
  • definition_dir – Path to the kompiled definition directory.

  • +
  • pattern – KORE pattern to start rewriting from.

  • +
  • depth – Maximal number of rewrite steps to take.

  • +
+
+
Returns:
+

The pattern resulting from the rewrites.

+
+
Raises:
+

RuntimeError – If check is set and the interpreter fails.

+
+
+
+ +
+
+llvm_interpret_raw(definition_dir: str | Path, kore: str, *, depth: int | None = None, check: bool = True) CompletedProcess[source]
+

Execute the interpreter binary generated by the LLVM Backend, with no processing of input/output.

+
+
Parameters:
+
    +
  • definition_dir – Path to the kompiled definition directory.

  • +
  • kore – KORE string to start rewriting from.

  • +
  • depth – Maximal number of rewrite steps to take.

  • +
  • check – Whether to check the return code of the CompletedProcess.

  • +
+
+
Returns:
+

The CompletedProcess of the interpreter.

+
+
Raises:
+

CalledProcessError – If check is set and the interpreter fails.

+
+
+
+ +
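A hedged example of the raw entry point documented above; the definition directory and the file holding the initial KORE term are hypothetical, and in practice the KORE text would typically come from a parser or from KRun itself.

```python
from pathlib import Path

from pyk.ktool.krun import llvm_interpret_raw

# Placeholder inputs: a kompiled LLVM-backend definition and a textual KORE
# term to start rewriting from.
kore_text = Path('init-config.kore').read_text()
proc = llvm_interpret_raw(Path('imp-llvm-kompiled'), kore_text, depth=100, check=False)
print(proc.returncode)
print(proc.stdout)  # resulting KORE term, with no post-processing
```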
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.ktool.prove_rpc.html b/pyk/api/pyk.ktool.prove_rpc.html new file mode 100644 index 00000000000..0f0b15158b8 --- /dev/null +++ b/pyk/api/pyk.ktool.prove_rpc.html @@ -0,0 +1,146 @@ + + + + + + + + + pyk.ktool.prove_rpc module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.ktool.prove_rpc module

+
+
+class ProveRpc(kprove: KProve, explore_context: Callable[[], ContextManager[KCFGExplore]])[source]
+

Bases: object

+
+
+prove_rpc(options: ProveOptions) list[Proof][source]
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.ktool.utils.html b/pyk/api/pyk.ktool.utils.html new file mode 100644 index 00000000000..1b555c9cd0d --- /dev/null +++ b/pyk/api/pyk.ktool.utils.html @@ -0,0 +1,170 @@ + + + + + + + + + pyk.ktool.utils module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.ktool.utils module

+
+
+final class KDistribution(path: Path)[source]
+

Bases: object

+

Represent the path to the K distribution.

+
+
+path
+

Path to the K distribution.

+
+
Type:
+

pathlib.Path

+
+
+
+ +
+
+property builtin_dir: Path
+

The path to the builtin directory.

+
+ +
+
+static create() KDistribution | None[source]
+

Instantiate the class based on the path to the kompile binary.

+
+ +
+
+path: Path
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.prelude.bytes.html b/pyk/api/pyk.prelude.bytes.html new file mode 100644 index 00000000000..cabded11183 --- /dev/null +++ b/pyk/api/pyk.prelude.bytes.html @@ -0,0 +1,155 @@ + + + + + + + + + pyk.prelude.bytes module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.prelude.bytes module

+
+
+bytesToken(b: bytes) KToken[source]
+
+ +
+
+bytesToken_from_str(pretty: str) KToken[source]
+
+ +
+
+pretty_bytes(token: KToken) bytes[source]
+
+ +
+
+pretty_bytes_str(token: KToken) str[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.prelude.collections.html b/pyk/api/pyk.prelude.collections.html new file mode 100644 index 00000000000..396d0dd812e --- /dev/null +++ b/pyk/api/pyk.prelude.collections.html @@ -0,0 +1,195 @@ + + + + + + + + + pyk.prelude.collections module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.prelude.collections module

+
+
+list_empty() KInner[source]
+
+ +
+
+list_item(k: KInner) KInner[source]
+
+ +
+
+list_of(ks: Iterable[KInner]) KInner[source]
+
+ +
+
+map_empty() KInner[source]
+
+ +
+
+map_item(k: KInner, v: KInner) KInner[source]
+
+ +
+
+map_of(ks: dict[KInner, KInner] | Iterable[tuple[KInner, KInner]]) KInner[source]
+
+ +
+
+rangemap_empty() KInner[source]
+
+ +
+
+rangemap_item(k: tuple[KInner, KInner], v: KInner) KInner[source]
+
+ +
+
+rangemap_of(ks: dict[tuple[KInner, KInner], KInner] | Iterable[tuple[tuple[KInner, KInner], KInner]]) KInner[source]
+
+ +
+
+set_empty() KInner[source]
+
+ +
+
+set_item(k: KInner) KInner[source]
+
+ +
+
+set_of(ks: Iterable[KInner]) KInner[source]
+
+ +
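An illustrative snippet combining the constructors above; the element terms are built with intToken from pyk.prelude.kint, and nothing here is evaluated, the result is plain KAST.

```python
from pyk.prelude.collections import list_of, map_of, set_of
from pyk.prelude.kint import intToken

# Build K List/Map/Set terms from Python values; elements must already be KInner terms.
klist = list_of([intToken(1), intToken(2)])
kmap = map_of({intToken(1): intToken(10), intToken(2): intToken(20)})
kset = set_of([intToken(3)])
```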
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.prelude.html b/pyk/api/pyk.prelude.html new file mode 100644 index 00000000000..377ede540b3 --- /dev/null +++ b/pyk/api/pyk.prelude.html @@ -0,0 +1,228 @@ + + + + + + + + + pyk.prelude package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.prelude.k.html b/pyk/api/pyk.prelude.k.html new file mode 100644 index 00000000000..6fb3c3d2d25 --- /dev/null +++ b/pyk/api/pyk.prelude.k.html @@ -0,0 +1,140 @@ + + + + + + + + + pyk.prelude.k module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.prelude.k module

+
+
+inj(from_sort: KSort, to_sort: KSort, term: KInner) KInner[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.prelude.kbool.html b/pyk/api/pyk.prelude.kbool.html new file mode 100644 index 00000000000..ce8706a00ff --- /dev/null +++ b/pyk/api/pyk.prelude.kbool.html @@ -0,0 +1,160 @@ + + + + + + + + + pyk.prelude.kbool module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.prelude.kbool module

+
+
+andBool(items: Iterable[KInner]) KInner[source]
+
+ +
+
+boolToken(b: bool) KToken[source]
+
+ +
+
+impliesBool(antecedent: KInner, consequent: KInner) KApply[source]
+
+ +
+
+notBool(item: KInner) KApply[source]
+
+ +
+
+orBool(items: Iterable[KInner]) KInner[source]
+
+ +
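A small illustrative example of the Boolean constructors above; the terms are unevaluated KAST applications intended to be passed to the K tooling.

```python
from pyk.prelude.kbool import andBool, boolToken, notBool, orBool

# true andBool (notBool false), assembled as a KAST term.
cond = andBool([boolToken(True), notBool(boolToken(False))])
alt = orBool([boolToken(False), boolToken(True)])
```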
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.prelude.kint.html b/pyk/api/pyk.prelude.kint.html new file mode 100644 index 00000000000..4189e0f0817 --- /dev/null +++ b/pyk/api/pyk.prelude.kint.html @@ -0,0 +1,552 @@ + + + + + + + + + pyk.prelude.kint module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.prelude.kint module

+
+
+absInt(i: KInner) KApply[source]
+

Instantiate the KAST term `absInt`(i).

+
+
Parameters:
+

i – The integer operand.

+
+
Returns:
+

The KAST term `absInt`(i).

+
+
+
+ +
+
+addInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_+Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_+Int_`(i1, i2).

+
+
+
+ +
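A quick illustration of the arithmetic constructors in this module, using intToken and leInt documented further down; the results are symbolic KAST applications, no Python arithmetic is performed.

```python
from pyk.prelude.kint import addInt, intToken, leInt

# `_+Int_`(1, 2) and `_<=Int_`(1, 3) as KAST terms.
three = addInt(intToken(1), intToken(2))
bound = leInt(intToken(1), intToken(3))
```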
+
+andInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_&Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_&Int_`(i1, i2).

+
+
+
+ +
+
+divInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_/Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The dividend.

  • +
  • i2 – The divisor.

  • +
+
+
Returns:
+

The KAST term `_/Int_`(i1, i2).

+
+
+
+ +
+
+eqInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_==Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_==Int_`(i1, i2).

+
+
+
+ +
+
+euclidDivInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_divInt_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The dividend.

  • +
  • i2 – The divisor.

  • +
+
+
Returns:
+

The KAST term `_divInt_`(i1, i2).

+
+
+
+ +
+
+euclidModInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_modInt_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The dividend.

  • +
  • i2 – The divisor.

  • +
+
+
Returns:
+

The KAST term `_modInt_`(i1, i2).

+
+
+
+ +
+
+expInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_^Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The base.

  • +
  • i2 – The exponent.

  • +
+
+
Returns:
+

The KAST term `_^Int_`(i1, i2).

+
+
+
+ +
+
+expModInt(i1: KInner, i2: KInner, i3: KInner) KApply[source]
+

Instantiate the KAST term `_^%Int__`(i1, i2, i3).

+
+
Parameters:
+
    +
  • i1 – The base.

  • +
  • i2 – The exponent.

  • +
  • i3 – The modulus.

  • +
+
+
Returns:
+

The KAST term `_^%Int__`(i1, i2, i3).

+
+
+
+ +
+
+geInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_>=Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_>=Int_`(i1, i2).

+
+
+
+ +
+
+gtInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_>Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_>Int_`(i1, i2).

+
+
+
+ +
+
+intToken(i: int) KToken[source]
+

Instantiate the KAST term #token(i, "Int").

+
+
Parameters:
+

i – The integer literal.

+
+
Returns:
+

The KAST term #token(i, "Int").

+
+
+
+ +
+
+leInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_<=Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_<=Int_`(i1, i2).

+
+
+
+ +
+
+lshiftInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_<<Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_<<Int_`(i1, i2).

+
+
+
+ +
+
+ltInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_<Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_<Int_`(i1, i2).

+
+
+
+ +
+
+maxInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `maxInt`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `maxInt`(i1, i2).

+
+
+
+ +
+
+minInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `minInt`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `minInt`(i1, i2).

+
+
+
+ +
+
+modInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_%Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The dividend.

  • +
  • i2 – The divisor.

  • +
+
+
Returns:
+

The KAST term `_%Int_`(i1, i2).

+
+
+
+ +
+
+mulInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_*Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_*Int_`(i1, i2).

+
+
+
+ +
+
+neqInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_=/=Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_=/=Int_`(i1, i2).

+
+
+
+ +
+
+notInt(i: KInner) KApply[source]
+

Instantiate the KAST term `~Int_`(i).

+
+
Parameters:
+

i – The integer operand.

+
+
Returns:
+

The KAST term `~Int_`(i).

+
+
+
+ +
+
+orInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_|Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_|Int_`(i1, i2).

+
+
+
+ +
+
+rshiftInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_>>Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_>>Int_`(i1, i2).

+
+
+
+ +
+
+subInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_-Int_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_-Int_`(i1, i2).

+
+
+
+ +
+
+xorInt(i1: KInner, i2: KInner) KApply[source]
+

Instantiate the KAST term `_xorInt_`(i1, i2).

+
+
Parameters:
+
    +
  • i1 – The left operand.

  • +
  • i2 – The right operand.

  • +
+
+
Returns:
+

The KAST term `_xorInt_`(i1, i2).

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.prelude.ml.html b/pyk/api/pyk.prelude.ml.html new file mode 100644 index 00000000000..0112fe0380a --- /dev/null +++ b/pyk/api/pyk.prelude.ml.html @@ -0,0 +1,200 @@ + + + + + + + + + pyk.prelude.ml module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.prelude.ml module

+
+
+is_bottom(term: KInner, *, weak: bool = False) bool[source]
+
+ +
+
+is_top(term: KInner, *, weak: bool = False) bool[source]
+
+ +
+
+mlAnd(conjuncts: Iterable[KInner], sort: str | KSort = KSort(name='GeneratedTopCell')) KInner[source]
+
+ +
+
+mlBottom(sort: str | KSort = KSort(name='GeneratedTopCell')) KApply[source]
+
+ +
+
+mlCeil(term: KInner, arg_sort: str | KSort = KSort(name='GeneratedTopCell'), sort: str | KSort = KSort(name='GeneratedTopCell')) KApply[source]
+
+ +
+
+mlEquals(term1: KInner, term2: KInner, arg_sort: str | KSort = KSort(name='K'), sort: str | KSort = KSort(name='GeneratedTopCell')) KApply[source]
+
+ +
+
+mlEqualsFalse(term: KInner, sort: str | KSort = KSort(name='GeneratedTopCell')) KApply[source]
+
+ +
+
+mlEqualsTrue(term: KInner, sort: str | KSort = KSort(name='GeneratedTopCell')) KApply[source]
+
+ +
+
+mlExists(var: KVariable, body: KInner, sort1: str | KSort = KSort(name='KItem'), sort2: str | KSort = KSort(name='GeneratedTopCell')) KApply[source]
+
+ +
+
+mlImplies(antecedent: KInner, consequent: KInner, sort: str | KSort = KSort(name='GeneratedTopCell')) KApply[source]
+
+ +
+
+mlNot(term: KInner, sort: str | KSort = KSort(name='GeneratedTopCell')) KApply[source]
+
+ +
+
+mlOr(disjuncts: Iterable[KInner], sort: str | KSort = KSort(name='GeneratedTopCell')) KInner[source]
+
+ +
+
+mlTop(sort: str | KSort = KSort(name='GeneratedTopCell')) KApply[source]
+
+ +
+ + +
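An illustrative sketch of building a matching-logic constraint with the helpers above; KVariable comes from pyk.kast.inner and the integer helpers from pyk.prelude.kint, and the variable name X is arbitrary.

```python
from pyk.kast.inner import KVariable
from pyk.prelude.kint import intToken, leInt
from pyk.prelude.ml import mlAnd, mlEqualsTrue, mlTop

# A path condition of the shape { X <=Int 10 #Equals true } /\ #Top,
# as it would appear among the constraints of a constrained term.
constraint = mlAnd([
    mlEqualsTrue(leInt(KVariable('X'), intToken(10))),
    mlTop(),
])
```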
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.prelude.string.html b/pyk/api/pyk.prelude.string.html new file mode 100644 index 00000000000..80794875b03 --- /dev/null +++ b/pyk/api/pyk.prelude.string.html @@ -0,0 +1,145 @@ + + + + + + + + + pyk.prelude.string module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.prelude.string module

+
+
+pretty_string(token: KToken) str[source]
+
+ +
+
+stringToken(pretty: str) KToken[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.prelude.utils.html b/pyk/api/pyk.prelude.utils.html new file mode 100644 index 00000000000..3054dbd2fdf --- /dev/null +++ b/pyk/api/pyk.prelude.utils.html @@ -0,0 +1,140 @@ + + + + + + + + + pyk.prelude.utils module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.prelude.utils module

+
+
+token(x: bool | int | str | bytes) KToken[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.proof.html b/pyk/api/pyk.proof.html new file mode 100644 index 00000000000..51b8bec44e3 --- /dev/null +++ b/pyk/api/pyk.proof.html @@ -0,0 +1,460 @@ + + + + + + + + + pyk.proof package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.proof package

+
+

Submodules

+
+ +
+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.proof.implies.html b/pyk/api/pyk.proof.implies.html new file mode 100644 index 00000000000..717ecd9a1c3 --- /dev/null +++ b/pyk/api/pyk.proof.implies.html @@ -0,0 +1,453 @@ + + + + + + + + + pyk.proof.implies module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.proof.implies module

+
+
+class EqualityProof(id: str, lhs_body: KInner, rhs_body: KInner, sort: KSort, constraints: Iterable[KInner] = (), simplified_constraints: KInner | None = None, simplified_equality: KInner | None = None, csubst: CSubst | None = None, proof_dir: Path | None = None, subproof_ids: Iterable[str] = (), admitted: bool = False)[source]
+

Bases: ImpliesProof

+
+
+property constraint: KInner
+
+ +
+
+property constraints: list[KInner]
+
+ +
+
+property dict: dict[str, Any]
+
+ +
+
+property equality: KApply
+
+ +
+
+static from_claim(claim: KClaim, defn: KDefinition, proof_dir: Path | None = None) EqualityProof[source]
+
+ +
+
+classmethod from_dict(dct: Mapping[str, Any], proof_dir: Path | None = None) EqualityProof[source]
+
+ +
+
+property lhs_body: KInner
+
+ +
+
+pretty(kprint: KPrint) Iterable[str][source]
+
+ +
+
+static read_proof_data(proof_dir: Path, id: str) EqualityProof[source]
+
+ +
+
+property rhs_body: KInner
+
+ +
+
+property simplified_constraints: KInner | None
+
+ +
+
+property simplified_equality: KInner | None
+
+ +
+
+property sort: KSort
+
+ +
+
+property summary: EqualitySummary
+
+ +
+ +
+
+class EqualitySummary(id: 'str', status: 'ProofStatus', admitted: 'bool')[source]
+

Bases: ProofSummary

+
+
+admitted: bool
+
+ +
+
+id: str
+
+ +
+
+property lines: list[str]
+
+ +
+
+status: ProofStatus
+
+ +
+ +
+
+class ImpliesProof(id: str, antecedent: KInner, consequent: KInner, bind_universally: bool = False, simplified_antecedent: KInner | None = None, simplified_consequent: KInner | None = None, csubst: CSubst | None = None, proof_dir: Path | None = None, subproof_ids: Iterable[str] = (), admitted: bool = False)[source]
+

Bases: Proof[ImpliesProofStep, ImpliesProofResult]

+
+
+antecedent: KInner
+
+ +
+
+bind_universally: bool
+
+ +
+
+property can_progress: bool
+
+ +
+
+commit(result: ImpliesProofResult) None[source]
+
+ +
+
+consequent: KInner
+
+ +
+
+csubst: CSubst | None
+
+ +
+
+property dict: dict[str, Any]
+
+ +
+
+classmethod from_dict(dct: Mapping[str, Any], proof_dir: Path | None = None) ImpliesProof[source]
+
+ +
+
+get_steps() list[ImpliesProofStep][source]
+
+ +
+
+property own_status: ProofStatus
+
+ +
+
+simplified_antecedent: KInner | None
+
+ +
+
+simplified_consequent: KInner | None
+
+ +
+
+write_proof_data(subproofs: bool = False) None[source]
+
+ +
+ +
+
+class ImpliesProofResult(csubst: 'CSubst | None', simplified_antecedent: 'KInner | None', simplified_consequent: 'KInner | None')[source]
+

Bases: object

+
+
+csubst: CSubst | None
+
+ +
+
+simplified_antecedent: KInner | None
+
+ +
+
+simplified_consequent: KInner | None
+
+ +
+ +
+
+class ImpliesProofStep(proof: 'ImpliesProof')[source]
+

Bases: object

+
+
+proof: ImpliesProof
+
+ +
+ +
+
+class ImpliesProver(proof: ImpliesProof, kcfg_explore: KCFGExplore, assume_defined: bool = False)[source]
+

Bases: Prover[ImpliesProof, ImpliesProofStep, ImpliesProofResult]

+
+
+assume_defined: bool
+
+ +
+
+close() None[source]
+
+ +
+
+failure_info(proof: ImpliesProof) FailureInfo[source]
+
+ +
+
+init_proof(proof: ImpliesProof) None[source]
+
+ +
+
+kcfg_explore: KCFGExplore
+
+ +
+
+proof: ImpliesProof
+
+ +
+
+step_proof(step: ImpliesProofStep) list[ImpliesProofResult][source]
+
+ +
+ +
+
+class RefutationProof(id: str, pre_constraints: Iterable[KInner], last_constraint: KInner, simplified_antecedent: KInner | None = None, simplified_consequent: KInner | None = None, csubst: CSubst | None = None, proof_dir: Path | None = None, subproof_ids: Iterable[str] = (), admitted: bool = False)[source]
+

Bases: ImpliesProof

+
+
+property dict: dict[str, Any]
+
+ +
+
+classmethod from_dict(dct: Mapping[str, Any], proof_dir: Path | None = None) RefutationProof[source]
+
+ +
+
+property last_constraint: KInner
+
+ +
+
+property pre_constraints: list[KInner]
+
+ +
+
+pretty(kprint: KPrint) Iterable[str][source]
+
+ +
+
+static read_proof_data(proof_dir: Path, id: str) RefutationProof[source]
+
+ +
+
+property simplified_constraints: KInner | None
+
+ +
+
+property summary: RefutationSummary
+
+ +
+
+to_claim(claim_id: str) tuple[KClaim, Subst][source]
+
+ +
+ +
+
+class RefutationSummary(id: 'str', status: 'ProofStatus')[source]
+

Bases: ProofSummary

+
+
+id: str
+
+ +
+
+property lines: list[str]
+
+ +
+
+status: ProofStatus
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.proof.proof.html b/pyk/api/pyk.proof.proof.html new file mode 100644 index 00000000000..c819ebc2721 --- /dev/null +++ b/pyk/api/pyk.proof.proof.html @@ -0,0 +1,481 @@ + + + + + + + + + pyk.proof.proof module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.proof.proof module

+
+
+class CompositeSummary(_summaries: 'Iterable[ProofSummary]')[source]
+

Bases: ProofSummary

+
+
+property lines: list[str]
+
+ +
+
+summaries: tuple[ProofSummary, ...]
+
+ +
+ +
+
+class FailureInfo[source]
+

Bases: object

+
+ +
+
+class Proof(id: str, proof_dir: Path | None = None, subproof_ids: Iterable[str] = (), admitted: bool = False)[source]
+

Bases: Generic[PS, SR]

+

Abstract representation of a proof that can be executed in one or more discrete steps.

+

Generic type variables:

+
    +
  • PS: Proof step: data required to perform a step of the proof.

  • +
  • SR: Step result: data produced by executing a PS with Prover.step_proof, used to update the Proof.

  • +
+
+
+add_subproof(proof: Proof) None[source]
+
+ +
+
+admit() None[source]
+
+ +
+
+admitted: bool
+
+ +
+
+abstract property can_progress: bool
+
+ +
+
+abstract commit(result: SR) None[source]
+

Apply the step result of type SR to self, modifying self.

+
+ +
+
+property dict: dict[str, Any]
+
+ +
+
+property digest: str
+
+ +
+
+property failed: bool
+
+ +
+
+failure_info: FailureInfo | None
+
+ +
+
+fetch_subproof(proof_id: str, force_reread: bool = False, uptodate_check_method: str = 'timestamp') Proof[source]
+

Get a subproof, re-reading from disk if it’s not up-to-date.

+
+ +
+
+fetch_subproof_data(proof_id: str, force_reread: bool = False, uptodate_check_method: str = 'timestamp') Proof[source]
+

Get a subproof, re-reading from disk if it’s not up-to-date.

+
+ +
+
+abstract classmethod from_dict(dct: Mapping[str, Any], proof_dir: Path | None = None) Proof[source]
+
+ +
+
+abstract get_steps() Iterable[PS][source]
+

Return all currently available steps associated with this Proof. Should not modify self.

+
+ +
+
+id: str
+
+ +
+
+property json: str
+
+ +
+
+property one_line_summary: str
+
+ +
+
+abstract property own_status: ProofStatus
+
+ +
+
+property passed: bool
+
+ +
+
+static proof_data_exists(id: str, proof_dir: Path) bool[source]
+
+ +
+
+proof_dir: Path | None
+
+ +
+
+static proof_exists(id: str, proof_dir: Path) bool[source]
+
+ +
+
+property proof_subdir: Path | None
+
+ +
+
+classmethod read_proof(id: str, proof_dir: Path) Proof[source]
+
+ +
+
+static read_proof_data(proof_dir: Path, id: str) Proof[source]
+
+ +
+
+read_subproof(proof_id: str) None[source]
+
+ +
+
+read_subproof_data(proof_id: str) None[source]
+
+ +
+
+remove_subproof(proof_id: str) None[source]
+
+ +
+
+property status: ProofStatus
+
+ +
+
+property subproof_ids: list[str]
+
+ +
+
+property subproofs: Iterable[Proof]
+

Return the subproofs, re-reading from disk the ones that changed.

+
+ +
+
+property subproofs_status: ProofStatus
+
+ +
+
+property summary: ProofSummary
+
+ +
+
+property up_to_date: bool
+

Check that the proof’s representation on disk is up-to-date.

+
+ +
+
+write_proof(subproofs: bool = False) None[source]
+
+ +
+
+abstract write_proof_data() None[source]
+
+ +
+ +
+
+class ProofStatus(value)[source]
+

Bases: Enum

+

An enumeration.

+
+
+FAILED = 'failed'
+
+ +
+
+PASSED = 'passed'
+
+ +
+
+PENDING = 'pending'
+
+ +
+ +
+
+class ProofSummary[source]
+

Bases: ABC

+
+
+id: str
+
+ +
+
+abstract property lines: list[str]
+
+ +
+
+status: ProofStatus
+
+ +
+ +
+
+class Prover[source]
+

Bases: ContextManager[Prover], Generic[P, PS, SR]

+

Abstract class which advances Proofs with init_proof() and step_proof().

+

Generic type variables:

+
    +
  • P: Type of proof this Prover operates on.

  • +
  • PS: Proof step: data required to perform a step of the proof.

  • +
  • SR: Step result: data produced by executing a PS with Prover.step_proof, used to update the Proof.

  • +
+
+
+advance_proof(proof: P, max_iterations: int | None = None, fail_fast: bool = False, callback: Callable[[P], None] = <function Prover.<lambda>>, maintenance_rate: int = 1) None[source]
+

Advance a proof.

+

Performs the loop Proof.get_steps() -> Prover.step_proof() -> Proof.commit().

+
+
Parameters:
+
    +
  • proof – proof to advance.

  • +
  • max_iterations (optional) – Maximum number of steps to take.

  • +
  • fail_fast – If the proof is failing after finishing a step, +halt execution even if there are still available steps.

  • +
  • callback – Callable to run in between each completed step, useful for getting real-time information about the proof.

  • +
  • maintenance_rate – Number of iterations between proof maintenance (writing to disk and executing callback).

  • +
+
+
+
+ +
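A hedged sketch of the loop described above. The helper assumes a concrete Prover and a matching Proof (for example an APRProver over an APRProof) have already been constructed elsewhere, since building them requires a KCFGExplore and a running backend; the iteration bounds are arbitrary.

```python
from pyk.proof.proof import Proof, Prover

def run_to_completion(prover: Prover, proof: Proof) -> None:
    """Drive the get_steps -> step_proof -> commit loop via advance_proof."""
    prover.advance_proof(
        proof,
        max_iterations=100,                            # take at most 100 steps
        fail_fast=True,                                # stop as soon as the proof is failing
        callback=lambda p: print(p.one_line_summary),  # progress report between steps
        maintenance_rate=10,                           # write to disk every 10 iterations
    )
    print(proof.status)
```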
+
+abstract close() None[source]
+
+ +
+
+abstract failure_info(proof: P) FailureInfo[source]
+
+ +
+
+abstract init_proof(proof: P) None[source]
+

Perform any initialization steps needed at the beginning of proof execution.

+

For example, for APRProver, this uploads the circularity and dependency modules of the proof to the KoreServer via add_module.

+
+ +
+
+abstract step_proof(step: PS) Iterable[SR][source]
+

Do the work associated with a PS, a proof step.

+

Should not modify a Proof or self, but may read from self as long as +those fields are not being modified during step_proof(), get_steps(), and commit().

+
+ +
+ +
+
+parallel_advance_proof(proof: P, create_prover: Callable[[], Prover[P, PS, SR]], max_iterations: int | None = None, fail_fast: bool = False, max_workers: int = 1, callback: Callable[[P], None] = <function <lambda>>, maintenance_rate: int = 1) None[source]
+

Advance proof with multithreaded strategy.

+

Submits Prover.step_proof() for each available step to a worker thread pool, commits results with Proof.commit() as they become available, and collects new steps with Proof.get_steps() to resubmit to the pool.

+

Generic type variables:

+
    +
  • P: Type of proof to be advanced in parallel.

  • +
  • PS: Proof step: data required to perform a step of the proof.

  • +
  • SR: Step result: data produced by executing a PS with Prover.step_proof, used to update the Proof.

  • +
+
+
Parameters:
+
    +
  • proof – The proof to advance.

  • +
  • create_prover – Function which creates a new Prover. These provers must not reference any shared +data to be written during parallel_advance_proof, to avoid race conditions.

  • +
  • max_iterations – Maximum number of steps to take.

  • +
  • fail_fast – If the proof is failing after finishing a step, +halt execution even if there are still available steps.

  • +
  • max_workers – Maximum number of worker threads the pool can spawn.

  • +
  • callback – Callable to run during proof maintenance, useful for getting real-time information about the proof.

  • +
  • maintenance_rate – Number of iterations between proof maintenance (writing to disk and executing callback).

  • +
+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.proof.reachability.html b/pyk/api/pyk.proof.reachability.html new file mode 100644 index 00000000000..1d4d78cb0c3 --- /dev/null +++ b/pyk/api/pyk.proof.reachability.html @@ -0,0 +1,738 @@ + + + + + + + + + pyk.proof.reachability module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.proof.reachability module

+
+
+class APRFailureInfo(failing_nodes: 'Iterable[int]', pending_nodes: 'Iterable[int]', path_conditions: 'Mapping[int, str]', failure_reasons: 'Mapping[int, str]', models: 'Mapping[int, Iterable[tuple[str, str]]]')[source]
+

Bases: FailureInfo

+
+
+failing_nodes: frozenset[int]
+
+ +
+
+failure_reasons: FrozenDict[int, str]
+
+ +
+
+static from_proof(proof: APRProof, kcfg_explore: KCFGExplore, counterexample_info: bool = False, assume_defined: bool = False) APRFailureInfo[source]
+
+ +
+
+models: FrozenDict[int, frozenset[tuple[str, str]]]
+
+ +
+
+path_conditions: FrozenDict[int, str]
+
+ +
+
+pending_nodes: frozenset[int]
+
+ +
+
+print() list[str][source]
+
+ +
+ +
+
+class APRProof(id: str, kcfg: KCFG, terminal: Iterable[int], init: NodeIdLike, target: NodeIdLike, logs: dict[int, tuple[LogEntry, ...]], bmc_depth: int | None = None, bounded: Iterable[int] | None = None, proof_dir: Path | None = None, node_refutations: dict[int, str] | None = None, subproof_ids: Iterable[str] = (), circularity: bool = False, admitted: bool = False, _exec_time: float = 0, error_info: Exception | None = None, prior_loops_cache: dict[int, Iterable[int]] | None = None)[source]
+

Bases: Proof[APRProofStep, APRProofResult], KCFGExploration

+

Represent an all-path reachability proof.

+

APRProof and APRProver implement all-path reachability logic, +as introduced by A. Stefanescu and others in their paper ‘All-Path Reachability Logic’: +https://doi.org/10.23638/LMCS-15(2:5)2019

+

Note that the reachability logic formula phi =>A psi does not have the same meaning as CTL/CTL*'s phi -> AF psi, since reachability logic ignores infinite traces. This implementation extends the above with bounded model checking, allowing the user to specify an optional loop iteration bound for each loop in execution.

+
+
+add_bounded(nid: NodeIdLike) None[source]
+
+ +
+
+add_exec_time(exec_time: float) None[source]
+
+ +
+
+as_rule(priority: int = 20) KRule[source]
+
+ +
+
+as_rules(priority: int = 20, direct_rule: bool = False) list[KRule][source]
+
+ +
+
+bmc_depth: int | None
+
+ +
+
+property bounded: list[Node]
+
+ +
+
+property can_progress: bool
+
+ +
+
+property circularities_module_name: str
+
+ +
+
+circularity: bool
+
+ +
+
+commit(result: APRProofResult) None[source]
+
+ +
+
+construct_node_refutation(node: Node) RefutationProof | None[source]
+
+ +
+
+property dependencies_module_name: str
+
+ +
+
+property dict: dict[str, Any]
+
+ +
+
+error_info: Exception | None
+
+ +
+
+property exec_time: float
+
+ +
+
+property failing: list[Node]
+
+ +
+
+formatted_exec_time() str[source]
+
+ +
+
+static from_claim(defn: KDefinition, claim: KClaim, logs: dict[int, tuple[LogEntry, ...]], proof_dir: Path | None = None, bmc_depth: int | None = None, **kwargs: Any) APRProof[source]
+
+ +
+
+classmethod from_dict(dct: Mapping[str, Any], proof_dir: Path | None = None) APRProof[source]
+
+ +
+
+static from_spec_modules(defn: KDefinition, spec_modules: KFlatModuleList, logs: dict[int, tuple[LogEntry, ...]], proof_dir: Path | None = None, spec_labels: Iterable[str] | None = None) list[APRProof][source]
+
+ +
+
+get_refutation_id(node_id: int) str[source]
+
+ +
+
+get_steps() list[APRProofStep][source]
+
+ +
+
+init: int
+
+ +
+
+is_bounded(node_id: NodeIdLike) bool[source]
+
+ +
+
+is_failing(node_id: NodeIdLike) bool[source]
+
+ +
+
+is_init(node_id: NodeIdLike) bool[source]
+
+ +
+
+is_pending(node_id: NodeIdLike) bool[source]
+
+ +
+
+is_refuted(node_id: NodeIdLike) bool[source]
+
+ +
+
+is_target(node_id: NodeIdLike) bool[source]
+
+ +
+
+logs: dict[int, tuple[LogEntry, ...]]
+
+ +
+
+property module_name: str
+
+ +
+
+node_refutations: dict[int, RefutationProof]
+
+ +
+
+nonzero_depth(node: Node) bool[source]
+
+ +
+
+property one_line_summary: str
+
+ +
+
+property own_status: ProofStatus
+
+ +
+
+path_constraints(final_node_id: NodeIdLike, sort_with: KDefinition | None = None) KInner[source]
+
+ +
+
+property pending: list[Node]
+
+ +
+
+prior_loops_cache: dict[int, tuple[int, ...]]
+
+ +
+
+prune(node_id: NodeIdLike, keep_nodes: Iterable[NodeIdLike] = ()) list[int][source]
+
+ +
+
+static read_proof(id: str, proof_dir: Path) APRProof[source]
+
+ +
+
+static read_proof_data(proof_dir: Path, id: str) APRProof[source]
+
+ +
+
+refute_node(node: Node) RefutationProof | None[source]
+
+ +
+
+property rule_id: str
+
+ +
+
+set_exec_time(exec_time: float) None[source]
+
+ +
+
+shortest_path_to(node_id: NodeIdLike) tuple[KCFG.Successor, ...][source]
+
+ +
+
+property summary: CompositeSummary
+
+ +
+
+target: int
+
+ +
+
+unrefute_node(node: Node) None[source]
+
+ +
+
+write_proof_data() None[source]
+
+ +
+ +
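A sketch (not a drop-in recipe) that ties the APRProof and APRProver APIs together; the KDefinition, KClaim and KCFGExplore arguments are assumed to be produced elsewhere (a parsed definition, a claim from a spec module, and a client backed by a running kore-rpc server), and the proof directory and depth bound are illustrative.

```python
from pathlib import Path

from pyk.kast.outer import KClaim, KDefinition
from pyk.kcfg.explore import KCFGExplore
from pyk.proof.reachability import APRProof, APRProver

def prove_one_claim(defn: KDefinition, claim: KClaim, kcfg_explore: KCFGExplore) -> APRProof:
    """Create an all-path reachability proof from a claim, advance it, and persist it."""
    proof = APRProof.from_claim(defn, claim, logs={}, proof_dir=Path('proofs'))  # 'proofs' is illustrative
    prover = APRProver(kcfg_explore, execute_depth=100)  # bound each extension to 100 rewrite steps
    prover.advance_proof(proof)
    proof.write_proof_data()
    return proof
```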
+
+class APRProofBoundedResult(node_id: 'int', prior_loops_cache_update: 'tuple[int, ...]', optimize_kcfg: 'bool')[source]
+

Bases: APRProofResult

+
+ +
+
+class APRProofExtendAndCacheResult(node_id: int, prior_loops_cache_update: tuple[int, ...], optimize_kcfg: bool, extension_to_apply: KCFGExtendResult, extension_to_cache: KCFGExtendResult)[source]
+

Bases: APRProofExtendResult

+

Proof extension to be cached.

+
+
+extension_to_cache: KCFGExtendResult
+
+ +
+ +
+
+class APRProofExtendResult(node_id: int, prior_loops_cache_update: tuple[int, ...], optimize_kcfg: bool, extension_to_apply: KCFGExtendResult)[source]
+

Bases: APRProofResult

+

Proof extension to be applied.

+
+
+extension_to_apply: KCFGExtendResult
+
+ +
+ +
+
+class APRProofResult(node_id: 'int', prior_loops_cache_update: 'tuple[int, ...]', optimize_kcfg: 'bool')[source]
+

Bases: object

+
+
+node_id: int
+
+ +
+
+optimize_kcfg: bool
+
+ +
+
+prior_loops_cache_update: tuple[int, ...]
+
+ +
+ +
+
+class APRProofStep(node: 'KCFG.Node', target: 'KCFG.Node', proof_id: 'str', bmc_depth: 'int | None', use_cache: 'NodeIdLike | None', module_name: 'str', shortest_path_to_node: 'tuple[KCFG.Node, ...]', prior_loops_cache: 'FrozenDict[int, tuple[int, ...]]', circularity: 'bool', nonzero_depth: 'bool', circularity_rule_id: 'str')[source]
+

Bases: object

+
+
+bmc_depth: int | None
+
+ +
+
+circularity: bool
+
+ +
+
+circularity_rule_id: str
+
+ +
+
+module_name: str
+
+ +
+
+node: KCFG.Node
+
+ +
+
+nonzero_depth: bool
+
+ +
+
+prior_loops_cache: FrozenDict[int, tuple[int, ...]]
+
+ +
+
+proof_id: str
+
+ +
+
+shortest_path_to_node: tuple[KCFG.Node, ...]
+
+ +
+
+target: KCFG.Node
+
+ +
+
+use_cache: NodeIdLike | None
+
+ +
+ +
+
+class APRProofSubsumeResult(node_id: 'int', prior_loops_cache_update: 'tuple[int, ...]', optimize_kcfg: 'bool', csubst: 'CSubst')[source]
+

Bases: APRProofResult

+
+
+csubst: CSubst
+
+ +
+ +
+
+class APRProofTerminalResult(node_id: 'int', prior_loops_cache_update: 'tuple[int, ...]', optimize_kcfg: 'bool')[source]
+

Bases: APRProofResult

+
+ +
+
+class APRProofUseCacheResult(node_id: int, prior_loops_cache_update: tuple[int, ...], optimize_kcfg: bool, cached_node_id: NodeIdLike)[source]
+

Bases: APRProofResult

+

Proof extension to be applied using the extension cache.

+
+
+cached_node_id: NodeIdLike
+
+ +
+ +
+
+class APRProver(kcfg_explore: KCFGExplore, execute_depth: int | None = None, cut_point_rules: Iterable[str] = (), terminal_rules: Iterable[str] = (), counterexample_info: bool = False, fast_check_subsumption: bool = False, direct_subproof_rules: bool = False, assume_defined: bool = False, extra_module: KFlatModule | None = None, optimize_kcfg: bool = False)[source]
+

Bases: Prover[APRProof, APRProofStep, APRProofResult]

+
+
+assume_defined: bool
+
+ +
+
+close() None[source]
+
+ +
+
+counterexample_info: bool
+
+ +
+
+cut_point_rules: Iterable[str]
+
+ +
+
+direct_subproof_rules: bool
+
+ +
+
+execute_depth: int | None
+
+ +
+
+extra_module: KFlatModule | None
+
+ +
+
+failure_info(proof: APRProof) FailureInfo[source]
+
+ +
+
+fast_check_subsumption: bool
+
+ +
+
+init_proof(proof: APRProof) None[source]
+
+ +
+
+kcfg_explore: KCFGExplore
+
+ +
+
+main_module_name: str
+
+ +
+
+optimize_kcfg: bool
+
+ +
+
+step_proof(step: APRProofStep) list[APRProofResult][source]
+
+ +
+
+terminal_rules: Iterable[str]
+
+ +
+ +
+
+class APRSummary(id: 'str', status: 'ProofStatus', admitted: 'bool', nodes: 'int', pending: 'int', failing: 'int', vacuous: 'int', stuck: 'int', terminal: 'int', refuted: 'int', bmc_depth: 'int | None', bounded: 'int', subproofs: 'int', formatted_exec_time: 'str')[source]
+

Bases: ProofSummary

+
+
+admitted: bool
+
+ +
+
+bmc_depth: int | None
+
+ +
+
+bounded: int
+
+ +
+
+failing: int
+
+ +
+
+formatted_exec_time: str
+
+ +
+
+id: str
+
+ +
+
+property lines: list[str]
+
+ +
+
+nodes: int
+
+ +
+
+pending: int
+
+ +
+
+refuted: int
+
+ +
+
+status: ProofStatus
+
+ +
+
+stuck: int
+
+ +
+
+subproofs: int
+
+ +
+
+terminal: int
+
+ +
+
+vacuous: int
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.proof.show.html b/pyk/api/pyk.proof.show.html new file mode 100644 index 00000000000..2bafbab816e --- /dev/null +++ b/pyk/api/pyk.proof.show.html @@ -0,0 +1,187 @@ + + + + + + + + + pyk.proof.show module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.proof.show module

+
+
+class APRProofNodePrinter(proof: APRProof, kprint: KPrint, full_printer: bool = False, minimize: bool = False)[source]
+

Bases: NodePrinter

+
+
+node_attrs(kcfg: KCFG, node: KCFG.Node) list[str][source]
+
+ +
+
+proof: APRProof
+
+ +
+ +
+
+class APRProofShow(kprint: KPrint, node_printer: NodePrinter | None = None)[source]
+

Bases: object

+
+
+dot(proof: APRProof) Digraph[source]
+
+ +
+
+dump(proof: APRProof, dump_dir: Path, dot: bool = False) None[source]
+
+ +
+
+kcfg_show: KCFGShow
+
+ +
+
+pretty(proof: APRProof, minimize: bool = True) Iterable[str][source]
+
+ +
+
+pretty_segments(proof: APRProof, minimize: bool = True) Iterable[tuple[str, Iterable[str]]][source]
+
+ +
+
+show(proof: APRProof, nodes: Iterable[NodeIdLike] = (), node_deltas: Iterable[tuple[NodeIdLike, NodeIdLike]] = (), to_module: bool = False, minimize: bool = True, sort_collections: bool = False, omit_cells: Iterable[str] = ()) list[str][source]
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.proof.tui.html b/pyk/api/pyk.proof.tui.html new file mode 100644 index 00000000000..36ddbf1b7b1 --- /dev/null +++ b/pyk/api/pyk.proof.tui.html @@ -0,0 +1,174 @@ + + + + + + + + + pyk.proof.tui module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.proof.tui module

+
+
+class APRProofBehaviorView(proof: APRProof, kprint: KPrint, minimize: bool = True, node_printer: NodePrinter | None = None, id: str = '')[source]
+

Bases: ScrollableContainer

+
+
+can_focus: bool = True
+

Widget may receive focus.

+
+ +
+
+can_focus_children: bool = True
+

Widget’s children may receive focus.

+
+ +
+
+compose() ComposeResult[source]
+
+ +
+ +
+
+class APRProofViewer(proof: APRProof, kprint: KPrint, node_printer: NodePrinter | None = None, custom_view: Callable[[KCFGElem], Iterable[str]] | None = None, minimize: bool = True)[source]
+

Bases: KCFGViewer

+
+
+compose() ComposeResult[source]
+
+ +
+
+on_mount() None[source]
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.testing.html b/pyk/api/pyk.testing.html new file mode 100644 index 00000000000..3ea2f9f244c --- /dev/null +++ b/pyk/api/pyk.testing.html @@ -0,0 +1,149 @@ + + + + + + + + + pyk.testing package — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.testing package

+
+

Submodules

+ +
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.testing.plugin.html b/pyk/api/pyk.testing.plugin.html new file mode 100644 index 00000000000..c42195a5d89 --- /dev/null +++ b/pyk/api/pyk.testing.plugin.html @@ -0,0 +1,160 @@ + + + + + + + + + pyk.testing.plugin module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.testing.plugin module

+
+
+bug_report(request: FixtureRequest, tmp_path: Path) BugReport | None[source]
+
+ +
+
+kompile(tmp_path_factory: TempPathFactory) Kompiler[source]
+
+ +
+
+profile(tmp_path: Path) Profiler[source]
+
+ +
+
+pytest_addoption(parser: Parser) None[source]
+
+ +
+
+use_server(request: FixtureRequest) UseServer[source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/api/pyk.utils.html b/pyk/api/pyk.utils.html new file mode 100644 index 00000000000..762daae3468 --- /dev/null +++ b/pyk/api/pyk.utils.html @@ -0,0 +1,364 @@ + + + + + + + + + pyk.utils module — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

pyk.utils module

+
+
+class BugReport(bug_report: Path)[source]
+

Bases: object

+
+
+add_command(args: Iterable[str]) None[source]
+
+ +
+
+add_file(finput: Path, arcname: Path) None[source]
+
+ +
+
+add_file_contents(input: str, arcname: Path) None[source]
+
+ +
+
+add_request(req_name: str) None[source]
+
+ +
+ +
+
+class Chainable(f: Callable[[P], R])[source]
+

Bases: Generic[P, R]

+
+ +
+
+class FrozenDict(*args: Any, **kwargs: Any)[source]
+

Bases: Mapping[K, V]

+
+ +
+
+final class POSet(relation: 'Iterable[tuple[H, H]]')[source]
+

Bases: Generic[H]

+
+
+image: FrozenDict[H, frozenset[H]]
+
+ +
+ +
+
+abs_or_rel_to(path: Path, base: Path) Path[source]
+
+ +
+
+add_indent(indent: str, lines: Iterable[str]) list[str][source]
+
+ +
+
+case(cases: Iterable[tuple[Callable[[P], bool], Callable[[P], R]]], default: Callable[[P], R] | None = None) Callable[[P], R][source]
+
+ +
+
+check_absolute_path(path: Path) None[source]
+
+ +
+
+check_dir_path(path: Path) None[source]
+
+ +
+
+check_file_path(path: Path) None[source]
+
+ +
+
+check_relative_path(path: Path) None[source]
+
+ +
+
+check_type(x: Any, typ: type[T]) T[source]
+
+ +
+
+compare_short_hashes(lhs: str, rhs: str) bool[source]
+
+ +
+
+deconstruct_short_hash(h: str) tuple[str, str][source]
+
+ +
+
+ensure_dir_path(path: str | Path) Path[source]
+
+ +
+
+exit_with_process_error(err: CalledProcessError) None[source]
+
+ +
+
+filter_none(mapping: Mapping[K, V]) dict[K, V][source]
+
+ +
+
+find_common_items(l1: Iterable[T], l2: Iterable[T]) tuple[list[T], list[T], list[T]][source]
+
+ +
+
+gen_file_timestamp(comment: str = '//') str[source]
+
+ +
+
+hash_file(file: Path, chunk_num_blocks: int = 128) str[source]
+
+ +
+
+hash_str(x: object) str[source]
+
+ +
+
+intersperse(iterable: Iterable[T], delimiter: T) Iterator[T][source]
+
+ +
+
+is_hash(x: Any) bool[source]
+
+ +
+
+is_hexstring(x: str) bool[source]
+
+ +
+
+maybe(f: Callable[[P], R]) Callable[[P | None], R | None][source]
+
+ +
+
+merge_with(f: Callable[[V, V], V], d1: Mapping[K, V], d2: Mapping[K, V]) dict[K, V][source]
+
+ +
+
+none(x: Any) None[source]
+
+ +
+
+nonempty_str(x: Any) str[source]
+
+ +
+
+not_none(x: T | None) T[source]
+
+ +
+
+partition(iterable: Iterable[T], pred: Callable[[T, T], bool]) list[list[T]][source]
+

Partition the iterable into sublists based on the given predicate.

+

The predicate pred(_, _) should be an equivalence relation, i.e. it should satisfy: pred(x, x); if pred(x, y) and pred(y, z) then pred(x, z); and if pred(x, y) then pred(y, x).

+
+ +
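A small illustration of the predicate contract above, using plain equality (which satisfies all three requirements); the input list is arbitrary.

```python
from pyk.utils import partition

# Group equal elements together.
groups = partition([1, 1, 2, 3, 3, 3], lambda x, y: x == y)
# expected: [[1, 1], [2], [3, 3, 3]]
```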
+
+raised(f: Callable, *args: Any, **kwargs: Any) BaseException | None[source]
+
+ +
+
+repeat_last(iterable: Iterable[T]) Iterator[T][source]
+
+ +
+
+run_process(args: str | Iterable[str], *, check: bool = True, input: str | None = None, pipe_stdout: bool = True, pipe_stderr: bool = False, cwd: str | Path | None = None, env: Mapping[str, str] | None = None, logger: Logger | None = None, exec_process: bool = False) CompletedProcess[source]
+
+ +
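A minimal example of the process helper above; the command is a placeholder, and with check=True (the default) a non-zero exit raises CalledProcessError.

```python
from pyk.utils import run_process

# Run a command and capture stdout (pipe_stdout defaults to True).
proc = run_process(['echo', 'hello'], check=True)
print(proc.returncode, proc.stdout)
```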
+
+run_process_2(args: str | Iterable[str], *, input: str | None = None, write_stdout: bool = False, write_stderr: bool = False, cwd: str | Path | None = None, env: Mapping[str, str] | None = None, logger: Logger | None = None, loglevel: int | None = None, check: bool = True) CompletedProcess[source]
+
+ +
+
+shorten_hash(h: str, left_chars: int = 6, right_chars: int = 6) str[source]
+
+ +
+
+shorten_hashes(value: Any, left_chars: int = 6, right_chars: int = 6) Any[source]
+
+ +
+
+single(iterable: Iterable[T]) T[source]
+
+ +
+
+some(iterable: Iterable[T]) T | None[source]
+
+ +
+
+tuple_of() Callable[[tuple[()]], tuple[()]][source]
+
+tuple_of(f1: Callable[[P1], R1], /) Callable[[tuple[P1]], tuple[R1]]
+
+tuple_of(f1: Callable[[P1], R1], f2: Callable[[P2], R2], /) Callable[[tuple[P1, P2]], tuple[R1, R2]]
+
+tuple_of(f1: Callable[[P1], R1], f2: Callable[[P2], R2], f3: Callable[[P3], R3], /) Callable[[tuple[P1, P2, P3]], tuple[R1, R2, R3]]
+
+tuple_of(f1: Callable[[P1], R1], f2: Callable[[P2], R2], f3: Callable[[P3], R3], f4: Callable[[P4], R4], /) Callable[[tuple[P1, P2, P3, P4]], tuple[R1, R2, R3, R4]]
+
+ +
+
+unique(iterable: Iterable[H]) Iterator[H][source]
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/genindex.html b/pyk/genindex.html new file mode 100644 index 00000000000..960da834596 --- /dev/null +++ b/pyk/genindex.html @@ -0,0 +1,7585 @@ + + + + + + + + Index — pyk 7.1.191 documentation + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + +
  • +
  • +
+
+
+
+
+ + +

Index

+ +
+ _ + | A + | B + | C + | D + | E + | F + | G + | H + | I + | J + | K + | L + | M + | N + | O + | P + | Q + | R + | S + | T + | U + | V + | W + | X + | Y + | Z + +
+

_

+ + + +
+ +

A

+ + + +
+ +

B

+ + + +
+ +

C

+ + + +
+ +

D

+ + + +
+ +

E

+ + + +
+ +

F

+ + + +
+ +

G

+ + + +
+ +

H

+ + + +
+ +

I

+ + + +
+ +

J

+ + + +
+ +

K

+ + + +
+ +

L

+ + + +
+ +

M

+ + + +
+ +

N

+ + + +
+ +

O

+ + + +
+ +

P

+ + + +
+ +

Q

+ + +
+ +

R

+ + + +
+ +

S

+ + + +
+ +

T

+ + + +
+ +

U

+ + + +
+ +

V

+ + + +
+ +

W

+ + + +
+ +

X

+ + + +
+ +

Y

+ + + +
+ +

Z

+ + +
+ + + +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/index.html b/pyk/index.html new file mode 100644 index 00000000000..4f1c2624efc --- /dev/null +++ b/pyk/index.html @@ -0,0 +1,124 @@ + + + + + + + + + Welcome to pyk’s documentation! — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Welcome to pyk’s documentation!

+
+

Contents:

+ +
+
+
+

Indices and tables

+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/objects.inv b/pyk/objects.inv new file mode 100644 index 00000000000..fa7cc1c393c Binary files /dev/null and b/pyk/objects.inv differ diff --git a/pyk/py-modindex.html b/pyk/py-modindex.html new file mode 100644 index 00000000000..17aae5d8184 --- /dev/null +++ b/pyk/py-modindex.html @@ -0,0 +1,605 @@ + + + + + + + + Python Module Index — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + +
  • +
  • +
+
+
+
+
+ + +

Python Module Index

+ +
+ p +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
 
+ p
+ pyk +
    + pyk.coverage +
    + pyk.cterm +
    + pyk.cterm.cterm +
    + pyk.cterm.symbolic +
    + pyk.dequote +
    + pyk.kast +
    + pyk.kast.att +
    + pyk.kast.color +
    + pyk.kast.formatter +
    + pyk.kast.inner +
    + pyk.kast.kast +
    + pyk.kast.lexer +
    + pyk.kast.manip +
    + pyk.kast.markdown +
    + pyk.kast.outer +
    + pyk.kast.outer_lexer +
    + pyk.kast.outer_parser +
    + pyk.kast.outer_syntax +
    + pyk.kast.parser +
    + pyk.kast.pretty +
    + pyk.kast.rewrite +
    + pyk.kast.utils +
    + pyk.kbuild +
    + pyk.kbuild.config +
    + pyk.kbuild.kbuild +
    + pyk.kbuild.project +
    + pyk.kbuild.utils +
    + pyk.kcfg +
    + pyk.kcfg.exploration +
    + pyk.kcfg.explore +
    + pyk.kcfg.kcfg +
    + pyk.kcfg.minimize +
    + pyk.kcfg.semantics +
    + pyk.kcfg.show +
    + pyk.kcfg.store +
    + pyk.kcfg.tui +
    + pyk.kcovr +
    + pyk.kdist +
    + pyk.kdist.api +
    + pyk.kdist.utils +
    + pyk.kllvm +
    + pyk.kllvm.ast +
    + pyk.kllvm.compiler +
    + pyk.kllvm.convert +
    + pyk.kllvm.hints +
    + pyk.kllvm.hints.prooftrace +
    + pyk.kllvm.importer +
    + pyk.kllvm.load +
    + pyk.kllvm.load_static +
    + pyk.kllvm.parser +
    + pyk.kllvm.runtime +
    + pyk.kllvm.utils +
    + pyk.konvert +
    + pyk.kore +
    + pyk.kore.kompiled +
    + pyk.kore.lexer +
    + pyk.kore.manip +
    + pyk.kore.match +
    + pyk.kore.parser +
    + pyk.kore.pool +
    + pyk.kore.prelude +
    + pyk.kore.rpc +
    + pyk.kore.rule +
    + pyk.kore.syntax +
    + pyk.kore.tools +
    + pyk.kore_exec_covr +
    + pyk.kore_exec_covr.kore_exec_covr +
    + pyk.krepl +
    + pyk.krepl.repl +
    + pyk.ktool +
    + pyk.ktool.claim_index +
    + pyk.ktool.claim_loader +
    + pyk.ktool.kfuzz +
    + pyk.ktool.kompile +
    + pyk.ktool.kprint +
    + pyk.ktool.kprove +
    + pyk.ktool.krun +
    + pyk.ktool.prove_rpc +
    + pyk.ktool.utils +
    + pyk.prelude +
    + pyk.prelude.bytes +
    + pyk.prelude.collections +
    + pyk.prelude.k +
    + pyk.prelude.kbool +
    + pyk.prelude.kint +
    + pyk.prelude.ml +
    + pyk.prelude.string +
    + pyk.prelude.utils +
    + pyk.proof +
    + pyk.proof.implies +
    + pyk.proof.proof +
    + pyk.proof.reachability +
    + pyk.proof.show +
    + pyk.proof.tui +
    + pyk.testing +
    + pyk.testing.plugin +
    + pyk.utils +
+ + +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/pyk/search.html b/pyk/search.html new file mode 100644 index 00000000000..ba282efeec5 --- /dev/null +++ b/pyk/search.html @@ -0,0 +1,120 @@ + + + + + + + + Search — pyk 7.1.191 documentation + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + +
  • +
  • +
+
+
+
+
+ + + + +
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2024, Runtime Verification, Inc.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/pyk/searchindex.js b/pyk/searchindex.js new file mode 100644 index 00000000000..85cbfd4a2e6 --- /dev/null +++ b/pyk/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({"alltitles": {"Contents:": [[99, null]], "Indices and tables": [[99, "indices-and-tables"]], "Submodules": [[1, "submodules"], [3, "submodules"], [7, "submodules"], [24, "submodules"], [29, "submodules"], [39, "submodules"], [42, "submodules"], [46, "submodules"], [55, "submodules"], [67, "submodules"], [69, "submodules"], [71, "submodules"], [81, "submodules"], [90, "submodules"], [96, "submodules"]], "Subpackages": [[1, "subpackages"], [42, "subpackages"]], "Welcome to pyk\u2019s documentation!": [[99, "welcome-to-pyk-s-documentation"]], "pyk": [[0, "pyk"]], "pyk package": [[1, "module-pyk"]], "pyk.coverage module": [[2, "module-pyk.coverage"]], "pyk.cterm package": [[3, "module-pyk.cterm"]], "pyk.cterm.cterm module": [[4, "module-pyk.cterm.cterm"]], "pyk.cterm.symbolic module": [[5, "module-pyk.cterm.symbolic"]], "pyk.dequote module": [[6, "module-pyk.dequote"]], "pyk.kast package": [[7, "module-pyk.kast"]], "pyk.kast.att module": [[8, "module-pyk.kast.att"]], "pyk.kast.color module": [[9, "module-pyk.kast.color"]], "pyk.kast.formatter module": [[10, "module-pyk.kast.formatter"]], "pyk.kast.inner module": [[11, "module-pyk.kast.inner"]], "pyk.kast.kast module": [[12, "module-pyk.kast.kast"]], "pyk.kast.lexer module": [[13, "module-pyk.kast.lexer"]], "pyk.kast.manip module": [[14, "module-pyk.kast.manip"]], "pyk.kast.markdown module": [[15, "module-pyk.kast.markdown"]], "pyk.kast.outer module": [[16, "module-pyk.kast.outer"]], "pyk.kast.outer_lexer module": [[17, "module-pyk.kast.outer_lexer"]], "pyk.kast.outer_parser module": [[18, "module-pyk.kast.outer_parser"]], "pyk.kast.outer_syntax module": [[19, "module-pyk.kast.outer_syntax"]], "pyk.kast.parser module": [[20, "module-pyk.kast.parser"]], "pyk.kast.pretty module": [[21, "module-pyk.kast.pretty"]], "pyk.kast.rewrite module": [[22, "module-pyk.kast.rewrite"]], "pyk.kast.utils module": [[23, "module-pyk.kast.utils"]], "pyk.kbuild package": [[24, "module-pyk.kbuild"]], "pyk.kbuild.config module": [[25, "module-pyk.kbuild.config"]], "pyk.kbuild.kbuild module": [[26, "module-pyk.kbuild.kbuild"]], "pyk.kbuild.project module": [[27, "module-pyk.kbuild.project"]], "pyk.kbuild.utils module": [[28, "module-pyk.kbuild.utils"]], "pyk.kcfg package": [[29, "module-pyk.kcfg"]], "pyk.kcfg.exploration module": [[30, "module-pyk.kcfg.exploration"]], "pyk.kcfg.explore module": [[31, "module-pyk.kcfg.explore"]], "pyk.kcfg.kcfg module": [[32, "module-pyk.kcfg.kcfg"]], "pyk.kcfg.minimize module": [[33, "module-pyk.kcfg.minimize"]], "pyk.kcfg.semantics module": [[34, "module-pyk.kcfg.semantics"]], "pyk.kcfg.show module": [[35, "module-pyk.kcfg.show"]], "pyk.kcfg.store module": [[36, "module-pyk.kcfg.store"]], "pyk.kcfg.tui module": [[37, "module-pyk.kcfg.tui"]], "pyk.kcovr module": [[38, "module-pyk.kcovr"]], "pyk.kdist package": [[39, "module-pyk.kdist"]], "pyk.kdist.api module": [[40, "module-pyk.kdist.api"]], "pyk.kdist.utils module": [[41, "module-pyk.kdist.utils"]], "pyk.kllvm package": [[42, "module-pyk.kllvm"]], "pyk.kllvm.ast module": [[43, "module-pyk.kllvm.ast"]], "pyk.kllvm.compiler module": [[44, "module-pyk.kllvm.compiler"]], "pyk.kllvm.convert module": [[45, "module-pyk.kllvm.convert"]], "pyk.kllvm.hints package": [[46, "module-pyk.kllvm.hints"]], "pyk.kllvm.hints.prooftrace module": [[47, 
"module-pyk.kllvm.hints.prooftrace"]], "pyk.kllvm.importer module": [[48, "module-pyk.kllvm.importer"]], "pyk.kllvm.load module": [[49, "module-pyk.kllvm.load"]], "pyk.kllvm.load_static module": [[50, "module-pyk.kllvm.load_static"]], "pyk.kllvm.parser module": [[51, "module-pyk.kllvm.parser"]], "pyk.kllvm.runtime module": [[52, "module-pyk.kllvm.runtime"]], "pyk.kllvm.utils module": [[53, "module-pyk.kllvm.utils"]], "pyk.konvert package": [[54, "module-pyk.konvert"]], "pyk.kore package": [[55, "module-pyk.kore"]], "pyk.kore.kompiled module": [[56, "module-pyk.kore.kompiled"]], "pyk.kore.lexer module": [[57, "module-pyk.kore.lexer"]], "pyk.kore.manip module": [[58, "module-pyk.kore.manip"]], "pyk.kore.match module": [[59, "module-pyk.kore.match"]], "pyk.kore.parser module": [[60, "module-pyk.kore.parser"]], "pyk.kore.pool module": [[61, "module-pyk.kore.pool"]], "pyk.kore.prelude module": [[62, "module-pyk.kore.prelude"]], "pyk.kore.rpc module": [[63, "module-pyk.kore.rpc"]], "pyk.kore.rule module": [[64, "module-pyk.kore.rule"]], "pyk.kore.syntax module": [[65, "module-pyk.kore.syntax"]], "pyk.kore.tools module": [[66, "module-pyk.kore.tools"]], "pyk.kore_exec_covr package": [[67, "module-pyk.kore_exec_covr"]], "pyk.kore_exec_covr.kore_exec_covr module": [[68, "module-pyk.kore_exec_covr.kore_exec_covr"]], "pyk.krepl package": [[69, "module-pyk.krepl"]], "pyk.krepl.repl module": [[70, "module-pyk.krepl.repl"]], "pyk.ktool package": [[71, "module-pyk.ktool"]], "pyk.ktool.claim_index module": [[72, "module-pyk.ktool.claim_index"]], "pyk.ktool.claim_loader module": [[73, "module-pyk.ktool.claim_loader"]], "pyk.ktool.kfuzz module": [[74, "module-pyk.ktool.kfuzz"]], "pyk.ktool.kompile module": [[75, "module-pyk.ktool.kompile"]], "pyk.ktool.kprint module": [[76, "module-pyk.ktool.kprint"]], "pyk.ktool.kprove module": [[77, "module-pyk.ktool.kprove"]], "pyk.ktool.krun module": [[78, "module-pyk.ktool.krun"]], "pyk.ktool.prove_rpc module": [[79, "module-pyk.ktool.prove_rpc"]], "pyk.ktool.utils module": [[80, "module-pyk.ktool.utils"]], "pyk.prelude package": [[81, "module-pyk.prelude"]], "pyk.prelude.bytes module": [[82, "module-pyk.prelude.bytes"]], "pyk.prelude.collections module": [[83, "module-pyk.prelude.collections"]], "pyk.prelude.k module": [[84, "module-pyk.prelude.k"]], "pyk.prelude.kbool module": [[85, "module-pyk.prelude.kbool"]], "pyk.prelude.kint module": [[86, "module-pyk.prelude.kint"]], "pyk.prelude.ml module": [[87, "module-pyk.prelude.ml"]], "pyk.prelude.string module": [[88, "module-pyk.prelude.string"]], "pyk.prelude.utils module": [[89, "module-pyk.prelude.utils"]], "pyk.proof package": [[90, "module-pyk.proof"]], "pyk.proof.implies module": [[91, "module-pyk.proof.implies"]], "pyk.proof.proof module": [[92, "module-pyk.proof.proof"]], "pyk.proof.reachability module": [[93, "module-pyk.proof.reachability"]], "pyk.proof.show module": [[94, "module-pyk.proof.show"]], "pyk.proof.tui module": [[95, "module-pyk.proof.tui"]], "pyk.testing package": [[96, "module-pyk.testing"]], "pyk.testing.plugin module": [[97, "module-pyk.testing.plugin"]], "pyk.utils module": [[98, "module-pyk.utils"]]}, "docnames": ["api/modules", "api/pyk", "api/pyk.coverage", "api/pyk.cterm", "api/pyk.cterm.cterm", "api/pyk.cterm.symbolic", "api/pyk.dequote", "api/pyk.kast", "api/pyk.kast.att", "api/pyk.kast.color", "api/pyk.kast.formatter", "api/pyk.kast.inner", "api/pyk.kast.kast", "api/pyk.kast.lexer", "api/pyk.kast.manip", "api/pyk.kast.markdown", "api/pyk.kast.outer", "api/pyk.kast.outer_lexer", 
"api/pyk.kast.outer_parser", "api/pyk.kast.outer_syntax", "api/pyk.kast.parser", "api/pyk.kast.pretty", "api/pyk.kast.rewrite", "api/pyk.kast.utils", "api/pyk.kbuild", "api/pyk.kbuild.config", "api/pyk.kbuild.kbuild", "api/pyk.kbuild.project", "api/pyk.kbuild.utils", "api/pyk.kcfg", "api/pyk.kcfg.exploration", "api/pyk.kcfg.explore", "api/pyk.kcfg.kcfg", "api/pyk.kcfg.minimize", "api/pyk.kcfg.semantics", "api/pyk.kcfg.show", "api/pyk.kcfg.store", "api/pyk.kcfg.tui", "api/pyk.kcovr", "api/pyk.kdist", "api/pyk.kdist.api", "api/pyk.kdist.utils", "api/pyk.kllvm", "api/pyk.kllvm.ast", "api/pyk.kllvm.compiler", "api/pyk.kllvm.convert", "api/pyk.kllvm.hints", "api/pyk.kllvm.hints.prooftrace", "api/pyk.kllvm.importer", "api/pyk.kllvm.load", "api/pyk.kllvm.load_static", "api/pyk.kllvm.parser", "api/pyk.kllvm.runtime", "api/pyk.kllvm.utils", "api/pyk.konvert", "api/pyk.kore", "api/pyk.kore.kompiled", "api/pyk.kore.lexer", "api/pyk.kore.manip", "api/pyk.kore.match", "api/pyk.kore.parser", "api/pyk.kore.pool", "api/pyk.kore.prelude", "api/pyk.kore.rpc", "api/pyk.kore.rule", "api/pyk.kore.syntax", "api/pyk.kore.tools", "api/pyk.kore_exec_covr", "api/pyk.kore_exec_covr.kore_exec_covr", "api/pyk.krepl", "api/pyk.krepl.repl", "api/pyk.ktool", "api/pyk.ktool.claim_index", "api/pyk.ktool.claim_loader", "api/pyk.ktool.kfuzz", "api/pyk.ktool.kompile", "api/pyk.ktool.kprint", "api/pyk.ktool.kprove", "api/pyk.ktool.krun", "api/pyk.ktool.prove_rpc", "api/pyk.ktool.utils", "api/pyk.prelude", "api/pyk.prelude.bytes", "api/pyk.prelude.collections", "api/pyk.prelude.k", "api/pyk.prelude.kbool", "api/pyk.prelude.kint", "api/pyk.prelude.ml", "api/pyk.prelude.string", "api/pyk.prelude.utils", "api/pyk.proof", "api/pyk.proof.implies", "api/pyk.proof.proof", "api/pyk.proof.reachability", "api/pyk.proof.show", "api/pyk.proof.tui", "api/pyk.testing", "api/pyk.testing.plugin", "api/pyk.utils", "index"], "envversion": {"sphinx": 61, "sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.viewcode": 1}, "filenames": ["api/modules.rst", "api/pyk.rst", "api/pyk.coverage.rst", "api/pyk.cterm.rst", "api/pyk.cterm.cterm.rst", "api/pyk.cterm.symbolic.rst", "api/pyk.dequote.rst", "api/pyk.kast.rst", "api/pyk.kast.att.rst", "api/pyk.kast.color.rst", "api/pyk.kast.formatter.rst", "api/pyk.kast.inner.rst", "api/pyk.kast.kast.rst", "api/pyk.kast.lexer.rst", "api/pyk.kast.manip.rst", "api/pyk.kast.markdown.rst", "api/pyk.kast.outer.rst", "api/pyk.kast.outer_lexer.rst", "api/pyk.kast.outer_parser.rst", "api/pyk.kast.outer_syntax.rst", "api/pyk.kast.parser.rst", "api/pyk.kast.pretty.rst", "api/pyk.kast.rewrite.rst", "api/pyk.kast.utils.rst", "api/pyk.kbuild.rst", "api/pyk.kbuild.config.rst", "api/pyk.kbuild.kbuild.rst", "api/pyk.kbuild.project.rst", "api/pyk.kbuild.utils.rst", "api/pyk.kcfg.rst", "api/pyk.kcfg.exploration.rst", "api/pyk.kcfg.explore.rst", "api/pyk.kcfg.kcfg.rst", "api/pyk.kcfg.minimize.rst", "api/pyk.kcfg.semantics.rst", "api/pyk.kcfg.show.rst", "api/pyk.kcfg.store.rst", "api/pyk.kcfg.tui.rst", "api/pyk.kcovr.rst", "api/pyk.kdist.rst", "api/pyk.kdist.api.rst", "api/pyk.kdist.utils.rst", "api/pyk.kllvm.rst", "api/pyk.kllvm.ast.rst", "api/pyk.kllvm.compiler.rst", "api/pyk.kllvm.convert.rst", "api/pyk.kllvm.hints.rst", "api/pyk.kllvm.hints.prooftrace.rst", "api/pyk.kllvm.importer.rst", 
"api/pyk.kllvm.load.rst", "api/pyk.kllvm.load_static.rst", "api/pyk.kllvm.parser.rst", "api/pyk.kllvm.runtime.rst", "api/pyk.kllvm.utils.rst", "api/pyk.konvert.rst", "api/pyk.kore.rst", "api/pyk.kore.kompiled.rst", "api/pyk.kore.lexer.rst", "api/pyk.kore.manip.rst", "api/pyk.kore.match.rst", "api/pyk.kore.parser.rst", "api/pyk.kore.pool.rst", "api/pyk.kore.prelude.rst", "api/pyk.kore.rpc.rst", "api/pyk.kore.rule.rst", "api/pyk.kore.syntax.rst", "api/pyk.kore.tools.rst", "api/pyk.kore_exec_covr.rst", "api/pyk.kore_exec_covr.kore_exec_covr.rst", "api/pyk.krepl.rst", "api/pyk.krepl.repl.rst", "api/pyk.ktool.rst", "api/pyk.ktool.claim_index.rst", "api/pyk.ktool.claim_loader.rst", "api/pyk.ktool.kfuzz.rst", "api/pyk.ktool.kompile.rst", "api/pyk.ktool.kprint.rst", "api/pyk.ktool.kprove.rst", "api/pyk.ktool.krun.rst", "api/pyk.ktool.prove_rpc.rst", "api/pyk.ktool.utils.rst", "api/pyk.prelude.rst", "api/pyk.prelude.bytes.rst", "api/pyk.prelude.collections.rst", "api/pyk.prelude.k.rst", "api/pyk.prelude.kbool.rst", "api/pyk.prelude.kint.rst", "api/pyk.prelude.ml.rst", "api/pyk.prelude.string.rst", "api/pyk.prelude.utils.rst", "api/pyk.proof.rst", "api/pyk.proof.implies.rst", "api/pyk.proof.proof.rst", "api/pyk.proof.reachability.rst", "api/pyk.proof.show.rst", "api/pyk.proof.tui.rst", "api/pyk.testing.rst", "api/pyk.testing.plugin.rst", "api/pyk.utils.rst", "index.rst"], "indexentries": {"__call__() (csubst method)": [[4, "pyk.cterm.cterm.CSubst.__call__", false]], "__call__() (subst method)": [[11, "pyk.kast.inner.Subst.__call__", false]], "__format__() (logorigin method)": [[63, "pyk.kore.rpc.LogOrigin.__format__", false]], "__format__() (stopreason method)": [[63, "pyk.kore.rpc.StopReason.__format__", false]], "__getitem__() (subst method)": [[11, "pyk.kast.inner.Subst.__getitem__", false]], "__init__() (csubst method)": [[4, "pyk.cterm.cterm.CSubst.__init__", false]], "__init__() (cterm method)": [[4, "pyk.cterm.cterm.CTerm.__init__", false]], "__init__() (kapply method)": [[11, "pyk.kast.inner.KApply.__init__", false]], "__init__() (kas method)": [[11, "pyk.kast.inner.KAs.__init__", false]], "__init__() (klabel method)": [[11, "pyk.kast.inner.KLabel.__init__", false]], "__init__() (koreheader method)": [[47, "pyk.kllvm.hints.prooftrace.KoreHeader.__init__", false]], "__init__() (krewrite method)": [[11, "pyk.kast.inner.KRewrite.__init__", false]], "__init__() (ksequence method)": [[11, "pyk.kast.inner.KSequence.__init__", false]], "__init__() (ksort method)": [[11, "pyk.kast.inner.KSort.__init__", false]], "__init__() (ktoken method)": [[11, "pyk.kast.inner.KToken.__init__", false]], "__init__() (kvariable method)": [[11, "pyk.kast.inner.KVariable.__init__", false]], "__init__() (llvmargument method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMArgument.__init__", false]], "__init__() (llvmeventannotated method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMEventAnnotated.__init__", false]], "__init__() (llvmeventtype method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMEventType.__init__", false]], "__init__() (llvmfunctionevent method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMFunctionEvent.__init__", false]], "__init__() (llvmhookevent method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMHookEvent.__init__", false]], "__init__() (llvmpatternmatchingfailureevent method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMPatternMatchingFailureEvent.__init__", false]], "__init__() (llvmrewritetrace method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTrace.__init__", false]], "__init__() (llvmrewritetraceiterator method)": 
[[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTraceIterator.__init__", false]], "__init__() (llvmruleevent method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRuleEvent.__init__", false]], "__init__() (llvmsideconditionevententer method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventEnter.__init__", false]], "__init__() (llvmsideconditioneventexit method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventExit.__init__", false]], "__init__() (subst method)": [[11, "pyk.kast.inner.Subst.__init__", false]], "__iter__() (csubst method)": [[4, "pyk.cterm.cterm.CSubst.__iter__", false]], "__iter__() (cterm method)": [[4, "pyk.cterm.cterm.CTerm.__iter__", false]], "__iter__() (klabel method)": [[11, "pyk.kast.inner.KLabel.__iter__", false]], "__iter__() (krewrite method)": [[11, "pyk.kast.inner.KRewrite.__iter__", false]], "__iter__() (llvmrewritetraceiterator method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTraceIterator.__iter__", false]], "__iter__() (subst method)": [[11, "pyk.kast.inner.Subst.__iter__", false]], "__len__() (subst method)": [[11, "pyk.kast.inner.Subst.__len__", false]], "__lt__() (kvariable method)": [[11, "pyk.kast.inner.KVariable.__lt__", false]], "__mul__() (subst method)": [[11, "pyk.kast.inner.Subst.__mul__", false]], "__next__() (llvmrewritetraceiterator method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTraceIterator.__next__", false]], "__repr__() (llvmargument method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMArgument.__repr__", false]], "__repr__() (llvmfunctionevent method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMFunctionEvent.__repr__", false]], "__repr__() (llvmhookevent method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMHookEvent.__repr__", false]], "__repr__() (llvmpatternmatchingfailureevent method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMPatternMatchingFailureEvent.__repr__", false]], "__repr__() (llvmrewritetrace method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTrace.__repr__", false]], "__repr__() (llvmrewritetraceiterator method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTraceIterator.__repr__", false]], "__repr__() (llvmruleevent method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRuleEvent.__repr__", false]], "__repr__() (llvmsideconditionevententer method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventEnter.__repr__", false]], "__repr__() (llvmsideconditioneventexit method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventExit.__repr__", false]], "_annotated_llvm_event (llvmeventannotated attribute)": [[47, "pyk.kllvm.hints.prooftrace.LLVMEventAnnotated._annotated_llvm_event", false]], "_argument (llvmargument attribute)": [[47, "pyk.kllvm.hints.prooftrace.LLVMArgument._argument", false]], "_event_type (llvmeventtype attribute)": [[47, "pyk.kllvm.hints.prooftrace.LLVMEventType._event_type", false]], "_function_event (llvmfunctionevent attribute)": [[47, "pyk.kllvm.hints.prooftrace.LLVMFunctionEvent._function_event", false]], "_hook_event (llvmhookevent attribute)": [[47, "pyk.kllvm.hints.prooftrace.LLVMHookEvent._hook_event", false]], "_kore_header (koreheader attribute)": [[47, "pyk.kllvm.hints.prooftrace.KoreHeader._kore_header", false]], "_pattern_matching_failure_event (llvmpatternmatchingfailureevent attribute)": [[47, "pyk.kllvm.hints.prooftrace.LLVMPatternMatchingFailureEvent._pattern_matching_failure_event", false]], "_rewrite_trace (llvmrewritetrace attribute)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTrace._rewrite_trace", false]], "_rewrite_trace_iterator (llvmrewritetraceiterator 
attribute)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTraceIterator._rewrite_trace_iterator", false]], "_rule_event (llvmruleevent attribute)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRuleEvent._rule_event", false]], "_side_condition_end_event (llvmsideconditioneventexit attribute)": [[47, "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventExit._side_condition_end_event", false]], "_side_condition_event (llvmsideconditionevententer attribute)": [[47, "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventEnter._side_condition_event", false]], "aborted (fallbackreason attribute)": [[63, "pyk.kore.rpc.FallbackReason.ABORTED", false]], "aborted (stopreason attribute)": [[63, "pyk.kore.rpc.StopReason.ABORTED", false]], "abortedresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.AbortedResult", false]], "abs_or_rel_to() (in module pyk.utils)": [[98, "pyk.utils.abs_or_rel_to", false]], "absint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.absInt", false]], "abstract (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.Abstract", false]], "abstract_node() (defaultsemantics method)": [[34, "pyk.kcfg.semantics.DefaultSemantics.abstract_node", false]], "abstract_node() (kcfgsemantics method)": [[34, "pyk.kcfg.semantics.KCFGSemantics.abstract_node", false]], "abstract_term_safely() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.abstract_term_safely", false]], "action_keystroke() (kcfgviewer method)": [[37, "pyk.kcfg.tui.KCFGViewer.action_keystroke", false]], "add_alias() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.add_alias", false]], "add_attr() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.add_attr", false]], "add_attr() (kcfg.node method)": [[32, "pyk.kcfg.kcfg.KCFG.Node.add_attr", false]], "add_bounded() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.add_bounded", false]], "add_brackets() (in module pyk.kast.formatter)": [[10, "pyk.kast.formatter.add_brackets", false]], "add_cell_map_items() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.add_cell_map_items", false]], "add_command() (bugreport method)": [[98, "pyk.utils.BugReport.add_command", false]], "add_constraint() (csubst method)": [[4, "pyk.cterm.cterm.CSubst.add_constraint", false]], "add_constraint() (cterm method)": [[4, "pyk.cterm.cterm.CTerm.add_constraint", false]], "add_exec_time() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.add_exec_time", false]], "add_file() (bugreport method)": [[98, "pyk.utils.BugReport.add_file", false]], "add_file_contents() (bugreport method)": [[98, "pyk.utils.BugReport.add_file_contents", false]], "add_indent() (in module pyk.utils)": [[98, "pyk.utils.add_indent", false]], "add_injections() (kompiledkore method)": [[56, "pyk.kore.kompiled.KompiledKore.add_injections", false]], "add_ksequence_under_k_productions() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.add_ksequence_under_k_productions", false]], "add_module() (koreclient method)": [[63, "pyk.kore.rpc.KoreClient.add_module", false]], "add_node() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.add_node", false]], "add_request() (bugreport method)": [[98, "pyk.utils.BugReport.add_request", false]], "add_sort_params() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.add_sort_params", false]], "add_stuck() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.add_stuck", false]], "add_subproof() (proof method)": [[92, "pyk.proof.proof.Proof.add_subproof", false]], "add_successor() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.add_successor", false]], "add_terminal() (kcfgexploration method)": [[30, 
"pyk.kcfg.exploration.KCFGExploration.add_terminal", false]], "add_vacuous() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.add_vacuous", false]], "addint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.addInt", false]], "admit() (proof method)": [[92, "pyk.proof.proof.Proof.admit", false]], "admitted (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.admitted", false]], "admitted (equalitysummary attribute)": [[91, "pyk.proof.implies.EqualitySummary.admitted", false]], "admitted (proof attribute)": [[92, "pyk.proof.proof.Proof.admitted", false]], "advance_proof() (prover method)": [[92, "pyk.proof.proof.Prover.advance_proof", false]], "ahead (kversion.git attribute)": [[28, "pyk.kbuild.utils.KVersion.Git.ahead", false]], "alias (aliasdecl attribute)": [[65, "pyk.kore.syntax.AliasDecl.alias", false]], "alias (atts attribute)": [[8, "pyk.kast.att.Atts.ALIAS", false]], "alias (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Alias", false]], "alias (kas attribute)": [[11, "pyk.kast.inner.KAs.alias", false]], "alias_decl() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.alias_decl", false]], "alias_rec (atts attribute)": [[8, "pyk.kast.att.Atts.ALIAS_REC", false]], "alias_rules (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.alias_rules", false]], "aliasdecl (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.AliasDecl", false]], "aliases() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.aliases", false]], "alice_blue (color attribute)": [[9, "pyk.kast.color.Color.ALICE_BLUE", false]], "all_files (project property)": [[27, "pyk.kbuild.project.Project.all_files", false]], "all_module_names (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.all_module_names", false]], "all_modules (kdefinition attribute)": [[16, "pyk.kast.outer.KDefinition.all_modules", false]], "all_modules_dict (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.all_modules_dict", false]], "and (class in pyk.kast.markdown)": [[15, "pyk.kast.markdown.And", false]], "and (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.And", false]], "and_bool() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.and_bool", false]], "andbool() (in module pyk.prelude.kbool)": [[85, "pyk.prelude.kbool.andBool", false]], "andd() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.andd", false]], "andint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.andInt", false]], "ansi_code (color property)": [[9, "pyk.kast.color.Color.ansi_code", false]], "antecedent (impliesproof attribute)": [[91, "pyk.proof.implies.ImpliesProof.antecedent", false]], "anti_left (functionrule attribute)": [[64, "pyk.kore.rule.FunctionRule.anti_left", false]], "anti_unify() (cterm method)": [[4, "pyk.cterm.cterm.CTerm.anti_unify", false]], "anti_unify() (in module pyk.cterm.cterm)": [[4, "pyk.cterm.cterm.anti_unify", false]], "antique_white (color attribute)": [[9, "pyk.kast.color.Color.ANTIQUE_WHITE", false]], "anytype (class in pyk.kast.att)": [[8, "pyk.kast.att.AnyType", false]], "anywhere (atts attribute)": [[8, "pyk.kast.att.Atts.ANYWHERE", false]], "app (assoc property)": [[65, "pyk.kore.syntax.Assoc.app", false]], "app (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.App", false]], "app() (in module pyk.kore.match)": [[59, "pyk.kore.match.app", false]], "app() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.app", false]], "apply() (csubst method)": [[4, "pyk.cterm.cterm.CSubst.apply", false]], "apply() (klabel method)": [[11, "pyk.kast.inner.KLabel.apply", false]], "apply() 
(krewrite method)": [[11, "pyk.kast.inner.KRewrite.apply", false]], "apply() (subst method)": [[11, "pyk.kast.inner.Subst.apply", false]], "apply_existential_substitutions() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.apply_existential_substitutions", false]], "apply_top() (krewrite method)": [[11, "pyk.kast.inner.KRewrite.apply_top", false]], "apprule (class in pyk.kore.rule)": [[64, "pyk.kore.rule.AppRule", false]], "aprfailureinfo (class in pyk.proof.reachability)": [[93, "pyk.proof.reachability.APRFailureInfo", false]], "apricot (color attribute)": [[9, "pyk.kast.color.Color.APRICOT", false]], "aprproof (class in pyk.proof.reachability)": [[93, "pyk.proof.reachability.APRProof", false]], "aprproofbehaviorview (class in pyk.proof.tui)": [[95, "pyk.proof.tui.APRProofBehaviorView", false]], "aprproofboundedresult (class in pyk.proof.reachability)": [[93, "pyk.proof.reachability.APRProofBoundedResult", false]], "aprproofextendandcacheresult (class in pyk.proof.reachability)": [[93, "pyk.proof.reachability.APRProofExtendAndCacheResult", false]], "aprproofextendresult (class in pyk.proof.reachability)": [[93, "pyk.proof.reachability.APRProofExtendResult", false]], "aprproofnodeprinter (class in pyk.proof.show)": [[94, "pyk.proof.show.APRProofNodePrinter", false]], "aprproofresult (class in pyk.proof.reachability)": [[93, "pyk.proof.reachability.APRProofResult", false]], "aprproofshow (class in pyk.proof.show)": [[94, "pyk.proof.show.APRProofShow", false]], "aprproofstep (class in pyk.proof.reachability)": [[93, "pyk.proof.reachability.APRProofStep", false]], "aprproofsubsumeresult (class in pyk.proof.reachability)": [[93, "pyk.proof.reachability.APRProofSubsumeResult", false]], "aprproofterminalresult (class in pyk.proof.reachability)": [[93, "pyk.proof.reachability.APRProofTerminalResult", false]], "aprproofusecacheresult (class in pyk.proof.reachability)": [[93, "pyk.proof.reachability.APRProofUseCacheResult", false]], "aprproofviewer (class in pyk.proof.tui)": [[95, "pyk.proof.tui.APRProofViewer", false]], "aprprover (class in pyk.proof.reachability)": [[93, "pyk.proof.reachability.APRProver", false]], "aprsummary (class in pyk.proof.reachability)": [[93, "pyk.proof.reachability.APRSummary", false]], "aqua (color attribute)": [[9, "pyk.kast.color.Color.AQUA", false]], "aquamarine (color attribute)": [[9, "pyk.kast.color.Color.AQUAMARINE", false]], "arg() (in module pyk.kore.match)": [[59, "pyk.kore.match.arg", false]], "arg_sorts (functionrule attribute)": [[64, "pyk.kore.rule.FunctionRule.arg_sorts", false]], "args (app attribute)": [[65, "pyk.kore.syntax.App.args", false]], "args (assoc attribute)": [[65, "pyk.kore.syntax.Assoc.args", false]], "args (kapply attribute)": [[11, "pyk.kast.inner.KApply.args", false]], "args (leftassoc attribute)": [[65, "pyk.kore.syntax.LeftAssoc.args", false]], "args (llvmfunctionevent property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMFunctionEvent.args", false]], "args (llvmhookevent property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMHookEvent.args", false]], "args (rightassoc attribute)": [[65, "pyk.kore.syntax.RightAssoc.args", false]], "args (sort attribute)": [[19, "pyk.kast.outer_syntax.Sort.args", false]], "args (sortdecl attribute)": [[19, "pyk.kast.outer_syntax.SortDecl.args", false]], "args (target attribute)": [[27, "pyk.kbuild.project.Target.args", false]], "args() (in module pyk.kore.match)": [[59, "pyk.kore.match.args", false]], "argument_sorts (kproduction property)": [[16, "pyk.kast.outer.KProduction.argument_sorts", false]], "arity 
(kapply property)": [[11, "pyk.kast.inner.KApply.arity", false]], "arity (ksequence property)": [[11, "pyk.kast.inner.KSequence.arity", false]], "as_rule() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.as_rule", false]], "as_rules() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.as_rules", false]], "as_subsort (kproduction property)": [[16, "pyk.kast.outer.KProduction.as_subsort", false]], "assoc (atts attribute)": [[8, "pyk.kast.att.Atts.ASSOC", false]], "assoc (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Assoc", false]], "assoc (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Assoc", false]], "assoc (ksyntaxassociativity attribute)": [[16, "pyk.kast.outer.KSyntaxAssociativity.assoc", false]], "assoc (priorityblock attribute)": [[19, "pyk.kast.outer_syntax.PriorityBlock.assoc", false]], "assoc (syntaxassoc attribute)": [[19, "pyk.kast.outer_syntax.SyntaxAssoc.assoc", false]], "assoc_with_unit() (in module pyk.kast.pretty)": [[21, "pyk.kast.pretty.assoc_with_unit", false]], "assume_defined (aprprover attribute)": [[93, "pyk.proof.reachability.APRProver.assume_defined", false]], "assume_defined (impliesprover attribute)": [[91, "pyk.proof.implies.ImpliesProver.assume_defined", false]], "assume_defined() (ctermsymbolic method)": [[5, "pyk.cterm.symbolic.CTermSymbolic.assume_defined", false]], "ast (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.AST", false]], "atom (class in pyk.kast.markdown)": [[15, "pyk.kast.markdown.Atom", false]], "att (alias attribute)": [[19, "pyk.kast.outer_syntax.Alias.att", false]], "att (claim attribute)": [[19, "pyk.kast.outer_syntax.Claim.att", false]], "att (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Att", false]], "att (config attribute)": [[19, "pyk.kast.outer_syntax.Config.att", false]], "att (context attribute)": [[19, "pyk.kast.outer_syntax.Context.att", false]], "att (kbubble attribute)": [[16, "pyk.kast.outer.KBubble.att", false]], "att (kclaim attribute)": [[16, "pyk.kast.outer.KClaim.att", false]], "att (kcontext attribute)": [[16, "pyk.kast.outer.KContext.att", false]], "att (kdefinition attribute)": [[16, "pyk.kast.outer.KDefinition.att", false]], "att (kflatmodule attribute)": [[16, "pyk.kast.outer.KFlatModule.att", false]], "att (kproduction attribute)": [[16, "pyk.kast.outer.KProduction.att", false]], "att (krule attribute)": [[16, "pyk.kast.outer.KRule.att", false]], "att (ksortsynonym attribute)": [[16, "pyk.kast.outer.KSortSynonym.att", false]], "att (ksyntaxassociativity attribute)": [[16, "pyk.kast.outer.KSyntaxAssociativity.att", false]], "att (ksyntaxlexical attribute)": [[16, "pyk.kast.outer.KSyntaxLexical.att", false]], "att (ksyntaxpriority attribute)": [[16, "pyk.kast.outer.KSyntaxPriority.att", false]], "att (ksyntaxsort attribute)": [[16, "pyk.kast.outer.KSyntaxSort.att", false]], "att (module attribute)": [[19, "pyk.kast.outer_syntax.Module.att", false]], "att (production attribute)": [[19, "pyk.kast.outer_syntax.Production.att", false]], "att (productionlike attribute)": [[19, "pyk.kast.outer_syntax.ProductionLike.att", false]], "att (rule attribute)": [[19, "pyk.kast.outer_syntax.Rule.att", false]], "att (stringsentence attribute)": [[19, "pyk.kast.outer_syntax.StringSentence.att", false]], "att (syntaxdecl attribute)": [[19, "pyk.kast.outer_syntax.SyntaxDecl.att", false]], "att (syntaxsynonym attribute)": [[19, "pyk.kast.outer_syntax.SyntaxSynonym.att", false]], "att (userlist attribute)": [[19, "pyk.kast.outer_syntax.UserList.att", false]], "att 
(withkatt attribute)": [[8, "pyk.kast.att.WithKAtt.att", false]], "attentry (class in pyk.kast.att)": [[8, "pyk.kast.att.AttEntry", false]], "attkey (class in pyk.kast.att)": [[8, "pyk.kast.att.AttKey", false]], "attr (state attribute)": [[17, "pyk.kast.outer_lexer.State.ATTR", false]], "attr_content (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.ATTR_CONTENT", false]], "attr_key (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.ATTR_KEY", false]], "attrs (aliasdecl attribute)": [[65, "pyk.kore.syntax.AliasDecl.attrs", false]], "attrs (axiom attribute)": [[65, "pyk.kore.syntax.Axiom.attrs", false]], "attrs (claim attribute)": [[65, "pyk.kore.syntax.Claim.attrs", false]], "attrs (definition attribute)": [[65, "pyk.kore.syntax.Definition.attrs", false]], "attrs (import attribute)": [[65, "pyk.kore.syntax.Import.attrs", false]], "attrs (kcfg.node attribute)": [[32, "pyk.kcfg.kcfg.KCFG.Node.attrs", false]], "attrs (module attribute)": [[65, "pyk.kore.syntax.Module.attrs", false]], "attrs (sortdecl attribute)": [[65, "pyk.kore.syntax.SortDecl.attrs", false]], "attrs (symboldecl attribute)": [[65, "pyk.kore.syntax.SymbolDecl.attrs", false]], "attrs (withattrs attribute)": [[65, "pyk.kore.syntax.WithAttrs.attrs", false]], "attrs_by_key (withattrs property)": [[65, "pyk.kore.syntax.WithAttrs.attrs_by_key", false]], "atts (class in pyk.kast.att)": [[8, "pyk.kast.att.Atts", false]], "atts (katt attribute)": [[8, "pyk.kast.att.KAtt.atts", false]], "atttype (class in pyk.kast.att)": [[8, "pyk.kast.att.AttType", false]], "avoid (atts attribute)": [[8, "pyk.kast.att.Atts.AVOID", false]], "axiom (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Axiom", false]], "axiom() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.axiom", false]], "axiomlike (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.AxiomLike", false]], "axioms (definition property)": [[65, "pyk.kore.syntax.Definition.axioms", false]], "axioms (module property)": [[65, "pyk.kore.syntax.Module.axioms", false]], "azure (color attribute)": [[9, "pyk.kast.color.Color.AZURE", false]], "backend (kprint attribute)": [[76, "pyk.ktool.kprint.KPrint.backend", false]], "baserepl (class in pyk.krepl.repl)": [[70, "pyk.krepl.repl.BaseRepl", false]], "behaviorview (class in pyk.kcfg.tui)": [[37, "pyk.kcfg.tui.BehaviorView", false]], "behaviorview.selected (class in pyk.kcfg.tui)": [[37, "pyk.kcfg.tui.BehaviorView.Selected", false]], "beige (color attribute)": [[9, "pyk.kast.color.Color.BEIGE", false]], "binary (kastinput attribute)": [[76, "pyk.ktool.kprint.KAstInput.BINARY", false]], "binary (kastoutput attribute)": [[76, "pyk.ktool.kprint.KAstOutput.BINARY", false]], "binary (kproveoutput attribute)": [[77, "pyk.ktool.kprove.KProveOutput.BINARY", false]], "binary (krunoutput attribute)": [[78, "pyk.ktool.krun.KRunOutput.BINARY", false]], "binary (printoutput attribute)": [[66, "pyk.kore.tools.PrintOutput.BINARY", false]], "binaryconn (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.BinaryConn", false]], "binarypred (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.BinaryPred", false]], "bind_universally (impliesproof attribute)": [[91, "pyk.proof.implies.ImpliesProof.bind_universally", false]], "bindings (kcfgviewer attribute)": [[37, "pyk.kcfg.tui.KCFGViewer.BINDINGS", false]], "bindings (navwidget attribute)": [[37, "pyk.kcfg.tui.NavWidget.BINDINGS", false]], "bisque (color attribute)": [[9, "pyk.kast.color.Color.BISQUE", false]], "bittersweet (color attribute)": [[9, "pyk.kast.color.Color.BITTERSWEET", 
false]], "black (color attribute)": [[9, "pyk.kast.color.Color.BLACK", false]], "blanched_almond (color attribute)": [[9, "pyk.kast.color.Color.BLANCHED_ALMOND", false]], "blocks (syntaxdefn attribute)": [[19, "pyk.kast.outer_syntax.SyntaxDefn.blocks", false]], "blue (color attribute)": [[9, "pyk.kast.color.Color.BLUE", false]], "blue_green (color attribute)": [[9, "pyk.kast.color.Color.BLUE_GREEN", false]], "blue_violet (color attribute)": [[9, "pyk.kast.color.Color.BLUE_VIOLET", false]], "bmc_depth (aprproof attribute)": [[93, "pyk.proof.reachability.APRProof.bmc_depth", false]], "bmc_depth (aprproofstep attribute)": [[93, "pyk.proof.reachability.APRProofStep.bmc_depth", false]], "bmc_depth (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.bmc_depth", false]], "body (kclaim attribute)": [[16, "pyk.kast.outer.KClaim.body", false]], "body (kcontext attribute)": [[16, "pyk.kast.outer.KContext.body", false]], "body (krule attribute)": [[16, "pyk.kast.outer.KRule.body", false]], "body (krulelike attribute)": [[16, "pyk.kast.outer.KRuleLike.body", false]], "bool_dv() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.bool_dv", false]], "bool_to_ml_pred() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.bool_to_ml_pred", false]], "booltoken() (in module pyk.prelude.kbool)": [[85, "pyk.prelude.kbool.boolToken", false]], "booster (logorigin attribute)": [[63, "pyk.kore.rpc.LogOrigin.BOOSTER", false]], "booster (pykbackend attribute)": [[75, "pyk.ktool.kompile.PykBackend.BOOSTER", false]], "boosterserver (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.BoosterServer", false]], "boosterserverargs (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.BoosterServerArgs", false]], "bottom (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Bottom", false]], "bottom() (cterm static method)": [[4, "pyk.cterm.cterm.CTerm.bottom", false]], "bottom() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.bottom", false]], "bottom_up() (in module pyk.kast.inner)": [[11, "pyk.kast.inner.bottom_up", false]], "bottom_up() (pattern method)": [[65, "pyk.kore.syntax.Pattern.bottom_up", false]], "bottom_up_with_summary() (in module pyk.kast.inner)": [[11, "pyk.kast.inner.bottom_up_with_summary", false]], "bounded (aprproof property)": [[93, "pyk.proof.reachability.APRProof.bounded", false]], "bounded (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.bounded", false]], "bracket (atts attribute)": [[8, "pyk.kast.att.Atts.BRACKET", false]], "bracket_label (atts attribute)": [[8, "pyk.kast.att.Atts.BRACKET_LABEL", false]], "brackets (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.brackets", false]], "branch (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.Branch", false]], "branching (fallbackreason attribute)": [[63, "pyk.kore.rpc.FallbackReason.BRANCHING", false]], "branching (stopreason attribute)": [[63, "pyk.kore.rpc.StopReason.BRANCHING", false]], "branchingresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.BranchingResult", false]], "brick_red (color attribute)": [[9, "pyk.kast.color.Color.BRICK_RED", false]], "brown (color attribute)": [[9, "pyk.kast.color.Color.BROWN", false]], "bubble (alias attribute)": [[19, "pyk.kast.outer_syntax.Alias.bubble", false]], "bubble (behaviorview.selected attribute)": [[37, "pyk.kcfg.tui.BehaviorView.Selected.bubble", false]], "bubble (claim attribute)": [[19, "pyk.kast.outer_syntax.Claim.bubble", false]], "bubble (config attribute)": [[19, "pyk.kast.outer_syntax.Config.bubble", false]], "bubble (context attribute)": [[19, 
"pyk.kast.outer_syntax.Context.bubble", false]], "bubble (graphchunk.selected attribute)": [[37, "pyk.kcfg.tui.GraphChunk.Selected.bubble", false]], "bubble (navwidget.selected attribute)": [[37, "pyk.kcfg.tui.NavWidget.Selected.bubble", false]], "bubble (rule attribute)": [[19, "pyk.kast.outer_syntax.Rule.bubble", false]], "bubble (state attribute)": [[17, "pyk.kast.outer_lexer.State.BUBBLE", false]], "bubble (stringsentence attribute)": [[19, "pyk.kast.outer_syntax.StringSentence.bubble", false]], "bubble (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.BUBBLE", false]], "bug_report (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.bug_report", false]], "bug_report (koreserverargs attribute)": [[63, "pyk.kore.rpc.KoreServerArgs.bug_report", false]], "bug_report() (in module pyk.testing.plugin)": [[97, "pyk.testing.plugin.bug_report", false]], "bugreport (class in pyk.utils)": [[98, "pyk.utils.BugReport", false]], "build() (target method)": [[40, "pyk.kdist.api.Target.build", false]], "build_assoc() (in module pyk.kast.inner)": [[11, "pyk.kast.inner.build_assoc", false]], "build_claim() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.build_claim", false]], "build_cons() (in module pyk.kast.inner)": [[11, "pyk.kast.inner.build_cons", false]], "build_rule() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.build_rule", false]], "build_rule_dict() (in module pyk.kore_exec_covr.kore_exec_covr)": [[68, "pyk.kore_exec_covr.kore_exec_covr.build_rule_dict", false]], "build_symbol_table() (in module pyk.kast.pretty)": [[21, "pyk.kast.pretty.build_symbol_table", false]], "builtin_dir (kdistribution property)": [[80, "pyk.ktool.utils.KDistribution.builtin_dir", false]], "burly_wood (color attribute)": [[9, "pyk.kast.color.Color.BURLY_WOOD", false]], "burnt_orange (color attribute)": [[9, "pyk.kast.color.Color.BURNT_ORANGE", false]], "bytes_decode() (in module pyk.dequote)": [[6, "pyk.dequote.bytes_decode", false]], "bytes_dv() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.bytes_dv", false]], "bytes_encode() (in module pyk.dequote)": [[6, "pyk.dequote.bytes_encode", false]], "bytestoken() (in module pyk.prelude.bytes)": [[82, "pyk.prelude.bytes.bytesToken", false]], "bytestoken_from_str() (in module pyk.prelude.bytes)": [[82, "pyk.prelude.bytes.bytesToken_from_str", false]], "cached_node_id (aprproofusecacheresult attribute)": [[93, "pyk.proof.reachability.APRProofUseCacheResult.cached_node_id", false]], "cadet_blue (color attribute)": [[9, "pyk.kast.color.Color.CADET_BLUE", false]], "can_focus (aprproofbehaviorview attribute)": [[95, "pyk.proof.tui.APRProofBehaviorView.can_focus", false]], "can_focus (behaviorview attribute)": [[37, "pyk.kcfg.tui.BehaviorView.can_focus", false]], "can_focus (constraint attribute)": [[37, "pyk.kcfg.tui.Constraint.can_focus", false]], "can_focus (custom attribute)": [[37, "pyk.kcfg.tui.Custom.can_focus", false]], "can_focus (graphchunk attribute)": [[37, "pyk.kcfg.tui.GraphChunk.can_focus", false]], "can_focus (info attribute)": [[37, "pyk.kcfg.tui.Info.can_focus", false]], "can_focus (navwidget attribute)": [[37, "pyk.kcfg.tui.NavWidget.can_focus", false]], "can_focus (nodeview attribute)": [[37, "pyk.kcfg.tui.NodeView.can_focus", false]], "can_focus (status attribute)": [[37, "pyk.kcfg.tui.Status.can_focus", false]], "can_focus (term attribute)": [[37, "pyk.kcfg.tui.Term.can_focus", false]], "can_focus_children (aprproofbehaviorview attribute)": [[95, "pyk.proof.tui.APRProofBehaviorView.can_focus_children", false]], 
"can_focus_children (behaviorview attribute)": [[37, "pyk.kcfg.tui.BehaviorView.can_focus_children", false]], "can_focus_children (constraint attribute)": [[37, "pyk.kcfg.tui.Constraint.can_focus_children", false]], "can_focus_children (custom attribute)": [[37, "pyk.kcfg.tui.Custom.can_focus_children", false]], "can_focus_children (graphchunk attribute)": [[37, "pyk.kcfg.tui.GraphChunk.can_focus_children", false]], "can_focus_children (info attribute)": [[37, "pyk.kcfg.tui.Info.can_focus_children", false]], "can_focus_children (navwidget attribute)": [[37, "pyk.kcfg.tui.NavWidget.can_focus_children", false]], "can_focus_children (nodeview attribute)": [[37, "pyk.kcfg.tui.NodeView.can_focus_children", false]], "can_focus_children (status attribute)": [[37, "pyk.kcfg.tui.Status.can_focus_children", false]], "can_focus_children (term attribute)": [[37, "pyk.kcfg.tui.Term.can_focus_children", false]], "can_make_custom_step() (defaultsemantics method)": [[34, "pyk.kcfg.semantics.DefaultSemantics.can_make_custom_step", false]], "can_make_custom_step() (kcfgsemantics method)": [[34, "pyk.kcfg.semantics.KCFGSemantics.can_make_custom_step", false]], "can_progress (aprproof property)": [[93, "pyk.proof.reachability.APRProof.can_progress", false]], "can_progress (impliesproof property)": [[91, "pyk.proof.implies.ImpliesProof.can_progress", false]], "can_progress (proof property)": [[92, "pyk.proof.proof.Proof.can_progress", false]], "carnation_pink (color attribute)": [[9, "pyk.kast.color.Color.CARNATION_PINK", false]], "case() (in module pyk.utils)": [[98, "pyk.utils.case", false]], "case_symbol() (in module pyk.kore.match)": [[59, "pyk.kore.match.case_symbol", false]], "cat_builtin (baserepl attribute)": [[70, "pyk.krepl.repl.BaseRepl.CAT_BUILTIN", false]], "cat_debug (baserepl attribute)": [[70, "pyk.krepl.repl.BaseRepl.CAT_DEBUG", false]], "ceil (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Ceil", false]], "ceil() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.ceil", false]], "ceilrule (class in pyk.kore.rule)": [[64, "pyk.kore.rule.CeilRule", false]], "cell (atts attribute)": [[8, "pyk.kast.att.Atts.CELL", false]], "cell() (cterm method)": [[4, "pyk.cterm.cterm.CTerm.cell", false]], "cell_collection (atts attribute)": [[8, "pyk.kast.att.Atts.CELL_COLLECTION", false]], "cell_collection_productions (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.cell_collection_productions", false]], "cell_collection_productions (kflatmodule property)": [[16, "pyk.kast.outer.KFlatModule.cell_collection_productions", false]], "cell_fragment (atts attribute)": [[8, "pyk.kast.att.Atts.CELL_FRAGMENT", false]], "cell_label_to_var_name() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.cell_label_to_var_name", false]], "cell_name (atts attribute)": [[8, "pyk.kast.att.Atts.CELL_NAME", false]], "cell_opt_absent (atts attribute)": [[8, "pyk.kast.att.Atts.CELL_OPT_ABSENT", false]], "cells (cterm property)": [[4, "pyk.cterm.cterm.CTerm.cells", false]], "cerulean (color attribute)": [[9, "pyk.kast.color.Color.CERULEAN", false]], "chainable (class in pyk.utils)": [[98, "pyk.utils.Chainable", false]], "chartreuse (color attribute)": [[9, "pyk.kast.color.Color.CHARTREUSE", false]], "check_absolute_path() (in module pyk.utils)": [[98, "pyk.utils.check_absolute_path", false]], "check_dir_path() (in module pyk.utils)": [[98, "pyk.utils.check_dir_path", false]], "check_extendable() (kcfgexplore method)": [[31, "pyk.kcfg.explore.KCFGExplore.check_extendable", false]], "check_file_path() (in module 
pyk.utils)": [[98, "pyk.utils.check_file_path", false]], "check_relative_path() (in module pyk.utils)": [[98, "pyk.utils.check_relative_path", false]], "check_result (llvmsideconditioneventexit property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventExit.check_result", false]], "check_type() (in module pyk.utils)": [[98, "pyk.utils.check_type", false]], "chocolate (color attribute)": [[9, "pyk.kast.color.Color.CHOCOLATE", false]], "chunk_id (graphchunk.selected attribute)": [[37, "pyk.kcfg.tui.GraphChunk.Selected.chunk_id", false]], "circularities_module_name (aprproof property)": [[93, "pyk.proof.reachability.APRProof.circularities_module_name", false]], "circularity (aprproof attribute)": [[93, "pyk.proof.reachability.APRProof.circularity", false]], "circularity (aprproofstep attribute)": [[93, "pyk.proof.reachability.APRProofStep.circularity", false]], "circularity (atts attribute)": [[8, "pyk.kast.att.Atts.CIRCULARITY", false]], "circularity_rule_id (aprproofstep attribute)": [[93, "pyk.proof.reachability.APRProofStep.circularity_rule_id", false]], "claim (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Claim", false]], "claim (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Claim", false]], "claim() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.claim", false]], "claimindex (class in pyk.ktool.claim_index)": [[72, "pyk.ktool.claim_index.ClaimIndex", false]], "claimloader (class in pyk.ktool.claim_loader)": [[73, "pyk.ktool.claim_loader.ClaimLoader", false]], "claims (claimindex attribute)": [[72, "pyk.ktool.claim_index.ClaimIndex.claims", false]], "claims (kflatmodule property)": [[16, "pyk.kast.outer.KFlatModule.claims", false]], "close() (aprprover method)": [[93, "pyk.proof.reachability.APRProver.close", false]], "close() (httptransport method)": [[63, "pyk.kore.rpc.HttpTransport.close", false]], "close() (impliesprover method)": [[91, "pyk.proof.implies.ImpliesProver.close", false]], "close() (jsonrpcclient method)": [[63, "pyk.kore.rpc.JsonRpcClient.close", false]], "close() (jsonrpcclientfacade method)": [[63, "pyk.kore.rpc.JsonRpcClientFacade.close", false]], "close() (koreclient method)": [[63, "pyk.kore.rpc.KoreClient.close", false]], "close() (koreserver method)": [[63, "pyk.kore.rpc.KoreServer.close", false]], "close() (koreserverpool method)": [[61, "pyk.kore.pool.KoreServerPool.close", false]], "close() (prover method)": [[92, "pyk.proof.proof.Prover.close", false]], "close() (singlesockettransport method)": [[63, "pyk.kore.rpc.SingleSocketTransport.close", false]], "close() (transport method)": [[63, "pyk.kore.rpc.Transport.close", false]], "code (codeblock attribute)": [[15, "pyk.kast.markdown.CodeBlock.code", false]], "code (defaulterror attribute)": [[63, "pyk.kore.rpc.DefaultError.code", false]], "code_blocks() (in module pyk.kast.markdown)": [[15, "pyk.kast.markdown.code_blocks", false]], "codeblock (class in pyk.kast.markdown)": [[15, "pyk.kast.markdown.CodeBlock", false]], "col (loc attribute)": [[17, "pyk.kast.outer_lexer.Loc.col", false]], "collapse_dots() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.collapse_dots", false]], "collect() (in module pyk.kast.inner)": [[11, "pyk.kast.inner.collect", false]], "colon (tokentype attribute)": [[13, "pyk.kast.lexer.TokenType.COLON", false], [17, "pyk.kast.outer_lexer.TokenType.COLON", false], [57, "pyk.kore.lexer.TokenType.COLON", false]], "color (atts attribute)": [[8, "pyk.kast.att.Atts.COLOR", false]], "color (class in pyk.kast.color)": [[9, "pyk.kast.color.Color", false]], 
"colors (atts attribute)": [[8, "pyk.kast.att.Atts.COLORS", false]], "colorstype (class in pyk.kast.att)": [[8, "pyk.kast.att.ColorsType", false]], "colortype (class in pyk.kast.att)": [[8, "pyk.kast.att.ColorType", false]], "comm (atts attribute)": [[8, "pyk.kast.att.Atts.COMM", false]], "comma (tokentype attribute)": [[13, "pyk.kast.lexer.TokenType.COMMA", false], [17, "pyk.kast.outer_lexer.TokenType.COMMA", false], [57, "pyk.kore.lexer.TokenType.COMMA", false]], "command (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.command", false]], "command (koreserverargs attribute)": [[63, "pyk.kore.rpc.KoreServerArgs.command", false]], "command (krun attribute)": [[78, "pyk.ktool.krun.KRun.command", false]], "commit() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.commit", false]], "commit() (impliesproof method)": [[91, "pyk.proof.implies.ImpliesProof.commit", false]], "commit() (proof method)": [[92, "pyk.proof.proof.Proof.commit", false]], "compare_short_hashes() (in module pyk.utils)": [[98, "pyk.utils.compare_short_hashes", false]], "compile_kllvm() (in module pyk.kllvm.compiler)": [[44, "pyk.kllvm.compiler.compile_kllvm", false]], "compile_runtime() (in module pyk.kllvm.compiler)": [[44, "pyk.kllvm.compiler.compile_runtime", false]], "compose() (aprproofbehaviorview method)": [[95, "pyk.proof.tui.APRProofBehaviorView.compose", false]], "compose() (aprproofviewer method)": [[95, "pyk.proof.tui.APRProofViewer.compose", false]], "compose() (behaviorview method)": [[37, "pyk.kcfg.tui.BehaviorView.compose", false]], "compose() (info method)": [[37, "pyk.kcfg.tui.Info.compose", false]], "compose() (kcfgviewer method)": [[37, "pyk.kcfg.tui.KCFGViewer.compose", false]], "compose() (navwidget method)": [[37, "pyk.kcfg.tui.NavWidget.compose", false]], "compose() (nodeview method)": [[37, "pyk.kcfg.tui.NodeView.compose", false]], "compose() (subst method)": [[11, "pyk.kast.inner.Subst.compose", false]], "compositesummary (class in pyk.proof.proof)": [[92, "pyk.proof.proof.CompositeSummary", false]], "compute_ordinals() (definition method)": [[65, "pyk.kore.syntax.Definition.compute_ordinals", false]], "concat (atts attribute)": [[8, "pyk.kast.att.Atts.CONCAT", false]], "concrete (atts attribute)": [[8, "pyk.kast.att.Atts.CONCRETE", false]], "condition (nextstate attribute)": [[5, "pyk.cterm.symbolic.NextState.condition", false]], "config (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Config", false]], "config (cterm attribute)": [[4, "pyk.cterm.cterm.CTerm.config", false]], "conjuncts() (in module pyk.kore.manip)": [[58, "pyk.kore.manip.conjuncts", false]], "consequent (impliesproof attribute)": [[91, "pyk.proof.implies.ImpliesProof.consequent", false]], "constraint (class in pyk.kcfg.tui)": [[37, "pyk.kcfg.tui.Constraint", false]], "constraint (csubst property)": [[4, "pyk.cterm.cterm.CSubst.constraint", false]], "constraint (equalityproof property)": [[91, "pyk.proof.implies.EqualityProof.constraint", false]], "constraints (branch attribute)": [[32, "pyk.kcfg.kcfg.Branch.constraints", false]], "constraints (csubst attribute)": [[4, "pyk.cterm.cterm.CSubst.constraints", false]], "constraints (cterm attribute)": [[4, "pyk.cterm.cterm.CTerm.constraints", false]], "constraints (equalityproof property)": [[91, "pyk.proof.implies.EqualityProof.constraints", false]], "construct_node_refutation() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.construct_node_refutation", false]], "constructor (atts attribute)": [[8, 
"pyk.kast.att.Atts.CONSTRUCTOR", false]], "constructors (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.constructors", false]], "constructors (kflatmodule property)": [[16, "pyk.kast.outer.KFlatModule.constructors", false]], "contains_cover() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.contains_cover", false]], "contains_edge() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.contains_edge", false]], "contains_merged_edge() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.contains_merged_edge", false]], "contains_ndbranch() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.contains_ndbranch", false]], "contains_node() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.contains_node", false]], "contains_split() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.contains_split", false]], "contents (kbubble attribute)": [[16, "pyk.kast.outer.KBubble.contents", false]], "context (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Context", false]], "context (implicationerror attribute)": [[63, "pyk.kore.rpc.ImplicationError.context", false]], "context (invalidmoduleerror attribute)": [[63, "pyk.kore.rpc.InvalidModuleError.context", false]], "context (patternerror attribute)": [[63, "pyk.kore.rpc.PatternError.context", false]], "context (state attribute)": [[17, "pyk.kast.outer_lexer.State.CONTEXT", false]], "context() (target method)": [[40, "pyk.kdist.api.Target.context", false]], "coral (color attribute)": [[9, "pyk.kast.color.Color.CORAL", false]], "cornflower_blue (color attribute)": [[9, "pyk.kast.color.Color.CORNFLOWER_BLUE", false]], "cornsilk (color attribute)": [[9, "pyk.kast.color.Color.CORNSILK", false]], "count_lines_covered() (in module pyk.kcovr)": [[38, "pyk.kcovr.count_lines_covered", false]], "count_lines_file() (in module pyk.kcovr)": [[38, "pyk.kcovr.count_lines_file", false]], "count_lines_global() (in module pyk.kcovr)": [[38, "pyk.kcovr.count_lines_global", false]], "count_rules_covered() (in module pyk.kcovr)": [[38, "pyk.kcovr.count_rules_covered", false]], "count_vars() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.count_vars", false]], "counterexample_info (aprprover attribute)": [[93, "pyk.proof.reachability.APRProver.counterexample_info", false]], "cover() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.cover", false]], "covered (kcfg property)": [[32, "pyk.kcfg.kcfg.KCFG.covered", false]], "covers (kcfg.split property)": [[32, "pyk.kcfg.kcfg.KCFG.Split.covers", false]], "covers() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.covers", false]], "create() (kdistribution static method)": [[80, "pyk.ktool.utils.KDistribution.create", false]], "create() (koreheader static method)": [[47, "pyk.kllvm.hints.prooftrace.KoreHeader.create", false]], "create_cover() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.create_cover", false]], "create_cover_map() (in module pyk.kcovr)": [[38, "pyk.kcovr.create_cover_map", false]], "create_edge() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.create_edge", false]], "create_merged_edge() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.create_merged_edge", false]], "create_ndbranch() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.create_ndbranch", false]], "create_node() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.create_node", false]], "create_rule_map() (in module pyk.kcovr)": [[38, "pyk.kcovr.create_rule_map", false]], "create_rule_map_by_file() (in module pyk.kcovr)": [[38, "pyk.kcovr.create_rule_map_by_file", false]], "create_rule_map_by_line() (in module pyk.kcovr)": [[38, "pyk.kcovr.create_rule_map_by_line", false]], "create_split() (kcfg method)": [[32, 
"pyk.kcfg.kcfg.KCFG.create_split", false]], "create_split_by_nodes() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.create_split_by_nodes", false]], "create_temp() (kbuildenv static method)": [[26, "pyk.kbuild.kbuild.KBuildEnv.create_temp", false]], "crimson (color attribute)": [[9, "pyk.kast.color.Color.CRIMSON", false]], "css_path (kcfgviewer attribute)": [[37, "pyk.kcfg.tui.KCFGViewer.CSS_PATH", false]], "csubst (aprproofsubsumeresult attribute)": [[93, "pyk.proof.reachability.APRProofSubsumeResult.csubst", false]], "csubst (class in pyk.cterm.cterm)": [[4, "pyk.cterm.cterm.CSubst", false]], "csubst (ctermimplies attribute)": [[5, "pyk.cterm.symbolic.CTermImplies.csubst", false]], "csubst (impliesproof attribute)": [[91, "pyk.proof.implies.ImpliesProof.csubst", false]], "csubst (impliesproofresult attribute)": [[91, "pyk.proof.implies.ImpliesProofResult.csubst", false]], "csubst (kcfg.cover attribute)": [[32, "pyk.kcfg.kcfg.KCFG.Cover.csubst", false]], "cterm (abstract attribute)": [[32, "pyk.kcfg.kcfg.Abstract.cterm", false]], "cterm (class in pyk.cterm.cterm)": [[4, "pyk.cterm.cterm.CTerm", false]], "cterm (kcfg.node attribute)": [[32, "pyk.kcfg.kcfg.KCFG.Node.cterm", false]], "cterm (step attribute)": [[32, "pyk.kcfg.kcfg.Step.cterm", false]], "cterm_build_claim() (in module pyk.cterm.cterm)": [[4, "pyk.cterm.cterm.cterm_build_claim", false]], "cterm_build_rule() (in module pyk.cterm.cterm)": [[4, "pyk.cterm.cterm.cterm_build_rule", false]], "cterm_symbolic (kcfgexplore attribute)": [[31, "pyk.kcfg.explore.KCFGExplore.cterm_symbolic", false]], "cterm_symbolic() (in module pyk.cterm.symbolic)": [[5, "pyk.cterm.symbolic.cterm_symbolic", false]], "ctermexecute (class in pyk.cterm.symbolic)": [[5, "pyk.cterm.symbolic.CTermExecute", false]], "ctermimplies (class in pyk.cterm.symbolic)": [[5, "pyk.cterm.symbolic.CTermImplies", false]], "cterms (ndbranch attribute)": [[32, "pyk.kcfg.kcfg.NDBranch.cterms", false]], "cterms_anti_unify() (in module pyk.cterm.cterm)": [[4, "pyk.cterm.cterm.cterms_anti_unify", false]], "ctermsmterror": [[5, "pyk.cterm.symbolic.CTermSMTError", false]], "ctermsymbolic (class in pyk.cterm.symbolic)": [[5, "pyk.cterm.symbolic.CTermSymbolic", false]], "ctor_patterns (dv property)": [[65, "pyk.kore.syntax.DV.ctor_patterns", false]], "ctor_patterns (mlfixpoint property)": [[65, "pyk.kore.syntax.MLFixpoint.ctor_patterns", false]], "ctor_patterns (mlpattern property)": [[65, "pyk.kore.syntax.MLPattern.ctor_patterns", false]], "ctor_patterns (mlquant property)": [[65, "pyk.kore.syntax.MLQuant.ctor_patterns", false]], "ctx (rewriterule attribute)": [[64, "pyk.kore.rule.RewriteRule.ctx", false]], "custom (class in pyk.kcfg.tui)": [[37, "pyk.kcfg.tui.Custom", false]], "custom_step() (defaultsemantics method)": [[34, "pyk.kcfg.semantics.DefaultSemantics.custom_step", false]], "custom_step() (kcfgsemantics method)": [[34, "pyk.kcfg.semantics.KCFGSemantics.custom_step", false]], "cut (step attribute)": [[32, "pyk.kcfg.kcfg.Step.cut", false]], "cut_point_rule (stopreason attribute)": [[63, "pyk.kore.rpc.StopReason.CUT_POINT_RULE", false]], "cut_point_rules (aprprover attribute)": [[93, "pyk.proof.reachability.APRProver.cut_point_rules", false]], "cutpointresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.CutPointResult", false]], "cwd() (in module pyk.kdist.utils)": [[41, "pyk.kdist.utils.cwd", false]], "cyan (color attribute)": [[9, "pyk.kast.color.Color.CYAN", false]], "dandelion (color attribute)": [[9, "pyk.kast.color.Color.DANDELION", false]], "dark_blue (color attribute)": 
[[9, "pyk.kast.color.Color.DARK_BLUE", false]], "dark_cyan (color attribute)": [[9, "pyk.kast.color.Color.DARK_CYAN", false]], "dark_goldenrod (color attribute)": [[9, "pyk.kast.color.Color.DARK_GOLDENROD", false]], "dark_gray (color attribute)": [[9, "pyk.kast.color.Color.DARK_GRAY", false]], "dark_green (color attribute)": [[9, "pyk.kast.color.Color.DARK_GREEN", false]], "dark_grey (color attribute)": [[9, "pyk.kast.color.Color.DARK_GREY", false]], "dark_khaki (color attribute)": [[9, "pyk.kast.color.Color.DARK_KHAKI", false]], "dark_magenta (color attribute)": [[9, "pyk.kast.color.Color.DARK_MAGENTA", false]], "dark_olive_green (color attribute)": [[9, "pyk.kast.color.Color.DARK_OLIVE_GREEN", false]], "dark_orange (color attribute)": [[9, "pyk.kast.color.Color.DARK_ORANGE", false]], "dark_orchid (color attribute)": [[9, "pyk.kast.color.Color.DARK_ORCHID", false]], "dark_red (color attribute)": [[9, "pyk.kast.color.Color.DARK_RED", false]], "dark_salmon (color attribute)": [[9, "pyk.kast.color.Color.DARK_SALMON", false]], "dark_sea_green (color attribute)": [[9, "pyk.kast.color.Color.DARK_SEA_GREEN", false]], "dark_slate_blue (color attribute)": [[9, "pyk.kast.color.Color.DARK_SLATE_BLUE", false]], "dark_slate_gray (color attribute)": [[9, "pyk.kast.color.Color.DARK_SLATE_GRAY", false]], "dark_slate_grey (color attribute)": [[9, "pyk.kast.color.Color.DARK_SLATE_GREY", false]], "dark_turquoise (color attribute)": [[9, "pyk.kast.color.Color.DARK_TURQUOISE", false]], "dark_violet (color attribute)": [[9, "pyk.kast.color.Color.DARK_VIOLET", false]], "darkgray (color attribute)": [[9, "pyk.kast.color.Color.DARKGRAY", false]], "data (defaulterror attribute)": [[63, "pyk.kore.rpc.DefaultError.data", false]], "dcoloneq (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.DCOLONEQ", false]], "debug_applied_rewrite_rules (haskelllogentry attribute)": [[68, "pyk.kore_exec_covr.kore_exec_covr.HaskellLogEntry.DEBUG_APPLIED_REWRITE_RULES", false]], "debug_apply_equation (haskelllogentry attribute)": [[68, "pyk.kore_exec_covr.kore_exec_covr.HaskellLogEntry.DEBUG_APPLY_EQUATION", false]], "decl (syntaxdecl attribute)": [[19, "pyk.kast.outer_syntax.SyntaxDecl.decl", false]], "decl (syntaxdefn attribute)": [[19, "pyk.kast.outer_syntax.SyntaxDefn.decl", false]], "deconstruct_short_hash() (in module pyk.utils)": [[98, "pyk.utils.deconstruct_short_hash", false]], "deep_pink (color attribute)": [[9, "pyk.kast.color.Color.DEEP_PINK", false]], "deep_sky_blue (color attribute)": [[9, "pyk.kast.color.Color.DEEP_SKY_BLUE", false]], "default (state attribute)": [[13, "pyk.kast.lexer.State.DEFAULT", false], [17, "pyk.kast.outer_lexer.State.DEFAULT", false]], "default_format (kproduction property)": [[16, "pyk.kast.outer.KProduction.default_format", false]], "defaulterror": [[63, "pyk.kore.rpc.DefaultError", false]], "defaultsemantics (class in pyk.kcfg.semantics)": [[34, "pyk.kcfg.semantics.DefaultSemantics", false]], "definition (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Definition", false]], "definition (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Definition", false]], "definition (formatter attribute)": [[10, "pyk.kast.formatter.Formatter.definition", false]], "definition (kprint property)": [[76, "pyk.ktool.kprint.KPrint.definition", false]], "definition (prettyprinter attribute)": [[21, "pyk.kast.pretty.PrettyPrinter.definition", false]], "definition() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.definition", false]], "definition() (outerparser method)": [[18, 
"pyk.kast.outer_parser.OuterParser.definition", false]], "definition_dir (kfuzz attribute)": [[74, "pyk.ktool.kfuzz.KFuzz.definition_dir", false]], "definition_dir (kinterpreter attribute)": [[70, "pyk.krepl.repl.KInterpreter.definition_dir", false]], "definition_dir (kprint attribute)": [[76, "pyk.ktool.kprint.KPrint.definition_dir", false]], "definition_dir (kstate attribute)": [[70, "pyk.krepl.repl.KState.definition_dir", false]], "definition_dir() (kbuild method)": [[26, "pyk.kbuild.kbuild.KBuild.definition_dir", false]], "definition_hash (kprint property)": [[76, "pyk.ktool.kprint.KPrint.definition_hash", false]], "definition_to_llvm() (in module pyk.kllvm.convert)": [[45, "pyk.kllvm.convert.definition_to_llvm", false]], "defunctionalize() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.defunctionalize", false]], "dependencies (kclaim property)": [[16, "pyk.kast.outer.KClaim.dependencies", false]], "dependencies (project attribute)": [[27, "pyk.kbuild.project.Project.dependencies", false]], "dependencies_module_name (aprproof property)": [[93, "pyk.proof.reachability.APRProof.dependencies_module_name", false]], "depends (atts attribute)": [[8, "pyk.kast.att.Atts.DEPENDS", false]], "deps() (target method)": [[40, "pyk.kdist.api.Target.deps", false]], "depth (abortedresult attribute)": [[63, "pyk.kore.rpc.AbortedResult.depth", false]], "depth (branchingresult attribute)": [[63, "pyk.kore.rpc.BranchingResult.depth", false]], "depth (ctermexecute attribute)": [[5, "pyk.cterm.symbolic.CTermExecute.depth", false]], "depth (cutpointresult attribute)": [[63, "pyk.kore.rpc.CutPointResult.depth", false]], "depth (depthboundresult attribute)": [[63, "pyk.kore.rpc.DepthBoundResult.depth", false]], "depth (executeresult attribute)": [[63, "pyk.kore.rpc.ExecuteResult.depth", false]], "depth (kcfg.edge attribute)": [[32, "pyk.kcfg.kcfg.KCFG.Edge.depth", false]], "depth (step attribute)": [[32, "pyk.kcfg.kcfg.Step.depth", false]], "depth (stuckresult attribute)": [[63, "pyk.kore.rpc.StuckResult.depth", false]], "depth (terminalresult attribute)": [[63, "pyk.kore.rpc.TerminalResult.depth", false]], "depth (timeoutresult attribute)": [[63, "pyk.kore.rpc.TimeoutResult.depth", false]], "depth (vacuousresult attribute)": [[63, "pyk.kore.rpc.VacuousResult.depth", false]], "depth_bound (stopreason attribute)": [[63, "pyk.kore.rpc.StopReason.DEPTH_BOUND", false]], "depthboundresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.DepthBoundResult", false]], "dequote_bytes() (in module pyk.dequote)": [[6, "pyk.dequote.dequote_bytes", false]], "dequote_string() (in module pyk.dequote)": [[6, "pyk.dequote.dequote_string", false]], "dequoted() (in module pyk.dequote)": [[6, "pyk.dequote.dequoted", false]], "deserialize() (runtime method)": [[52, "pyk.kllvm.runtime.Runtime.deserialize", false]], "dict (aprproof property)": [[93, "pyk.proof.reachability.APRProof.dict", false]], "dict (equalityproof property)": [[91, "pyk.proof.implies.EqualityProof.dict", false]], "dict (impliesproof property)": [[91, "pyk.proof.implies.ImpliesProof.dict", false]], "dict (pattern property)": [[65, "pyk.kore.syntax.Pattern.dict", false]], "dict (proof property)": [[92, "pyk.proof.proof.Proof.dict", false]], "dict (refutationproof property)": [[91, "pyk.proof.implies.RefutationProof.dict", false]], "dict (sort property)": [[65, "pyk.kore.syntax.Sort.dict", false]], "dict (sortapp property)": [[65, "pyk.kore.syntax.SortApp.dict", false]], "dict (sortvar property)": [[65, "pyk.kore.syntax.SortVar.dict", false]], "digest (atts 
attribute)": [[8, "pyk.kast.att.Atts.DIGEST", false]], "digest (proof property)": [[92, "pyk.proof.proof.Proof.digest", false]], "dim_gray (color attribute)": [[9, "pyk.kast.color.Color.DIM_GRAY", false]], "dim_grey (color attribute)": [[9, "pyk.kast.color.Color.DIM_GREY", false]], "direct_subproof_rules (aprprover attribute)": [[93, "pyk.proof.reachability.APRProver.direct_subproof_rules", false]], "dirty (kversion.git attribute)": [[28, "pyk.kbuild.utils.KVersion.Git.dirty", false]], "discard() (katt method)": [[8, "pyk.kast.att.KAtt.discard", false]], "discard_attr() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.discard_attr", false]], "discard_attr() (kcfg.node method)": [[32, "pyk.kcfg.kcfg.KCFG.Node.discard_attr", false]], "discard_stuck() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.discard_stuck", false]], "discard_vacuous() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.discard_vacuous", false]], "divint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.divInt", false]], "do_load() (baserepl method)": [[70, "pyk.krepl.repl.BaseRepl.do_load", false]], "do_load() (krepl method)": [[70, "pyk.krepl.repl.KRepl.do_load", false]], "do_show() (baserepl method)": [[70, "pyk.krepl.repl.BaseRepl.do_show", false]], "do_step() (baserepl method)": [[70, "pyk.krepl.repl.BaseRepl.do_step", false]], "dodger_blue (color attribute)": [[9, "pyk.kast.color.Color.DODGER_BLUE", false]], "dot() (aprproofshow method)": [[94, "pyk.proof.show.APRProofShow.dot", false]], "dot() (kcfgshow method)": [[35, "pyk.kcfg.show.KCFGShow.dot", false]], "dotk (tokentype attribute)": [[13, "pyk.kast.lexer.TokenType.DOTK", false]], "dotklist (tokentype attribute)": [[13, "pyk.kast.lexer.TokenType.DOTKLIST", false]], "drop_source() (katt method)": [[8, "pyk.kast.att.KAtt.drop_source", false]], "dump() (aprproofshow method)": [[94, "pyk.proof.show.APRProofShow.dump", false]], "dump() (kcfgshow method)": [[35, "pyk.kcfg.show.KCFGShow.dump", false]], "duplicatemoduleerror": [[63, "pyk.kore.rpc.DuplicateModuleError", false]], "dv (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.DV", false]], "dv() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.dv", false]], "dv() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.dv", false]], "edge() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.edge", false]], "edge_likes() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.edge_likes", false]], "edges (kcfg.mergededge attribute)": [[32, "pyk.kcfg.kcfg.KCFG.MergedEdge.edges", false]], "edges (kcfg.ndbranch property)": [[32, "pyk.kcfg.kcfg.KCFG.NDBranch.edges", false]], "edges() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.edges", false]], "elem_var() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.elem_var", false]], "element (atts attribute)": [[8, "pyk.kast.att.Atts.ELEMENT", false]], "emerald (color attribute)": [[9, "pyk.kast.color.Color.EMERALD", false]], "empty_config() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.empty_config", false]], "enquote_bytes() (in module pyk.dequote)": [[6, "pyk.dequote.enquote_bytes", false]], "enquote_string() (in module pyk.dequote)": [[6, "pyk.dequote.enquote_string", false]], "enquoted() (in module pyk.dequote)": [[6, "pyk.dequote.enquoted", false]], "ens (apprule attribute)": [[64, "pyk.kore.rule.AppRule.ens", false]], "ens (ceilrule attribute)": [[64, "pyk.kore.rule.CeilRule.ens", false]], "ens (equalsrule attribute)": [[64, "pyk.kore.rule.EqualsRule.ens", false]], "ens (functionrule attribute)": [[64, "pyk.kore.rule.FunctionRule.ens", false]], "ens (rewriterule attribute)": [[64, 
"pyk.kore.rule.RewriteRule.ens", false]], "ens (rule attribute)": [[64, "pyk.kore.rule.Rule.ens", false]], "ensure_dir_path() (in module pyk.utils)": [[98, "pyk.utils.ensure_dir_path", false]], "ensures (kclaim attribute)": [[16, "pyk.kast.outer.KClaim.ensures", false]], "ensures (krule attribute)": [[16, "pyk.kast.outer.KRule.ensures", false]], "ensures (krulelike attribute)": [[16, "pyk.kast.outer.KRuleLike.ensures", false]], "entries() (katt method)": [[8, "pyk.kast.att.KAtt.entries", false]], "eof (koreparser property)": [[60, "pyk.kore.parser.KoreParser.eof", false]], "eof (tokentype attribute)": [[13, "pyk.kast.lexer.TokenType.EOF", false], [17, "pyk.kast.outer_lexer.TokenType.EOF", false], [57, "pyk.kore.lexer.TokenType.EOF", false]], "eof() (kastparser method)": [[20, "pyk.kast.parser.KAstParser.eof", false]], "eq (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.EQ", false]], "eq_bool() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.eq_bool", false]], "eq_int() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.eq_int", false]], "eqint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.eqInt", false]], "equality (equalityproof property)": [[91, "pyk.proof.implies.EqualityProof.equality", false]], "equalityproof (class in pyk.proof.implies)": [[91, "pyk.proof.implies.EqualityProof", false]], "equalitysummary (class in pyk.proof.implies)": [[91, "pyk.proof.implies.EqualitySummary", false]], "equals (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Equals", false]], "equals() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.equals", false]], "equalsrule (class in pyk.kore.rule)": [[64, "pyk.kore.rule.EqualsRule", false]], "error (implicationerror attribute)": [[63, "pyk.kore.rpc.ImplicationError.error", false]], "error (invalidmoduleerror attribute)": [[63, "pyk.kore.rpc.InvalidModuleError.error", false]], "error (parseerror attribute)": [[63, "pyk.kore.rpc.ParseError.error", false]], "error (patternerror attribute)": [[63, "pyk.kore.rpc.PatternError.error", false]], "error (smtsolvererror attribute)": [[63, "pyk.kore.rpc.SmtSolverError.error", false]], "error_info (aprproof attribute)": [[93, "pyk.proof.reachability.APRProof.error_info", false]], "eucliddivint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.euclidDivInt", false]], "euclidmodint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.euclidModInt", false]], "eval() (and method)": [[15, "pyk.kast.markdown.And.eval", false]], "eval() (atom method)": [[15, "pyk.kast.markdown.Atom.eval", false]], "eval() (not method)": [[15, "pyk.kast.markdown.Not.eval", false]], "eval() (or method)": [[15, "pyk.kast.markdown.Or.eval", false]], "eval() (selector method)": [[15, "pyk.kast.markdown.Selector.eval", false]], "evaluate() (runtime method)": [[52, "pyk.kllvm.runtime.Runtime.evaluate", false]], "evar (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.EVar", false]], "event (llvmeventannotated property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMEventAnnotated.event", false]], "exec_time (aprproof property)": [[93, "pyk.proof.reachability.APRProof.exec_time", false]], "execute() (ctermsymbolic method)": [[5, "pyk.cterm.symbolic.CTermSymbolic.execute", false]], "execute() (koreclient method)": [[63, "pyk.kore.rpc.KoreClient.execute", false]], "execute_depth (aprprover attribute)": [[93, "pyk.proof.reachability.APRProver.execute_depth", false]], "executeresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.ExecuteResult", false]], "exists (class in pyk.kore.syntax)": [[65, 
"pyk.kore.syntax.Exists", false]], "exists() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.exists", false]], "exit (atts attribute)": [[8, "pyk.kast.att.Atts.EXIT", false]], "exit_with_process_error() (in module pyk.utils)": [[98, "pyk.utils.exit_with_process_error", false]], "expint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.expInt", false]], "explorable (kcfgexploration property)": [[30, "pyk.kcfg.exploration.KCFGExploration.explorable", false]], "expmodint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.expModInt", false]], "extend() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.extend", false]], "extend_cterm() (kcfgexplore method)": [[31, "pyk.kcfg.explore.KCFGExplore.extend_cterm", false]], "extension_to_apply (aprproofextendresult attribute)": [[93, "pyk.proof.reachability.APRProofExtendResult.extension_to_apply", false]], "extension_to_cache (aprproofextendandcacheresult attribute)": [[93, "pyk.proof.reachability.APRProofExtendAndCacheResult.extension_to_cache", false]], "extra_module (aprprover attribute)": [[93, "pyk.proof.reachability.APRProver.extra_module", false]], "extract_all() (rule static method)": [[64, "pyk.kore.rule.Rule.extract_all", false]], "extract_cells() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.extract_cells", false]], "extract_lhs() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.extract_lhs", false]], "extract_rhs() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.extract_rhs", false]], "extract_subst() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.extract_subst", false]], "failed (proof property)": [[92, "pyk.proof.proof.Proof.failed", false]], "failed (proofstatus attribute)": [[92, "pyk.proof.proof.ProofStatus.FAILED", false]], "failing (aprproof property)": [[93, "pyk.proof.reachability.APRProof.failing", false]], "failing (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.failing", false]], "failing_cells (ctermimplies attribute)": [[5, "pyk.cterm.symbolic.CTermImplies.failing_cells", false]], "failing_nodes (aprfailureinfo attribute)": [[93, "pyk.proof.reachability.APRFailureInfo.failing_nodes", false]], "failure_info (proof attribute)": [[92, "pyk.proof.proof.Proof.failure_info", false]], "failure_info() (aprprover method)": [[93, "pyk.proof.reachability.APRProver.failure_info", false]], "failure_info() (impliesprover method)": [[91, "pyk.proof.implies.ImpliesProver.failure_info", false]], "failure_info() (prover method)": [[92, "pyk.proof.proof.Prover.failure_info", false]], "failure_reasons (aprfailureinfo attribute)": [[93, "pyk.proof.reachability.APRFailureInfo.failure_reasons", false]], "failureinfo (class in pyk.proof.proof)": [[92, "pyk.proof.proof.FailureInfo", false]], "fallback_on (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.fallback_on", false]], "fallbackreason (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.FallbackReason", false]], "fast_check_subsumption (aprprover attribute)": [[93, "pyk.proof.reachability.APRProver.fast_check_subsumption", false]], "fetch_subproof() (proof method)": [[92, "pyk.proof.proof.Proof.fetch_subproof", false]], "fetch_subproof_data() (proof method)": [[92, "pyk.proof.proof.Proof.fetch_subproof_data", false]], "files_for_path() (in module pyk.kdist.utils)": [[41, "pyk.kdist.utils.files_for_path", false]], "filter_none() (in module pyk.utils)": [[98, "pyk.utils.filter_none", false]], "find_common_items() (in module pyk.utils)": [[98, "pyk.utils.find_common_items", false]], "find_file_upwards() (in module pyk.kbuild.utils)": 
[[28, "pyk.kbuild.utils.find_file_upwards", false]], "fire_brick (color attribute)": [[9, "pyk.kast.color.Color.FIRE_BRICK", false]], "flatten_label() (in module pyk.kast.inner)": [[11, "pyk.kast.inner.flatten_label", false]], "floor (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Floor", false]], "floor() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.floor", false]], "floral_white (color attribute)": [[9, "pyk.kast.color.Color.FLORAL_WHITE", false]], "for_definition() (kompiledkore static method)": [[56, "pyk.kore.kompiled.KompiledKore.for_definition", false]], "for_definition() (koresorttable static method)": [[56, "pyk.kore.kompiled.KoreSortTable.for_definition", false]], "for_definition() (koresymboltable static method)": [[56, "pyk.kore.kompiled.KoreSymbolTable.for_definition", false]], "forall (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Forall", false]], "forall() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.forall", false]], "forest_green (color attribute)": [[9, "pyk.kast.color.Color.FOREST_GREEN", false]], "format (atts attribute)": [[8, "pyk.kast.att.Atts.FORMAT", false]], "format (class in pyk.kast.att)": [[8, "pyk.kast.att.Format", false]], "format() (formatter method)": [[10, "pyk.kast.formatter.Formatter.format", false]], "formatted_exec_time (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.formatted_exec_time", false]], "formatted_exec_time() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.formatted_exec_time", false]], "formatter (class in pyk.kast.formatter)": [[10, "pyk.kast.formatter.Formatter", false]], "formattype (class in pyk.kast.att)": [[8, "pyk.kast.att.FormatType", false]], "free_occs() (in module pyk.kore.manip)": [[58, "pyk.kore.manip.free_occs", false]], "free_vars (cterm property)": [[4, "pyk.cterm.cterm.CTerm.free_vars", false]], "free_vars (kcfg.node property)": [[32, "pyk.kcfg.kcfg.KCFG.Node.free_vars", false]], "free_vars() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.free_vars", false]], "fresh_generator (atts attribute)": [[8, "pyk.kast.att.Atts.FRESH_GENERATOR", false]], "from_axiom() (apprule static method)": [[64, "pyk.kore.rule.AppRule.from_axiom", false]], "from_axiom() (ceilrule static method)": [[64, "pyk.kore.rule.CeilRule.from_axiom", false]], "from_axiom() (equalsrule static method)": [[64, "pyk.kore.rule.EqualsRule.from_axiom", false]], "from_axiom() (functionrule static method)": [[64, "pyk.kore.rule.FunctionRule.from_axiom", false]], "from_axiom() (rewriterule static method)": [[64, "pyk.kore.rule.RewriteRule.from_axiom", false]], "from_axiom() (rule static method)": [[64, "pyk.kore.rule.Rule.from_axiom", false]], "from_claim() (aprproof static method)": [[93, "pyk.proof.reachability.APRProof.from_claim", false]], "from_claim() (equalityproof static method)": [[91, "pyk.proof.implies.EqualityProof.from_claim", false]], "from_claim() (kcfg static method)": [[32, "pyk.kcfg.kcfg.KCFG.from_claim", false]], "from_dict() (abortedresult class method)": [[63, "pyk.kore.rpc.AbortedResult.from_dict", false]], "from_dict() (anytype method)": [[8, "pyk.kast.att.AnyType.from_dict", false]], "from_dict() (aprproof class method)": [[93, "pyk.proof.reachability.APRProof.from_dict", false]], "from_dict() (atttype method)": [[8, "pyk.kast.att.AttType.from_dict", false]], "from_dict() (branchingresult class method)": [[63, "pyk.kore.rpc.BranchingResult.from_dict", false]], "from_dict() (colorstype method)": [[8, "pyk.kast.att.ColorsType.from_dict", false]], "from_dict() (colortype method)": 
[[8, "pyk.kast.att.ColorType.from_dict", false]], "from_dict() (csubst static method)": [[4, "pyk.cterm.cterm.CSubst.from_dict", false]], "from_dict() (cterm static method)": [[4, "pyk.cterm.cterm.CTerm.from_dict", false]], "from_dict() (cutpointresult class method)": [[63, "pyk.kore.rpc.CutPointResult.from_dict", false]], "from_dict() (depthboundresult class method)": [[63, "pyk.kore.rpc.DepthBoundResult.from_dict", false]], "from_dict() (equalityproof class method)": [[91, "pyk.proof.implies.EqualityProof.from_dict", false]], "from_dict() (executeresult class method)": [[63, "pyk.kore.rpc.ExecuteResult.from_dict", false]], "from_dict() (formattype method)": [[8, "pyk.kast.att.FormatType.from_dict", false]], "from_dict() (getmodelresult static method)": [[63, "pyk.kore.rpc.GetModelResult.from_dict", false]], "from_dict() (impliesproof class method)": [[91, "pyk.proof.implies.ImpliesProof.from_dict", false]], "from_dict() (impliesresult static method)": [[63, "pyk.kore.rpc.ImpliesResult.from_dict", false]], "from_dict() (inttype method)": [[8, "pyk.kast.att.IntType.from_dict", false]], "from_dict() (katt class method)": [[8, "pyk.kast.att.KAtt.from_dict", false]], "from_dict() (kcfg static method)": [[32, "pyk.kcfg.kcfg.KCFG.from_dict", false]], "from_dict() (kcfg.cover static method)": [[32, "pyk.kcfg.kcfg.KCFG.Cover.from_dict", false]], "from_dict() (kcfg.edge static method)": [[32, "pyk.kcfg.kcfg.KCFG.Edge.from_dict", false]], "from_dict() (kcfg.mergededge static method)": [[32, "pyk.kcfg.kcfg.KCFG.MergedEdge.from_dict", false]], "from_dict() (kcfg.ndbranch static method)": [[32, "pyk.kcfg.kcfg.KCFG.NDBranch.from_dict", false]], "from_dict() (kcfg.node static method)": [[32, "pyk.kcfg.kcfg.KCFG.Node.from_dict", false]], "from_dict() (kcfg.split static method)": [[32, "pyk.kcfg.kcfg.KCFG.Split.from_dict", false]], "from_dict() (kcfg.successor static method)": [[32, "pyk.kcfg.kcfg.KCFG.Successor.from_dict", false]], "from_dict() (kcfgexploration static method)": [[30, "pyk.kcfg.exploration.KCFGExploration.from_dict", false]], "from_dict() (kdefinition static method)": [[16, "pyk.kast.outer.KDefinition.from_dict", false]], "from_dict() (kflatmodule static method)": [[16, "pyk.kast.outer.KFlatModule.from_dict", false]], "from_dict() (kflatmodulelist static method)": [[16, "pyk.kast.outer.KFlatModuleList.from_dict", false]], "from_dict() (kimport static method)": [[16, "pyk.kast.outer.KImport.from_dict", false]], "from_dict() (kinner static method)": [[11, "pyk.kast.inner.KInner.from_dict", false]], "from_dict() (klabel static method)": [[11, "pyk.kast.inner.KLabel.from_dict", false]], "from_dict() (kompiledkore static method)": [[56, "pyk.kore.kompiled.KompiledKore.from_dict", false]], "from_dict() (kproductionitem static method)": [[16, "pyk.kast.outer.KProductionItem.from_dict", false]], "from_dict() (krequire static method)": [[16, "pyk.kast.outer.KRequire.from_dict", false]], "from_dict() (ksentence static method)": [[16, "pyk.kast.outer.KSentence.from_dict", false]], "from_dict() (ksort static method)": [[11, "pyk.kast.inner.KSort.from_dict", false]], "from_dict() (locationtype method)": [[8, "pyk.kast.att.LocationType.from_dict", false]], "from_dict() (logentry class method)": [[63, "pyk.kore.rpc.LogEntry.from_dict", false]], "from_dict() (logrewrite class method)": [[63, "pyk.kore.rpc.LogRewrite.from_dict", false]], "from_dict() (nonetype method)": [[8, "pyk.kast.att.NoneType.from_dict", false]], "from_dict() (optionaltype method)": [[8, "pyk.kast.att.OptionalType.from_dict", 
false]], "from_dict() (pathtype method)": [[8, "pyk.kast.att.PathType.from_dict", false]], "from_dict() (pattern static method)": [[65, "pyk.kore.syntax.Pattern.from_dict", false]], "from_dict() (proof class method)": [[92, "pyk.proof.proof.Proof.from_dict", false]], "from_dict() (refutationproof class method)": [[91, "pyk.proof.implies.RefutationProof.from_dict", false]], "from_dict() (rewritefailure class method)": [[63, "pyk.kore.rpc.RewriteFailure.from_dict", false]], "from_dict() (rewriteresult class method)": [[63, "pyk.kore.rpc.RewriteResult.from_dict", false]], "from_dict() (rewritesuccess class method)": [[63, "pyk.kore.rpc.RewriteSuccess.from_dict", false]], "from_dict() (sort static method)": [[65, "pyk.kore.syntax.Sort.from_dict", false]], "from_dict() (source static method)": [[27, "pyk.kbuild.project.Source.from_dict", false]], "from_dict() (state static method)": [[63, "pyk.kore.rpc.State.from_dict", false]], "from_dict() (strtype method)": [[8, "pyk.kast.att.StrType.from_dict", false]], "from_dict() (stuckresult class method)": [[63, "pyk.kore.rpc.StuckResult.from_dict", false]], "from_dict() (subst static method)": [[11, "pyk.kast.inner.Subst.from_dict", false]], "from_dict() (terminalresult class method)": [[63, "pyk.kore.rpc.TerminalResult.from_dict", false]], "from_dict() (timeoutresult class method)": [[63, "pyk.kore.rpc.TimeoutResult.from_dict", false]], "from_dict() (vacuousresult class method)": [[63, "pyk.kore.rpc.VacuousResult.from_dict", false]], "from_file() (llvmrewritetraceiterator static method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTraceIterator.from_file", false]], "from_json() (kcfg static method)": [[32, "pyk.kcfg.kcfg.KCFG.from_json", false]], "from_json() (kinner static method)": [[11, "pyk.kast.inner.KInner.from_json", false]], "from_json() (pattern static method)": [[65, "pyk.kore.syntax.Pattern.from_json", false]], "from_json() (sort static method)": [[65, "pyk.kore.syntax.Sort.from_json", false]], "from_kast() (cterm static method)": [[4, "pyk.cterm.cterm.CTerm.from_kast", false]], "from_module_list() (claimindex static method)": [[72, "pyk.ktool.claim_index.ClaimIndex.from_module_list", false]], "from_pred() (csubst static method)": [[4, "pyk.cterm.cterm.CSubst.from_pred", false]], "from_pred() (subst static method)": [[11, "pyk.kast.inner.Subst.from_pred", false]], "from_proof() (aprfailureinfo static method)": [[93, "pyk.proof.reachability.APRFailureInfo.from_proof", false]], "from_spec_modules() (aprproof static method)": [[93, "pyk.proof.reachability.APRProof.from_spec_modules", false]], "frozendict (class in pyk.utils)": [[98, "pyk.utils.FrozenDict", false]], "fuchsia (color attribute)": [[9, "pyk.kast.color.Color.FUCHSIA", false]], "full_name (targetid property)": [[40, "pyk.kdist.api.TargetId.full_name", false]], "full_printer (nodeprinter attribute)": [[35, "pyk.kcfg.show.NodePrinter.full_printer", false]], "function (atts attribute)": [[8, "pyk.kast.att.Atts.FUNCTION", false]], "function_labels (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.function_labels", false]], "function_name (llvmpatternmatchingfailureevent property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMPatternMatchingFailureEvent.function_name", false]], "functional (atts attribute)": [[8, "pyk.kast.att.Atts.FUNCTIONAL", false]], "functionrule (class in pyk.kore.rule)": [[64, "pyk.kore.rule.FunctionRule", false]], "functions (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.functions", false]], "functions (kflatmodule property)": [[16, 
"pyk.kast.outer.KFlatModule.functions", false]], "fuzz() (in module pyk.ktool.kfuzz)": [[74, "pyk.ktool.kfuzz.fuzz", false]], "fuzz_with_check() (kfuzz method)": [[74, "pyk.ktool.kfuzz.KFuzz.fuzz_with_check", false]], "fuzz_with_exit_code() (kfuzz method)": [[74, "pyk.ktool.kfuzz.KFuzz.fuzz_with_exit_code", false]], "gainsboro (color attribute)": [[9, "pyk.kast.color.Color.GAINSBORO", false]], "ge_int() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.ge_int", false]], "geint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.geInt", false]], "gen_file_timestamp() (in module pyk.utils)": [[98, "pyk.utils.gen_file_timestamp", false]], "gen_glr_parser() (in module pyk.ktool.kprint)": [[76, "pyk.ktool.kprint.gen_glr_parser", false]], "general_edges() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.general_edges", false]], "generate_hints() (in module pyk.kllvm.compiler)": [[44, "pyk.kllvm.compiler.generate_hints", false]], "generated_counter() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.generated_counter", false]], "generated_top() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.generated_top", false]], "get() (katt method)": [[8, "pyk.kast.att.KAtt.get", false]], "get_axiom_by_ordinal() (definition method)": [[65, "pyk.kore.syntax.Definition.get_axiom_by_ordinal", false]], "get_claim_index() (kprove method)": [[77, "pyk.ktool.kprove.KProve.get_claim_index", false]], "get_claims() (kprove method)": [[77, "pyk.ktool.kprove.KProve.get_claims", false]], "get_kllvm() (in module pyk.kllvm.load_static)": [[50, "pyk.kllvm.load_static.get_kllvm", false]], "get_model() (ctermsymbolic method)": [[5, "pyk.cterm.symbolic.CTermSymbolic.get_model", false]], "get_model() (koreclient method)": [[63, "pyk.kore.rpc.KoreClient.get_model", false]], "get_node() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.get_node", false]], "get_refutation_id() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.get_refutation_id", false]], "get_requires() (in module pyk.kllvm.utils)": [[53, "pyk.kllvm.utils.get_requires", false]], "get_rule_by_id() (in module pyk.coverage)": [[2, "pyk.coverage.get_rule_by_id", false]], "get_steps() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.get_steps", false]], "get_steps() (impliesproof method)": [[91, "pyk.proof.implies.ImpliesProof.get_steps", false]], "get_steps() (proof method)": [[92, "pyk.proof.proof.Proof.get_steps", false]], "get_target() (project method)": [[27, "pyk.kbuild.project.Project.get_target", false]], "getmodelresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.GetModelResult", false]], "ghost_white (color attribute)": [[9, "pyk.kast.color.Color.GHOST_WHITE", false]], "git (kversion attribute)": [[28, "pyk.kbuild.utils.KVersion.git", false]], "gold (color attribute)": [[9, "pyk.kast.color.Color.GOLD", false]], "goldenrod (color attribute)": [[9, "pyk.kast.color.Color.GOLDENROD", false]], "graphchunk (class in pyk.kcfg.tui)": [[37, "pyk.kcfg.tui.GraphChunk", false]], "graphchunk.selected (class in pyk.kcfg.tui)": [[37, "pyk.kcfg.tui.GraphChunk.Selected", false]], "gray (color attribute)": [[9, "pyk.kast.color.Color.GRAY", false]], "greatest_common_subsort() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.greatest_common_subsort", false]], "green (color attribute)": [[9, "pyk.kast.color.Color.GREEN", false]], "green_yellow (color attribute)": [[9, "pyk.kast.color.Color.GREEN_YELLOW", false]], "grey (color attribute)": [[9, "pyk.kast.color.Color.GREY", false]], "group (atts attribute)": [[8, "pyk.kast.att.Atts.GROUP", 
false]], "groups (syntaxpriority attribute)": [[19, "pyk.kast.outer_syntax.SyntaxPriority.groups", false]], "gt (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.GT", false]], "gt_int() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.gt_int", false]], "gtint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.gtInt", false]], "handle_failure() (kfuzzhandler method)": [[74, "pyk.ktool.kfuzz.KFuzzHandler.handle_failure", false]], "handle_test() (kfuzzhandler method)": [[74, "pyk.ktool.kfuzz.KFuzzHandler.handle_test", false]], "handler (kfuzz attribute)": [[74, "pyk.ktool.kfuzz.KFuzz.handler", false]], "handler_name (behaviorview.selected attribute)": [[37, "pyk.kcfg.tui.BehaviorView.Selected.handler_name", false]], "handler_name (graphchunk.selected attribute)": [[37, "pyk.kcfg.tui.GraphChunk.Selected.handler_name", false]], "handler_name (navwidget.selected attribute)": [[37, "pyk.kcfg.tui.NavWidget.Selected.handler_name", false]], "has_domain_values (atts attribute)": [[8, "pyk.kast.att.Atts.HAS_DOMAIN_VALUES", false]], "hash (cterm property)": [[4, "pyk.cterm.cterm.CTerm.hash", false]], "hash (kast property)": [[12, "pyk.kast.kast.KAst.hash", false]], "hash_file() (in module pyk.utils)": [[98, "pyk.utils.hash_file", false]], "hash_str() (in module pyk.utils)": [[98, "pyk.utils.hash_str", false]], "haskell (pykbackend attribute)": [[75, "pyk.ktool.kompile.PykBackend.HASKELL", false]], "haskell_log_entries (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.haskell_log_entries", false]], "haskell_log_entries (koreserverargs attribute)": [[63, "pyk.kore.rpc.KoreServerArgs.haskell_log_entries", false]], "haskell_log_format (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.haskell_log_format", false]], "haskell_log_format (koreserverargs attribute)": [[63, "pyk.kore.rpc.KoreServerArgs.haskell_log_format", false]], "haskell_threads (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.haskell_threads", false]], "haskell_threads (koreserverargs attribute)": [[63, "pyk.kore.rpc.KoreServerArgs.haskell_threads", false]], "haskelllogentry (class in pyk.kore_exec_covr.kore_exec_covr)": [[68, "pyk.kore_exec_covr.kore_exec_covr.HaskellLogEntry", false]], "heuristic (branch attribute)": [[32, "pyk.kcfg.kcfg.Branch.heuristic", false]], "hide_cells() (kcfgshow static method)": [[35, "pyk.kcfg.show.KCFGShow.hide_cells", false]], "honeydew (color attribute)": [[9, "pyk.kast.color.Color.HONEYDEW", false]], "hook (atts attribute)": [[8, "pyk.kast.att.Atts.HOOK", false]], "hooked (sortdecl attribute)": [[65, "pyk.kore.syntax.SortDecl.hooked", false]], "hooked (symboldecl attribute)": [[65, "pyk.kore.syntax.SymbolDecl.hooked", false]], "hooked_sort_decl() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.hooked_sort_decl", false]], "hooked_symbol_decl() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.hooked_symbol_decl", false]], "host (koreserver property)": [[63, "pyk.kore.rpc.KoreServer.host", false]], "host (koreserverinfo attribute)": [[63, "pyk.kore.rpc.KoreServerInfo.host", false]], "hot_pink (color attribute)": [[9, "pyk.kast.color.Color.HOT_PINK", false]], "http (transporttype attribute)": [[63, "pyk.kore.rpc.TransportType.HTTP", false]], "httptransport (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.HttpTransport", false]], "id (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.id", false]], "id (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Id", false]], "id (equalitysummary attribute)": 
[[91, "pyk.proof.implies.EqualitySummary.id", false]], "id (kcfg.node attribute)": [[32, "pyk.kcfg.kcfg.KCFG.Node.id", false]], "id (kcfgexplore attribute)": [[31, "pyk.kcfg.explore.KCFGExplore.id", false]], "id (proof attribute)": [[92, "pyk.proof.proof.Proof.id", false]], "id (proofsummary attribute)": [[92, "pyk.proof.proof.ProofSummary.id", false]], "id (refutationsummary attribute)": [[91, "pyk.proof.implies.RefutationSummary.id", false]], "id (tokentype attribute)": [[13, "pyk.kast.lexer.TokenType.ID", false], [57, "pyk.kore.lexer.TokenType.ID", false]], "id() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.id", false]], "id_lower (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.ID_LOWER", false]], "id_upper (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.ID_UPPER", false]], "idem (atts attribute)": [[8, "pyk.kast.att.Atts.IDEM", false]], "if_ktype() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.if_ktype", false]], "iff (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Iff", false]], "iff() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.iff", false]], "image (poset attribute)": [[98, "pyk.utils.POSet.image", false]], "implication (impliesresult attribute)": [[63, "pyk.kore.rpc.ImpliesResult.implication", false]], "implication_failure_reason() (kcfgexplore method)": [[31, "pyk.kcfg.explore.KCFGExplore.implication_failure_reason", false]], "implicationerror": [[63, "pyk.kore.rpc.ImplicationError", false]], "implies (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Implies", false]], "implies() (ctermsymbolic method)": [[5, "pyk.cterm.symbolic.CTermSymbolic.implies", false]], "implies() (koreclient method)": [[63, "pyk.kore.rpc.KoreClient.implies", false]], "implies() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.implies", false]], "implies_bool() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.implies_bool", false]], "impliesbool() (in module pyk.prelude.kbool)": [[85, "pyk.prelude.kbool.impliesBool", false]], "impliesproof (class in pyk.proof.implies)": [[91, "pyk.proof.implies.ImpliesProof", false]], "impliesproofresult (class in pyk.proof.implies)": [[91, "pyk.proof.implies.ImpliesProofResult", false]], "impliesproofstep (class in pyk.proof.implies)": [[91, "pyk.proof.implies.ImpliesProofStep", false]], "impliesprover (class in pyk.proof.implies)": [[91, "pyk.proof.implies.ImpliesProver", false]], "impliesresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.ImpliesResult", false]], "import (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Import", false]], "import (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Import", false]], "import_from_file() (in module pyk.kllvm.importer)": [[48, "pyk.kllvm.importer.import_from_file", false]], "import_kllvm() (in module pyk.kllvm.importer)": [[48, "pyk.kllvm.importer.import_kllvm", false]], "import_runtime() (in module pyk.kllvm.importer)": [[48, "pyk.kllvm.importer.import_runtime", false]], "imports (kflatmodule attribute)": [[16, "pyk.kast.outer.KFlatModule.imports", false]], "imports (module attribute)": [[19, "pyk.kast.outer_syntax.Module.imports", false]], "importt() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.importt", false]], "importt() (outerparser method)": [[18, "pyk.kast.outer_parser.OuterParser.importt", false]], "impure (atts attribute)": [[8, "pyk.kast.att.Atts.IMPURE", false]], "in (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.In", false]], "indent() (in module pyk.kast.pretty)": [[21, "pyk.kast.pretty.indent", false]], 
"index (atts attribute)": [[8, "pyk.kast.att.Atts.INDEX", false]], "indexed_rewrite() (in module pyk.kast.rewrite)": [[22, "pyk.kast.rewrite.indexed_rewrite", false]], "indian_red (color attribute)": [[9, "pyk.kast.color.Color.INDIAN_RED", false]], "indigo (color attribute)": [[9, "pyk.kast.color.Color.INDIGO", false]], "infer_sort() (koresymboltable method)": [[56, "pyk.kore.kompiled.KoreSymbolTable.infer_sort", false]], "info (branch attribute)": [[32, "pyk.kcfg.kcfg.Branch.info", false]], "info (class in pyk.kcfg.tui)": [[37, "pyk.kcfg.tui.Info", false]], "info (codeblock attribute)": [[15, "pyk.kast.markdown.CodeBlock.info", false]], "info (step attribute)": [[32, "pyk.kcfg.kcfg.Step.info", false]], "init (aprproof attribute)": [[93, "pyk.proof.reachability.APRProof.init", false]], "init_config() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.init_config", false]], "init_generated_top_cell() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.init_generated_top_cell", false]], "init_proof() (aprprover method)": [[93, "pyk.proof.reachability.APRProver.init_proof", false]], "init_proof() (impliesprover method)": [[91, "pyk.proof.implies.ImpliesProver.init_proof", false]], "init_proof() (prover method)": [[92, "pyk.proof.proof.Prover.init_proof", false]], "init_state() (interpreter method)": [[70, "pyk.krepl.repl.Interpreter.init_state", false]], "init_state() (kinterpreter method)": [[70, "pyk.krepl.repl.KInterpreter.init_state", false]], "initial_config (llvmrewritetrace property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTrace.initial_config", false]], "initializer (atts attribute)": [[8, "pyk.kast.att.Atts.INITIALIZER", false]], "inj() (in module pyk.kore.match)": [[59, "pyk.kore.match.inj", false]], "inj() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.inj", false]], "inj() (in module pyk.prelude.k)": [[84, "pyk.prelude.k.inj", false]], "injective (atts attribute)": [[8, "pyk.kast.att.Atts.INJECTIVE", false]], "inline_cell_maps() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.inline_cell_maps", false]], "inn() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.inn", false]], "instantiate_cell_vars() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.instantiate_cell_vars", false]], "int_dv() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.int_dv", false]], "interim_simplification (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.interim_simplification", false]], "interpreter (baserepl attribute)": [[70, "pyk.krepl.repl.BaseRepl.interpreter", false]], "interpreter (class in pyk.krepl.repl)": [[70, "pyk.krepl.repl.Interpreter", false]], "intersperse() (in module pyk.utils)": [[98, "pyk.utils.intersperse", false]], "intro (krepl attribute)": [[70, "pyk.krepl.repl.KRepl.intro", false]], "inttoken() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.intToken", false]], "inttype (class in pyk.kast.att)": [[8, "pyk.kast.att.IntType", false]], "invalidmoduleerror": [[63, "pyk.kore.rpc.InvalidModuleError", false]], "is_anon_var() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.is_anon_var", false]], "is_bottom (cterm property)": [[4, "pyk.cterm.cterm.CTerm.is_bottom", false]], "is_bottom() (in module pyk.prelude.ml)": [[87, "pyk.prelude.ml.is_bottom", false]], "is_bounded() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.is_bounded", false]], "is_cell (kapply property)": [[11, "pyk.kast.inner.KApply.is_cell", false]], "is_circularity (kclaim property)": [[16, "pyk.kast.outer.KClaim.is_circularity", false]], 
"is_covered() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.is_covered", false]], "is_explorable() (kcfgexploration method)": [[30, "pyk.kcfg.exploration.KCFGExploration.is_explorable", false]], "is_failing() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.is_failing", false]], "is_hash() (in module pyk.utils)": [[98, "pyk.utils.is_hash", false]], "is_hexstring() (in module pyk.utils)": [[98, "pyk.utils.is_hexstring", false]], "is_identity (subst property)": [[11, "pyk.kast.inner.Subst.is_identity", false]], "is_init() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.is_init", false]], "is_initial_config (llvmeventtype property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMEventType.is_initial_config", false]], "is_kore_pattern() (llvmargument method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMArgument.is_kore_pattern", false]], "is_leaf() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.is_leaf", false]], "is_loop() (defaultsemantics method)": [[34, "pyk.kcfg.semantics.DefaultSemantics.is_loop", false]], "is_loop() (kcfgsemantics method)": [[34, "pyk.kcfg.semantics.KCFGSemantics.is_loop", false]], "is_mergeable() (defaultsemantics method)": [[34, "pyk.kcfg.semantics.DefaultSemantics.is_mergeable", false]], "is_mergeable() (kcfgsemantics method)": [[34, "pyk.kcfg.semantics.KCFGSemantics.is_mergeable", false]], "is_ndbranch() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.is_ndbranch", false]], "is_pending() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.is_pending", false]], "is_pre_trace (llvmeventtype property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMEventType.is_pre_trace", false]], "is_prefix (kproduction property)": [[16, "pyk.kast.outer.KProduction.is_prefix", false]], "is_record (kproduction property)": [[16, "pyk.kast.outer.KProduction.is_record", false]], "is_refuted() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.is_refuted", false]], "is_root() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.is_root", false]], "is_rule() (rule static method)": [[64, "pyk.kore.rule.Rule.is_rule", false]], "is_split() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.is_split", false]], "is_spurious_constraint() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.is_spurious_constraint", false]], "is_step_event() (llvmargument method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMArgument.is_step_event", false]], "is_stuck() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.is_stuck", false]], "is_subsort() (koresorttable method)": [[56, "pyk.kore.kompiled.KoreSortTable.is_subsort", false]], "is_target() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.is_target", false]], "is_term_like() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.is_term_like", false]], "is_terminal() (defaultsemantics method)": [[34, "pyk.kcfg.semantics.DefaultSemantics.is_terminal", false]], "is_terminal() (kcfgexploration method)": [[30, "pyk.kcfg.exploration.KCFGExploration.is_terminal", false]], "is_terminal() (kcfgsemantics method)": [[34, "pyk.kcfg.semantics.KCFGSemantics.is_terminal", false]], "is_top() (in module pyk.prelude.ml)": [[87, "pyk.prelude.ml.is_top", false]], "is_trace (llvmeventtype property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMEventType.is_trace", false]], "is_trusted (kclaim property)": [[16, "pyk.kast.outer.KClaim.is_trusted", false]], "is_vacuous() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.is_vacuous", false]], "items (att attribute)": [[19, "pyk.kast.outer_syntax.Att.items", false]], "items (kproduction attribute)": [[16, "pyk.kast.outer.KProduction.items", false]], "items (ksequence 
attribute)": [[11, "pyk.kast.inner.KSequence.items", false]], "items (production attribute)": [[19, "pyk.kast.outer_syntax.Production.items", false]], "ivory (color attribute)": [[9, "pyk.kast.color.Color.IVORY", false]], "json (kastinput attribute)": [[76, "pyk.ktool.kprint.KAstInput.JSON", false]], "json (kastoutput attribute)": [[76, "pyk.ktool.kprint.KAstOutput.JSON", false]], "json (kproveoutput attribute)": [[77, "pyk.ktool.kprove.KProveOutput.JSON", false]], "json (krunoutput attribute)": [[78, "pyk.ktool.krun.KRunOutput.JSON", false]], "json (pattern property)": [[65, "pyk.kore.syntax.Pattern.json", false]], "json (printoutput attribute)": [[66, "pyk.kore.tools.PrintOutput.JSON", false]], "json (proof property)": [[92, "pyk.proof.proof.Proof.json", false]], "json (sort property)": [[65, "pyk.kore.syntax.Sort.json", false]], "json2string() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.json2string", false]], "json_entry() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.json_entry", false]], "json_key() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.json_key", false]], "json_list() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.json_list", false]], "json_object() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.json_object", false]], "json_to_kore() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.json_to_kore", false]], "jsonrpcclient (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.JsonRpcClient", false]], "jsonrpcclientfacade (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.JsonRpcClientFacade", false]], "jsonrpcerror": [[63, "pyk.kore.rpc.JsonRpcError", false]], "jsons() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.jsons", false]], "jungle_green (color attribute)": [[9, "pyk.kast.color.Color.JUNGLE_GREEN", false]], "k() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.k", false]], "k() (kastparser method)": [[20, "pyk.kast.parser.KAstParser.k", false]], "k_config_var() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.k_config_var", false]], "k_version (kbuild property)": [[26, "pyk.kbuild.kbuild.KBuild.k_version", false]], "k_version() (in module pyk.kbuild.utils)": [[28, "pyk.kbuild.utils.k_version", false]], "kapply (class in pyk.kast.inner)": [[11, "pyk.kast.inner.KApply", false]], "kas (class in pyk.kast.inner)": [[11, "pyk.kast.inner.KAs", false]], "kassoc (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KAssoc", false]], "kast (class in pyk.kast.kast)": [[12, "pyk.kast.kast.KAst", false]], "kast (cterm property)": [[4, "pyk.cterm.cterm.CTerm.kast", false]], "kast (kastinput attribute)": [[76, "pyk.ktool.kprint.KAstInput.KAST", false]], "kast (kastoutput attribute)": [[76, "pyk.ktool.kprint.KAstOutput.KAST", false]], "kast (kproveoutput attribute)": [[77, "pyk.ktool.kprove.KProveOutput.KAST", false]], "kast (krunoutput attribute)": [[78, "pyk.ktool.krun.KRunOutput.KAST", false]], "kast (printoutput attribute)": [[66, "pyk.kore.tools.PrintOutput.KAST", false]], "kast_simplify() (ctermsymbolic method)": [[5, "pyk.cterm.symbolic.CTermSymbolic.kast_simplify", false]], "kast_term() (in module pyk.kast.kast)": [[12, "pyk.kast.kast.kast_term", false]], "kast_to_kore() (ctermsymbolic method)": [[5, "pyk.cterm.symbolic.CTermSymbolic.kast_to_kore", false]], "kast_to_kore() (kprint method)": [[76, "pyk.ktool.kprint.KPrint.kast_to_kore", false]], "kastinput (class in pyk.ktool.kprint)": [[76, "pyk.ktool.kprint.KAstInput", false]], "kastoutput (class in pyk.ktool.kprint)": [[76, "pyk.ktool.kprint.KAstOutput", false]], 
"kastparser (class in pyk.kast.parser)": [[20, "pyk.kast.parser.KAstParser", false]], "katt (class in pyk.kast.att)": [[8, "pyk.kast.att.KAtt", false]], "kbubble (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KBubble", false]], "kbuild (class in pyk.kbuild.kbuild)": [[26, "pyk.kbuild.kbuild.KBuild", false]], "kbuildenv (class in pyk.kbuild.kbuild)": [[26, "pyk.kbuild.kbuild.KBuildEnv", false]], "kcfg (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.KCFG", false]], "kcfg (kcfgexploration attribute)": [[30, "pyk.kcfg.exploration.KCFGExploration.kcfg", false]], "kcfg (kcfgminimizer attribute)": [[33, "pyk.kcfg.minimize.KCFGMinimizer.kcfg", false]], "kcfg.cover (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.KCFG.Cover", false]], "kcfg.edge (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.KCFG.Edge", false]], "kcfg.edgelike (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.KCFG.EdgeLike", false]], "kcfg.mergededge (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.KCFG.MergedEdge", false]], "kcfg.multiedge (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.KCFG.MultiEdge", false]], "kcfg.ndbranch (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.KCFG.NDBranch", false]], "kcfg.node (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.KCFG.Node", false]], "kcfg.split (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.KCFG.Split", false]], "kcfg.successor (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.KCFG.Successor", false]], "kcfg_explore (aprprover attribute)": [[93, "pyk.proof.reachability.APRProver.kcfg_explore", false]], "kcfg_explore (impliesprover attribute)": [[91, "pyk.proof.implies.ImpliesProver.kcfg_explore", false]], "kcfg_json_path (kcfgstore property)": [[32, "pyk.kcfg.kcfg.KCFGStore.kcfg_json_path", false]], "kcfg_node_dir (kcfgstore property)": [[32, "pyk.kcfg.kcfg.KCFGStore.kcfg_node_dir", false]], "kcfg_node_path() (kcfgstore method)": [[32, "pyk.kcfg.kcfg.KCFGStore.kcfg_node_path", false]], "kcfg_semantics (kcfgexplore attribute)": [[31, "pyk.kcfg.explore.KCFGExplore.kcfg_semantics", false]], "kcfg_show (aprproofshow attribute)": [[94, "pyk.proof.show.APRProofShow.kcfg_show", false]], "kcfgexploration (class in pyk.kcfg.exploration)": [[30, "pyk.kcfg.exploration.KCFGExploration", false]], "kcfgexplorationnodeattr (class in pyk.kcfg.exploration)": [[30, "pyk.kcfg.exploration.KCFGExplorationNodeAttr", false]], "kcfgexplore (class in pyk.kcfg.explore)": [[31, "pyk.kcfg.explore.KCFGExplore", false]], "kcfgextendresult (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.KCFGExtendResult", false]], "kcfgminimizer (class in pyk.kcfg.minimize)": [[33, "pyk.kcfg.minimize.KCFGMinimizer", false]], "kcfgnodeattr (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.KCFGNodeAttr", false]], "kcfgsemantics (class in pyk.kcfg.semantics)": [[34, "pyk.kcfg.semantics.KCFGSemantics", false]], "kcfgshow (class in pyk.kcfg.show)": [[35, "pyk.kcfg.show.KCFGShow", false]], "kcfgstore (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.KCFGStore", false]], "kcfgviewer (class in pyk.kcfg.tui)": [[37, "pyk.kcfg.tui.KCFGViewer", false]], "kclaim (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KClaim", false]], "kcontext (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KContext", false]], "kdef (kcfgminimizer attribute)": [[33, "pyk.kcfg.minimize.KCFGMinimizer.kdef", false]], "kdefinition (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KDefinition", false]], "kdist_dir (kbuild attribute)": [[26, "pyk.kbuild.kbuild.KBuild.kdist_dir", false]], "kdistribution (class in pyk.ktool.utils)": [[80, "pyk.ktool.utils.KDistribution", 
false]], "key (attentry attribute)": [[8, "pyk.kast.att.AttEntry.key", false]], "keys() (atts class method)": [[8, "pyk.kast.att.Atts.keys", false]], "kflatmodule (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KFlatModule", false]], "kflatmodulelist (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KFlatModuleList", false]], "kfuzz (class in pyk.ktool.kfuzz)": [[74, "pyk.ktool.kfuzz.KFuzz", false]], "kfuzzhandler (class in pyk.ktool.kfuzz)": [[74, "pyk.ktool.kfuzz.KFuzzHandler", false]], "khaki (color attribute)": [[9, "pyk.kast.color.Color.KHAKI", false]], "kimport (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KImport", false]], "kinner (class in pyk.kast.inner)": [[11, "pyk.kast.inner.KInner", false]], "kintegers() (in module pyk.ktool.kfuzz)": [[74, "pyk.ktool.kfuzz.kintegers", false]], "kinterpreter (class in pyk.krepl.repl)": [[70, "pyk.krepl.repl.KInterpreter", false]], "kitem() (kastparser method)": [[20, "pyk.kast.parser.KAstParser.kitem", false]], "klabel (class in pyk.kast.inner)": [[11, "pyk.kast.inner.KLabel", false]], "klabel (kproduction attribute)": [[16, "pyk.kast.outer.KProduction.klabel", false]], "klabel (state attribute)": [[17, "pyk.kast.outer_lexer.State.KLABEL", false]], "klabel (tokentype attribute)": [[13, "pyk.kast.lexer.TokenType.KLABEL", false], [17, "pyk.kast.outer_lexer.TokenType.KLABEL", false]], "klabel() (kastparser method)": [[20, "pyk.kast.parser.KAstParser.klabel", false]], "klabels (syntaxassoc attribute)": [[19, "pyk.kast.outer_syntax.SyntaxAssoc.klabels", false]], "klist() (kastparser method)": [[20, "pyk.kast.parser.KAstParser.klist", false]], "knonterminal (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KNonTerminal", false]], "kompile() (in module pyk.ktool.kompile)": [[75, "pyk.ktool.kompile.kompile", false]], "kompile() (in module pyk.testing.plugin)": [[97, "pyk.testing.plugin.kompile", false]], "kompile() (kbuild method)": [[26, "pyk.kbuild.kbuild.KBuild.kompile", false]], "kompile() (kbuildenv method)": [[26, "pyk.kbuild.kbuild.KBuildEnv.kompile", false]], "kompiled_dir (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.kompiled_dir", false]], "kompiled_dir (koreserverargs attribute)": [[63, "pyk.kore.rpc.KoreServerArgs.kompiled_dir", false]], "kompiledkore (class in pyk.kore.kompiled)": [[56, "pyk.kore.kompiled.KompiledKore", false]], "kore (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Kore", false]], "kore (kastinput attribute)": [[76, "pyk.ktool.kprint.KAstInput.KORE", false]], "kore (kastoutput attribute)": [[76, "pyk.ktool.kprint.KAstOutput.KORE", false]], "kore (kproveoutput attribute)": [[77, "pyk.ktool.kprove.KProveOutput.KORE", false]], "kore (krunoutput attribute)": [[78, "pyk.ktool.krun.KRunOutput.KORE", false]], "kore (printoutput attribute)": [[66, "pyk.kore.tools.PrintOutput.KORE", false]], "kore (pykbackend attribute)": [[75, "pyk.ktool.kompile.PykBackend.KORE", false]], "kore (state property)": [[63, "pyk.kore.rpc.State.kore", false]], "kore_bool() (in module pyk.kore.match)": [[59, "pyk.kore.match.kore_bool", false]], "kore_bytes() (in module pyk.kore.match)": [[59, "pyk.kore.match.kore_bytes", false]], "kore_id() (in module pyk.kore.match)": [[59, "pyk.kore.match.kore_id", false]], "kore_int() (in module pyk.kore.match)": [[59, "pyk.kore.match.kore_int", false]], "kore_lexer() (in module pyk.kore.lexer)": [[57, "pyk.kore.lexer.kore_lexer", false]], "kore_list_of() (in module pyk.kore.match)": [[59, "pyk.kore.match.kore_list_of", false]], "kore_map_of() (in module pyk.kore.match)": [[59, 
"pyk.kore.match.kore_map_of", false]], "kore_pattern (llvmargument property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMArgument.kore_pattern", false]], "kore_print() (in module pyk.kore.tools)": [[66, "pyk.kore.tools.kore_print", false]], "kore_rangemap_of() (in module pyk.kore.match)": [[59, "pyk.kore.match.kore_rangemap_of", false]], "kore_rpc (logorigin attribute)": [[63, "pyk.kore.rpc.LogOrigin.KORE_RPC", false]], "kore_server() (in module pyk.kore.rpc)": [[63, "pyk.kore.rpc.kore_server", false]], "kore_set_of() (in module pyk.kore.match)": [[59, "pyk.kore.match.kore_set_of", false]], "kore_str() (in module pyk.kore.match)": [[59, "pyk.kore.match.kore_str", false]], "kore_symbol() (assoc class method)": [[65, "pyk.kore.syntax.Assoc.kore_symbol", false]], "kore_symbol() (leftassoc class method)": [[65, "pyk.kore.syntax.LeftAssoc.kore_symbol", false]], "kore_symbol() (rightassoc class method)": [[65, "pyk.kore.syntax.RightAssoc.kore_symbol", false]], "kore_term() (in module pyk.kore.syntax)": [[65, "pyk.kore.syntax.kore_term", false]], "kore_to_json() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.kore_to_json", false]], "kore_to_kast() (ctermsymbolic method)": [[5, "pyk.cterm.symbolic.CTermSymbolic.kore_to_kast", false]], "kore_to_kast() (kprint method)": [[76, "pyk.ktool.kprint.KPrint.kore_to_kast", false]], "kore_to_pretty() (kprint method)": [[76, "pyk.ktool.kprint.KPrint.kore_to_pretty", false]], "koreclient (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.KoreClient", false]], "koreclienterror": [[63, "pyk.kore.rpc.KoreClientError", false]], "koreexeclogformat (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.KoreExecLogFormat", false]], "koreheader (class in pyk.kllvm.hints.prooftrace)": [[47, "pyk.kllvm.hints.prooftrace.KoreHeader", false]], "koreparser (class in pyk.kore.parser)": [[60, "pyk.kore.parser.KoreParser", false]], "koreserver (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.KoreServer", false]], "koreserverargs (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.KoreServerArgs", false]], "koreserverinfo (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.KoreServerInfo", false]], "koreserverpool (class in pyk.kore.pool)": [[61, "pyk.kore.pool.KoreServerPool", false]], "koresorttable (class in pyk.kore.kompiled)": [[56, "pyk.kore.kompiled.KoreSortTable", false]], "koresymboltable (class in pyk.kore.kompiled)": [[56, "pyk.kore.kompiled.KoreSymbolTable", false]], "koretoken (class in pyk.kore.lexer)": [[57, "pyk.kore.lexer.KoreToken", false]], "kouter (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KOuter", false]], "kprint (class in pyk.ktool.kprint)": [[76, "pyk.ktool.kprint.KPrint", false]], "kprint (kcfgshow attribute)": [[35, "pyk.kcfg.show.KCFGShow.kprint", false]], "kprint (nodeprinter attribute)": [[35, "pyk.kcfg.show.NodePrinter.kprint", false]], "kproduction (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KProduction", false]], "kproductionitem (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KProductionItem", false]], "kprove (class in pyk.ktool.kprove)": [[77, "pyk.ktool.kprove.KProve", false]], "kproveoutput (class in pyk.ktool.kprove)": [[77, "pyk.ktool.kprove.KProveOutput", false]], "kregexterminal (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KRegexTerminal", false]], "krepl (class in pyk.krepl.repl)": [[70, "pyk.krepl.repl.KRepl", false]], "krequire (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KRequire", false]], "krewrite (class in pyk.kast.inner)": [[11, "pyk.kast.inner.KRewrite", false]], "krule (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KRule", 
false]], "krulelike (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KRuleLike", false]], "krun (class in pyk.ktool.krun)": [[78, "pyk.ktool.krun.KRun", false]], "krun() (krun method)": [[78, "pyk.ktool.krun.KRun.krun", false]], "krunoutput (class in pyk.ktool.krun)": [[78, "pyk.ktool.krun.KRunOutput", false]], "ksentence (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KSentence", false]], "kseq (tokentype attribute)": [[13, "pyk.kast.lexer.TokenType.KSEQ", false]], "kseq() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.kseq", false]], "ksequence (class in pyk.kast.inner)": [[11, "pyk.kast.inner.KSequence", false]], "ksort (class in pyk.kast.inner)": [[11, "pyk.kast.inner.KSort", false]], "ksortsynonym (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KSortSynonym", false]], "kstate (class in pyk.krepl.repl)": [[70, "pyk.krepl.repl.KState", false]], "ksyntaxassociativity (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KSyntaxAssociativity", false]], "ksyntaxlexical (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KSyntaxLexical", false]], "ksyntaxpriority (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KSyntaxPriority", false]], "ksyntaxsort (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KSyntaxSort", false]], "kterminal (class in pyk.kast.outer)": [[16, "pyk.kast.outer.KTerminal", false]], "ktoken (class in pyk.kast.inner)": [[11, "pyk.kast.inner.KToken", false]], "kvariable (class in pyk.kast.inner)": [[11, "pyk.kast.inner.KVariable", false]], "kversion (class in pyk.kbuild.utils)": [[28, "pyk.kbuild.utils.KVersion", false]], "kversion.git (class in pyk.kbuild.utils)": [[28, "pyk.kbuild.utils.KVersion.Git", false]], "kw_alias (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_ALIAS", false], [57, "pyk.kore.lexer.TokenType.KW_ALIAS", false]], "kw_axiom (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.KW_AXIOM", false]], "kw_claim (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_CLAIM", false], [57, "pyk.kore.lexer.TokenType.KW_CLAIM", false]], "kw_config (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_CONFIG", false]], "kw_context (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_CONTEXT", false]], "kw_endmodule (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_ENDMODULE", false], [57, "pyk.kore.lexer.TokenType.KW_ENDMODULE", false]], "kw_hooked_sort (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.KW_HOOKED_SORT", false]], "kw_hooked_symbol (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.KW_HOOKED_SYMBOL", false]], "kw_import (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.KW_IMPORT", false]], "kw_imports (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_IMPORTS", false]], "kw_left (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_LEFT", false]], "kw_lexical (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_LEXICAL", false]], "kw_module (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_MODULE", false], [57, "pyk.kore.lexer.TokenType.KW_MODULE", false]], "kw_nonassoc (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_NONASSOC", false]], "kw_priority (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_PRIORITY", false]], "kw_private (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_PRIVATE", false]], "kw_public (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_PUBLIC", false]], "kw_requires (tokentype attribute)": [[17, 
"pyk.kast.outer_lexer.TokenType.KW_REQUIRES", false]], "kw_right (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_RIGHT", false]], "kw_rule (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_RULE", false]], "kw_sort (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.KW_SORT", false]], "kw_symbol (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.KW_SYMBOL", false]], "kw_syntax (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.KW_SYNTAX", false]], "kw_where (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.KW_WHERE", false]], "label (alias attribute)": [[19, "pyk.kast.outer_syntax.Alias.label", false]], "label (atts attribute)": [[8, "pyk.kast.att.Atts.LABEL", false]], "label (claim attribute)": [[19, "pyk.kast.outer_syntax.Claim.label", false]], "label (config attribute)": [[19, "pyk.kast.outer_syntax.Config.label", false]], "label (context attribute)": [[19, "pyk.kast.outer_syntax.Context.label", false]], "label (kapply attribute)": [[11, "pyk.kast.inner.KApply.label", false]], "label (ksentence property)": [[16, "pyk.kast.outer.KSentence.label", false]], "label (rewriterule attribute)": [[64, "pyk.kore.rule.RewriteRule.label", false]], "label (rule attribute)": [[19, "pyk.kast.outer_syntax.Rule.label", false]], "label (stringsentence attribute)": [[19, "pyk.kast.outer_syntax.StringSentence.label", false]], "labels() (claimindex method)": [[72, "pyk.ktool.claim_index.ClaimIndex.labels", false]], "labels_to_dots() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.labels_to_dots", false]], "last_constraint (refutationproof property)": [[91, "pyk.proof.implies.RefutationProof.last_constraint", false]], "latex (kastoutput attribute)": [[76, "pyk.ktool.kprint.KAstOutput.LATEX", false]], "latex (kproveoutput attribute)": [[77, "pyk.ktool.kprove.KProveOutput.LATEX", false]], "latex (krunoutput attribute)": [[78, "pyk.ktool.krun.KRunOutput.LATEX", false]], "latex (printoutput attribute)": [[66, "pyk.kore.tools.PrintOutput.LATEX", false]], "lavender (color attribute)": [[9, "pyk.kast.color.Color.LAVENDER", false]], "lavender_blush (color attribute)": [[9, "pyk.kast.color.Color.LAVENDER_BLUSH", false]], "lawn_green (color attribute)": [[9, "pyk.kast.color.Color.LAWN_GREEN", false]], "lbrace (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.LBRACE", false], [57, "pyk.kore.lexer.TokenType.LBRACE", false]], "lbrack (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.LBRACK", false], [57, "pyk.kore.lexer.TokenType.LBRACK", false]], "le_int() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.le_int", false]], "least_common_supersort() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.least_common_supersort", false]], "leaves (kcfg property)": [[32, "pyk.kcfg.kcfg.KCFG.leaves", false]], "left (aliasdecl attribute)": [[65, "pyk.kore.syntax.AliasDecl.left", false]], "left (assoc attribute)": [[19, "pyk.kast.outer_syntax.Assoc.LEFT", false]], "left (atts attribute)": [[8, "pyk.kast.att.Atts.LEFT", false]], "left (binaryconn attribute)": [[65, "pyk.kore.syntax.BinaryConn.left", false]], "left (binarypred attribute)": [[65, "pyk.kore.syntax.BinaryPred.left", false]], "left (equals attribute)": [[65, "pyk.kore.syntax.Equals.left", false]], "left (iff attribute)": [[65, "pyk.kore.syntax.Iff.left", false]], "left (implies attribute)": [[65, "pyk.kore.syntax.Implies.left", false]], "left (in attribute)": [[65, "pyk.kore.syntax.In.left", false]], "left (kassoc attribute)": [[16, "pyk.kast.outer.KAssoc.LEFT", false]], "left 
(rewrites attribute)": [[65, "pyk.kore.syntax.Rewrites.left", false]], "left_assoc() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.left_assoc", false]], "left_assocs (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.left_assocs", false]], "leftassoc (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.LeftAssoc", false]], "leint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.leInt", false]], "lemon_chiffon (color attribute)": [[9, "pyk.kast.color.Color.LEMON_CHIFFON", false]], "let() (aliasdecl method)": [[65, "pyk.kore.syntax.AliasDecl.let", false]], "let() (and method)": [[65, "pyk.kore.syntax.And.let", false]], "let() (app method)": [[65, "pyk.kore.syntax.App.let", false]], "let() (axiom method)": [[65, "pyk.kore.syntax.Axiom.let", false]], "let() (bottom method)": [[65, "pyk.kore.syntax.Bottom.let", false]], "let() (ceil method)": [[65, "pyk.kore.syntax.Ceil.let", false]], "let() (claim method)": [[65, "pyk.kore.syntax.Claim.let", false]], "let() (definition method)": [[65, "pyk.kore.syntax.Definition.let", false]], "let() (dv method)": [[65, "pyk.kore.syntax.DV.let", false]], "let() (equals method)": [[65, "pyk.kore.syntax.Equals.let", false]], "let() (evar method)": [[65, "pyk.kore.syntax.EVar.let", false]], "let() (exists method)": [[65, "pyk.kore.syntax.Exists.let", false]], "let() (floor method)": [[65, "pyk.kore.syntax.Floor.let", false]], "let() (forall method)": [[65, "pyk.kore.syntax.Forall.let", false]], "let() (iff method)": [[65, "pyk.kore.syntax.Iff.let", false]], "let() (implies method)": [[65, "pyk.kore.syntax.Implies.let", false]], "let() (import method)": [[65, "pyk.kore.syntax.Import.let", false]], "let() (in method)": [[65, "pyk.kore.syntax.In.let", false]], "let() (kapply method)": [[11, "pyk.kast.inner.KApply.let", false]], "let() (kas method)": [[11, "pyk.kast.inner.KAs.let", false]], "let() (kbubble method)": [[16, "pyk.kast.outer.KBubble.let", false]], "let() (kcfg.node method)": [[32, "pyk.kcfg.kcfg.KCFG.Node.let", false]], "let() (kclaim method)": [[16, "pyk.kast.outer.KClaim.let", false]], "let() (kcontext method)": [[16, "pyk.kast.outer.KContext.let", false]], "let() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.let", false]], "let() (kflatmodule method)": [[16, "pyk.kast.outer.KFlatModule.let", false]], "let() (kflatmodulelist method)": [[16, "pyk.kast.outer.KFlatModuleList.let", false]], "let() (kimport method)": [[16, "pyk.kast.outer.KImport.let", false]], "let() (klabel method)": [[11, "pyk.kast.inner.KLabel.let", false]], "let() (knonterminal method)": [[16, "pyk.kast.outer.KNonTerminal.let", false]], "let() (kproduction method)": [[16, "pyk.kast.outer.KProduction.let", false]], "let() (kregexterminal method)": [[16, "pyk.kast.outer.KRegexTerminal.let", false]], "let() (krequire method)": [[16, "pyk.kast.outer.KRequire.let", false]], "let() (krewrite method)": [[11, "pyk.kast.inner.KRewrite.let", false]], "let() (krule method)": [[16, "pyk.kast.outer.KRule.let", false]], "let() (krulelike method)": [[16, "pyk.kast.outer.KRuleLike.let", false]], "let() (ksequence method)": [[11, "pyk.kast.inner.KSequence.let", false]], "let() (ksort method)": [[11, "pyk.kast.inner.KSort.let", false]], "let() (ksortsynonym method)": [[16, "pyk.kast.outer.KSortSynonym.let", false]], "let() (ksyntaxassociativity method)": [[16, "pyk.kast.outer.KSyntaxAssociativity.let", false]], "let() (ksyntaxlexical method)": [[16, "pyk.kast.outer.KSyntaxLexical.let", false]], "let() (ksyntaxpriority method)": [[16, 
"pyk.kast.outer.KSyntaxPriority.let", false]], "let() (ksyntaxsort method)": [[16, "pyk.kast.outer.KSyntaxSort.let", false]], "let() (kterminal method)": [[16, "pyk.kast.outer.KTerminal.let", false]], "let() (ktoken method)": [[11, "pyk.kast.inner.KToken.let", false]], "let() (kvariable method)": [[11, "pyk.kast.inner.KVariable.let", false]], "let() (leftassoc method)": [[65, "pyk.kore.syntax.LeftAssoc.let", false]], "let() (module method)": [[65, "pyk.kore.syntax.Module.let", false]], "let() (mu method)": [[65, "pyk.kore.syntax.Mu.let", false]], "let() (next method)": [[65, "pyk.kore.syntax.Next.let", false]], "let() (not method)": [[65, "pyk.kore.syntax.Not.let", false]], "let() (nu method)": [[65, "pyk.kore.syntax.Nu.let", false]], "let() (or method)": [[65, "pyk.kore.syntax.Or.let", false]], "let() (rewrites method)": [[65, "pyk.kore.syntax.Rewrites.let", false]], "let() (rightassoc method)": [[65, "pyk.kore.syntax.RightAssoc.let", false]], "let() (sortapp method)": [[65, "pyk.kore.syntax.SortApp.let", false]], "let() (sortdecl method)": [[65, "pyk.kore.syntax.SortDecl.let", false]], "let() (sortvar method)": [[65, "pyk.kore.syntax.SortVar.let", false]], "let() (string method)": [[65, "pyk.kore.syntax.String.let", false]], "let() (svar method)": [[65, "pyk.kore.syntax.SVar.let", false]], "let() (symbol method)": [[65, "pyk.kore.syntax.Symbol.let", false]], "let() (symboldecl method)": [[65, "pyk.kore.syntax.SymbolDecl.let", false]], "let() (token method)": [[17, "pyk.kast.outer_lexer.Token.let", false]], "let() (top method)": [[65, "pyk.kore.syntax.Top.let", false]], "let_att() (kbubble method)": [[16, "pyk.kast.outer.KBubble.let_att", false]], "let_att() (kclaim method)": [[16, "pyk.kast.outer.KClaim.let_att", false]], "let_att() (kcontext method)": [[16, "pyk.kast.outer.KContext.let_att", false]], "let_att() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.let_att", false]], "let_att() (kflatmodule method)": [[16, "pyk.kast.outer.KFlatModule.let_att", false]], "let_att() (kproduction method)": [[16, "pyk.kast.outer.KProduction.let_att", false]], "let_att() (krule method)": [[16, "pyk.kast.outer.KRule.let_att", false]], "let_att() (ksortsynonym method)": [[16, "pyk.kast.outer.KSortSynonym.let_att", false]], "let_att() (ksyntaxassociativity method)": [[16, "pyk.kast.outer.KSyntaxAssociativity.let_att", false]], "let_att() (ksyntaxlexical method)": [[16, "pyk.kast.outer.KSyntaxLexical.let_att", false]], "let_att() (ksyntaxpriority method)": [[16, "pyk.kast.outer.KSyntaxPriority.let_att", false]], "let_att() (ksyntaxsort method)": [[16, "pyk.kast.outer.KSyntaxSort.let_att", false]], "let_att() (withkatt method)": [[8, "pyk.kast.att.WithKAtt.let_att", false]], "let_attrs() (aliasdecl method)": [[65, "pyk.kore.syntax.AliasDecl.let_attrs", false]], "let_attrs() (axiom method)": [[65, "pyk.kore.syntax.Axiom.let_attrs", false]], "let_attrs() (claim method)": [[65, "pyk.kore.syntax.Claim.let_attrs", false]], "let_attrs() (definition method)": [[65, "pyk.kore.syntax.Definition.let_attrs", false]], "let_attrs() (import method)": [[65, "pyk.kore.syntax.Import.let_attrs", false]], "let_attrs() (module method)": [[65, "pyk.kore.syntax.Module.let_attrs", false]], "let_attrs() (sortdecl method)": [[65, "pyk.kore.syntax.SortDecl.let_attrs", false]], "let_attrs() (symboldecl method)": [[65, "pyk.kore.syntax.SymbolDecl.let_attrs", false]], "let_attrs() (withattrs method)": [[65, "pyk.kore.syntax.WithAttrs.let_attrs", false]], "let_node() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.let_node", 
false]], "let_patterns() (and method)": [[65, "pyk.kore.syntax.And.let_patterns", false]], "let_patterns() (app method)": [[65, "pyk.kore.syntax.App.let_patterns", false]], "let_patterns() (bottom method)": [[65, "pyk.kore.syntax.Bottom.let_patterns", false]], "let_patterns() (ceil method)": [[65, "pyk.kore.syntax.Ceil.let_patterns", false]], "let_patterns() (dv method)": [[65, "pyk.kore.syntax.DV.let_patterns", false]], "let_patterns() (equals method)": [[65, "pyk.kore.syntax.Equals.let_patterns", false]], "let_patterns() (evar method)": [[65, "pyk.kore.syntax.EVar.let_patterns", false]], "let_patterns() (exists method)": [[65, "pyk.kore.syntax.Exists.let_patterns", false]], "let_patterns() (floor method)": [[65, "pyk.kore.syntax.Floor.let_patterns", false]], "let_patterns() (forall method)": [[65, "pyk.kore.syntax.Forall.let_patterns", false]], "let_patterns() (iff method)": [[65, "pyk.kore.syntax.Iff.let_patterns", false]], "let_patterns() (implies method)": [[65, "pyk.kore.syntax.Implies.let_patterns", false]], "let_patterns() (in method)": [[65, "pyk.kore.syntax.In.let_patterns", false]], "let_patterns() (leftassoc method)": [[65, "pyk.kore.syntax.LeftAssoc.let_patterns", false]], "let_patterns() (mu method)": [[65, "pyk.kore.syntax.Mu.let_patterns", false]], "let_patterns() (next method)": [[65, "pyk.kore.syntax.Next.let_patterns", false]], "let_patterns() (not method)": [[65, "pyk.kore.syntax.Not.let_patterns", false]], "let_patterns() (nu method)": [[65, "pyk.kore.syntax.Nu.let_patterns", false]], "let_patterns() (or method)": [[65, "pyk.kore.syntax.Or.let_patterns", false]], "let_patterns() (pattern method)": [[65, "pyk.kore.syntax.Pattern.let_patterns", false]], "let_patterns() (rewrites method)": [[65, "pyk.kore.syntax.Rewrites.let_patterns", false]], "let_patterns() (rightassoc method)": [[65, "pyk.kore.syntax.RightAssoc.let_patterns", false]], "let_patterns() (string method)": [[65, "pyk.kore.syntax.String.let_patterns", false]], "let_patterns() (svar method)": [[65, "pyk.kore.syntax.SVar.let_patterns", false]], "let_patterns() (top method)": [[65, "pyk.kore.syntax.Top.let_patterns", false]], "let_sort() (and method)": [[65, "pyk.kore.syntax.And.let_sort", false]], "let_sort() (bottom method)": [[65, "pyk.kore.syntax.Bottom.let_sort", false]], "let_sort() (ceil method)": [[65, "pyk.kore.syntax.Ceil.let_sort", false]], "let_sort() (dv method)": [[65, "pyk.kore.syntax.DV.let_sort", false]], "let_sort() (equals method)": [[65, "pyk.kore.syntax.Equals.let_sort", false]], "let_sort() (evar method)": [[65, "pyk.kore.syntax.EVar.let_sort", false]], "let_sort() (exists method)": [[65, "pyk.kore.syntax.Exists.let_sort", false]], "let_sort() (floor method)": [[65, "pyk.kore.syntax.Floor.let_sort", false]], "let_sort() (forall method)": [[65, "pyk.kore.syntax.Forall.let_sort", false]], "let_sort() (iff method)": [[65, "pyk.kore.syntax.Iff.let_sort", false]], "let_sort() (implies method)": [[65, "pyk.kore.syntax.Implies.let_sort", false]], "let_sort() (in method)": [[65, "pyk.kore.syntax.In.let_sort", false]], "let_sort() (kvariable method)": [[11, "pyk.kast.inner.KVariable.let_sort", false]], "let_sort() (next method)": [[65, "pyk.kore.syntax.Next.let_sort", false]], "let_sort() (not method)": [[65, "pyk.kore.syntax.Not.let_sort", false]], "let_sort() (or method)": [[65, "pyk.kore.syntax.Or.let_sort", false]], "let_sort() (rewrites method)": [[65, "pyk.kore.syntax.Rewrites.let_sort", false]], "let_sort() (svar method)": [[65, "pyk.kore.syntax.SVar.let_sort", false]], "let_sort() (top 
method)": [[65, "pyk.kore.syntax.Top.let_sort", false]], "let_sort() (withsort method)": [[65, "pyk.kore.syntax.WithSort.let_sort", false]], "let_terms() (kapply method)": [[11, "pyk.kast.inner.KApply.let_terms", false]], "let_terms() (kas method)": [[11, "pyk.kast.inner.KAs.let_terms", false]], "let_terms() (kinner method)": [[11, "pyk.kast.inner.KInner.let_terms", false]], "let_terms() (krewrite method)": [[11, "pyk.kast.inner.KRewrite.let_terms", false]], "let_terms() (ksequence method)": [[11, "pyk.kast.inner.KSequence.let_terms", false]], "let_terms() (ktoken method)": [[11, "pyk.kast.inner.KToken.let_terms", false]], "let_terms() (kvariable method)": [[11, "pyk.kast.inner.KVariable.let_terms", false]], "lexer() (in module pyk.kast.lexer)": [[13, "pyk.kast.lexer.lexer", false]], "lexical (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Lexical", false]], "lhs (apprule attribute)": [[64, "pyk.kore.rule.AppRule.lhs", false]], "lhs (ceilrule attribute)": [[64, "pyk.kore.rule.CeilRule.lhs", false]], "lhs (equalsrule attribute)": [[64, "pyk.kore.rule.EqualsRule.lhs", false]], "lhs (functionrule attribute)": [[64, "pyk.kore.rule.FunctionRule.lhs", false]], "lhs (krewrite attribute)": [[11, "pyk.kast.inner.KRewrite.lhs", false]], "lhs (rewriterule attribute)": [[64, "pyk.kore.rule.RewriteRule.lhs", false]], "lhs (rule attribute)": [[64, "pyk.kore.rule.Rule.lhs", false]], "lhs (simplirule attribute)": [[64, "pyk.kore.rule.SimpliRule.lhs", false]], "lhs_body (equalityproof property)": [[91, "pyk.proof.implies.EqualityProof.lhs_body", false]], "lift_edge() (kcfgminimizer method)": [[33, "pyk.kcfg.minimize.KCFGMinimizer.lift_edge", false]], "lift_edges() (kcfgminimizer method)": [[33, "pyk.kcfg.minimize.KCFGMinimizer.lift_edges", false]], "lift_split_edge() (kcfgminimizer method)": [[33, "pyk.kcfg.minimize.KCFGMinimizer.lift_split_edge", false]], "lift_split_split() (kcfgminimizer method)": [[33, "pyk.kcfg.minimize.KCFGMinimizer.lift_split_split", false]], "lift_splits() (kcfgminimizer method)": [[33, "pyk.kcfg.minimize.KCFGMinimizer.lift_splits", false]], "light_blue (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_BLUE", false]], "light_coral (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_CORAL", false]], "light_cyan (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_CYAN", false]], "light_goldenrod (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_GOLDENROD", false]], "light_goldenrod_yellow (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_GOLDENROD_YELLOW", false]], "light_gray (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_GRAY", false]], "light_green (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_GREEN", false]], "light_grey (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_GREY", false]], "light_pink (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_PINK", false]], "light_salmon (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_SALMON", false]], "light_sea_green (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_SEA_GREEN", false]], "light_sky_blue (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_SKY_BLUE", false]], "light_slate_blue (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_SLATE_BLUE", false]], "light_slate_gray (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_SLATE_GRAY", false]], "light_slate_grey (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_SLATE_GREY", false]], "light_steel_blue (color attribute)": [[9, "pyk.kast.color.Color.LIGHT_STEEL_BLUE", false]], "light_yellow (color attribute)": [[9, 
"pyk.kast.color.Color.LIGHT_YELLOW", false]], "lightgray (color attribute)": [[9, "pyk.kast.color.Color.LIGHTGRAY", false]], "lime (color attribute)": [[9, "pyk.kast.color.Color.LIME", false]], "lime_green (color attribute)": [[9, "pyk.kast.color.Color.LIME_GREEN", false]], "line (loc attribute)": [[17, "pyk.kast.outer_lexer.Loc.line", false]], "linen (color attribute)": [[9, "pyk.kast.color.Color.LINEN", false]], "lines (aprsummary property)": [[93, "pyk.proof.reachability.APRSummary.lines", false]], "lines (compositesummary property)": [[92, "pyk.proof.proof.CompositeSummary.lines", false]], "lines (equalitysummary property)": [[91, "pyk.proof.implies.EqualitySummary.lines", false]], "lines (proofsummary property)": [[92, "pyk.proof.proof.ProofSummary.lines", false]], "lines (refutationsummary property)": [[91, "pyk.proof.implies.RefutationSummary.lines", false]], "list_empty() (in module pyk.prelude.collections)": [[83, "pyk.prelude.collections.list_empty", false]], "list_item() (in module pyk.prelude.collections)": [[83, "pyk.prelude.collections.list_item", false]], "list_of() (in module pyk.prelude.collections)": [[83, "pyk.prelude.collections.list_of", false]], "list_pattern() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.list_pattern", false]], "llvm (logorigin attribute)": [[63, "pyk.kore.rpc.LogOrigin.LLVM", false]], "llvm (pykbackend attribute)": [[75, "pyk.ktool.kompile.PykBackend.LLVM", false]], "llvm_interpret() (in module pyk.ktool.krun)": [[78, "pyk.ktool.krun.llvm_interpret", false]], "llvm_interpret_raw() (in module pyk.ktool.krun)": [[78, "pyk.ktool.krun.llvm_interpret_raw", false]], "llvm_kompiled_dir (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.llvm_kompiled_dir", false]], "llvm_to_definition() (in module pyk.kllvm.convert)": [[45, "pyk.kllvm.convert.llvm_to_definition", false]], "llvm_to_module() (in module pyk.kllvm.convert)": [[45, "pyk.kllvm.convert.llvm_to_module", false]], "llvm_to_pattern() (in module pyk.kllvm.convert)": [[45, "pyk.kllvm.convert.llvm_to_pattern", false]], "llvm_to_sentence() (in module pyk.kllvm.convert)": [[45, "pyk.kllvm.convert.llvm_to_sentence", false]], "llvm_to_sort() (in module pyk.kllvm.convert)": [[45, "pyk.kllvm.convert.llvm_to_sort", false]], "llvm_to_sort_var() (in module pyk.kllvm.convert)": [[45, "pyk.kllvm.convert.llvm_to_sort_var", false]], "llvmargument (class in pyk.kllvm.hints.prooftrace)": [[47, "pyk.kllvm.hints.prooftrace.LLVMArgument", false]], "llvmeventannotated (class in pyk.kllvm.hints.prooftrace)": [[47, "pyk.kllvm.hints.prooftrace.LLVMEventAnnotated", false]], "llvmeventtype (class in pyk.kllvm.hints.prooftrace)": [[47, "pyk.kllvm.hints.prooftrace.LLVMEventType", false]], "llvmfunctionevent (class in pyk.kllvm.hints.prooftrace)": [[47, "pyk.kllvm.hints.prooftrace.LLVMFunctionEvent", false]], "llvmhookevent (class in pyk.kllvm.hints.prooftrace)": [[47, "pyk.kllvm.hints.prooftrace.LLVMHookEvent", false]], "llvmpatternmatchingfailureevent (class in pyk.kllvm.hints.prooftrace)": [[47, "pyk.kllvm.hints.prooftrace.LLVMPatternMatchingFailureEvent", false]], "llvmrewriteevent (class in pyk.kllvm.hints.prooftrace)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteEvent", false]], "llvmrewritetrace (class in pyk.kllvm.hints.prooftrace)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTrace", false]], "llvmrewritetraceiterator (class in pyk.kllvm.hints.prooftrace)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTraceIterator", false]], "llvmruleevent (class in pyk.kllvm.hints.prooftrace)": [[47, 
"pyk.kllvm.hints.prooftrace.LLVMRuleEvent", false]], "llvmsideconditionevententer (class in pyk.kllvm.hints.prooftrace)": [[47, "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventEnter", false]], "llvmsideconditioneventexit (class in pyk.kllvm.hints.prooftrace)": [[47, "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventExit", false]], "llvmstepevent (class in pyk.kllvm.hints.prooftrace)": [[47, "pyk.kllvm.hints.prooftrace.LLVMStepEvent", false]], "load() (kompiledkore static method)": [[56, "pyk.kore.kompiled.KompiledKore.load", false]], "load() (project static method)": [[27, "pyk.kbuild.project.Project.load", false]], "load_claims() (claimloader method)": [[73, "pyk.ktool.claim_loader.ClaimLoader.load_claims", false]], "load_from_dir() (project static method)": [[27, "pyk.kbuild.project.Project.load_from_dir", false]], "load_from_json() (kompiledkore static method)": [[56, "pyk.kore.kompiled.KompiledKore.load_from_json", false]], "load_from_kore() (kompiledkore static method)": [[56, "pyk.kore.kompiled.KompiledKore.load_from_kore", false]], "loc (class in pyk.kast.outer_lexer)": [[17, "pyk.kast.outer_lexer.Loc", false]], "loc (locationiterator property)": [[17, "pyk.kast.outer_lexer.LocationIterator.loc", false]], "loc (token attribute)": [[17, "pyk.kast.outer_lexer.Token.loc", false]], "location (ast attribute)": [[19, "pyk.kast.outer_syntax.AST.location", false]], "location (atts attribute)": [[8, "pyk.kast.att.Atts.LOCATION", false]], "locationiterator (class in pyk.kast.outer_lexer)": [[17, "pyk.kast.outer_lexer.LocationIterator", false]], "locationtype (class in pyk.kast.att)": [[8, "pyk.kast.att.LocationType", false]], "log_axioms_file (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.log_axioms_file", false]], "log_axioms_file (koreserverargs attribute)": [[63, "pyk.kore.rpc.KoreServerArgs.log_axioms_file", false]], "log_context (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.log_context", false]], "logentry (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.LogEntry", false]], "logorigin (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.LogOrigin", false]], "logrewrite (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.LogRewrite", false]], "logs (abortedresult attribute)": [[63, "pyk.kore.rpc.AbortedResult.logs", false]], "logs (aprproof attribute)": [[93, "pyk.proof.reachability.APRProof.logs", false]], "logs (branchingresult attribute)": [[63, "pyk.kore.rpc.BranchingResult.logs", false]], "logs (ctermexecute attribute)": [[5, "pyk.cterm.symbolic.CTermExecute.logs", false]], "logs (ctermimplies attribute)": [[5, "pyk.cterm.symbolic.CTermImplies.logs", false]], "logs (cutpointresult attribute)": [[63, "pyk.kore.rpc.CutPointResult.logs", false]], "logs (depthboundresult attribute)": [[63, "pyk.kore.rpc.DepthBoundResult.logs", false]], "logs (executeresult attribute)": [[63, "pyk.kore.rpc.ExecuteResult.logs", false]], "logs (impliesresult attribute)": [[63, "pyk.kore.rpc.ImpliesResult.logs", false]], "logs (ndbranch attribute)": [[32, "pyk.kcfg.kcfg.NDBranch.logs", false]], "logs (step attribute)": [[32, "pyk.kcfg.kcfg.Step.logs", false]], "logs (stuckresult attribute)": [[63, "pyk.kore.rpc.StuckResult.logs", false]], "logs (terminalresult attribute)": [[63, "pyk.kore.rpc.TerminalResult.logs", false]], "logs (timeoutresult attribute)": [[63, "pyk.kore.rpc.TimeoutResult.logs", false]], "logs (vacuousresult attribute)": [[63, "pyk.kore.rpc.VacuousResult.logs", false]], "lparen (tokentype attribute)": [[13, "pyk.kast.lexer.TokenType.LPAREN", false], [17, 
"pyk.kast.outer_lexer.TokenType.LPAREN", false], [57, "pyk.kore.lexer.TokenType.LPAREN", false]], "lshiftint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.lshiftInt", false]], "lt_int() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.lt_int", false]], "ltint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.ltInt", false]], "macro (atts attribute)": [[8, "pyk.kast.att.Atts.MACRO", false]], "macro_rec (atts attribute)": [[8, "pyk.kast.att.Atts.MACRO_REC", false]], "macro_rules (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.macro_rules", false]], "magenta (color attribute)": [[9, "pyk.kast.color.Color.MAGENTA", false]], "mahogany (color attribute)": [[9, "pyk.kast.color.Color.MAHOGANY", false]], "main() (in module pyk.kcovr)": [[38, "pyk.kcovr.main", false]], "main_file (kprove attribute)": [[77, "pyk.ktool.kprove.KProve.main_file", false]], "main_module (kdefinition attribute)": [[16, "pyk.kast.outer.KDefinition.main_module", false]], "main_module (kflatmodulelist attribute)": [[16, "pyk.kast.outer.KFlatModuleList.main_module", false]], "main_module (kprint attribute)": [[76, "pyk.ktool.kprint.KPrint.main_module", false]], "main_module_name (aprprover attribute)": [[93, "pyk.proof.reachability.APRProver.main_module_name", false]], "main_module_name (claimindex attribute)": [[72, "pyk.ktool.claim_index.ClaimIndex.main_module_name", false]], "main_module_name (kdefinition attribute)": [[16, "pyk.kast.outer.KDefinition.main_module_name", false]], "maincell (atts attribute)": [[8, "pyk.kast.att.Atts.MAINCELL", false]], "major (kversion attribute)": [[28, "pyk.kbuild.utils.KVersion.major", false]], "make_unique_segments() (kcfgshow static method)": [[35, "pyk.kcfg.show.KCFGShow.make_unique_segments", false]], "manifest() (target method)": [[40, "pyk.kdist.api.Target.manifest", false]], "map_att() (withkatt method)": [[8, "pyk.kast.att.WithKAtt.map_att", false]], "map_attrs() (withattrs method)": [[65, "pyk.kore.syntax.WithAttrs.map_attrs", false]], "map_empty() (in module pyk.prelude.collections)": [[83, "pyk.prelude.collections.map_empty", false]], "map_inner() (kinner method)": [[11, "pyk.kast.inner.KInner.map_inner", false]], "map_item() (in module pyk.prelude.collections)": [[83, "pyk.prelude.collections.map_item", false]], "map_of() (in module pyk.prelude.collections)": [[83, "pyk.prelude.collections.map_of", false]], "map_pattern() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.map_pattern", false]], "map_patterns() (pattern method)": [[65, "pyk.kore.syntax.Pattern.map_patterns", false]], "map_sentences() (kflatmodule method)": [[16, "pyk.kast.outer.KFlatModule.map_sentences", false]], "map_sort() (withsort method)": [[65, "pyk.kore.syntax.WithSort.map_sort", false]], "maroon (color attribute)": [[9, "pyk.kast.color.Color.MAROON", false]], "match() (cterm method)": [[4, "pyk.cterm.cterm.CTerm.match", false]], "match() (kapply method)": [[11, "pyk.kast.inner.KApply.match", false]], "match() (kas method)": [[11, "pyk.kast.inner.KAs.match", false]], "match() (kinner method)": [[11, "pyk.kast.inner.KInner.match", false]], "match() (krewrite method)": [[11, "pyk.kast.inner.KRewrite.match", false]], "match() (ksequence method)": [[11, "pyk.kast.inner.KSequence.match", false]], "match() (ktoken method)": [[11, "pyk.kast.inner.KToken.match", false]], "match() (kvariable method)": [[11, "pyk.kast.inner.KVariable.match", false]], "match_app() (in module pyk.kore.match)": [[59, "pyk.kore.match.match_app", false]], "match_dv() (in module pyk.kore.match)": 
[[59, "pyk.kore.match.match_dv", false]], "match_inj() (in module pyk.kore.match)": [[59, "pyk.kore.match.match_inj", false]], "match_left_assoc() (in module pyk.kore.match)": [[59, "pyk.kore.match.match_left_assoc", false]], "match_list() (in module pyk.kore.match)": [[59, "pyk.kore.match.match_list", false]], "match_map() (in module pyk.kore.match)": [[59, "pyk.kore.match.match_map", false]], "match_rangemap() (in module pyk.kore.match)": [[59, "pyk.kore.match.match_rangemap", false]], "match_set() (in module pyk.kore.match)": [[59, "pyk.kore.match.match_set", false]], "match_symbol() (in module pyk.kore.match)": [[59, "pyk.kore.match.match_symbol", false]], "match_with_constraint() (cterm method)": [[4, "pyk.cterm.cterm.CTerm.match_with_constraint", false]], "maude (pykbackend attribute)": [[75, "pyk.ktool.kompile.PykBackend.MAUDE", false]], "maxint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.maxInt", false]], "maybe() (in module pyk.utils)": [[98, "pyk.utils.maybe", false]], "medium_aquamarine (color attribute)": [[9, "pyk.kast.color.Color.MEDIUM_AQUAMARINE", false]], "medium_blue (color attribute)": [[9, "pyk.kast.color.Color.MEDIUM_BLUE", false]], "medium_orchid (color attribute)": [[9, "pyk.kast.color.Color.MEDIUM_ORCHID", false]], "medium_purple (color attribute)": [[9, "pyk.kast.color.Color.MEDIUM_PURPLE", false]], "medium_sea_green (color attribute)": [[9, "pyk.kast.color.Color.MEDIUM_SEA_GREEN", false]], "medium_slate_blue (color attribute)": [[9, "pyk.kast.color.Color.MEDIUM_SLATE_BLUE", false]], "medium_spring_green (color attribute)": [[9, "pyk.kast.color.Color.MEDIUM_SPRING_GREEN", false]], "medium_turquoise (color attribute)": [[9, "pyk.kast.color.Color.MEDIUM_TURQUOISE", false]], "medium_violet_red (color attribute)": [[9, "pyk.kast.color.Color.MEDIUM_VIOLET_RED", false]], "meet() (koresorttable method)": [[56, "pyk.kore.kompiled.KoreSortTable.meet", false]], "melon (color attribute)": [[9, "pyk.kast.color.Color.MELON", false]], "merge_nodes() (kcfgminimizer method)": [[33, "pyk.kcfg.minimize.KCFGMinimizer.merge_nodes", false]], "merge_with() (in module pyk.utils)": [[98, "pyk.utils.merge_with", false]], "merged_edge() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.merged_edge", false]], "merged_edges() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.merged_edges", false]], "message (defaulterror attribute)": [[63, "pyk.kore.rpc.DefaultError.message", false]], "midnight_blue (color attribute)": [[9, "pyk.kast.color.Color.MIDNIGHT_BLUE", false]], "minimize (nodeprinter attribute)": [[35, "pyk.kcfg.show.NodePrinter.minimize", false]], "minimize() (kcfgminimizer method)": [[33, "pyk.kcfg.minimize.KCFGMinimizer.minimize", false]], "minimize() (subst method)": [[11, "pyk.kast.inner.Subst.minimize", false]], "minimize_kcfg() (kcfgexploration method)": [[30, "pyk.kcfg.exploration.KCFGExploration.minimize_kcfg", false]], "minimize_rule_like() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.minimize_rule_like", false]], "minimize_term() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.minimize_term", false]], "minint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.minInt", false]], "minor (kversion attribute)": [[28, "pyk.kbuild.utils.KVersion.minor", false]], "mint_cream (color attribute)": [[9, "pyk.kast.color.Color.MINT_CREAM", false]], "misty_rose (color attribute)": [[9, "pyk.kast.color.Color.MISTY_ROSE", false]], "ml_and (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_AND", false]], "ml_bottom (tokentype attribute)": [[57, 
"pyk.kore.lexer.TokenType.ML_BOTTOM", false]], "ml_ceil (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_CEIL", false]], "ml_dv (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_DV", false]], "ml_equals (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_EQUALS", false]], "ml_exists (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_EXISTS", false]], "ml_floor (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_FLOOR", false]], "ml_forall (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_FORALL", false]], "ml_iff (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_IFF", false]], "ml_implies (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_IMPLIES", false]], "ml_in (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_IN", false]], "ml_left_assoc (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_LEFT_ASSOC", false]], "ml_mu (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_MU", false]], "ml_next (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_NEXT", false]], "ml_not (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_NOT", false]], "ml_nu (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_NU", false]], "ml_or (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_OR", false]], "ml_pattern() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.ml_pattern", false]], "ml_pred_to_bool() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.ml_pred_to_bool", false]], "ml_rewrites (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_REWRITES", false]], "ml_right_assoc (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_RIGHT_ASSOC", false]], "ml_top (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.ML_TOP", false]], "mland() (in module pyk.prelude.ml)": [[87, "pyk.prelude.ml.mlAnd", false]], "mlbottom() (in module pyk.prelude.ml)": [[87, "pyk.prelude.ml.mlBottom", false]], "mlceil() (in module pyk.prelude.ml)": [[87, "pyk.prelude.ml.mlCeil", false]], "mlconn (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.MLConn", false]], "mlequals() (in module pyk.prelude.ml)": [[87, "pyk.prelude.ml.mlEquals", false]], "mlequalsfalse() (in module pyk.prelude.ml)": [[87, "pyk.prelude.ml.mlEqualsFalse", false]], "mlequalstrue() (in module pyk.prelude.ml)": [[87, "pyk.prelude.ml.mlEqualsTrue", false]], "mlexists() (in module pyk.prelude.ml)": [[87, "pyk.prelude.ml.mlExists", false]], "mlfixpoint (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.MLFixpoint", false]], "mlimplies() (in module pyk.prelude.ml)": [[87, "pyk.prelude.ml.mlImplies", false]], "mlnot() (in module pyk.prelude.ml)": [[87, "pyk.prelude.ml.mlNot", false]], "mlor() (in module pyk.prelude.ml)": [[87, "pyk.prelude.ml.mlOr", false]], "mlpattern (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.MLPattern", false]], "mlpred (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.MLPred", false]], "mlquant (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.MLQuant", false]], "mlrewrite (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.MLRewrite", false]], "mltop() (in module pyk.prelude.ml)": [[87, "pyk.prelude.ml.mlTop", false]], "moccasin (color attribute)": [[9, "pyk.kast.color.Color.MOCCASIN", false]], "model (satresult attribute)": [[63, "pyk.kore.rpc.SatResult.model", false]], "models (aprfailureinfo attribute)": [[93, "pyk.proof.reachability.APRFailureInfo.models", false]], "modint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.modInt", false]], "modname (state attribute)": [[17, 
"pyk.kast.outer_lexer.State.MODNAME", false]], "modname (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.MODNAME", false]], "module": [[1, "module-pyk", false], [2, "module-pyk.coverage", false], [3, "module-pyk.cterm", false], [4, "module-pyk.cterm.cterm", false], [5, "module-pyk.cterm.symbolic", false], [6, "module-pyk.dequote", false], [7, "module-pyk.kast", false], [8, "module-pyk.kast.att", false], [9, "module-pyk.kast.color", false], [10, "module-pyk.kast.formatter", false], [11, "module-pyk.kast.inner", false], [12, "module-pyk.kast.kast", false], [13, "module-pyk.kast.lexer", false], [14, "module-pyk.kast.manip", false], [15, "module-pyk.kast.markdown", false], [16, "module-pyk.kast.outer", false], [17, "module-pyk.kast.outer_lexer", false], [18, "module-pyk.kast.outer_parser", false], [19, "module-pyk.kast.outer_syntax", false], [20, "module-pyk.kast.parser", false], [21, "module-pyk.kast.pretty", false], [22, "module-pyk.kast.rewrite", false], [23, "module-pyk.kast.utils", false], [24, "module-pyk.kbuild", false], [25, "module-pyk.kbuild.config", false], [26, "module-pyk.kbuild.kbuild", false], [27, "module-pyk.kbuild.project", false], [28, "module-pyk.kbuild.utils", false], [29, "module-pyk.kcfg", false], [30, "module-pyk.kcfg.exploration", false], [31, "module-pyk.kcfg.explore", false], [32, "module-pyk.kcfg.kcfg", false], [33, "module-pyk.kcfg.minimize", false], [34, "module-pyk.kcfg.semantics", false], [35, "module-pyk.kcfg.show", false], [36, "module-pyk.kcfg.store", false], [37, "module-pyk.kcfg.tui", false], [38, "module-pyk.kcovr", false], [39, "module-pyk.kdist", false], [40, "module-pyk.kdist.api", false], [41, "module-pyk.kdist.utils", false], [42, "module-pyk.kllvm", false], [43, "module-pyk.kllvm.ast", false], [44, "module-pyk.kllvm.compiler", false], [45, "module-pyk.kllvm.convert", false], [46, "module-pyk.kllvm.hints", false], [47, "module-pyk.kllvm.hints.prooftrace", false], [48, "module-pyk.kllvm.importer", false], [49, "module-pyk.kllvm.load", false], [50, "module-pyk.kllvm.load_static", false], [51, "module-pyk.kllvm.parser", false], [52, "module-pyk.kllvm.runtime", false], [53, "module-pyk.kllvm.utils", false], [54, "module-pyk.konvert", false], [55, "module-pyk.kore", false], [56, "module-pyk.kore.kompiled", false], [57, "module-pyk.kore.lexer", false], [58, "module-pyk.kore.manip", false], [59, "module-pyk.kore.match", false], [60, "module-pyk.kore.parser", false], [61, "module-pyk.kore.pool", false], [62, "module-pyk.kore.prelude", false], [63, "module-pyk.kore.rpc", false], [64, "module-pyk.kore.rule", false], [65, "module-pyk.kore.syntax", false], [66, "module-pyk.kore.tools", false], [67, "module-pyk.kore_exec_covr", false], [68, "module-pyk.kore_exec_covr.kore_exec_covr", false], [69, "module-pyk.krepl", false], [70, "module-pyk.krepl.repl", false], [71, "module-pyk.ktool", false], [72, "module-pyk.ktool.claim_index", false], [73, "module-pyk.ktool.claim_loader", false], [74, "module-pyk.ktool.kfuzz", false], [75, "module-pyk.ktool.kompile", false], [76, "module-pyk.ktool.kprint", false], [77, "module-pyk.ktool.kprove", false], [78, "module-pyk.ktool.krun", false], [79, "module-pyk.ktool.prove_rpc", false], [80, "module-pyk.ktool.utils", false], [81, "module-pyk.prelude", false], [82, "module-pyk.prelude.bytes", false], [83, "module-pyk.prelude.collections", false], [84, "module-pyk.prelude.k", false], [85, "module-pyk.prelude.kbool", false], [86, "module-pyk.prelude.kint", false], [87, "module-pyk.prelude.ml", false], [88, 
"module-pyk.prelude.string", false], [89, "module-pyk.prelude.utils", false], [90, "module-pyk.proof", false], [91, "module-pyk.proof.implies", false], [92, "module-pyk.proof.proof", false], [93, "module-pyk.proof.reachability", false], [94, "module-pyk.proof.show", false], [95, "module-pyk.proof.tui", false], [96, "module-pyk.testing", false], [97, "module-pyk.testing.plugin", false], [98, "module-pyk.utils", false]], "module (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Module", false]], "module (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Module", false]], "module() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.module", false]], "module() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.module", false]], "module() (outerparser method)": [[18, "pyk.kast.outer_parser.OuterParser.module", false]], "module_name (aprproof property)": [[93, "pyk.proof.reachability.APRProof.module_name", false]], "module_name (aprproofstep attribute)": [[93, "pyk.proof.reachability.APRProofStep.module_name", false]], "module_name (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.module_name", false]], "module_name (duplicatemoduleerror attribute)": [[63, "pyk.kore.rpc.DuplicateModuleError.module_name", false]], "module_name (import attribute)": [[19, "pyk.kast.outer_syntax.Import.module_name", false], [65, "pyk.kore.syntax.Import.module_name", false]], "module_name (koreserverargs attribute)": [[63, "pyk.kore.rpc.KoreServerArgs.module_name", false]], "module_name (unknownmoduleerror attribute)": [[63, "pyk.kore.rpc.UnknownModuleError.module_name", false]], "module_names (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.module_names", false]], "module_to_llvm() (in module pyk.kllvm.convert)": [[45, "pyk.kllvm.convert.module_to_llvm", false]], "modules (definition attribute)": [[19, "pyk.kast.outer_syntax.Definition.modules", false], [65, "pyk.kore.syntax.Definition.modules", false]], "modules (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.modules", false]], "modules (kflatmodulelist attribute)": [[16, "pyk.kast.outer.KFlatModuleList.modules", false]], "mu (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Mu", false]], "mu() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.mu", false]], "mulberry (color attribute)": [[9, "pyk.kast.color.Color.MULBERRY", false]], "mulint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.mulInt", false]], "multi_or() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.multi_or", false]], "multiaryconn (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.MultiaryConn", false]], "multiplicity (atts attribute)": [[8, "pyk.kast.att.Atts.MULTIPLICITY", false]], "name (atom attribute)": [[15, "pyk.kast.markdown.Atom.name", false]], "name (attkey attribute)": [[8, "pyk.kast.att.AttKey.name", false]], "name (evar attribute)": [[65, "pyk.kore.syntax.EVar.name", false]], "name (kflatmodule attribute)": [[16, "pyk.kast.outer.KFlatModule.name", false]], "name (kimport attribute)": [[16, "pyk.kast.outer.KImport.name", false]], "name (klabel attribute)": [[11, "pyk.kast.inner.KLabel.name", false]], "name (knonterminal attribute)": [[16, "pyk.kast.outer.KNonTerminal.name", false]], "name (ksort attribute)": [[11, "pyk.kast.inner.KSort.name", false]], "name (ksyntaxlexical attribute)": [[16, "pyk.kast.outer.KSyntaxLexical.name", false]], "name (kvariable attribute)": [[11, "pyk.kast.inner.KVariable.name", false]], "name (llvmfunctionevent property)": [[47, 
"pyk.kllvm.hints.prooftrace.LLVMFunctionEvent.name", false]], "name (llvmhookevent property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMHookEvent.name", false]], "name (module attribute)": [[19, "pyk.kast.outer_syntax.Module.name", false], [65, "pyk.kore.syntax.Module.name", false]], "name (nonterminal attribute)": [[19, "pyk.kast.outer_syntax.NonTerminal.name", false]], "name (project attribute)": [[27, "pyk.kbuild.project.Project.name", false]], "name (sort attribute)": [[19, "pyk.kast.outer_syntax.Sort.name", false], [65, "pyk.kore.syntax.Sort.name", false]], "name (sortapp attribute)": [[65, "pyk.kore.syntax.SortApp.name", false]], "name (sortdecl attribute)": [[19, "pyk.kast.outer_syntax.SortDecl.name", false], [65, "pyk.kore.syntax.SortDecl.name", false]], "name (sortvar attribute)": [[65, "pyk.kore.syntax.SortVar.name", false]], "name (svar attribute)": [[65, "pyk.kore.syntax.SVar.name", false]], "name (symbol attribute)": [[65, "pyk.kore.syntax.Symbol.name", false]], "name (syntaxlexical attribute)": [[19, "pyk.kast.outer_syntax.SyntaxLexical.name", false]], "name (target attribute)": [[27, "pyk.kbuild.project.Target.name", false]], "name (varpattern attribute)": [[65, "pyk.kore.syntax.VarPattern.name", false]], "nat (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.NAT", false]], "navajo_white (color attribute)": [[9, "pyk.kast.color.Color.NAVAJO_WHITE", false]], "navwidget (class in pyk.kcfg.tui)": [[37, "pyk.kcfg.tui.NavWidget", false]], "navwidget.selected (class in pyk.kcfg.tui)": [[37, "pyk.kcfg.tui.NavWidget.Selected", false]], "navy (color attribute)": [[9, "pyk.kast.color.Color.NAVY", false]], "navy_blue (color attribute)": [[9, "pyk.kast.color.Color.NAVY_BLUE", false]], "ndbranch (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.NDBranch", false]], "ndbranches() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.ndbranches", false]], "ne_bool() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.ne_bool", false]], "ne_int() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.ne_int", false]], "neqint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.neqInt", false]], "new (syntaxsynonym attribute)": [[19, "pyk.kast.outer_syntax.SyntaxSynonym.new", false]], "new_sort (ksortsynonym attribute)": [[16, "pyk.kast.outer.KSortSynonym.new_sort", false]], "next (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Next", false]], "next() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.next", false]], "next_state() (interpreter method)": [[70, "pyk.krepl.repl.Interpreter.next_state", false]], "next_state() (kinterpreter method)": [[70, "pyk.krepl.repl.KInterpreter.next_state", false]], "next_states (abortedresult attribute)": [[63, "pyk.kore.rpc.AbortedResult.next_states", false]], "next_states (branchingresult attribute)": [[63, "pyk.kore.rpc.BranchingResult.next_states", false]], "next_states (ctermexecute attribute)": [[5, "pyk.cterm.symbolic.CTermExecute.next_states", false]], "next_states (cutpointresult attribute)": [[63, "pyk.kore.rpc.CutPointResult.next_states", false]], "next_states (depthboundresult attribute)": [[63, "pyk.kore.rpc.DepthBoundResult.next_states", false]], "next_states (executeresult attribute)": [[63, "pyk.kore.rpc.ExecuteResult.next_states", false]], "next_states (stuckresult attribute)": [[63, "pyk.kore.rpc.StuckResult.next_states", false]], "next_states (terminalresult attribute)": [[63, "pyk.kore.rpc.TerminalResult.next_states", false]], "next_states (timeoutresult attribute)": [[63, "pyk.kore.rpc.TimeoutResult.next_states", false]], 
"next_states (vacuousresult attribute)": [[63, "pyk.kore.rpc.VacuousResult.next_states", false]], "nextstate (class in pyk.cterm.symbolic)": [[5, "pyk.cterm.symbolic.NextState", false]], "no_cell_rewrite_to_dots() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.no_cell_rewrite_to_dots", false]], "no_dispatch (behaviorview.selected attribute)": [[37, "pyk.kcfg.tui.BehaviorView.Selected.no_dispatch", false]], "no_dispatch (graphchunk.selected attribute)": [[37, "pyk.kcfg.tui.GraphChunk.Selected.no_dispatch", false]], "no_dispatch (navwidget.selected attribute)": [[37, "pyk.kcfg.tui.NavWidget.Selected.no_dispatch", false]], "no_evaluators (atts attribute)": [[8, "pyk.kast.att.Atts.NO_EVALUATORS", false]], "no_post_exec_simplify (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.no_post_exec_simplify", false]], "node (aprproofstep attribute)": [[93, "pyk.proof.reachability.APRProofStep.node", false]], "node() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.node", false]], "node_attrs() (aprproofnodeprinter method)": [[94, "pyk.proof.show.APRProofNodePrinter.node_attrs", false]], "node_attrs() (nodeprinter method)": [[35, "pyk.kcfg.show.NodePrinter.node_attrs", false]], "node_id (aprproofresult attribute)": [[93, "pyk.proof.reachability.APRProofResult.node_id", false]], "node_printer (kcfgshow attribute)": [[35, "pyk.kcfg.show.KCFGShow.node_printer", false]], "node_refutations (aprproof attribute)": [[93, "pyk.proof.reachability.APRProof.node_refutations", false]], "node_short_info() (kcfgshow method)": [[35, "pyk.kcfg.show.KCFGShow.node_short_info", false]], "nodeattr (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.NodeAttr", false]], "nodeprinter (class in pyk.kcfg.show)": [[35, "pyk.kcfg.show.NodePrinter", false]], "nodes (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.nodes", false]], "nodes (kcfg property)": [[32, "pyk.kcfg.kcfg.KCFG.nodes", false]], "nodeview (class in pyk.kcfg.tui)": [[37, "pyk.kcfg.tui.NodeView", false]], "non_assoc (assoc attribute)": [[19, "pyk.kast.outer_syntax.Assoc.NON_ASSOC", false]], "non_assoc (kassoc attribute)": [[16, "pyk.kast.outer.KAssoc.NON_ASSOC", false]], "non_empty (userlist attribute)": [[19, "pyk.kast.outer_syntax.UserList.non_empty", false]], "non_terminals (kproduction property)": [[16, "pyk.kast.outer.KProduction.non_terminals", false]], "none (kastoutput attribute)": [[76, "pyk.ktool.kprint.KAstOutput.NONE", false]], "none (kproveoutput attribute)": [[77, "pyk.ktool.kprove.KProveOutput.NONE", false]], "none (krunoutput attribute)": [[78, "pyk.ktool.krun.KRunOutput.NONE", false]], "none (printoutput attribute)": [[66, "pyk.kore.tools.PrintOutput.NONE", false]], "none() (in module pyk.utils)": [[98, "pyk.utils.none", false]], "nonempty_str() (in module pyk.utils)": [[98, "pyk.utils.nonempty_str", false]], "nonetype (class in pyk.kast.att)": [[8, "pyk.kast.att.NoneType", false]], "nonterminal (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.NonTerminal", false]], "nonzero_depth (aprproofstep attribute)": [[93, "pyk.proof.reachability.APRProofStep.nonzero_depth", false]], "nonzero_depth() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.nonzero_depth", false]], "normalize_constraints() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.normalize_constraints", false]], "normalize_ml_pred() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.normalize_ml_pred", false]], "not (class in pyk.kast.markdown)": [[15, "pyk.kast.markdown.Not", false]], "not (class in pyk.kore.syntax)": [[65, 
"pyk.kore.syntax.Not", false]], "not_bool() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.not_bool", false]], "not_log_context (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.not_log_context", false]], "not_none() (in module pyk.utils)": [[98, "pyk.utils.not_none", false]], "notbool() (in module pyk.prelude.kbool)": [[85, "pyk.prelude.kbool.notBool", false]], "notint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.notInt", false]], "nott() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.nott", false]], "nu (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Nu", false]], "nu() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.nu", false]], "nullaryconn (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.NullaryConn", false]], "of() (and class method)": [[65, "pyk.kore.syntax.And.of", false]], "of() (bottom class method)": [[65, "pyk.kore.syntax.Bottom.of", false]], "of() (ceil class method)": [[65, "pyk.kore.syntax.Ceil.of", false]], "of() (dv class method)": [[65, "pyk.kore.syntax.DV.of", false]], "of() (equals class method)": [[65, "pyk.kore.syntax.Equals.of", false]], "of() (exists class method)": [[65, "pyk.kore.syntax.Exists.of", false]], "of() (floor class method)": [[65, "pyk.kore.syntax.Floor.of", false]], "of() (forall class method)": [[65, "pyk.kore.syntax.Forall.of", false]], "of() (iff class method)": [[65, "pyk.kore.syntax.Iff.of", false]], "of() (implies class method)": [[65, "pyk.kore.syntax.Implies.of", false]], "of() (in class method)": [[65, "pyk.kore.syntax.In.of", false]], "of() (mlpattern class method)": [[65, "pyk.kore.syntax.MLPattern.of", false]], "of() (mu class method)": [[65, "pyk.kore.syntax.Mu.of", false]], "of() (next class method)": [[65, "pyk.kore.syntax.Next.of", false]], "of() (not class method)": [[65, "pyk.kore.syntax.Not.of", false]], "of() (nu class method)": [[65, "pyk.kore.syntax.Nu.of", false]], "of() (or class method)": [[65, "pyk.kore.syntax.Or.of", false]], "of() (rewrites class method)": [[65, "pyk.kore.syntax.Rewrites.of", false]], "of() (top class method)": [[65, "pyk.kore.syntax.Top.of", false]], "old (syntaxsynonym attribute)": [[19, "pyk.kast.outer_syntax.SyntaxSynonym.old", false]], "old_lace (color attribute)": [[9, "pyk.kast.color.Color.OLD_LACE", false]], "old_sort (ksortsynonym attribute)": [[16, "pyk.kast.outer.KSortSynonym.old_sort", false]], "olive (color attribute)": [[9, "pyk.kast.color.Color.OLIVE", false]], "olive_drab (color attribute)": [[9, "pyk.kast.color.Color.OLIVE_DRAB", false]], "olive_green (color attribute)": [[9, "pyk.kast.color.Color.OLIVE_GREEN", false]], "on_attributes() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.on_attributes", false]], "on_behavior_view_selected() (nodeview method)": [[37, "pyk.kcfg.tui.NodeView.on_behavior_view_selected", false]], "on_click() (behaviorview method)": [[37, "pyk.kcfg.tui.BehaviorView.on_click", false]], "on_click() (constraint method)": [[37, "pyk.kcfg.tui.Constraint.on_click", false]], "on_click() (custom method)": [[37, "pyk.kcfg.tui.Custom.on_click", false]], "on_click() (graphchunk method)": [[37, "pyk.kcfg.tui.GraphChunk.on_click", false]], "on_click() (status method)": [[37, "pyk.kcfg.tui.Status.on_click", false]], "on_click() (term method)": [[37, "pyk.kcfg.tui.Term.on_click", false]], "on_constraint_selected() (nodeview method)": [[37, "pyk.kcfg.tui.NodeView.on_constraint_selected", false]], "on_custom_selected() (nodeview method)": [[37, "pyk.kcfg.tui.NodeView.on_custom_selected", false]], "on_enter() 
(graphchunk method)": [[37, "pyk.kcfg.tui.GraphChunk.on_enter", false]], "on_graph_chunk_selected() (kcfgviewer method)": [[37, "pyk.kcfg.tui.KCFGViewer.on_graph_chunk_selected", false]], "on_leave() (graphchunk method)": [[37, "pyk.kcfg.tui.GraphChunk.on_leave", false]], "on_mount() (aprproofviewer method)": [[95, "pyk.proof.tui.APRProofViewer.on_mount", false]], "on_mount() (nodeview method)": [[37, "pyk.kcfg.tui.NodeView.on_mount", false]], "on_status_selected() (nodeview method)": [[37, "pyk.kcfg.tui.NodeView.on_status_selected", false]], "on_term_selected() (nodeview method)": [[37, "pyk.kcfg.tui.NodeView.on_term_selected", false]], "one_line_summary (aprproof property)": [[93, "pyk.proof.reachability.APRProof.one_line_summary", false]], "one_line_summary (proof property)": [[92, "pyk.proof.proof.Proof.one_line_summary", false]], "oneline (koreexeclogformat attribute)": [[63, "pyk.kore.rpc.KoreExecLogFormat.ONELINE", false]], "op (not attribute)": [[15, "pyk.kast.markdown.Not.op", false]], "op_sort (ceil attribute)": [[65, "pyk.kore.syntax.Ceil.op_sort", false]], "op_sort (equals attribute)": [[65, "pyk.kore.syntax.Equals.op_sort", false]], "op_sort (floor attribute)": [[65, "pyk.kore.syntax.Floor.op_sort", false]], "op_sort (in attribute)": [[65, "pyk.kore.syntax.In.op_sort", false]], "op_sort (mlpred attribute)": [[65, "pyk.kore.syntax.MLPred.op_sort", false]], "ops (and attribute)": [[15, "pyk.kast.markdown.And.ops", false], [65, "pyk.kore.syntax.And.ops", false]], "ops (multiaryconn attribute)": [[65, "pyk.kore.syntax.MultiaryConn.ops", false]], "ops (or attribute)": [[15, "pyk.kast.markdown.Or.ops", false], [65, "pyk.kore.syntax.Or.ops", false]], "optimize_kcfg (aprproofresult attribute)": [[93, "pyk.proof.reachability.APRProofResult.optimize_kcfg", false]], "optimize_kcfg (aprprover attribute)": [[93, "pyk.proof.reachability.APRProver.optimize_kcfg", false]], "optimizednodestore (class in pyk.kcfg.store)": [[36, "pyk.kcfg.store.OptimizedNodeStore", false]], "optionaltype (class in pyk.kast.att)": [[8, "pyk.kast.att.OptionalType", false]], "or (class in pyk.kast.markdown)": [[15, "pyk.kast.markdown.Or", false]], "or (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Or", false]], "or_bool() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.or_bool", false]], "orange (color attribute)": [[9, "pyk.kast.color.Color.ORANGE", false]], "orange_red (color attribute)": [[9, "pyk.kast.color.Color.ORANGE_RED", false]], "orbool() (in module pyk.prelude.kbool)": [[85, "pyk.prelude.kbool.orBool", false]], "orchid (color attribute)": [[9, "pyk.kast.color.Color.ORCHID", false]], "origin (logrewrite attribute)": [[63, "pyk.kore.rpc.LogRewrite.origin", false]], "orint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.orInt", false]], "orr() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.orr", false]], "outer_lexer() (in module pyk.kast.outer_lexer)": [[17, "pyk.kast.outer_lexer.outer_lexer", false]], "outerparser (class in pyk.kast.outer_parser)": [[18, "pyk.kast.outer_parser.OuterParser", false]], "overload (atts attribute)": [[8, "pyk.kast.att.Atts.OVERLOAD", false]], "overloads (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.overloads", false]], "owise (atts attribute)": [[8, "pyk.kast.att.Atts.OWISE", false]], "own_status (aprproof property)": [[93, "pyk.proof.reachability.APRProof.own_status", false]], "own_status (impliesproof property)": [[91, "pyk.proof.implies.ImpliesProof.own_status", false]], "own_status (proof property)": [[92, 
"pyk.proof.proof.Proof.own_status", false]], "package (packagesource attribute)": [[27, "pyk.kbuild.project.PackageSource.package", false]], "package_path() (in module pyk.kdist.utils)": [[41, "pyk.kdist.utils.package_path", false]], "packagesource (class in pyk.kbuild.project)": [[27, "pyk.kbuild.project.PackageSource", false]], "pale_goldenrod (color attribute)": [[9, "pyk.kast.color.Color.PALE_GOLDENROD", false]], "pale_green (color attribute)": [[9, "pyk.kast.color.Color.PALE_GREEN", false]], "pale_turquoise (color attribute)": [[9, "pyk.kast.color.Color.PALE_TURQUOISE", false]], "pale_violet_red (color attribute)": [[9, "pyk.kast.color.Color.PALE_VIOLET_RED", false]], "papaya_whip (color attribute)": [[9, "pyk.kast.color.Color.PAPAYA_WHIP", false]], "parallel_advance_proof() (in module pyk.proof.proof)": [[92, "pyk.proof.proof.parallel_advance_proof", false]], "param_sorts (aliasdecl attribute)": [[65, "pyk.kore.syntax.AliasDecl.param_sorts", false]], "param_sorts (symboldecl attribute)": [[65, "pyk.kore.syntax.SymbolDecl.param_sorts", false]], "params (klabel attribute)": [[11, "pyk.kast.inner.KLabel.params", false]], "params (kproduction attribute)": [[16, "pyk.kast.outer.KProduction.params", false]], "params (ksyntaxsort attribute)": [[16, "pyk.kast.outer.KSyntaxSort.params", false]], "params (sortdecl attribute)": [[19, "pyk.kast.outer_syntax.SortDecl.params", false]], "paren() (in module pyk.kast.pretty)": [[21, "pyk.kast.pretty.paren", false]], "parse() (anytype method)": [[8, "pyk.kast.att.AnyType.parse", false]], "parse() (atttype method)": [[8, "pyk.kast.att.AttType.parse", false]], "parse() (colorstype method)": [[8, "pyk.kast.att.ColorsType.parse", false]], "parse() (colortype method)": [[8, "pyk.kast.att.ColorType.parse", false]], "parse() (format class method)": [[8, "pyk.kast.att.Format.parse", false]], "parse() (formattype method)": [[8, "pyk.kast.att.FormatType.parse", false]], "parse() (inttype method)": [[8, "pyk.kast.att.IntType.parse", false]], "parse() (katt class method)": [[8, "pyk.kast.att.KAtt.parse", false]], "parse() (kversion static method)": [[28, "pyk.kbuild.utils.KVersion.parse", false]], "parse() (llvmrewritetrace static method)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTrace.parse", false]], "parse() (locationtype method)": [[8, "pyk.kast.att.LocationType.parse", false]], "parse() (nonetype method)": [[8, "pyk.kast.att.NoneType.parse", false]], "parse() (optionaltype method)": [[8, "pyk.kast.att.OptionalType.parse", false]], "parse() (pathtype method)": [[8, "pyk.kast.att.PathType.parse", false]], "parse() (selectorparser method)": [[15, "pyk.kast.markdown.SelectorParser.parse", false]], "parse() (strtype method)": [[8, "pyk.kast.att.StrType.parse", false]], "parse() (targetid static method)": [[40, "pyk.kdist.api.TargetId.parse", false]], "parse_args() (in module pyk.kcovr)": [[38, "pyk.kcovr.parse_args", false]], "parse_definition() (in module pyk.kllvm.parser)": [[51, "pyk.kllvm.parser.parse_definition", false]], "parse_definition_file() (in module pyk.kllvm.parser)": [[51, "pyk.kllvm.parser.parse_definition_file", false]], "parse_modules() (kprove method)": [[77, "pyk.ktool.kprove.KProve.parse_modules", false]], "parse_outer() (in module pyk.kast.utils)": [[23, "pyk.kast.utils.parse_outer", false]], "parse_pattern() (in module pyk.kllvm.parser)": [[51, "pyk.kllvm.parser.parse_pattern", false]], "parse_pattern_file() (in module pyk.kllvm.parser)": [[51, "pyk.kllvm.parser.parse_pattern_file", false]], "parse_rule_applications() (in module 
pyk.kore_exec_covr.kore_exec_covr)": [[68, "pyk.kore_exec_covr.kore_exec_covr.parse_rule_applications", false]], "parse_sort() (in module pyk.kllvm.parser)": [[51, "pyk.kllvm.parser.parse_sort", false]], "parse_sort_file() (in module pyk.kllvm.parser)": [[51, "pyk.kllvm.parser.parse_sort_file", false]], "parse_tags() (in module pyk.kast.markdown)": [[15, "pyk.kast.markdown.parse_tags", false]], "parse_token() (kprint method)": [[76, "pyk.ktool.kprint.KPrint.parse_token", false]], "parseerror": [[63, "pyk.kore.rpc.ParseError", false]], "partition() (in module pyk.utils)": [[98, "pyk.utils.partition", false]], "passed (proof property)": [[92, "pyk.proof.proof.Proof.passed", false]], "passed (proofstatus attribute)": [[92, "pyk.proof.proof.ProofStatus.PASSED", false]], "patch (kversion attribute)": [[28, "pyk.kbuild.utils.KVersion.patch", false]], "path (kbuildenv attribute)": [[26, "pyk.kbuild.kbuild.KBuildEnv.path", false]], "path (kdistribution attribute)": [[80, "id0", false], [80, "pyk.ktool.utils.KDistribution.path", false]], "path (pathsource attribute)": [[27, "pyk.kbuild.project.PathSource.path", false]], "path (project attribute)": [[27, "pyk.kbuild.project.Project.path", false]], "path (require attribute)": [[19, "pyk.kast.outer_syntax.Require.path", false]], "path_conditions (aprfailureinfo attribute)": [[93, "pyk.proof.reachability.APRFailureInfo.path_conditions", false]], "path_constraints() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.path_constraints", false]], "path_length() (kcfg static method)": [[32, "pyk.kcfg.kcfg.KCFG.path_length", false]], "paths_between() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.paths_between", false]], "pathsource (class in pyk.kbuild.project)": [[27, "pyk.kbuild.project.PathSource", false]], "pathtype (class in pyk.kast.att)": [[8, "pyk.kast.att.PathType", false]], "pattern (assoc property)": [[65, "pyk.kore.syntax.Assoc.pattern", false]], "pattern (axiom attribute)": [[65, "pyk.kore.syntax.Axiom.pattern", false]], "pattern (axiomlike attribute)": [[65, "pyk.kore.syntax.AxiomLike.pattern", false]], "pattern (ceil attribute)": [[65, "pyk.kore.syntax.Ceil.pattern", false]], "pattern (claim attribute)": [[65, "pyk.kore.syntax.Claim.pattern", false]], "pattern (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Pattern", false]], "pattern (exists attribute)": [[65, "pyk.kore.syntax.Exists.pattern", false]], "pattern (floor attribute)": [[65, "pyk.kore.syntax.Floor.pattern", false]], "pattern (forall attribute)": [[65, "pyk.kore.syntax.Forall.pattern", false]], "pattern (kas attribute)": [[11, "pyk.kast.inner.KAs.pattern", false]], "pattern (kstate attribute)": [[70, "pyk.krepl.repl.KState.pattern", false]], "pattern (kversion attribute)": [[28, "pyk.kbuild.utils.KVersion.PATTERN", false]], "pattern (leftassoc property)": [[65, "pyk.kore.syntax.LeftAssoc.pattern", false]], "pattern (mlfixpoint attribute)": [[65, "pyk.kore.syntax.MLFixpoint.pattern", false]], "pattern (mlquant attribute)": [[65, "pyk.kore.syntax.MLQuant.pattern", false]], "pattern (mu attribute)": [[65, "pyk.kore.syntax.Mu.pattern", false]], "pattern (next attribute)": [[65, "pyk.kore.syntax.Next.pattern", false]], "pattern (not attribute)": [[65, "pyk.kore.syntax.Not.pattern", false]], "pattern (nu attribute)": [[65, "pyk.kore.syntax.Nu.pattern", false]], "pattern (rightassoc property)": [[65, "pyk.kore.syntax.RightAssoc.pattern", false]], "pattern (roundpred attribute)": [[65, "pyk.kore.syntax.RoundPred.pattern", false]], "pattern (smtsolvererror attribute)": [[63, 
"pyk.kore.rpc.SmtSolverError.pattern", false]], "pattern (term property)": [[52, "pyk.kllvm.runtime.Term.pattern", false]], "pattern (unaryconn attribute)": [[65, "pyk.kore.syntax.UnaryConn.pattern", false]], "pattern() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.pattern", false]], "pattern_sorts() (koresymboltable method)": [[56, "pyk.kore.kompiled.KoreSymbolTable.pattern_sorts", false]], "pattern_to_llvm() (in module pyk.kllvm.convert)": [[45, "pyk.kllvm.convert.pattern_to_llvm", false]], "patternerror": [[63, "pyk.kore.rpc.PatternError", false]], "patterns (app property)": [[65, "pyk.kore.syntax.App.patterns", false]], "patterns (assoc property)": [[65, "pyk.kore.syntax.Assoc.patterns", false]], "patterns (binaryconn property)": [[65, "pyk.kore.syntax.BinaryConn.patterns", false]], "patterns (binarypred property)": [[65, "pyk.kore.syntax.BinaryPred.patterns", false]], "patterns (dv property)": [[65, "pyk.kore.syntax.DV.patterns", false]], "patterns (mlfixpoint property)": [[65, "pyk.kore.syntax.MLFixpoint.patterns", false]], "patterns (mlquant property)": [[65, "pyk.kore.syntax.MLQuant.patterns", false]], "patterns (multiaryconn property)": [[65, "pyk.kore.syntax.MultiaryConn.patterns", false]], "patterns (next property)": [[65, "pyk.kore.syntax.Next.patterns", false]], "patterns (nullaryconn property)": [[65, "pyk.kore.syntax.NullaryConn.patterns", false]], "patterns (pattern property)": [[65, "pyk.kore.syntax.Pattern.patterns", false]], "patterns (rewrites property)": [[65, "pyk.kore.syntax.Rewrites.patterns", false]], "patterns (roundpred property)": [[65, "pyk.kore.syntax.RoundPred.patterns", false]], "patterns (string property)": [[65, "pyk.kore.syntax.String.patterns", false]], "patterns (unaryconn property)": [[65, "pyk.kore.syntax.UnaryConn.patterns", false]], "patterns (varpattern property)": [[65, "pyk.kore.syntax.VarPattern.patterns", false]], "peach (color attribute)": [[9, "pyk.kast.color.Color.PEACH", false]], "peach_puff (color attribute)": [[9, "pyk.kast.color.Color.PEACH_PUFF", false]], "pending (aprproof property)": [[93, "pyk.proof.reachability.APRProof.pending", false]], "pending (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.pending", false]], "pending (proofstatus attribute)": [[92, "pyk.proof.proof.ProofStatus.PENDING", false]], "pending_nodes (aprfailureinfo attribute)": [[93, "pyk.proof.reachability.APRFailureInfo.pending_nodes", false]], "periwinkle (color attribute)": [[9, "pyk.kast.color.Color.PERIWINKLE", false]], "peru (color attribute)": [[9, "pyk.kast.color.Color.PERU", false]], "pid (koreserver property)": [[63, "pyk.kore.rpc.KoreServer.pid", false]], "pid (koreserverinfo attribute)": [[63, "pyk.kore.rpc.KoreServerInfo.pid", false]], "pine_green (color attribute)": [[9, "pyk.kast.color.Color.PINE_GREEN", false]], "pink (color attribute)": [[9, "pyk.kast.color.Color.PINK", false]], "plugin_name (targetid attribute)": [[40, "pyk.kdist.api.TargetId.plugin_name", false]], "plum (color attribute)": [[9, "pyk.kast.color.Color.PLUM", false]], "plus (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.PLUS", false]], "port (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.port", false]], "port (koreclient attribute)": [[63, "pyk.kore.rpc.KoreClient.port", false]], "port (koreserver property)": [[63, "pyk.kore.rpc.KoreServer.port", false]], "port (koreserverargs attribute)": [[63, "pyk.kore.rpc.KoreServerArgs.port", false]], "port (koreserverinfo attribute)": [[63, "pyk.kore.rpc.KoreServerInfo.port", 
false]], "poset (class in pyk.utils)": [[98, "pyk.utils.POSet", false]], "powder_blue (color attribute)": [[9, "pyk.kast.color.Color.POWDER_BLUE", false]], "pre_constraints (refutationproof property)": [[91, "pyk.proof.implies.RefutationProof.pre_constraints", false]], "pre_trace (llvmrewritetrace property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTrace.pre_trace", false]], "pred (subst property)": [[11, "pyk.kast.inner.Subst.pred", false]], "pred() (csubst method)": [[4, "pyk.cterm.cterm.CSubst.pred", false]], "predecessors() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.predecessors", false]], "predicate (atts attribute)": [[8, "pyk.kast.att.Atts.PREDICATE", false]], "predicate (impliesresult attribute)": [[63, "pyk.kore.rpc.ImpliesResult.predicate", false]], "predicate (state attribute)": [[63, "pyk.kore.rpc.State.predicate", false]], "prefer (atts attribute)": [[8, "pyk.kast.att.Atts.PREFER", false]], "pretty (kastoutput attribute)": [[76, "pyk.ktool.kprint.KAstOutput.PRETTY", false]], "pretty (katt property)": [[8, "pyk.kast.att.KAtt.pretty", false]], "pretty (kproveoutput attribute)": [[77, "pyk.ktool.kprove.KProveOutput.PRETTY", false]], "pretty (krunoutput attribute)": [[78, "pyk.ktool.krun.KRunOutput.PRETTY", false]], "pretty (kstate property)": [[70, "pyk.krepl.repl.KState.pretty", false]], "pretty (printoutput attribute)": [[66, "pyk.kore.tools.PrintOutput.PRETTY", false]], "pretty() (aprproofshow method)": [[94, "pyk.proof.show.APRProofShow.pretty", false]], "pretty() (equalityproof method)": [[91, "pyk.proof.implies.EqualityProof.pretty", false]], "pretty() (kcfgshow method)": [[35, "pyk.kcfg.show.KCFGShow.pretty", false]], "pretty() (refutationproof method)": [[91, "pyk.proof.implies.RefutationProof.pretty", false]], "pretty_bytes() (in module pyk.prelude.bytes)": [[82, "pyk.prelude.bytes.pretty_bytes", false]], "pretty_bytes_str() (in module pyk.prelude.bytes)": [[82, "pyk.prelude.bytes.pretty_bytes_str", false]], "pretty_print() (kcfgexplore method)": [[31, "pyk.kcfg.explore.KCFGExplore.pretty_print", false]], "pretty_print() (kprint method)": [[76, "pyk.ktool.kprint.KPrint.pretty_print", false]], "pretty_segments() (aprproofshow method)": [[94, "pyk.proof.show.APRProofShow.pretty_segments", false]], "pretty_segments() (kcfgshow method)": [[35, "pyk.kcfg.show.KCFGShow.pretty_segments", false]], "pretty_string() (in module pyk.prelude.string)": [[88, "pyk.prelude.string.pretty_string", false]], "prettyprinter (class in pyk.kast.pretty)": [[21, "pyk.kast.pretty.PrettyPrinter", false]], "print() (aprfailureinfo method)": [[93, "pyk.proof.reachability.APRFailureInfo.print", false]], "print() (prettyprinter method)": [[21, "pyk.kast.pretty.PrettyPrinter.print", false]], "print_node() (nodeprinter method)": [[35, "pyk.kcfg.show.NodePrinter.print_node", false]], "printoutput (class in pyk.kore.tools)": [[66, "pyk.kore.tools.PrintOutput", false]], "prior_loops_cache (aprproof attribute)": [[93, "pyk.proof.reachability.APRProof.prior_loops_cache", false]], "prior_loops_cache (aprproofstep attribute)": [[93, "pyk.proof.reachability.APRProofStep.prior_loops_cache", false]], "prior_loops_cache_update (aprproofresult attribute)": [[93, "pyk.proof.reachability.APRProofResult.prior_loops_cache_update", false]], "priorities (atts attribute)": [[8, "pyk.kast.att.Atts.PRIORITIES", false]], "priorities (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.priorities", false]], "priorities (ksyntaxpriority attribute)": [[16, "pyk.kast.outer.KSyntaxPriority.priorities", false]], 
"priority (apprule attribute)": [[64, "pyk.kore.rule.AppRule.priority", false]], "priority (atts attribute)": [[8, "pyk.kast.att.Atts.PRIORITY", false]], "priority (ceilrule attribute)": [[64, "pyk.kore.rule.CeilRule.priority", false]], "priority (equalsrule attribute)": [[64, "pyk.kore.rule.EqualsRule.priority", false]], "priority (functionrule attribute)": [[64, "pyk.kore.rule.FunctionRule.priority", false]], "priority (krule property)": [[16, "pyk.kast.outer.KRule.priority", false]], "priority (rewriterule attribute)": [[64, "pyk.kore.rule.RewriteRule.priority", false]], "priority (rule attribute)": [[64, "pyk.kore.rule.Rule.priority", false]], "priorityblock (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.PriorityBlock", false]], "private (atts attribute)": [[8, "pyk.kast.att.Atts.PRIVATE", false]], "process_blue (color attribute)": [[9, "pyk.kast.color.Color.PROCESS_BLUE", false]], "production (atts attribute)": [[8, "pyk.kast.att.Atts.PRODUCTION", false]], "production (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Production", false]], "production_for_cell_sort() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.production_for_cell_sort", false]], "productionitem (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.ProductionItem", false]], "productionlike (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.ProductionLike", false]], "productions (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.productions", false]], "productions (kflatmodule property)": [[16, "pyk.kast.outer.KFlatModule.productions", false]], "productions (priorityblock attribute)": [[19, "pyk.kast.outer_syntax.PriorityBlock.productions", false]], "profile() (in module pyk.testing.plugin)": [[97, "pyk.testing.plugin.profile", false]], "progam (kproveoutput attribute)": [[77, "pyk.ktool.kprove.KProveOutput.PROGAM", false]], "program (kastinput attribute)": [[76, "pyk.ktool.kprint.KAstInput.PROGRAM", false]], "program (kastoutput attribute)": [[76, "pyk.ktool.kprint.KAstOutput.PROGRAM", false]], "program (krunoutput attribute)": [[78, "pyk.ktool.krun.KRunOutput.PROGRAM", false]], "program (printoutput attribute)": [[66, "pyk.kore.tools.PrintOutput.PROGRAM", false]], "program_file (kinterpreter attribute)": [[70, "pyk.krepl.repl.KInterpreter.program_file", false]], "project (class in pyk.kbuild.project)": [[27, "pyk.kbuild.project.Project", false]], "project (kbuildenv attribute)": [[26, "pyk.kbuild.kbuild.KBuildEnv.project", false]], "project_file (project property)": [[27, "pyk.kbuild.project.Project.project_file", false]], "projection (atts attribute)": [[8, "pyk.kast.att.Atts.PROJECTION", false]], "prompt (baserepl attribute)": [[70, "pyk.krepl.repl.BaseRepl.prompt", false]], "proof (aprproofnodeprinter attribute)": [[94, "pyk.proof.show.APRProofNodePrinter.proof", false]], "proof (class in pyk.proof.proof)": [[92, "pyk.proof.proof.Proof", false]], "proof (impliesproofstep attribute)": [[91, "pyk.proof.implies.ImpliesProofStep.proof", false]], "proof (impliesprover attribute)": [[91, "pyk.proof.implies.ImpliesProver.proof", false]], "proof_data_exists() (proof static method)": [[92, "pyk.proof.proof.Proof.proof_data_exists", false]], "proof_dir (proof attribute)": [[92, "pyk.proof.proof.Proof.proof_dir", false]], "proof_exists() (proof static method)": [[92, "pyk.proof.proof.Proof.proof_exists", false]], "proof_id (aprproofstep attribute)": [[93, "pyk.proof.reachability.APRProofStep.proof_id", false]], "proof_subdir (proof property)": [[92, 
"pyk.proof.proof.Proof.proof_subdir", false]], "proofstatus (class in pyk.proof.proof)": [[92, "pyk.proof.proof.ProofStatus", false]], "proofsummary (class in pyk.proof.proof)": [[92, "pyk.proof.proof.ProofSummary", false]], "propagate_up_constraints() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.propagate_up_constraints", false]], "prove() (kprove method)": [[77, "pyk.ktool.kprove.KProve.prove", false]], "prove_claim() (kprove method)": [[77, "pyk.ktool.kprove.KProve.prove_claim", false]], "prove_rpc() (proverpc method)": [[79, "pyk.ktool.prove_rpc.ProveRpc.prove_rpc", false]], "prover (class in pyk.proof.proof)": [[92, "pyk.proof.proof.Prover", false]], "prover (kprove attribute)": [[77, "pyk.ktool.kprove.KProve.prover", false]], "prover_args (kprove attribute)": [[77, "pyk.ktool.kprove.KProve.prover_args", false]], "proverpc (class in pyk.ktool.prove_rpc)": [[79, "pyk.ktool.prove_rpc.ProveRpc", false]], "proxy (logorigin attribute)": [[63, "pyk.kore.rpc.LogOrigin.PROXY", false]], "prune() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.prune", false]], "prune() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.prune", false]], "prune() (kcfgexploration method)": [[30, "pyk.kcfg.exploration.KCFGExploration.prune", false]], "public (import attribute)": [[19, "pyk.kast.outer_syntax.Import.public", false]], "public (kimport attribute)": [[16, "pyk.kast.outer.KImport.public", false]], "purple (color attribute)": [[9, "pyk.kast.color.Color.PURPLE", false]], "push_down_rewrites() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.push_down_rewrites", false]], "pyk": [[1, "module-pyk", false]], "pyk.coverage": [[2, "module-pyk.coverage", false]], "pyk.cterm": [[3, "module-pyk.cterm", false]], "pyk.cterm.cterm": [[4, "module-pyk.cterm.cterm", false]], "pyk.cterm.symbolic": [[5, "module-pyk.cterm.symbolic", false]], "pyk.dequote": [[6, "module-pyk.dequote", false]], "pyk.kast": [[7, "module-pyk.kast", false]], "pyk.kast.att": [[8, "module-pyk.kast.att", false]], "pyk.kast.color": [[9, "module-pyk.kast.color", false]], "pyk.kast.formatter": [[10, "module-pyk.kast.formatter", false]], "pyk.kast.inner": [[11, "module-pyk.kast.inner", false]], "pyk.kast.kast": [[12, "module-pyk.kast.kast", false]], "pyk.kast.lexer": [[13, "module-pyk.kast.lexer", false]], "pyk.kast.manip": [[14, "module-pyk.kast.manip", false]], "pyk.kast.markdown": [[15, "module-pyk.kast.markdown", false]], "pyk.kast.outer": [[16, "module-pyk.kast.outer", false]], "pyk.kast.outer_lexer": [[17, "module-pyk.kast.outer_lexer", false]], "pyk.kast.outer_parser": [[18, "module-pyk.kast.outer_parser", false]], "pyk.kast.outer_syntax": [[19, "module-pyk.kast.outer_syntax", false]], "pyk.kast.parser": [[20, "module-pyk.kast.parser", false]], "pyk.kast.pretty": [[21, "module-pyk.kast.pretty", false]], "pyk.kast.rewrite": [[22, "module-pyk.kast.rewrite", false]], "pyk.kast.utils": [[23, "module-pyk.kast.utils", false]], "pyk.kbuild": [[24, "module-pyk.kbuild", false]], "pyk.kbuild.config": [[25, "module-pyk.kbuild.config", false]], "pyk.kbuild.kbuild": [[26, "module-pyk.kbuild.kbuild", false]], "pyk.kbuild.project": [[27, "module-pyk.kbuild.project", false]], "pyk.kbuild.utils": [[28, "module-pyk.kbuild.utils", false]], "pyk.kcfg": [[29, "module-pyk.kcfg", false]], "pyk.kcfg.exploration": [[30, "module-pyk.kcfg.exploration", false]], "pyk.kcfg.explore": [[31, "module-pyk.kcfg.explore", false]], "pyk.kcfg.kcfg": [[32, "module-pyk.kcfg.kcfg", false]], "pyk.kcfg.minimize": [[33, "module-pyk.kcfg.minimize", false]], "pyk.kcfg.semantics": 
[[34, "module-pyk.kcfg.semantics", false]], "pyk.kcfg.show": [[35, "module-pyk.kcfg.show", false]], "pyk.kcfg.store": [[36, "module-pyk.kcfg.store", false]], "pyk.kcfg.tui": [[37, "module-pyk.kcfg.tui", false]], "pyk.kcovr": [[38, "module-pyk.kcovr", false]], "pyk.kdist": [[39, "module-pyk.kdist", false]], "pyk.kdist.api": [[40, "module-pyk.kdist.api", false]], "pyk.kdist.utils": [[41, "module-pyk.kdist.utils", false]], "pyk.kllvm": [[42, "module-pyk.kllvm", false]], "pyk.kllvm.ast": [[43, "module-pyk.kllvm.ast", false]], "pyk.kllvm.compiler": [[44, "module-pyk.kllvm.compiler", false]], "pyk.kllvm.convert": [[45, "module-pyk.kllvm.convert", false]], "pyk.kllvm.hints": [[46, "module-pyk.kllvm.hints", false]], "pyk.kllvm.hints.prooftrace": [[47, "module-pyk.kllvm.hints.prooftrace", false]], "pyk.kllvm.importer": [[48, "module-pyk.kllvm.importer", false]], "pyk.kllvm.load": [[49, "module-pyk.kllvm.load", false]], "pyk.kllvm.load_static": [[50, "module-pyk.kllvm.load_static", false]], "pyk.kllvm.parser": [[51, "module-pyk.kllvm.parser", false]], "pyk.kllvm.runtime": [[52, "module-pyk.kllvm.runtime", false]], "pyk.kllvm.utils": [[53, "module-pyk.kllvm.utils", false]], "pyk.konvert": [[54, "module-pyk.konvert", false]], "pyk.kore": [[55, "module-pyk.kore", false]], "pyk.kore.kompiled": [[56, "module-pyk.kore.kompiled", false]], "pyk.kore.lexer": [[57, "module-pyk.kore.lexer", false]], "pyk.kore.manip": [[58, "module-pyk.kore.manip", false]], "pyk.kore.match": [[59, "module-pyk.kore.match", false]], "pyk.kore.parser": [[60, "module-pyk.kore.parser", false]], "pyk.kore.pool": [[61, "module-pyk.kore.pool", false]], "pyk.kore.prelude": [[62, "module-pyk.kore.prelude", false]], "pyk.kore.rpc": [[63, "module-pyk.kore.rpc", false]], "pyk.kore.rule": [[64, "module-pyk.kore.rule", false]], "pyk.kore.syntax": [[65, "module-pyk.kore.syntax", false]], "pyk.kore.tools": [[66, "module-pyk.kore.tools", false]], "pyk.kore_exec_covr": [[67, "module-pyk.kore_exec_covr", false]], "pyk.kore_exec_covr.kore_exec_covr": [[68, "module-pyk.kore_exec_covr.kore_exec_covr", false]], "pyk.krepl": [[69, "module-pyk.krepl", false]], "pyk.krepl.repl": [[70, "module-pyk.krepl.repl", false]], "pyk.ktool": [[71, "module-pyk.ktool", false]], "pyk.ktool.claim_index": [[72, "module-pyk.ktool.claim_index", false]], "pyk.ktool.claim_loader": [[73, "module-pyk.ktool.claim_loader", false]], "pyk.ktool.kfuzz": [[74, "module-pyk.ktool.kfuzz", false]], "pyk.ktool.kompile": [[75, "module-pyk.ktool.kompile", false]], "pyk.ktool.kprint": [[76, "module-pyk.ktool.kprint", false]], "pyk.ktool.kprove": [[77, "module-pyk.ktool.kprove", false]], "pyk.ktool.krun": [[78, "module-pyk.ktool.krun", false]], "pyk.ktool.prove_rpc": [[79, "module-pyk.ktool.prove_rpc", false]], "pyk.ktool.utils": [[80, "module-pyk.ktool.utils", false]], "pyk.prelude": [[81, "module-pyk.prelude", false]], "pyk.prelude.bytes": [[82, "module-pyk.prelude.bytes", false]], "pyk.prelude.collections": [[83, "module-pyk.prelude.collections", false]], "pyk.prelude.k": [[84, "module-pyk.prelude.k", false]], "pyk.prelude.kbool": [[85, "module-pyk.prelude.kbool", false]], "pyk.prelude.kint": [[86, "module-pyk.prelude.kint", false]], "pyk.prelude.ml": [[87, "module-pyk.prelude.ml", false]], "pyk.prelude.string": [[88, "module-pyk.prelude.string", false]], "pyk.prelude.utils": [[89, "module-pyk.prelude.utils", false]], "pyk.proof": [[90, "module-pyk.proof", false]], "pyk.proof.implies": [[91, "module-pyk.proof.implies", false]], "pyk.proof.proof": [[92, "module-pyk.proof.proof", false]], 
"pyk.proof.reachability": [[93, "module-pyk.proof.reachability", false]], "pyk.proof.show": [[94, "module-pyk.proof.show", false]], "pyk.proof.tui": [[95, "module-pyk.proof.tui", false]], "pyk.testing": [[96, "module-pyk.testing", false]], "pyk.testing.plugin": [[97, "module-pyk.testing.plugin", false]], "pyk.utils": [[98, "module-pyk.utils", false]], "pykbackend (class in pyk.ktool.kompile)": [[75, "pyk.ktool.kompile.PykBackend", false]], "pytest_addoption() (in module pyk.testing.plugin)": [[97, "pyk.testing.plugin.pytest_addoption", false]], "question (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.QUESTION", false]], "raised() (in module pyk.utils)": [[98, "pyk.utils.raised", false]], "rangemap_empty() (in module pyk.prelude.collections)": [[83, "pyk.prelude.collections.rangemap_empty", false]], "rangemap_item() (in module pyk.prelude.collections)": [[83, "pyk.prelude.collections.rangemap_item", false]], "rangemap_of() (in module pyk.prelude.collections)": [[83, "pyk.prelude.collections.rangemap_of", false]], "rangemap_pattern() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.rangemap_pattern", false]], "raw_sienna (color attribute)": [[9, "pyk.kast.color.Color.RAW_SIENNA", false]], "rbrace (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.RBRACE", false], [57, "pyk.kore.lexer.TokenType.RBRACE", false]], "rbrack (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.RBRACK", false], [57, "pyk.kore.lexer.TokenType.RBRACK", false]], "reachable_nodes() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.reachable_nodes", false]], "read_cfg_data() (kcfg static method)": [[32, "pyk.kcfg.kcfg.KCFG.read_cfg_data", false]], "read_cfg_data() (kcfgstore method)": [[32, "pyk.kcfg.kcfg.KCFGStore.read_cfg_data", false]], "read_kast_definition() (in module pyk.kast.outer)": [[16, "pyk.kast.outer.read_kast_definition", false]], "read_node_data() (kcfg static method)": [[32, "pyk.kcfg.kcfg.KCFG.read_node_data", false]], "read_node_data() (kcfgstore method)": [[32, "pyk.kcfg.kcfg.KCFGStore.read_node_data", false]], "read_proof() (aprproof static method)": [[93, "pyk.proof.reachability.APRProof.read_proof", false]], "read_proof() (proof class method)": [[92, "pyk.proof.proof.Proof.read_proof", false]], "read_proof_data() (aprproof static method)": [[93, "pyk.proof.reachability.APRProof.read_proof_data", false]], "read_proof_data() (equalityproof static method)": [[91, "pyk.proof.implies.EqualityProof.read_proof_data", false]], "read_proof_data() (proof static method)": [[92, "pyk.proof.proof.Proof.read_proof_data", false]], "read_proof_data() (refutationproof static method)": [[91, "pyk.proof.implies.RefutationProof.read_proof_data", false]], "read_subproof() (proof method)": [[92, "pyk.proof.proof.Proof.read_subproof", false]], "read_subproof_data() (proof method)": [[92, "pyk.proof.proof.Proof.read_subproof_data", false]], "reason (abortedresult attribute)": [[63, "pyk.kore.rpc.AbortedResult.reason", false]], "reason (branchingresult attribute)": [[63, "pyk.kore.rpc.BranchingResult.reason", false]], "reason (cutpointresult attribute)": [[63, "pyk.kore.rpc.CutPointResult.reason", false]], "reason (depthboundresult attribute)": [[63, "pyk.kore.rpc.DepthBoundResult.reason", false]], "reason (executeresult attribute)": [[63, "pyk.kore.rpc.ExecuteResult.reason", false]], "reason (rewritefailure attribute)": [[63, "pyk.kore.rpc.RewriteFailure.reason", false]], "reason (stuckresult attribute)": [[63, "pyk.kore.rpc.StuckResult.reason", false]], "reason (terminalresult 
attribute)": [[63, "pyk.kore.rpc.TerminalResult.reason", false]], "reason (timeoutresult attribute)": [[63, "pyk.kore.rpc.TimeoutResult.reason", false]], "reason (vacuousresult attribute)": [[63, "pyk.kore.rpc.VacuousResult.reason", false]], "red (color attribute)": [[9, "pyk.kast.color.Color.RED", false]], "red_orange (color attribute)": [[9, "pyk.kast.color.Color.RED_ORANGE", false]], "red_violet (color attribute)": [[9, "pyk.kast.color.Color.RED_VIOLET", false]], "refutationproof (class in pyk.proof.implies)": [[91, "pyk.proof.implies.RefutationProof", false]], "refutationsummary (class in pyk.proof.implies)": [[91, "pyk.proof.implies.RefutationSummary", false]], "refute_node() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.refute_node", false]], "refuted (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.refuted", false]], "regex (kregexterminal attribute)": [[16, "pyk.kast.outer.KRegexTerminal.regex", false]], "regex (ksyntaxlexical attribute)": [[16, "pyk.kast.outer.KSyntaxLexical.regex", false]], "regex (lexical attribute)": [[19, "pyk.kast.outer_syntax.Lexical.regex", false]], "regex (syntaxlexical attribute)": [[19, "pyk.kast.outer_syntax.SyntaxLexical.regex", false]], "regex (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.REGEX", false]], "relative_position (llvmfunctionevent property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMFunctionEvent.relative_position", false]], "relative_position (llvmhookevent property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMHookEvent.relative_position", false]], "remaining_implication (ctermimplies attribute)": [[5, "pyk.cterm.symbolic.CTermImplies.remaining_implication", false]], "remove_alias() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.remove_alias", false]], "remove_attr() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.remove_attr", false]], "remove_attr() (kcfg.node method)": [[32, "pyk.kcfg.kcfg.KCFG.Node.remove_attr", false]], "remove_attrs() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.remove_attrs", false]], "remove_cell_map_items() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.remove_cell_map_items", false]], "remove_cover() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.remove_cover", false]], "remove_edge() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.remove_edge", false]], "remove_edges_around() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.remove_edges_around", false]], "remove_generated_cells() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.remove_generated_cells", false]], "remove_merged_edge() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.remove_merged_edge", false]], "remove_node() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.remove_node", false]], "remove_node() (kcfgexploration method)": [[30, "pyk.kcfg.exploration.KCFGExploration.remove_node", false]], "remove_semantic_casts() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.remove_semantic_casts", false]], "remove_source_map() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.remove_source_map", false]], "remove_stuck() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.remove_stuck", false]], "remove_subproof() (proof method)": [[92, "pyk.proof.proof.Proof.remove_subproof", false]], "remove_terminal() (kcfgexploration method)": [[30, "pyk.kcfg.exploration.KCFGExploration.remove_terminal", false]], "remove_useless_constraints() (cterm method)": [[4, "pyk.cterm.cterm.CTerm.remove_useless_constraints", false]], "remove_useless_constraints() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.remove_useless_constraints", false]], "remove_vacuous() (kcfg 
method)": [[32, "pyk.kcfg.kcfg.KCFG.remove_vacuous", false]], "rename_generated_vars() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.rename_generated_vars", false]], "render_classes() (in module pyk.kcovr)": [[38, "pyk.kcovr.render_classes", false]], "render_coverage_xml() (in module pyk.kcovr)": [[38, "pyk.kcovr.render_coverage_xml", false]], "render_lines() (in module pyk.kcovr)": [[38, "pyk.kcovr.render_lines", false]], "repeat_last() (in module pyk.utils)": [[98, "pyk.utils.repeat_last", false]], "replace() (krewrite method)": [[11, "pyk.kast.inner.KRewrite.replace", false]], "replace_node() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.replace_node", false]], "replace_rewrites_with_implies() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.replace_rewrites_with_implies", false]], "replace_source() (kcfg.cover method)": [[32, "pyk.kcfg.kcfg.KCFG.Cover.replace_source", false]], "replace_source() (kcfg.edge method)": [[32, "pyk.kcfg.kcfg.KCFG.Edge.replace_source", false]], "replace_source() (kcfg.mergededge method)": [[32, "pyk.kcfg.kcfg.KCFG.MergedEdge.replace_source", false]], "replace_source() (kcfg.ndbranch method)": [[32, "pyk.kcfg.kcfg.KCFG.NDBranch.replace_source", false]], "replace_source() (kcfg.split method)": [[32, "pyk.kcfg.kcfg.KCFG.Split.replace_source", false]], "replace_source() (kcfg.successor method)": [[32, "pyk.kcfg.kcfg.KCFG.Successor.replace_source", false]], "replace_target() (kcfg.cover method)": [[32, "pyk.kcfg.kcfg.KCFG.Cover.replace_target", false]], "replace_target() (kcfg.edge method)": [[32, "pyk.kcfg.kcfg.KCFG.Edge.replace_target", false]], "replace_target() (kcfg.mergededge method)": [[32, "pyk.kcfg.kcfg.KCFG.MergedEdge.replace_target", false]], "replace_target() (kcfg.ndbranch method)": [[32, "pyk.kcfg.kcfg.KCFG.NDBranch.replace_target", false]], "replace_target() (kcfg.split method)": [[32, "pyk.kcfg.kcfg.KCFG.Split.replace_target", false]], "replace_target() (kcfg.successor method)": [[32, "pyk.kcfg.kcfg.KCFG.Successor.replace_target", false]], "replace_top() (krewrite method)": [[11, "pyk.kast.inner.KRewrite.replace_top", false]], "replerror": [[70, "pyk.krepl.repl.ReplError", false]], "req (apprule attribute)": [[64, "pyk.kore.rule.AppRule.req", false]], "req (ceilrule attribute)": [[64, "pyk.kore.rule.CeilRule.req", false]], "req (equalsrule attribute)": [[64, "pyk.kore.rule.EqualsRule.req", false]], "req (functionrule attribute)": [[64, "pyk.kore.rule.FunctionRule.req", false]], "req (rewriterule attribute)": [[64, "pyk.kore.rule.RewriteRule.req", false]], "req (rule attribute)": [[64, "pyk.kore.rule.Rule.req", false]], "request() (jsonrpcclient method)": [[63, "pyk.kore.rpc.JsonRpcClient.request", false]], "request() (jsonrpcclientfacade method)": [[63, "pyk.kore.rpc.JsonRpcClientFacade.request", false]], "request() (transport method)": [[63, "pyk.kore.rpc.Transport.request", false]], "require (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Require", false]], "require (krequire attribute)": [[16, "pyk.kast.outer.KRequire.require", false]], "require() (outerparser method)": [[18, "pyk.kast.outer_parser.OuterParser.require", false]], "requires (definition attribute)": [[19, "pyk.kast.outer_syntax.Definition.requires", false]], "requires (kclaim attribute)": [[16, "pyk.kast.outer.KClaim.requires", false]], "requires (kcontext attribute)": [[16, "pyk.kast.outer.KContext.requires", false]], "requires (kdefinition attribute)": [[16, "pyk.kast.outer.KDefinition.requires", false]], "requires (krule attribute)": [[16, 
"pyk.kast.outer.KRule.requires", false]], "requires (krulelike attribute)": [[16, "pyk.kast.outer.KRuleLike.requires", false]], "reset() (color static method)": [[9, "pyk.kast.color.Color.reset", false]], "reset_code() (color static method)": [[9, "pyk.kast.color.Color.reset_code", false]], "resolve() (claimindex method)": [[72, "pyk.ktool.claim_index.ClaimIndex.resolve", false]], "resolve() (koresymboltable method)": [[56, "pyk.kore.kompiled.KoreSymbolTable.resolve", false]], "resolve() (packagesource method)": [[27, "pyk.kbuild.project.PackageSource.resolve", false]], "resolve() (pathsource method)": [[27, "pyk.kbuild.project.PathSource.resolve", false]], "resolve() (source method)": [[27, "pyk.kbuild.project.Source.resolve", false]], "resolve_all() (claimindex method)": [[72, "pyk.ktool.claim_index.ClaimIndex.resolve_all", false]], "resolve_sorts() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.resolve_sorts", false]], "resource_file_names (project property)": [[27, "pyk.kbuild.project.Project.resource_file_names", false]], "resource_files (project property)": [[27, "pyk.kbuild.project.Project.resource_files", false]], "resources (project attribute)": [[27, "pyk.kbuild.project.Project.resources", false]], "result (llvmhookevent property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMHookEvent.result", false]], "result (logrewrite attribute)": [[63, "pyk.kore.rpc.LogRewrite.result", false]], "returns_unit (atts attribute)": [[8, "pyk.kast.att.Atts.RETURNS_UNIT", false]], "rev (kversion.git attribute)": [[28, "pyk.kbuild.utils.KVersion.Git.rev", false]], "rewritefailure (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.RewriteFailure", false]], "rewriteresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.RewriteResult", false]], "rewriterule (class in pyk.kore.rule)": [[64, "pyk.kore.rule.RewriteRule", false]], "rewrites (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Rewrites", false]], "rewrites() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.rewrites", false]], "rewritesuccess (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.RewriteSuccess", false]], "rewritten_term (rewritesuccess attribute)": [[63, "pyk.kore.rpc.RewriteSuccess.rewritten_term", false]], "rhodamine (color attribute)": [[9, "pyk.kast.color.Color.RHODAMINE", false]], "rhs (apprule attribute)": [[64, "pyk.kore.rule.AppRule.rhs", false]], "rhs (ceilrule attribute)": [[64, "pyk.kore.rule.CeilRule.rhs", false]], "rhs (equalsrule attribute)": [[64, "pyk.kore.rule.EqualsRule.rhs", false]], "rhs (functionrule attribute)": [[64, "pyk.kore.rule.FunctionRule.rhs", false]], "rhs (krewrite attribute)": [[11, "pyk.kast.inner.KRewrite.rhs", false]], "rhs (rewriterule attribute)": [[64, "pyk.kore.rule.RewriteRule.rhs", false]], "rhs (rule attribute)": [[64, "pyk.kore.rule.Rule.rhs", false]], "rhs_body (equalityproof property)": [[91, "pyk.proof.implies.EqualityProof.rhs_body", false]], "right (aliasdecl attribute)": [[65, "pyk.kore.syntax.AliasDecl.right", false]], "right (assoc attribute)": [[19, "pyk.kast.outer_syntax.Assoc.RIGHT", false]], "right (atts attribute)": [[8, "pyk.kast.att.Atts.RIGHT", false]], "right (binaryconn attribute)": [[65, "pyk.kore.syntax.BinaryConn.right", false]], "right (binarypred attribute)": [[65, "pyk.kore.syntax.BinaryPred.right", false]], "right (equals attribute)": [[65, "pyk.kore.syntax.Equals.right", false]], "right (iff attribute)": [[65, "pyk.kore.syntax.Iff.right", false]], "right (implies attribute)": [[65, "pyk.kore.syntax.Implies.right", false]], "right (in attribute)": [[65, 
"pyk.kore.syntax.In.right", false]], "right (kassoc attribute)": [[16, "pyk.kast.outer.KAssoc.RIGHT", false]], "right (rewrites attribute)": [[65, "pyk.kore.syntax.Rewrites.right", false]], "right_assoc() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.right_assoc", false]], "right_assocs (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.right_assocs", false]], "rightassoc (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.RightAssoc", false]], "root (kcfg property)": [[32, "pyk.kcfg.kcfg.KCFG.root", false]], "rosy_brown (color attribute)": [[9, "pyk.kast.color.Color.ROSY_BROWN", false]], "roundpred (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.RoundPred", false]], "royal_blue (color attribute)": [[9, "pyk.kast.color.Color.ROYAL_BLUE", false]], "royal_purple (color attribute)": [[9, "pyk.kast.color.Color.ROYAL_PURPLE", false]], "rparen (tokentype attribute)": [[13, "pyk.kast.lexer.TokenType.RPAREN", false], [17, "pyk.kast.outer_lexer.TokenType.RPAREN", false], [57, "pyk.kore.lexer.TokenType.RPAREN", false]], "rshiftint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.rshiftInt", false]], "rtld_local() (in module pyk.kllvm.importer)": [[48, "pyk.kllvm.importer.rtld_local", false]], "rubine_red (color attribute)": [[9, "pyk.kast.color.Color.RUBINE_RED", false]], "rule (abortedresult attribute)": [[63, "pyk.kore.rpc.AbortedResult.rule", false]], "rule (branchingresult attribute)": [[63, "pyk.kore.rpc.BranchingResult.rule", false]], "rule (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Rule", false]], "rule (class in pyk.kore.rule)": [[64, "pyk.kore.rule.Rule", false]], "rule (cutpointresult attribute)": [[63, "pyk.kore.rpc.CutPointResult.rule", false]], "rule (depthboundresult attribute)": [[63, "pyk.kore.rpc.DepthBoundResult.rule", false]], "rule (executeresult attribute)": [[63, "pyk.kore.rpc.ExecuteResult.rule", false]], "rule (kastinput attribute)": [[76, "pyk.ktool.kprint.KAstInput.RULE", false]], "rule (stuckresult attribute)": [[63, "pyk.kore.rpc.StuckResult.rule", false]], "rule (terminalresult attribute)": [[63, "pyk.kore.rpc.TerminalResult.rule", false]], "rule (timeoutresult attribute)": [[63, "pyk.kore.rpc.TimeoutResult.rule", false]], "rule (vacuousresult attribute)": [[63, "pyk.kore.rpc.VacuousResult.rule", false]], "rule_id (aprproof property)": [[93, "pyk.proof.reachability.APRProof.rule_id", false]], "rule_id (rewritefailure attribute)": [[63, "pyk.kore.rpc.RewriteFailure.rule_id", false]], "rule_id (rewriteresult attribute)": [[63, "pyk.kore.rpc.RewriteResult.rule_id", false]], "rule_id (rewritesuccess attribute)": [[63, "pyk.kore.rpc.RewriteSuccess.rule_id", false]], "rule_id (state attribute)": [[63, "pyk.kore.rpc.State.rule_id", false]], "rule_label (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.RULE_LABEL", false]], "rule_labels (ndbranch attribute)": [[32, "pyk.kcfg.kcfg.NDBranch.rule_labels", false]], "rule_labels (step attribute)": [[32, "pyk.kcfg.kcfg.Step.rule_labels", false]], "rule_ordinal (llvmrewriteevent property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteEvent.rule_ordinal", false]], "rule_ordinal (llvmruleevent property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRuleEvent.rule_ordinal", false]], "rule_ordinal (llvmsideconditionevententer property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventEnter.rule_ordinal", false]], "rule_ordinal (llvmsideconditioneventexit property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventExit.rule_ordinal", false]], "rule_predicate (state 
attribute)": [[63, "pyk.kore.rpc.State.rule_predicate", false]], "rule_substitution (state attribute)": [[63, "pyk.kore.rpc.State.rule_substitution", false]], "rules (kcfg.edge attribute)": [[32, "pyk.kcfg.kcfg.KCFG.Edge.rules", false]], "rules (kcfg.ndbranch attribute)": [[32, "pyk.kcfg.kcfg.KCFG.NDBranch.rules", false]], "rules (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.rules", false]], "rules (kflatmodule property)": [[16, "pyk.kast.outer.KFlatModule.rules", false]], "run() (krun method)": [[78, "pyk.ktool.krun.KRun.run", false]], "run() (runtime method)": [[52, "pyk.kllvm.runtime.Runtime.run", false]], "run() (term method)": [[52, "pyk.kllvm.runtime.Term.run", false]], "run_pattern() (krun method)": [[78, "pyk.ktool.krun.KRun.run_pattern", false]], "run_process() (in module pyk.utils)": [[98, "pyk.utils.run_process", false]], "run_process() (krun method)": [[78, "pyk.ktool.krun.KRun.run_process", false]], "run_process_2() (in module pyk.utils)": [[98, "pyk.utils.run_process_2", false]], "run_proof_hint() (krun method)": [[78, "pyk.ktool.krun.KRun.run_proof_hint", false]], "runtime (class in pyk.kllvm.runtime)": [[52, "pyk.kllvm.runtime.Runtime", false]], "saddle_brown (color attribute)": [[9, "pyk.kast.color.Color.SADDLE_BROWN", false]], "salmon (color attribute)": [[9, "pyk.kast.color.Color.SALMON", false]], "same_loop() (defaultsemantics method)": [[34, "pyk.kcfg.semantics.DefaultSemantics.same_loop", false]], "same_loop() (kcfgsemantics method)": [[34, "pyk.kcfg.semantics.KCFGSemantics.same_loop", false]], "sandy_brown (color attribute)": [[9, "pyk.kast.color.Color.SANDY_BROWN", false]], "satresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.SatResult", false]], "sea_green (color attribute)": [[9, "pyk.kast.color.Color.SEA_GREEN", false]], "seashell (color attribute)": [[9, "pyk.kast.color.Color.SEASHELL", false]], "section_edge() (kcfgexplore method)": [[31, "pyk.kcfg.explore.KCFGExplore.section_edge", false]], "select_code_blocks() (in module pyk.kast.markdown)": [[15, "pyk.kast.markdown.select_code_blocks", false]], "selector (class in pyk.kast.markdown)": [[15, "pyk.kast.markdown.Selector", false]], "selector_lexer() (in module pyk.kast.markdown)": [[15, "pyk.kast.markdown.selector_lexer", false]], "selectorparser (class in pyk.kast.markdown)": [[15, "pyk.kast.markdown.SelectorParser", false]], "semantic_rules (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.semantic_rules", false]], "semantics (kcfgminimizer attribute)": [[33, "pyk.kcfg.minimize.KCFGMinimizer.semantics", false]], "sentence (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Sentence", false]], "sentence (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Sentence", false]], "sentence() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.sentence", false]], "sentence() (outerparser method)": [[18, "pyk.kast.outer_parser.OuterParser.sentence", false]], "sentence_by_unique_id (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.sentence_by_unique_id", false]], "sentence_by_unique_id (kflatmodule property)": [[16, "pyk.kast.outer.KFlatModule.sentence_by_unique_id", false]], "sentence_to_llvm() (in module pyk.kllvm.convert)": [[45, "pyk.kllvm.convert.sentence_to_llvm", false]], "sentence_type (kbubble attribute)": [[16, "pyk.kast.outer.KBubble.sentence_type", false]], "sentences (kflatmodule attribute)": [[16, "pyk.kast.outer.KFlatModule.sentences", false]], "sentences (module attribute)": [[19, "pyk.kast.outer_syntax.Module.sentences", false], [65, 
"pyk.kore.syntax.Module.sentences", false]], "sep (userlist attribute)": [[19, "pyk.kast.outer_syntax.UserList.sep", false]], "sepia (color attribute)": [[9, "pyk.kast.color.Color.SEPIA", false]], "seqstrict (atts attribute)": [[8, "pyk.kast.att.Atts.SEQSTRICT", false]], "serialize() (term method)": [[52, "pyk.kllvm.runtime.Term.serialize", false]], "set() (color method)": [[9, "pyk.kast.color.Color.set", false]], "set_cell() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.set_cell", false]], "set_empty() (in module pyk.prelude.collections)": [[83, "pyk.prelude.collections.set_empty", false]], "set_exec_time() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.set_exec_time", false]], "set_item() (in module pyk.prelude.collections)": [[83, "pyk.prelude.collections.set_item", false]], "set_of() (in module pyk.prelude.collections)": [[83, "pyk.prelude.collections.set_of", false]], "set_pattern() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.set_pattern", false]], "set_var() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.set_var", false]], "set_var_id (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.SET_VAR_ID", false]], "set_var_id() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.set_var_id", false]], "setvarid (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.SetVarId", false]], "shorten_hash() (in module pyk.utils)": [[98, "pyk.utils.shorten_hash", false]], "shorten_hashes() (in module pyk.utils)": [[98, "pyk.utils.shorten_hashes", false]], "shortest_distance_between() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.shortest_distance_between", false]], "shortest_path_between() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.shortest_path_between", false]], "shortest_path_to() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.shortest_path_to", false]], "shortest_path_to_node (aprproofstep attribute)": [[93, "pyk.proof.reachability.APRProofStep.shortest_path_to_node", false]], "show() (aprproofshow method)": [[94, "pyk.proof.show.APRProofShow.show", false]], "show() (kcfgshow method)": [[35, "pyk.kcfg.show.KCFGShow.show", false]], "sienna (color attribute)": [[9, "pyk.kast.color.Color.SIENNA", false]], "silver (color attribute)": [[9, "pyk.kast.color.Color.SILVER", false]], "simplification (atts attribute)": [[8, "pyk.kast.att.Atts.SIMPLIFICATION", false]], "simplified_antecedent (impliesproof attribute)": [[91, "pyk.proof.implies.ImpliesProof.simplified_antecedent", false]], "simplified_antecedent (impliesproofresult attribute)": [[91, "pyk.proof.implies.ImpliesProofResult.simplified_antecedent", false]], "simplified_consequent (impliesproof attribute)": [[91, "pyk.proof.implies.ImpliesProof.simplified_consequent", false]], "simplified_consequent (impliesproofresult attribute)": [[91, "pyk.proof.implies.ImpliesProofResult.simplified_consequent", false]], "simplified_constraints (equalityproof property)": [[91, "pyk.proof.implies.EqualityProof.simplified_constraints", false]], "simplified_constraints (refutationproof property)": [[91, "pyk.proof.implies.RefutationProof.simplified_constraints", false]], "simplified_equality (equalityproof property)": [[91, "pyk.proof.implies.EqualityProof.simplified_equality", false]], "simplify() (ctermsymbolic method)": [[5, "pyk.cterm.symbolic.CTermSymbolic.simplify", false]], "simplify() (kcfgexplore method)": [[31, "pyk.kcfg.explore.KCFGExplore.simplify", false]], "simplify() (koreclient method)": [[63, "pyk.kore.rpc.KoreClient.simplify", false]], "simplify() (runtime method)": [[52, 
"pyk.kllvm.runtime.Runtime.simplify", false]], "simplify_bool() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.simplify_bool", false]], "simplify_bool() (runtime method)": [[52, "pyk.kllvm.runtime.Runtime.simplify_bool", false]], "simplify_config() (kcfgshow static method)": [[35, "pyk.kcfg.show.KCFGShow.simplify_config", false]], "simplirule (class in pyk.kore.rule)": [[64, "pyk.kore.rule.SimpliRule", false]], "single() (in module pyk.utils)": [[98, "pyk.utils.single", false]], "single_socket (transporttype attribute)": [[63, "pyk.kore.rpc.TransportType.SINGLE_SOCKET", false]], "singlesockettransport (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.SingleSocketTransport", false]], "sky_blue (color attribute)": [[9, "pyk.kast.color.Color.SKY_BLUE", false]], "slate_blue (color attribute)": [[9, "pyk.kast.color.Color.SLATE_BLUE", false]], "slate_gray (color attribute)": [[9, "pyk.kast.color.Color.SLATE_GRAY", false]], "slate_grey (color attribute)": [[9, "pyk.kast.color.Color.SLATE_GREY", false]], "slurp_definitions() (in module pyk.kast.utils)": [[23, "pyk.kast.utils.slurp_definitions", false]], "smt_reset_interval (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.smt_reset_interval", false]], "smt_reset_interval (koreserverargs attribute)": [[63, "pyk.kore.rpc.KoreServerArgs.smt_reset_interval", false]], "smt_retry_limit (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.smt_retry_limit", false]], "smt_retry_limit (koreserverargs attribute)": [[63, "pyk.kore.rpc.KoreServerArgs.smt_retry_limit", false]], "smt_tactic (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.smt_tactic", false]], "smt_tactic (koreserverargs attribute)": [[63, "pyk.kore.rpc.KoreServerArgs.smt_tactic", false]], "smt_timeout (boosterserverargs attribute)": [[63, "pyk.kore.rpc.BoosterServerArgs.smt_timeout", false]], "smt_timeout (koreserverargs attribute)": [[63, "pyk.kore.rpc.KoreServerArgs.smt_timeout", false]], "smtlemma (atts attribute)": [[8, "pyk.kast.att.Atts.SMTLEMMA", false]], "smtsolvererror": [[63, "pyk.kore.rpc.SmtSolverError", false]], "snow (color attribute)": [[9, "pyk.kast.color.Color.SNOW", false]], "some() (in module pyk.utils)": [[98, "pyk.utils.some", false]], "sort (aliasdecl attribute)": [[65, "pyk.kore.syntax.AliasDecl.sort", false]], "sort (and attribute)": [[65, "pyk.kore.syntax.And.sort", false]], "sort (apprule attribute)": [[64, "pyk.kore.rule.AppRule.sort", false]], "sort (atts attribute)": [[8, "pyk.kast.att.Atts.SORT", false]], "sort (bottom attribute)": [[65, "pyk.kore.syntax.Bottom.sort", false]], "sort (ceil attribute)": [[65, "pyk.kore.syntax.Ceil.sort", false]], "sort (ceilrule attribute)": [[64, "pyk.kore.rule.CeilRule.sort", false]], "sort (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Sort", false]], "sort (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Sort", false]], "sort (dv attribute)": [[65, "pyk.kore.syntax.DV.sort", false]], "sort (equalityproof property)": [[91, "pyk.proof.implies.EqualityProof.sort", false]], "sort (equals attribute)": [[65, "pyk.kore.syntax.Equals.sort", false]], "sort (equalsrule attribute)": [[64, "pyk.kore.rule.EqualsRule.sort", false]], "sort (evar attribute)": [[65, "pyk.kore.syntax.EVar.sort", false]], "sort (exists attribute)": [[65, "pyk.kore.syntax.Exists.sort", false]], "sort (floor attribute)": [[65, "pyk.kore.syntax.Floor.sort", false]], "sort (forall attribute)": [[65, "pyk.kore.syntax.Forall.sort", false]], "sort (functionrule attribute)": [[64, 
"pyk.kore.rule.FunctionRule.sort", false]], "sort (iff attribute)": [[65, "pyk.kore.syntax.Iff.sort", false]], "sort (implies attribute)": [[65, "pyk.kore.syntax.Implies.sort", false]], "sort (in attribute)": [[65, "pyk.kore.syntax.In.sort", false]], "sort (knonterminal attribute)": [[16, "pyk.kast.outer.KNonTerminal.sort", false]], "sort (kproduction attribute)": [[16, "pyk.kast.outer.KProduction.sort", false]], "sort (ksyntaxsort attribute)": [[16, "pyk.kast.outer.KSyntaxSort.sort", false]], "sort (ktoken attribute)": [[11, "pyk.kast.inner.KToken.sort", false]], "sort (kvariable attribute)": [[11, "pyk.kast.inner.KVariable.sort", false]], "sort (mlquant attribute)": [[65, "pyk.kore.syntax.MLQuant.sort", false]], "sort (next attribute)": [[65, "pyk.kore.syntax.Next.sort", false]], "sort (nonterminal attribute)": [[19, "pyk.kast.outer_syntax.NonTerminal.sort", false]], "sort (not attribute)": [[65, "pyk.kore.syntax.Not.sort", false]], "sort (or attribute)": [[65, "pyk.kore.syntax.Or.sort", false]], "sort (rewriterule attribute)": [[64, "pyk.kore.rule.RewriteRule.sort", false]], "sort (rewrites attribute)": [[65, "pyk.kore.syntax.Rewrites.sort", false]], "sort (rule attribute)": [[64, "pyk.kore.rule.Rule.sort", false]], "sort (simplirule attribute)": [[64, "pyk.kore.rule.SimpliRule.sort", false]], "sort (state attribute)": [[13, "pyk.kast.lexer.State.SORT", false]], "sort (svar attribute)": [[65, "pyk.kore.syntax.SVar.sort", false]], "sort (symboldecl attribute)": [[65, "pyk.kore.syntax.SymbolDecl.sort", false]], "sort (tokentype attribute)": [[13, "pyk.kast.lexer.TokenType.SORT", false]], "sort (top attribute)": [[65, "pyk.kore.syntax.Top.sort", false]], "sort (userlist attribute)": [[19, "pyk.kast.outer_syntax.UserList.sort", false]], "sort (varpattern attribute)": [[65, "pyk.kore.syntax.VarPattern.sort", false]], "sort (withsort attribute)": [[65, "pyk.kore.syntax.WithSort.sort", false]], "sort() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.sort", false]], "sort() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.sort", false]], "sort_ac_collections() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.sort_ac_collections", false]], "sort_app() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.sort_app", false]], "sort_assoc_label() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.sort_assoc_label", false]], "sort_decl() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.sort_decl", false]], "sort_strict() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.sort_strict", false]], "sort_table (kompiledkore attribute)": [[56, "pyk.kore.kompiled.KompiledKore.sort_table", false]], "sort_to_llvm() (in module pyk.kllvm.convert)": [[45, "pyk.kllvm.convert.sort_to_llvm", false]], "sort_var() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.sort_var", false]], "sort_vars() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.sort_vars", false]], "sortapp (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.SortApp", false]], "sortdecl (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.SortDecl", false]], "sortdecl (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.SortDecl", false]], "sorts (app attribute)": [[65, "pyk.kore.syntax.App.sorts", false]], "sorts (assoc attribute)": [[65, "pyk.kore.syntax.Assoc.sorts", false]], "sorts (binarypred property)": [[65, "pyk.kore.syntax.BinaryPred.sorts", false]], "sorts (dv property)": [[65, "pyk.kore.syntax.DV.sorts", false]], "sorts (leftassoc attribute)": [[65, "pyk.kore.syntax.LeftAssoc.sorts", 
false]], "sorts (mlconn property)": [[65, "pyk.kore.syntax.MLConn.sorts", false]], "sorts (mlfixpoint property)": [[65, "pyk.kore.syntax.MLFixpoint.sorts", false]], "sorts (mlpattern property)": [[65, "pyk.kore.syntax.MLPattern.sorts", false]], "sorts (mlquant property)": [[65, "pyk.kore.syntax.MLQuant.sorts", false]], "sorts (mlrewrite property)": [[65, "pyk.kore.syntax.MLRewrite.sorts", false]], "sorts (rightassoc attribute)": [[65, "pyk.kore.syntax.RightAssoc.sorts", false]], "sorts (roundpred property)": [[65, "pyk.kore.syntax.RoundPred.sorts", false]], "sorts (sortapp attribute)": [[65, "pyk.kore.syntax.SortApp.sorts", false]], "sortvar (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.SortVar", false]], "source (ast attribute)": [[19, "pyk.kast.outer_syntax.AST.source", false]], "source (atts attribute)": [[8, "pyk.kast.att.Atts.SOURCE", false]], "source (class in pyk.kbuild.project)": [[27, "pyk.kbuild.project.Source", false]], "source (kcfg.cover attribute)": [[32, "pyk.kcfg.kcfg.KCFG.Cover.source", false]], "source (kcfg.edge attribute)": [[32, "pyk.kcfg.kcfg.KCFG.Edge.source", false]], "source (kcfg.edgelike attribute)": [[32, "pyk.kcfg.kcfg.KCFG.EdgeLike.source", false]], "source (kcfg.mergededge attribute)": [[32, "pyk.kcfg.kcfg.KCFG.MergedEdge.source", false]], "source (kcfg.multiedge attribute)": [[32, "pyk.kcfg.kcfg.KCFG.MultiEdge.source", false]], "source (kcfg.ndbranch attribute)": [[32, "pyk.kcfg.kcfg.KCFG.NDBranch.source", false]], "source (kcfg.split attribute)": [[32, "pyk.kcfg.kcfg.KCFG.Split.source", false]], "source (kcfg.successor attribute)": [[32, "pyk.kcfg.kcfg.KCFG.Successor.source", false]], "source (ksentence property)": [[16, "pyk.kast.outer.KSentence.source", false]], "source() (target method)": [[40, "pyk.kdist.api.Target.source", false]], "source_dir (project attribute)": [[27, "pyk.kbuild.project.Project.source_dir", false]], "source_file_names (project property)": [[27, "pyk.kbuild.project.Project.source_file_names", false]], "source_files (project property)": [[27, "pyk.kbuild.project.Project.source_files", false]], "source_vars (kcfg.successor property)": [[32, "pyk.kcfg.kcfg.KCFG.Successor.source_vars", false]], "split_config_and_constraints() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.split_config_and_constraints", false]], "split_config_from() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.split_config_from", false]], "split_on_constraints() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.split_on_constraints", false]], "splits (kcfg.split property)": [[32, "pyk.kcfg.kcfg.KCFG.Split.splits", false]], "splits() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.splits", false]], "spring_green (color attribute)": [[9, "pyk.kast.color.Color.SPRING_GREEN", false]], "standard (koreexeclogformat attribute)": [[63, "pyk.kore.rpc.KoreExecLogFormat.STANDARD", false]], "start() (koreserver method)": [[63, "pyk.kore.rpc.KoreServer.start", false]], "state (abortedresult attribute)": [[63, "pyk.kore.rpc.AbortedResult.state", false]], "state (baserepl attribute)": [[70, "pyk.krepl.repl.BaseRepl.state", false]], "state (branchingresult attribute)": [[63, "pyk.kore.rpc.BranchingResult.state", false]], "state (class in pyk.kast.lexer)": [[13, "pyk.kast.lexer.State", false]], "state (class in pyk.kast.outer_lexer)": [[17, "pyk.kast.outer_lexer.State", false]], "state (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.State", false]], "state (ctermexecute attribute)": [[5, "pyk.cterm.symbolic.CTermExecute.state", false]], "state (cutpointresult attribute)": [[63, 
"pyk.kore.rpc.CutPointResult.state", false]], "state (depthboundresult attribute)": [[63, "pyk.kore.rpc.DepthBoundResult.state", false]], "state (executeresult attribute)": [[63, "pyk.kore.rpc.ExecuteResult.state", false]], "state (nextstate attribute)": [[5, "pyk.cterm.symbolic.NextState.state", false]], "state (stuckresult attribute)": [[63, "pyk.kore.rpc.StuckResult.state", false]], "state (terminalresult attribute)": [[63, "pyk.kore.rpc.TerminalResult.state", false]], "state (timeoutresult attribute)": [[63, "pyk.kore.rpc.TimeoutResult.state", false]], "state (vacuousresult attribute)": [[63, "pyk.kore.rpc.VacuousResult.state", false]], "status (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.status", false]], "status (class in pyk.kcfg.tui)": [[37, "pyk.kcfg.tui.Status", false]], "status (equalitysummary attribute)": [[91, "pyk.proof.implies.EqualitySummary.status", false]], "status (proof property)": [[92, "pyk.proof.proof.Proof.status", false]], "status (proofsummary attribute)": [[92, "pyk.proof.proof.ProofSummary.status", false]], "status (refutationsummary attribute)": [[91, "pyk.proof.implies.RefutationSummary.status", false]], "steel_blue (color attribute)": [[9, "pyk.kast.color.Color.STEEL_BLUE", false]], "step (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.Step", false]], "step() (kcfgexplore method)": [[31, "pyk.kcfg.explore.KCFGExplore.step", false]], "step() (runtime method)": [[52, "pyk.kllvm.runtime.Runtime.step", false]], "step() (term method)": [[52, "pyk.kllvm.runtime.Term.step", false]], "step_event (llvmargument property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMArgument.step_event", false]], "step_proof() (aprprover method)": [[93, "pyk.proof.reachability.APRProver.step_proof", false]], "step_proof() (impliesprover method)": [[91, "pyk.proof.implies.ImpliesProver.step_proof", false]], "step_proof() (prover method)": [[92, "pyk.proof.proof.Prover.step_proof", false]], "stopreason (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.StopReason", false]], "store_path (kcfgstore attribute)": [[32, "pyk.kcfg.kcfg.KCFGStore.store_path", false]], "str_dv() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.str_dv", false]], "strict (atts attribute)": [[8, "pyk.kast.att.Atts.STRICT", false]], "string (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.String", false]], "string (tokentype attribute)": [[13, "pyk.kast.lexer.TokenType.STRING", false], [17, "pyk.kast.outer_lexer.TokenType.STRING", false], [57, "pyk.kore.lexer.TokenType.STRING", false]], "string() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.string", false]], "string2json() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.string2json", false]], "string_sentence() (outerparser method)": [[18, "pyk.kast.outer_parser.OuterParser.string_sentence", false]], "stringsentence (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.StringSentence", false]], "stringtoken() (in module pyk.prelude.string)": [[88, "pyk.prelude.string.stringToken", false]], "strip_coverage_logger() (in module pyk.coverage)": [[2, "pyk.coverage.strip_coverage_logger", false]], "strtype (class in pyk.kast.att)": [[8, "pyk.kast.att.StrType", false]], "stuck (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.stuck", false]], "stuck (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.Stuck", false]], "stuck (fallbackreason attribute)": [[63, "pyk.kore.rpc.FallbackReason.STUCK", false]], "stuck (kcfg property)": [[32, "pyk.kcfg.kcfg.KCFG.stuck", false]], "stuck (kcfgnodeattr attribute)": [[32, 
"pyk.kcfg.kcfg.KCFGNodeAttr.STUCK", false]], "stuck (stopreason attribute)": [[63, "pyk.kore.rpc.StopReason.STUCK", false]], "stuckresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.StuckResult", false]], "sub_projects (project property)": [[27, "pyk.kbuild.project.Project.sub_projects", false]], "subint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.subInt", false]], "submit() (koreserverpool method)": [[61, "pyk.kore.pool.KoreServerPool.submit", false]], "subproof_ids (proof property)": [[92, "pyk.proof.proof.Proof.subproof_ids", false]], "subproofs (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.subproofs", false]], "subproofs (proof property)": [[92, "pyk.proof.proof.Proof.subproofs", false]], "subproofs_status (proof property)": [[92, "pyk.proof.proof.Proof.subproofs_status", false]], "subsort_table (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.subsort_table", false]], "subsorts() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.subsorts", false]], "subst (class in pyk.kast.inner)": [[11, "pyk.kast.inner.Subst", false]], "subst (csubst attribute)": [[4, "pyk.cterm.cterm.CSubst.subst", false]], "substitution (impliesresult attribute)": [[63, "pyk.kore.rpc.ImpliesResult.substitution", false]], "substitution (llvmrewriteevent property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteEvent.substitution", false]], "substitution (llvmruleevent property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRuleEvent.substitution", false]], "substitution (llvmsideconditionevententer property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventEnter.substitution", false]], "substitution (state attribute)": [[63, "pyk.kore.rpc.State.substitution", false]], "successors() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.successors", false]], "summaries (compositesummary attribute)": [[92, "pyk.proof.proof.CompositeSummary.summaries", false]], "summary (aprproof property)": [[93, "pyk.proof.reachability.APRProof.summary", false]], "summary (equalityproof property)": [[91, "pyk.proof.implies.EqualityProof.summary", false]], "summary (proof property)": [[92, "pyk.proof.proof.Proof.summary", false]], "summary (refutationproof property)": [[91, "pyk.proof.implies.RefutationProof.summary", false]], "svar (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.SVar", false]], "symbol (app attribute)": [[65, "pyk.kore.syntax.App.symbol", false]], "symbol (assoc attribute)": [[65, "pyk.kore.syntax.Assoc.symbol", false]], "symbol (atts attribute)": [[8, "pyk.kast.att.Atts.SYMBOL", false]], "symbol (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Symbol", false]], "symbol (leftassoc attribute)": [[65, "pyk.kore.syntax.LeftAssoc.symbol", false]], "symbol (rightassoc attribute)": [[65, "pyk.kore.syntax.RightAssoc.symbol", false]], "symbol (symboldecl attribute)": [[65, "pyk.kore.syntax.SymbolDecl.symbol", false]], "symbol() (and class method)": [[65, "pyk.kore.syntax.And.symbol", false]], "symbol() (bottom class method)": [[65, "pyk.kore.syntax.Bottom.symbol", false]], "symbol() (ceil class method)": [[65, "pyk.kore.syntax.Ceil.symbol", false]], "symbol() (dv class method)": [[65, "pyk.kore.syntax.DV.symbol", false]], "symbol() (equals class method)": [[65, "pyk.kore.syntax.Equals.symbol", false]], "symbol() (exists class method)": [[65, "pyk.kore.syntax.Exists.symbol", false]], "symbol() (floor class method)": [[65, "pyk.kore.syntax.Floor.symbol", false]], "symbol() (forall class method)": [[65, "pyk.kore.syntax.Forall.symbol", false]], "symbol() (iff class method)": [[65, 
"pyk.kore.syntax.Iff.symbol", false]], "symbol() (implies class method)": [[65, "pyk.kore.syntax.Implies.symbol", false]], "symbol() (in class method)": [[65, "pyk.kore.syntax.In.symbol", false]], "symbol() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.symbol", false]], "symbol() (mlpattern class method)": [[65, "pyk.kore.syntax.MLPattern.symbol", false]], "symbol() (mu class method)": [[65, "pyk.kore.syntax.Mu.symbol", false]], "symbol() (next class method)": [[65, "pyk.kore.syntax.Next.symbol", false]], "symbol() (not class method)": [[65, "pyk.kore.syntax.Not.symbol", false]], "symbol() (nu class method)": [[65, "pyk.kore.syntax.Nu.symbol", false]], "symbol() (or class method)": [[65, "pyk.kore.syntax.Or.symbol", false]], "symbol() (rewrites class method)": [[65, "pyk.kore.syntax.Rewrites.symbol", false]], "symbol() (top class method)": [[65, "pyk.kore.syntax.Top.symbol", false]], "symbol_decl() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.symbol_decl", false]], "symbol_decls (module property)": [[65, "pyk.kore.syntax.Module.symbol_decls", false]], "symbol_id (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.SYMBOL_ID", false]], "symbol_id() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.symbol_id", false]], "symbol_table (kompiledkore attribute)": [[56, "pyk.kore.kompiled.KompiledKore.symbol_table", false]], "symbol_table (prettyprinter property)": [[21, "pyk.kast.pretty.PrettyPrinter.symbol_table", false]], "symboldecl (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.SymbolDecl", false]], "symbolic (atts attribute)": [[8, "pyk.kast.att.Atts.SYMBOLIC", false]], "symbolid (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.SymbolId", false]], "symbols (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.symbols", false]], "sync() (kbuildenv method)": [[26, "pyk.kbuild.kbuild.KBuildEnv.sync", false]], "sync_files() (in module pyk.kbuild.utils)": [[28, "pyk.kbuild.utils.sync_files", false]], "syntax (state attribute)": [[17, "pyk.kast.outer_lexer.State.SYNTAX", false]], "syntax_module (atts attribute)": [[8, "pyk.kast.att.Atts.SYNTAX_MODULE", false]], "syntax_productions (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.syntax_productions", false]], "syntax_productions (kflatmodule property)": [[16, "pyk.kast.outer.KFlatModule.syntax_productions", false]], "syntax_sentence() (outerparser method)": [[18, "pyk.kast.outer_parser.OuterParser.syntax_sentence", false]], "syntax_sorts (kflatmodule property)": [[16, "pyk.kast.outer.KFlatModule.syntax_sorts", false]], "syntax_symbols (kdefinition property)": [[16, "pyk.kast.outer.KDefinition.syntax_symbols", false]], "syntaxassoc (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.SyntaxAssoc", false]], "syntaxdecl (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.SyntaxDecl", false]], "syntaxdefn (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.SyntaxDefn", false]], "syntaxlexical (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.SyntaxLexical", false]], "syntaxpriority (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.SyntaxPriority", false]], "syntaxsentence (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.SyntaxSentence", false]], "syntaxsynonym (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.SyntaxSynonym", false]], "tags (ksyntaxassociativity attribute)": [[16, "pyk.kast.outer.KSyntaxAssociativity.tags", false]], "tan (color attribute)": [[9, "pyk.kast.color.Color.TAN", false]], "target 
(aprproof attribute)": [[93, "pyk.proof.reachability.APRProof.target", false]], "target (aprproofstep attribute)": [[93, "pyk.proof.reachability.APRProofStep.target", false]], "target (class in pyk.kbuild.project)": [[27, "pyk.kbuild.project.Target", false]], "target (class in pyk.kdist.api)": [[40, "pyk.kdist.api.Target", false]], "target (kcfg.cover attribute)": [[32, "pyk.kcfg.kcfg.KCFG.Cover.target", false]], "target (kcfg.edge attribute)": [[32, "pyk.kcfg.kcfg.KCFG.Edge.target", false]], "target (kcfg.edgelike attribute)": [[32, "pyk.kcfg.kcfg.KCFG.EdgeLike.target", false]], "target (kcfg.mergededge attribute)": [[32, "pyk.kcfg.kcfg.KCFG.MergedEdge.target", false]], "target_ids (kcfg.successor property)": [[32, "pyk.kcfg.kcfg.KCFG.Successor.target_ids", false]], "target_name (targetid attribute)": [[40, "pyk.kdist.api.TargetId.target_name", false]], "target_vars (kcfg.successor property)": [[32, "pyk.kcfg.kcfg.KCFG.Successor.target_vars", false]], "targetid (class in pyk.kdist.api)": [[40, "pyk.kdist.api.TargetId", false]], "targets (kcfg.edgelike property)": [[32, "pyk.kcfg.kcfg.KCFG.EdgeLike.targets", false]], "targets (kcfg.ndbranch property)": [[32, "pyk.kcfg.kcfg.KCFG.NDBranch.targets", false]], "targets (kcfg.split property)": [[32, "pyk.kcfg.kcfg.KCFG.Split.targets", false]], "targets (kcfg.successor property)": [[32, "pyk.kcfg.kcfg.KCFG.Successor.targets", false]], "targets (project attribute)": [[27, "pyk.kbuild.project.Project.targets", false]], "teal (color attribute)": [[9, "pyk.kast.color.Color.TEAL", false]], "teal_blue (color attribute)": [[9, "pyk.kast.color.Color.TEAL_BLUE", false]], "term (class in pyk.kcfg.tui)": [[37, "pyk.kcfg.tui.Term", false]], "term (class in pyk.kllvm.runtime)": [[52, "pyk.kllvm.runtime.Term", false]], "term (state attribute)": [[63, "pyk.kore.rpc.State.term", false]], "term() (runtime method)": [[52, "pyk.kllvm.runtime.Runtime.term", false]], "terminal (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.terminal", false]], "terminal (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.Terminal", false]], "terminal (kcfgexploration property)": [[30, "pyk.kcfg.exploration.KCFGExploration.terminal", false]], "terminal (kcfgexplorationnodeattr attribute)": [[30, "pyk.kcfg.exploration.KCFGExplorationNodeAttr.TERMINAL", false]], "terminal_ids (kcfgexploration property)": [[30, "pyk.kcfg.exploration.KCFGExploration.terminal_ids", false]], "terminal_rule (stopreason attribute)": [[63, "pyk.kore.rpc.StopReason.TERMINAL_RULE", false]], "terminal_rules (aprprover attribute)": [[93, "pyk.proof.reachability.APRProver.terminal_rules", false]], "terminalresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.TerminalResult", false]], "terminals (atts attribute)": [[8, "pyk.kast.att.Atts.TERMINALS", false]], "terminator_symbol (atts attribute)": [[8, "pyk.kast.att.Atts.TERMINATOR_SYMBOL", false]], "terms (kapply property)": [[11, "pyk.kast.inner.KApply.terms", false]], "terms (kas property)": [[11, "pyk.kast.inner.KAs.terms", false]], "terms (kinner property)": [[11, "pyk.kast.inner.KInner.terms", false]], "terms (krewrite property)": [[11, "pyk.kast.inner.KRewrite.terms", false]], "terms (ksequence property)": [[11, "pyk.kast.inner.KSequence.terms", false]], "terms (ktoken property)": [[11, "pyk.kast.inner.KToken.terms", false]], "terms (kvariable property)": [[11, "pyk.kast.inner.KVariable.terms", false]], "text (info attribute)": [[37, "pyk.kcfg.tui.Info.text", false]], "text (kore property)": [[65, "pyk.kore.syntax.Kore.text", 
false]], "text (koretoken attribute)": [[57, "pyk.kore.lexer.KoreToken.text", false]], "text (kversion property)": [[28, "pyk.kbuild.utils.KVersion.text", false]], "text (navwidget attribute)": [[37, "pyk.kcfg.tui.NavWidget.text", false]], "text (token attribute)": [[13, "pyk.kast.lexer.Token.text", false], [17, "pyk.kast.outer_lexer.Token.text", false]], "thistle (color attribute)": [[9, "pyk.kast.color.Color.THISTLE", false]], "tilde (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.TILDE", false]], "time (behaviorview.selected attribute)": [[37, "pyk.kcfg.tui.BehaviorView.Selected.time", false]], "time (graphchunk.selected attribute)": [[37, "pyk.kcfg.tui.GraphChunk.Selected.time", false]], "time (navwidget.selected attribute)": [[37, "pyk.kcfg.tui.NavWidget.Selected.time", false]], "timeout (stopreason attribute)": [[63, "pyk.kore.rpc.StopReason.TIMEOUT", false]], "timeoutresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.TimeoutResult", false]], "times (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.TIMES", false]], "timestamp() (in module pyk.kdist.utils)": [[41, "pyk.kdist.utils.timestamp", false]], "to_axiom() (functionrule method)": [[64, "pyk.kore.rule.FunctionRule.to_axiom", false]], "to_axiom() (rewriterule method)": [[64, "pyk.kore.rule.RewriteRule.to_axiom", false]], "to_axiom() (rule method)": [[64, "pyk.kore.rule.Rule.to_axiom", false]], "to_axiom() (simplirule method)": [[64, "pyk.kore.rule.SimpliRule.to_axiom", false]], "to_claim() (refutationproof method)": [[91, "pyk.proof.implies.RefutationProof.to_claim", false]], "to_dict() (anytype method)": [[8, "pyk.kast.att.AnyType.to_dict", false]], "to_dict() (atttype method)": [[8, "pyk.kast.att.AttType.to_dict", false]], "to_dict() (colorstype method)": [[8, "pyk.kast.att.ColorsType.to_dict", false]], "to_dict() (colortype method)": [[8, "pyk.kast.att.ColorType.to_dict", false]], "to_dict() (csubst method)": [[4, "pyk.cterm.cterm.CSubst.to_dict", false]], "to_dict() (cterm method)": [[4, "pyk.cterm.cterm.CTerm.to_dict", false]], "to_dict() (formattype method)": [[8, "pyk.kast.att.FormatType.to_dict", false]], "to_dict() (inttype method)": [[8, "pyk.kast.att.IntType.to_dict", false]], "to_dict() (kast method)": [[12, "pyk.kast.kast.KAst.to_dict", false]], "to_dict() (katt method)": [[8, "pyk.kast.att.KAtt.to_dict", false]], "to_dict() (kbubble method)": [[16, "pyk.kast.outer.KBubble.to_dict", false]], "to_dict() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.to_dict", false]], "to_dict() (kcfg.cover method)": [[32, "pyk.kcfg.kcfg.KCFG.Cover.to_dict", false]], "to_dict() (kcfg.edge method)": [[32, "pyk.kcfg.kcfg.KCFG.Edge.to_dict", false]], "to_dict() (kcfg.mergededge method)": [[32, "pyk.kcfg.kcfg.KCFG.MergedEdge.to_dict", false]], "to_dict() (kcfg.ndbranch method)": [[32, "pyk.kcfg.kcfg.KCFG.NDBranch.to_dict", false]], "to_dict() (kcfg.node method)": [[32, "pyk.kcfg.kcfg.KCFG.Node.to_dict", false]], "to_dict() (kcfg.split method)": [[32, "pyk.kcfg.kcfg.KCFG.Split.to_dict", false]], "to_dict() (kcfg.successor method)": [[32, "pyk.kcfg.kcfg.KCFG.Successor.to_dict", false]], "to_dict() (kcfgexploration method)": [[30, "pyk.kcfg.exploration.KCFGExploration.to_dict", false]], "to_dict() (kclaim method)": [[16, "pyk.kast.outer.KClaim.to_dict", false]], "to_dict() (kcontext method)": [[16, "pyk.kast.outer.KContext.to_dict", false]], "to_dict() (kdefinition method)": [[16, "pyk.kast.outer.KDefinition.to_dict", false]], "to_dict() (kflatmodule method)": [[16, "pyk.kast.outer.KFlatModule.to_dict", false]], 
"to_dict() (kflatmodulelist method)": [[16, "pyk.kast.outer.KFlatModuleList.to_dict", false]], "to_dict() (kimport method)": [[16, "pyk.kast.outer.KImport.to_dict", false]], "to_dict() (kinner method)": [[11, "pyk.kast.inner.KInner.to_dict", false]], "to_dict() (klabel method)": [[11, "pyk.kast.inner.KLabel.to_dict", false]], "to_dict() (knonterminal method)": [[16, "pyk.kast.outer.KNonTerminal.to_dict", false]], "to_dict() (kompiledkore method)": [[56, "pyk.kore.kompiled.KompiledKore.to_dict", false]], "to_dict() (kproduction method)": [[16, "pyk.kast.outer.KProduction.to_dict", false]], "to_dict() (kregexterminal method)": [[16, "pyk.kast.outer.KRegexTerminal.to_dict", false]], "to_dict() (krequire method)": [[16, "pyk.kast.outer.KRequire.to_dict", false]], "to_dict() (krule method)": [[16, "pyk.kast.outer.KRule.to_dict", false]], "to_dict() (ksort method)": [[11, "pyk.kast.inner.KSort.to_dict", false]], "to_dict() (ksortsynonym method)": [[16, "pyk.kast.outer.KSortSynonym.to_dict", false]], "to_dict() (ksyntaxassociativity method)": [[16, "pyk.kast.outer.KSyntaxAssociativity.to_dict", false]], "to_dict() (ksyntaxlexical method)": [[16, "pyk.kast.outer.KSyntaxLexical.to_dict", false]], "to_dict() (ksyntaxpriority method)": [[16, "pyk.kast.outer.KSyntaxPriority.to_dict", false]], "to_dict() (ksyntaxsort method)": [[16, "pyk.kast.outer.KSyntaxSort.to_dict", false]], "to_dict() (kterminal method)": [[16, "pyk.kast.outer.KTerminal.to_dict", false]], "to_dict() (locationtype method)": [[8, "pyk.kast.att.LocationType.to_dict", false]], "to_dict() (logentry method)": [[63, "pyk.kore.rpc.LogEntry.to_dict", false]], "to_dict() (logrewrite method)": [[63, "pyk.kore.rpc.LogRewrite.to_dict", false]], "to_dict() (nonetype method)": [[8, "pyk.kast.att.NoneType.to_dict", false]], "to_dict() (optionaltype method)": [[8, "pyk.kast.att.OptionalType.to_dict", false]], "to_dict() (pathtype method)": [[8, "pyk.kast.att.PathType.to_dict", false]], "to_dict() (rewritefailure method)": [[63, "pyk.kore.rpc.RewriteFailure.to_dict", false]], "to_dict() (rewriteresult method)": [[63, "pyk.kore.rpc.RewriteResult.to_dict", false]], "to_dict() (rewritesuccess method)": [[63, "pyk.kore.rpc.RewriteSuccess.to_dict", false]], "to_dict() (strtype method)": [[8, "pyk.kast.att.StrType.to_dict", false]], "to_dict() (subst method)": [[11, "pyk.kast.inner.Subst.to_dict", false]], "to_dict_no_nodes() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.to_dict_no_nodes", false]], "to_json() (kast method)": [[12, "pyk.kast.kast.KAst.to_json", false]], "to_json() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.to_json", false]], "to_module() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.to_module", false]], "to_module() (kcfgshow method)": [[35, "pyk.kcfg.show.KCFGShow.to_module", false]], "to_rule() (kcfg.edge method)": [[32, "pyk.kcfg.kcfg.KCFG.Edge.to_rule", false]], "to_rule() (kcfg.mergededge method)": [[32, "pyk.kcfg.kcfg.KCFG.MergedEdge.to_rule", false]], "to_rules() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.to_rules", false]], "toggle_option() (nodeview method)": [[37, "pyk.kcfg.tui.NodeView.toggle_option", false]], "toggle_view() (nodeview method)": [[37, "pyk.kcfg.tui.NodeView.toggle_view", false]], "token (atts attribute)": [[8, "pyk.kast.att.Atts.TOKEN", false]], "token (class in pyk.kast.lexer)": [[13, "pyk.kast.lexer.Token", false]], "token (class in pyk.kast.outer_lexer)": [[17, "pyk.kast.outer_lexer.Token", false]], "token (ktoken attribute)": [[11, "pyk.kast.inner.KToken.token", false]], "token (tokentype attribute)": [[13, 
"pyk.kast.lexer.TokenType.TOKEN", false]], "token() (in module pyk.prelude.utils)": [[89, "pyk.prelude.utils.token", false]], "tokens (format attribute)": [[8, "pyk.kast.att.Format.tokens", false]], "tokentype (class in pyk.kast.lexer)": [[13, "pyk.kast.lexer.TokenType", false]], "tokentype (class in pyk.kast.outer_lexer)": [[17, "pyk.kast.outer_lexer.TokenType", false]], "tokentype (class in pyk.kore.lexer)": [[57, "pyk.kore.lexer.TokenType", false]], "tomato (color attribute)": [[9, "pyk.kast.color.Color.TOMATO", false]], "top (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.Top", false]], "top() (cterm static method)": [[4, "pyk.cterm.cterm.CTerm.top", false]], "top() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.top", false]], "top_cell_initializer() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.top_cell_initializer", false]], "top_down() (in module pyk.kast.inner)": [[11, "pyk.kast.inner.top_down", false]], "top_down() (pattern method)": [[65, "pyk.kore.syntax.Pattern.top_down", false]], "total (atts attribute)": [[8, "pyk.kast.att.Atts.TOTAL", false]], "trace (llvmrewritetrace property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTrace.trace", false]], "translate_coverage() (in module pyk.coverage)": [[2, "pyk.coverage.translate_coverage", false]], "translate_coverage_from_paths() (in module pyk.coverage)": [[2, "pyk.coverage.translate_coverage_from_paths", false]], "transport (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.Transport", false]], "transporttype (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.TransportType", false]], "trusted (atts attribute)": [[8, "pyk.kast.att.Atts.TRUSTED", false]], "try_cell() (cterm method)": [[4, "pyk.cterm.cterm.CTerm.try_cell", false]], "tuple_of() (in module pyk.utils)": [[98, "pyk.utils.tuple_of", false]], "turquoise (color attribute)": [[9, "pyk.kast.color.Color.TURQUOISE", false]], "type (attkey attribute)": [[8, "pyk.kast.att.AttKey.type", false]], "type (atts attribute)": [[8, "pyk.kast.att.Atts.TYPE", false]], "type (koretoken attribute)": [[57, "pyk.kore.lexer.KoreToken.type", false]], "type (llvmeventannotated property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMEventAnnotated.type", false]], "type (token attribute)": [[13, "pyk.kast.lexer.Token.type", false], [17, "pyk.kast.outer_lexer.Token.type", false]], "uid (rewriterule attribute)": [[64, "pyk.kore.rule.RewriteRule.uid", false]], "unapply() (subst method)": [[11, "pyk.kast.inner.Subst.unapply", false]], "unaryconn (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.UnaryConn", false]], "uncovered (kcfg property)": [[32, "pyk.kcfg.kcfg.KCFG.uncovered", false]], "undo_aliases() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.undo_aliases", false]], "union() (subst method)": [[11, "pyk.kast.inner.Subst.union", false]], "unique() (in module pyk.utils)": [[98, "pyk.utils.unique", false]], "unique_id (atts attribute)": [[8, "pyk.kast.att.Atts.UNIQUE_ID", false]], "unique_id (ksentence property)": [[16, "pyk.kast.outer.KSentence.unique_id", false]], "unit (atts attribute)": [[8, "pyk.kast.att.Atts.UNIT", false]], "unknown_predicate (abortedresult attribute)": [[63, "pyk.kore.rpc.AbortedResult.unknown_predicate", false]], "unknownmoduleerror": [[63, "pyk.kore.rpc.UnknownModuleError", false]], "unknownresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.UnknownResult", false]], "unparse() (anytype method)": [[8, "pyk.kast.att.AnyType.unparse", false]], "unparse() (atttype method)": [[8, "pyk.kast.att.AttType.unparse", false]], "unparse() (colorstype method)": [[8, 
"pyk.kast.att.ColorsType.unparse", false]], "unparse() (colortype method)": [[8, "pyk.kast.att.ColorType.unparse", false]], "unparse() (format method)": [[8, "pyk.kast.att.Format.unparse", false]], "unparse() (formattype method)": [[8, "pyk.kast.att.FormatType.unparse", false]], "unparse() (inttype method)": [[8, "pyk.kast.att.IntType.unparse", false]], "unparse() (locationtype method)": [[8, "pyk.kast.att.LocationType.unparse", false]], "unparse() (nonetype method)": [[8, "pyk.kast.att.NoneType.unparse", false]], "unparse() (optionaltype method)": [[8, "pyk.kast.att.OptionalType.unparse", false]], "unparse() (pathtype method)": [[8, "pyk.kast.att.PathType.unparse", false]], "unparse() (strtype method)": [[8, "pyk.kast.att.StrType.unparse", false]], "unparse_avoid (atts attribute)": [[8, "pyk.kast.att.Atts.UNPARSE_AVOID", false]], "unparser_for_production() (in module pyk.kast.pretty)": [[21, "pyk.kast.pretty.unparser_for_production", false]], "unrefute_node() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.unrefute_node", false]], "unsatresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.UnsatResult", false]], "up_to_date (proof property)": [[92, "pyk.proof.proof.Proof.up_to_date", false]], "up_to_date() (kbuild method)": [[26, "pyk.kbuild.kbuild.KBuild.up_to_date", false]], "update (atts attribute)": [[8, "pyk.kast.att.Atts.UPDATE", false]], "update() (info method)": [[37, "pyk.kcfg.tui.Info.update", false]], "update() (katt method)": [[8, "pyk.kast.att.KAtt.update", false]], "update() (navwidget method)": [[37, "pyk.kcfg.tui.NavWidget.update", false]], "update() (nodeview method)": [[37, "pyk.kcfg.tui.NodeView.update", false]], "update_atts() (withkatt method)": [[8, "pyk.kast.att.WithKAtt.update_atts", false]], "use_cache (aprproofstep attribute)": [[93, "pyk.proof.reachability.APRProofStep.use_cache", false]], "use_directory (kprint attribute)": [[76, "pyk.ktool.kprint.KPrint.use_directory", false]], "use_server() (in module pyk.testing.plugin)": [[97, "pyk.testing.plugin.use_server", false]], "useless_vars_to_dots() (in module pyk.kast.manip)": [[14, "pyk.kast.manip.useless_vars_to_dots", false]], "user_list (atts attribute)": [[8, "pyk.kast.att.Atts.USER_LIST", false]], "userlist (class in pyk.kast.outer_syntax)": [[19, "pyk.kast.outer_syntax.UserList", false]], "vacuous (aprsummary attribute)": [[93, "pyk.proof.reachability.APRSummary.vacuous", false]], "vacuous (class in pyk.kcfg.kcfg)": [[32, "pyk.kcfg.kcfg.Vacuous", false]], "vacuous (ctermexecute attribute)": [[5, "pyk.cterm.symbolic.CTermExecute.vacuous", false]], "vacuous (kcfg property)": [[32, "pyk.kcfg.kcfg.KCFG.vacuous", false]], "vacuous (kcfgnodeattr attribute)": [[32, "pyk.kcfg.kcfg.KCFGNodeAttr.VACUOUS", false]], "vacuous (stopreason attribute)": [[63, "pyk.kore.rpc.StopReason.VACUOUS", false]], "vacuousresult (class in pyk.kore.rpc)": [[63, "pyk.kore.rpc.VacuousResult", false]], "valid (impliesresult attribute)": [[63, "pyk.kore.rpc.ImpliesResult.valid", false]], "valid_id() (in module pyk.kdist.api)": [[40, "pyk.kdist.api.valid_id", false]], "value (attentry attribute)": [[8, "pyk.kast.att.AttEntry.value", false]], "value (dv attribute)": [[65, "pyk.kore.syntax.DV.value", false]], "value (id attribute)": [[65, "pyk.kore.syntax.Id.value", false]], "value (kterminal attribute)": [[16, "pyk.kast.outer.KTerminal.value", false]], "value (nodeattr attribute)": [[32, "pyk.kcfg.kcfg.NodeAttr.value", false]], "value (setvarid attribute)": [[65, "pyk.kore.syntax.SetVarId.value", false]], "value (string 
attribute)": [[65, "pyk.kore.syntax.String.value", false]], "value (symbolid attribute)": [[65, "pyk.kore.syntax.SymbolId.value", false]], "value (terminal attribute)": [[19, "pyk.kast.outer_syntax.Terminal.value", false]], "var (exists attribute)": [[65, "pyk.kore.syntax.Exists.var", false]], "var (forall attribute)": [[65, "pyk.kore.syntax.Forall.var", false]], "var (mlfixpoint attribute)": [[65, "pyk.kore.syntax.MLFixpoint.var", false]], "var (mlquant attribute)": [[65, "pyk.kore.syntax.MLQuant.var", false]], "var (mu attribute)": [[65, "pyk.kore.syntax.Mu.var", false]], "var (nu attribute)": [[65, "pyk.kore.syntax.Nu.var", false]], "var_occurrences() (in module pyk.kast.inner)": [[11, "pyk.kast.inner.var_occurrences", false]], "var_pattern() (koreparser method)": [[60, "pyk.kore.parser.KoreParser.var_pattern", false]], "variable (tokentype attribute)": [[13, "pyk.kast.lexer.TokenType.VARIABLE", false]], "varpattern (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.VarPattern", false]], "vars (axiom attribute)": [[65, "pyk.kore.syntax.Axiom.vars", false]], "vars (axiomlike attribute)": [[65, "pyk.kore.syntax.AxiomLike.vars", false]], "vars (claim attribute)": [[65, "pyk.kore.syntax.Claim.vars", false]], "vars (sortdecl attribute)": [[65, "pyk.kore.syntax.SortDecl.vars", false]], "vars (symbol attribute)": [[65, "pyk.kore.syntax.Symbol.vars", false]], "vbar (tokentype attribute)": [[17, "pyk.kast.outer_lexer.TokenType.VBAR", false]], "verbose (behaviorview.selected attribute)": [[37, "pyk.kcfg.tui.BehaviorView.Selected.verbose", false]], "verbose (graphchunk.selected attribute)": [[37, "pyk.kcfg.tui.GraphChunk.Selected.verbose", false]], "verbose (navwidget.selected attribute)": [[37, "pyk.kcfg.tui.NavWidget.Selected.verbose", false]], "version (llvmrewritetrace property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTrace.version", false]], "version (llvmrewritetraceiterator property)": [[47, "pyk.kllvm.hints.prooftrace.LLVMRewriteTraceIterator.version", false]], "version (project attribute)": [[27, "pyk.kbuild.project.Project.version", false]], "version() (kast static method)": [[12, "pyk.kast.kast.KAst.version", false]], "violet (color attribute)": [[9, "pyk.kast.color.Color.VIOLET", false]], "violet_red (color attribute)": [[9, "pyk.kast.color.Color.VIOLET_RED", false]], "walrus (tokentype attribute)": [[57, "pyk.kore.lexer.TokenType.WALRUS", false]], "watch_text() (info method)": [[37, "pyk.kcfg.tui.Info.watch_text", false]], "watch_text() (navwidget method)": [[37, "pyk.kcfg.tui.NavWidget.watch_text", false]], "wheat (color attribute)": [[9, "pyk.kast.color.Color.WHEAT", false]], "white (color attribute)": [[9, "pyk.kast.color.Color.WHITE", false]], "white_smoke (color attribute)": [[9, "pyk.kast.color.Color.WHITE_SMOKE", false]], "wild_strawberry (color attribute)": [[9, "pyk.kast.color.Color.WILD_STRAWBERRY", false]], "with_single_target() (kcfg.multiedge method)": [[32, "pyk.kcfg.kcfg.KCFG.MultiEdge.with_single_target", false]], "with_single_target() (kcfg.ndbranch method)": [[32, "pyk.kcfg.kcfg.KCFG.NDBranch.with_single_target", false]], "with_single_target() (kcfg.split method)": [[32, "pyk.kcfg.kcfg.KCFG.Split.with_single_target", false]], "withattrs (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.WithAttrs", false]], "withkatt (class in pyk.kast.att)": [[8, "pyk.kast.att.WithKAtt", false]], "withsort (class in pyk.kore.syntax)": [[65, "pyk.kore.syntax.WithSort", false]], "wrap_element (atts attribute)": [[8, "pyk.kast.att.Atts.WRAP_ELEMENT", false]], "write() 
(aliasdecl method)": [[65, "pyk.kore.syntax.AliasDecl.write", false]], "write() (app method)": [[65, "pyk.kore.syntax.App.write", false]], "write() (assoc method)": [[65, "pyk.kore.syntax.Assoc.write", false]], "write() (axiomlike method)": [[65, "pyk.kore.syntax.AxiomLike.write", false]], "write() (definition method)": [[65, "pyk.kore.syntax.Definition.write", false]], "write() (import method)": [[65, "pyk.kore.syntax.Import.write", false]], "write() (kompiledkore method)": [[56, "pyk.kore.kompiled.KompiledKore.write", false]], "write() (kore method)": [[65, "pyk.kore.syntax.Kore.write", false]], "write() (mlpattern method)": [[65, "pyk.kore.syntax.MLPattern.write", false]], "write() (module method)": [[65, "pyk.kore.syntax.Module.write", false]], "write() (sortapp method)": [[65, "pyk.kore.syntax.SortApp.write", false]], "write() (sortdecl method)": [[65, "pyk.kore.syntax.SortDecl.write", false]], "write() (sortvar method)": [[65, "pyk.kore.syntax.SortVar.write", false]], "write() (string method)": [[65, "pyk.kore.syntax.String.write", false]], "write() (symbol method)": [[65, "pyk.kore.syntax.Symbol.write", false]], "write() (symboldecl method)": [[65, "pyk.kore.syntax.SymbolDecl.write", false]], "write() (varpattern method)": [[65, "pyk.kore.syntax.VarPattern.write", false]], "write_cfg_data() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.write_cfg_data", false]], "write_cfg_data() (kcfgstore method)": [[32, "pyk.kcfg.kcfg.KCFGStore.write_cfg_data", false]], "write_proof() (proof method)": [[92, "pyk.proof.proof.Proof.write_proof", false]], "write_proof_data() (aprproof method)": [[93, "pyk.proof.reachability.APRProof.write_proof_data", false]], "write_proof_data() (impliesproof method)": [[91, "pyk.proof.implies.ImpliesProof.write_proof_data", false]], "write_proof_data() (proof method)": [[92, "pyk.proof.proof.Proof.write_proof_data", false]], "xor_bool() (in module pyk.kore.prelude)": [[62, "pyk.kore.prelude.xor_bool", false]], "xorint() (in module pyk.prelude.kint)": [[86, "pyk.prelude.kint.xorInt", false]], "yellow (color attribute)": [[9, "pyk.kast.color.Color.YELLOW", false]], "yellow_green (color attribute)": [[9, "pyk.kast.color.Color.YELLOW_GREEN", false]], "yellow_orange (color attribute)": [[9, "pyk.kast.color.Color.YELLOW_ORANGE", false]], "zero_depth_between() (kcfg method)": [[32, "pyk.kcfg.kcfg.KCFG.zero_depth_between", false]]}, "objects": {"": [[1, 0, 0, "-", "pyk"]], "pyk": [[2, 0, 0, "-", "coverage"], [3, 0, 0, "-", "cterm"], [6, 0, 0, "-", "dequote"], [7, 0, 0, "-", "kast"], [24, 0, 0, "-", "kbuild"], [29, 0, 0, "-", "kcfg"], [38, 0, 0, "-", "kcovr"], [39, 0, 0, "-", "kdist"], [42, 0, 0, "-", "kllvm"], [54, 0, 0, "-", "konvert"], [55, 0, 0, "-", "kore"], [67, 0, 0, "-", "kore_exec_covr"], [69, 0, 0, "-", "krepl"], [71, 0, 0, "-", "ktool"], [81, 0, 0, "-", "prelude"], [90, 0, 0, "-", "proof"], [96, 0, 0, "-", "testing"], [98, 0, 0, "-", "utils"]], "pyk.coverage": [[2, 1, 1, "", "get_rule_by_id"], [2, 1, 1, "", "strip_coverage_logger"], [2, 1, 1, "", "translate_coverage"], [2, 1, 1, "", "translate_coverage_from_paths"]], "pyk.cterm": [[4, 0, 0, "-", "cterm"], [5, 0, 0, "-", "symbolic"]], "pyk.cterm.cterm": [[4, 2, 1, "", "CSubst"], [4, 2, 1, "", "CTerm"], [4, 1, 1, "", "anti_unify"], [4, 1, 1, "", "cterm_build_claim"], [4, 1, 1, "", "cterm_build_rule"], [4, 1, 1, "", "cterms_anti_unify"]], "pyk.cterm.cterm.CSubst": [[4, 3, 1, "", "__call__"], [4, 3, 1, "", "__init__"], [4, 3, 1, "", "__iter__"], [4, 3, 1, "", "add_constraint"], [4, 3, 1, "", "apply"], [4, 4, 1, "", 
"constraint"], [4, 5, 1, "", "constraints"], [4, 3, 1, "", "from_dict"], [4, 3, 1, "", "from_pred"], [4, 3, 1, "", "pred"], [4, 5, 1, "", "subst"], [4, 3, 1, "", "to_dict"]], "pyk.cterm.cterm.CTerm": [[4, 3, 1, "", "__init__"], [4, 3, 1, "", "__iter__"], [4, 3, 1, "", "add_constraint"], [4, 3, 1, "", "anti_unify"], [4, 3, 1, "", "bottom"], [4, 3, 1, "", "cell"], [4, 4, 1, "", "cells"], [4, 5, 1, "", "config"], [4, 5, 1, "", "constraints"], [4, 4, 1, "", "free_vars"], [4, 3, 1, "", "from_dict"], [4, 3, 1, "", "from_kast"], [4, 4, 1, "", "hash"], [4, 4, 1, "", "is_bottom"], [4, 4, 1, "", "kast"], [4, 3, 1, "", "match"], [4, 3, 1, "", "match_with_constraint"], [4, 3, 1, "", "remove_useless_constraints"], [4, 3, 1, "", "to_dict"], [4, 3, 1, "", "top"], [4, 3, 1, "", "try_cell"]], "pyk.cterm.symbolic": [[5, 2, 1, "", "CTermExecute"], [5, 2, 1, "", "CTermImplies"], [5, 6, 1, "", "CTermSMTError"], [5, 2, 1, "", "CTermSymbolic"], [5, 2, 1, "", "NextState"], [5, 1, 1, "", "cterm_symbolic"]], "pyk.cterm.symbolic.CTermExecute": [[5, 5, 1, "", "depth"], [5, 5, 1, "", "logs"], [5, 5, 1, "", "next_states"], [5, 5, 1, "", "state"], [5, 5, 1, "", "vacuous"]], "pyk.cterm.symbolic.CTermImplies": [[5, 5, 1, "", "csubst"], [5, 5, 1, "", "failing_cells"], [5, 5, 1, "", "logs"], [5, 5, 1, "", "remaining_implication"]], "pyk.cterm.symbolic.CTermSymbolic": [[5, 3, 1, "", "assume_defined"], [5, 3, 1, "", "execute"], [5, 3, 1, "", "get_model"], [5, 3, 1, "", "implies"], [5, 3, 1, "", "kast_simplify"], [5, 3, 1, "", "kast_to_kore"], [5, 3, 1, "", "kore_to_kast"], [5, 3, 1, "", "simplify"]], "pyk.cterm.symbolic.NextState": [[5, 5, 1, "", "condition"], [5, 5, 1, "", "state"]], "pyk.dequote": [[6, 1, 1, "", "bytes_decode"], [6, 1, 1, "", "bytes_encode"], [6, 1, 1, "", "dequote_bytes"], [6, 1, 1, "", "dequote_string"], [6, 1, 1, "", "dequoted"], [6, 1, 1, "", "enquote_bytes"], [6, 1, 1, "", "enquote_string"], [6, 1, 1, "", "enquoted"]], "pyk.kast": [[8, 0, 0, "-", "att"], [9, 0, 0, "-", "color"], [10, 0, 0, "-", "formatter"], [11, 0, 0, "-", "inner"], [12, 0, 0, "-", "kast"], [13, 0, 0, "-", "lexer"], [14, 0, 0, "-", "manip"], [15, 0, 0, "-", "markdown"], [16, 0, 0, "-", "outer"], [17, 0, 0, "-", "outer_lexer"], [18, 0, 0, "-", "outer_parser"], [19, 0, 0, "-", "outer_syntax"], [20, 0, 0, "-", "parser"], [21, 0, 0, "-", "pretty"], [22, 0, 0, "-", "rewrite"], [23, 0, 0, "-", "utils"]], "pyk.kast.att": [[8, 2, 1, "", "AnyType"], [8, 2, 1, "", "AttEntry"], [8, 2, 1, "", "AttKey"], [8, 2, 1, "", "AttType"], [8, 2, 1, "", "Atts"], [8, 2, 1, "", "ColorType"], [8, 2, 1, "", "ColorsType"], [8, 2, 1, "", "Format"], [8, 2, 1, "", "FormatType"], [8, 2, 1, "", "IntType"], [8, 2, 1, "", "KAtt"], [8, 2, 1, "", "LocationType"], [8, 2, 1, "", "NoneType"], [8, 2, 1, "", "OptionalType"], [8, 2, 1, "", "PathType"], [8, 2, 1, "", "StrType"], [8, 2, 1, "", "WithKAtt"]], "pyk.kast.att.AnyType": [[8, 3, 1, "", "from_dict"], [8, 3, 1, "", "parse"], [8, 3, 1, "", "to_dict"], [8, 3, 1, "", "unparse"]], "pyk.kast.att.AttEntry": [[8, 5, 1, "", "key"], [8, 5, 1, "", "value"]], "pyk.kast.att.AttKey": [[8, 5, 1, "", "name"], [8, 5, 1, "", "type"]], "pyk.kast.att.AttType": [[8, 3, 1, "", "from_dict"], [8, 3, 1, "", "parse"], [8, 3, 1, "", "to_dict"], [8, 3, 1, "", "unparse"]], "pyk.kast.att.Atts": [[8, 5, 1, "", "ALIAS"], [8, 5, 1, "", "ALIAS_REC"], [8, 5, 1, "", "ANYWHERE"], [8, 5, 1, "", "ASSOC"], [8, 5, 1, "", "AVOID"], [8, 5, 1, "", "BRACKET"], [8, 5, 1, "", "BRACKET_LABEL"], [8, 5, 1, "", "CELL"], [8, 5, 1, "", "CELL_COLLECTION"], [8, 5, 1, "", 
"CELL_FRAGMENT"], [8, 5, 1, "", "CELL_NAME"], [8, 5, 1, "", "CELL_OPT_ABSENT"], [8, 5, 1, "", "CIRCULARITY"], [8, 5, 1, "", "COLOR"], [8, 5, 1, "", "COLORS"], [8, 5, 1, "", "COMM"], [8, 5, 1, "", "CONCAT"], [8, 5, 1, "", "CONCRETE"], [8, 5, 1, "", "CONSTRUCTOR"], [8, 5, 1, "", "DEPENDS"], [8, 5, 1, "", "DIGEST"], [8, 5, 1, "", "ELEMENT"], [8, 5, 1, "", "EXIT"], [8, 5, 1, "", "FORMAT"], [8, 5, 1, "", "FRESH_GENERATOR"], [8, 5, 1, "", "FUNCTION"], [8, 5, 1, "", "FUNCTIONAL"], [8, 5, 1, "", "GROUP"], [8, 5, 1, "", "HAS_DOMAIN_VALUES"], [8, 5, 1, "", "HOOK"], [8, 5, 1, "", "IDEM"], [8, 5, 1, "", "IMPURE"], [8, 5, 1, "", "INDEX"], [8, 5, 1, "", "INITIALIZER"], [8, 5, 1, "", "INJECTIVE"], [8, 5, 1, "", "LABEL"], [8, 5, 1, "", "LEFT"], [8, 5, 1, "", "LOCATION"], [8, 5, 1, "", "MACRO"], [8, 5, 1, "", "MACRO_REC"], [8, 5, 1, "", "MAINCELL"], [8, 5, 1, "", "MULTIPLICITY"], [8, 5, 1, "", "NO_EVALUATORS"], [8, 5, 1, "", "OVERLOAD"], [8, 5, 1, "", "OWISE"], [8, 5, 1, "", "PREDICATE"], [8, 5, 1, "", "PREFER"], [8, 5, 1, "", "PRIORITIES"], [8, 5, 1, "", "PRIORITY"], [8, 5, 1, "", "PRIVATE"], [8, 5, 1, "", "PRODUCTION"], [8, 5, 1, "", "PROJECTION"], [8, 5, 1, "", "RETURNS_UNIT"], [8, 5, 1, "", "RIGHT"], [8, 5, 1, "", "SEQSTRICT"], [8, 5, 1, "", "SIMPLIFICATION"], [8, 5, 1, "", "SMTLEMMA"], [8, 5, 1, "", "SORT"], [8, 5, 1, "", "SOURCE"], [8, 5, 1, "", "STRICT"], [8, 5, 1, "", "SYMBOL"], [8, 5, 1, "", "SYMBOLIC"], [8, 5, 1, "", "SYNTAX_MODULE"], [8, 5, 1, "", "TERMINALS"], [8, 5, 1, "", "TERMINATOR_SYMBOL"], [8, 5, 1, "", "TOKEN"], [8, 5, 1, "", "TOTAL"], [8, 5, 1, "", "TRUSTED"], [8, 5, 1, "", "TYPE"], [8, 5, 1, "", "UNIQUE_ID"], [8, 5, 1, "", "UNIT"], [8, 5, 1, "", "UNPARSE_AVOID"], [8, 5, 1, "", "UPDATE"], [8, 5, 1, "", "USER_LIST"], [8, 5, 1, "", "WRAP_ELEMENT"], [8, 3, 1, "", "keys"]], "pyk.kast.att.ColorType": [[8, 3, 1, "", "from_dict"], [8, 3, 1, "", "parse"], [8, 3, 1, "", "to_dict"], [8, 3, 1, "", "unparse"]], "pyk.kast.att.ColorsType": [[8, 3, 1, "", "from_dict"], [8, 3, 1, "", "parse"], [8, 3, 1, "", "to_dict"], [8, 3, 1, "", "unparse"]], "pyk.kast.att.Format": [[8, 3, 1, "", "parse"], [8, 5, 1, "", "tokens"], [8, 3, 1, "", "unparse"]], "pyk.kast.att.FormatType": [[8, 3, 1, "", "from_dict"], [8, 3, 1, "", "parse"], [8, 3, 1, "", "to_dict"], [8, 3, 1, "", "unparse"]], "pyk.kast.att.IntType": [[8, 3, 1, "", "from_dict"], [8, 3, 1, "", "parse"], [8, 3, 1, "", "to_dict"], [8, 3, 1, "", "unparse"]], "pyk.kast.att.KAtt": [[8, 5, 1, "", "atts"], [8, 3, 1, "", "discard"], [8, 3, 1, "", "drop_source"], [8, 3, 1, "", "entries"], [8, 3, 1, "", "from_dict"], [8, 3, 1, "", "get"], [8, 3, 1, "", "parse"], [8, 4, 1, "", "pretty"], [8, 3, 1, "", "to_dict"], [8, 3, 1, "", "update"]], "pyk.kast.att.LocationType": [[8, 3, 1, "", "from_dict"], [8, 3, 1, "", "parse"], [8, 3, 1, "", "to_dict"], [8, 3, 1, "", "unparse"]], "pyk.kast.att.NoneType": [[8, 3, 1, "", "from_dict"], [8, 3, 1, "", "parse"], [8, 3, 1, "", "to_dict"], [8, 3, 1, "", "unparse"]], "pyk.kast.att.OptionalType": [[8, 3, 1, "", "from_dict"], [8, 3, 1, "", "parse"], [8, 3, 1, "", "to_dict"], [8, 3, 1, "", "unparse"]], "pyk.kast.att.PathType": [[8, 3, 1, "", "from_dict"], [8, 3, 1, "", "parse"], [8, 3, 1, "", "to_dict"], [8, 3, 1, "", "unparse"]], "pyk.kast.att.StrType": [[8, 3, 1, "", "from_dict"], [8, 3, 1, "", "parse"], [8, 3, 1, "", "to_dict"], [8, 3, 1, "", "unparse"]], "pyk.kast.att.WithKAtt": [[8, 5, 1, "", "att"], [8, 3, 1, "", "let_att"], [8, 3, 1, "", "map_att"], [8, 3, 1, "", "update_atts"]], "pyk.kast.color": [[9, 2, 1, "", "Color"]], 
"pyk.kast.color.Color": [[9, 5, 1, "", "ALICE_BLUE"], [9, 5, 1, "", "ANTIQUE_WHITE"], [9, 5, 1, "", "APRICOT"], [9, 5, 1, "", "AQUA"], [9, 5, 1, "", "AQUAMARINE"], [9, 5, 1, "", "AZURE"], [9, 5, 1, "", "BEIGE"], [9, 5, 1, "", "BISQUE"], [9, 5, 1, "", "BITTERSWEET"], [9, 5, 1, "", "BLACK"], [9, 5, 1, "", "BLANCHED_ALMOND"], [9, 5, 1, "", "BLUE"], [9, 5, 1, "", "BLUE_GREEN"], [9, 5, 1, "", "BLUE_VIOLET"], [9, 5, 1, "", "BRICK_RED"], [9, 5, 1, "", "BROWN"], [9, 5, 1, "", "BURLY_WOOD"], [9, 5, 1, "", "BURNT_ORANGE"], [9, 5, 1, "", "CADET_BLUE"], [9, 5, 1, "", "CARNATION_PINK"], [9, 5, 1, "", "CERULEAN"], [9, 5, 1, "", "CHARTREUSE"], [9, 5, 1, "", "CHOCOLATE"], [9, 5, 1, "", "CORAL"], [9, 5, 1, "", "CORNFLOWER_BLUE"], [9, 5, 1, "", "CORNSILK"], [9, 5, 1, "", "CRIMSON"], [9, 5, 1, "", "CYAN"], [9, 5, 1, "", "DANDELION"], [9, 5, 1, "", "DARKGRAY"], [9, 5, 1, "", "DARK_BLUE"], [9, 5, 1, "", "DARK_CYAN"], [9, 5, 1, "", "DARK_GOLDENROD"], [9, 5, 1, "", "DARK_GRAY"], [9, 5, 1, "", "DARK_GREEN"], [9, 5, 1, "", "DARK_GREY"], [9, 5, 1, "", "DARK_KHAKI"], [9, 5, 1, "", "DARK_MAGENTA"], [9, 5, 1, "", "DARK_OLIVE_GREEN"], [9, 5, 1, "", "DARK_ORANGE"], [9, 5, 1, "", "DARK_ORCHID"], [9, 5, 1, "", "DARK_RED"], [9, 5, 1, "", "DARK_SALMON"], [9, 5, 1, "", "DARK_SEA_GREEN"], [9, 5, 1, "", "DARK_SLATE_BLUE"], [9, 5, 1, "", "DARK_SLATE_GRAY"], [9, 5, 1, "", "DARK_SLATE_GREY"], [9, 5, 1, "", "DARK_TURQUOISE"], [9, 5, 1, "", "DARK_VIOLET"], [9, 5, 1, "", "DEEP_PINK"], [9, 5, 1, "", "DEEP_SKY_BLUE"], [9, 5, 1, "", "DIM_GRAY"], [9, 5, 1, "", "DIM_GREY"], [9, 5, 1, "", "DODGER_BLUE"], [9, 5, 1, "", "EMERALD"], [9, 5, 1, "", "FIRE_BRICK"], [9, 5, 1, "", "FLORAL_WHITE"], [9, 5, 1, "", "FOREST_GREEN"], [9, 5, 1, "", "FUCHSIA"], [9, 5, 1, "", "GAINSBORO"], [9, 5, 1, "", "GHOST_WHITE"], [9, 5, 1, "", "GOLD"], [9, 5, 1, "", "GOLDENROD"], [9, 5, 1, "", "GRAY"], [9, 5, 1, "", "GREEN"], [9, 5, 1, "", "GREEN_YELLOW"], [9, 5, 1, "", "GREY"], [9, 5, 1, "", "HONEYDEW"], [9, 5, 1, "", "HOT_PINK"], [9, 5, 1, "", "INDIAN_RED"], [9, 5, 1, "", "INDIGO"], [9, 5, 1, "", "IVORY"], [9, 5, 1, "", "JUNGLE_GREEN"], [9, 5, 1, "", "KHAKI"], [9, 5, 1, "", "LAVENDER"], [9, 5, 1, "", "LAVENDER_BLUSH"], [9, 5, 1, "", "LAWN_GREEN"], [9, 5, 1, "", "LEMON_CHIFFON"], [9, 5, 1, "", "LIGHTGRAY"], [9, 5, 1, "", "LIGHT_BLUE"], [9, 5, 1, "", "LIGHT_CORAL"], [9, 5, 1, "", "LIGHT_CYAN"], [9, 5, 1, "", "LIGHT_GOLDENROD"], [9, 5, 1, "", "LIGHT_GOLDENROD_YELLOW"], [9, 5, 1, "", "LIGHT_GRAY"], [9, 5, 1, "", "LIGHT_GREEN"], [9, 5, 1, "", "LIGHT_GREY"], [9, 5, 1, "", "LIGHT_PINK"], [9, 5, 1, "", "LIGHT_SALMON"], [9, 5, 1, "", "LIGHT_SEA_GREEN"], [9, 5, 1, "", "LIGHT_SKY_BLUE"], [9, 5, 1, "", "LIGHT_SLATE_BLUE"], [9, 5, 1, "", "LIGHT_SLATE_GRAY"], [9, 5, 1, "", "LIGHT_SLATE_GREY"], [9, 5, 1, "", "LIGHT_STEEL_BLUE"], [9, 5, 1, "", "LIGHT_YELLOW"], [9, 5, 1, "", "LIME"], [9, 5, 1, "", "LIME_GREEN"], [9, 5, 1, "", "LINEN"], [9, 5, 1, "", "MAGENTA"], [9, 5, 1, "", "MAHOGANY"], [9, 5, 1, "", "MAROON"], [9, 5, 1, "", "MEDIUM_AQUAMARINE"], [9, 5, 1, "", "MEDIUM_BLUE"], [9, 5, 1, "", "MEDIUM_ORCHID"], [9, 5, 1, "", "MEDIUM_PURPLE"], [9, 5, 1, "", "MEDIUM_SEA_GREEN"], [9, 5, 1, "", "MEDIUM_SLATE_BLUE"], [9, 5, 1, "", "MEDIUM_SPRING_GREEN"], [9, 5, 1, "", "MEDIUM_TURQUOISE"], [9, 5, 1, "", "MEDIUM_VIOLET_RED"], [9, 5, 1, "", "MELON"], [9, 5, 1, "", "MIDNIGHT_BLUE"], [9, 5, 1, "", "MINT_CREAM"], [9, 5, 1, "", "MISTY_ROSE"], [9, 5, 1, "", "MOCCASIN"], [9, 5, 1, "", "MULBERRY"], [9, 5, 1, "", "NAVAJO_WHITE"], [9, 5, 1, "", "NAVY"], [9, 5, 1, "", "NAVY_BLUE"], [9, 5, 1, "", 
"OLD_LACE"], [9, 5, 1, "", "OLIVE"], [9, 5, 1, "", "OLIVE_DRAB"], [9, 5, 1, "", "OLIVE_GREEN"], [9, 5, 1, "", "ORANGE"], [9, 5, 1, "", "ORANGE_RED"], [9, 5, 1, "", "ORCHID"], [9, 5, 1, "", "PALE_GOLDENROD"], [9, 5, 1, "", "PALE_GREEN"], [9, 5, 1, "", "PALE_TURQUOISE"], [9, 5, 1, "", "PALE_VIOLET_RED"], [9, 5, 1, "", "PAPAYA_WHIP"], [9, 5, 1, "", "PEACH"], [9, 5, 1, "", "PEACH_PUFF"], [9, 5, 1, "", "PERIWINKLE"], [9, 5, 1, "", "PERU"], [9, 5, 1, "", "PINE_GREEN"], [9, 5, 1, "", "PINK"], [9, 5, 1, "", "PLUM"], [9, 5, 1, "", "POWDER_BLUE"], [9, 5, 1, "", "PROCESS_BLUE"], [9, 5, 1, "", "PURPLE"], [9, 5, 1, "", "RAW_SIENNA"], [9, 5, 1, "", "RED"], [9, 5, 1, "", "RED_ORANGE"], [9, 5, 1, "", "RED_VIOLET"], [9, 5, 1, "", "RHODAMINE"], [9, 5, 1, "", "ROSY_BROWN"], [9, 5, 1, "", "ROYAL_BLUE"], [9, 5, 1, "", "ROYAL_PURPLE"], [9, 5, 1, "", "RUBINE_RED"], [9, 5, 1, "", "SADDLE_BROWN"], [9, 5, 1, "", "SALMON"], [9, 5, 1, "", "SANDY_BROWN"], [9, 5, 1, "", "SEASHELL"], [9, 5, 1, "", "SEA_GREEN"], [9, 5, 1, "", "SEPIA"], [9, 5, 1, "", "SIENNA"], [9, 5, 1, "", "SILVER"], [9, 5, 1, "", "SKY_BLUE"], [9, 5, 1, "", "SLATE_BLUE"], [9, 5, 1, "", "SLATE_GRAY"], [9, 5, 1, "", "SLATE_GREY"], [9, 5, 1, "", "SNOW"], [9, 5, 1, "", "SPRING_GREEN"], [9, 5, 1, "", "STEEL_BLUE"], [9, 5, 1, "", "TAN"], [9, 5, 1, "", "TEAL"], [9, 5, 1, "", "TEAL_BLUE"], [9, 5, 1, "", "THISTLE"], [9, 5, 1, "", "TOMATO"], [9, 5, 1, "", "TURQUOISE"], [9, 5, 1, "", "VIOLET"], [9, 5, 1, "", "VIOLET_RED"], [9, 5, 1, "", "WHEAT"], [9, 5, 1, "", "WHITE"], [9, 5, 1, "", "WHITE_SMOKE"], [9, 5, 1, "", "WILD_STRAWBERRY"], [9, 5, 1, "", "YELLOW"], [9, 5, 1, "", "YELLOW_GREEN"], [9, 5, 1, "", "YELLOW_ORANGE"], [9, 4, 1, "", "ansi_code"], [9, 3, 1, "", "reset"], [9, 3, 1, "", "reset_code"], [9, 3, 1, "", "set"]], "pyk.kast.formatter": [[10, 2, 1, "", "Formatter"], [10, 1, 1, "", "add_brackets"]], "pyk.kast.formatter.Formatter": [[10, 5, 1, "", "definition"], [10, 3, 1, "", "format"]], "pyk.kast.inner": [[11, 2, 1, "", "KApply"], [11, 2, 1, "", "KAs"], [11, 2, 1, "", "KInner"], [11, 2, 1, "", "KLabel"], [11, 2, 1, "", "KRewrite"], [11, 2, 1, "", "KSequence"], [11, 2, 1, "", "KSort"], [11, 2, 1, "", "KToken"], [11, 2, 1, "", "KVariable"], [11, 2, 1, "", "Subst"], [11, 1, 1, "", "bottom_up"], [11, 1, 1, "", "bottom_up_with_summary"], [11, 1, 1, "", "build_assoc"], [11, 1, 1, "", "build_cons"], [11, 1, 1, "", "collect"], [11, 1, 1, "", "flatten_label"], [11, 1, 1, "", "top_down"], [11, 1, 1, "", "var_occurrences"]], "pyk.kast.inner.KApply": [[11, 3, 1, "", "__init__"], [11, 5, 1, "", "args"], [11, 4, 1, "", "arity"], [11, 4, 1, "", "is_cell"], [11, 5, 1, "", "label"], [11, 3, 1, "", "let"], [11, 3, 1, "", "let_terms"], [11, 3, 1, "", "match"], [11, 4, 1, "", "terms"]], "pyk.kast.inner.KAs": [[11, 3, 1, "", "__init__"], [11, 5, 1, "", "alias"], [11, 3, 1, "", "let"], [11, 3, 1, "", "let_terms"], [11, 3, 1, "", "match"], [11, 5, 1, "", "pattern"], [11, 4, 1, "", "terms"]], "pyk.kast.inner.KInner": [[11, 3, 1, "", "from_dict"], [11, 3, 1, "", "from_json"], [11, 3, 1, "", "let_terms"], [11, 3, 1, "", "map_inner"], [11, 3, 1, "", "match"], [11, 4, 1, "", "terms"], [11, 3, 1, "", "to_dict"]], "pyk.kast.inner.KLabel": [[11, 3, 1, "", "__init__"], [11, 3, 1, "", "__iter__"], [11, 3, 1, "", "apply"], [11, 3, 1, "", "from_dict"], [11, 3, 1, "", "let"], [11, 5, 1, "", "name"], [11, 5, 1, "", "params"], [11, 3, 1, "", "to_dict"]], "pyk.kast.inner.KRewrite": [[11, 3, 1, "", "__init__"], [11, 3, 1, "", "__iter__"], [11, 3, 1, "", "apply"], [11, 3, 1, "", "apply_top"], [11, 
3, 1, "", "let"], [11, 3, 1, "", "let_terms"], [11, 5, 1, "", "lhs"], [11, 3, 1, "", "match"], [11, 3, 1, "", "replace"], [11, 3, 1, "", "replace_top"], [11, 5, 1, "", "rhs"], [11, 4, 1, "", "terms"]], "pyk.kast.inner.KSequence": [[11, 3, 1, "", "__init__"], [11, 4, 1, "", "arity"], [11, 5, 1, "", "items"], [11, 3, 1, "", "let"], [11, 3, 1, "", "let_terms"], [11, 3, 1, "", "match"], [11, 4, 1, "", "terms"]], "pyk.kast.inner.KSort": [[11, 3, 1, "", "__init__"], [11, 3, 1, "", "from_dict"], [11, 3, 1, "", "let"], [11, 5, 1, "", "name"], [11, 3, 1, "", "to_dict"]], "pyk.kast.inner.KToken": [[11, 3, 1, "", "__init__"], [11, 3, 1, "", "let"], [11, 3, 1, "", "let_terms"], [11, 3, 1, "", "match"], [11, 5, 1, "", "sort"], [11, 4, 1, "", "terms"], [11, 5, 1, "", "token"]], "pyk.kast.inner.KVariable": [[11, 3, 1, "", "__init__"], [11, 3, 1, "", "__lt__"], [11, 3, 1, "", "let"], [11, 3, 1, "", "let_sort"], [11, 3, 1, "", "let_terms"], [11, 3, 1, "", "match"], [11, 5, 1, "", "name"], [11, 5, 1, "", "sort"], [11, 4, 1, "", "terms"]], "pyk.kast.inner.Subst": [[11, 3, 1, "", "__call__"], [11, 3, 1, "", "__getitem__"], [11, 3, 1, "", "__init__"], [11, 3, 1, "", "__iter__"], [11, 3, 1, "", "__len__"], [11, 3, 1, "", "__mul__"], [11, 3, 1, "", "apply"], [11, 3, 1, "", "compose"], [11, 3, 1, "", "from_dict"], [11, 3, 1, "", "from_pred"], [11, 4, 1, "", "is_identity"], [11, 3, 1, "", "minimize"], [11, 4, 1, "", "pred"], [11, 3, 1, "", "to_dict"], [11, 3, 1, "", "unapply"], [11, 3, 1, "", "union"]], "pyk.kast.kast": [[12, 2, 1, "", "KAst"], [12, 1, 1, "", "kast_term"]], "pyk.kast.kast.KAst": [[12, 4, 1, "", "hash"], [12, 3, 1, "", "to_dict"], [12, 3, 1, "", "to_json"], [12, 3, 1, "", "version"]], "pyk.kast.lexer": [[13, 2, 1, "", "State"], [13, 2, 1, "", "Token"], [13, 2, 1, "", "TokenType"], [13, 1, 1, "", "lexer"]], "pyk.kast.lexer.State": [[13, 5, 1, "", "DEFAULT"], [13, 5, 1, "", "SORT"]], "pyk.kast.lexer.Token": [[13, 5, 1, "", "text"], [13, 5, 1, "", "type"]], "pyk.kast.lexer.TokenType": [[13, 5, 1, "", "COLON"], [13, 5, 1, "", "COMMA"], [13, 5, 1, "", "DOTK"], [13, 5, 1, "", "DOTKLIST"], [13, 5, 1, "", "EOF"], [13, 5, 1, "", "ID"], [13, 5, 1, "", "KLABEL"], [13, 5, 1, "", "KSEQ"], [13, 5, 1, "", "LPAREN"], [13, 5, 1, "", "RPAREN"], [13, 5, 1, "", "SORT"], [13, 5, 1, "", "STRING"], [13, 5, 1, "", "TOKEN"], [13, 5, 1, "", "VARIABLE"]], "pyk.kast.manip": [[14, 1, 1, "", "abstract_term_safely"], [14, 1, 1, "", "apply_existential_substitutions"], [14, 1, 1, "", "bool_to_ml_pred"], [14, 1, 1, "", "build_claim"], [14, 1, 1, "", "build_rule"], [14, 1, 1, "", "cell_label_to_var_name"], [14, 1, 1, "", "collapse_dots"], [14, 1, 1, "", "count_vars"], [14, 1, 1, "", "defunctionalize"], [14, 1, 1, "", "extract_cells"], [14, 1, 1, "", "extract_lhs"], [14, 1, 1, "", "extract_rhs"], [14, 1, 1, "", "extract_subst"], [14, 1, 1, "", "free_vars"], [14, 1, 1, "", "if_ktype"], [14, 1, 1, "", "inline_cell_maps"], [14, 1, 1, "", "is_anon_var"], [14, 1, 1, "", "is_spurious_constraint"], [14, 1, 1, "", "is_term_like"], [14, 1, 1, "", "labels_to_dots"], [14, 1, 1, "", "minimize_rule_like"], [14, 1, 1, "", "minimize_term"], [14, 1, 1, "", "ml_pred_to_bool"], [14, 1, 1, "", "no_cell_rewrite_to_dots"], [14, 1, 1, "", "normalize_constraints"], [14, 1, 1, "", "normalize_ml_pred"], [14, 1, 1, "", "on_attributes"], [14, 1, 1, "", "propagate_up_constraints"], [14, 1, 1, "", "push_down_rewrites"], [14, 1, 1, "", "remove_attrs"], [14, 1, 1, "", "remove_generated_cells"], [14, 1, 1, "", "remove_semantic_casts"], [14, 1, 1, "", 
"remove_source_map"], [14, 1, 1, "", "remove_useless_constraints"], [14, 1, 1, "", "rename_generated_vars"], [14, 1, 1, "", "replace_rewrites_with_implies"], [14, 1, 1, "", "set_cell"], [14, 1, 1, "", "simplify_bool"], [14, 1, 1, "", "sort_ac_collections"], [14, 1, 1, "", "sort_assoc_label"], [14, 1, 1, "", "split_config_and_constraints"], [14, 1, 1, "", "split_config_from"], [14, 1, 1, "", "undo_aliases"], [14, 1, 1, "", "useless_vars_to_dots"]], "pyk.kast.markdown": [[15, 2, 1, "", "And"], [15, 2, 1, "", "Atom"], [15, 2, 1, "", "CodeBlock"], [15, 2, 1, "", "Not"], [15, 2, 1, "", "Or"], [15, 2, 1, "", "Selector"], [15, 2, 1, "", "SelectorParser"], [15, 1, 1, "", "code_blocks"], [15, 1, 1, "", "parse_tags"], [15, 1, 1, "", "select_code_blocks"], [15, 1, 1, "", "selector_lexer"]], "pyk.kast.markdown.And": [[15, 3, 1, "", "eval"], [15, 5, 1, "", "ops"]], "pyk.kast.markdown.Atom": [[15, 3, 1, "", "eval"], [15, 5, 1, "", "name"]], "pyk.kast.markdown.CodeBlock": [[15, 5, 1, "", "code"], [15, 5, 1, "", "info"]], "pyk.kast.markdown.Not": [[15, 3, 1, "", "eval"], [15, 5, 1, "", "op"]], "pyk.kast.markdown.Or": [[15, 3, 1, "", "eval"], [15, 5, 1, "", "ops"]], "pyk.kast.markdown.Selector": [[15, 3, 1, "", "eval"]], "pyk.kast.markdown.SelectorParser": [[15, 3, 1, "", "parse"]], "pyk.kast.outer": [[16, 2, 1, "", "KAssoc"], [16, 2, 1, "", "KBubble"], [16, 2, 1, "", "KClaim"], [16, 2, 1, "", "KContext"], [16, 2, 1, "", "KDefinition"], [16, 2, 1, "", "KFlatModule"], [16, 2, 1, "", "KFlatModuleList"], [16, 2, 1, "", "KImport"], [16, 2, 1, "", "KNonTerminal"], [16, 2, 1, "", "KOuter"], [16, 2, 1, "", "KProduction"], [16, 2, 1, "", "KProductionItem"], [16, 2, 1, "", "KRegexTerminal"], [16, 2, 1, "", "KRequire"], [16, 2, 1, "", "KRule"], [16, 2, 1, "", "KRuleLike"], [16, 2, 1, "", "KSentence"], [16, 2, 1, "", "KSortSynonym"], [16, 2, 1, "", "KSyntaxAssociativity"], [16, 2, 1, "", "KSyntaxLexical"], [16, 2, 1, "", "KSyntaxPriority"], [16, 2, 1, "", "KSyntaxSort"], [16, 2, 1, "", "KTerminal"], [16, 1, 1, "", "read_kast_definition"]], "pyk.kast.outer.KAssoc": [[16, 5, 1, "", "LEFT"], [16, 5, 1, "", "NON_ASSOC"], [16, 5, 1, "", "RIGHT"]], "pyk.kast.outer.KBubble": [[16, 5, 1, "", "att"], [16, 5, 1, "", "contents"], [16, 3, 1, "", "let"], [16, 3, 1, "", "let_att"], [16, 5, 1, "", "sentence_type"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KClaim": [[16, 5, 1, "", "att"], [16, 5, 1, "", "body"], [16, 4, 1, "", "dependencies"], [16, 5, 1, "", "ensures"], [16, 4, 1, "", "is_circularity"], [16, 4, 1, "", "is_trusted"], [16, 3, 1, "", "let"], [16, 3, 1, "", "let_att"], [16, 5, 1, "", "requires"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KContext": [[16, 5, 1, "", "att"], [16, 5, 1, "", "body"], [16, 3, 1, "", "let"], [16, 3, 1, "", "let_att"], [16, 5, 1, "", "requires"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KDefinition": [[16, 3, 1, "", "add_cell_map_items"], [16, 3, 1, "", "add_ksequence_under_k_productions"], [16, 3, 1, "", "add_sort_params"], [16, 4, 1, "", "alias_rules"], [16, 4, 1, "", "all_module_names"], [16, 5, 1, "", "all_modules"], [16, 4, 1, "", "all_modules_dict"], [16, 5, 1, "", "att"], [16, 4, 1, "", "brackets"], [16, 4, 1, "", "cell_collection_productions"], [16, 4, 1, "", "constructors"], [16, 3, 1, "", "empty_config"], [16, 3, 1, "", "from_dict"], [16, 4, 1, "", "function_labels"], [16, 4, 1, "", "functions"], [16, 3, 1, "", "greatest_common_subsort"], [16, 3, 1, "", "init_config"], [16, 3, 1, "", "instantiate_cell_vars"], [16, 3, 1, "", "least_common_supersort"], [16, 4, 1, "", 
"left_assocs"], [16, 3, 1, "", "let"], [16, 3, 1, "", "let_att"], [16, 4, 1, "", "macro_rules"], [16, 5, 1, "", "main_module"], [16, 5, 1, "", "main_module_name"], [16, 3, 1, "", "module"], [16, 4, 1, "", "module_names"], [16, 4, 1, "", "modules"], [16, 4, 1, "", "overloads"], [16, 4, 1, "", "priorities"], [16, 3, 1, "", "production_for_cell_sort"], [16, 4, 1, "", "productions"], [16, 3, 1, "", "remove_cell_map_items"], [16, 5, 1, "", "requires"], [16, 3, 1, "", "resolve_sorts"], [16, 4, 1, "", "right_assocs"], [16, 4, 1, "", "rules"], [16, 4, 1, "", "semantic_rules"], [16, 4, 1, "", "sentence_by_unique_id"], [16, 3, 1, "", "sort"], [16, 3, 1, "", "sort_strict"], [16, 3, 1, "", "sort_vars"], [16, 4, 1, "", "subsort_table"], [16, 3, 1, "", "subsorts"], [16, 4, 1, "", "symbols"], [16, 4, 1, "", "syntax_productions"], [16, 4, 1, "", "syntax_symbols"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KFlatModule": [[16, 5, 1, "", "att"], [16, 4, 1, "", "cell_collection_productions"], [16, 4, 1, "", "claims"], [16, 4, 1, "", "constructors"], [16, 3, 1, "", "from_dict"], [16, 4, 1, "", "functions"], [16, 5, 1, "", "imports"], [16, 3, 1, "", "let"], [16, 3, 1, "", "let_att"], [16, 3, 1, "", "map_sentences"], [16, 5, 1, "", "name"], [16, 4, 1, "", "productions"], [16, 4, 1, "", "rules"], [16, 4, 1, "", "sentence_by_unique_id"], [16, 5, 1, "", "sentences"], [16, 4, 1, "", "syntax_productions"], [16, 4, 1, "", "syntax_sorts"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KFlatModuleList": [[16, 3, 1, "", "from_dict"], [16, 3, 1, "", "let"], [16, 5, 1, "", "main_module"], [16, 5, 1, "", "modules"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KImport": [[16, 3, 1, "", "from_dict"], [16, 3, 1, "", "let"], [16, 5, 1, "", "name"], [16, 5, 1, "", "public"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KNonTerminal": [[16, 3, 1, "", "let"], [16, 5, 1, "", "name"], [16, 5, 1, "", "sort"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KProduction": [[16, 4, 1, "", "argument_sorts"], [16, 4, 1, "", "as_subsort"], [16, 5, 1, "", "att"], [16, 4, 1, "", "default_format"], [16, 4, 1, "", "is_prefix"], [16, 4, 1, "", "is_record"], [16, 5, 1, "", "items"], [16, 5, 1, "", "klabel"], [16, 3, 1, "", "let"], [16, 3, 1, "", "let_att"], [16, 4, 1, "", "non_terminals"], [16, 5, 1, "", "params"], [16, 5, 1, "", "sort"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KProductionItem": [[16, 3, 1, "", "from_dict"]], "pyk.kast.outer.KRegexTerminal": [[16, 3, 1, "", "let"], [16, 5, 1, "", "regex"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KRequire": [[16, 3, 1, "", "from_dict"], [16, 3, 1, "", "let"], [16, 5, 1, "", "require"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KRule": [[16, 5, 1, "", "att"], [16, 5, 1, "", "body"], [16, 5, 1, "", "ensures"], [16, 3, 1, "", "let"], [16, 3, 1, "", "let_att"], [16, 4, 1, "", "priority"], [16, 5, 1, "", "requires"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KRuleLike": [[16, 5, 1, "", "body"], [16, 5, 1, "", "ensures"], [16, 3, 1, "", "let"], [16, 5, 1, "", "requires"]], "pyk.kast.outer.KSentence": [[16, 3, 1, "", "from_dict"], [16, 4, 1, "", "label"], [16, 4, 1, "", "source"], [16, 4, 1, "", "unique_id"]], "pyk.kast.outer.KSortSynonym": [[16, 5, 1, "", "att"], [16, 3, 1, "", "let"], [16, 3, 1, "", "let_att"], [16, 5, 1, "", "new_sort"], [16, 5, 1, "", "old_sort"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KSyntaxAssociativity": [[16, 5, 1, "", "assoc"], [16, 5, 1, "", "att"], [16, 3, 1, "", "let"], [16, 3, 1, "", "let_att"], [16, 5, 1, "", "tags"], [16, 3, 1, "", "to_dict"]], 
"pyk.kast.outer.KSyntaxLexical": [[16, 5, 1, "", "att"], [16, 3, 1, "", "let"], [16, 3, 1, "", "let_att"], [16, 5, 1, "", "name"], [16, 5, 1, "", "regex"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KSyntaxPriority": [[16, 5, 1, "", "att"], [16, 3, 1, "", "let"], [16, 3, 1, "", "let_att"], [16, 5, 1, "", "priorities"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KSyntaxSort": [[16, 5, 1, "", "att"], [16, 3, 1, "", "let"], [16, 3, 1, "", "let_att"], [16, 5, 1, "", "params"], [16, 5, 1, "", "sort"], [16, 3, 1, "", "to_dict"]], "pyk.kast.outer.KTerminal": [[16, 3, 1, "", "let"], [16, 3, 1, "", "to_dict"], [16, 5, 1, "", "value"]], "pyk.kast.outer_lexer": [[17, 2, 1, "", "Loc"], [17, 2, 1, "", "LocationIterator"], [17, 2, 1, "", "State"], [17, 2, 1, "", "Token"], [17, 2, 1, "", "TokenType"], [17, 1, 1, "", "outer_lexer"]], "pyk.kast.outer_lexer.Loc": [[17, 5, 1, "", "col"], [17, 5, 1, "", "line"]], "pyk.kast.outer_lexer.LocationIterator": [[17, 4, 1, "", "loc"]], "pyk.kast.outer_lexer.State": [[17, 5, 1, "", "ATTR"], [17, 5, 1, "", "BUBBLE"], [17, 5, 1, "", "CONTEXT"], [17, 5, 1, "", "DEFAULT"], [17, 5, 1, "", "KLABEL"], [17, 5, 1, "", "MODNAME"], [17, 5, 1, "", "SYNTAX"]], "pyk.kast.outer_lexer.Token": [[17, 3, 1, "", "let"], [17, 5, 1, "", "loc"], [17, 5, 1, "", "text"], [17, 5, 1, "", "type"]], "pyk.kast.outer_lexer.TokenType": [[17, 5, 1, "", "ATTR_CONTENT"], [17, 5, 1, "", "ATTR_KEY"], [17, 5, 1, "", "BUBBLE"], [17, 5, 1, "", "COLON"], [17, 5, 1, "", "COMMA"], [17, 5, 1, "", "DCOLONEQ"], [17, 5, 1, "", "EOF"], [17, 5, 1, "", "EQ"], [17, 5, 1, "", "GT"], [17, 5, 1, "", "ID_LOWER"], [17, 5, 1, "", "ID_UPPER"], [17, 5, 1, "", "KLABEL"], [17, 5, 1, "", "KW_ALIAS"], [17, 5, 1, "", "KW_CLAIM"], [17, 5, 1, "", "KW_CONFIG"], [17, 5, 1, "", "KW_CONTEXT"], [17, 5, 1, "", "KW_ENDMODULE"], [17, 5, 1, "", "KW_IMPORTS"], [17, 5, 1, "", "KW_LEFT"], [17, 5, 1, "", "KW_LEXICAL"], [17, 5, 1, "", "KW_MODULE"], [17, 5, 1, "", "KW_NONASSOC"], [17, 5, 1, "", "KW_PRIORITY"], [17, 5, 1, "", "KW_PRIVATE"], [17, 5, 1, "", "KW_PUBLIC"], [17, 5, 1, "", "KW_REQUIRES"], [17, 5, 1, "", "KW_RIGHT"], [17, 5, 1, "", "KW_RULE"], [17, 5, 1, "", "KW_SYNTAX"], [17, 5, 1, "", "LBRACE"], [17, 5, 1, "", "LBRACK"], [17, 5, 1, "", "LPAREN"], [17, 5, 1, "", "MODNAME"], [17, 5, 1, "", "NAT"], [17, 5, 1, "", "PLUS"], [17, 5, 1, "", "QUESTION"], [17, 5, 1, "", "RBRACE"], [17, 5, 1, "", "RBRACK"], [17, 5, 1, "", "REGEX"], [17, 5, 1, "", "RPAREN"], [17, 5, 1, "", "RULE_LABEL"], [17, 5, 1, "", "STRING"], [17, 5, 1, "", "TILDE"], [17, 5, 1, "", "TIMES"], [17, 5, 1, "", "VBAR"]], "pyk.kast.outer_parser": [[18, 2, 1, "", "OuterParser"]], "pyk.kast.outer_parser.OuterParser": [[18, 3, 1, "", "definition"], [18, 3, 1, "", "importt"], [18, 3, 1, "", "module"], [18, 3, 1, "", "require"], [18, 3, 1, "", "sentence"], [18, 3, 1, "", "string_sentence"], [18, 3, 1, "", "syntax_sentence"]], "pyk.kast.outer_syntax": [[19, 2, 1, "", "AST"], [19, 2, 1, "", "Alias"], [19, 2, 1, "", "Assoc"], [19, 2, 1, "", "Att"], [19, 2, 1, "", "Claim"], [19, 2, 1, "", "Config"], [19, 2, 1, "", "Context"], [19, 2, 1, "", "Definition"], [19, 2, 1, "", "Import"], [19, 2, 1, "", "Lexical"], [19, 2, 1, "", "Module"], [19, 2, 1, "", "NonTerminal"], [19, 2, 1, "", "PriorityBlock"], [19, 2, 1, "", "Production"], [19, 2, 1, "", "ProductionItem"], [19, 2, 1, "", "ProductionLike"], [19, 2, 1, "", "Require"], [19, 2, 1, "", "Rule"], [19, 2, 1, "", "Sentence"], [19, 2, 1, "", "Sort"], [19, 2, 1, "", "SortDecl"], [19, 2, 1, "", "StringSentence"], [19, 2, 1, "", "SyntaxAssoc"], [19, 
2, 1, "", "SyntaxDecl"], [19, 2, 1, "", "SyntaxDefn"], [19, 2, 1, "", "SyntaxLexical"], [19, 2, 1, "", "SyntaxPriority"], [19, 2, 1, "", "SyntaxSentence"], [19, 2, 1, "", "SyntaxSynonym"], [19, 2, 1, "", "Terminal"], [19, 2, 1, "", "UserList"]], "pyk.kast.outer_syntax.AST": [[19, 5, 1, "", "location"], [19, 5, 1, "", "source"]], "pyk.kast.outer_syntax.Alias": [[19, 5, 1, "", "att"], [19, 5, 1, "", "bubble"], [19, 5, 1, "", "label"]], "pyk.kast.outer_syntax.Assoc": [[19, 5, 1, "", "LEFT"], [19, 5, 1, "", "NON_ASSOC"], [19, 5, 1, "", "RIGHT"]], "pyk.kast.outer_syntax.Att": [[19, 5, 1, "", "items"]], "pyk.kast.outer_syntax.Claim": [[19, 5, 1, "", "att"], [19, 5, 1, "", "bubble"], [19, 5, 1, "", "label"]], "pyk.kast.outer_syntax.Config": [[19, 5, 1, "", "att"], [19, 5, 1, "", "bubble"], [19, 5, 1, "", "label"]], "pyk.kast.outer_syntax.Context": [[19, 5, 1, "", "att"], [19, 5, 1, "", "bubble"], [19, 5, 1, "", "label"]], "pyk.kast.outer_syntax.Definition": [[19, 5, 1, "", "modules"], [19, 5, 1, "", "requires"]], "pyk.kast.outer_syntax.Import": [[19, 5, 1, "", "module_name"], [19, 5, 1, "", "public"]], "pyk.kast.outer_syntax.Lexical": [[19, 5, 1, "", "regex"]], "pyk.kast.outer_syntax.Module": [[19, 5, 1, "", "att"], [19, 5, 1, "", "imports"], [19, 5, 1, "", "name"], [19, 5, 1, "", "sentences"]], "pyk.kast.outer_syntax.NonTerminal": [[19, 5, 1, "", "name"], [19, 5, 1, "", "sort"]], "pyk.kast.outer_syntax.PriorityBlock": [[19, 5, 1, "", "assoc"], [19, 5, 1, "", "productions"]], "pyk.kast.outer_syntax.Production": [[19, 5, 1, "", "att"], [19, 5, 1, "", "items"]], "pyk.kast.outer_syntax.ProductionLike": [[19, 5, 1, "", "att"]], "pyk.kast.outer_syntax.Require": [[19, 5, 1, "", "path"]], "pyk.kast.outer_syntax.Rule": [[19, 5, 1, "", "att"], [19, 5, 1, "", "bubble"], [19, 5, 1, "", "label"]], "pyk.kast.outer_syntax.Sort": [[19, 5, 1, "", "args"], [19, 5, 1, "", "name"]], "pyk.kast.outer_syntax.SortDecl": [[19, 5, 1, "", "args"], [19, 5, 1, "", "name"], [19, 5, 1, "", "params"]], "pyk.kast.outer_syntax.StringSentence": [[19, 5, 1, "", "att"], [19, 5, 1, "", "bubble"], [19, 5, 1, "", "label"]], "pyk.kast.outer_syntax.SyntaxAssoc": [[19, 5, 1, "", "assoc"], [19, 5, 1, "", "klabels"]], "pyk.kast.outer_syntax.SyntaxDecl": [[19, 5, 1, "", "att"], [19, 5, 1, "", "decl"]], "pyk.kast.outer_syntax.SyntaxDefn": [[19, 5, 1, "", "blocks"], [19, 5, 1, "", "decl"]], "pyk.kast.outer_syntax.SyntaxLexical": [[19, 5, 1, "", "name"], [19, 5, 1, "", "regex"]], "pyk.kast.outer_syntax.SyntaxPriority": [[19, 5, 1, "", "groups"]], "pyk.kast.outer_syntax.SyntaxSynonym": [[19, 5, 1, "", "att"], [19, 5, 1, "", "new"], [19, 5, 1, "", "old"]], "pyk.kast.outer_syntax.Terminal": [[19, 5, 1, "", "value"]], "pyk.kast.outer_syntax.UserList": [[19, 5, 1, "", "att"], [19, 5, 1, "", "non_empty"], [19, 5, 1, "", "sep"], [19, 5, 1, "", "sort"]], "pyk.kast.parser": [[20, 2, 1, "", "KAstParser"]], "pyk.kast.parser.KAstParser": [[20, 3, 1, "", "eof"], [20, 3, 1, "", "k"], [20, 3, 1, "", "kitem"], [20, 3, 1, "", "klabel"], [20, 3, 1, "", "klist"]], "pyk.kast.pretty": [[21, 2, 1, "", "PrettyPrinter"], [21, 1, 1, "", "assoc_with_unit"], [21, 1, 1, "", "build_symbol_table"], [21, 1, 1, "", "indent"], [21, 1, 1, "", "paren"], [21, 1, 1, "", "unparser_for_production"]], "pyk.kast.pretty.PrettyPrinter": [[21, 5, 1, "", "definition"], [21, 3, 1, "", "print"], [21, 4, 1, "", "symbol_table"]], "pyk.kast.rewrite": [[22, 1, 1, "", "indexed_rewrite"]], "pyk.kast.utils": [[23, 1, 1, "", "parse_outer"], [23, 1, 1, "", "slurp_definitions"]], "pyk.kbuild": [[25, 
0, 0, "-", "config"], [26, 0, 0, "-", "kbuild"], [27, 0, 0, "-", "project"], [28, 0, 0, "-", "utils"]], "pyk.kbuild.kbuild": [[26, 2, 1, "", "KBuild"], [26, 2, 1, "", "KBuildEnv"]], "pyk.kbuild.kbuild.KBuild": [[26, 3, 1, "", "definition_dir"], [26, 4, 1, "", "k_version"], [26, 5, 1, "", "kdist_dir"], [26, 3, 1, "", "kompile"], [26, 3, 1, "", "up_to_date"]], "pyk.kbuild.kbuild.KBuildEnv": [[26, 3, 1, "", "create_temp"], [26, 3, 1, "", "kompile"], [26, 5, 1, "", "path"], [26, 5, 1, "", "project"], [26, 3, 1, "", "sync"]], "pyk.kbuild.project": [[27, 2, 1, "", "PackageSource"], [27, 2, 1, "", "PathSource"], [27, 2, 1, "", "Project"], [27, 2, 1, "", "Source"], [27, 2, 1, "", "Target"]], "pyk.kbuild.project.PackageSource": [[27, 5, 1, "", "package"], [27, 3, 1, "", "resolve"]], "pyk.kbuild.project.PathSource": [[27, 5, 1, "", "path"], [27, 3, 1, "", "resolve"]], "pyk.kbuild.project.Project": [[27, 4, 1, "", "all_files"], [27, 5, 1, "", "dependencies"], [27, 3, 1, "", "get_target"], [27, 3, 1, "", "load"], [27, 3, 1, "", "load_from_dir"], [27, 5, 1, "", "name"], [27, 5, 1, "", "path"], [27, 4, 1, "", "project_file"], [27, 4, 1, "", "resource_file_names"], [27, 4, 1, "", "resource_files"], [27, 5, 1, "", "resources"], [27, 5, 1, "", "source_dir"], [27, 4, 1, "", "source_file_names"], [27, 4, 1, "", "source_files"], [27, 4, 1, "", "sub_projects"], [27, 5, 1, "", "targets"], [27, 5, 1, "", "version"]], "pyk.kbuild.project.Source": [[27, 3, 1, "", "from_dict"], [27, 3, 1, "", "resolve"]], "pyk.kbuild.project.Target": [[27, 5, 1, "", "args"], [27, 5, 1, "", "name"]], "pyk.kbuild.utils": [[28, 2, 1, "", "KVersion"], [28, 1, 1, "", "find_file_upwards"], [28, 1, 1, "", "k_version"], [28, 1, 1, "", "sync_files"]], "pyk.kbuild.utils.KVersion": [[28, 2, 1, "", "Git"], [28, 5, 1, "", "PATTERN"], [28, 5, 1, "", "git"], [28, 5, 1, "", "major"], [28, 5, 1, "", "minor"], [28, 3, 1, "", "parse"], [28, 5, 1, "", "patch"], [28, 4, 1, "", "text"]], "pyk.kbuild.utils.KVersion.Git": [[28, 5, 1, "", "ahead"], [28, 5, 1, "", "dirty"], [28, 5, 1, "", "rev"]], "pyk.kcfg": [[30, 0, 0, "-", "exploration"], [31, 0, 0, "-", "explore"], [32, 0, 0, "-", "kcfg"], [33, 0, 0, "-", "minimize"], [34, 0, 0, "-", "semantics"], [35, 0, 0, "-", "show"], [36, 0, 0, "-", "store"], [37, 0, 0, "-", "tui"]], "pyk.kcfg.exploration": [[30, 2, 1, "", "KCFGExploration"], [30, 2, 1, "", "KCFGExplorationNodeAttr"]], "pyk.kcfg.exploration.KCFGExploration": [[30, 3, 1, "", "add_terminal"], [30, 4, 1, "", "explorable"], [30, 3, 1, "", "from_dict"], [30, 3, 1, "", "is_explorable"], [30, 3, 1, "", "is_terminal"], [30, 5, 1, "", "kcfg"], [30, 3, 1, "", "minimize_kcfg"], [30, 3, 1, "", "prune"], [30, 3, 1, "", "remove_node"], [30, 3, 1, "", "remove_terminal"], [30, 4, 1, "", "terminal"], [30, 4, 1, "", "terminal_ids"], [30, 3, 1, "", "to_dict"]], "pyk.kcfg.exploration.KCFGExplorationNodeAttr": [[30, 5, 1, "", "TERMINAL"]], "pyk.kcfg.explore": [[31, 2, 1, "", "KCFGExplore"]], "pyk.kcfg.explore.KCFGExplore": [[31, 3, 1, "", "check_extendable"], [31, 5, 1, "", "cterm_symbolic"], [31, 3, 1, "", "extend_cterm"], [31, 5, 1, "", "id"], [31, 3, 1, "", "implication_failure_reason"], [31, 5, 1, "", "kcfg_semantics"], [31, 3, 1, "", "pretty_print"], [31, 3, 1, "", "section_edge"], [31, 3, 1, "", "simplify"], [31, 3, 1, "", "step"]], "pyk.kcfg.kcfg": [[32, 2, 1, "", "Abstract"], [32, 2, 1, "", "Branch"], [32, 2, 1, "", "KCFG"], [32, 2, 1, "", "KCFGExtendResult"], [32, 2, 1, "", "KCFGNodeAttr"], [32, 2, 1, "", "KCFGStore"], [32, 2, 1, "", "NDBranch"], [32, 2, 1, 
"", "NodeAttr"], [32, 2, 1, "", "Step"], [32, 2, 1, "", "Stuck"], [32, 2, 1, "", "Vacuous"]], "pyk.kcfg.kcfg.Abstract": [[32, 5, 1, "", "cterm"]], "pyk.kcfg.kcfg.Branch": [[32, 5, 1, "", "constraints"], [32, 5, 1, "", "heuristic"], [32, 5, 1, "", "info"]], "pyk.kcfg.kcfg.KCFG": [[32, 2, 1, "", "Cover"], [32, 2, 1, "", "Edge"], [32, 2, 1, "", "EdgeLike"], [32, 2, 1, "", "MergedEdge"], [32, 2, 1, "", "MultiEdge"], [32, 2, 1, "", "NDBranch"], [32, 2, 1, "", "Node"], [32, 2, 1, "", "Split"], [32, 2, 1, "", "Successor"], [32, 3, 1, "", "add_alias"], [32, 3, 1, "", "add_attr"], [32, 3, 1, "", "add_node"], [32, 3, 1, "", "add_stuck"], [32, 3, 1, "", "add_successor"], [32, 3, 1, "", "add_vacuous"], [32, 3, 1, "", "aliases"], [32, 3, 1, "", "contains_cover"], [32, 3, 1, "", "contains_edge"], [32, 3, 1, "", "contains_merged_edge"], [32, 3, 1, "", "contains_ndbranch"], [32, 3, 1, "", "contains_node"], [32, 3, 1, "", "contains_split"], [32, 3, 1, "", "cover"], [32, 4, 1, "", "covered"], [32, 3, 1, "", "covers"], [32, 3, 1, "", "create_cover"], [32, 3, 1, "", "create_edge"], [32, 3, 1, "", "create_merged_edge"], [32, 3, 1, "", "create_ndbranch"], [32, 3, 1, "", "create_node"], [32, 3, 1, "", "create_split"], [32, 3, 1, "", "create_split_by_nodes"], [32, 3, 1, "", "discard_attr"], [32, 3, 1, "", "discard_stuck"], [32, 3, 1, "", "discard_vacuous"], [32, 3, 1, "", "edge"], [32, 3, 1, "", "edge_likes"], [32, 3, 1, "", "edges"], [32, 3, 1, "", "extend"], [32, 3, 1, "", "from_claim"], [32, 3, 1, "", "from_dict"], [32, 3, 1, "", "from_json"], [32, 3, 1, "", "general_edges"], [32, 3, 1, "", "get_node"], [32, 3, 1, "", "is_covered"], [32, 3, 1, "", "is_leaf"], [32, 3, 1, "", "is_ndbranch"], [32, 3, 1, "", "is_root"], [32, 3, 1, "", "is_split"], [32, 3, 1, "", "is_stuck"], [32, 3, 1, "", "is_vacuous"], [32, 4, 1, "", "leaves"], [32, 3, 1, "", "let_node"], [32, 3, 1, "", "merged_edge"], [32, 3, 1, "", "merged_edges"], [32, 3, 1, "", "ndbranches"], [32, 3, 1, "", "node"], [32, 4, 1, "", "nodes"], [32, 3, 1, "", "path_length"], [32, 3, 1, "", "paths_between"], [32, 3, 1, "", "predecessors"], [32, 3, 1, "", "prune"], [32, 3, 1, "", "reachable_nodes"], [32, 3, 1, "", "read_cfg_data"], [32, 3, 1, "", "read_node_data"], [32, 3, 1, "", "remove_alias"], [32, 3, 1, "", "remove_attr"], [32, 3, 1, "", "remove_cover"], [32, 3, 1, "", "remove_edge"], [32, 3, 1, "", "remove_edges_around"], [32, 3, 1, "", "remove_merged_edge"], [32, 3, 1, "", "remove_node"], [32, 3, 1, "", "remove_stuck"], [32, 3, 1, "", "remove_vacuous"], [32, 3, 1, "", "replace_node"], [32, 4, 1, "", "root"], [32, 3, 1, "", "shortest_distance_between"], [32, 3, 1, "", "shortest_path_between"], [32, 3, 1, "", "split_on_constraints"], [32, 3, 1, "", "splits"], [32, 4, 1, "", "stuck"], [32, 3, 1, "", "successors"], [32, 3, 1, "", "to_dict"], [32, 3, 1, "", "to_dict_no_nodes"], [32, 3, 1, "", "to_json"], [32, 3, 1, "", "to_module"], [32, 3, 1, "", "to_rules"], [32, 4, 1, "", "uncovered"], [32, 4, 1, "", "vacuous"], [32, 3, 1, "", "write_cfg_data"], [32, 3, 1, "", "zero_depth_between"]], "pyk.kcfg.kcfg.KCFG.Cover": [[32, 5, 1, "", "csubst"], [32, 3, 1, "", "from_dict"], [32, 3, 1, "", "replace_source"], [32, 3, 1, "", "replace_target"], [32, 5, 1, "", "source"], [32, 5, 1, "", "target"], [32, 3, 1, "", "to_dict"]], "pyk.kcfg.kcfg.KCFG.Edge": [[32, 5, 1, "", "depth"], [32, 3, 1, "", "from_dict"], [32, 3, 1, "", "replace_source"], [32, 3, 1, "", "replace_target"], [32, 5, 1, "", "rules"], [32, 5, 1, "", "source"], [32, 5, 1, "", "target"], [32, 3, 1, "", "to_dict"], 
[32, 3, 1, "", "to_rule"]], "pyk.kcfg.kcfg.KCFG.EdgeLike": [[32, 5, 1, "", "source"], [32, 5, 1, "", "target"], [32, 4, 1, "", "targets"]], "pyk.kcfg.kcfg.KCFG.MergedEdge": [[32, 5, 1, "", "edges"], [32, 3, 1, "", "from_dict"], [32, 3, 1, "", "replace_source"], [32, 3, 1, "", "replace_target"], [32, 5, 1, "", "source"], [32, 5, 1, "", "target"], [32, 3, 1, "", "to_dict"], [32, 3, 1, "", "to_rule"]], "pyk.kcfg.kcfg.KCFG.MultiEdge": [[32, 5, 1, "", "source"], [32, 3, 1, "", "with_single_target"]], "pyk.kcfg.kcfg.KCFG.NDBranch": [[32, 4, 1, "", "edges"], [32, 3, 1, "", "from_dict"], [32, 3, 1, "", "replace_source"], [32, 3, 1, "", "replace_target"], [32, 5, 1, "", "rules"], [32, 5, 1, "", "source"], [32, 4, 1, "", "targets"], [32, 3, 1, "", "to_dict"], [32, 3, 1, "", "with_single_target"]], "pyk.kcfg.kcfg.KCFG.Node": [[32, 3, 1, "", "add_attr"], [32, 5, 1, "", "attrs"], [32, 5, 1, "", "cterm"], [32, 3, 1, "", "discard_attr"], [32, 4, 1, "", "free_vars"], [32, 3, 1, "", "from_dict"], [32, 5, 1, "", "id"], [32, 3, 1, "", "let"], [32, 3, 1, "", "remove_attr"], [32, 3, 1, "", "to_dict"]], "pyk.kcfg.kcfg.KCFG.Split": [[32, 4, 1, "", "covers"], [32, 3, 1, "", "from_dict"], [32, 3, 1, "", "replace_source"], [32, 3, 1, "", "replace_target"], [32, 5, 1, "", "source"], [32, 4, 1, "", "splits"], [32, 4, 1, "", "targets"], [32, 3, 1, "", "to_dict"], [32, 3, 1, "", "with_single_target"]], "pyk.kcfg.kcfg.KCFG.Successor": [[32, 3, 1, "", "from_dict"], [32, 3, 1, "", "replace_source"], [32, 3, 1, "", "replace_target"], [32, 5, 1, "", "source"], [32, 4, 1, "", "source_vars"], [32, 4, 1, "", "target_ids"], [32, 4, 1, "", "target_vars"], [32, 4, 1, "", "targets"], [32, 3, 1, "", "to_dict"]], "pyk.kcfg.kcfg.KCFGNodeAttr": [[32, 5, 1, "", "STUCK"], [32, 5, 1, "", "VACUOUS"]], "pyk.kcfg.kcfg.KCFGStore": [[32, 4, 1, "", "kcfg_json_path"], [32, 4, 1, "", "kcfg_node_dir"], [32, 3, 1, "", "kcfg_node_path"], [32, 3, 1, "", "read_cfg_data"], [32, 3, 1, "", "read_node_data"], [32, 5, 1, "", "store_path"], [32, 3, 1, "", "write_cfg_data"]], "pyk.kcfg.kcfg.NDBranch": [[32, 5, 1, "", "cterms"], [32, 5, 1, "", "logs"], [32, 5, 1, "", "rule_labels"]], "pyk.kcfg.kcfg.NodeAttr": [[32, 5, 1, "", "value"]], "pyk.kcfg.kcfg.Step": [[32, 5, 1, "", "cterm"], [32, 5, 1, "", "cut"], [32, 5, 1, "", "depth"], [32, 5, 1, "", "info"], [32, 5, 1, "", "logs"], [32, 5, 1, "", "rule_labels"]], "pyk.kcfg.minimize": [[33, 2, 1, "", "KCFGMinimizer"]], "pyk.kcfg.minimize.KCFGMinimizer": [[33, 5, 1, "", "kcfg"], [33, 5, 1, "", "kdef"], [33, 3, 1, "", "lift_edge"], [33, 3, 1, "", "lift_edges"], [33, 3, 1, "", "lift_split_edge"], [33, 3, 1, "", "lift_split_split"], [33, 3, 1, "", "lift_splits"], [33, 3, 1, "", "merge_nodes"], [33, 3, 1, "", "minimize"], [33, 5, 1, "", "semantics"]], "pyk.kcfg.semantics": [[34, 2, 1, "", "DefaultSemantics"], [34, 2, 1, "", "KCFGSemantics"]], "pyk.kcfg.semantics.DefaultSemantics": [[34, 3, 1, "", "abstract_node"], [34, 3, 1, "", "can_make_custom_step"], [34, 3, 1, "", "custom_step"], [34, 3, 1, "", "is_loop"], [34, 3, 1, "", "is_mergeable"], [34, 3, 1, "", "is_terminal"], [34, 3, 1, "", "same_loop"]], "pyk.kcfg.semantics.KCFGSemantics": [[34, 3, 1, "", "abstract_node"], [34, 3, 1, "", "can_make_custom_step"], [34, 3, 1, "", "custom_step"], [34, 3, 1, "", "is_loop"], [34, 3, 1, "", "is_mergeable"], [34, 3, 1, "", "is_terminal"], [34, 3, 1, "", "same_loop"]], "pyk.kcfg.show": [[35, 2, 1, "", "KCFGShow"], [35, 2, 1, "", "NodePrinter"]], "pyk.kcfg.show.KCFGShow": [[35, 3, 1, "", "dot"], [35, 3, 1, "", "dump"], [35, 3, 1, 
"", "hide_cells"], [35, 5, 1, "", "kprint"], [35, 3, 1, "", "make_unique_segments"], [35, 5, 1, "", "node_printer"], [35, 3, 1, "", "node_short_info"], [35, 3, 1, "", "pretty"], [35, 3, 1, "", "pretty_segments"], [35, 3, 1, "", "show"], [35, 3, 1, "", "simplify_config"], [35, 3, 1, "", "to_module"]], "pyk.kcfg.show.NodePrinter": [[35, 5, 1, "", "full_printer"], [35, 5, 1, "", "kprint"], [35, 5, 1, "", "minimize"], [35, 3, 1, "", "node_attrs"], [35, 3, 1, "", "print_node"]], "pyk.kcfg.store": [[36, 2, 1, "", "OptimizedNodeStore"]], "pyk.kcfg.tui": [[37, 2, 1, "", "BehaviorView"], [37, 2, 1, "", "Constraint"], [37, 2, 1, "", "Custom"], [37, 2, 1, "", "GraphChunk"], [37, 2, 1, "", "Info"], [37, 2, 1, "", "KCFGViewer"], [37, 2, 1, "", "NavWidget"], [37, 2, 1, "", "NodeView"], [37, 2, 1, "", "Status"], [37, 2, 1, "", "Term"]], "pyk.kcfg.tui.BehaviorView": [[37, 2, 1, "", "Selected"], [37, 5, 1, "", "can_focus"], [37, 5, 1, "", "can_focus_children"], [37, 3, 1, "", "compose"], [37, 3, 1, "", "on_click"]], "pyk.kcfg.tui.BehaviorView.Selected": [[37, 5, 1, "", "bubble"], [37, 5, 1, "", "handler_name"], [37, 5, 1, "", "no_dispatch"], [37, 5, 1, "", "time"], [37, 5, 1, "", "verbose"]], "pyk.kcfg.tui.Constraint": [[37, 5, 1, "", "can_focus"], [37, 5, 1, "", "can_focus_children"], [37, 3, 1, "", "on_click"]], "pyk.kcfg.tui.Custom": [[37, 5, 1, "", "can_focus"], [37, 5, 1, "", "can_focus_children"], [37, 3, 1, "", "on_click"]], "pyk.kcfg.tui.GraphChunk": [[37, 2, 1, "", "Selected"], [37, 5, 1, "", "can_focus"], [37, 5, 1, "", "can_focus_children"], [37, 3, 1, "", "on_click"], [37, 3, 1, "", "on_enter"], [37, 3, 1, "", "on_leave"]], "pyk.kcfg.tui.GraphChunk.Selected": [[37, 5, 1, "", "bubble"], [37, 5, 1, "", "chunk_id"], [37, 5, 1, "", "handler_name"], [37, 5, 1, "", "no_dispatch"], [37, 5, 1, "", "time"], [37, 5, 1, "", "verbose"]], "pyk.kcfg.tui.Info": [[37, 5, 1, "", "can_focus"], [37, 5, 1, "", "can_focus_children"], [37, 3, 1, "", "compose"], [37, 5, 1, "", "text"], [37, 3, 1, "", "update"], [37, 3, 1, "", "watch_text"]], "pyk.kcfg.tui.KCFGViewer": [[37, 5, 1, "", "BINDINGS"], [37, 5, 1, "", "CSS_PATH"], [37, 3, 1, "", "action_keystroke"], [37, 3, 1, "", "compose"], [37, 3, 1, "", "on_graph_chunk_selected"]], "pyk.kcfg.tui.NavWidget": [[37, 5, 1, "", "BINDINGS"], [37, 2, 1, "", "Selected"], [37, 5, 1, "", "can_focus"], [37, 5, 1, "", "can_focus_children"], [37, 3, 1, "", "compose"], [37, 5, 1, "", "text"], [37, 3, 1, "", "update"], [37, 3, 1, "", "watch_text"]], "pyk.kcfg.tui.NavWidget.Selected": [[37, 5, 1, "", "bubble"], [37, 5, 1, "", "handler_name"], [37, 5, 1, "", "no_dispatch"], [37, 5, 1, "", "time"], [37, 5, 1, "", "verbose"]], "pyk.kcfg.tui.NodeView": [[37, 5, 1, "", "can_focus"], [37, 5, 1, "", "can_focus_children"], [37, 3, 1, "", "compose"], [37, 3, 1, "", "on_behavior_view_selected"], [37, 3, 1, "", "on_constraint_selected"], [37, 3, 1, "", "on_custom_selected"], [37, 3, 1, "", "on_mount"], [37, 3, 1, "", "on_status_selected"], [37, 3, 1, "", "on_term_selected"], [37, 3, 1, "", "toggle_option"], [37, 3, 1, "", "toggle_view"], [37, 3, 1, "", "update"]], "pyk.kcfg.tui.Status": [[37, 5, 1, "", "can_focus"], [37, 5, 1, "", "can_focus_children"], [37, 3, 1, "", "on_click"]], "pyk.kcfg.tui.Term": [[37, 5, 1, "", "can_focus"], [37, 5, 1, "", "can_focus_children"], [37, 3, 1, "", "on_click"]], "pyk.kcovr": [[38, 1, 1, "", "count_lines_covered"], [38, 1, 1, "", "count_lines_file"], [38, 1, 1, "", "count_lines_global"], [38, 1, 1, "", "count_rules_covered"], [38, 1, 1, "", "create_cover_map"], 
[38, 1, 1, "", "create_rule_map"], [38, 1, 1, "", "create_rule_map_by_file"], [38, 1, 1, "", "create_rule_map_by_line"], [38, 1, 1, "", "main"], [38, 1, 1, "", "parse_args"], [38, 1, 1, "", "render_classes"], [38, 1, 1, "", "render_coverage_xml"], [38, 1, 1, "", "render_lines"]], "pyk.kdist": [[40, 0, 0, "-", "api"], [41, 0, 0, "-", "utils"]], "pyk.kdist.api": [[40, 2, 1, "", "Target"], [40, 2, 1, "", "TargetId"], [40, 1, 1, "", "valid_id"]], "pyk.kdist.api.Target": [[40, 3, 1, "", "build"], [40, 3, 1, "", "context"], [40, 3, 1, "", "deps"], [40, 3, 1, "", "manifest"], [40, 3, 1, "", "source"]], "pyk.kdist.api.TargetId": [[40, 4, 1, "", "full_name"], [40, 3, 1, "", "parse"], [40, 5, 1, "", "plugin_name"], [40, 5, 1, "", "target_name"]], "pyk.kdist.utils": [[41, 1, 1, "", "cwd"], [41, 1, 1, "", "files_for_path"], [41, 1, 1, "", "package_path"], [41, 1, 1, "", "timestamp"]], "pyk.kllvm": [[43, 0, 0, "-", "ast"], [44, 0, 0, "-", "compiler"], [45, 0, 0, "-", "convert"], [46, 0, 0, "-", "hints"], [48, 0, 0, "-", "importer"], [49, 0, 0, "-", "load"], [50, 0, 0, "-", "load_static"], [51, 0, 0, "-", "parser"], [52, 0, 0, "-", "runtime"], [53, 0, 0, "-", "utils"]], "pyk.kllvm.compiler": [[44, 1, 1, "", "compile_kllvm"], [44, 1, 1, "", "compile_runtime"], [44, 1, 1, "", "generate_hints"]], "pyk.kllvm.convert": [[45, 1, 1, "", "definition_to_llvm"], [45, 1, 1, "", "llvm_to_definition"], [45, 1, 1, "", "llvm_to_module"], [45, 1, 1, "", "llvm_to_pattern"], [45, 1, 1, "", "llvm_to_sentence"], [45, 1, 1, "", "llvm_to_sort"], [45, 1, 1, "", "llvm_to_sort_var"], [45, 1, 1, "", "module_to_llvm"], [45, 1, 1, "", "pattern_to_llvm"], [45, 1, 1, "", "sentence_to_llvm"], [45, 1, 1, "", "sort_to_llvm"]], "pyk.kllvm.hints": [[47, 0, 0, "-", "prooftrace"]], "pyk.kllvm.hints.prooftrace": [[47, 2, 1, "", "KoreHeader"], [47, 2, 1, "", "LLVMArgument"], [47, 2, 1, "", "LLVMEventAnnotated"], [47, 2, 1, "", "LLVMEventType"], [47, 2, 1, "", "LLVMFunctionEvent"], [47, 2, 1, "", "LLVMHookEvent"], [47, 2, 1, "", "LLVMPatternMatchingFailureEvent"], [47, 2, 1, "", "LLVMRewriteEvent"], [47, 2, 1, "", "LLVMRewriteTrace"], [47, 2, 1, "", "LLVMRewriteTraceIterator"], [47, 2, 1, "", "LLVMRuleEvent"], [47, 2, 1, "", "LLVMSideConditionEventEnter"], [47, 2, 1, "", "LLVMSideConditionEventExit"], [47, 2, 1, "", "LLVMStepEvent"]], "pyk.kllvm.hints.prooftrace.KoreHeader": [[47, 3, 1, "", "__init__"], [47, 5, 1, "", "_kore_header"], [47, 3, 1, "", "create"]], "pyk.kllvm.hints.prooftrace.LLVMArgument": [[47, 3, 1, "", "__init__"], [47, 3, 1, "", "__repr__"], [47, 5, 1, "", "_argument"], [47, 3, 1, "", "is_kore_pattern"], [47, 3, 1, "", "is_step_event"], [47, 4, 1, "", "kore_pattern"], [47, 4, 1, "", "step_event"]], "pyk.kllvm.hints.prooftrace.LLVMEventAnnotated": [[47, 3, 1, "", "__init__"], [47, 5, 1, "", "_annotated_llvm_event"], [47, 4, 1, "", "event"], [47, 4, 1, "", "type"]], "pyk.kllvm.hints.prooftrace.LLVMEventType": [[47, 3, 1, "", "__init__"], [47, 5, 1, "", "_event_type"], [47, 4, 1, "", "is_initial_config"], [47, 4, 1, "", "is_pre_trace"], [47, 4, 1, "", "is_trace"]], "pyk.kllvm.hints.prooftrace.LLVMFunctionEvent": [[47, 3, 1, "", "__init__"], [47, 3, 1, "", "__repr__"], [47, 5, 1, "", "_function_event"], [47, 4, 1, "", "args"], [47, 4, 1, "", "name"], [47, 4, 1, "", "relative_position"]], "pyk.kllvm.hints.prooftrace.LLVMHookEvent": [[47, 3, 1, "", "__init__"], [47, 3, 1, "", "__repr__"], [47, 5, 1, "", "_hook_event"], [47, 4, 1, "", "args"], [47, 4, 1, "", "name"], [47, 4, 1, "", "relative_position"], [47, 4, 1, "", "result"]], 
"pyk.kllvm.hints.prooftrace.LLVMPatternMatchingFailureEvent": [[47, 3, 1, "", "__init__"], [47, 3, 1, "", "__repr__"], [47, 5, 1, "", "_pattern_matching_failure_event"], [47, 4, 1, "", "function_name"]], "pyk.kllvm.hints.prooftrace.LLVMRewriteEvent": [[47, 4, 1, "", "rule_ordinal"], [47, 4, 1, "", "substitution"]], "pyk.kllvm.hints.prooftrace.LLVMRewriteTrace": [[47, 3, 1, "", "__init__"], [47, 3, 1, "", "__repr__"], [47, 5, 1, "", "_rewrite_trace"], [47, 4, 1, "", "initial_config"], [47, 3, 1, "", "parse"], [47, 4, 1, "", "pre_trace"], [47, 4, 1, "", "trace"], [47, 4, 1, "", "version"]], "pyk.kllvm.hints.prooftrace.LLVMRewriteTraceIterator": [[47, 3, 1, "", "__init__"], [47, 3, 1, "", "__iter__"], [47, 3, 1, "", "__next__"], [47, 3, 1, "", "__repr__"], [47, 5, 1, "", "_rewrite_trace_iterator"], [47, 3, 1, "", "from_file"], [47, 4, 1, "", "version"]], "pyk.kllvm.hints.prooftrace.LLVMRuleEvent": [[47, 3, 1, "", "__init__"], [47, 3, 1, "", "__repr__"], [47, 5, 1, "", "_rule_event"], [47, 4, 1, "", "rule_ordinal"], [47, 4, 1, "", "substitution"]], "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventEnter": [[47, 3, 1, "", "__init__"], [47, 3, 1, "", "__repr__"], [47, 5, 1, "", "_side_condition_event"], [47, 4, 1, "", "rule_ordinal"], [47, 4, 1, "", "substitution"]], "pyk.kllvm.hints.prooftrace.LLVMSideConditionEventExit": [[47, 3, 1, "", "__init__"], [47, 3, 1, "", "__repr__"], [47, 5, 1, "", "_side_condition_end_event"], [47, 4, 1, "", "check_result"], [47, 4, 1, "", "rule_ordinal"]], "pyk.kllvm.importer": [[48, 1, 1, "", "import_from_file"], [48, 1, 1, "", "import_kllvm"], [48, 1, 1, "", "import_runtime"], [48, 1, 1, "", "rtld_local"]], "pyk.kllvm.load_static": [[50, 1, 1, "", "get_kllvm"]], "pyk.kllvm.parser": [[51, 1, 1, "", "parse_definition"], [51, 1, 1, "", "parse_definition_file"], [51, 1, 1, "", "parse_pattern"], [51, 1, 1, "", "parse_pattern_file"], [51, 1, 1, "", "parse_sort"], [51, 1, 1, "", "parse_sort_file"]], "pyk.kllvm.runtime": [[52, 2, 1, "", "Runtime"], [52, 2, 1, "", "Term"]], "pyk.kllvm.runtime.Runtime": [[52, 3, 1, "", "deserialize"], [52, 3, 1, "", "evaluate"], [52, 3, 1, "", "run"], [52, 3, 1, "", "simplify"], [52, 3, 1, "", "simplify_bool"], [52, 3, 1, "", "step"], [52, 3, 1, "", "term"]], "pyk.kllvm.runtime.Term": [[52, 4, 1, "", "pattern"], [52, 3, 1, "", "run"], [52, 3, 1, "", "serialize"], [52, 3, 1, "", "step"]], "pyk.kllvm.utils": [[53, 1, 1, "", "get_requires"]], "pyk.kore": [[56, 0, 0, "-", "kompiled"], [57, 0, 0, "-", "lexer"], [58, 0, 0, "-", "manip"], [59, 0, 0, "-", "match"], [60, 0, 0, "-", "parser"], [61, 0, 0, "-", "pool"], [62, 0, 0, "-", "prelude"], [63, 0, 0, "-", "rpc"], [64, 0, 0, "-", "rule"], [65, 0, 0, "-", "syntax"], [66, 0, 0, "-", "tools"]], "pyk.kore.kompiled": [[56, 2, 1, "", "KompiledKore"], [56, 2, 1, "", "KoreSortTable"], [56, 2, 1, "", "KoreSymbolTable"]], "pyk.kore.kompiled.KompiledKore": [[56, 3, 1, "", "add_injections"], [56, 3, 1, "", "for_definition"], [56, 3, 1, "", "from_dict"], [56, 3, 1, "", "load"], [56, 3, 1, "", "load_from_json"], [56, 3, 1, "", "load_from_kore"], [56, 5, 1, "", "sort_table"], [56, 5, 1, "", "symbol_table"], [56, 3, 1, "", "to_dict"], [56, 3, 1, "", "write"]], "pyk.kore.kompiled.KoreSortTable": [[56, 3, 1, "", "for_definition"], [56, 3, 1, "", "is_subsort"], [56, 3, 1, "", "meet"]], "pyk.kore.kompiled.KoreSymbolTable": [[56, 3, 1, "", "for_definition"], [56, 3, 1, "", "infer_sort"], [56, 3, 1, "", "pattern_sorts"], [56, 3, 1, "", "resolve"]], "pyk.kore.lexer": [[57, 2, 1, "", "KoreToken"], [57, 2, 1, "", 
"TokenType"], [57, 1, 1, "", "kore_lexer"]], "pyk.kore.lexer.KoreToken": [[57, 5, 1, "", "text"], [57, 5, 1, "", "type"]], "pyk.kore.lexer.TokenType": [[57, 5, 1, "", "COLON"], [57, 5, 1, "", "COMMA"], [57, 5, 1, "", "EOF"], [57, 5, 1, "", "ID"], [57, 5, 1, "", "KW_ALIAS"], [57, 5, 1, "", "KW_AXIOM"], [57, 5, 1, "", "KW_CLAIM"], [57, 5, 1, "", "KW_ENDMODULE"], [57, 5, 1, "", "KW_HOOKED_SORT"], [57, 5, 1, "", "KW_HOOKED_SYMBOL"], [57, 5, 1, "", "KW_IMPORT"], [57, 5, 1, "", "KW_MODULE"], [57, 5, 1, "", "KW_SORT"], [57, 5, 1, "", "KW_SYMBOL"], [57, 5, 1, "", "KW_WHERE"], [57, 5, 1, "", "LBRACE"], [57, 5, 1, "", "LBRACK"], [57, 5, 1, "", "LPAREN"], [57, 5, 1, "", "ML_AND"], [57, 5, 1, "", "ML_BOTTOM"], [57, 5, 1, "", "ML_CEIL"], [57, 5, 1, "", "ML_DV"], [57, 5, 1, "", "ML_EQUALS"], [57, 5, 1, "", "ML_EXISTS"], [57, 5, 1, "", "ML_FLOOR"], [57, 5, 1, "", "ML_FORALL"], [57, 5, 1, "", "ML_IFF"], [57, 5, 1, "", "ML_IMPLIES"], [57, 5, 1, "", "ML_IN"], [57, 5, 1, "", "ML_LEFT_ASSOC"], [57, 5, 1, "", "ML_MU"], [57, 5, 1, "", "ML_NEXT"], [57, 5, 1, "", "ML_NOT"], [57, 5, 1, "", "ML_NU"], [57, 5, 1, "", "ML_OR"], [57, 5, 1, "", "ML_REWRITES"], [57, 5, 1, "", "ML_RIGHT_ASSOC"], [57, 5, 1, "", "ML_TOP"], [57, 5, 1, "", "RBRACE"], [57, 5, 1, "", "RBRACK"], [57, 5, 1, "", "RPAREN"], [57, 5, 1, "", "SET_VAR_ID"], [57, 5, 1, "", "STRING"], [57, 5, 1, "", "SYMBOL_ID"], [57, 5, 1, "", "WALRUS"]], "pyk.kore.manip": [[58, 1, 1, "", "conjuncts"], [58, 1, 1, "", "free_occs"]], "pyk.kore.match": [[59, 1, 1, "", "app"], [59, 1, 1, "", "arg"], [59, 1, 1, "", "args"], [59, 1, 1, "", "case_symbol"], [59, 1, 1, "", "inj"], [59, 1, 1, "", "kore_bool"], [59, 1, 1, "", "kore_bytes"], [59, 1, 1, "", "kore_id"], [59, 1, 1, "", "kore_int"], [59, 1, 1, "", "kore_list_of"], [59, 1, 1, "", "kore_map_of"], [59, 1, 1, "", "kore_rangemap_of"], [59, 1, 1, "", "kore_set_of"], [59, 1, 1, "", "kore_str"], [59, 1, 1, "", "match_app"], [59, 1, 1, "", "match_dv"], [59, 1, 1, "", "match_inj"], [59, 1, 1, "", "match_left_assoc"], [59, 1, 1, "", "match_list"], [59, 1, 1, "", "match_map"], [59, 1, 1, "", "match_rangemap"], [59, 1, 1, "", "match_set"], [59, 1, 1, "", "match_symbol"]], "pyk.kore.parser": [[60, 2, 1, "", "KoreParser"]], "pyk.kore.parser.KoreParser": [[60, 3, 1, "", "alias_decl"], [60, 3, 1, "", "andd"], [60, 3, 1, "", "app"], [60, 3, 1, "", "axiom"], [60, 3, 1, "", "bottom"], [60, 3, 1, "", "ceil"], [60, 3, 1, "", "claim"], [60, 3, 1, "", "definition"], [60, 3, 1, "", "dv"], [60, 3, 1, "", "elem_var"], [60, 4, 1, "", "eof"], [60, 3, 1, "", "equals"], [60, 3, 1, "", "exists"], [60, 3, 1, "", "floor"], [60, 3, 1, "", "forall"], [60, 3, 1, "", "hooked_sort_decl"], [60, 3, 1, "", "hooked_symbol_decl"], [60, 3, 1, "", "id"], [60, 3, 1, "", "iff"], [60, 3, 1, "", "implies"], [60, 3, 1, "", "importt"], [60, 3, 1, "", "inn"], [60, 3, 1, "", "left_assoc"], [60, 3, 1, "", "ml_pattern"], [60, 3, 1, "", "module"], [60, 3, 1, "", "mu"], [60, 3, 1, "", "multi_or"], [60, 3, 1, "", "next"], [60, 3, 1, "", "nott"], [60, 3, 1, "", "nu"], [60, 3, 1, "", "orr"], [60, 3, 1, "", "pattern"], [60, 3, 1, "", "rewrites"], [60, 3, 1, "", "right_assoc"], [60, 3, 1, "", "sentence"], [60, 3, 1, "", "set_var"], [60, 3, 1, "", "set_var_id"], [60, 3, 1, "", "sort"], [60, 3, 1, "", "sort_app"], [60, 3, 1, "", "sort_decl"], [60, 3, 1, "", "sort_var"], [60, 3, 1, "", "string"], [60, 3, 1, "", "symbol"], [60, 3, 1, "", "symbol_decl"], [60, 3, 1, "", "symbol_id"], [60, 3, 1, "", "top"], [60, 3, 1, "", "var_pattern"]], "pyk.kore.pool": [[61, 2, 1, "", 
"KoreServerPool"]], "pyk.kore.pool.KoreServerPool": [[61, 3, 1, "", "close"], [61, 3, 1, "", "submit"]], "pyk.kore.prelude": [[62, 1, 1, "", "and_bool"], [62, 1, 1, "", "bool_dv"], [62, 1, 1, "", "bytes_dv"], [62, 1, 1, "", "dv"], [62, 1, 1, "", "eq_bool"], [62, 1, 1, "", "eq_int"], [62, 1, 1, "", "ge_int"], [62, 1, 1, "", "generated_counter"], [62, 1, 1, "", "generated_top"], [62, 1, 1, "", "gt_int"], [62, 1, 1, "", "implies_bool"], [62, 1, 1, "", "init_generated_top_cell"], [62, 1, 1, "", "inj"], [62, 1, 1, "", "int_dv"], [62, 1, 1, "", "json2string"], [62, 1, 1, "", "json_entry"], [62, 1, 1, "", "json_key"], [62, 1, 1, "", "json_list"], [62, 1, 1, "", "json_object"], [62, 1, 1, "", "json_to_kore"], [62, 1, 1, "", "jsons"], [62, 1, 1, "", "k"], [62, 1, 1, "", "k_config_var"], [62, 1, 1, "", "kore_to_json"], [62, 1, 1, "", "kseq"], [62, 1, 1, "", "le_int"], [62, 1, 1, "", "list_pattern"], [62, 1, 1, "", "lt_int"], [62, 1, 1, "", "map_pattern"], [62, 1, 1, "", "ne_bool"], [62, 1, 1, "", "ne_int"], [62, 1, 1, "", "not_bool"], [62, 1, 1, "", "or_bool"], [62, 1, 1, "", "rangemap_pattern"], [62, 1, 1, "", "set_pattern"], [62, 1, 1, "", "str_dv"], [62, 1, 1, "", "string2json"], [62, 1, 1, "", "top_cell_initializer"], [62, 1, 1, "", "xor_bool"]], "pyk.kore.rpc": [[63, 2, 1, "", "AbortedResult"], [63, 2, 1, "", "BoosterServer"], [63, 2, 1, "", "BoosterServerArgs"], [63, 2, 1, "", "BranchingResult"], [63, 2, 1, "", "CutPointResult"], [63, 6, 1, "", "DefaultError"], [63, 2, 1, "", "DepthBoundResult"], [63, 6, 1, "", "DuplicateModuleError"], [63, 2, 1, "", "ExecuteResult"], [63, 2, 1, "", "FallbackReason"], [63, 2, 1, "", "GetModelResult"], [63, 2, 1, "", "HttpTransport"], [63, 6, 1, "", "ImplicationError"], [63, 2, 1, "", "ImpliesResult"], [63, 6, 1, "", "InvalidModuleError"], [63, 2, 1, "", "JsonRpcClient"], [63, 2, 1, "", "JsonRpcClientFacade"], [63, 6, 1, "", "JsonRpcError"], [63, 2, 1, "", "KoreClient"], [63, 6, 1, "", "KoreClientError"], [63, 2, 1, "", "KoreExecLogFormat"], [63, 2, 1, "", "KoreServer"], [63, 2, 1, "", "KoreServerArgs"], [63, 2, 1, "", "KoreServerInfo"], [63, 2, 1, "", "LogEntry"], [63, 2, 1, "", "LogOrigin"], [63, 2, 1, "", "LogRewrite"], [63, 6, 1, "", "ParseError"], [63, 6, 1, "", "PatternError"], [63, 2, 1, "", "RewriteFailure"], [63, 2, 1, "", "RewriteResult"], [63, 2, 1, "", "RewriteSuccess"], [63, 2, 1, "", "SatResult"], [63, 2, 1, "", "SingleSocketTransport"], [63, 6, 1, "", "SmtSolverError"], [63, 2, 1, "", "State"], [63, 2, 1, "", "StopReason"], [63, 2, 1, "", "StuckResult"], [63, 2, 1, "", "TerminalResult"], [63, 2, 1, "", "TimeoutResult"], [63, 2, 1, "", "Transport"], [63, 2, 1, "", "TransportType"], [63, 6, 1, "", "UnknownModuleError"], [63, 2, 1, "", "UnknownResult"], [63, 2, 1, "", "UnsatResult"], [63, 2, 1, "", "VacuousResult"], [63, 1, 1, "", "kore_server"]], "pyk.kore.rpc.AbortedResult": [[63, 5, 1, "", "depth"], [63, 3, 1, "", "from_dict"], [63, 5, 1, "", "logs"], [63, 5, 1, "", "next_states"], [63, 5, 1, "", "reason"], [63, 5, 1, "", "rule"], [63, 5, 1, "", "state"], [63, 5, 1, "", "unknown_predicate"]], "pyk.kore.rpc.BoosterServerArgs": [[63, 5, 1, "", "bug_report"], [63, 5, 1, "", "command"], [63, 5, 1, "", "fallback_on"], [63, 5, 1, "", "haskell_log_entries"], [63, 5, 1, "", "haskell_log_format"], [63, 5, 1, "", "haskell_threads"], [63, 5, 1, "", "interim_simplification"], [63, 5, 1, "", "kompiled_dir"], [63, 5, 1, "", "llvm_kompiled_dir"], [63, 5, 1, "", "log_axioms_file"], [63, 5, 1, "", "log_context"], [63, 5, 1, "", "module_name"], [63, 5, 1, "", 
"no_post_exec_simplify"], [63, 5, 1, "", "not_log_context"], [63, 5, 1, "", "port"], [63, 5, 1, "", "smt_reset_interval"], [63, 5, 1, "", "smt_retry_limit"], [63, 5, 1, "", "smt_tactic"], [63, 5, 1, "", "smt_timeout"]], "pyk.kore.rpc.BranchingResult": [[63, 5, 1, "", "depth"], [63, 3, 1, "", "from_dict"], [63, 5, 1, "", "logs"], [63, 5, 1, "", "next_states"], [63, 5, 1, "", "reason"], [63, 5, 1, "", "rule"], [63, 5, 1, "", "state"]], "pyk.kore.rpc.CutPointResult": [[63, 5, 1, "", "depth"], [63, 3, 1, "", "from_dict"], [63, 5, 1, "", "logs"], [63, 5, 1, "", "next_states"], [63, 5, 1, "", "reason"], [63, 5, 1, "", "rule"], [63, 5, 1, "", "state"]], "pyk.kore.rpc.DefaultError": [[63, 5, 1, "", "code"], [63, 5, 1, "", "data"], [63, 5, 1, "", "message"]], "pyk.kore.rpc.DepthBoundResult": [[63, 5, 1, "", "depth"], [63, 3, 1, "", "from_dict"], [63, 5, 1, "", "logs"], [63, 5, 1, "", "next_states"], [63, 5, 1, "", "reason"], [63, 5, 1, "", "rule"], [63, 5, 1, "", "state"]], "pyk.kore.rpc.DuplicateModuleError": [[63, 5, 1, "", "module_name"]], "pyk.kore.rpc.ExecuteResult": [[63, 5, 1, "", "depth"], [63, 3, 1, "", "from_dict"], [63, 5, 1, "", "logs"], [63, 5, 1, "", "next_states"], [63, 5, 1, "", "reason"], [63, 5, 1, "", "rule"], [63, 5, 1, "", "state"]], "pyk.kore.rpc.FallbackReason": [[63, 5, 1, "", "ABORTED"], [63, 5, 1, "", "BRANCHING"], [63, 5, 1, "", "STUCK"]], "pyk.kore.rpc.GetModelResult": [[63, 3, 1, "", "from_dict"]], "pyk.kore.rpc.HttpTransport": [[63, 3, 1, "", "close"]], "pyk.kore.rpc.ImplicationError": [[63, 5, 1, "", "context"], [63, 5, 1, "", "error"]], "pyk.kore.rpc.ImpliesResult": [[63, 3, 1, "", "from_dict"], [63, 5, 1, "", "implication"], [63, 5, 1, "", "logs"], [63, 5, 1, "", "predicate"], [63, 5, 1, "", "substitution"], [63, 5, 1, "", "valid"]], "pyk.kore.rpc.InvalidModuleError": [[63, 5, 1, "", "context"], [63, 5, 1, "", "error"]], "pyk.kore.rpc.JsonRpcClient": [[63, 3, 1, "", "close"], [63, 3, 1, "", "request"]], "pyk.kore.rpc.JsonRpcClientFacade": [[63, 3, 1, "", "close"], [63, 3, 1, "", "request"]], "pyk.kore.rpc.KoreClient": [[63, 3, 1, "", "add_module"], [63, 3, 1, "", "close"], [63, 3, 1, "", "execute"], [63, 3, 1, "", "get_model"], [63, 3, 1, "", "implies"], [63, 5, 1, "", "port"], [63, 3, 1, "", "simplify"]], "pyk.kore.rpc.KoreExecLogFormat": [[63, 5, 1, "", "ONELINE"], [63, 5, 1, "", "STANDARD"]], "pyk.kore.rpc.KoreServer": [[63, 3, 1, "", "close"], [63, 4, 1, "", "host"], [63, 4, 1, "", "pid"], [63, 4, 1, "", "port"], [63, 3, 1, "", "start"]], "pyk.kore.rpc.KoreServerArgs": [[63, 5, 1, "", "bug_report"], [63, 5, 1, "", "command"], [63, 5, 1, "", "haskell_log_entries"], [63, 5, 1, "", "haskell_log_format"], [63, 5, 1, "", "haskell_threads"], [63, 5, 1, "", "kompiled_dir"], [63, 5, 1, "", "log_axioms_file"], [63, 5, 1, "", "module_name"], [63, 5, 1, "", "port"], [63, 5, 1, "", "smt_reset_interval"], [63, 5, 1, "", "smt_retry_limit"], [63, 5, 1, "", "smt_tactic"], [63, 5, 1, "", "smt_timeout"]], "pyk.kore.rpc.KoreServerInfo": [[63, 5, 1, "", "host"], [63, 5, 1, "", "pid"], [63, 5, 1, "", "port"]], "pyk.kore.rpc.LogEntry": [[63, 3, 1, "", "from_dict"], [63, 3, 1, "", "to_dict"]], "pyk.kore.rpc.LogOrigin": [[63, 5, 1, "", "BOOSTER"], [63, 5, 1, "", "KORE_RPC"], [63, 5, 1, "", "LLVM"], [63, 5, 1, "", "PROXY"], [63, 3, 1, "", "__format__"]], "pyk.kore.rpc.LogRewrite": [[63, 3, 1, "", "from_dict"], [63, 5, 1, "", "origin"], [63, 5, 1, "", "result"], [63, 3, 1, "", "to_dict"]], "pyk.kore.rpc.ParseError": [[63, 5, 1, "", "error"]], "pyk.kore.rpc.PatternError": [[63, 5, 1, "", 
"context"], [63, 5, 1, "", "error"]], "pyk.kore.rpc.RewriteFailure": [[63, 3, 1, "", "from_dict"], [63, 5, 1, "", "reason"], [63, 5, 1, "", "rule_id"], [63, 3, 1, "", "to_dict"]], "pyk.kore.rpc.RewriteResult": [[63, 3, 1, "", "from_dict"], [63, 5, 1, "", "rule_id"], [63, 3, 1, "", "to_dict"]], "pyk.kore.rpc.RewriteSuccess": [[63, 3, 1, "", "from_dict"], [63, 5, 1, "", "rewritten_term"], [63, 5, 1, "", "rule_id"], [63, 3, 1, "", "to_dict"]], "pyk.kore.rpc.SatResult": [[63, 5, 1, "", "model"]], "pyk.kore.rpc.SingleSocketTransport": [[63, 3, 1, "", "close"]], "pyk.kore.rpc.SmtSolverError": [[63, 5, 1, "", "error"], [63, 5, 1, "", "pattern"]], "pyk.kore.rpc.State": [[63, 3, 1, "", "from_dict"], [63, 4, 1, "", "kore"], [63, 5, 1, "", "predicate"], [63, 5, 1, "", "rule_id"], [63, 5, 1, "", "rule_predicate"], [63, 5, 1, "", "rule_substitution"], [63, 5, 1, "", "substitution"], [63, 5, 1, "", "term"]], "pyk.kore.rpc.StopReason": [[63, 5, 1, "", "ABORTED"], [63, 5, 1, "", "BRANCHING"], [63, 5, 1, "", "CUT_POINT_RULE"], [63, 5, 1, "", "DEPTH_BOUND"], [63, 5, 1, "", "STUCK"], [63, 5, 1, "", "TERMINAL_RULE"], [63, 5, 1, "", "TIMEOUT"], [63, 5, 1, "", "VACUOUS"], [63, 3, 1, "", "__format__"]], "pyk.kore.rpc.StuckResult": [[63, 5, 1, "", "depth"], [63, 3, 1, "", "from_dict"], [63, 5, 1, "", "logs"], [63, 5, 1, "", "next_states"], [63, 5, 1, "", "reason"], [63, 5, 1, "", "rule"], [63, 5, 1, "", "state"]], "pyk.kore.rpc.TerminalResult": [[63, 5, 1, "", "depth"], [63, 3, 1, "", "from_dict"], [63, 5, 1, "", "logs"], [63, 5, 1, "", "next_states"], [63, 5, 1, "", "reason"], [63, 5, 1, "", "rule"], [63, 5, 1, "", "state"]], "pyk.kore.rpc.TimeoutResult": [[63, 5, 1, "", "depth"], [63, 3, 1, "", "from_dict"], [63, 5, 1, "", "logs"], [63, 5, 1, "", "next_states"], [63, 5, 1, "", "reason"], [63, 5, 1, "", "rule"], [63, 5, 1, "", "state"]], "pyk.kore.rpc.Transport": [[63, 3, 1, "", "close"], [63, 3, 1, "", "request"]], "pyk.kore.rpc.TransportType": [[63, 5, 1, "", "HTTP"], [63, 5, 1, "", "SINGLE_SOCKET"]], "pyk.kore.rpc.UnknownModuleError": [[63, 5, 1, "", "module_name"]], "pyk.kore.rpc.VacuousResult": [[63, 5, 1, "", "depth"], [63, 3, 1, "", "from_dict"], [63, 5, 1, "", "logs"], [63, 5, 1, "", "next_states"], [63, 5, 1, "", "reason"], [63, 5, 1, "", "rule"], [63, 5, 1, "", "state"]], "pyk.kore.rule": [[64, 2, 1, "", "AppRule"], [64, 2, 1, "", "CeilRule"], [64, 2, 1, "", "EqualsRule"], [64, 2, 1, "", "FunctionRule"], [64, 2, 1, "", "RewriteRule"], [64, 2, 1, "", "Rule"], [64, 2, 1, "", "SimpliRule"]], "pyk.kore.rule.AppRule": [[64, 5, 1, "", "ens"], [64, 3, 1, "", "from_axiom"], [64, 5, 1, "", "lhs"], [64, 5, 1, "", "priority"], [64, 5, 1, "", "req"], [64, 5, 1, "", "rhs"], [64, 5, 1, "", "sort"]], "pyk.kore.rule.CeilRule": [[64, 5, 1, "", "ens"], [64, 3, 1, "", "from_axiom"], [64, 5, 1, "", "lhs"], [64, 5, 1, "", "priority"], [64, 5, 1, "", "req"], [64, 5, 1, "", "rhs"], [64, 5, 1, "", "sort"]], "pyk.kore.rule.EqualsRule": [[64, 5, 1, "", "ens"], [64, 3, 1, "", "from_axiom"], [64, 5, 1, "", "lhs"], [64, 5, 1, "", "priority"], [64, 5, 1, "", "req"], [64, 5, 1, "", "rhs"], [64, 5, 1, "", "sort"]], "pyk.kore.rule.FunctionRule": [[64, 5, 1, "", "anti_left"], [64, 5, 1, "", "arg_sorts"], [64, 5, 1, "", "ens"], [64, 3, 1, "", "from_axiom"], [64, 5, 1, "", "lhs"], [64, 5, 1, "", "priority"], [64, 5, 1, "", "req"], [64, 5, 1, "", "rhs"], [64, 5, 1, "", "sort"], [64, 3, 1, "", "to_axiom"]], "pyk.kore.rule.RewriteRule": [[64, 5, 1, "", "ctx"], [64, 5, 1, "", "ens"], [64, 3, 1, "", "from_axiom"], [64, 5, 1, "", "label"], 
[64, 5, 1, "", "lhs"], [64, 5, 1, "", "priority"], [64, 5, 1, "", "req"], [64, 5, 1, "", "rhs"], [64, 5, 1, "", "sort"], [64, 3, 1, "", "to_axiom"], [64, 5, 1, "", "uid"]], "pyk.kore.rule.Rule": [[64, 5, 1, "", "ens"], [64, 3, 1, "", "extract_all"], [64, 3, 1, "", "from_axiom"], [64, 3, 1, "", "is_rule"], [64, 5, 1, "", "lhs"], [64, 5, 1, "", "priority"], [64, 5, 1, "", "req"], [64, 5, 1, "", "rhs"], [64, 5, 1, "", "sort"], [64, 3, 1, "", "to_axiom"]], "pyk.kore.rule.SimpliRule": [[64, 5, 1, "", "lhs"], [64, 5, 1, "", "sort"], [64, 3, 1, "", "to_axiom"]], "pyk.kore.syntax": [[65, 2, 1, "", "AliasDecl"], [65, 2, 1, "", "And"], [65, 2, 1, "", "App"], [65, 2, 1, "", "Assoc"], [65, 2, 1, "", "Axiom"], [65, 2, 1, "", "AxiomLike"], [65, 2, 1, "", "BinaryConn"], [65, 2, 1, "", "BinaryPred"], [65, 2, 1, "", "Bottom"], [65, 2, 1, "", "Ceil"], [65, 2, 1, "", "Claim"], [65, 2, 1, "", "DV"], [65, 2, 1, "", "Definition"], [65, 2, 1, "", "EVar"], [65, 2, 1, "", "Equals"], [65, 2, 1, "", "Exists"], [65, 2, 1, "", "Floor"], [65, 2, 1, "", "Forall"], [65, 2, 1, "", "Id"], [65, 2, 1, "", "Iff"], [65, 2, 1, "", "Implies"], [65, 2, 1, "", "Import"], [65, 2, 1, "", "In"], [65, 2, 1, "", "Kore"], [65, 2, 1, "", "LeftAssoc"], [65, 2, 1, "", "MLConn"], [65, 2, 1, "", "MLFixpoint"], [65, 2, 1, "", "MLPattern"], [65, 2, 1, "", "MLPred"], [65, 2, 1, "", "MLQuant"], [65, 2, 1, "", "MLRewrite"], [65, 2, 1, "", "Module"], [65, 2, 1, "", "Mu"], [65, 2, 1, "", "MultiaryConn"], [65, 2, 1, "", "Next"], [65, 2, 1, "", "Not"], [65, 2, 1, "", "Nu"], [65, 2, 1, "", "NullaryConn"], [65, 2, 1, "", "Or"], [65, 2, 1, "", "Pattern"], [65, 2, 1, "", "Rewrites"], [65, 2, 1, "", "RightAssoc"], [65, 2, 1, "", "RoundPred"], [65, 2, 1, "", "SVar"], [65, 2, 1, "", "Sentence"], [65, 2, 1, "", "SetVarId"], [65, 2, 1, "", "Sort"], [65, 2, 1, "", "SortApp"], [65, 2, 1, "", "SortDecl"], [65, 2, 1, "", "SortVar"], [65, 2, 1, "", "String"], [65, 2, 1, "", "Symbol"], [65, 2, 1, "", "SymbolDecl"], [65, 2, 1, "", "SymbolId"], [65, 2, 1, "", "Top"], [65, 2, 1, "", "UnaryConn"], [65, 2, 1, "", "VarPattern"], [65, 2, 1, "", "WithAttrs"], [65, 2, 1, "", "WithSort"], [65, 1, 1, "", "kore_term"]], "pyk.kore.syntax.AliasDecl": [[65, 5, 1, "", "alias"], [65, 5, 1, "", "attrs"], [65, 5, 1, "", "left"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_attrs"], [65, 5, 1, "", "param_sorts"], [65, 5, 1, "", "right"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.And": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "ops"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"]], "pyk.kore.syntax.App": [[65, 5, 1, "", "args"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 4, 1, "", "patterns"], [65, 5, 1, "", "sorts"], [65, 5, 1, "", "symbol"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.Assoc": [[65, 4, 1, "", "app"], [65, 5, 1, "", "args"], [65, 3, 1, "", "kore_symbol"], [65, 4, 1, "", "pattern"], [65, 4, 1, "", "patterns"], [65, 5, 1, "", "sorts"], [65, 5, 1, "", "symbol"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.Axiom": [[65, 5, 1, "", "attrs"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_attrs"], [65, 5, 1, "", "pattern"], [65, 5, 1, "", "vars"]], "pyk.kore.syntax.AxiomLike": [[65, 5, 1, "", "pattern"], [65, 5, 1, "", "vars"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.BinaryConn": [[65, 5, 1, "", "left"], [65, 4, 1, "", "patterns"], [65, 5, 1, "", "right"]], "pyk.kore.syntax.BinaryPred": [[65, 5, 1, "", "left"], [65, 4, 1, "", "patterns"], [65, 5, 1, "", 
"right"], [65, 4, 1, "", "sorts"]], "pyk.kore.syntax.Bottom": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"]], "pyk.kore.syntax.Ceil": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "op_sort"], [65, 5, 1, "", "pattern"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"]], "pyk.kore.syntax.Claim": [[65, 5, 1, "", "attrs"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_attrs"], [65, 5, 1, "", "pattern"], [65, 5, 1, "", "vars"]], "pyk.kore.syntax.DV": [[65, 4, 1, "", "ctor_patterns"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 4, 1, "", "patterns"], [65, 5, 1, "", "sort"], [65, 4, 1, "", "sorts"], [65, 3, 1, "", "symbol"], [65, 5, 1, "", "value"]], "pyk.kore.syntax.Definition": [[65, 5, 1, "", "attrs"], [65, 4, 1, "", "axioms"], [65, 3, 1, "", "compute_ordinals"], [65, 3, 1, "", "get_axiom_by_ordinal"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_attrs"], [65, 5, 1, "", "modules"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.EVar": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 5, 1, "", "name"], [65, 5, 1, "", "sort"]], "pyk.kore.syntax.Equals": [[65, 5, 1, "", "left"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "op_sort"], [65, 5, 1, "", "right"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"]], "pyk.kore.syntax.Exists": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "pattern"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"], [65, 5, 1, "", "var"]], "pyk.kore.syntax.Floor": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "op_sort"], [65, 5, 1, "", "pattern"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"]], "pyk.kore.syntax.Forall": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "pattern"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"], [65, 5, 1, "", "var"]], "pyk.kore.syntax.Id": [[65, 5, 1, "", "value"]], "pyk.kore.syntax.Iff": [[65, 5, 1, "", "left"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "right"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"]], "pyk.kore.syntax.Implies": [[65, 5, 1, "", "left"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "right"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"]], "pyk.kore.syntax.Import": [[65, 5, 1, "", "attrs"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_attrs"], [65, 5, 1, "", "module_name"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.In": [[65, 5, 1, "", "left"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "op_sort"], [65, 5, 1, "", "right"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"]], "pyk.kore.syntax.Kore": [[65, 4, 1, "", "text"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.LeftAssoc": [[65, 5, 1, "", "args"], [65, 3, 1, "", "kore_symbol"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 4, 1, "", "pattern"], [65, 5, 1, "", "sorts"], [65, 5, 1, "", "symbol"]], "pyk.kore.syntax.MLConn": [[65, 4, 1, "", "sorts"]], 
"pyk.kore.syntax.MLFixpoint": [[65, 4, 1, "", "ctor_patterns"], [65, 5, 1, "", "pattern"], [65, 4, 1, "", "patterns"], [65, 4, 1, "", "sorts"], [65, 5, 1, "", "var"]], "pyk.kore.syntax.MLPattern": [[65, 4, 1, "", "ctor_patterns"], [65, 3, 1, "", "of"], [65, 4, 1, "", "sorts"], [65, 3, 1, "", "symbol"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.MLPred": [[65, 5, 1, "", "op_sort"]], "pyk.kore.syntax.MLQuant": [[65, 4, 1, "", "ctor_patterns"], [65, 5, 1, "", "pattern"], [65, 4, 1, "", "patterns"], [65, 5, 1, "", "sort"], [65, 4, 1, "", "sorts"], [65, 5, 1, "", "var"]], "pyk.kore.syntax.MLRewrite": [[65, 4, 1, "", "sorts"]], "pyk.kore.syntax.Module": [[65, 5, 1, "", "attrs"], [65, 4, 1, "", "axioms"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_attrs"], [65, 5, 1, "", "name"], [65, 5, 1, "", "sentences"], [65, 4, 1, "", "symbol_decls"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.Mu": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "of"], [65, 5, 1, "", "pattern"], [65, 3, 1, "", "symbol"], [65, 5, 1, "", "var"]], "pyk.kore.syntax.MultiaryConn": [[65, 5, 1, "", "ops"], [65, 4, 1, "", "patterns"]], "pyk.kore.syntax.Next": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "pattern"], [65, 4, 1, "", "patterns"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"]], "pyk.kore.syntax.Not": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "pattern"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"]], "pyk.kore.syntax.Nu": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "of"], [65, 5, 1, "", "pattern"], [65, 3, 1, "", "symbol"], [65, 5, 1, "", "var"]], "pyk.kore.syntax.NullaryConn": [[65, 4, 1, "", "patterns"]], "pyk.kore.syntax.Or": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "ops"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"]], "pyk.kore.syntax.Pattern": [[65, 3, 1, "", "bottom_up"], [65, 4, 1, "", "dict"], [65, 3, 1, "", "from_dict"], [65, 3, 1, "", "from_json"], [65, 4, 1, "", "json"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "map_patterns"], [65, 4, 1, "", "patterns"], [65, 3, 1, "", "top_down"]], "pyk.kore.syntax.Rewrites": [[65, 5, 1, "", "left"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 4, 1, "", "patterns"], [65, 5, 1, "", "right"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"]], "pyk.kore.syntax.RightAssoc": [[65, 5, 1, "", "args"], [65, 3, 1, "", "kore_symbol"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 4, 1, "", "pattern"], [65, 5, 1, "", "sorts"], [65, 5, 1, "", "symbol"]], "pyk.kore.syntax.RoundPred": [[65, 5, 1, "", "pattern"], [65, 4, 1, "", "patterns"], [65, 4, 1, "", "sorts"]], "pyk.kore.syntax.SVar": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 5, 1, "", "name"], [65, 5, 1, "", "sort"]], "pyk.kore.syntax.SetVarId": [[65, 5, 1, "", "value"]], "pyk.kore.syntax.Sort": [[65, 4, 1, "", "dict"], [65, 3, 1, "", "from_dict"], [65, 3, 1, "", "from_json"], [65, 4, 1, "", "json"], [65, 5, 1, "", "name"]], "pyk.kore.syntax.SortApp": [[65, 4, 1, "", "dict"], [65, 3, 1, "", "let"], [65, 5, 1, "", "name"], [65, 5, 1, "", "sorts"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.SortDecl": [[65, 5, 1, "", "attrs"], [65, 5, 1, "", "hooked"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_attrs"], [65, 5, 1, 
"", "name"], [65, 5, 1, "", "vars"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.SortVar": [[65, 4, 1, "", "dict"], [65, 3, 1, "", "let"], [65, 5, 1, "", "name"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.String": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 4, 1, "", "patterns"], [65, 5, 1, "", "value"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.Symbol": [[65, 3, 1, "", "let"], [65, 5, 1, "", "name"], [65, 5, 1, "", "vars"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.SymbolDecl": [[65, 5, 1, "", "attrs"], [65, 5, 1, "", "hooked"], [65, 3, 1, "", "let"], [65, 3, 1, "", "let_attrs"], [65, 5, 1, "", "param_sorts"], [65, 5, 1, "", "sort"], [65, 5, 1, "", "symbol"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.SymbolId": [[65, 5, 1, "", "value"]], "pyk.kore.syntax.Top": [[65, 3, 1, "", "let"], [65, 3, 1, "", "let_patterns"], [65, 3, 1, "", "let_sort"], [65, 3, 1, "", "of"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "symbol"]], "pyk.kore.syntax.UnaryConn": [[65, 5, 1, "", "pattern"], [65, 4, 1, "", "patterns"]], "pyk.kore.syntax.VarPattern": [[65, 5, 1, "", "name"], [65, 4, 1, "", "patterns"], [65, 5, 1, "", "sort"], [65, 3, 1, "", "write"]], "pyk.kore.syntax.WithAttrs": [[65, 5, 1, "", "attrs"], [65, 4, 1, "", "attrs_by_key"], [65, 3, 1, "", "let_attrs"], [65, 3, 1, "", "map_attrs"]], "pyk.kore.syntax.WithSort": [[65, 3, 1, "", "let_sort"], [65, 3, 1, "", "map_sort"], [65, 5, 1, "", "sort"]], "pyk.kore.tools": [[66, 2, 1, "", "PrintOutput"], [66, 1, 1, "", "kore_print"]], "pyk.kore.tools.PrintOutput": [[66, 5, 1, "", "BINARY"], [66, 5, 1, "", "JSON"], [66, 5, 1, "", "KAST"], [66, 5, 1, "", "KORE"], [66, 5, 1, "", "LATEX"], [66, 5, 1, "", "NONE"], [66, 5, 1, "", "PRETTY"], [66, 5, 1, "", "PROGRAM"]], "pyk.kore_exec_covr": [[68, 0, 0, "-", "kore_exec_covr"]], "pyk.kore_exec_covr.kore_exec_covr": [[68, 2, 1, "", "HaskellLogEntry"], [68, 1, 1, "", "build_rule_dict"], [68, 1, 1, "", "parse_rule_applications"]], "pyk.kore_exec_covr.kore_exec_covr.HaskellLogEntry": [[68, 5, 1, "", "DEBUG_APPLIED_REWRITE_RULES"], [68, 5, 1, "", "DEBUG_APPLY_EQUATION"]], "pyk.krepl": [[70, 0, 0, "-", "repl"]], "pyk.krepl.repl": [[70, 2, 1, "", "BaseRepl"], [70, 2, 1, "", "Interpreter"], [70, 2, 1, "", "KInterpreter"], [70, 2, 1, "", "KRepl"], [70, 2, 1, "", "KState"], [70, 6, 1, "", "ReplError"]], "pyk.krepl.repl.BaseRepl": [[70, 5, 1, "", "CAT_BUILTIN"], [70, 5, 1, "", "CAT_DEBUG"], [70, 3, 1, "", "do_load"], [70, 3, 1, "", "do_show"], [70, 3, 1, "", "do_step"], [70, 5, 1, "", "interpreter"], [70, 5, 1, "", "prompt"], [70, 5, 1, "", "state"]], "pyk.krepl.repl.Interpreter": [[70, 3, 1, "", "init_state"], [70, 3, 1, "", "next_state"]], "pyk.krepl.repl.KInterpreter": [[70, 5, 1, "", "definition_dir"], [70, 3, 1, "", "init_state"], [70, 3, 1, "", "next_state"], [70, 5, 1, "", "program_file"]], "pyk.krepl.repl.KRepl": [[70, 3, 1, "", "do_load"], [70, 5, 1, "", "intro"]], "pyk.krepl.repl.KState": [[70, 5, 1, "", "definition_dir"], [70, 5, 1, "", "pattern"], [70, 4, 1, "", "pretty"]], "pyk.ktool": [[72, 0, 0, "-", "claim_index"], [73, 0, 0, "-", "claim_loader"], [74, 0, 0, "-", "kfuzz"], [75, 0, 0, "-", "kompile"], [76, 0, 0, "-", "kprint"], [77, 0, 0, "-", "kprove"], [78, 0, 0, "-", "krun"], [79, 0, 0, "-", "prove_rpc"], [80, 0, 0, "-", "utils"]], "pyk.ktool.claim_index": [[72, 2, 1, "", "ClaimIndex"]], "pyk.ktool.claim_index.ClaimIndex": [[72, 5, 1, "", "claims"], [72, 3, 1, "", "from_module_list"], [72, 3, 1, "", "labels"], [72, 5, 1, "", "main_module_name"], [72, 3, 1, "", "resolve"], [72, 3, 1, "", 
"resolve_all"]], "pyk.ktool.claim_loader": [[73, 2, 1, "", "ClaimLoader"]], "pyk.ktool.claim_loader.ClaimLoader": [[73, 3, 1, "", "load_claims"]], "pyk.ktool.kfuzz": [[74, 2, 1, "", "KFuzz"], [74, 2, 1, "", "KFuzzHandler"], [74, 1, 1, "", "fuzz"], [74, 1, 1, "", "kintegers"]], "pyk.ktool.kfuzz.KFuzz": [[74, 5, 1, "", "definition_dir"], [74, 3, 1, "", "fuzz_with_check"], [74, 3, 1, "", "fuzz_with_exit_code"], [74, 5, 1, "", "handler"]], "pyk.ktool.kfuzz.KFuzzHandler": [[74, 3, 1, "", "handle_failure"], [74, 3, 1, "", "handle_test"]], "pyk.ktool.kompile": [[75, 2, 1, "", "PykBackend"], [75, 1, 1, "", "kompile"]], "pyk.ktool.kompile.PykBackend": [[75, 5, 1, "", "BOOSTER"], [75, 5, 1, "", "HASKELL"], [75, 5, 1, "", "KORE"], [75, 5, 1, "", "LLVM"], [75, 5, 1, "", "MAUDE"]], "pyk.ktool.kprint": [[76, 2, 1, "", "KAstInput"], [76, 2, 1, "", "KAstOutput"], [76, 2, 1, "", "KPrint"], [76, 1, 1, "", "gen_glr_parser"]], "pyk.ktool.kprint.KAstInput": [[76, 5, 1, "", "BINARY"], [76, 5, 1, "", "JSON"], [76, 5, 1, "", "KAST"], [76, 5, 1, "", "KORE"], [76, 5, 1, "", "PROGRAM"], [76, 5, 1, "", "RULE"]], "pyk.ktool.kprint.KAstOutput": [[76, 5, 1, "", "BINARY"], [76, 5, 1, "", "JSON"], [76, 5, 1, "", "KAST"], [76, 5, 1, "", "KORE"], [76, 5, 1, "", "LATEX"], [76, 5, 1, "", "NONE"], [76, 5, 1, "", "PRETTY"], [76, 5, 1, "", "PROGRAM"]], "pyk.ktool.kprint.KPrint": [[76, 5, 1, "", "backend"], [76, 4, 1, "", "definition"], [76, 5, 1, "", "definition_dir"], [76, 4, 1, "", "definition_hash"], [76, 3, 1, "", "kast_to_kore"], [76, 3, 1, "", "kore_to_kast"], [76, 3, 1, "", "kore_to_pretty"], [76, 5, 1, "", "main_module"], [76, 3, 1, "", "parse_token"], [76, 3, 1, "", "pretty_print"], [76, 5, 1, "", "use_directory"]], "pyk.ktool.kprove": [[77, 2, 1, "", "KProve"], [77, 2, 1, "", "KProveOutput"]], "pyk.ktool.kprove.KProve": [[77, 3, 1, "", "get_claim_index"], [77, 3, 1, "", "get_claims"], [77, 5, 1, "", "main_file"], [77, 3, 1, "", "parse_modules"], [77, 3, 1, "", "prove"], [77, 3, 1, "", "prove_claim"], [77, 5, 1, "", "prover"], [77, 5, 1, "", "prover_args"]], "pyk.ktool.kprove.KProveOutput": [[77, 5, 1, "", "BINARY"], [77, 5, 1, "", "JSON"], [77, 5, 1, "", "KAST"], [77, 5, 1, "", "KORE"], [77, 5, 1, "", "LATEX"], [77, 5, 1, "", "NONE"], [77, 5, 1, "", "PRETTY"], [77, 5, 1, "", "PROGAM"]], "pyk.ktool.krun": [[78, 2, 1, "", "KRun"], [78, 2, 1, "", "KRunOutput"], [78, 1, 1, "", "llvm_interpret"], [78, 1, 1, "", "llvm_interpret_raw"]], "pyk.ktool.krun.KRun": [[78, 5, 1, "", "command"], [78, 3, 1, "", "krun"], [78, 3, 1, "", "run"], [78, 3, 1, "", "run_pattern"], [78, 3, 1, "", "run_process"], [78, 3, 1, "", "run_proof_hint"]], "pyk.ktool.krun.KRunOutput": [[78, 5, 1, "", "BINARY"], [78, 5, 1, "", "JSON"], [78, 5, 1, "", "KAST"], [78, 5, 1, "", "KORE"], [78, 5, 1, "", "LATEX"], [78, 5, 1, "", "NONE"], [78, 5, 1, "", "PRETTY"], [78, 5, 1, "", "PROGRAM"]], "pyk.ktool.prove_rpc": [[79, 2, 1, "", "ProveRpc"]], "pyk.ktool.prove_rpc.ProveRpc": [[79, 3, 1, "", "prove_rpc"]], "pyk.ktool.utils": [[80, 2, 1, "", "KDistribution"]], "pyk.ktool.utils.KDistribution": [[80, 4, 1, "", "builtin_dir"], [80, 3, 1, "", "create"], [80, 5, 1, "id0", "path"]], "pyk.prelude": [[82, 0, 0, "-", "bytes"], [83, 0, 0, "-", "collections"], [84, 0, 0, "-", "k"], [85, 0, 0, "-", "kbool"], [86, 0, 0, "-", "kint"], [87, 0, 0, "-", "ml"], [88, 0, 0, "-", "string"], [89, 0, 0, "-", "utils"]], "pyk.prelude.bytes": [[82, 1, 1, "", "bytesToken"], [82, 1, 1, "", "bytesToken_from_str"], [82, 1, 1, "", "pretty_bytes"], [82, 1, 1, "", "pretty_bytes_str"]], 
"pyk.prelude.collections": [[83, 1, 1, "", "list_empty"], [83, 1, 1, "", "list_item"], [83, 1, 1, "", "list_of"], [83, 1, 1, "", "map_empty"], [83, 1, 1, "", "map_item"], [83, 1, 1, "", "map_of"], [83, 1, 1, "", "rangemap_empty"], [83, 1, 1, "", "rangemap_item"], [83, 1, 1, "", "rangemap_of"], [83, 1, 1, "", "set_empty"], [83, 1, 1, "", "set_item"], [83, 1, 1, "", "set_of"]], "pyk.prelude.k": [[84, 1, 1, "", "inj"]], "pyk.prelude.kbool": [[85, 1, 1, "", "andBool"], [85, 1, 1, "", "boolToken"], [85, 1, 1, "", "impliesBool"], [85, 1, 1, "", "notBool"], [85, 1, 1, "", "orBool"]], "pyk.prelude.kint": [[86, 1, 1, "", "absInt"], [86, 1, 1, "", "addInt"], [86, 1, 1, "", "andInt"], [86, 1, 1, "", "divInt"], [86, 1, 1, "", "eqInt"], [86, 1, 1, "", "euclidDivInt"], [86, 1, 1, "", "euclidModInt"], [86, 1, 1, "", "expInt"], [86, 1, 1, "", "expModInt"], [86, 1, 1, "", "geInt"], [86, 1, 1, "", "gtInt"], [86, 1, 1, "", "intToken"], [86, 1, 1, "", "leInt"], [86, 1, 1, "", "lshiftInt"], [86, 1, 1, "", "ltInt"], [86, 1, 1, "", "maxInt"], [86, 1, 1, "", "minInt"], [86, 1, 1, "", "modInt"], [86, 1, 1, "", "mulInt"], [86, 1, 1, "", "neqInt"], [86, 1, 1, "", "notInt"], [86, 1, 1, "", "orInt"], [86, 1, 1, "", "rshiftInt"], [86, 1, 1, "", "subInt"], [86, 1, 1, "", "xorInt"]], "pyk.prelude.ml": [[87, 1, 1, "", "is_bottom"], [87, 1, 1, "", "is_top"], [87, 1, 1, "", "mlAnd"], [87, 1, 1, "", "mlBottom"], [87, 1, 1, "", "mlCeil"], [87, 1, 1, "", "mlEquals"], [87, 1, 1, "", "mlEqualsFalse"], [87, 1, 1, "", "mlEqualsTrue"], [87, 1, 1, "", "mlExists"], [87, 1, 1, "", "mlImplies"], [87, 1, 1, "", "mlNot"], [87, 1, 1, "", "mlOr"], [87, 1, 1, "", "mlTop"]], "pyk.prelude.string": [[88, 1, 1, "", "pretty_string"], [88, 1, 1, "", "stringToken"]], "pyk.prelude.utils": [[89, 1, 1, "", "token"]], "pyk.proof": [[91, 0, 0, "-", "implies"], [92, 0, 0, "-", "proof"], [93, 0, 0, "-", "reachability"], [94, 0, 0, "-", "show"], [95, 0, 0, "-", "tui"]], "pyk.proof.implies": [[91, 2, 1, "", "EqualityProof"], [91, 2, 1, "", "EqualitySummary"], [91, 2, 1, "", "ImpliesProof"], [91, 2, 1, "", "ImpliesProofResult"], [91, 2, 1, "", "ImpliesProofStep"], [91, 2, 1, "", "ImpliesProver"], [91, 2, 1, "", "RefutationProof"], [91, 2, 1, "", "RefutationSummary"]], "pyk.proof.implies.EqualityProof": [[91, 4, 1, "", "constraint"], [91, 4, 1, "", "constraints"], [91, 4, 1, "", "dict"], [91, 4, 1, "", "equality"], [91, 3, 1, "", "from_claim"], [91, 3, 1, "", "from_dict"], [91, 4, 1, "", "lhs_body"], [91, 3, 1, "", "pretty"], [91, 3, 1, "", "read_proof_data"], [91, 4, 1, "", "rhs_body"], [91, 4, 1, "", "simplified_constraints"], [91, 4, 1, "", "simplified_equality"], [91, 4, 1, "", "sort"], [91, 4, 1, "", "summary"]], "pyk.proof.implies.EqualitySummary": [[91, 5, 1, "", "admitted"], [91, 5, 1, "", "id"], [91, 4, 1, "", "lines"], [91, 5, 1, "", "status"]], "pyk.proof.implies.ImpliesProof": [[91, 5, 1, "", "antecedent"], [91, 5, 1, "", "bind_universally"], [91, 4, 1, "", "can_progress"], [91, 3, 1, "", "commit"], [91, 5, 1, "", "consequent"], [91, 5, 1, "", "csubst"], [91, 4, 1, "", "dict"], [91, 3, 1, "", "from_dict"], [91, 3, 1, "", "get_steps"], [91, 4, 1, "", "own_status"], [91, 5, 1, "", "simplified_antecedent"], [91, 5, 1, "", "simplified_consequent"], [91, 3, 1, "", "write_proof_data"]], "pyk.proof.implies.ImpliesProofResult": [[91, 5, 1, "", "csubst"], [91, 5, 1, "", "simplified_antecedent"], [91, 5, 1, "", "simplified_consequent"]], "pyk.proof.implies.ImpliesProofStep": [[91, 5, 1, "", "proof"]], "pyk.proof.implies.ImpliesProver": [[91, 5, 1, "", 
"assume_defined"], [91, 3, 1, "", "close"], [91, 3, 1, "", "failure_info"], [91, 3, 1, "", "init_proof"], [91, 5, 1, "", "kcfg_explore"], [91, 5, 1, "", "proof"], [91, 3, 1, "", "step_proof"]], "pyk.proof.implies.RefutationProof": [[91, 4, 1, "", "dict"], [91, 3, 1, "", "from_dict"], [91, 4, 1, "", "last_constraint"], [91, 4, 1, "", "pre_constraints"], [91, 3, 1, "", "pretty"], [91, 3, 1, "", "read_proof_data"], [91, 4, 1, "", "simplified_constraints"], [91, 4, 1, "", "summary"], [91, 3, 1, "", "to_claim"]], "pyk.proof.implies.RefutationSummary": [[91, 5, 1, "", "id"], [91, 4, 1, "", "lines"], [91, 5, 1, "", "status"]], "pyk.proof.proof": [[92, 2, 1, "", "CompositeSummary"], [92, 2, 1, "", "FailureInfo"], [92, 2, 1, "", "Proof"], [92, 2, 1, "", "ProofStatus"], [92, 2, 1, "", "ProofSummary"], [92, 2, 1, "", "Prover"], [92, 1, 1, "", "parallel_advance_proof"]], "pyk.proof.proof.CompositeSummary": [[92, 4, 1, "", "lines"], [92, 5, 1, "", "summaries"]], "pyk.proof.proof.Proof": [[92, 3, 1, "", "add_subproof"], [92, 3, 1, "", "admit"], [92, 5, 1, "", "admitted"], [92, 4, 1, "", "can_progress"], [92, 3, 1, "", "commit"], [92, 4, 1, "", "dict"], [92, 4, 1, "", "digest"], [92, 4, 1, "", "failed"], [92, 5, 1, "", "failure_info"], [92, 3, 1, "", "fetch_subproof"], [92, 3, 1, "", "fetch_subproof_data"], [92, 3, 1, "", "from_dict"], [92, 3, 1, "", "get_steps"], [92, 5, 1, "", "id"], [92, 4, 1, "", "json"], [92, 4, 1, "", "one_line_summary"], [92, 4, 1, "", "own_status"], [92, 4, 1, "", "passed"], [92, 3, 1, "", "proof_data_exists"], [92, 5, 1, "", "proof_dir"], [92, 3, 1, "", "proof_exists"], [92, 4, 1, "", "proof_subdir"], [92, 3, 1, "", "read_proof"], [92, 3, 1, "", "read_proof_data"], [92, 3, 1, "", "read_subproof"], [92, 3, 1, "", "read_subproof_data"], [92, 3, 1, "", "remove_subproof"], [92, 4, 1, "", "status"], [92, 4, 1, "", "subproof_ids"], [92, 4, 1, "", "subproofs"], [92, 4, 1, "", "subproofs_status"], [92, 4, 1, "", "summary"], [92, 4, 1, "", "up_to_date"], [92, 3, 1, "", "write_proof"], [92, 3, 1, "", "write_proof_data"]], "pyk.proof.proof.ProofStatus": [[92, 5, 1, "", "FAILED"], [92, 5, 1, "", "PASSED"], [92, 5, 1, "", "PENDING"]], "pyk.proof.proof.ProofSummary": [[92, 5, 1, "", "id"], [92, 4, 1, "", "lines"], [92, 5, 1, "", "status"]], "pyk.proof.proof.Prover": [[92, 3, 1, "", "advance_proof"], [92, 3, 1, "", "close"], [92, 3, 1, "", "failure_info"], [92, 3, 1, "", "init_proof"], [92, 3, 1, "", "step_proof"]], "pyk.proof.reachability": [[93, 2, 1, "", "APRFailureInfo"], [93, 2, 1, "", "APRProof"], [93, 2, 1, "", "APRProofBoundedResult"], [93, 2, 1, "", "APRProofExtendAndCacheResult"], [93, 2, 1, "", "APRProofExtendResult"], [93, 2, 1, "", "APRProofResult"], [93, 2, 1, "", "APRProofStep"], [93, 2, 1, "", "APRProofSubsumeResult"], [93, 2, 1, "", "APRProofTerminalResult"], [93, 2, 1, "", "APRProofUseCacheResult"], [93, 2, 1, "", "APRProver"], [93, 2, 1, "", "APRSummary"]], "pyk.proof.reachability.APRFailureInfo": [[93, 5, 1, "", "failing_nodes"], [93, 5, 1, "", "failure_reasons"], [93, 3, 1, "", "from_proof"], [93, 5, 1, "", "models"], [93, 5, 1, "", "path_conditions"], [93, 5, 1, "", "pending_nodes"], [93, 3, 1, "", "print"]], "pyk.proof.reachability.APRProof": [[93, 3, 1, "", "add_bounded"], [93, 3, 1, "", "add_exec_time"], [93, 3, 1, "", "as_rule"], [93, 3, 1, "", "as_rules"], [93, 5, 1, "", "bmc_depth"], [93, 4, 1, "", "bounded"], [93, 4, 1, "", "can_progress"], [93, 4, 1, "", "circularities_module_name"], [93, 5, 1, "", "circularity"], [93, 3, 1, "", "commit"], [93, 3, 1, "", 
"construct_node_refutation"], [93, 4, 1, "", "dependencies_module_name"], [93, 4, 1, "", "dict"], [93, 5, 1, "", "error_info"], [93, 4, 1, "", "exec_time"], [93, 4, 1, "", "failing"], [93, 3, 1, "", "formatted_exec_time"], [93, 3, 1, "", "from_claim"], [93, 3, 1, "", "from_dict"], [93, 3, 1, "", "from_spec_modules"], [93, 3, 1, "", "get_refutation_id"], [93, 3, 1, "", "get_steps"], [93, 5, 1, "", "init"], [93, 3, 1, "", "is_bounded"], [93, 3, 1, "", "is_failing"], [93, 3, 1, "", "is_init"], [93, 3, 1, "", "is_pending"], [93, 3, 1, "", "is_refuted"], [93, 3, 1, "", "is_target"], [93, 5, 1, "", "logs"], [93, 4, 1, "", "module_name"], [93, 5, 1, "", "node_refutations"], [93, 3, 1, "", "nonzero_depth"], [93, 4, 1, "", "one_line_summary"], [93, 4, 1, "", "own_status"], [93, 3, 1, "", "path_constraints"], [93, 4, 1, "", "pending"], [93, 5, 1, "", "prior_loops_cache"], [93, 3, 1, "", "prune"], [93, 3, 1, "", "read_proof"], [93, 3, 1, "", "read_proof_data"], [93, 3, 1, "", "refute_node"], [93, 4, 1, "", "rule_id"], [93, 3, 1, "", "set_exec_time"], [93, 3, 1, "", "shortest_path_to"], [93, 4, 1, "", "summary"], [93, 5, 1, "", "target"], [93, 3, 1, "", "unrefute_node"], [93, 3, 1, "", "write_proof_data"]], "pyk.proof.reachability.APRProofExtendAndCacheResult": [[93, 5, 1, "", "extension_to_cache"]], "pyk.proof.reachability.APRProofExtendResult": [[93, 5, 1, "", "extension_to_apply"]], "pyk.proof.reachability.APRProofResult": [[93, 5, 1, "", "node_id"], [93, 5, 1, "", "optimize_kcfg"], [93, 5, 1, "", "prior_loops_cache_update"]], "pyk.proof.reachability.APRProofStep": [[93, 5, 1, "", "bmc_depth"], [93, 5, 1, "", "circularity"], [93, 5, 1, "", "circularity_rule_id"], [93, 5, 1, "", "module_name"], [93, 5, 1, "", "node"], [93, 5, 1, "", "nonzero_depth"], [93, 5, 1, "", "prior_loops_cache"], [93, 5, 1, "", "proof_id"], [93, 5, 1, "", "shortest_path_to_node"], [93, 5, 1, "", "target"], [93, 5, 1, "", "use_cache"]], "pyk.proof.reachability.APRProofSubsumeResult": [[93, 5, 1, "", "csubst"]], "pyk.proof.reachability.APRProofUseCacheResult": [[93, 5, 1, "", "cached_node_id"]], "pyk.proof.reachability.APRProver": [[93, 5, 1, "", "assume_defined"], [93, 3, 1, "", "close"], [93, 5, 1, "", "counterexample_info"], [93, 5, 1, "", "cut_point_rules"], [93, 5, 1, "", "direct_subproof_rules"], [93, 5, 1, "", "execute_depth"], [93, 5, 1, "", "extra_module"], [93, 3, 1, "", "failure_info"], [93, 5, 1, "", "fast_check_subsumption"], [93, 3, 1, "", "init_proof"], [93, 5, 1, "", "kcfg_explore"], [93, 5, 1, "", "main_module_name"], [93, 5, 1, "", "optimize_kcfg"], [93, 3, 1, "", "step_proof"], [93, 5, 1, "", "terminal_rules"]], "pyk.proof.reachability.APRSummary": [[93, 5, 1, "", "admitted"], [93, 5, 1, "", "bmc_depth"], [93, 5, 1, "", "bounded"], [93, 5, 1, "", "failing"], [93, 5, 1, "", "formatted_exec_time"], [93, 5, 1, "", "id"], [93, 4, 1, "", "lines"], [93, 5, 1, "", "nodes"], [93, 5, 1, "", "pending"], [93, 5, 1, "", "refuted"], [93, 5, 1, "", "status"], [93, 5, 1, "", "stuck"], [93, 5, 1, "", "subproofs"], [93, 5, 1, "", "terminal"], [93, 5, 1, "", "vacuous"]], "pyk.proof.show": [[94, 2, 1, "", "APRProofNodePrinter"], [94, 2, 1, "", "APRProofShow"]], "pyk.proof.show.APRProofNodePrinter": [[94, 3, 1, "", "node_attrs"], [94, 5, 1, "", "proof"]], "pyk.proof.show.APRProofShow": [[94, 3, 1, "", "dot"], [94, 3, 1, "", "dump"], [94, 5, 1, "", "kcfg_show"], [94, 3, 1, "", "pretty"], [94, 3, 1, "", "pretty_segments"], [94, 3, 1, "", "show"]], "pyk.proof.tui": [[95, 2, 1, "", "APRProofBehaviorView"], [95, 2, 1, "", 
"APRProofViewer"]], "pyk.proof.tui.APRProofBehaviorView": [[95, 5, 1, "", "can_focus"], [95, 5, 1, "", "can_focus_children"], [95, 3, 1, "", "compose"]], "pyk.proof.tui.APRProofViewer": [[95, 3, 1, "", "compose"], [95, 3, 1, "", "on_mount"]], "pyk.testing": [[97, 0, 0, "-", "plugin"]], "pyk.testing.plugin": [[97, 1, 1, "", "bug_report"], [97, 1, 1, "", "kompile"], [97, 1, 1, "", "profile"], [97, 1, 1, "", "pytest_addoption"], [97, 1, 1, "", "use_server"]], "pyk.utils": [[98, 2, 1, "", "BugReport"], [98, 2, 1, "", "Chainable"], [98, 2, 1, "", "FrozenDict"], [98, 2, 1, "", "POSet"], [98, 1, 1, "", "abs_or_rel_to"], [98, 1, 1, "", "add_indent"], [98, 1, 1, "", "case"], [98, 1, 1, "", "check_absolute_path"], [98, 1, 1, "", "check_dir_path"], [98, 1, 1, "", "check_file_path"], [98, 1, 1, "", "check_relative_path"], [98, 1, 1, "", "check_type"], [98, 1, 1, "", "compare_short_hashes"], [98, 1, 1, "", "deconstruct_short_hash"], [98, 1, 1, "", "ensure_dir_path"], [98, 1, 1, "", "exit_with_process_error"], [98, 1, 1, "", "filter_none"], [98, 1, 1, "", "find_common_items"], [98, 1, 1, "", "gen_file_timestamp"], [98, 1, 1, "", "hash_file"], [98, 1, 1, "", "hash_str"], [98, 1, 1, "", "intersperse"], [98, 1, 1, "", "is_hash"], [98, 1, 1, "", "is_hexstring"], [98, 1, 1, "", "maybe"], [98, 1, 1, "", "merge_with"], [98, 1, 1, "", "none"], [98, 1, 1, "", "nonempty_str"], [98, 1, 1, "", "not_none"], [98, 1, 1, "", "partition"], [98, 1, 1, "", "raised"], [98, 1, 1, "", "repeat_last"], [98, 1, 1, "", "run_process"], [98, 1, 1, "", "run_process_2"], [98, 1, 1, "", "shorten_hash"], [98, 1, 1, "", "shorten_hashes"], [98, 1, 1, "", "single"], [98, 1, 1, "", "some"], [98, 1, 1, "", "tuple_of"], [98, 1, 1, "", "unique"]], "pyk.utils.BugReport": [[98, 3, 1, "", "add_command"], [98, 3, 1, "", "add_file"], [98, 3, 1, "", "add_file_contents"], [98, 3, 1, "", "add_request"]], "pyk.utils.POSet": [[98, 5, 1, "", "image"]]}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "function", "Python function"], "2": ["py", "class", "Python class"], "3": ["py", "method", "Python method"], "4": ["py", "property", "Python property"], "5": ["py", "attribute", "Python attribute"], "6": ["py", "exception", "Python exception"]}, "objtypes": {"0": "py:module", "1": "py:function", "2": "py:class", "3": "py:method", "4": "py:property", "5": "py:attribute", "6": "py:exception"}, "terms": {"": [4, 6, 8, 11, 14, 16, 32, 37, 40, 65, 68, 92, 93, 95], "0": [5, 10, 13, 15, 17, 28, 37, 57, 63, 74, 93], "1": [5, 13, 15, 17, 28, 31, 52, 57, 63, 92], "10": [13, 17, 28, 57, 93], "11": [13, 17, 57], "12": [13, 17, 57], "128": 98, "13": [13, 17, 57], "14": [13, 17, 57], "15": [17, 57, 93], "16": [17, 57], "17": [17, 57], "18": [17, 57], "19": [17, 57], "2": [5, 13, 17, 21, 31, 57, 63, 93], "20": [17, 32, 57, 93], "2019": 93, "21": [17, 57], "22": [17, 57], "23": [17, 57], "23638": 93, "24": [17, 57], "25": [17, 57], "26": [17, 57], "27": [17, 57], "28": [17, 57], "29": [17, 57], "3": [5, 13, 17, 57], "30": [17, 57], "31": [17, 57], "32": [17, 57], "33": [17, 57], "34": [17, 57], "35": [17, 57], "36": [17, 57], "37": [17, 57], "38": [17, 57], "39": [17, 57], "4": [5, 13, 17, 57], "40": [17, 57], "41": [17, 57], "42": [17, 57], "43": [17, 57], "44": [17, 57], "5": [13, 17, 57, 93], "5000": 74, "6": [13, 17, 57, 98], "7": [13, 17, 57], "8": [9, 13, 17, 57], "9": [13, 17, 28, 57], "9a": 28, "A": [4, 11, 14, 17, 33, 37, 47, 72, 74, 93], "And": [1, 7, 15, 33, 55, 60, 65], "For": 92, "If": [17, 33, 47, 72, 73, 74, 78, 92], "In": [1, 55, 60, 65], 
"It": [47, 68, 74], "Not": [1, 7, 15, 55, 60, 65], "Or": [1, 7, 15, 55, 60, 65], "The": [4, 11, 14, 16, 17, 33, 35, 47, 74, 78, 80, 86, 92], "These": 92, "Will": 74, "_": [11, 86, 98], "__call__": [3, 4, 7, 11], "__format__": [55, 63], "__getitem__": [7, 11], "__init__": [3, 4, 7, 11, 46, 47], "__iter__": [3, 4, 7, 11, 46, 47], "__len__": [7, 11], "__lt__": [7, 11], "__mul__": [7, 11], "__next__": [46, 47], "__repr__": [46, 47], "__str__": 63, "_annotated_llvm_ev": [46, 47], "_argument": [46, 47], "_configuration_": 4, "_cterm": 31, "_divint_": 86, "_event_typ": [46, 47], "_exec_tim": 93, "_function_ev": [46, 47], "_hook_ev": [46, 47], "_id": 32, "_io": 9, "_kfuzznullhandl": 74, "_kore_head": [46, 47], "_modint_": 86, "_path": 32, "_pattern_matching_failure_ev": [46, 47], "_rewrite_trac": [46, 47], "_rewrite_trace_iter": [46, 47], "_rule_ev": [46, 47], "_side_condition_end_ev": [46, 47], "_side_condition_ev": [46, 47], "_summari": 92, "_target": 32, "_xorint_": 86, "a_1": 33, "a_2": 33, "a_i": 33, "a_merg": 33, "a_n": 33, "a_x": 33, "a_z": 33, "abc": [8, 12, 15, 19, 27, 32, 34, 40, 47, 63, 64, 65, 70, 74, 92], "abl": 4, "abort": [55, 63], "abortedresult": [1, 55, 63], "about": [4, 11, 68, 92], "abov": 93, "abs_or_rel_to": [0, 1, 98], "absint": [1, 81, 86], "abstract": [1, 4, 8, 11, 12, 14, 15, 16, 27, 29, 32, 34, 40, 47, 63, 64, 65, 70, 74, 92], "abstract_label": 14, "abstract_nod": [29, 34], "abstract_term_saf": [1, 7, 14], "access": 4, "achiev": 4, "across": 33, "action": 37, "action_keystrok": [29, 37], "actual": [2, 16, 59, 63], "ad": [4, 16], "add": [4, 47], "add_alia": [29, 32], "add_attr": [29, 32], "add_bound": [90, 93], "add_bracket": [1, 7, 10], "add_cell_map_item": [7, 16], "add_command": [1, 98], "add_constraint": [3, 4], "add_exec_tim": [90, 93], "add_fil": [1, 98], "add_file_cont": [1, 98], "add_ind": [0, 1, 98], "add_inject": [55, 56], "add_ksequence_under_k_product": [7, 16], "add_modul": [55, 63, 92], "add_nod": [29, 32], "add_request": [1, 98], "add_sort_param": [7, 16], "add_stuck": [29, 32], "add_subproof": [90, 92], "add_successor": [29, 32], "add_termin": [29, 30], "add_vacu": [29, 32], "addint": [1, 81, 86], "addit": 4, "adit": 47, "admit": [90, 91, 92, 93], "advanc": 92, "advance_proof": [90, 92], "af": 93, "after": [47, 74, 92], "against": 16, "ahead": [24, 28], "algorithm": 16, "alia": [1, 5, 7, 8, 11, 13, 15, 16, 17, 19, 32, 55, 57, 63, 65], "alias": [29, 32], "alias_decl": [55, 60], "alias_rec": [7, 8], "alias_rul": [7, 16], "aliasdecl": [1, 55, 60, 65], "alice_blu": [7, 9], "aliceblu": 9, "all": [4, 11, 16, 33, 37, 47, 72, 92, 93], "all_fil": [24, 27], "all_modul": [7, 16], "all_module_nam": [7, 16], "all_modules_dict": [7, 16], "allow": [16, 33, 74, 93], "allow_unicod": 6, "allrul": 2, "also": [47, 74], "always_upd": 37, "amount": 14, "an": [4, 9, 11, 13, 14, 16, 17, 19, 33, 35, 47, 57, 63, 66, 68, 70, 72, 74, 75, 76, 77, 78, 92, 93], "analysi": 4, "and_bool": [1, 55, 62], "andbool": [1, 81, 85], "andd": [55, 60], "andint": [1, 81, 86], "ani": [4, 8, 11, 12, 16, 27, 30, 32, 33, 40, 41, 47, 52, 56, 62, 63, 65, 70, 74, 75, 91, 92, 93, 98], "annot": 47, "annotated_llvm_ev": 47, "anoth": [2, 16, 33], "ansi_cod": [7, 9], "anteced": [5, 31, 63, 85, 87, 90, 91], "anti_left": [55, 64], "anti_unifi": [1, 3, 4], "antique_whit": [7, 9], "antiquewhit": 9, "anytyp": [1, 7, 8], "anywher": [7, 8], "api": [1, 39], "app": [1, 37, 55, 59, 60, 62, 64, 65], "appli": [2, 3, 4, 7, 11, 14, 68, 92, 93], "applic": [11, 35, 68], "apply_existential_substitut": [1, 7, 14], "apply_top": 
[7, 11], "approach": 16, "appropri": 4, "apprul": [1, 55, 64], "aprfailureinfo": [1, 90, 93], "apricot": [7, 9], "aprproof": [1, 90, 93, 94, 95], "aprproofbehaviorview": [1, 90, 95], "aprproofboundedresult": [1, 90, 93], "aprproofextendandcacheresult": [1, 90, 93], "aprproofextendresult": [1, 90, 93], "aprproofnodeprint": [1, 90, 94], "aprproofresult": [1, 90, 93], "aprproofshow": [1, 90, 94], "aprproofstep": [1, 90, 93], "aprproofsubsumeresult": [1, 90, 93], "aprproofterminalresult": [1, 90, 93], "aprproofusecacheresult": [1, 90, 93], "aprproofview": [1, 90, 95], "aprprov": [1, 90, 92, 93], "aprsummari": [1, 90, 93], "aqua": [7, 9], "aquamarin": [7, 9], "ar": [4, 11, 14, 16, 33, 47, 68, 70, 72, 73, 74, 92], "arcnam": 98, "arg": [1, 7, 11, 19, 24, 27, 40, 46, 47, 55, 59, 61, 62, 63, 65, 70, 74, 77, 98], "arg_sort": [55, 64, 87], "argument": [11, 14, 16, 47, 70, 74], "argument_sort": [7, 16], "ariti": [7, 11], "around": 47, "as_rul": [76, 90, 93], "as_subsort": [7, 16], "assertionerror": [33, 74], "assign": [4, 11, 14, 16], "assoc": [1, 7, 8, 16, 19, 55, 65], "assoc_join": 21, "assoc_with_unit": [1, 7, 21], "associ": [11, 16, 47, 92], "assume_defin": [3, 5, 31, 63, 90, 91, 93], "assume_state_defin": 63, "ast": [1, 7, 11, 14, 16, 19, 42, 47], "atom": [1, 7, 15], "att": [1, 7, 16, 19, 32], "attach": 4, "attempt": [11, 14, 73], "attentri": [1, 7, 8], "attkei": [1, 7, 8], "attr": [7, 17, 29, 32, 55, 65], "attr_cont": [7, 17], "attr_kei": [7, 17], "attribut": [8, 16, 37, 47], "attrs_by_kei": [55, 65], "atttyp": [1, 7, 8], "automat": [14, 21], "avail": [37, 92], "avoid": [7, 8, 92], "awai": 14, "axiom": [1, 47, 53, 55, 60, 64, 65], "axiomlik": [1, 55, 65], "azur": [7, 9], "b": [6, 33, 52, 82, 85], "b_i": 33, "b_id": 33, "b_merg": 33, "b_x": 33, "b_y": 33, "b_z": 33, "back": 14, "backend": [68, 71, 75, 76, 78], "bare": 4, "base": [4, 5, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, 40, 47, 52, 56, 57, 60, 61, 63, 64, 65, 66, 68, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 86, 91, 92, 93, 94, 95, 98], "base_nam": 14, "baseexcept": 98, "baserepl": [1, 69, 70], "basic": 4, "becaus": 16, "becom": [33, 92], "been": [14, 17, 32, 63], "befor": [47, 74], "begin": 92, "behavior": 74, "behaviorview": [1, 29, 37], "beig": [7, 9], "being": [4, 16, 74, 92], "best": [16, 21], "between": [4, 14, 92], "bin": 44, "binari": [47, 55, 66, 71, 76, 77, 78, 80], "binaryconn": [1, 55, 65], "binarypr": [1, 55, 65], "bind": [11, 29, 37], "bind_univers": [5, 90, 91], "bindingtyp": 37, "bisqu": [7, 9], "bittersweet": [7, 9], "black": [7, 9], "blanched_almond": [7, 9], "blanchedalmond": 9, "block": [7, 19, 52], "blue": [7, 9], "blue_green": [7, 9], "blue_violet": [7, 9], "bluegreen": 9, "blueviolet": 9, "bmc_depth": [90, 93], "bodi": [7, 16, 87], "bool": [4, 5, 6, 10, 11, 14, 15, 16, 19, 20, 21, 23, 26, 28, 30, 31, 32, 33, 34, 35, 37, 40, 44, 47, 52, 56, 59, 60, 62, 63, 64, 65, 66, 68, 70, 72, 73, 74, 75, 76, 77, 78, 85, 87, 89, 91, 92, 93, 94, 95, 98], "bool_dv": [1, 55, 62], "bool_to_ml_pr": [1, 7, 14], "boolean": [4, 11, 47], "booltoken": [1, 81, 85], "booster": [55, 63, 71, 75], "boosterserv": [1, 55, 63], "boosterserverarg": [1, 55, 63], "both": [4, 11], "bottom": [1, 3, 4, 11, 55, 60, 65], "bottom_up": [1, 7, 11, 55, 65], "bottom_up_with_summari": [1, 7, 11], "bound": [4, 14, 16, 63, 90, 93], "bound_var": 58, "bracket": [7, 8, 10, 16], "bracket_label": [7, 8], "bracketlabel": 8, "branch": [1, 29, 32, 33, 55, 63], "branchingresult": [1, 55, 63], "brick_r": [7, 9], "brickr": 
9, "brown": [7, 9], "bubbl": [7, 17, 19, 29, 37], "bug_report": [1, 5, 55, 63, 76, 77, 78, 96, 97, 98], "bug_report_id": 63, "bugreport": [0, 1, 5, 63, 76, 77, 78, 97, 98], "build": [11, 21, 39, 40, 68], "build_assoc": [1, 7, 11], "build_claim": [1, 7, 14], "build_con": [1, 7, 11], "build_rul": [1, 7, 14], "build_rule_dict": [1, 67, 68], "build_symbol_t": [1, 7, 21], "built": 70, "builtin": 80, "builtin_dir": [71, 80], "bulk": 68, "burly_wood": [7, 9], "burlywood": 9, "burnt_orang": [7, 9], "burntorang": 9, "byte": [1, 6, 47, 52, 59, 62, 78, 81, 89], "bytes_decod": [0, 1, 6], "bytes_dv": [1, 55, 62], "bytes_encod": [0, 1, 6], "bytestoken": [1, 81, 82], "bytestoken_from_str": [1, 81, 82], "c": [33, 34, 37], "c1": 34, "c2": 34, "c_1": 33, "c_n": 33, "cach": [73, 93], "cached_node_id": [90, 93], "cadet_blu": [7, 9], "cadetblu": 9, "call": [16, 37, 74], "callabl": [8, 11, 14, 16, 21, 37, 59, 61, 65, 74, 76, 77, 78, 79, 92, 95, 98], "callback": [11, 92], "calledprocesserror": [78, 98], "can": [4, 11, 14, 35, 74, 92], "can_focu": [29, 37, 90, 95], "can_focus_children": [29, 37, 90, 95], "can_make_custom_step": [29, 34], "can_progress": [90, 91, 92, 93], "cannot": 72, "carnation_pink": [7, 9], "carnationpink": 9, "case": [0, 1, 11, 59, 74, 98], "case_symbol": [1, 55, 59], "cat_builtin": [69, 70], "cat_debug": [69, 70], "ccopt": 44, "ceil": [1, 55, 60, 64, 65], "ceilrul": [1, 55, 64], "cell": [3, 4, 7, 8, 11, 14, 16, 59, 62], "cell_collect": [7, 8], "cell_collection_product": [7, 16], "cell_frag": [7, 8], "cell_label_to_var_nam": [1, 7, 14], "cell_nam": [7, 8], "cell_opt_abs": [7, 8], "cell_valu": 14, "cell_vari": 14, "cellcollect": 8, "cellfrag": 8, "cellnam": 8, "celloptabs": 8, "central": 33, "cerulean": [7, 9], "cfg": [31, 35], "cfg_dir": 32, "cfgid": 35, "chainabl": [0, 1, 98], "chang": [37, 92], "charact": 17, "chartreus": [7, 9], "check": [4, 11, 47, 74, 75, 78, 92, 93, 98], "check_absolute_path": [0, 1, 98], "check_dir_path": [0, 1, 98], "check_exit_cod": 74, "check_extend": [29, 31], "check_file_path": [0, 1, 98], "check_func": 74, "check_relative_path": [0, 1, 98], "check_result": [46, 47], "check_typ": [0, 1, 98], "children": [11, 37, 95], "chocol": [7, 9], "chunk": [16, 35], "chunk_id": [29, 37], "chunk_num_block": 98, "circular": [7, 8, 16, 90, 92, 93], "circularities_module_nam": [90, 93], "circularity_rule_id": [90, 93], "claim": [1, 4, 7, 14, 16, 19, 32, 55, 60, 65, 71, 72, 73, 77, 91, 93], "claim_id": [4, 14, 77, 91], "claim_index": [1, 71], "claim_label": [73, 77], "claim_load": [1, 71], "claimindex": [1, 71, 72, 77], "claimload": [1, 71, 73], "class": [4, 5, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 20, 21, 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, 40, 47, 52, 56, 57, 60, 61, 63, 64, 65, 66, 68, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 91, 92, 93, 94, 95, 98], "classmethod": [8, 63, 65, 91, 92, 93], "classvar": [28, 37, 63], "claus": [4, 14, 16, 47], "clear": 74, "click": 37, "close": [55, 61, 63, 72, 90, 91, 92, 93], "cmap": 78, "cmd": 70, "code": [7, 15, 55, 63, 74, 78], "code_block": [1, 7, 15], "codeblock": [1, 7, 15], "coincid": 65, "coinduct": 16, "col": [7, 17], "collapse_dot": [1, 7, 14], "collect": [1, 7, 11, 14, 16, 32, 58, 81], "colon": [7, 13, 17, 55, 57], "color": [1, 7, 8, 66], "colorstyp": [1, 7, 8], "colortyp": [1, 7, 8], "column": 17, "come": 11, "comm": [7, 8], "comma": [7, 13, 17, 55, 57], "command": [55, 63, 70, 71, 75, 76, 77, 78], "comment": 98, "commit": [90, 91, 92, 93], "compare_short_hash": [0, 1, 98], "comparison": 11, "compil": [1, 28, 42], 
"compile_kllvm": [1, 42, 44], "compile_runtim": [1, 42, 44], "complet": [16, 92], "completedprocess": [78, 98], "compon": [4, 16], "compos": [7, 11, 29, 37, 90, 95], "composeresult": [37, 95], "compositepattern": 52, "compositesummari": [1, 90, 92, 93], "comput": [11, 16], "compute_ordin": [55, 65], "con": 11, "concat": [7, 8], "concaten": 61, "concret": [7, 8, 74], "cond_1": 33, "cond_b": 33, "cond_i": 33, "cond_n": 33, "condit": [3, 4, 5, 14, 16, 47, 92], "config": [1, 3, 4, 7, 14, 19, 24, 35, 62], "configur": [4, 14, 16, 33, 47], "configurarion": 47, "conflict": [11, 16], "conjunct": [1, 55, 58, 87], "consequ": [5, 31, 63, 85, 87, 90, 91], "consid": [4, 16, 72], "constrain": 4, "constrained_term": 14, "constraint": [1, 3, 4, 14, 29, 32, 37, 90, 91], "constraint_on": 37, "construct": [4, 11, 47, 65], "construct_node_refut": [90, 93], "constructor": [7, 8, 14, 16, 74], "contain": [4, 8, 14, 15, 16, 32, 33, 37, 47, 68, 74], "contains_cov": [29, 32], "contains_edg": [29, 32], "contains_merged_edg": [29, 32], "contains_ndbranch": [29, 32], "contains_nod": [29, 32], "contains_split": [29, 32], "content": [2, 4, 7, 14, 16], "context": [1, 7, 16, 17, 19, 39, 40, 55, 63], "contextmanag": [61, 63, 79, 92], "convent": [4, 14], "convert": [1, 42], "copi": 11, "coral": [7, 9], "cornflower_blu": [7, 9], "cornflowerblu": 9, "cornsilk": [7, 9], "correspond": [35, 47], "could": 16, "count": 11, "count_lines_cov": [0, 1, 38], "count_lines_fil": [0, 1, 38], "count_lines_glob": [0, 1, 38], "count_rules_cov": [0, 1, 38], "count_var": [1, 7, 14], "counter": 14, "counterexample_info": [90, 93], "cover": [29, 32], "cover_map": 38, "cover_map_fil": 38, "coverag": [0, 1], "craft": 32, "creat": [32, 37, 46, 47, 71, 80, 92], "create_cov": [29, 32], "create_cover_map": [0, 1, 38], "create_edg": [29, 32], "create_merged_edg": [29, 32], "create_ndbranch": [29, 32], "create_nod": [29, 32], "create_prov": 92, "create_rule_map": [0, 1, 38], "create_rule_map_by_fil": [0, 1, 38], "create_rule_map_by_lin": [0, 1, 38], "create_serv": 61, "create_split": [29, 32], "create_split_by_nod": [29, 32], "create_temp": [24, 26], "created_nod": 32, "crimson": [7, 9], "css": 37, "css_path": [29, 37], "csspathtyp": 37, "csubst": [1, 3, 4, 5, 29, 32, 90, 91, 93], "csubst1": 4, "csubst2": 4, "cterm": [0, 1, 29, 31, 32, 34, 77], "cterm_build_claim": [1, 3, 4], "cterm_build_rul": [1, 3, 4], "cterm_symbol": [1, 3, 5, 29, 31], "ctermexecut": [1, 3, 5], "ctermimpli": [1, 3, 5], "cterms_anti_unifi": [1, 3, 4], "ctermsmterror": [1, 3, 5], "ctermsymbol": [1, 3, 5, 31, 34], "ctl": 93, "ctor_pattern": [55, 65], "ctx": [55, 64], "current": 92, "custom": [1, 29, 37, 74], "custom_on": 37, "custom_step": [29, 34], "custom_view": [37, 95], "cut": [29, 32, 33, 63], "cut_point_rul": [5, 31, 55, 63, 90, 93], "cutpointresult": [1, 55, 63], "cwd": [1, 39, 41, 75, 98], "cyan": [7, 9], "d": [8, 11, 16], "d1": 98, "d2": 98, "dandelion": [7, 9], "dark_blu": [7, 9], "dark_cyan": [7, 9], "dark_goldenrod": [7, 9], "dark_grai": [7, 9], "dark_green": [7, 9], "dark_grei": [7, 9], "dark_khaki": [7, 9], "dark_magenta": [7, 9], "dark_olive_green": [7, 9], "dark_orang": [7, 9], "dark_orchid": [7, 9], "dark_r": [7, 9], "dark_salmon": [7, 9], "dark_sea_green": [7, 9], "dark_slate_blu": [7, 9], "dark_slate_grai": [7, 9], "dark_slate_grei": [7, 9], "dark_turquois": [7, 9], "dark_violet": [7, 9], "darkblu": 9, "darkcyan": 9, "darkgoldenrod": 9, "darkgrai": [7, 9], "darkgreen": 9, "darkgrei": 9, "darkkhaki": 9, "darkmagenta": 9, "darkolivegreen": 9, "darkorang": 9, 
"darkorchid": 9, "darkr": 9, "darksalmon": 9, "darkseagreen": 9, "darkslateblu": 9, "darkslategrai": 9, "darkslategrei": 9, "darkturquois": 9, "darkviolet": 9, "data": [2, 4, 55, 62, 63, 92], "datastructur": 16, "date": 92, "dcoloneq": [7, 17], "dct": [4, 11, 12, 27, 30, 32, 56, 63, 65, 91, 92, 93], "deadlin": 74, "debug": [26, 75], "debug_applied_rewrite_rul": [67, 68], "debug_apply_equ": [67, 68], "debugappliedrewriterul": 68, "debugapplyequ": 68, "debugg": [70, 78], "decl": [7, 19, 45], "declar": [16, 45], "deconstruct_short_hash": [0, 1, 98], "decor": 70, "deep_pink": [7, 9], "deep_sky_blu": [7, 9], "deeppink": 9, "deepskyblu": 9, "default": [7, 8, 13, 17, 37, 59, 74, 98], "default_format": [7, 16], "default_host": 63, "default_port": 63, "default_transport": 63, "defaulterror": [1, 55, 63], "defaultsemant": [1, 29, 34], "defin": [11, 16], "definit": [1, 2, 5, 7, 8, 10, 14, 16, 18, 19, 21, 23, 45, 47, 51, 55, 56, 60, 64, 65, 68, 71, 74, 76, 78], "definition_dir": [5, 24, 26, 38, 44, 56, 63, 66, 69, 70, 71, 74, 76, 77, 78], "definition_fil": 23, "definition_hash": [71, 76], "definition_to_llvm": [1, 42, 45], "defn": [14, 32, 64, 91, 93], "defunc_with": [4, 14, 32], "defunction": [1, 4, 7, 14], "deleted_nod": 32, "delimit": 98, "dep": [39, 40], "depend": [7, 8, 14, 16, 24, 27, 72, 73, 92], "dependencies_module_nam": [90, 93], "depth": [3, 5, 29, 31, 32, 52, 55, 63, 77, 78], "depth_bound": [55, 63], "depthboundresult": [1, 55, 63], "dequot": [0, 1], "dequote_byt": [0, 1, 6], "dequote_str": [0, 1, 6], "descript": 37, "deseri": [4, 11, 42, 47, 52], "design": 33, "desir": [2, 11], "destin": 2, "determin": [16, 74], "di": 16, "dict": [4, 8, 11, 12, 14, 16, 21, 23, 27, 30, 31, 32, 38, 40, 47, 55, 56, 58, 63, 65, 68, 74, 83, 90, 91, 92, 93, 98], "dictionari": [4, 11, 16, 21, 47, 68], "die": 4, "differ": 33, "digest": [7, 8, 90, 92], "digraph": [35, 94], "dim_grai": [7, 9], "dim_grei": [7, 9], "dimgrai": 9, "dimgrei": 9, "direct_rul": 93, "direct_subproof_rul": [90, 93], "directli": [14, 33], "directori": [2, 78, 80], "dirti": [24, 28], "disallow": 33, "discard": [4, 7, 8], "discard_attr": [29, 32], "discard_stuck": [29, 32], "discard_vacu": [29, 32], "discret": 92, "disjunct": 87, "disk": [16, 92], "dispatch": 63, "distribut": 80, "dividend": 86, "divint": [1, 81, 86], "divisior": 86, "divisor": 86, "do": [4, 14, 92], "do_load": [69, 70], "do_show": [69, 70], "do_step": [69, 70], "dodger_blu": [7, 9], "dodgerblu": 9, "doe": 16, "doesn": 74, "doi": 93, "domain": [11, 74], "dot": [14, 29, 35, 90, 94], "dotk": [7, 13], "dotklist": [7, 13], "dotvar": 62, "down": [11, 33, 37], "downward": 11, "drop_sourc": [7, 8], "dst": 2, "dst_all_rul": 2, "dst_definit": 2, "dst_kompiled_dir": 2, "dump": [29, 35, 90, 94], "dump_dir": [35, 94], "duplicatemoduleerror": [1, 55, 63], "dure": [47, 74, 92], "dv": [1, 55, 59, 60, 62, 65], "each": [4, 11, 14, 33, 35, 74, 92, 93], "ebnf": 16, "edg": [29, 32, 33, 35], "edge_lik": [29, 32], "edgelik": [29, 32], "effect": [11, 33], "effort": [16, 21], "either": [4, 11, 16, 33, 47], "elem_var": [55, 60], "element": [7, 8, 11, 16, 37], "emain": 16, "emerald": [7, 9], "empti": [4, 11, 16, 68], "empty_config": [7, 16], "en": [55, 64], "encod": [2, 9, 21], "end": [37, 47], "enquot": [0, 1, 6], "enquote_byt": [0, 1, 6], "enquote_str": [0, 1, 6], "ensur": [4, 7, 14, 16, 47, 70], "ensure_dir_path": [0, 1, 98], "enter": 47, "entir": 16, "entri": [7, 8], "enum": [9, 13, 16, 17, 19, 47, 57, 63, 66, 68, 75, 76, 77, 78, 92], "enumer": [9, 13, 16, 17, 19, 57, 63, 66, 68, 75, 76, 77, 78, 
92], "env": 98, "eof": [7, 13, 17, 20, 55, 57, 60], "eq": [7, 17], "eq_bool": [1, 55, 62], "eq_int": [1, 55, 62], "eqint": [1, 81, 86], "equal": [1, 37, 55, 60, 64, 65, 90, 91], "equalityproof": [1, 90, 91], "equalitysummari": [1, 90, 91], "equalsrul": [1, 55, 64], "equival": 33, "er": 63, "err": 98, "error": [55, 63], "error_info": [90, 93], "eucliddivint": [1, 81, 86], "euclidmodint": [1, 81, 86], "eval": [7, 15], "evalu": [8, 16, 42, 47, 52], "evar": [1, 55, 58, 60, 62, 63, 64, 65, 74], "even": [4, 14, 37, 92], "event": [46, 47], "event_typ": 47, "eventtyp": 47, "everi": 11, "ex": 47, "exact": 11, "exampl": [16, 92], "except": [5, 63, 65, 70, 93], "exclud": [72, 73], "exclude_claim_label": [73, 77], "exec_process": 98, "exec_tim": [37, 90, 93], "execut": [3, 4, 5, 47, 55, 63, 74, 78, 92, 93], "execute_depth": [31, 90, 93], "executeresult": [1, 55, 63], "exist": [1, 4, 11, 55, 60, 65, 74], "existing_var_nam": 14, "exit": [7, 8, 47, 74], "exit_with_process_error": [0, 1, 98], "expand_macro": 78, "expect": [4, 16, 59, 70], "expint": [1, 81, 86], "explicit": 74, "explor": [1, 29], "explore_context": 79, "expmodint": [1, 81, 86], "expon": 86, "express": [16, 73], "extend": [29, 32, 93], "extend_cterm": [29, 31], "extend_result": 32, "extens": 93, "extension_to_appli": [90, 93], "extension_to_cach": [90, 93], "extra_modul": [21, 90, 93], "extra_unparsing_modul": [21, 76, 77, 78], "extract": [4, 11, 68], "extract_al": [55, 64], "extract_cel": [1, 7, 14], "extract_lh": [1, 7, 14], "extract_rh": [1, 7, 14], "extract_subst": [1, 7, 14], "f": [8, 11, 14, 16, 28, 65, 98], "f1": 98, "f2": 98, "f3": 98, "f4": 98, "fail": [11, 16, 47, 78, 90, 92, 93], "fail_fast": 92, "failing_cel": [3, 5], "failing_nod": [90, 93], "failur": [4, 16, 47, 74], "failure_info": [90, 91, 92, 93], "failure_reason": [5, 90, 93], "failureinfo": [1, 90, 91, 92, 93], "fallback_on": [5, 55, 63], "fallbackreason": [1, 5, 55, 63], "fals": [4, 5, 14, 19, 21, 26, 30, 31, 32, 33, 35, 37, 44, 63, 65, 72, 74, 75, 76, 78, 87, 91, 92, 93, 94, 98], "fast_check_subsumpt": [90, 93], "faster": 47, "fetch_subproof": [90, 92], "fetch_subproof_data": [90, 92], "field": [5, 13, 15, 17, 37, 57, 63, 92], "file": [2, 9, 16, 37, 47, 68, 73, 98], "file_nam": 28, "file_path": 77, "files_for_path": [1, 39, 41], "fill": 16, "filter": 14, "filter_non": [0, 1, 98], "final": [4, 5, 8, 11, 12, 14, 15, 16, 19, 26, 27, 28, 32, 40, 47, 56, 63, 64, 65, 70, 80, 98], "final_config": 14, "final_constraint": 14, "final_cterm": 4, "final_node_id": 93, "find": [4, 14, 16], "find_common_item": [0, 1, 98], "find_file_upward": [1, 24, 28], "finish": 92, "finput": 98, "fire_brick": [7, 9], "firebrick": 9, "first": 11, "fixturerequest": 97, "flat": 11, "flatten": [4, 11], "flatten_label": [1, 7, 11], "float": [37, 93], "floor": [1, 55, 60, 65], "floral_whit": [7, 9], "floralwhit": 9, "fn": 61, "fo": 11, "focu": [37, 95], "focus": 16, "follow": 33, "for_definit": [55, 56], "foral": [1, 55, 60, 65], "force_kast": 76, "force_reread": 92, "forest_green": [7, 9], "forestgreen": 9, "form": 16, "format": [1, 7, 8, 10, 11, 16, 47, 63], "format_spec": 63, "formatt": [1, 7], "formatted_exec_tim": [90, 93], "formattyp": [1, 7, 8], "formula": 93, "fqn": 40, "frame": 14, "free": [4, 11], "free_occ": [1, 55, 58], "free_var": [1, 3, 4, 7, 14, 29, 32], "fresh": [14, 33], "fresh_gener": [7, 8], "freshgener": 8, "from": [2, 4, 11, 14, 16, 33, 37, 47, 72, 73, 74, 78, 92], "from_axiom": [55, 64], "from_claim": [29, 32, 90, 91, 93], "from_dict": [3, 4, 7, 8, 11, 16, 24, 27, 29, 30, 32, 55, 
56, 63, 65, 90, 91, 92, 93], "from_fil": [46, 47], "from_json": [7, 11, 29, 32, 55, 65], "from_kast": [3, 4], "from_module_list": [71, 72], "from_pr": [3, 4, 7, 11], "from_proof": [90, 93], "from_sort": 84, "from_spec_modul": [90, 93], "frontend": [4, 14, 16], "frozendict": [0, 1, 8, 11, 16, 27, 32, 63, 65, 72, 93, 98], "frozenset": [4, 14, 16, 32, 93, 98], "fuchsia": [7, 9], "full_nam": [39, 40], "full_print": [29, 35, 94], "function": [2, 7, 8, 11, 14, 16, 33, 47, 92], "function_ev": 47, "function_label": [7, 16], "function_nam": [46, 47], "functionrul": [1, 55, 64], "further": 33, "futur": 61, "fuzz": [1, 71, 74], "fuzz_with_check": [71, 74], "fuzz_with_exit_cod": [71, 74], "g": [28, 37], "gainsboro": [7, 9], "ge_int": [1, 55, 62], "geint": [1, 81, 86], "gen_file_timestamp": [0, 1, 98], "gen_glr_pars": [1, 71, 76], "gener": [2, 4, 8, 11, 14, 16, 21, 47, 64, 68, 70, 74, 78, 92, 98], "general_edg": [29, 32], "generate_hint": [1, 42, 44], "generated_count": [1, 55, 62], "generated_top": [1, 55, 62], "generatedcount": 14, "generatedtop": 14, "generatedtopcel": [14, 87], "get": [2, 7, 8, 11, 92], "get_axiom_by_ordin": [55, 65], "get_claim": [71, 77], "get_claim_index": [71, 77], "get_kllvm": [1, 42, 50], "get_model": [3, 5, 55, 63], "get_nod": [29, 32], "get_refutation_id": [90, 93], "get_requir": [1, 42, 53], "get_rule_by_id": [0, 1, 2], "get_step": [90, 91, 92, 93], "get_target": [24, 27], "getmodelresult": [1, 55, 63], "ghost_whit": [7, 9], "ghostwhit": 9, "git": [24, 28], "give": [4, 14], "given": [2, 4, 11, 14, 16, 21, 33, 35, 47, 72, 98], "glb": 16, "go": 37, "gold": [7, 9], "goldenrod": [7, 9], "grai": [7, 9], "grammar": 16, "graphchunk": [1, 29, 37], "greatest": 16, "greatest_common_subsort": [7, 16], "green": [7, 9], "green_yellow": [7, 9], "greenyellow": 9, "grei": [7, 9], "group": [7, 8, 19], "gt": [7, 17], "gt_int": [1, 55, 62], "gtint": [1, 81, 86], "guarante": 33, "h": [37, 98], "ha": [2, 17, 63, 93], "halt": 92, "hand": [11, 14], "handl": 74, "handle_failur": [71, 74], "handle_test": [71, 74], "handler": [37, 71, 74], "handler_nam": [29, 37], "has_domain_valu": [7, 8], "hasdomainvalu": 8, "hash": [3, 4, 7, 12], "hash_fil": [0, 1, 98], "hash_str": [0, 1, 98], "haskel": [68, 71, 75], "haskell_arg": 77, "haskell_backend_oneline_log_fil": 68, "haskell_log_entri": [5, 55, 63], "haskell_log_format": [5, 55, 63], "haskell_thread": [55, 63], "haskelllogentri": [1, 67, 68], "have": [14, 16, 32, 33, 74], "head": [4, 11], "header": 47, "header_path": 47, "help": [16, 70], "here": 16, "heurist": [11, 29, 30, 32, 33], "hide": 37, "hide_cel": [29, 35], "hint": [1, 42, 44], "hints_file_nam": 44, "home": 37, "honeydew": [7, 9], "hook": [7, 8, 47, 55, 65], "hook_ev": 47, "hooked_sort_decl": [55, 60], "hooked_symbol_decl": [55, 60], "hopefulli": 16, "horizont": 37, "host": [55, 63], "hot_pink": [7, 9], "hotpink": 9, "how": 16, "http": [55, 63, 93], "httptransport": [1, 55, 63], "hypothesi": 74, "hypothesis_arg": 74, "i": [4, 11, 14, 16, 17, 32, 33, 35, 37, 47, 70, 72, 74, 86, 92], "i1": 86, "i2": 86, "i3": 86, "id": [1, 2, 5, 7, 13, 16, 29, 31, 32, 37, 55, 57, 60, 65, 90, 91, 92, 93, 95], "id_low": [7, 17], "id_upp": [7, 17], "idem": [7, 8], "ident": [11, 14], "identifi": [2, 16, 33, 35], "ie": 74, "if_ktyp": [1, 7, 14], "iff": [1, 55, 60, 65], "ignor": 93, "ignore_warn": 75, "imag": [1, 98], "implement": [74, 93], "impli": [1, 3, 5, 55, 60, 63, 65, 90], "implic": [55, 63], "implication_failure_reason": [29, 31], "implicationerror": [1, 55, 63], "implies_bool": [1, 55, 62], "impliesbool": [1, 
81, 85], "impliesproof": [1, 90, 91], "impliesproofresult": [1, 90, 91], "impliesproofstep": [1, 90, 91], "impliesprov": [1, 90, 91], "impliesresult": [1, 55, 63], "import": [1, 7, 16, 18, 19, 32, 42, 55, 60, 65], "import_from_fil": [1, 42, 48], "import_kllvm": [1, 42, 48], "import_runtim": [1, 42, 48], "importt": [7, 18, 55, 60], "impur": [7, 8], "in_modul": 76, "includ": [16, 72, 73], "include_depend": [73, 77], "include_dir": [23, 73, 77], "include_sourc": 23, "indent": [1, 7, 10, 21, 98], "index": [4, 7, 8, 14, 72, 99], "indexed_rewrit": [1, 7, 22], "indian_r": [7, 9], "indianr": 9, "indic": [33, 47, 74], "indigo": [7, 9], "indirectli": 14, "individu": 16, "infer": 73, "infer_sort": [55, 56], "infinit": 93, "info": [1, 7, 15, 29, 32, 37, 74], "inform": [2, 4, 11, 14, 17, 47, 68, 70, 92], "inherit": 16, "init": [37, 90, 93], "init_config": [7, 14, 16], "init_constraint": 14, "init_cterm": 4, "init_generated_top_cel": [1, 55, 62], "init_proof": [90, 91, 92, 93], "init_st": [69, 70], "initi": [4, 7, 8, 14, 16, 17, 37, 47, 92], "initial_config": [46, 47], "initial_var": 14, "initvar": 16, "inj": [1, 55, 59, 62, 81, 84], "inject": [7, 8, 14, 16, 74], "inlin": 14, "inline_cell_map": [1, 7, 14], "inn": [55, 60], "inner": [1, 7], "input": [4, 11, 14, 16, 74, 78, 98], "input_fil": 78, "input_kore_fil": 44, "insert": 16, "inspect": 16, "instanc": [4, 47, 74], "instanti": [4, 11, 16, 70, 80, 86], "instantiate_cell_var": [7, 16], "instead": [11, 16, 74], "int": [4, 5, 8, 10, 11, 12, 14, 16, 17, 19, 21, 28, 30, 31, 32, 36, 38, 41, 47, 52, 59, 61, 62, 63, 64, 65, 68, 70, 74, 77, 78, 86, 89, 92, 93, 98], "int_": 86, "int__": 86, "int_dv": [1, 55, 62], "integ": [74, 86], "interfac": 74, "interim_simplif": [5, 55, 63], "interpret": [1, 4, 69, 70, 74, 78], "interspers": [0, 1, 98], "intro": [69, 70], "introduc": 93, "inttoken": [1, 81, 86], "inttyp": [1, 7, 8], "invalidmoduleerror": [1, 55, 63], "io": [9, 65], "is_anon_var": [1, 7, 14], "is_bottom": [1, 3, 4, 81, 87], "is_bound": [90, 93], "is_cel": [7, 11], "is_circular": [7, 16], "is_cov": [29, 32], "is_explor": [29, 30], "is_fail": [90, 93], "is_hash": [0, 1, 98], "is_hexstr": [0, 1, 98], "is_ident": [7, 11], "is_init": [90, 93], "is_initial_config": [46, 47], "is_kore_pattern": [46, 47], "is_leaf": [29, 32], "is_loop": [29, 34], "is_merg": [29, 33, 34], "is_ndbranch": [29, 32], "is_pend": [90, 93], "is_pre_trac": [46, 47], "is_prefix": [7, 16], "is_record": [7, 16], "is_refut": [90, 93], "is_root": [29, 32], "is_rul": [55, 64], "is_split": [29, 32], "is_spurious_constraint": [1, 7, 14], "is_step_ev": [46, 47], "is_stuck": [29, 32], "is_subsort": [55, 56], "is_target": [90, 93], "is_term_lik": [1, 7, 14], "is_termin": [29, 30, 34], "is_top": [1, 81, 87], "is_trac": [46, 47], "is_trust": [7, 16], "is_vacu": [29, 32], "isn": 16, "isol": 16, "item": [7, 11, 16, 19, 59, 72, 85], "iter": [2, 4, 5, 6, 8, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 26, 27, 28, 30, 31, 32, 33, 35, 37, 38, 40, 41, 44, 47, 48, 56, 57, 62, 63, 65, 72, 73, 75, 76, 77, 78, 83, 85, 87, 91, 92, 93, 94, 95, 98], "its": [4, 14, 47], "itself": 16, "ivori": [7, 9], "join": [11, 33], "json": [1, 2, 21, 55, 62, 65, 66, 71, 73, 76, 77, 78, 90, 92], "json2str": [1, 55, 62], "json_entri": [1, 55, 62], "json_fil": 56, "json_kei": [1, 55, 62], "json_list": [1, 55, 62], "json_object": [1, 55, 62], "json_to_kor": [1, 55, 62], "jsonrpcclient": [1, 55, 63], "jsonrpcclientfacad": [1, 55, 63], "jsonrpcerror": [1, 55, 63], "jungle_green": [7, 9], "junglegreen": 9, "just": [11, 35], "k": [1, 4, 
7, 11, 14, 16, 20, 21, 23, 55, 59, 62, 68, 70, 74, 80, 81, 83, 87, 98], "k_": 11, "k_config_var": [1, 55, 62], "k_version": [1, 24, 26, 28], "ka": [1, 7, 11], "kappli": [1, 7, 11, 14, 85, 86, 87, 91], "kassoc": [1, 7, 16], "kast": [0, 1, 3, 4, 5, 55, 66, 71, 76, 77, 78, 86], "kast_simplifi": [3, 5], "kast_term": [1, 7, 12], "kast_to_kor": [3, 5, 71, 76], "kastinput": [1, 71, 76], "kastoutput": [1, 71, 76], "kastpars": [1, 7, 20], "katt": [1, 7, 8, 14, 16, 32], "kbool": [1, 81], "kbubbl": [1, 7, 16], "kbuild": [0, 1], "kbuildenv": [1, 24, 26], "kcfg": [0, 1, 93, 94], "kcfg_explor": [31, 90, 91, 93], "kcfg_json_path": [29, 32], "kcfg_node_dir": [29, 32], "kcfg_node_path": [29, 32], "kcfg_semant": [29, 31], "kcfg_show": [90, 94], "kcfgelem": [37, 95], "kcfgexplor": [1, 29, 30, 31, 79, 91, 93], "kcfgexplorationnodeattr": [1, 29, 30], "kcfgextendresult": [1, 29, 31, 32, 34, 93], "kcfgminim": [1, 29, 33], "kcfgnodeattr": [1, 29, 32], "kcfgsemant": [1, 29, 30, 31, 33, 34], "kcfgshow": [1, 29, 35, 94], "kcfgstore": [1, 29, 32], "kcfgviewer": [1, 29, 37, 95], "kclaim": [1, 4, 7, 14, 16, 32, 72, 73, 77, 91, 93], "kcontext": [1, 7, 16], "kcovr": [0, 1], "kdef": [4, 29, 33], "kdefinit": [1, 2, 4, 5, 7, 10, 11, 14, 16, 21, 23, 32, 33, 68, 76, 91, 93], "kdist": [0, 1], "kdist_dir": [24, 26], "kdistribut": [1, 71, 80], "keep": [4, 14], "keep_cel": 14, "keep_nod": [30, 32, 93], "keep_valu": 4, "keep_var": [4, 14], "kei": [4, 7, 8, 11, 16, 37, 59, 62], "key_displai": 37, "keyboard": 37, "keystrok": 37, "keyword": 74, "kflatmodul": [1, 7, 16, 21, 32, 35, 76, 77, 78, 93], "kflatmodulelist": [1, 7, 16, 72, 77, 93], "kframework": 8, "kfuzz": [1, 71], "kfuzzhandl": [1, 71, 74], "khaki": [7, 9], "ki": [11, 14], "kimport": [1, 7, 16, 32], "kinner": [1, 4, 5, 7, 10, 11, 14, 16, 20, 22, 31, 32, 35, 76, 78, 83, 84, 85, 86, 87, 91, 93], "kint": [1, 81], "kinteg": [1, 71, 74], "kinterpret": [1, 69, 70], "kitem": [7, 11, 20, 62, 87], "klabel": [1, 7, 11, 13, 16, 17, 19, 20, 21], "klist": [7, 20], "kllvm": [0, 1], "knontermin": [1, 7, 16], "know": 35, "kompil": [1, 2, 24, 26, 55, 68, 71, 78, 80, 96, 97], "kompiled_dir": [55, 63], "kompiledkor": [1, 55, 56], "konvert": [0, 1], "kore": [0, 1, 8, 47, 71, 74, 75, 76, 77, 78], "kore_bool": [1, 55, 59], "kore_byt": [1, 55, 59], "kore_cli": 5, "kore_exec_covr": [0, 1], "kore_fil": 56, "kore_head": 47, "kore_id": [1, 55, 59], "kore_int": [1, 55, 59], "kore_lex": [1, 55, 57], "kore_list_of": [1, 55, 59], "kore_map_of": [1, 55, 59], "kore_pattern": [46, 47], "kore_print": [1, 55, 66], "kore_rangemap_of": [1, 55, 59], "kore_rpc": [55, 63], "kore_rpc_command": 5, "kore_serv": [1, 55, 63], "kore_set_of": [1, 55, 59], "kore_str": [1, 55, 59], "kore_symbol": [55, 65], "kore_term": [1, 55, 65], "kore_to_json": [1, 55, 62], "kore_to_kast": [3, 5, 71, 76], "kore_to_pretti": [71, 76], "korecli": [1, 5, 55, 63], "koreclienterror": [1, 55, 63], "koreexeclogformat": [1, 5, 55, 63], "korehead": [42, 46, 47], "korepars": [1, 55, 60], "koreserv": [1, 55, 61, 63, 92], "koreserverarg": [1, 55, 63], "koreserverinfo": [1, 55, 63], "koreserverpool": [1, 55, 61], "koresortt": [1, 55, 56], "koresymbolt": [1, 55, 56], "koretoken": [1, 55, 57], "kouter": [1, 7, 16], "kprint": [1, 29, 35, 37, 71, 77, 78, 91, 94, 95], "kproduct": [1, 7, 16, 21], "kproductionitem": [1, 7, 16], "kprove": [1, 71, 73, 79], "kproveoutput": [1, 71, 77], "kregextermin": [1, 7, 16], "krepl": [0, 1], "krequir": [1, 7, 16], "krewrit": [1, 7, 11, 22], "krule": [1, 2, 4, 7, 14, 16, 68, 77, 93], "krulelik": [1, 7, 16, 32], "krun": [1, 
71], "krunoutput": [1, 71, 78], "ksentenc": [1, 7, 16], "kseq": [1, 7, 13, 55, 62], "ksequenc": [1, 7, 11, 16], "ksort": [1, 7, 11, 14, 16, 76, 84, 87, 91], "ksortsynonym": [1, 7, 16], "kstate": [1, 69, 70], "ksyntaxassoci": [1, 7, 16], "ksyntaxlex": [1, 7, 16], "ksyntaxprior": [1, 7, 16], "ksyntaxsort": [1, 7, 16], "ktermin": [1, 7, 16], "ktoken": [1, 7, 11, 16, 76, 82, 85, 86, 88, 89], "ktool": [0, 1], "ktype": 14, "kvariabl": [1, 7, 11, 14, 87], "kversion": [1, 24, 28], "kw_alia": [7, 17, 55, 57], "kw_axiom": [55, 57], "kw_claim": [7, 17, 55, 57], "kw_config": [7, 17], "kw_context": [7, 17], "kw_endmodul": [7, 17, 55, 57], "kw_hooked_sort": [55, 57], "kw_hooked_symbol": [55, 57], "kw_import": [7, 17, 55, 57], "kw_left": [7, 17], "kw_lexic": [7, 17], "kw_modul": [7, 17, 55, 57], "kw_nonassoc": [7, 17], "kw_prioriti": [7, 17], "kw_privat": [7, 17], "kw_public": [7, 17], "kw_requir": [7, 17], "kw_right": [7, 17], "kw_rule": [7, 17], "kw_sort": [55, 57], "kw_symbol": [55, 57], "kw_syntax": [7, 17], "kw_where": [55, 57], "kwarg": [61, 75, 93, 98], "l1": 98, "l2": 98, "label": [4, 7, 8, 11, 14, 16, 19, 32, 55, 64, 71, 72, 73], "labels_to_dot": [1, 7, 14], "lambda": 92, "last": 17, "last_constraint": [90, 91], "latex": [55, 66, 71, 76, 77, 78], "lattic": 16, "lavend": [7, 9], "lavender_blush": [7, 9], "lavenderblush": 9, "lawn_green": [7, 9], "lawngreen": 9, "layout": 37, "lbrace": [7, 17, 55, 57], "lbrack": [7, 17, 55, 57], "le": 63, "le_int": [1, 55, 62], "leaf": 14, "least": [33, 74], "least_common_supersort": [7, 16], "leav": [4, 14, 29, 32], "left": [7, 8, 11, 14, 16, 19, 37, 55, 62, 65, 86], "left_assoc": [7, 16, 55, 60], "left_char": 98, "leftassoc": [1, 55, 59, 60, 65], "leint": [1, 81, 86], "lemma": [8, 77], "lemon_chiffon": [7, 9], "lemonchiffon": 9, "length": 11, "let": [7, 11, 16, 17, 29, 32, 55, 65], "let_att": [7, 8, 16], "let_attr": [55, 65], "let_nod": [29, 32], "let_pattern": [55, 65], "let_sort": [7, 11, 55, 65], "let_term": [7, 11], "lexer": [1, 7, 55], "lexic": [1, 7, 16, 19], "lexicograph": 11, "lh": [4, 7, 11, 14, 55, 64, 98], "lhs_bodi": [90, 91], "lift": 33, "lift_edg": [29, 33], "lift_split": [29, 33], "lift_split_edg": [29, 33], "lift_split_split": [29, 33], "light_blu": [7, 9], "light_cor": [7, 9], "light_cyan": [7, 9], "light_goldenrod": [7, 9], "light_goldenrod_yellow": [7, 9], "light_grai": [7, 9], "light_green": [7, 9], "light_grei": [7, 9], "light_pink": [7, 9], "light_salmon": [7, 9], "light_sea_green": [7, 9], "light_sky_blu": [7, 9], "light_slate_blu": [7, 9], "light_slate_grai": [7, 9], "light_slate_grei": [7, 9], "light_steel_blu": [7, 9], "light_yellow": [7, 9], "lightblu": 9, "lightcor": 9, "lightcyan": 9, "lightgoldenrod": 9, "lightgoldenrodyellow": 9, "lightgrai": [7, 9], "lightgreen": 9, "lightgrei": 9, "lightpink": 9, "lightsalmon": 9, "lightseagreen": 9, "lightskyblu": 9, "lightslateblu": 9, "lightslategrai": 9, "lightslategrei": 9, "lightsteelblu": 9, "lightyellow": 9, "like": [14, 16, 68], "lime": [7, 9], "lime_green": [7, 9], "limegreen": 9, "limit": 4, "line": [7, 17, 35, 68, 90, 91, 92, 93, 98], "linen": [7, 9], "list": [2, 4, 11, 14, 16, 20, 27, 28, 30, 31, 32, 33, 35, 37, 38, 41, 47, 58, 60, 63, 64, 72, 73, 77, 79, 91, 92, 93, 94, 98], "list_empti": [1, 81, 83], "list_item": [1, 81, 83], "list_of": [1, 81, 83], "list_pattern": [1, 55, 62], "liter": [16, 86], "llvm": [47, 55, 63, 71, 75, 78], "llvm_definition_dir": [5, 63], "llvm_event": 47, "llvm_function_ev": 47, "llvm_hook_ev": 47, "llvm_interpret": [1, 71, 78], "llvm_interpret_raw": [1, 71, 
78], "llvm_kompiled_dir": [55, 63], "llvm_pattern_matching_failure_ev": 47, "llvm_rewrite_trac": 47, "llvm_rewrite_trace_iter": 47, "llvm_rule_ev": 47, "llvm_side_condition_end_ev": 47, "llvm_side_condition_ev": 47, "llvm_to_definit": [1, 42, 45], "llvm_to_modul": [1, 42, 45], "llvm_to_pattern": [1, 42, 45], "llvm_to_sent": [1, 42, 45], "llvm_to_sort": [1, 42, 45], "llvm_to_sort_var": [1, 42, 45], "llvmargument": [42, 46, 47], "llvmeventannot": [42, 46, 47], "llvmeventtyp": [42, 46, 47], "llvmfunctionev": [42, 46, 47], "llvmhookev": [42, 46, 47], "llvmpatternmatchingfailureev": [42, 46, 47], "llvmrewriteev": [42, 46, 47], "llvmrewritetrac": [42, 46, 47], "llvmrewritetraceiter": [42, 46, 47], "llvmruleev": [42, 46, 47], "llvmsideconditioneventent": [42, 46, 47], "llvmsideconditioneventexit": [42, 46, 47], "llvmstepev": [42, 46, 47], "lmc": 93, "load": [1, 24, 27, 37, 42, 55, 56, 73], "load_claim": [71, 73], "load_from_dir": [24, 27], "load_from_json": [55, 56], "load_from_kor": [55, 56], "load_stat": [1, 42], "loc": [1, 7, 17], "locat": [7, 8, 17, 19, 68, 74], "locationiter": [1, 7, 17], "locationtyp": [1, 7, 8], "log": [3, 5, 29, 31, 32, 55, 63, 68, 90, 93], "log_axioms_fil": [5, 55, 63], "log_context": [55, 63], "log_fail_rewrit": 5, "log_failed_rewrit": 63, "log_succ_rewrit": 5, "log_successful_rewrit": 63, "logentri": [1, 5, 31, 32, 55, 63, 93], "logger": 98, "logic": [11, 93], "loglevel": 98, "logorigin": [1, 55, 63], "logrewrit": [1, 55, 63], "long": 92, "loop": [33, 92, 93], "lower": 16, "lowest": 16, "lparen": [7, 13, 17, 55, 57], "lshiftint": [1, 81, 86], "lt_int": [1, 55, 62], "ltint": [1, 81, 86], "lub": 16, "m": [33, 37], "macro": [7, 8, 16], "macro_rec": [7, 8], "macro_rul": [7, 16], "magenta": [7, 9], "mahogani": [7, 9], "mai": [16, 37, 92, 95], "main": [0, 1, 16, 38], "main_fil": [23, 71, 75, 77], "main_modul": [7, 16, 23, 71, 76], "main_module_nam": [7, 16, 71, 72, 90, 93], "maincel": [7, 8], "mainten": 92, "maintenance_r": 92, "major": [24, 28], "make": [4, 14, 47], "make_unique_seg": [29, 35], "mani": 4, "manifest": [39, 40], "manip": [1, 7, 55], "manipul": 4, "map": [8, 11, 12, 14, 16, 21, 27, 30, 32, 38, 40, 62, 63, 65, 68, 72, 74, 78, 91, 92, 93, 98], "map_att": [7, 8], "map_attr": [55, 65], "map_empti": [1, 81, 83], "map_inn": [7, 11], "map_item": [1, 81, 83], "map_of": [1, 81, 83], "map_pattern": [1, 55, 62, 65], "map_sent": [7, 16], "map_sort": [55, 65], "markdown": [1, 7, 73], "maroon": [7, 9], "match": [1, 3, 4, 7, 11, 16, 33, 47, 55], "match_app": [1, 55, 59], "match_dv": [1, 55, 59], "match_inj": [1, 55, 59], "match_left_assoc": [1, 55, 59], "match_list": [1, 55, 59], "match_map": [1, 55, 59], "match_rangemap": [1, 55, 59], "match_set": [1, 55, 59], "match_symbol": [1, 55, 59], "match_with_constraint": [3, 4], "maud": [71, 75], "maude_port": 5, "max_depth": 63, "max_iter": 92, "max_valu": 74, "max_work": [61, 92], "maxim": 78, "maximum": [74, 92], "maxint": [1, 81, 86], "mayb": [0, 1, 98], "md_selector": [23, 73, 77], "mean": [16, 93], "medium_aquamarin": [7, 9], "medium_blu": [7, 9], "medium_orchid": [7, 9], "medium_purpl": [7, 9], "medium_sea_green": [7, 9], "medium_slate_blu": [7, 9], "medium_spring_green": [7, 9], "medium_turquois": [7, 9], "medium_violet_r": [7, 9], "mediumaquamarin": 9, "mediumblu": 9, "mediumorchid": 9, "mediumpurpl": 9, "mediumseagreen": 9, "mediumslateblu": 9, "mediumspringgreen": 9, "mediumturquois": 9, "mediumvioletr": 9, "meet": [55, 56], "melon": [7, 9], "merg": [30, 32, 33], "merge_nod": [29, 33], "merge_with": [0, 1, 98], 
"mergeabl": 33, "merged_edg": [29, 32], "mergededg": [29, 32], "messag": [5, 37, 55, 63], "method": [47, 63, 70], "method_nam": 63, "midnight_blu": [7, 9], "midnightblu": 9, "min_valu": 74, "minim": [1, 7, 11, 14, 29, 32, 35, 37, 94, 95], "minimize_kcfg": [29, 30], "minimize_rule_lik": [1, 7, 14], "minimize_term": [1, 7, 14], "minimum": 74, "minint": [1, 81, 86], "minor": [24, 28], "mint_cream": [7, 9], "mintcream": 9, "miss": [16, 73], "misty_ros": [7, 9], "mistyros": 9, "ml": [1, 4, 65, 81], "ml_and": [55, 57], "ml_bottom": [55, 57], "ml_ceil": [55, 57], "ml_dv": [55, 57], "ml_equal": [55, 57], "ml_exist": [55, 57], "ml_floor": [55, 57], "ml_foral": [55, 57], "ml_iff": [55, 57], "ml_impli": [55, 57], "ml_in": [55, 57], "ml_left_assoc": [55, 57], "ml_mu": [55, 57], "ml_next": [55, 57], "ml_not": [55, 57], "ml_nu": [55, 57], "ml_or": [55, 57], "ml_pattern": [55, 60], "ml_pred_to_bool": [1, 7, 14], "ml_rewrit": [55, 57], "ml_right_assoc": [55, 57], "ml_top": [55, 57], "mland": [1, 4, 81, 87], "mlbottom": [1, 81, 87], "mlceil": [1, 81, 87], "mlconn": [1, 55, 65], "mlequal": [1, 81, 87], "mlequalsfals": [1, 81, 87], "mlequalstru": [1, 81, 87], "mlexist": [1, 81, 87], "mlfixpoint": [1, 55, 65], "mlimpli": [1, 81, 87], "mlnot": [1, 81, 87], "mlor": [1, 81, 87], "mlpattern": [1, 55, 60, 65], "mlpred": [1, 55, 65], "mlquant": [1, 55, 65], "mlrewrit": [1, 55, 65], "mltop": [1, 81, 87], "moccasin": [7, 9], "mode": [9, 73], "model": [55, 63, 90, 93], "modifi": 92, "modint": [1, 81, 86], "modnam": [7, 17], "modul": [0, 1, 3, 7, 24, 29, 39, 42, 46, 55, 67, 69, 71, 81, 90, 96, 99], "module_fil": 48, "module_list": 72, "module_nam": [5, 7, 16, 19, 31, 32, 35, 48, 55, 63, 65, 77, 90, 93], "module_to_llvm": [1, 42, 45], "moduletyp": [48, 52], "modulu": 86, "more": [4, 11, 16, 47, 70, 92], "mostli": 47, "mount": 37, "move": 11, "moving_average_step_timeout": 63, "mu": [1, 55, 60, 65], "mul": 16, "mulberri": [7, 9], "mulint": [1, 81, 86], "multi_or": [55, 60], "multiaryconn": [1, 55, 65], "multiedg": [29, 32], "multipl": [7, 8], "multithread": 92, "must": [16, 92], "mutablemap": 36, "n": [16, 33, 59], "n1": 59, "n2": 59, "n3": 59, "n4": 59, "name": [4, 7, 8, 9, 11, 14, 15, 16, 19, 24, 27, 37, 46, 47, 55, 64, 65, 87], "name_as_id": 63, "namedtupl": [5, 13, 15, 17, 57, 63], "namespac": 70, "nat": [7, 17], "navajo_whit": [7, 9], "navajowhit": 9, "navi": [7, 9], "navwidget": [1, 29, 37], "navy_blu": [7, 9], "navyblu": 9, "ndbranch": [1, 29, 32], "ne_bool": [1, 55, 62], "ne_int": [1, 55, 62], "necessarili": 16, "need": [2, 14, 16, 47, 92], "neqint": [1, 81, 86], "new": [4, 7, 11, 16, 19, 37, 47, 92], "new_constraint": 4, "new_sort": [7, 16], "next": [1, 47, 55, 60, 65], "next_stat": [3, 5, 55, 63, 69, 70], "nextstat": [1, 3, 5], "nice": 14, "nid": 93, "no_cell_rewrite_to_dot": [1, 7, 14], "no_dispatch": [29, 37], "no_evalu": [7, 8], "no_exc_wrap": 75, "no_pattern": 78, "no_post_exec_simplifi": [5, 55, 63], "node": [11, 14, 29, 30, 31, 32, 33, 35, 36, 37, 90, 93, 94], "node_1_id": 32, "node_2_id": 32, "node_attr": [29, 35, 90, 94], "node_delta": [35, 94], "node_id": [30, 31, 32, 90, 93], "node_print": [29, 35, 37, 94, 95], "node_refut": [90, 93], "node_short_info": [29, 35], "node_text": 37, "nodeattr": [1, 29, 30, 32], "nodeidlik": [30, 31, 32, 33, 35, 93, 94], "nodeprint": [1, 29, 35, 37, 94, 95], "nodeview": [1, 29, 37], "non": [2, 14, 16, 19], "non_assoc": [7, 16, 19], "non_empti": [7, 19], "non_termin": [7, 16], "nonassoc": 16, "none": [0, 1, 4, 5, 8, 9, 11, 14, 15, 16, 17, 18, 19, 21, 23, 26, 27, 28, 30, 
31, 32, 33, 34, 35, 37, 38, 40, 41, 44, 47, 48, 52, 53, 55, 56, 59, 61, 62, 63, 64, 65, 66, 70, 71, 72, 73, 74, 75, 76, 77, 78, 80, 91, 92, 93, 94, 95, 97, 98], "nonempty_str": [0, 1, 98], "nonetyp": [1, 7, 8], "nontermin": [1, 7, 16, 19], "nonzero_depth": [90, 93], "normalize_constraint": [1, 7, 14], "normalize_ml_pr": [1, 7, 14], "not_bool": [1, 55, 62], "not_log_context": [55, 63], "not_non": [0, 1, 98], "notbool": [1, 81, 85], "note": 93, "notint": [1, 81, 86], "nott": [55, 60], "nth": 47, "ntype": 70, "nu": [1, 55, 60, 65], "nullaryconn": [1, 55, 65], "number": [5, 13, 15, 17, 33, 47, 57, 63, 78, 92], "obj": [8, 41], "object": [4, 5, 8, 10, 15, 18, 20, 21, 26, 27, 28, 30, 31, 32, 33, 35, 40, 47, 52, 56, 60, 63, 65, 70, 73, 74, 76, 79, 80, 91, 92, 93, 94, 98], "obtain": 4, "occur": [11, 47], "occurr": 11, "of_typ": 16, "often": 68, "old": [7, 19, 37], "old_lac": [7, 9], "old_sort": [7, 16], "oldlac": 9, "oliv": [7, 9], "olive_drab": [7, 9], "olive_green": [7, 9], "olivedrab": 9, "olivegreen": 9, "omit_cel": [35, 94], "on_attribut": [1, 7, 14], "on_behavior_view_select": [29, 37], "on_click": [29, 37], "on_constraint_select": [29, 37], "on_custom_select": [29, 37], "on_ent": [29, 37], "on_graph_chunk_select": [29, 37], "on_leav": [29, 37], "on_mount": [29, 37, 90, 95], "on_nav_widget_select": 37, "on_status_select": [29, 37], "on_term_select": [29, 37], "onc": [11, 14, 33], "one": [2, 4, 11, 16, 33, 37, 68, 74, 92], "one_line_summari": [90, 92, 93], "onelin": [5, 55, 63], "ones": [16, 92], "onli": [14, 17], "op": [7, 15, 55, 65], "op_sort": [55, 65], "oper": [11, 16, 92], "operand": 86, "opinion": 21, "optimize_kcfg": [32, 90, 93], "optimize_memori": 32, "optimizednodestor": [1, 29, 36], "option": [4, 11, 47, 73, 79, 92, 93], "optionaltyp": [1, 7, 8], "or_bool": [1, 55, 62], "orang": [7, 9], "orange_r": [7, 9], "orbool": [1, 81, 85], "orchid": [7, 9], "order": [16, 72], "ordin": [47, 65], "org": [8, 93], "origin": [4, 11, 14, 16, 55, 63], "orint": [1, 81, 86], "orr": [55, 60], "other": [4, 11, 14, 16, 93], "otherwis": [11, 16], "out": [2, 14, 16, 21], "outer": [1, 7, 21], "outer_lex": [1, 7], "outer_pars": [1, 7], "outer_syntax": [1, 7], "outerpars": [1, 7, 18], "output": [65, 66, 74, 78], "output_dir": [26, 40, 75], "output_fil": 66, "over": [4, 47, 72, 74], "overload": [4, 7, 8, 11, 16], "overridden": 63, "owis": [7, 8], "own_statu": [90, 91, 92, 93], "p": [28, 61, 64, 65, 92, 98], "p1": 98, "p2": 98, "p3": 98, "p4": 98, "packag": [0, 27, 99], "package_path": [1, 39, 41], "packagesourc": [1, 24, 27], "page": [37, 99], "pagedown": 37, "pageup": 37, "pair": 16, "pale_goldenrod": [7, 9], "pale_green": [7, 9], "pale_turquois": [7, 9], "pale_violet_r": [7, 9], "palegoldenrod": 9, "palegreen": 9, "paleturquois": 9, "palevioletr": 9, "papaya_whip": [7, 9], "papayawhip": 9, "paper": 93, "parallel": 92, "parallel_advance_proof": [1, 90, 92], "param": [7, 11, 16, 19, 33, 63], "param_sort": [55, 65], "paramet": [2, 4, 11, 14, 16, 21, 33, 37, 47, 72, 73, 74, 78, 86, 92], "parametr": 16, "paren": [1, 7, 21], "pars": [7, 8, 15, 24, 28, 39, 40, 46, 47, 70], "parse_arg": [0, 1, 38], "parse_definit": [1, 42, 51], "parse_definition_fil": [1, 42, 51], "parse_modul": [71, 77], "parse_out": [1, 7, 23], "parse_pattern": [1, 42, 51], "parse_pattern_fil": [1, 42, 51], "parse_rule_appl": [1, 67, 68], "parse_sort": [1, 42, 51], "parse_sort_fil": [1, 42, 51], "parse_tag": [1, 7, 15], "parse_token": [71, 76], "parseabl": [4, 14], "parseable_output": 35, "parseerror": [1, 55, 63], "parser": [1, 7, 16, 42, 47, 55, 
78, 97], "parser_fil": 76, "partial": 16, "partit": [0, 1, 98], "pass": [74, 90, 92], "patch": [24, 28], "patch_symbol_t": [21, 76, 77, 78], "path": [2, 5, 7, 8, 16, 18, 19, 23, 24, 26, 27, 28, 32, 35, 37, 38, 40, 41, 44, 47, 48, 50, 51, 56, 63, 66, 68, 70, 71, 72, 73, 74, 75, 76, 77, 78, 80, 91, 92, 93, 94, 97, 98], "path_condit": [90, 93], "path_constraint": [90, 93], "path_length": [29, 32], "pathlib": 80, "pathlik": [2, 16], "paths_between": [29, 32], "pathsourc": [1, 24, 27], "pathtyp": [1, 7, 8], "pattern": [1, 5, 7, 11, 24, 28, 33, 42, 45, 47, 51, 52, 53, 55, 56, 58, 59, 60, 62, 63, 64, 65, 66, 69, 70, 74, 76, 78], "pattern_matching_failure_ev": 47, "pattern_sort": [55, 56], "pattern_to_llvm": [1, 42, 45], "patternerror": [1, 55, 63], "peach": [7, 9], "peach_puff": [7, 9], "peachpuff": 9, "pend": [90, 92, 93], "pending_nod": [90, 93], "perform": [4, 11, 33, 37, 47, 92], "periwinkl": [7, 9], "peru": [7, 9], "pgm": 78, "phase": 74, "phi": 93, "pid": [55, 63], "piec": 16, "pine_green": [7, 9], "pinegreen": 9, "pink": [7, 9], "pipe_stderr": [78, 98], "pipe_stdout": 98, "place": [16, 33], "plu": [7, 17], "plugin": [1, 96], "plugin_nam": [39, 40], "plum": [7, 9], "pmap": 78, "point": 63, "pool": [1, 55, 92], "port": [5, 55, 63], "poset": [0, 1, 98], "posit": [11, 16, 37, 47], "posixpath": 37, "possibl": [4, 33], "possibli": 14, "post": [16, 37], "potenti": [4, 11, 14, 16], "powder_blu": [7, 9], "powderblu": 9, "pre": [16, 47], "pre_constraint": [90, 91], "pre_trac": [46, 47], "preced": [33, 72], "precis": 4, "pred": [3, 4, 7, 11, 14, 98], "predecessor": [29, 32], "predefin": 14, "predic": [4, 7, 8, 11, 55, 63, 98], "prefer": [7, 8, 11], "prefix": 16, "prelud": [0, 1, 55], "present": [11, 33], "pretti": [1, 7, 8, 14, 29, 35, 55, 66, 69, 70, 71, 76, 77, 78, 82, 88, 90, 91, 94], "pretty_byt": [1, 81, 82], "pretty_bytes_str": [1, 81, 82], "pretty_print": [29, 31, 71, 76], "pretty_seg": [29, 35, 90, 94], "pretty_str": [1, 81, 88], "prettyprint": [1, 7, 21], "previous": 33, "print": [7, 14, 21, 35, 47, 74, 90, 93], "print_nod": [29, 35], "printer": 21, "printoutput": [1, 55, 66], "prior_loops_cach": [90, 93], "prior_loops_cache_upd": [90, 93], "prioriti": [4, 7, 8, 14, 16, 32, 37, 55, 64, 93], "priorityblock": [1, 7, 19], "privat": [7, 8], "process": [47, 68, 78], "process_blu": [7, 9], "processblu": 9, "prod": 21, "produc": [2, 68, 92], "product": [1, 7, 8, 16, 19], "production_for_cell_sort": [7, 16], "productionitem": [1, 7, 16, 19], "productionlik": [1, 7, 19], "profil": [1, 96, 97], "progam": [71, 77], "program": [4, 55, 66, 71, 76, 77, 78], "program_fil": [69, 70], "project": [1, 7, 8, 24, 26, 68], "project_dir": 27, "project_fil": [24, 27], "project_path": 27, "prompt": [69, 70], "proof": [0, 1, 16, 47, 79], "proof_data_exist": [90, 92], "proof_dir": [90, 91, 92, 93], "proof_exist": [90, 92], "proof_hint": 78, "proof_id": [37, 90, 92, 93], "proof_statu": 37, "proof_subdir": [90, 92], "proofstatu": [1, 90, 91, 92, 93], "proofsummari": [1, 90, 91, 92, 93], "prooftrac": [42, 46], "propagate_up_constraint": [1, 7, 14], "proper": 16, "properti": [4, 8, 9, 11, 12, 16, 17, 21, 26, 27, 28, 30, 32, 40, 47, 52, 60, 63, 65, 70, 74, 76, 80, 91, 92, 93], "prove": [16, 71, 77], "prove_claim": [71, 77], "prove_rpc": [1, 71], "proven": 16, "proveopt": 79, "prover": [1, 16, 71, 77, 90, 91, 92, 93], "prover_arg": [71, 77], "proverpc": [1, 71, 79], "provid": [33, 47], "proxi": [55, 63], "prune": [29, 30, 32, 72, 90, 93], "psi": 93, "public": [7, 16, 19], "pull": 14, "purpl": [7, 9], "push_down_rewrit": [1, 
7, 14], "put": [4, 14], "pykbackend": [1, 71, 75], "pytest_addopt": [1, 96, 97], "python": [11, 21], "q": 37, "question": [7, 17, 33], "quit": 37, "r": [72, 98], "r1": 98, "r2": 98, "r3": 98, "r4": 98, "race": 92, "rais": [0, 1, 33, 47, 72, 74, 78, 98], "random": 74, "rangemap_empti": [1, 81, 83], "rangemap_item": [1, 81, 83], "rangemap_of": [1, 81, 83], "rangemap_pattern": [1, 55, 62], "raw_sienna": [7, 9], "rawsienna": 9, "rbrace": [7, 17, 55, 57], "rbrack": [7, 17, 55, 57], "re": [28, 92], "reach": 47, "reachabl": [1, 90], "reachable_nod": [29, 32], "reactiv": 37, "read": [16, 92], "read_cfg_data": [29, 32], "read_kast_definit": [1, 7, 16], "read_node_data": [29, 32], "read_proof": [90, 92, 93], "read_proof_data": [90, 91, 92, 93], "read_subproof": [90, 92], "read_subproof_data": [90, 92], "real": 92, "reason": [55, 63], "rec": 8, "receiv": [37, 95], "reciev": 16, "recov": [4, 14], "recurs": 14, "red": [7, 9], "red_orang": [7, 9], "red_violet": [7, 9], "redorang": 9, "redviolet": 9, "refer": 92, "refut": [90, 93], "refutationproof": [1, 90, 91, 93], "refutationsummari": [1, 90, 91], "refute_nod": [90, 93], "regex": [7, 16, 17, 19], "region": 35, "regular": 16, "rel": 47, "relat": [72, 98], "relative_posit": [46, 47], "remaining_impl": [3, 5], "remov": [4, 11, 14, 16, 33], "remove_alia": [29, 32], "remove_attr": [1, 7, 14, 29, 32], "remove_cell_map_item": [7, 16], "remove_cov": [29, 32], "remove_edg": [29, 32], "remove_edges_around": [29, 32], "remove_generated_cel": [1, 7, 14], "remove_merged_edg": [29, 32], "remove_nod": [29, 30, 32], "remove_semantic_cast": [1, 7, 14], "remove_source_map": [1, 7, 14], "remove_stuck": [29, 32], "remove_subproof": [90, 92], "remove_termin": [29, 30], "remove_useless_constraint": [1, 3, 4, 7, 14], "remove_vacu": [29, 32], "renam": [4, 14], "rename_generated_var": [1, 7, 14], "render_class": [0, 1, 38], "render_coverage_xml": [0, 1, 38], "render_lin": [0, 1, 38], "repaint": 37, "repeat_last": [0, 1, 98], "repeatedli": 33, "repl": [1, 69], "replac": [7, 11, 14], "replace_nod": [29, 32], "replace_rewrites_with_impli": [1, 7, 14], "replace_sourc": [29, 32], "replace_target": [29, 32], "replace_top": [7, 11], "replerror": [1, 69, 70], "report": 68, "repres": [4, 11, 16, 47, 80, 93], "represent": [4, 11, 21, 47, 92], "req": [55, 63, 64], "req_nam": 98, "request": [55, 63, 97], "request_id": 63, "requir": [1, 4, 7, 14, 16, 18, 19, 47, 63, 92], "reset": [7, 9], "reset_cod": [7, 9], "resolv": [24, 27, 55, 56, 71, 72], "resolve_al": [71, 72], "resolve_sort": [7, 16], "resourc": [24, 27], "resource_fil": [24, 27], "resource_file_nam": [24, 27], "result": [4, 11, 16, 46, 47, 55, 63, 72, 73, 78, 91, 92, 93], "return": [2, 4, 11, 14, 16, 17, 21, 33, 35, 37, 47, 63, 65, 72, 74, 78, 86, 92], "returns_unit": [7, 8], "returnsunit": 8, "reus": 74, "rev": [24, 28], "revers": 32, "rewrit": [1, 7, 11, 16, 33, 47, 55, 60, 65, 68, 78], "rewrite_trac": 47, "rewrite_trace_iter": 47, "rewritefailur": [1, 55, 63], "rewriteresult": [1, 55, 63], "rewriterul": [1, 55, 64], "rewritesuccess": [1, 55, 63], "rewritten": 33, "rewritten_term": [55, 63], "rh": [4, 7, 11, 14, 55, 64, 98], "rhodamin": [7, 9], "rhs_bodi": [90, 91], "right": [7, 8, 11, 14, 16, 19, 37, 55, 62, 65, 70, 86], "right_assoc": [7, 16, 55, 60], "right_char": 98, "rightassoc": [1, 55, 60, 62, 65], "rl": [14, 16], "root": [29, 32], "rosy_brown": [7, 9], "rosybrown": 9, "roundpr": [1, 55, 65], "royal_blu": [7, 9], "royal_purpl": [7, 9], "royalblu": 9, "royalpurpl": 9, "rparen": [7, 13, 17, 55, 57], "rpc": [1, 55], "rr": 
63, "rshiftint": [1, 81, 86], "rtld_local": [1, 42, 48], "rubin": 9, "rubine_r": [7, 9], "rule": [1, 2, 4, 7, 14, 16, 19, 29, 32, 47, 55, 63, 68, 71, 76], "rule_ev": 47, "rule_id": [2, 4, 14, 55, 63, 90, 93], "rule_label": [7, 17, 29, 32], "rule_map": 38, "rule_map_fil": 38, "rule_ordin": [46, 47], "rule_pred": [55, 63], "rule_substitut": [55, 63], "run": [42, 52, 71, 74, 78, 92], "run_pattern": [71, 78], "run_process": [0, 1, 71, 78, 98], "run_process_2": [0, 1, 98], "run_proof_hint": [71, 78], "runtim": [1, 42, 48], "runtimeerror": [74, 78], "s1": 59, "s2": 59, "s3": 59, "s4": 59, "saddle_brown": [7, 9], "saddlebrown": 9, "salmon": [7, 9], "same": [14, 93], "same_loop": [29, 34], "sandy_brown": [7, 9], "sandybrown": 9, "saniti": 4, "satisfi": 98, "satresult": [1, 55, 63], "scroll": 37, "scroll_end": 37, "scroll_hom": 37, "scrollabl": 37, "scrollablecontain": [37, 95], "sea_green": [7, 9], "seagreen": 9, "search": [74, 99], "search_fin": 78, "searchstrategi": 74, "seashel": [7, 9], "second": 11, "section": 31, "section_edg": [29, 31], "see": [4, 16, 74], "seem": 68, "segment": 35, "select": [29, 37], "select_code_block": [1, 7, 15], "selector": [1, 7, 15, 73], "selector_lex": [1, 7, 15], "selectorpars": [1, 7, 15], "self": [4, 11, 16, 70, 92], "semant": [1, 2, 16, 29, 33], "semantic_rul": [7, 16], "semanticcast": 14, "sent": 74, "sentenc": [1, 7, 16, 18, 19, 45, 55, 60, 65], "sentence_by_unique_id": [7, 16], "sentence_to_llvm": [1, 42, 45], "sentence_typ": [7, 16], "sep": [7, 19], "sepia": [7, 9], "seqstrict": [7, 8], "sequenc": [11, 16, 19, 33], "serial": [4, 11, 42, 47, 52], "set": [4, 7, 9, 11, 14, 15, 16, 30, 32, 70, 74], "set_cel": [1, 7, 14], "set_empti": [1, 81, 83], "set_exec_tim": [90, 93], "set_item": [1, 81, 83], "set_of": [1, 81, 83], "set_pattern": [1, 55, 62], "set_var": [55, 60], "set_var_id": [55, 57, 60], "setvarid": [1, 55, 65], "share": 92, "shell": 70, "shorten_hash": [0, 1, 98], "shortest_distance_between": [29, 32], "shortest_path_between": [29, 32], "shortest_path_to": [90, 93], "shortest_path_to_nod": [90, 93], "should": [4, 11, 14, 74, 92, 98], "show": [1, 29, 37, 90], "side": [4, 11, 14, 33, 47], "side_condition_end_ev": 47, "side_condition_ev": 47, "sienna": [7, 9], "silver": [7, 9], "similar": 11, "simpl": [11, 16], "simplif": [7, 8, 68], "simplifi": [3, 5, 29, 31, 42, 52, 55, 63], "simplified_anteced": [90, 91], "simplified_consequ": [90, 91], "simplified_constraint": [90, 91], "simplified_equ": [90, 91], "simplify_bool": [1, 7, 14, 42, 52], "simplify_config": [29, 35], "simplirul": [1, 55, 64], "sinc": 93, "singl": [0, 1, 4, 32, 33, 98], "single_socket": [55, 63], "singlesockettransport": [1, 55, 63], "size": 21, "skip_initi": 68, "skip_project": 68, "sky_blu": [7, 9], "skyblu": 9, "slate_blu": [7, 9], "slate_grai": [7, 9], "slate_grei": [7, 9], "slateblu": 9, "slategrai": 9, "slategrei": 9, "slurp_definit": [1, 7, 23], "smt": 8, "smt_reset_interv": [55, 63], "smt_retry_limit": [5, 55, 63], "smt_tactic": [5, 55, 63], "smt_timeout": [5, 55, 63], "smtlemma": [7, 8], "smtsolvererror": [1, 55, 63], "snow": [7, 9], "so": [4, 11, 14, 33, 35], "some": [0, 1, 98], "someth": 16, "somewher": 16, "sort": [1, 7, 8, 11, 13, 14, 16, 19, 45, 51, 52, 55, 56, 59, 60, 62, 64, 65, 72, 74, 76, 87, 90, 91], "sort1": [16, 56, 62, 87], "sort2": [16, 56, 62, 87], "sort_ac_collect": [1, 7, 14], "sort_app": [55, 60], "sort_assoc_label": [1, 7, 14], "sort_collect": [21, 35, 76, 94], "sort_decl": [55, 60], "sort_strict": [7, 16], "sort_tabl": [55, 56], "sort_to_llvm": [1, 42, 45], 
"sort_var": [7, 16, 55, 60], "sort_with": [4, 93], "sortapp": [1, 55, 60, 64, 65], "sortdecl": [1, 7, 19, 55, 60, 65], "sortgeneratedtopcel": 64, "sortvar": [1, 45, 55, 60, 65], "sortvari": 45, "sourc": [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 44, 45, 47, 48, 50, 51, 52, 53, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 82, 83, 84, 85, 86, 87, 88, 89, 91, 92, 93, 94, 95, 97, 98], "source_dir": [24, 27, 28, 38], "source_fil": [24, 27, 38], "source_file_nam": [24, 27], "source_id": [31, 32], "source_node_id": 32, "source_var": [29, 32], "space": 35, "spawn": 92, "spec": 73, "spec_fil": [73, 77], "spec_label": 93, "spec_modul": 93, "spec_module_nam": [73, 77], "special": 16, "specif": [4, 11, 14, 16, 33], "specifi": 93, "speed": 16, "split": [4, 14, 29, 32, 33], "split_config_and_constraint": [1, 7, 14], "split_config_from": [1, 7, 14], "split_on_constraint": [29, 32], "spring_green": [7, 9], "springgreen": 9, "sr": 92, "src": [2, 37], "src_all_rul": 2, "src_kompiled_dir": 2, "src_rules_fil": 2, "src_rules_list": 2, "ss": 59, "standalon": 16, "standard": [55, 63], "start": [11, 37, 55, 63, 78], "start_dir": 28, "start_serv": 5, "state": [1, 3, 4, 5, 7, 13, 14, 17, 55, 63, 69, 70], "state1": 4, "state2": 4, "static": [4, 9, 11, 12, 16, 26, 27, 28, 30, 32, 35, 37, 40, 47, 56, 63, 64, 65, 72, 80, 91, 92, 93], "statu": [1, 29, 37, 90, 91, 92, 93], "status_on": 37, "stdout": 9, "steel_blu": [7, 9], "steelblu": 9, "stefanescu": 93, "step": [1, 29, 31, 32, 33, 42, 47, 52, 70, 78, 91, 92, 93], "step_ev": [46, 47], "step_proof": [90, 91, 92, 93], "step_timeout": 63, "still": 92, "stopiter": 47, "stopreason": [1, 55, 63], "store": [1, 4, 11, 29], "store_path": [29, 32], "str": [2, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 26, 27, 28, 30, 31, 32, 35, 37, 38, 40, 41, 44, 47, 48, 51, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 68, 70, 72, 73, 74, 75, 76, 77, 78, 82, 87, 88, 89, 91, 92, 93, 94, 95, 98], "str_dv": [1, 55, 62], "strategi": [74, 92], "stream": 47, "strict": [7, 8], "string": [1, 2, 7, 11, 13, 16, 17, 21, 47, 55, 57, 60, 65, 78, 81], "string2json": [1, 55, 62], "string_sent": [7, 18], "stringsent": [1, 7, 18, 19], "stringtoken": [1, 81, 88], "strip": 2, "strip_coverage_logg": [0, 1, 2], "strtype": [1, 7, 8], "structur": [4, 14, 16, 33], "stuck": [1, 29, 32, 55, 63, 90, 93], "stuckresult": [1, 55, 63], "style": [16, 37], "sub_project": [24, 27], "subclass": 70, "subint": [1, 81, 86], "sublist": 98, "submit": [55, 61, 92], "submodul": 0, "subpackag": 0, "subproof": [90, 91, 92, 93], "subproof_id": [90, 91, 92, 93], "subproofs_statu": [90, 92], "subsort": [7, 16, 56], "subsort_t": [7, 16], "subst": [1, 3, 4, 5, 7, 11, 14, 33, 91], "subst1": 4, "subst2": 4, "subst_strategi": 74, "substitut": [4, 11, 14, 46, 47, 55, 63, 74], "succ": 32, "successor": [29, 32, 37, 93], "summar": 11, "summari": [11, 90, 91, 92, 93], "supersort": 16, "suppli": [4, 11, 14, 16], "svar": [1, 55, 60, 65], "symbol": [1, 3, 4, 7, 8, 11, 14, 16, 21, 47, 55, 59, 60, 65], "symbol_decl": [55, 56, 60, 65], "symbol_id": [55, 56, 57, 60], "symbol_t": [7, 21, 55, 56], "symboldecl": [1, 55, 56, 60, 65], "symbolic_config": 14, "symbolid": [1, 55, 65], "symbolt": [21, 76, 77, 78], "sync": [24, 26], "sync_fil": [1, 24, 28], "synonym": 16, "syntact": [11, 16], "syntax": [1, 7, 16, 17, 21, 55], "syntax_modul": [7, 8], "syntax_product": [7, 16], "syntax_sent": [7, 18], 
"syntax_sort": [7, 16], "syntax_symbol": [7, 16], "syntaxassoc": [1, 7, 19], "syntaxdecl": [1, 7, 19], "syntaxdefn": [1, 7, 19], "syntaxlex": [1, 7, 19], "syntaxmodul": 8, "syntaxprior": [1, 7, 19], "syntaxsent": [1, 7, 18, 19], "syntaxsynonym": [1, 7, 19], "t": [8, 16, 37, 59, 61, 70, 72, 74, 98], "tabl": 21, "tag": [7, 16, 73], "tail": [4, 11], "take": [72, 78, 92], "tan": [7, 9], "target": [1, 24, 27, 29, 32, 33, 39, 40, 90, 93], "target_dir": [28, 44, 48], "target_id": [29, 31, 32], "target_nam": [26, 27, 39, 40], "target_node_id": 32, "target_var": [29, 32], "targetid": [1, 39, 40], "teal": [7, 9], "teal_blu": [7, 9], "tealblu": 9, "tell": 35, "temp_dir": [75, 76, 78], "templat": 74, "temppathfactori": 97, "term": [1, 7, 10, 11, 14, 16, 21, 29, 35, 37, 42, 52, 55, 63, 65, 74, 78, 84, 86, 87], "term1": 87, "term2": 87, "term_on": 37, "termin": [1, 7, 8, 11, 16, 19, 29, 30, 63, 90, 93], "terminal_id": [29, 30], "terminal_rul": [5, 31, 55, 63, 90, 93], "terminalresult": [1, 55, 63], "terminator_symbol": [7, 8], "test": [0, 1, 74], "text": [7, 8, 13, 15, 16, 17, 21, 24, 28, 29, 37, 51, 55, 57, 60, 65], "textiowrapp": 9, "th": 16, "than": [4, 16], "thei": [11, 35, 92], "them": [16, 33], "therefor": 33, "thi": [4, 11, 14, 16, 17, 33, 35, 47, 65, 74, 92, 93], "thistl": [7, 9], "those": [14, 68, 92], "thread": 92, "through": 14, "throw": 74, "thrown": 74, "tild": [7, 17], "time": [7, 17, 29, 37, 92], "timeout": [55, 63], "timeoutresult": [1, 55, 63], "timestamp": [1, 39, 41, 92], "tmp_path": 97, "tmp_path_factori": 97, "to_axiom": [55, 64], "to_claim": [90, 91], "to_dict": [3, 4, 7, 8, 11, 12, 16, 29, 30, 32, 55, 56, 63], "to_dict_no_nod": [29, 32], "to_json": [7, 12, 29, 32], "to_modul": [29, 32, 35, 94], "to_rul": [29, 32], "to_sort": 84, "togeth": 11, "toggl": 37, "toggle_opt": [29, 37], "toggle_view": [29, 37], "token": [1, 7, 8, 11, 13, 16, 17, 81, 82, 86, 88, 89], "tokentyp": [1, 7, 13, 17, 55, 57], "tomato": [7, 9], "tool": [1, 55], "top": [1, 3, 4, 11, 55, 60, 65], "top_cell_initi": [1, 55, 62], "top_down": [1, 7, 11, 55, 65], "topmost": 16, "topolog": 72, "total": [7, 8], "trace": [46, 47, 93], "trace_path": 47, "track": 17, "transform": [11, 14, 33], "transit": [16, 72, 73], "translat": 2, "translate_coverag": [0, 1, 2], "translate_coverage_from_path": [0, 1, 2], "transport": [1, 55, 63], "transporttyp": [1, 55, 63], "travers": [11, 14, 68], "trivial": 4, "true": [4, 5, 6, 10, 16, 19, 21, 23, 32, 35, 37, 56, 68, 72, 73, 74, 75, 76, 77, 78, 94, 95, 98], "trust": [7, 8, 16], "try_cel": [3, 4], "tui": [1, 29, 90], "tupl": [4, 5, 8, 11, 14, 15, 16, 19, 27, 31, 32, 35, 38, 56, 58, 59, 62, 63, 64, 65, 78, 83, 91, 92, 93, 94, 98], "tuple_of": [0, 1, 98], "turn": [11, 14], "turquois": [7, 9], "two": [4, 11, 16], "txt": 2, "typ": 98, "type": [7, 8, 11, 13, 14, 16, 17, 46, 47, 55, 57, 63, 73, 80, 92, 98], "type_inference_mod": [73, 75, 77], "typed": 11, "typeddict": 63, "typeinferencemod": [73, 75, 77], "typic": 16, "u": 8, "uid": [55, 64], "unalia": [21, 76], "unappli": [7, 11], "unaryconn": [1, 55, 65], "unbound": 4, "unchang": 33, "uncov": [29, 32], "under": 16, "underli": [11, 47], "undo_alias": [1, 7, 14], "undon": [4, 14], "unhid": 37, "union": [7, 11], "uniqu": [0, 1, 2, 4, 16, 98], "unique_id": [7, 8, 16], "unit": [7, 8, 11, 21], "unknown": [35, 68], "unknown_pred": [55, 63], "unknownmoduleerror": [1, 55, 63], "unknownresult": [1, 55, 63], "unless": 63, "unpars": [7, 8, 16, 21], "unparse_avoid": [7, 8], "unparseavoid": 8, "unparser_for_product": [1, 7, 21], "unrefute_nod": [90, 93], 
"unsaf": 14, "unsatresult": [1, 55, 63], "unstructur": 4, "until": 47, "unus": 14, "up": [11, 16, 33, 37, 70, 92], "up_to_d": [24, 26, 90, 92], "updat": [7, 8, 11, 29, 37, 92], "update_att": [7, 8], "upload": 92, "upper": 16, "uptodate_check_method": 92, "upward": 11, "us": [4, 11, 14, 16, 33, 35, 47, 63, 65, 74, 92, 93], "use_cach": [90, 93], "use_directori": [71, 76, 77, 78], "use_serv": [1, 96, 97], "useless": 14, "useless_vars_to_dot": [1, 7, 14], "user": [16, 37, 93], "user_list": [7, 8], "userlist": [1, 7, 8, 19], "useserv": 97, "utf": 9, "util": [0, 1, 7, 24, 39, 42, 71, 81], "v": [14, 28, 59, 83, 98], "vacuou": [1, 3, 5, 29, 32, 55, 63, 90, 93], "vacuousresult": [1, 55, 63], "val": 62, "valid": [55, 63, 72], "valid_id": [1, 39, 40], "valu": [4, 7, 8, 9, 11, 13, 16, 17, 19, 29, 30, 32, 37, 55, 57, 59, 62, 63, 65, 66, 68, 74, 75, 76, 77, 78, 92, 98], "valuat": 11, "value_typ": 8, "valueerror": 72, "var": [45, 55, 62, 65, 87], "var_map": [4, 14], "var_occurr": [1, 7, 11], "var_pattern": [55, 60], "varabl": 14, "variabl": [4, 7, 11, 13, 14, 16, 33, 74, 92], "variant": 11, "varpattern": [1, 55, 60, 65], "vbar": [7, 17], "verbos": [29, 37, 40, 44, 75], "veri": 16, "version": [7, 12, 24, 27, 35, 46, 47], "vert": 37, "vertic": 37, "via": 92, "violet": [7, 9], "violet_r": [7, 9], "violetr": 9, "w": [8, 9, 14, 65, 72], "wa": [2, 17, 33, 47, 65], "walru": [55, 57], "want": 35, "warn": 75, "warnings_to_error": 75, "watch_text": [29, 37], "watcher": 37, "we": 68, "weak": 87, "well": 11, "wheat": [7, 9], "when": [4, 14, 33, 37, 47], "where": [4, 14, 16, 33], "whether": [11, 16, 33, 35], "which": [2, 4, 11, 14, 16, 17, 33, 35, 74, 92], "while": 74, "white": [7, 9], "white_smok": [7, 9], "whitesmok": 9, "whose": 14, "widget": [37, 95], "wild_strawberri": [7, 9], "wildstrawberri": 9, "with_argpars": 70, "with_depend": 72, "with_inj": 74, "with_ml_symbol": 56, "with_single_target": [29, 32], "withattr": [1, 55, 65], "withkatt": [1, 7, 8, 16], "without": [14, 32, 33], "withsort": [1, 55, 65], "work": [16, 47, 92], "worker": 92, "wrap": [16, 47], "wrap_el": [7, 8], "wrapel": 8, "wrapper": [16, 47], "write": [55, 56, 65, 73, 92], "write_cfg_data": [29, 32], "write_proof": [90, 92], "write_proof_data": [90, 91, 92, 93], "write_stderr": 98, "write_stdout": 98, "written": 92, "x": [89, 98], "xor_bool": [1, 55, 62], "xorint": [1, 81, 86], "y": 98, "yellow": [7, 9], "yellow_green": [7, 9], "yellow_orang": [7, 9], "yellowgreen": 9, "yelloworang": 9, "yet": 17, "yield": [4, 47], "you": [2, 35], "z": 98, "zero_depth_between": [29, 32]}, "titles": ["pyk", "pyk package", "pyk.coverage module", "pyk.cterm package", "pyk.cterm.cterm module", "pyk.cterm.symbolic module", "pyk.dequote module", "pyk.kast package", "pyk.kast.att module", "pyk.kast.color module", "pyk.kast.formatter module", "pyk.kast.inner module", "pyk.kast.kast module", "pyk.kast.lexer module", "pyk.kast.manip module", "pyk.kast.markdown module", "pyk.kast.outer module", "pyk.kast.outer_lexer module", "pyk.kast.outer_parser module", "pyk.kast.outer_syntax module", "pyk.kast.parser module", "pyk.kast.pretty module", "pyk.kast.rewrite module", "pyk.kast.utils module", "pyk.kbuild package", "pyk.kbuild.config module", "pyk.kbuild.kbuild module", "pyk.kbuild.project module", "pyk.kbuild.utils module", "pyk.kcfg package", "pyk.kcfg.exploration module", "pyk.kcfg.explore module", "pyk.kcfg.kcfg module", "pyk.kcfg.minimize module", "pyk.kcfg.semantics module", "pyk.kcfg.show module", "pyk.kcfg.store module", "pyk.kcfg.tui module", "pyk.kcovr module", 
"pyk.kdist package", "pyk.kdist.api module", "pyk.kdist.utils module", "pyk.kllvm package", "pyk.kllvm.ast module", "pyk.kllvm.compiler module", "pyk.kllvm.convert module", "pyk.kllvm.hints package", "pyk.kllvm.hints.prooftrace module", "pyk.kllvm.importer module", "pyk.kllvm.load module", "pyk.kllvm.load_static module", "pyk.kllvm.parser module", "pyk.kllvm.runtime module", "pyk.kllvm.utils module", "pyk.konvert package", "pyk.kore package", "pyk.kore.kompiled module", "pyk.kore.lexer module", "pyk.kore.manip module", "pyk.kore.match module", "pyk.kore.parser module", "pyk.kore.pool module", "pyk.kore.prelude module", "pyk.kore.rpc module", "pyk.kore.rule module", "pyk.kore.syntax module", "pyk.kore.tools module", "pyk.kore_exec_covr package", "pyk.kore_exec_covr.kore_exec_covr module", "pyk.krepl package", "pyk.krepl.repl module", "pyk.ktool package", "pyk.ktool.claim_index module", "pyk.ktool.claim_loader module", "pyk.ktool.kfuzz module", "pyk.ktool.kompile module", "pyk.ktool.kprint module", "pyk.ktool.kprove module", "pyk.ktool.krun module", "pyk.ktool.prove_rpc module", "pyk.ktool.utils module", "pyk.prelude package", "pyk.prelude.bytes module", "pyk.prelude.collections module", "pyk.prelude.k module", "pyk.prelude.kbool module", "pyk.prelude.kint module", "pyk.prelude.ml module", "pyk.prelude.string module", "pyk.prelude.utils module", "pyk.proof package", "pyk.proof.implies module", "pyk.proof.proof module", "pyk.proof.reachability module", "pyk.proof.show module", "pyk.proof.tui module", "pyk.testing package", "pyk.testing.plugin module", "pyk.utils module", "Welcome to pyk\u2019s documentation!"], "titleterms": {"": 99, "api": 40, "ast": 43, "att": 8, "byte": 82, "claim_index": 72, "claim_load": 73, "collect": 83, "color": 9, "compil": 44, "config": 25, "content": 99, "convert": 45, "coverag": 2, "cterm": [3, 4, 5], "dequot": 6, "document": 99, "explor": [30, 31], "formatt": 10, "hint": [46, 47], "impli": 91, "import": 48, "indic": 99, "inner": 11, "k": 84, "kast": [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], "kbool": 85, "kbuild": [24, 25, 26, 27, 28], "kcfg": [29, 30, 31, 32, 33, 34, 35, 36, 37], "kcovr": 38, "kdist": [39, 40, 41], "kfuzz": 74, "kint": 86, "kllvm": [42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53], "kompil": [56, 75], "konvert": 54, "kore": [55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66], "kore_exec_covr": [67, 68], "kprint": 76, "kprove": 77, "krepl": [69, 70], "krun": 78, "ktool": [71, 72, 73, 74, 75, 76, 77, 78, 79, 80], "lexer": [13, 57], "load": 49, "load_stat": 50, "manip": [14, 58], "markdown": 15, "match": 59, "minim": 33, "ml": 87, "modul": [2, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 40, 41, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 82, 83, 84, 85, 86, 87, 88, 89, 91, 92, 93, 94, 95, 97, 98], "outer": 16, "outer_lex": 17, "outer_pars": 18, "outer_syntax": 19, "packag": [1, 3, 7, 24, 29, 39, 42, 46, 54, 55, 67, 69, 71, 81, 90, 96], "parser": [20, 51, 60], "plugin": 97, "pool": 61, "prelud": [62, 81, 82, 83, 84, 85, 86, 87, 88, 89], "pretti": 21, "project": 27, "proof": [90, 91, 92, 93, 94, 95], "prooftrac": 47, "prove_rpc": 79, "pyk": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 
61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], "reachabl": 93, "repl": 70, "rewrit": 22, "rpc": 63, "rule": 64, "runtim": 52, "semant": 34, "show": [35, 94], "store": 36, "string": 88, "submodul": [1, 3, 7, 24, 29, 39, 42, 46, 55, 67, 69, 71, 81, 90, 96], "subpackag": [1, 42], "symbol": 5, "syntax": 65, "tabl": 99, "test": [96, 97], "tool": 66, "tui": [37, 95], "util": [23, 28, 41, 53, 80, 89, 98], "welcom": 99}})
\ No newline at end of file
diff --git a/sitemap.xml b/sitemap.xml
new file mode 100644
index 00000000000..4e2e33ddfff
--- /dev/null
+++ b/sitemap.xml
@@ -0,0 +1,711 @@
+
+
+
+ https://kframework.org/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/index.html
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/exports/K.pdf
+ 0.5
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/exports/K.epub
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/exports/K.mobi
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/exports/K.html
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/01_installing/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/02_basics/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/03_parsing/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/04_disambiguation/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/05_modules/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/06_ints_and_bools/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/07_side_conditions/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/08_literate_programming/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/09_unparsing/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/10_strings/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/11_casts/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/12_syntactic_lists/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/13_rewrite_rules/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/14_evaluation_order/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/15_configurations/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/16_collections/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/17_cell_multiplicity/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/18_equality_and_conditionals/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/19_debugging/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/20_backends/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/21_symbolic_execution/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/1_basic/22_proofs/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/01_macros/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/02_fresh_constants/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/03_klabels/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/04_overloading/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/05_matching_logic/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/06_function_context/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/07_record_productions/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/08_fun_and_let/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/09_as/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/10_matching_operator/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/11_evaluation_order/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/12_floats_and_machine_ints/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/13_substitution/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/14_io/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/15_string_buffers_and_bytes/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/16_kore/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/2_intermediate/17_debugging_proofs/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/docs/user_manual/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/docs/cheat_sheet/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/docs/ktools/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/include/kframework/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/include/kframework/builtin/domains/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/include/kframework/builtin/kast/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/include/kframework/builtin/prelude/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/include/kframework/builtin/ffi/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/include/kframework/builtin/json/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/include/kframework/builtin/rat/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/include/kframework/builtin/substitution/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/overview/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/1_lambda/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/1_lambda/lesson_1/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/1_lambda/lesson_2/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/1_lambda/lesson_3/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/1_lambda/lesson_4/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/1_lambda/lesson_5/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/1_lambda/lesson_6/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/1_lambda/lesson_7/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/1_lambda/lesson_8/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/1_lambda/lesson_9/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/2_imp/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/2_imp/lesson_1/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/2_imp/lesson_2/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/2_imp/lesson_3/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/2_imp/lesson_4/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/2_imp/lesson_5/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/3_lambda++/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_1/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_2/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_3/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_4/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_5/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/3_lambda++/lesson_6/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/4_imp++/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/4_imp++/lesson_1/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/4_imp++/lesson_2/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/4_imp++/lesson_3/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/4_imp++/lesson_4/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/4_imp++/lesson_5/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/4_imp++/lesson_6/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/4_imp++/lesson_7/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/4_imp++/lesson_8/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/5_types/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/5_types/lesson_1/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/5_types/lesson_2/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/5_types/lesson_3/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/5_types/lesson_4/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/5_types/lesson_5/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/5_types/lesson_6/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/5_types/lesson_7/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/5_types/lesson_8/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/1_k/5_types/lesson_9/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/2_languages/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/2_languages/1_simple/1_untyped/simple-untyped/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/2_languages/1_simple/2_typed/1_static/simple-typed-static/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/2_languages/1_simple/2_typed/2_dynamic/simple-typed-dynamic/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/2_languages/2_kool/1_untyped/kool-untyped/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/2_languages/2_kool/2_typed/1_dynamic/kool-typed-dynamic/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/2_languages/2_kool/2_typed/2_static/kool-typed-static/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/2_languages/3_fun/1_untyped/1_environment/fun-untyped/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/2_languages/3_fun/1_untyped/2_substitution/fun-untyped/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/pl-tutorial/2_languages/4_logik/basic/logik/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/projects/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/editor_support/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/faq/
+ 1.0
+ 2024-12-17
+ monthly
+
+
+ https://kframework.org/k-distribution/k-tutorial/3_advanced/
+ 1.0
+ 2024-12-17
+ monthly
+
+
\ No newline at end of file