diff --git a/.github/workflows/default.yml b/.github/workflows/default.yml new file mode 100644 index 0000000..dc56737 --- /dev/null +++ b/.github/workflows/default.yml @@ -0,0 +1,25 @@ +on: [pull_request] + +name: CI + +jobs: + all: + name: Normal Build, Test Suite, Test JS bindings, Rustfmt, Clippy + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + override: true + components: rustfmt, clippy + - uses: extractions/setup-just@v1 + - uses: jetli/wasm-pack-action@v0.3.0 + - uses: actions/setup-node@v1 + with: + node-version: '13.x' + - run: cargo build + - run: cargo test + - run: just js-test + - run: cargo clippy -- -D warnings + - run: cargo fmt --all -- --check diff --git a/Cargo.toml b/Cargo.toml index 1b73eba..c787dd8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rify" -version = "0.5.1" +version = "0.6.0" authors = [ "Andrew Dirksen ", "Sam Hellawell " @@ -15,18 +15,16 @@ documentation = "https://docs.rs/rify" readme = "README.md" repository = "https://github.com/docknetwork/rify" -[package.metadata.wasm-pack.profile.release] -wasm-opt = false - [lib] -crate-type = ["cdylib", "lib"] +crate-type = ["lib"] [dependencies] -wasm-bindgen = { version = "0.2", optional = true, features = ["serde-serialize"] } serde = { version = "1", optional = true, features = ["derive"] } -serde_json = { version = "1", optional = true } + +[dev-dependencies] +rify = { path = ".", features = [ "serde", "std" ] } # enables these features for tests +serde_json = "1" [features] -default = ["std", "js-library-wasm"] -js-library-wasm = ["wasm-bindgen", "serde", "serde_json"] +default = [] std = [] diff --git a/README.md b/README.md index abce2a4..e786ab0 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ Rify is a [forward chaining](https://en.wikipedia.org/wiki/Forward_chaining) [inference engine](https://en.wikipedia.org/wiki/Inference_engine) designed specifically for use on in-memory RDF graphs. -It accepts conjunctive rules with limited expressiveness so reasoning is bounded by `O(n^k)` in both memory and in computation where n is the number of nodes in the input RDF graph. +It accepts conjunctive rules with limited expressiveness so reasoning is bounded by `O(n^k)` in both memory and in computation where n is the number of nodes in the input RDF dataset. Reasoning generates a proof which can be used to quickly verify the result pragmatically. @@ -14,20 +14,38 @@ Logical rules are defined as if-then clauses. Something like this: ```rust struct Rule { - if_all: Vec<[Entity; 3]>, - then: Vec<[Entity; 3]>, + if_all: Vec<[Entity; 4]>, + then: Vec<[Entity; 4]>, } +// Notice it's `[Entity; 4]`, not `[Entity; 3]`. This reasoner operates on Rdf Quads, not triples. +// You can still reason over triples by binding a unique resource e.g. `RdfTerm::DefaultGraph` +// as the graph in all rules and all quads. + enum Entity { /// A a named variable with an unknown value. Unbound(String), - /// A literal RDF node. - Bound(RdfNode), + /// A literal with a constant value. + Bound(RdfTerm), } // actual definitions of these types are templated but this is the gist + +// You get to define this type! Here is an example definition. +enum RdfTerm { + Blank(usize), + Iri(String), + Literal { + datatype: String, + value: String, + lang_tag: Option, + }, + DefaultGraph, +} ``` +Rify doesn't care whether its input and output is valid rdf. For example, it will happily accept a quad whose predicate is a literal. 
https://www.w3.org/TR/rdf11-concepts/#section-triples + # Use Two functions are central to this library: `prove` and `validate`. @@ -44,25 +62,37 @@ use rify::{ // (?a, is, awesome) ∧ (?a, score, ?s) -> (?a score, awesome) let awesome_score_axiom = Rule::create( vec![ - [Unbound("a"), Bound("is"), Bound("awesome")], // if someone is awesome - [Unbound("a"), Bound("score"), Unbound("s")], // and they have some score + // if someone is awesome + [Unbound("a"), Bound("is"), Bound("awesome"), Bound("default_graph")], + // and they have some score + [Unbound("a"), Bound("score"), Unbound("s"), Bound("default_graph")], + ], + vec![ + // then they must have an awesome score + [Unbound("a"), Bound("score"), Bound("awesome"), Bound("default_graph")] ], - vec![[Unbound("a"), Bound("score"), Bound("awesome")]], // then they must have an awesome score -)?; +).expect("invalid rule"); assert_eq!( prove::<&str, &str>( - &[["you", "score", "unspecified"], ["you", "is", "awesome"]], - &[["you", "score", "awesome"]], + // list of already known facts (premises) + &[ + ["you", "score", "unspecified", "default_graph"], + ["you", "is", "awesome", "default_graph"] + ], + // the thing we want to prove + &[["you", "score", "awesome", "default_graph"]], + // ordered list of logical rules &[awesome_score_axiom] - )?, - &[ + ), + Ok(vec![ + // the desired statement is proven by instantiating the `awesome_score_axiom` // (you is awesome) ∧ (you score unspecified) -> (you score awesome) RuleApplication { rule_index: 0, instantiations: vec!["you", "unspecified"] } - ] + ]) ); ``` @@ -72,10 +102,17 @@ assert_eq!( use rify::{ prove, validate, Valid, Rule, RuleApplication, + Entity::{Bound, Unbound} }; -// (?a, is, awesome) ∧ (?a, score, ?s) -> (?a score, awesome) -let awesome_score_axiom = ...; +// same as above +let awesome_score_axiom = Rule::create( + vec![ + [Unbound("a"), Bound("is"), Bound("awesome"), Bound("default_graph")], + [Unbound("a"), Bound("score"), Unbound("s"), Bound("default_graph")], + ], + vec![[Unbound("a"), Bound("score"), Bound("awesome"), Bound("default_graph")]], +).expect("invalid rule"); let proof = vec![ // (you is awesome) ∧ (you score unspecified) -> (you score awesome) @@ -88,16 +125,16 @@ let proof = vec![ let Valid { assumed, implied } = validate::<&str, &str>( &[awesome_score_axiom], &proof, -).map_err(|e| format!("{:?}", e))?; +).expect("invalid proof"); -// Now we know that under the given rules, if all RDF triples in `assumed` are true, then all -// RDF triples in `implied` are also true. +// Now we know that under the given rules, if all quads in `assumed` are true, then all +// quads in `implied` are also true. ``` -# recipies +# Recipes In addition to normal cargo commands like `cargo test` and `cargo check` the `./justfile` -defines some scripts which can be useful during develompent. For example, `just js-test` will +defines some scripts which can be useful during development. For example, `just js-test` will test the javascript bindings to this library. See `./justfile` for more. 
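# Custom term types

The examples above use `&str` for both unbound names and bound terms, but `prove`, `validate`, and `Rule` are generic: any `Ord + Clone` type works for either parameter. Below is a minimal, hypothetical sketch (not taken from the crate's docs; the trimmed-down `RdfTerm` enum and the `iri` helper are illustrative only) of the default-graph trick with a user-defined term type:

```rust
use rify::{prove, Entity::{Bound, Unbound}, Rule};

// Illustrative term type. Any `Ord + Clone` type can serve as the bound term.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum RdfTerm {
    Iri(String),
    DefaultGraph,
}

fn iri(s: &str) -> RdfTerm {
    RdfTerm::Iri(s.to_string())
}

fn main() {
    // (?a, is, awesome) in the default graph -> (?a, score, awesome) in the default graph
    let rule = Rule::create(
        vec![[
            Unbound("a".to_string()),
            Bound(iri("is")),
            Bound(iri("awesome")),
            Bound(RdfTerm::DefaultGraph),
        ]],
        vec![[
            Unbound("a".to_string()),
            Bound(iri("score")),
            Bound(iri("awesome")),
            Bound(RdfTerm::DefaultGraph),
        ]],
    )
    .expect("invalid rule");

    let premises = [[iri("you"), iri("is"), iri("awesome"), RdfTerm::DefaultGraph]];
    let to_prove = [[iri("you"), iri("score"), iri("awesome"), RdfTerm::DefaultGraph]];
    let proof = prove::<String, RdfTerm>(&premises, &to_prove, &[rule]).expect("no proof found");

    // a single rule application, binding ?a to "you"
    assert_eq!(proof.len(), 1);
    assert_eq!(proof[0].instantiations, vec![iri("you")]);
}
```

Binding `RdfTerm::DefaultGraph` in every rule and every input quad is what makes this behave like plain triple reasoning.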
# License diff --git a/bindings/js_wasm/.gitignore b/bindings/js_wasm/.gitignore new file mode 100644 index 0000000..0363452 --- /dev/null +++ b/bindings/js_wasm/.gitignore @@ -0,0 +1,5 @@ +/target +/pkg +/wasm-pack.log +/tmp +/Cargo.lock diff --git a/bindings/js_wasm/Cargo.toml b/bindings/js_wasm/Cargo.toml new file mode 100644 index 0000000..d6bb686 --- /dev/null +++ b/bindings/js_wasm/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "rify_js" +version = "0.0.1" +authors = [ + "Andrew Dirksen ", + "Sam Hellawell " +] +edition = "2018" +description = """ +RDF reasoner that operates on RIF-like conjunctive rules. Outputs a machine +readable proof of some claim which can be cheaply verified. +""" +license = "MIT OR Apache-2.0" +documentation = "https://docs.rs/rify" +readme = "README.md" +repository = "https://github.com/docknetwork/rify" + +[package.metadata.wasm-pack.profile.release] +wasm-opt = false + +[lib] +crate-type = ["cdylib"] + +[dependencies] +wasm-bindgen = { version = "0.2", features = ["serde-serialize"] } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +rify = { path = "../..", features = ["serde"] } diff --git a/bindings/js_wasm/README.md b/bindings/js_wasm/README.md new file mode 100644 index 0000000..6bfc0a0 --- /dev/null +++ b/bindings/js_wasm/README.md @@ -0,0 +1,5 @@ +# Changelog + +## 0.6.0 + +Breaking change! Changed public methods to accept and return quads instead of triples. diff --git a/bindings_tests/rify_js/.gitignore b/bindings/js_wasm/binding_tests/.gitignore similarity index 100% rename from bindings_tests/rify_js/.gitignore rename to bindings/js_wasm/binding_tests/.gitignore diff --git a/bindings_tests/rify_js/.npmrc b/bindings/js_wasm/binding_tests/.npmrc similarity index 100% rename from bindings_tests/rify_js/.npmrc rename to bindings/js_wasm/binding_tests/.npmrc diff --git a/bindings_tests/rify_js/bootstrap.js b/bindings/js_wasm/binding_tests/bootstrap.js similarity index 100% rename from bindings_tests/rify_js/bootstrap.js rename to bindings/js_wasm/binding_tests/bootstrap.js diff --git a/bindings_tests/rify_js/package.json b/bindings/js_wasm/binding_tests/package.json similarity index 92% rename from bindings_tests/rify_js/package.json rename to bindings/js_wasm/binding_tests/package.json index 7835c5c..1b2de41 100644 --- a/bindings_tests/rify_js/package.json +++ b/bindings/js_wasm/binding_tests/package.json @@ -3,7 +3,7 @@ "version": "1.0.0", "license": "MIT", "dependencies": { - "rify": "../../pkg", + "rify": "../pkg", "chai": "^4.2.0" }, "devDependencies": { diff --git a/bindings_tests/rify_js/test.js b/bindings/js_wasm/binding_tests/test.js similarity index 64% rename from bindings_tests/rify_js/test.js rename to bindings/js_wasm/binding_tests/test.js index 5297fce..0ef5e9b 100644 --- a/bindings_tests/rify_js/test.js +++ b/bindings/js_wasm/binding_tests/test.js @@ -38,48 +38,48 @@ function e(str) { // a credential in Explicit Ethos form const CREDENTIAL_EE = [ - ["root_authority", "claims", "_:0"], - ["_:0", "subject", "root_authority"], - ["_:0", "predicate", "defersTo"], - ["_:0", "object", "issuer"], - ["issuer", "claims", "_:1"], - ["_:1", "subject", "bobert"], - ["_:1", "predicate", "mayPurchase"], - ["_:1", "object", "http://www.heppnetz.de/ontologies/vso/ns#Vehicle"], + ["root_authority", "claims", "_:0", "default_graph"], + ["_:0", "subject", "root_authority", "default_graph"], + ["_:0", "predicate", "defersTo", "default_graph"], + ["_:0", "object", "issuer", "default_graph"], + ["issuer", "claims", "_:1", 
"default_graph"], + ["_:1", "subject", "bobert", "default_graph"], + ["_:1", "predicate", "mayPurchase", "default_graph"], + ["_:1", "object", "http://www.heppnetz.de/ontologies/vso/ns#Vehicle", "default_graph"], ]; const RULES = [ // to claim deference is deference { if_all: [ - [a("super"), e("claims"), a("claim1")], - [a("claim1"), e("subject"), a("super")], - [a("claim1"), e("predicate"), e("defersTo")], - [a("claim1"), e("object"), a("minor")], + [a("super"), e("claims"), a("claim1"), e("default_graph")], + [a("claim1"), e("subject"), a("super"), e("default_graph")], + [a("claim1"), e("predicate"), e("defersTo"), e("default_graph")], + [a("claim1"), e("object"), a("minor"), e("default_graph")], ], then: [ - [a("super"), e("defersTo"), a("minor")], + [a("super"), e("defersTo"), a("minor"), e("default_graph")], ], }, // defered entity may make claims on behalf of the deferer { if_all: [ - [a("super"), e("defersTo"), a("minor")], - [a("minor"), e("claims"), a("claim1")], + [a("super"), e("defersTo"), a("minor"), e("default_graph")], + [a("minor"), e("claims"), a("claim1"), e("default_graph")], ], then: [ - [a("super"), e("claims"), a("claim1")], + [a("super"), e("claims"), a("claim1"), e("default_graph")], ], }, // the verifier trusts root_authority { if_all: [ - [e("root_authority"), e("claims"), a("c")], - [a("c"), e("subject"), a("s")], - [a("c"), e("predicate"), a("p")], - [a("c"), e("object"), a("o")], + [e("root_authority"), e("claims"), a("c"), e("default_graph")], + [a("c"), e("subject"), a("s"), e("default_graph")], + [a("c"), e("predicate"), a("p"), e("default_graph")], + [a("c"), e("object"), a("o"), e("default_graph")], ], then: [ - [a("s"), a("p"), a("o")], + [a("s"), a("p"), a("o"), e("default_graph")], ], } ]; @@ -93,7 +93,7 @@ tests([ ['The proof is the output of the theorem prover (DCK-69).', () => { // call `prove` then use the output of `prove` to verify the ruleset const composite_claims = [ - ["bobert", "mayPurchase", "http://www.heppnetz.de/ontologies/vso/ns#Vehicle"] + ["bobert", "mayPurchase", "http://www.heppnetz.de/ontologies/vso/ns#Vehicle", "default_graph"] ]; let proof = prove(CREDENTIAL_EE, composite_claims, RULES); expect(proof).to.deep.equal([ @@ -118,27 +118,29 @@ tests([ let valid = validate(RULES, proof); expect(valid).to.deep.equal({ assumed: [ - ['_:0', 'object', 'issuer'], - ['_:0', 'predicate', 'defersTo'], - ['_:0', 'subject', 'root_authority'], + ['_:0', 'object', 'issuer', 'default_graph'], + ['_:0', 'predicate', 'defersTo', 'default_graph'], + ['_:0', 'subject', 'root_authority', 'default_graph'], [ '_:1', 'object', - 'http://www.heppnetz.de/ontologies/vso/ns#Vehicle' + 'http://www.heppnetz.de/ontologies/vso/ns#Vehicle', + 'default_graph' ], - ['_:1', 'predicate', 'mayPurchase'], - ['_:1', 'subject', 'bobert'], - ['issuer', 'claims', '_:1'], - ['root_authority', 'claims', '_:0'] + ['_:1', 'predicate', 'mayPurchase', 'default_graph'], + ['_:1', 'subject', 'bobert', 'default_graph'], + ['issuer', 'claims', '_:1', 'default_graph'], + ['root_authority', 'claims', '_:0', 'default_graph'] ], implied: [ [ 'bobert', 'mayPurchase', - 'http://www.heppnetz.de/ontologies/vso/ns#Vehicle' + 'http://www.heppnetz.de/ontologies/vso/ns#Vehicle', + 'default_graph' ], - ['root_authority', 'claims', '_:1'], - ['root_authority', 'defersTo', 'issuer'] + ['root_authority', 'claims', '_:1', 'default_graph'], + ['root_authority', 'defersTo', 'issuer', 'default_graph'] ] }); }], @@ -167,19 +169,19 @@ tests([ // (?a, is, awesome) ∧ (?a, score, ?s) -> (?a score, awesome) 
let awesome_score_axiom = { if_all: [ - [{ Unbound: "a" }, { Bound: "is" }, { Bound: "awesome" }], - [{ Unbound: "a" }, { Bound: "score" }, { Unbound: "s" }], + [{ Unbound: "a" }, { Bound: "is" }, { Bound: "awesome" }, { Bound: "default_graph" }], + [{ Unbound: "a" }, { Bound: "score" }, { Unbound: "s" }, { Bound: "default_graph" }], ], then: [ - [{ Unbound: "a" }, { Bound: "score" }, { Bound: "awesome" }] + [{ Unbound: "a" }, { Bound: "score" }, { Bound: "awesome" }, { Bound: "default_graph" }] ], }; let proof = prove( [ - ["you", "score", "unspecified"], - ["you", "is", "awesome"], + ["you", "score", "unspecified", "default_graph"], + ["you", "is", "awesome", "default_graph"], ], - [["you", "score", "awesome"]], + [["you", "score", "awesome", "default_graph"]], [awesome_score_axiom], ); expect(proof).to.deep.equal([{ @@ -192,16 +194,16 @@ tests([ // (?a, is, awesome) ∧ (?a, score, ?s) -> (?a score, awesome) let awesome_score_axiom = { if_all: [ - [{ Unbound: "a" }, { Bound: "is" }, { Bound: "awesome" }], - [{ Unbound: "a" }, { Bound: "score" }, { Unbound: "s" }], + [{ Unbound: "a" }, { Bound: "is" }, { Bound: "awesome" }, { Bound: "default_graph" }], + [{ Unbound: "a" }, { Bound: "score" }, { Unbound: "s" }, { Bound: "default_graph" }], ], then: [ - [{ Unbound: "a" }, { Bound: "score" }, { Bound: "awesome" }] + [{ Unbound: "a" }, { Bound: "score" }, { Bound: "awesome" }, { Bound: "default_graph" }] ], }; let known_facts = [ - ["you", "score", "unspecified"], - ["you", "is", "awesome"], + ["you", "score", "unspecified", "default_graph"], + ["you", "is", "awesome", "default_graph"], ]; let valid = validate( [awesome_score_axiom], @@ -212,11 +214,11 @@ tests([ ); expect(valid).to.deep.equal({ assumed: [ - ["you", "is", "awesome"], - ["you", "score", "unspecified"], + ["you", "is", "awesome", "default_graph"], + ["you", "score", "unspecified", "default_graph"], ], implied: [ - ["you", "score", "awesome"], + ["you", "score", "awesome", "default_graph"], ] }); @@ -228,7 +230,7 @@ tests([ } // After verifying all the assumptions in the proof are true, we know that the - // triples in valid.implied are true with respect to the provided rules. + // quads in valid.implied are true with respect to the provided rules. }], ]); diff --git a/bindings_tests/rify_js/webpack.config.js b/bindings/js_wasm/binding_tests/webpack.config.js similarity index 100% rename from bindings_tests/rify_js/webpack.config.js rename to bindings/js_wasm/binding_tests/webpack.config.js diff --git a/bindings_tests/rify_js/yarn.lock b/bindings/js_wasm/binding_tests/yarn.lock similarity index 99% rename from bindings_tests/rify_js/yarn.lock rename to bindings/js_wasm/binding_tests/yarn.lock index 56eec4c..550d8e4 100644 --- a/bindings_tests/rify_js/yarn.lock +++ b/bindings/js_wasm/binding_tests/yarn.lock @@ -2090,8 +2090,8 @@ ret@~0.1.10: resolved "https://registry.yarnpkg.com/ret/-/ret-0.1.15.tgz#b8a4825d5bdb1fc3f6f53c2bc33f81388681c7bc" integrity sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg== -rify@../../pkg: - version "0.1.0" +rify@../pkg: + version "0.4.0" rimraf@^2.5.4, rimraf@^2.6.3: version "2.7.1" diff --git a/package.json b/bindings/js_wasm/package.json similarity index 97% rename from package.json rename to bindings/js_wasm/package.json index 3051c99..503c736 100644 --- a/package.json +++ b/bindings/js_wasm/package.json @@ -5,7 +5,7 @@ "Sam Hellawell " ], "description": "RDF reasoner that operates on RIF-like conjunctive rules. 
Outputs a machine\nreadable proof of some claim which can be cheaply verified.\n", - "version": "0.4.0", + "version": "0.6.0", "license": "MIT OR Apache-2.0", "repository": { "type": "git", diff --git a/src/lang_bindings/js_wasm_test.rs b/bindings/js_wasm/src/js_wasm_test.rs similarity index 85% rename from src/lang_bindings/js_wasm_test.rs rename to bindings/js_wasm/src/js_wasm_test.rs index 7f9ca5b..155129a 100644 --- a/src/lang_bindings/js_wasm_test.rs +++ b/bindings/js_wasm/src/js_wasm_test.rs @@ -1,18 +1,17 @@ -use super::js_wasm::Entity; -use super::js_wasm::RuleUnchecked; -use crate::prove::RuleApplication; -use crate::Claim; +use super::Entity; +use super::RuleUnchecked; +use rify::RuleApplication; use serde_json::json; type RulesInput = Vec; -type ClaimsInput = Vec>; +type ClaimsInput = Vec<[String; 4]>; type ProofInput = Vec>; /// this dummy function ensures the above types are up-to-date /// with the actual interface fn _types_correct(r: RulesInput, c: ClaimsInput, p: ProofInput) -> ProofInput { - super::js_wasm::validate_(r.clone(), p).unwrap(); - super::js_wasm::prove_(c.clone(), c, r).unwrap() + super::validate_(r.clone(), p).unwrap(); + super::prove_(c.clone(), c, r).unwrap() } #[test] @@ -22,13 +21,14 @@ fn ser_deser_rules() { Entity::Bound("a".into()), Entity::Bound("b".into()), Entity::Unbound("c".into()), + Entity::Bound("g".into()), ]], then: vec![], }]; let expected = json!([ { "if_all": [ - [{"Bound": "a"}, {"Bound": "b"}, {"Unbound": "c"}] + [{"Bound": "a"}, {"Bound": "b"}, {"Unbound": "c"}, {"Bound": "g"}] ], "then": [] } @@ -43,8 +43,8 @@ fn ser_deser_rules() { #[test] fn ser_deser_claims() { - let rules: ClaimsInput = vec![["a".into(), "b".into(), "c".into()]]; - let expected = json!([["a", "b", "c"]]); + let rules: ClaimsInput = vec![["a".into(), "b".into(), "c".into(), "g".into()]]; + let expected = json!([["a", "b", "c", "g"]]); assert_eq!(&serde_json::to_value(&rules).unwrap(), &expected); assert_eq!( diff --git a/src/lang_bindings/js_wasm.rs b/bindings/js_wasm/src/lib.rs similarity index 70% rename from src/lang_bindings/js_wasm.rs rename to bindings/js_wasm/src/lib.rs index ea61faf..491244a 100644 --- a/src/lang_bindings/js_wasm.rs +++ b/bindings/js_wasm/src/lib.rs @@ -1,8 +1,11 @@ extern crate wasm_bindgen; -use crate::rule::InvalidRule; -use crate::RuleApplication; -use crate::{Claim, Rule}; +#[cfg(test)] +mod js_wasm_test; + +use rify::InvalidRule; +use rify::Rule; +use rify::RuleApplication; use serde::de::DeserializeOwned; use wasm_bindgen::prelude::*; @@ -12,19 +15,19 @@ use wasm_bindgen::prelude::*; /// // (?a, is, awesome) ∧ (?a, score, ?s) -> (?a score, awesome) /// let awesome_score_axiom = { /// if_all: [ -/// [{Unbound: "a"}, {Bound: "is"}, {Bound: "awesome"}], -/// [{Unbound: "a"}, {Bound: "score"}, {Unbound: "s"}], +/// [{Unbound: "a"}, {Bound: "is"}, {Bound: "awesome"}, {Bound: "default_graph"}], +/// [{Unbound: "a"}, {Bound: "score"}, {Unbound: "s"}, {Bound: "default_graph"}], /// ], /// then: [ -/// [{Unbound: "a"}, {Bound: "score"}, {Bound: "awesome"}] +/// [{Unbound: "a"}, {Bound: "score"}, {Bound: "awesome"}, {Bound: "default_graph"}] /// ], /// }; /// let proof = prove( /// [ -/// ["you", "score", "unspecified"], -/// ["you", "is", "awesome"], +/// ["you", "score", "unspecified", "default_graph"], +/// ["you", "is", "awesome", "default_graph"], /// ], -/// [["you", "score", "awesome"]], +/// [["you", "score", "awesome", "default_graph"]], /// [awesome_score_axiom], /// ); /// expect(proof).to.deep.equal([{ @@ -39,20 +42,21 @@ pub fn 
prove( rules: Box<[JsValue]>, ) -> Result, JsValue> { let proof = prove_( - deser_list(premises)?, - deser_list(to_prove)?, - deser_list(rules)?, + deser_list(&premises)?, + deser_list(&to_prove)?, + deser_list(&rules)?, )?; Ok(ser_list(&proof)) } -pub(super) fn prove_( - premises: Vec>, - to_prove: Vec>, +pub fn prove_( + premises: Vec<[String; 4]>, + to_prove: Vec<[String; 4]>, rules: Vec, ) -> Result>, Error> { let rules = RuleUnchecked::check_all(rules)?; - let proof = crate::prove(&premises, &to_prove, &rules).map_err(Into::::into)?; + let proof = + rify::prove::(&premises, &to_prove, &rules).map_err(Into::::into)?; Ok(proof) } @@ -77,16 +81,16 @@ pub(super) fn prove_( /// // (?a, is, awesome) ∧ (?a, score, ?s) -> (?a score, awesome) /// let awesome_score_axiom = { /// if_all: [ -/// [{ Unbound: "a" }, { Bound: "is" }, { Bound: "awesome" }], -/// [{ Unbound: "a" }, { Bound: "score" }, { Unbound: "s" }], +/// [{ Unbound: "a" }, { Bound: "is" }, { Bound: "awesome" }, { Bound: "default_graph" }], +/// [{ Unbound: "a" }, { Bound: "score" }, { Unbound: "s" }, { Bound: "default_graph" }], /// ], /// then: [ -/// [{ Unbound: "a" }, { Bound: "score" }, { Bound: "awesome" }] +/// [{ Unbound: "a" }, { Bound: "score" }, { Bound: "awesome" }, { Bound: "default_graph" }] /// ], /// }; /// let known_facts = [ -/// ["you", "score", "unspecified"], -/// ["you", "is", "awesome"], +/// ["you", "score", "unspecified", "default_graph"], +/// ["you", "is", "awesome", "default_graph"], /// ]; /// let valid = validate( /// [awesome_score_axiom], @@ -97,11 +101,11 @@ pub(super) fn prove_( /// ); /// expect(valid).to.deep.equal({ /// assumed: [ -/// ["you", "is", "awesome"], -/// ["you", "score", "unspecified"], +/// ["you", "is", "awesome", "default_graph"], +/// ["you", "score", "unspecified", "default_graph"], /// ], /// implied: [ -/// ["you", "score", "awesome"], +/// ["you", "score", "awesome", "default_graph"], /// ] /// }); /// @@ -113,34 +117,34 @@ pub(super) fn prove_( /// } /// /// // After verifying all the assumptions in the proof are true, we know that the -/// // triples in valid.implied are true with respect to the provided rules. +/// // quads in valid.implied are true with respect to the provided rules. 
/// ``` #[wasm_bindgen] pub fn validate(rules: Box<[JsValue]>, proof: Box<[JsValue]>) -> Result { - let valid = validate_(deser_list(rules)?, deser_list(proof)?)?; + let valid = validate_(deser_list(&rules)?, deser_list(&proof)?)?; Ok(ser(&valid)) } -pub(super) fn validate_( +pub fn validate_( rules: Vec, proof: Vec>, -) -> Result, Error> { +) -> Result, Error> { let rules = RuleUnchecked::check_all(rules)?; - let valid = crate::validate::validate(&rules, &proof)?; + let valid = rify::validate::(&rules, &proof)?; Ok(valid) } #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, serde::Serialize, serde::Deserialize)] -pub(super) enum Entity { +pub enum Entity { Unbound(String), Bound(String), } -impl From for crate::Entity { +impl From for rify::Entity { fn from(ent: Entity) -> Self { match ent { - Entity::Unbound(unbound) => crate::Entity::Unbound(unbound), - Entity::Bound(bound) => crate::Entity::Bound(bound), + Entity::Unbound(unbound) => rify::Entity::Unbound(unbound), + Entity::Bound(bound) => rify::Entity::Bound(bound), } } } @@ -149,8 +153,8 @@ impl From for crate::Entity { pub enum Error { InputTypo, InvalidRule(InvalidRule), - CantProve(crate::prove::CantProve), - InvalidProof(crate::validate::Invalid), + CantProve(rify::CantProve), + InvalidProof(rify::Invalid), } impl From for JsValue { @@ -171,20 +175,20 @@ impl From for Error { } } -impl From for Error { - fn from(other: crate::prove::CantProve) -> Self { +impl From for Error { + fn from(other: rify::CantProve) -> Self { Error::CantProve(other) } } -impl From for Error { - fn from(other: crate::validate::Invalid) -> Self { +impl From for Error { + fn from(other: rify::Invalid) -> Self { Error::InvalidProof(other) } } /// Deserialize a list of js values into a list of rust values -fn deser_list(a: Box<[JsValue]>) -> Result, Error> { +fn deser_list(a: &[JsValue]) -> Result, Error> { a.iter().map(|b| deser::(b)).collect() } @@ -214,8 +218,8 @@ fn ser(a: &T) -> JsValue { #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, serde::Serialize, serde::Deserialize)] pub struct RuleUnchecked { - pub(super) if_all: Vec>, - pub(super) then: Vec>, + pub if_all: Vec<[Entity; 4]>, + pub then: Vec<[Entity; 4]>, } impl RuleUnchecked { @@ -231,7 +235,7 @@ impl RuleUnchecked { } } -fn convert_claim>(c: Claim) -> Claim { - let [s, p, o] = c; - [s.into(), p.into(), o.into()] +fn convert_claim>(claim: [A; 4]) -> [T; 4] { + let [s, p, o, g] = claim; + [s.into(), p.into(), o.into(), g.into()] } diff --git a/justfile b/justfile index afbaf59..301281a 100644 --- a/justfile +++ b/justfile @@ -2,6 +2,8 @@ # build wasm and js bindings js: + #!/usr/bin/env bash + cd bindings/js_wasm rm -rf pkg wasm-pack build --target nodejs --out-dir pkg --out-name index wasm-pack build --target bundler --out-dir pkg --out-name index_bundle @@ -9,13 +11,17 @@ js: # install js test depenedenicies, requires yarn js-test-init: - cd bindings_tests/rify_js; yarn + #!/usr/bin/env bash + cd bindings/js_wasm/binding_tests + yarn # run js tests but assume `js-test-init` and `js` were already run js-test-light: - cd bindings_tests/rify_js; yarn test + #!/usr/bin/env bash + cd bindings/js_wasm/binding_tests + yarn test -# run js tests +# run js tests (called from ci) js-test: just js just js-test-init @@ -24,10 +30,10 @@ js-test: # remove dist and node_modules from js bindings tests clean: cargo clean - rm -r pkg || true + rm -r bindings/js_wasm/pkg || true just clean-js # remove artifacts from js bindings tests clean-js: - rm -r bindings_tests/rify_js/dist || true - rm -r 
bindings_tests/rify_js/node_modules || true + rm -r bindings/js_wasm/binding_tests/dist || true + rm -r bindings/js_wasm/binding_tests/node_modules || true diff --git a/src/common.rs b/src/common.rs index 242d80b..c632b63 100644 --- a/src/common.rs +++ b/src/common.rs @@ -9,26 +9,28 @@ /// assert_eq!(inc(&mut a), 2); /// assert_eq!(a, 3); /// ``` -pub fn inc(a: &mut u32) -> u32 { - *a += 1; - *a - 1 +pub fn inc(a: &mut T) -> T +where + T: core::ops::AddAssign + Clone, + u8: Into, +{ + let ret = a.clone(); + *a += 1u8.into(); + ret } #[cfg(test)] mod test_util { - use crate::rule::Entity; - pub use crate::rule::Entity::{Bound, Unbound}; - use crate::rule::Rule; - use crate::Claim; use core::fmt::Debug; pub fn decl_rules( - rs: &[[&[Claim>]; 2]], - ) -> Vec> { + rs: &[[&[[crate::rule::Entity; 4]]; 2]], + ) -> Vec> { rs.iter() - .map(|[ifa, then]| Rule::create(ifa.to_vec(), then.to_vec()).unwrap()) + .map(|[ifa, then]| crate::rule::Rule::create(ifa.to_vec(), then.to_vec()).unwrap()) .collect() } } + #[cfg(test)] pub use test_util::*; diff --git a/src/lang_bindings/mod.rs b/src/lang_bindings/mod.rs deleted file mode 100644 index 5027304..0000000 --- a/src/lang_bindings/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -#[cfg(feature = "js-library-wasm")] -pub mod js_wasm; - -#[cfg(test)] -mod js_wasm_test; - -// In the future we may export other sorts of bindings e.g. using pyo3. diff --git a/src/lib.rs b/src/lib.rs index 4b84ce1..1ad3c5a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,7 +2,6 @@ extern crate alloc; extern crate core; mod common; -pub mod lang_bindings; mod mapstack; mod prove; mod reasoner; @@ -11,8 +10,18 @@ mod translator; mod validate; mod vecset; -pub use prove::{prove, RuleApplication}; +pub use prove::{prove, CantProve, RuleApplication}; pub use rule::{Entity, InvalidRule, Rule}; pub use validate::{validate, Invalid, Valid}; -pub type Claim = [T; 3]; +#[cfg(doctest)] +mod test_readme { + macro_rules! external_doc_test { + ($x:expr) => { + #[doc = $x] + extern {} + }; + } + + external_doc_test!(include_str!("../README.md")); +} diff --git a/src/prove.rs b/src/prove.rs index 85424db..2f85bda 100644 --- a/src/prove.rs +++ b/src/prove.rs @@ -1,9 +1,7 @@ -use crate::reasoner::{Triple, TripleStore}; +use crate::reasoner::{Quad, Reasoner}; use crate::rule::{Entity, LowRule, Rule}; use crate::translator::Translator; -use crate::Claim; use alloc::collections::{BTreeMap, BTreeSet}; -use core::convert::TryInto; use core::fmt::{Debug, Display}; /// Locate a proof of some composite claims given the provied premises and rules. 
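One behavioral consequence of the move to quads, worth calling out before the updated doc example below: `RuleApplication::instantiations` holds one bound value per distinct unbound variable of the applied rule, in order of each variable's first appearance in `if_all`. When a rule leaves the graph name unbound, the graph binding therefore shows up in the vector as well, which is why the `prove_` test below expects three instantiations. A hedged sketch of that behavior, mirroring that test (the `main` wrapper is illustrative only):

```rust
use rify::{prove, Entity::{Bound as B, Unbound as U}, Rule, RuleApplication};

fn main() {
    // Unbound variables, in order of first appearance: "boi", "g", "s".
    let rule = Rule::create(
        vec![
            [U("boi"), B("is"), B("awesome"), U("g")],
            [U("boi"), B("score"), U("s"), U("g")],
        ],
        vec![[U("boi"), B("score"), B("awesome"), U("g")]],
    )
    .expect("invalid rule");

    let proof = prove::<&str, &str>(
        &[
            ["you", "score", "unspecified", "default_graph"],
            ["you", "is", "awesome", "default_graph"],
        ],
        &[["you", "score", "awesome", "default_graph"]],
        &[rule],
    )
    .expect("can't prove");

    // One instantiation per unbound variable, in first-appearance order:
    // boi -> "you", g -> "default_graph", s -> "unspecified"
    assert_eq!(
        proof,
        vec![RuleApplication {
            rule_index: 0,
            instantiations: vec!["you", "default_graph", "unspecified"],
        }]
    );
}
```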
@@ -19,19 +17,31 @@ use core::fmt::{Debug, Display}; /// // (?a, is, awesome) ∧ (?a, score, ?s) -> (?a score, awesome) /// let awesome_score_axiom = Rule::create( /// vec![ -/// [Unbound("a"), Bound("is"), Bound("awesome")], // if someone is awesome -/// [Unbound("a"), Bound("score"), Unbound("s")], // and they have some score +/// // if someone is awesome +/// [Unbound("a"), Bound("is"), Bound("awesome"), Bound("default_graph")], +/// // and they have some score +/// [Unbound("a"), Bound("score"), Unbound("s"), Bound("default_graph")], +/// ], +/// vec![ +/// // then they must have an awesome score +/// [Unbound("a"), Bound("score"), Bound("awesome"), Bound("default_graph")] /// ], -/// vec![[Unbound("a"), Bound("score"), Bound("awesome")]], // then they must have an awesome score /// )?; /// /// assert_eq!( /// prove::<&str, &str>( -/// &[["you", "score", "unspecified"], ["you", "is", "awesome"]], -/// &[["you", "score", "awesome"]], +/// // list of already known facts (premises) +/// &[ +/// ["you", "score", "unspecified", "default_graph"], +/// ["you", "is", "awesome", "default_graph"] +/// ], +/// // the thing we want to prove +/// &[["you", "score", "awesome", "default_graph"]], +/// // ordered list of logical rules /// &[awesome_score_axiom] /// )?, /// &[ +/// // the desired statement is be proven by instatiating the `awesome_score_axiom` /// // (you is awesome) ∧ (you score unspecified) -> (you score awesome) /// RuleApplication { /// rule_index: 0, @@ -43,8 +53,8 @@ use core::fmt::{Debug, Display}; /// # } /// ``` pub fn prove<'a, Unbound: Ord + Clone, Bound: Ord + Clone>( - premises: &'a [Claim], - to_prove: &'a [Claim], + premises: &'a [[Bound; 4]], + to_prove: &'a [[Bound; 4]], rules: &'a [Rule], ) -> Result>, CantProve> { let tran: Translator = rules @@ -53,15 +63,19 @@ pub fn prove<'a, Unbound: Ord + Clone, Bound: Ord + Clone>( .chain(premises.iter().flatten()) .cloned() .collect(); - let as_raw = |[s, p, o]: &Claim| -> Option { - Some(Triple::from_tuple( - tran.forward(&s)?, - tran.forward(&p)?, - tran.forward(&o)?, - )) + let as_raw = |[s, p, o, g]: &[Bound; 4]| -> Option { + Some( + [ + tran.forward(&s)?, + tran.forward(&p)?, + tran.forward(&o)?, + tran.forward(&g)?, + ] + .into(), + ) }; - let lpremises: Vec = premises.iter().map(|spo| as_raw(spo).unwrap()).collect(); - let lto_prove: Vec = to_prove + let lpremises: Vec = premises.iter().map(|spo| as_raw(spo).unwrap()).collect(); + let lto_prove: Vec = to_prove .iter() .map(as_raw) .collect::>() @@ -84,41 +98,43 @@ pub fn prove<'a, Unbound: Ord + Clone, Bound: Ord + Clone>( } fn low_prove( - premises: &[Triple], - to_prove: &[Triple], + premises: &[Quad], + to_prove: &[Quad], rules: &[LowRule], ) -> Result, CantProve> { - let mut ts = TripleStore::new(); + let mut rs = Reasoner::default(); for prem in premises { - ts.insert(prem.clone()); + rs.insert(prem.clone()); } - // statement (Triple) is proved by applying rule (LowRuleApplication) - let mut arguments: BTreeMap = BTreeMap::new(); + // statement (Quad) is proved by applying rule (LowRuleApplication) + let mut arguments: BTreeMap = BTreeMap::new(); // reason loop { - if to_prove.iter().all(|tp| ts.contains(tp)) { + if to_prove.iter().all(|tp| rs.contains(tp)) { break; } - let mut to_add = BTreeSet::::new(); + let mut to_add = BTreeSet::::new(); for (rule_index, rr) in rules.iter().enumerate() { - ts.apply(&mut rr.if_all.clone(), &mut rr.inst.clone(), &mut |inst| { + rs.apply(&mut rr.if_all.clone(), &mut rr.inst.clone(), &mut |inst| { let ins = inst.as_ref(); for 
implied in &rr.then { - let new_triple = Triple::from_tuple( - ins[&implied.subject.0], - ins[&implied.property.0], - ins[&implied.object.0], - ); - if !ts.contains(&new_triple) { + let new_quad = [ + ins[&implied.s.0], + ins[&implied.p.0], + ins[&implied.o.0], + ins[&implied.g.0], + ] + .into(); + if !rs.contains(&new_quad) { arguments - .entry(new_triple.clone()) + .entry(new_quad.clone()) .or_insert_with(|| LowRuleApplication { rule_index, instantiations: ins.clone(), }); - to_add.insert(new_triple); + to_add.insert(new_quad); } } }); @@ -127,11 +143,11 @@ fn low_prove( break; } for new in to_add.into_iter() { - ts.insert(new); + rs.insert(new); } } - if !to_prove.iter().all(|tp| ts.contains(tp)) { + if !to_prove.iter().all(|tp| rs.contains(tp)) { return Err(CantProve::ExhaustedSearchSpace); } @@ -142,40 +158,38 @@ fn low_prove( Ok(ret) } -// TODO, this fuction is not tail recursive -/// As this function populates the output. It removes correspond arguments from the input. +// this function is not tail recursive +/// As this function populates the output. It removes corresponding arguments from the input. /// The reason being that a single argument does not need to be proved twice. Once is is /// proved, it can be treated as a premise. -fn recall_proof<'a>( - // globally scoped triple to prove - to_prove: &Triple, - arguments: &mut BTreeMap, +fn recall_proof( + // the globally scoped quad for which to find arguments + to_prove: &Quad, + arguments: &mut BTreeMap, rules: &[LowRule], outp: &mut Vec, ) { - let to_global_scope = |rra: &LowRuleApplication, locally_scoped_entity: u32| -> u32 { - let concrete = rules[rra.rule_index] - .inst - .as_ref() - .get(&locally_scoped_entity); - let found = rra.instantiations.get(&locally_scoped_entity); - debug_assert!(if let (Some(c), Some(f)) = (concrete, found) { - c == f - } else { - true - }); + let to_global_scope = |rra: &LowRuleApplication, locally_scoped: usize| -> usize { + let concrete = rules[rra.rule_index].inst.as_ref().get(&locally_scoped); + let found = rra.instantiations.get(&locally_scoped); + if let (Some(c), Some(f)) = (concrete, found) { + debug_assert_eq!(c, f); + } *concrete.or(found).unwrap() }; if let Some(application) = arguments.remove(to_prove) { // for every required sub-statement, recurse - for triple in &rules[application.rule_index].if_all { + for quad in &rules[application.rule_index].if_all { + let Quad { s, p, o, g } = quad; recall_proof( - &Triple::from_tuple( - to_global_scope(&application, triple.subject.0), - to_global_scope(&application, triple.property.0), - to_global_scope(&application, triple.object.0), - ), + &[ + to_global_scope(&application, s.0), + to_global_scope(&application, p.0), + to_global_scope(&application, o.0), + to_global_scope(&application, g.0), + ] + .into(), arguments, rules, outp, @@ -208,7 +222,7 @@ impl std::error::Error for CantProve {} #[derive(Clone, Debug, PartialEq, Eq)] struct LowRuleApplication { rule_index: usize, - instantiations: BTreeMap, + instantiations: BTreeMap, } impl LowRuleApplication { @@ -226,16 +240,16 @@ impl LowRuleApplication { let mut instantiations = Vec::new(); // unbound_human -> unbound_local - let uhul: BTreeMap<&Unbound, u32> = original_rule + let uhul: BTreeMap<&Unbound, usize> = original_rule .cononical_unbound() .enumerate() - .map(|(a, b)| (b, a.try_into().unwrap())) + .map(|(i, a)| (a, i)) .collect(); for unbound_human in original_rule.cononical_unbound() { - let unbound_local = uhul[unbound_human]; - let bound_global = 
self.instantiations[&unbound_local]; - let bound_human = trans.back(bound_global).unwrap(); + let unbound_local: usize = uhul[unbound_human]; + let bound_global: usize = self.instantiations[&unbound_local]; + let bound_human: &Bound = trans.back(bound_global).unwrap(); instantiations.push(bound_human.clone()); } @@ -259,14 +273,14 @@ impl RuleApplication { pub(crate) fn assumptions_when_applied<'a, Unbound: Ord + Clone>( &'a self, rule: &'a Rule, - ) -> Result> + 'a, BadRuleApplication> { + ) -> Result + 'a, BadRuleApplication> { self.bind_claims(rule, rule.if_all()) } pub(crate) fn implications_when_applied<'a, Unbound: Ord + Clone>( &'a self, rule: &'a Rule, - ) -> Result> + 'a, BadRuleApplication> { + ) -> Result + 'a, BadRuleApplication> { self.bind_claims(rule, rule.then()) } @@ -274,8 +288,8 @@ impl RuleApplication { fn bind_claims<'a, Unbound: Ord + Clone>( &'a self, rule: &'a Rule, - claims: &'a [Claim>], - ) -> Result> + 'a, BadRuleApplication> { + claims: &'a [[Entity; 4]], + ) -> Result + 'a, BadRuleApplication> { let cannon: BTreeMap<&Unbound, usize> = rule .cononical_unbound() .enumerate() @@ -296,14 +310,15 @@ impl RuleApplication { /// panics if an unbound entity is not registered in map /// panics if the canonical index of unbound (according to map) is too large to index instanitations fn bind_claim( - [s, p, o]: Claim>, + [s, p, o, g]: [Entity; 4], map: &BTreeMap<&Unbound, usize>, instanitations: &[Bound], -) -> Claim { +) -> [Bound; 4] { [ bind_entity(s, map, instanitations), bind_entity(p, map, instanitations), bind_entity(o, map, instanitations), + bind_entity(g, map, instanitations), ] } @@ -329,15 +344,17 @@ pub struct BadRuleApplication; #[cfg(test)] mod test { use super::*; - use crate::common::{decl_rules, inc}; - use crate::rule::Entity::{Bound, Unbound}; + use crate::common::decl_rules; + use crate::common::inc; + use crate::rule::Entity::{Bound as B, Unbound as U}; use crate::validate::validate; use crate::validate::Valid; #[test] fn novel_name() { assert_eq!( - prove::<&str, &str>(&[], &[["andrew", "score", "awesome"]], &[]).unwrap_err(), + prove::<&str, &str>(&[], &[["andrew", "score", "awesome", "default_graph"]], &[]) + .unwrap_err(), CantProve::NovelName ); } @@ -346,29 +363,29 @@ mod test { fn search_space_exhausted() { let err = prove::<&str, &str>( &[ - ["score", "score", "score"], - ["andrew", "andrew", "andrew"], - ["awesome", "awesome", "awesome"], + ["score", "score", "score", "default_graph"], + ["andrew", "andrew", "andrew", "default_graph"], + ["awesome", "awesome", "awesome", "default_graph"], ], - &[["andrew", "score", "awesome"]], + &[["andrew", "score", "awesome", "default_graph"]], &[], ) .unwrap_err(); assert_eq!(err, CantProve::ExhaustedSearchSpace); let err = prove::<&str, &str>( &[ - ["score", "score", "score"], - ["andrew", "andrew", "andrew"], - ["awesome", "awesome", "awesome"], - ["backflip", "backflip", "backflip"], - ["ability", "ability", "ability"], + ["score", "score", "score", "default_graph"], + ["andrew", "andrew", "andrew", "default_graph"], + ["awesome", "awesome", "awesome", "default_graph"], + ["backflip", "backflip", "backflip", "default_graph"], + ["ability", "ability", "ability", "default_graph"], ], - &[["andrew", "score", "awesome"]], + &[["andrew", "score", "awesome", "default_graph"]], &[ Rule::create(vec![], vec![]).unwrap(), Rule::create( - vec![[Unbound("a"), Bound("ability"), Bound("backflip")]], - vec![[Unbound("a"), Bound("score"), Bound("awesome")]], + vec![[U("a"), B("ability"), B("backflip"), U("g")]], + 
vec![[U("a"), B("score"), B("awesome"), U("g")]], ) .unwrap(), ], @@ -381,8 +398,8 @@ mod test { fn prove_already_stated() { assert_eq!( prove::<&str, &str>( - &[["doggo", "score", "11"]], - &[["doggo", "score", "11"]], + &[["doggo", "score", "11", "default_graph"]], + &[["doggo", "score", "11", "default_graph"]], &[] ) .unwrap(), @@ -396,16 +413,19 @@ mod test { // (?boi, is, awesome) ∧ (?boi, score, ?s) -> (?boi score, awesome) let awesome_score_axiom = Rule::create( vec![ - [Unbound("boi"), Bound("is"), Bound("awesome")], // if someone is awesome - [Unbound("boi"), Bound("score"), Unbound("s")], // and they have some score + [U("boi"), B("is"), B("awesome"), U("g")], // if someone is awesome + [U("boi"), B("score"), U("s"), U("g")], // and they have some score ], - vec![[Unbound("boi"), Bound("score"), Bound("awesome")]], // then they must have an awesome score + vec![[U("boi"), B("score"), B("awesome"), U("g")]], // then they must have an awesome score ) .unwrap(); assert_eq!( prove::<&str, &str>( - &[["you", "score", "unspecified"], ["you", "is", "awesome"]], - &[["you", "score", "awesome"]], + &[ + ["you", "score", "unspecified", "default_graph"], + ["you", "is", "awesome", "default_graph"] + ], + &[["you", "score", "awesome", "default_graph"]], &[awesome_score_axiom] ) .unwrap(), @@ -413,12 +433,73 @@ mod test { // (you is awesome) ∧ (you score unspecified) -> (you score awesome) RuleApplication { rule_index: 0, - instantiations: vec!["you", "unspecified"] + instantiations: vec!["you", "default_graph", "unspecified"] } ] ); } + #[test] + /// rules with a single unbound graphname cannot be applied accross graphnames + fn graph_separation() { + let awesome_score_axiom = Rule::create( + vec![ + [U("boi"), B("is"), B("awesome"), U("g")], + [U("boi"), B("score"), U("s"), U("g")], + ], + vec![[U("boi"), B("score"), B("awesome"), U("g")]], + ) + .unwrap(); + + prove::<&str, &str>( + &[ + ["you", "score", "unspecified", "default_graph"], + ["you", "is", "awesome", "default_graph"], + ], + &[["you", "score", "awesome", "default_graph"]], + &[awesome_score_axiom.clone()], + ) + .unwrap(); + assert_eq!( + prove( + &[ + ["you", "score", "unspecified", "default_graph"], + ["you", "is", "awesome", "other_graph"] + ], + &[["you", "score", "awesome", "default_graph"]], + &[awesome_score_axiom.clone()] + ) + .unwrap_err(), + CantProve::ExhaustedSearchSpace + ); + assert_eq!( + prove( + &[ + ["you", "score", "unspecified", "default_graph"], + ["you", "is", "awesome", "other_graph"] + ], + &[["you", "score", "awesome", "other_graph"]], + &[awesome_score_axiom.clone()] + ) + .unwrap_err(), + CantProve::ExhaustedSearchSpace + ); + assert_eq!( + prove( + &[ + ["you", "score", "unspecified", "default_graph"], + ["you", "is", "awesome", "default_graph"], + // to prevent NovelName error: + ["other_graph", "other_graph", "other_graph", "other_graph"], + ], + &[["you", "score", "awesome", "other_graph"]], + &[awesome_score_axiom.clone()] + ) + .unwrap_err(), + CantProve::ExhaustedSearchSpace + ); + } + #[test] fn prove_multi_step() { // Rules: @@ -441,38 +522,42 @@ mod test { // (soyoung is awesome) // (nick is awesome) + // the following rules operate only on the defualt graph let rules: Vec> = { - let ru: &[[&[Claim>]; 2]] = &[ + let ru: &[[&[[Entity<&str, &str>; 4]]; 2]] = &[ [ &[ - [Bound("andrew"), Bound("claims"), Unbound("c")], - [Unbound("c"), Bound("subject"), Unbound("s")], - [Unbound("c"), Bound("property"), Unbound("p")], - [Unbound("c"), Bound("object"), Unbound("o")], + [B("andrew"), 
B("claims"), U("c"), B("default_graph")], + [U("c"), B("subject"), U("s"), B("default_graph")], + [U("c"), B("property"), U("p"), B("default_graph")], + [U("c"), B("object"), U("o"), B("default_graph")], ], - &[[Unbound("s"), Unbound("p"), Unbound("o")]], + &[[U("s"), U("p"), U("o"), B("default_graph")]], ], [ &[ - [Unbound("person_a"), Bound("is"), Bound("awesome")], + [U("person_a"), B("is"), B("awesome"), B("default_graph")], [ - Unbound("person_a"), - Bound("friendswith"), - Unbound("person_b"), + U("person_a"), + B("friendswith"), + U("person_b"), + B("default_graph"), ], ], - &[[Unbound("person_b"), Bound("is"), Bound("awesome")]], + &[[U("person_b"), B("is"), B("awesome"), B("default_graph")]], ], [ &[[ - Unbound("person_a"), - Bound("friendswith"), - Unbound("person_b"), + U("person_a"), + B("friendswith"), + U("person_b"), + B("default_graph"), ]], &[[ - Unbound("person_b"), - Bound("friendswith"), - Unbound("person_a"), + U("person_b"), + B("friendswith"), + U("person_a"), + B("default_graph"), ]], ], ]; @@ -480,19 +565,21 @@ mod test { .map(|[ifa, then]| Rule::create(ifa.to_vec(), then.to_vec()).unwrap()) .collect() }; - let facts: &[Claim<&str>] = &[ - ["soyoung", "friendswith", "nick"], - ["nick", "friendswith", "elina"], - ["elina", "friendswith", "sam"], - ["sam", "friendswith", "fausto"], - ["fausto", "friendswith", "lovesh"], - ["andrew", "claims", "_:claim1"], - ["_:claim1", "subject", "lovesh"], - ["_:claim1", "property", "is"], - ["_:claim1", "object", "awesome"], + let facts: &[[&str; 4]] = &[ + ["soyoung", "friendswith", "nick", "default_graph"], + ["nick", "friendswith", "elina", "default_graph"], + ["elina", "friendswith", "sam", "default_graph"], + ["sam", "friendswith", "fausto", "default_graph"], + ["fausto", "friendswith", "lovesh", "default_graph"], + ["andrew", "claims", "_:claim1", "default_graph"], + ["_:claim1", "subject", "lovesh", "default_graph"], + ["_:claim1", "property", "is", "default_graph"], + ["_:claim1", "object", "awesome", "default_graph"], + ]; + let composite_claims: &[[&str; 4]] = &[ + ["soyoung", "is", "awesome", "default_graph"], + ["nick", "is", "awesome", "default_graph"], ]; - let composite_claims: &[Claim<&str>] = - &[["soyoung", "is", "awesome"], ["nick", "is", "awesome"]]; let expected_proof: Vec> = { let ep: &[(usize, &[&str])] = &[ (0, &["_:claim1", "lovesh", "is", "awesome"]), @@ -545,33 +632,35 @@ mod test { let mut next_uniq = 0u32; let parent = inc(&mut next_uniq); let ancestor = inc(&mut next_uniq); + let default_graph = inc(&mut next_uniq); let nodes: Vec = (0usize..10).map(|_| inc(&mut next_uniq)).collect(); - let facts: Vec> = nodes + let facts: Vec<[u32; 4]> = nodes .iter() .zip(nodes.iter().cycle().skip(1)) - .map(|(a, b)| [*a, parent, *b]) + .map(|(a, b)| [*a, parent, *b, default_graph]) .collect(); let rules = decl_rules(&[ [ - &[[Unbound("a"), Bound(parent), Unbound("b")]], - &[[Unbound("a"), Bound(ancestor), Unbound("b")]], + &[[U("a"), B(parent), U("b"), B(default_graph)]], + &[[U("a"), B(ancestor), U("b"), B(default_graph)]], ], [ &[ - [Unbound("a"), Bound(ancestor), Unbound("b")], - [Unbound("b"), Bound(ancestor), Unbound("c")], + [U("a"), B(ancestor), U("b"), B(default_graph)], + [U("b"), B(ancestor), U("c"), B(default_graph)], ], - &[[Unbound("a"), Bound(ancestor), Unbound("c")]], + &[[U("a"), B(ancestor), U("c"), B(default_graph)]], ], ]); let composite_claims = vec![ - [nodes[0], ancestor, *nodes.last().unwrap()], - [*nodes.last().unwrap(), ancestor, nodes[0]], - [nodes[0], ancestor, nodes[0]], - [nodes[0], 
parent, nodes[1]], // (first node, parent, second node) is a premise + [nodes[0], ancestor, *nodes.last().unwrap(), default_graph], + [*nodes.last().unwrap(), ancestor, nodes[0], default_graph], + [nodes[0], ancestor, nodes[0], default_graph], + // (first node, parent, second node) is a premise + [nodes[0], parent, nodes[1], default_graph], ]; let proof = prove::<&str, u32>(&facts, &composite_claims, &rules).unwrap(); - let Valid { assumed, implied } = validate(&rules, &proof).unwrap(); + let Valid { assumed, implied } = validate::<&str, u32>(&rules, &proof).unwrap(); assert_eq!( &assumed, &facts.iter().cloned().collect(), @@ -591,27 +680,29 @@ mod test { #[test] fn no_proof_is_generated_for_facts() { - let facts: Vec> = vec![ - ["tacos", "are", "tasty"], - ["nachos", "are", "tasty"], - ["nachos", "are", "food"], + let facts: Vec<[&str; 4]> = vec![ + ["tacos", "are", "tasty", "default_graph"], + ["nachos", "are", "tasty", "default_graph"], + ["nachos", "are", "food", "default_graph"], ]; let rules = decl_rules::<&str, &str>(&[[ - &[[Bound("nachos"), Bound("are"), Bound("tasty")]], - &[[Bound("nachos"), Bound("are"), Bound("food")]], + &[[B("nachos"), B("are"), B("tasty"), B("default_graph")]], + &[[B("nachos"), B("are"), B("food"), B("default_graph")]], ]]); - let composite_claims = vec![["nachos", "are", "food"]]; - let proof = prove(&facts, &composite_claims, &rules).unwrap(); + let composite_claims = vec![["nachos", "are", "food", "default_graph"]]; + let proof = prove::<&str, &str>(&facts, &composite_claims, &rules).unwrap(); assert_eq!(&proof, &vec![]); } #[test] fn unconditional_rule() { - let facts: Vec> = vec![]; - let rules = - decl_rules::<&str, &str>(&[[&[], &[[Bound("nachos"), Bound("are"), Bound("food")]]]]); - let composite_claims = vec![["nachos", "are", "food"]]; - let proof = prove(&facts, &composite_claims, &rules).unwrap(); + let facts: Vec<[&str; 4]> = vec![]; + let rules = decl_rules::<&str, &str>(&[[ + &[], + &[[B("nachos"), B("are"), B("food"), B("default_graph")]], + ]]); + let composite_claims = vec![["nachos", "are", "food", "default_graph"]]; + let proof = prove::<&str, &str>(&facts, &composite_claims, &rules).unwrap(); assert_eq!( &proof, &[RuleApplication { diff --git a/src/reasoner.rs b/src/reasoner.rs index d8811a9..b2f82f0 100644 --- a/src/reasoner.rs +++ b/src/reasoner.rs @@ -3,135 +3,133 @@ use crate::vecset::VecSet; use core::cmp::Ordering; #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct Triple { - pub subject: Subj, - pub property: Prop, - pub object: Obje, +pub struct Quad { + pub s: Subj, + pub p: Prop, + pub o: Obje, + pub g: Grap, } -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] -pub struct Subj(pub u32); - -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] -pub struct Prop(pub u32); - -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] -pub struct Obje(pub u32); - -impl Triple { - pub fn from_tuple(subject: u32, property: u32, object: u32) -> Self { +impl From<[usize; 4]> for Quad { + fn from([s, p, o, g]: [usize; 4]) -> Self { Self { - subject: Subj(subject), - property: Prop(property), - object: Obje(object), + s: Subj(s), + p: Prop(p), + o: Obje(o), + g: Grap(g), } } +} - fn cmp_spo(&self, other: &Self) -> Ordering { - (self.subject, self.property, self.object).cmp(&( - other.subject, - other.property, - other.object, - )) - } - - fn cmp_pos(&self, other: &Self) -> Ordering { - (self.property, self.object, self.subject).cmp(&( - other.property, - other.object, - other.subject, - )) - } - 
- fn cmp_osp(&self, other: &Self) -> Ordering { - (self.object, self.subject, self.property).cmp(&( - other.object, - other.subject, - other.property, - )) - } +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct Subj(pub usize); - fn cmp_sp(&self, other: (Subj, Prop)) -> Ordering { - (self.subject, self.property).cmp(&other) - } +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct Prop(pub usize); - fn cmp_po(&self, other: (Prop, Obje)) -> Ordering { - (self.property, self.object).cmp(&other) - } +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct Obje(pub usize); - fn cmp_os(&self, other: (Obje, Subj)) -> Ordering { - (self.object, self.subject).cmp(&other) - } -} +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct Grap(pub usize); + +type Spog = (Subj, Prop, Obje, Grap); +type Posg = (Prop, Obje, Subj, Grap); +type Ospg = (Obje, Subj, Prop, Grap); +type Gspo = (Grap, Subj, Prop, Obje); +type Gpos = (Grap, Prop, Obje, Subj); +type Gosp = (Grap, Obje, Subj, Prop); +type Spo = (Subj, Prop, Obje); +type Pos = (Prop, Obje, Subj); +type Osp = (Obje, Subj, Prop); +type Gsp = (Grap, Subj, Prop); +type Gpo = (Grap, Prop, Obje); +type Gos = (Grap, Obje, Subj); +type Sp = (Subj, Prop); +type Po = (Prop, Obje); +type Os = (Obje, Subj); +type Gs = (Grap, Subj); +type Gp = (Grap, Prop); +type Go = (Grap, Obje); +type S = (Subj,); +type P = (Prop,); +type O = (Obje,); +type G = (Grap,); /// Bindings of slots within the context of a rule. -pub type Instantiations = MapStack; - -pub struct TripleStore { - claims: Vec, - spo: VecSet, - pos: VecSet, - osp: VecSet, +pub type Instantiations = MapStack; + +#[derive(Default)] +pub struct Reasoner { + claims: Vec, + spog: VecSet, + posg: VecSet, + ospg: VecSet, + gspo: VecSet, + gpos: VecSet, + gosp: VecSet, } -impl TripleStore { - pub fn new() -> Self { - Self { - claims: Vec::new(), - spo: VecSet::new(), - pos: VecSet::new(), - osp: VecSet::new(), - } - } - - pub fn contains(&self, triple: &Triple) -> bool { - self.spo - .as_slice() - .binary_search_by(|e| self.claims[*e].cmp_spo(&triple)) - .is_ok() +impl Reasoner { + pub fn contains(&self, quad: &Quad) -> bool { + let Quad { s, p, o, g } = quad; + !(*s, *p, *o, *g).search(self).is_empty() } - pub fn insert(&mut self, triple: Triple) { - if !self.contains(&triple) { - let mut claims = core::mem::replace(&mut self.claims, Vec::new()); - claims.push(triple); - let ni = claims.len() - 1; - self.spo.insert(ni, |a, b| claims[*a].cmp_spo(&claims[*b])); - self.pos.insert(ni, |a, b| claims[*a].cmp_pos(&claims[*b])); - self.osp.insert(ni, |a, b| claims[*a].cmp_osp(&claims[*b])); - self.claims = claims; + pub fn insert(&mut self, quad: Quad) { + if !self.contains(&quad) { + self.claims.push(quad); + let ni = self.claims.len() - 1; + let cl = core::mem::replace(&mut self.claims, Vec::new()); + self.spog.insert(ni, |a, b| Spog::qord(&cl[*a], &cl[*b])); + self.posg.insert(ni, |a, b| Posg::qord(&cl[*a], &cl[*b])); + self.ospg.insert(ni, |a, b| Ospg::qord(&cl[*a], &cl[*b])); + self.gspo.insert(ni, |a, b| Gspo::qord(&cl[*a], &cl[*b])); + self.gpos.insert(ni, |a, b| Gpos::qord(&cl[*a], &cl[*b])); + self.gosp.insert(ni, |a, b| Gosp::qord(&cl[*a], &cl[*b])); + self.claims = cl; } - debug_assert_eq!(self.spo.as_slice().len(), self.claims.len()); - debug_assert_eq!(self.pos.as_slice().len(), self.claims.len()); - debug_assert_eq!(self.osp.as_slice().len(), self.claims.len()); + debug_assert!([ + self.claims.len(), + 
self.spog.as_slice().len(), + self.posg.as_slice().len(), + self.ospg.as_slice().len(), + self.gspo.as_slice().len(), + self.gpos.as_slice().len(), + self.gosp.as_slice().len(), + ] + .windows(2) + .all(|wn| wn[0] == wn[1])); } - /// Find in this tuple store all possible valid instantiations of rule. Report the + /// Find in this store all possible valid instantiations of rule. Report the /// instantiations through a callback. /// TODO: This function is recursive, but not tail recursive. Rules that are too long may /// consume the stack. pub fn apply( &self, - rule: &mut [Triple], + rule: &mut [Quad], instantiations: &mut Instantiations, cb: &mut impl FnMut(&Instantiations), ) { - let (strictest, mut less_strict) = - if let Some(s) = self.pop_strictest_requirement(rule, instantiations) { - s - } else { + let st = self.pop_strictest_requirement(rule, instantiations); + let (strictest, mut less_strict) = match st { + Some(s) => s, + None => { cb(instantiations); return; - }; + } + }; // For every possible concrete instantiation fulfilling the requirement, bind the slots // in the requirement to the instantiation then recurse. for index in self.matches(strictest, instantiations) { - let triple = &self.claims[*index]; + let quad = &self.claims[*index]; let to_write = [ - (strictest.subject.0, triple.subject.0), - (strictest.property.0, triple.property.0), - (strictest.object.0, triple.object.0), + (strictest.s.0, quad.s.0), + (strictest.p.0, quad.p.0), + (strictest.o.0, quad.o.0), + (strictest.g.0, quad.g.0), ]; for (k, v) in &to_write { debug_assert!( @@ -154,28 +152,31 @@ impl TripleStore { /// Return a slice representing all possible matches to the pattern provided. /// pattern is in a local scope. instantiations is a partial translation of that /// local scope to the global scope represented by self.claims - fn matches(&self, pattern: &Triple, instantiations: &Instantiations) -> &[usize] { + fn matches(&self, pattern: &Quad, instantiations: &Instantiations) -> &[usize] { let inst = instantiations.as_ref(); - let pattern: (Option, Option, Option) = ( - inst.get(&pattern.subject.0).cloned().map(Subj), - inst.get(&pattern.property.0).cloned().map(Prop), - inst.get(&pattern.object.0).cloned().map(Obje), + let pattern: (Option, Option, Option, Option) = ( + inst.get(&pattern.s.0).cloned().map(Subj), + inst.get(&pattern.p.0).cloned().map(Prop), + inst.get(&pattern.o.0).cloned().map(Obje), + inst.get(&pattern.g.0).cloned().map(Grap), ); match pattern { - (Some(subject), Some(property), Some(object)) => self.spo.range(|b| { - self.claims[*b].cmp_spo(&Triple { - subject, - property, - object, - }) - }), - (Some(s), Some(p), None) => self.spo.range(|b| self.claims[*b].cmp_sp((s, p))), - (Some(s), None, Some(o)) => self.osp.range(|b| self.claims[*b].cmp_os((o, s))), - (None, Some(p), Some(o)) => self.pos.range(|b| self.claims[*b].cmp_po((p, o))), - (Some(s), None, None) => self.spo.range(|b| self.claims[*b].subject.cmp(&s)), - (None, Some(p), None) => self.pos.range(|b| self.claims[*b].property.cmp(&p)), - (None, None, Some(o)) => self.osp.range(|b| self.claims[*b].object.cmp(&o)), - (None, None, None) => self.spo.as_slice(), + (Some(s), Some(p), Some(o), Some(g)) => (s, p, o, g).search(self), + (Some(s), Some(p), Some(o), None) => (s, p, o).search(self), + (Some(s), Some(p), None, Some(g)) => (g, s, p).search(self), + (Some(s), Some(p), None, None) => (s, p).search(self), + (Some(s), None, Some(o), Some(g)) => (g, o, s).search(self), + (Some(s), None, Some(o), None) => (o, s).search(self), + 
(Some(s), None, None, Some(g)) => (g, s).search(self), + (Some(s), None, None, None) => (s,).search(self), + (None, Some(p), Some(o), Some(g)) => (g, p, o).search(self), + (None, Some(p), Some(o), None) => (p, o).search(self), + (None, Some(p), None, Some(g)) => (g, p).search(self), + (None, Some(p), None, None) => (p,).search(self), + (None, None, Some(o), Some(g)) => (g, o).search(self), + (None, None, Some(o), None) => (o,).search(self), + (None, None, None, Some(g)) => (g,).search(self), + (None, None, None, None) => self.spog.as_slice(), } } @@ -187,9 +188,9 @@ impl TripleStore { /// This function changes the ordering of the rule. fn pop_strictest_requirement<'rule>( &self, - rule: &'rule mut [Triple], + rule: &'rule mut [Quad], instantiations: &Instantiations, - ) -> Option<(&'rule Triple, &'rule mut [Triple])> { + ) -> Option<(&'rule Quad, &'rule mut [Quad])> { let index_strictest = (0..rule.len()) .min_by_key(|index| self.matches(&rule[*index], instantiations).len())?; rule.swap(0, index_strictest); @@ -198,93 +199,139 @@ impl TripleStore { } } +trait Indexed { + fn target(rs: &Reasoner) -> &VecSet; + fn qcmp(&self, quad: &Quad) -> Ordering; + + fn search<'a>(&'_ self, rs: &'a Reasoner) -> &'a [usize] { + Self::target(rs).range(|e| self.qcmp(&rs.claims[*e]).reverse()) + } +} + +macro_rules! impl_indexed { + ($t:ty, $idx:tt, ( $($fields:tt,)* )) => { + impl Indexed for $t { + fn target(rs: &Reasoner) -> &VecSet { + &rs.$idx + } + + fn qcmp(&self, quad: &Quad) -> Ordering { + self.cmp(&($(quad. $fields,)*)) + } + } + }; +} + +impl_indexed!(Spog, spog, (s, p, o, g,)); +impl_indexed!(Posg, posg, (p, o, s, g,)); +impl_indexed!(Ospg, ospg, (o, s, p, g,)); +impl_indexed!(Gspo, gspo, (g, s, p, o,)); +impl_indexed!(Gpos, gpos, (g, p, o, s,)); +impl_indexed!(Gosp, gosp, (g, o, s, p,)); +impl_indexed!(Spo, spog, (s, p, o,)); +impl_indexed!(Pos, posg, (p, o, s,)); +impl_indexed!(Osp, ospg, (o, s, p,)); +impl_indexed!(Gsp, gspo, (g, s, p,)); +impl_indexed!(Gpo, gpos, (g, p, o,)); +impl_indexed!(Gos, gosp, (g, o, s,)); +impl_indexed!(Sp, spog, (s, p,)); +impl_indexed!(Po, posg, (p, o,)); +impl_indexed!(Os, ospg, (o, s,)); +impl_indexed!(Gs, gspo, (g, s,)); +impl_indexed!(Gp, gpos, (g, p,)); +impl_indexed!(Go, gosp, (g, o,)); +impl_indexed!(S, spog, (s,)); +impl_indexed!(P, posg, (p,)); +impl_indexed!(O, ospg, (o,)); +impl_indexed!(G, gspo, (g,)); + +trait QuadOrder { + fn qord(a: &Quad, b: &Quad) -> Ordering; +} + +macro_rules! impl_quad_order { + ($t:ty, ( $($fields:tt,)* )) => { + impl QuadOrder for $t { + fn qord(a: &Quad, b: &Quad) -> Ordering { + ($(a. $fields,)*).cmp(&($(b. 
$fields,)*)) + } + } + }; +} + +impl_quad_order!(Spog, (s, p, o, g,)); +impl_quad_order!(Posg, (p, o, s, g,)); +impl_quad_order!(Ospg, (o, s, p, g,)); +impl_quad_order!(Gspo, (g, s, p, o,)); +impl_quad_order!(Gpos, (g, p, o, s,)); +impl_quad_order!(Gosp, (g, o, s, p,)); + #[cfg(test)] mod tests { use super::*; - use crate::common::{inc, Bound, Unbound}; - use crate::reasoner::{Triple, TripleStore}; - use crate::rule::{Entity, LowRule, Rule}; + use crate::common::inc; + use crate::rule::{ + Entity::{self, Bound as B, Unbound as U}, + LowRule, Rule, + }; use crate::translator::Translator; - use crate::Claim; use alloc::collections::{BTreeMap, BTreeSet}; - use core::iter::once; #[test] fn ancestry_raw() { #[derive(Clone, Debug)] struct Rule { - if_all: Vec, - then: Vec, + if_all: Vec, + then: Vec, inst: Instantiations, } // entities - let mut count = 0u32; - let parent = inc(&mut count); - let ancestor = inc(&mut count); + let mut count = 0usize; + let parent = Prop(inc(&mut count)); + let ancestor = Prop(inc(&mut count)); let nodes: Vec<_> = (0..4).map(|_| inc(&mut count)).collect(); + let default_graph = Grap(inc(&mut count)); - // initial facts: (n0 parent n1), (n1 parent n2), ... (n[l-2] parent n[l-1]) - // (n[l-1] parent n0) - let facts: Vec = nodes + // initial facts: (n0 parent n1 dg), (n1 parent n2 dg), ... (n[l-2] parent n[l-1] dg) + // (n[l-1] parent n0 dg) + // dg: default_graph + let facts = nodes .iter() .zip(nodes.iter().cycle().skip(1)) - .map(|(a, b)| Triple { - subject: Subj(*a), - property: Prop(parent), - object: Obje(*b), - }) - .collect(); + .map(|(a, b)| Quad { + s: Subj(*a), + p: parent, + o: Obje(*b), + g: default_graph, + }); - // rules: (?a parent ?b) -> (?a ancestor ?b) - // (?a ancestor ?b) and (?b ancestor ?c) -> (?a ancestor ?c) + // rules: (?a parent ?b ?g) -> (?a ancestor ?b ?g) + // (?a ancestor ?b ?g) and (?b ancestor ?c ?g) -> (?a ancestor ?c ?g) let rules = vec![ - // (?a parent ?b) -> (?a ancestor ?b) + // (?a parent ?b ?g) -> (?a ancestor ?b ?g) Rule { - if_all: vec![Triple { - subject: Subj(0), - property: Prop(1), - object: Obje(2), - }], - then: vec![Triple { - subject: Subj(0), - property: Prop(3), - object: Obje(2), - }], - inst: [(1u32, parent), (3u32, ancestor)].iter().cloned().collect(), + if_all: quads(&[[0, 1, 2, 4]]), + then: quads(&[[0, 3, 2, 4]]), + inst: [(1, parent.0), (3, ancestor.0)].iter().cloned().collect(), }, - // (?a ancestor ?b) and (?b ancestor ?c) -> (?a ancestor ?c) + // (?a ancestor ?b ?g) and (?b ancestor ?c ?g) -> (?a ancestor ?c ?g) Rule { - if_all: vec![ - Triple { - subject: Subj(1), - property: Prop(0), - object: Obje(2), - }, - Triple { - subject: Subj(2), - property: Prop(0), - object: Obje(3), - }, - ], - then: vec![Triple { - subject: Subj(1), - property: Prop(0), - object: Obje(3), - }], - inst: [(0u32, ancestor)].iter().cloned().collect(), + if_all: quads(&[[1, 0, 2, 4], [2, 0, 3, 4]]), + then: quads(&[[1, 0, 3, 4]]), + inst: [(0, ancestor.0)].iter().cloned().collect(), }, ]; // expected logical result: for all a for all b (a ancestor b) - let mut ts = TripleStore::new(); + let mut ts = Reasoner::default(); for fact in facts { ts.insert(fact); } // This test only does one round of reasoning, no forward chaining. We will need a forward // chaining test eventually. 
- let mut results = Vec::>::new(); + let mut results = Vec::>::new(); for rule in rules { let Rule { mut if_all, @@ -296,19 +343,25 @@ mod tests { }); } - // We expect the first rule, (?a parent ?b) -> (?a ancestor ?b), to activate once for every - // parent relation. - // The second rule, (?a ancestor ?b) and (?b ancestor ?c) -> (?a ancestor ?c), should not - // activate because results from application of first rule have not been added to the rdf - // store so there are there are are not yet any ancestry relations present. - let mut expected_intantiations: Vec> = nodes + // We expect the first rule, (?a parent ?b ?g) -> (?a ancestor ?b ?g), to activate once for + // every parent relation. + // The second rule, (?a ancestor ?b ?g) and (?b ancestor ?c ?g) -> (?a ancestor ?c ?g), + // should not activate because results from application of first rule have not been added + // to the rdf store so there are there are are not yet any ancestry relations present. + let mut expected_intantiations: Vec> = nodes .iter() .zip(nodes.iter().cycle().skip(1)) .map(|(a, b)| { - [(0, *a), (1, parent), (2, *b), (3, ancestor)] - .iter() - .cloned() - .collect() + [ + (0, *a), + (1, parent.0), + (2, *b), + (3, ancestor.0), + (4, default_graph.0), + ] + .iter() + .cloned() + .collect() }) .collect(); results.sort(); @@ -316,13 +369,15 @@ mod tests { assert_eq!(results, expected_intantiations); } - #[test] - fn swap_behaviour() { - let a = &mut [1, 2, 3]; - a.swap(0, 1); - assert_eq!(a, &[2, 1, 3]); - a.swap(0, 0); - assert_eq!(a, &[2, 1, 3]); + fn quads(qs: &[[usize; 4]]) -> Vec { + qs.iter() + .map(|[s, p, o, g]| Quad { + s: Subj(*s), + p: Prop(*p), + o: Obje(*o), + g: Grap(*g), + }) + .collect() } #[test] @@ -330,51 +385,55 @@ mod tests { // load data let parent = "parent"; let ancestor = "ancestor"; + let default_graph = "default_graph"; let nodes: Vec<_> = (0..10).map(|a| format!("n{}", a)).collect(); - // create a translator to map human readable names to u32 + // create a translator to map human readable names to unique id let tran: Translator<&str> = nodes .iter() .map(AsRef::as_ref) - .chain(once(parent)) - .chain(once(ancestor)) + .chain([parent, ancestor, default_graph].iter().cloned()) .collect(); // load rules - let rules: &[[&[Claim>]; 2]] = &[ + let rules: &[[&[[Entity<&str, &str>; 4]]; 2]] = &[ [ - &[[Unbound("a"), Bound(&parent), Unbound("b")]], - &[[Unbound("a"), Bound(&ancestor), Unbound("b")]], + &[[U("a"), B(parent), U("b"), U("g")]], + &[[U("a"), B(ancestor), U("b"), U("g")]], ], [ &[ - [Unbound("a"), Bound(&ancestor), Unbound("b")], - [Unbound("b"), Bound(&ancestor), Unbound("c")], + [U("a"), B(ancestor), U("b"), U("g")], + [U("b"), B(ancestor), U("c"), U("g")], ], - &[[Unbound("a"), Bound(&ancestor), Unbound("c")]], + &[[U("a"), B(ancestor), U("c"), U("g")]], ], ]; let mut rrs: Vec = rules.iter().map(|rule| low_rule(*rule, &tran)).collect(); // load data into reasoner - let mut ts = TripleStore::new(); + let mut ts = Reasoner::default(); // initial facts: (n_a parent n_a+1), (n_last parent n_0) - let initial_claims: Vec<(&str, &str, &str)> = nodes + let initial_claims: Vec<[&str; 4]> = nodes .iter() .zip(nodes.iter().cycle().skip(1)) - .map(|(a, b)| (a.as_str(), parent, b.as_str())) + .map(|(a, b)| [a.as_str(), parent, b.as_str(), default_graph]) .collect(); - for (s, p, o) in &initial_claims { - ts.insert(Triple::from_tuple( - tran.forward(s).unwrap(), - tran.forward(p).unwrap(), - tran.forward(o).unwrap(), - )); + for [s, p, o, g] in &initial_claims { + ts.insert( + [ + 
tran.forward(s).unwrap(), + tran.forward(p).unwrap(), + tran.forward(o).unwrap(), + tran.forward(g).unwrap(), + ] + .into(), + ); } // reason loop { - let mut to_add = BTreeSet::::new(); + let mut to_add = BTreeSet::::new(); for rule in rrs.iter_mut() { let mut if_all = &mut rule.if_all; let mut inst = &mut rule.inst; @@ -382,13 +441,15 @@ mod tests { ts.apply(&mut if_all, &mut inst, &mut |inst| { for implied in then { let inst = inst.as_ref(); - let new_triple = Triple::from_tuple( - inst[&implied.subject.0], - inst[&implied.property.0], - inst[&implied.object.0], - ); - if !ts.contains(&new_triple) { - to_add.insert(new_triple); + let new: Quad = [ + inst[&implied.s.0], + inst[&implied.p.0], + inst[&implied.o.0], + inst[&implied.g.0], + ] + .into(); + if !ts.contains(&new) { + to_add.insert(new); } } }); @@ -402,38 +463,41 @@ mod tests { } // convert results back to human readable tuples - let claims: BTreeSet<(&str, &str, &str)> = ts + let claims: BTreeSet<[&str; 4]> = ts .claims .iter() - .map(|triple| { - ( - *tran.back(triple.subject.0).unwrap(), - *tran.back(triple.property.0).unwrap(), - *tran.back(triple.object.0).unwrap(), - ) + .map(|Quad { s, p, o, g }| { + [ + *tran.back(s.0).unwrap(), + *tran.back(p.0).unwrap(), + *tran.back(o.0).unwrap(), + *tran.back(g.0).unwrap(), + ] }) .collect(); // assert results - let expected_claims: BTreeSet<(&str, &str, &str)> = initial_claims - .iter() - .cloned() - .chain(nodes.iter().flat_map(|a| { - nodes - .iter() - .map(move |b| (a.as_str(), ancestor, b.as_str())) - })) + let expected_claims: BTreeSet<[&str; 4]> = pairs(nodes.iter()) + .map(|(a, b)| [a.as_str(), ancestor, b.as_str(), default_graph]) + .chain(initial_claims.iter().cloned()) .collect(); assert_eq!(claims, expected_claims); } /// panics if an unbound name is implied - /// pancis if rule contains bound names that are not present in Translator - fn low_rule(rule: [&[Claim>]; 2], trans: &Translator<&str>) -> LowRule { + /// panics if rule contains bound names that are not present in Translator + fn low_rule(rule: [&[[Entity<&str, &str>; 4]]; 2], trans: &Translator<&str>) -> LowRule { let [if_all, then] = rule; Rule::<&str, &str>::create(if_all.to_vec(), then.to_vec()) .unwrap() .lower(trans) .unwrap() } + + fn pairs<'a, T: 'a + Clone, I: 'a + Iterator + Clone>( + inp: I, + ) -> impl 'a + Iterator { + inp.clone() + .flat_map(move |a| inp.clone().map(move |b| (a.clone(), b))) + } } diff --git a/src/rule.rs b/src/rule.rs index 34a6561..8621a36 100644 --- a/src/rule.rs +++ b/src/rule.rs @@ -3,9 +3,8 @@ //! description datatype that can be lowered to the reasoner's rule representation. 
use crate::common::inc; -use crate::reasoner::{self, Triple}; +use crate::reasoner::{Instantiations, Quad}; use crate::translator::Translator; -use crate::Claim; use alloc::collections::BTreeMap; use alloc::collections::BTreeSet; use core::fmt::Debug; @@ -17,9 +16,9 @@ use core::fmt::Display; // // TODO: find a way to make fields non-public to protect invariant pub(crate) struct LowRule { - pub if_all: Vec, // contains locally scoped names - pub then: Vec, // contains locally scoped names - pub inst: reasoner::Instantiations, // partially maps the local scope to some global scope + pub if_all: Vec, // contains locally scoped names + pub then: Vec, // contains locally scoped names + pub inst: Instantiations, // partially maps the local scope to some global scope } #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)] @@ -50,14 +49,14 @@ impl Entity { // invariants held: // unbound names may not exists in `then` unless they exist also in `if_all` pub struct Rule { - if_all: Vec>>, - then: Vec>>, + if_all: Vec<[Entity; 4]>, + then: Vec<[Entity; 4]>, } impl<'a, Unbound: Ord + Clone, Bound: Ord> Rule { pub fn create( - if_all: Vec>>, - then: Vec>>, + if_all: Vec<[Entity; 4]>, + then: Vec<[Entity; 4]>, ) -> Result> { let unbound_if = if_all.iter().flatten().filter_map(Entity::as_unbound); let unbound_then = then.iter().flatten().filter_map(Entity::as_unbound); @@ -74,31 +73,21 @@ impl<'a, Unbound: Ord + Clone, Bound: Ord> Rule { pub(crate) fn lower(&self, tran: &Translator) -> Result> { // There are three types of name at play here. // - human names are represented as Entities - // - local names are local to the rule we are creating. they are represented as u32 - // - global names are from the translator. they are represented as u32 + // - local names are local to the rule we are creating. they are represented as usize + // - global names assigned by Translator. they are represented as usize // assign local names to each human name // local names will be in a continous range from 0. 
// smaller local names will represent unbound entities, larger ones will represent bound - let mut next_local = 0u32; - let unbound_map = { - let mut unbound_map: BTreeMap<&Unbound, u32> = BTreeMap::new(); - for unbound in self.if_all.iter().flatten().filter_map(Entity::as_unbound) { - unbound_map - .entry(&unbound) - .or_insert_with(|| inc(&mut next_local)); - } - unbound_map - }; - let bound_map = { - let mut bound_map: BTreeMap<&Bound, u32> = BTreeMap::new(); - for bound in self.iter_entities().filter_map(Entity::as_bound) { - bound_map - .entry(&bound) - .or_insert_with(|| inc(&mut next_local)); - } - bound_map - }; + let mut next_local = 0usize; + let unbound_map: BTreeMap<&Unbound, usize> = assign_ids( + self.if_all.iter().flatten().filter_map(Entity::as_unbound), + &mut next_local, + ); + let bound_map = assign_ids( + self.iter_entities().filter_map(Entity::as_bound), + &mut next_local, + ); debug_assert!( bound_map.values().all(|bound_local| unbound_map .values() @@ -106,7 +95,7 @@ impl<'a, Unbound: Ord + Clone, Bound: Ord> Rule { "unbound names are smaller than bound names" ); debug_assert_eq!( - (0..next_local).collect::>(), + (0..next_local).collect::>(), unbound_map .values() .chain(bound_map.values()) @@ -115,19 +104,13 @@ impl<'a, Unbound: Ord + Clone, Bound: Ord> Rule { "no names slots are wasted" ); debug_assert_eq!( - next_local as usize, + next_local, unbound_map.values().chain(bound_map.values()).count(), "no duplicate assignments" ); - debug_assert!(self - .if_all - .iter() - .flatten() - .filter_map(Entity::as_unbound) - .all(|unbound_then| { unbound_map.contains_key(&unbound_then) })); // gets the local name for a human_name - let local_name = |entity: &Entity| -> u32 { + let local_name = |entity: &Entity| -> usize { match entity { Entity::Unbound(s) => unbound_map[s], Entity::Bound(s) => bound_map[s], @@ -135,9 +118,17 @@ impl<'a, Unbound: Ord + Clone, Bound: Ord> Rule { }; // converts a human readable restriction list to a list of locally scoped machine oriented // restrictions - let to_requirements = |hu: &[Claim>]| -> Vec { + let to_requirements = |hu: &[[Entity; 4]]| -> Vec { hu.iter() - .map(|[s, p, o]| Triple::from_tuple(local_name(&s), local_name(&p), local_name(&o))) + .map(|[s, p, o, g]| { + [ + local_name(&s), + local_name(&p), + local_name(&o), + local_name(&g), + ] + .into() + }) .collect() }; @@ -163,14 +154,7 @@ impl<'a, Unbound: Ord, Bound> Rule { .iter() .flatten() .filter_map(Entity::as_unbound) - .filter_map(move |unbound| { - if listed.contains(unbound) { - None - } else { - listed.insert(unbound); - Some(unbound) - } - }) + .filter(move |unbound| listed.insert(unbound)) } } @@ -179,11 +163,11 @@ impl<'a, Unbound, Bound> Rule { self.if_all.iter().chain(self.then.iter()).flatten() } - pub(crate) fn if_all(&self) -> &[Claim>] { + pub fn if_all(&self) -> &[[Entity; 4]] { &self.if_all } - pub(crate) fn then(&self) -> &[Claim>] { + pub fn then(&self) -> &[[Entity; 4]] { &self.then } } @@ -219,10 +203,21 @@ impl std::error::Error for InvalidRule where InvalidRule(T); +// assign a unique id to each unique element in the input +// starts counting with the current next_id. 
As ids are assigned +// the next_id value is incremented so it always reperesents the next available id +fn assign_ids(inp: impl Iterator, next_id: &mut usize) -> BTreeMap { + let mut ret: BTreeMap = BTreeMap::new(); + for unbound in inp { + ret.entry(unbound).or_insert_with(|| inc(next_id)); + } + ret +} + #[cfg(test)] mod test { + use super::Entity::{Bound, Unbound}; use super::*; - use crate::common::{Bound, Unbound}; use core::iter::FromIterator; #[test] @@ -232,14 +227,14 @@ mod test { // ?a should be a separate entity from let rule = Rule::<&str, &str> { - if_all: vec![[Unbound("a"), Bound("a"), Unbound("b")]], + if_all: vec![[Unbound("a"), Bound("a"), Unbound("b"), Unbound("g")]], then: vec![], }; let trans: Translator<&str> = ["a"].iter().cloned().collect(); let rr = rule.lower(&trans).unwrap(); // ?a != - assert_ne!(rr.if_all[0].subject.0, rr.if_all[0].property.0); + assert_ne!(rr.if_all[0].s.0, rr.if_all[0].p.0); } #[test] @@ -251,105 +246,115 @@ mod test { { let rulea = Rule:: { - if_all: vec![[Unbound(0xa), Bound("parent"), Unbound(0xb)]], - then: vec![[Unbound(0xa), Bound("ancestor"), Unbound(0xb)]], + if_all: vec![[Unbound(0xa), Bound("parent"), Unbound(0xb), Unbound(0xc)]], + then: vec![[Unbound(0xa), Bound("ancestor"), Unbound(0xb), Unbound(0xc)]], }; let re_rulea = rulea.lower(&trans).unwrap(); let keys = [ - re_rulea.if_all[0].subject.0, - re_rulea.if_all[0].property.0, - re_rulea.if_all[0].object.0, - re_rulea.then[0].subject.0, - re_rulea.then[0].property.0, - re_rulea.then[0].object.0, + re_rulea.if_all[0].s.0, + re_rulea.if_all[0].p.0, + re_rulea.if_all[0].o.0, + re_rulea.if_all[0].g.0, + re_rulea.then[0].s.0, + re_rulea.then[0].p.0, + re_rulea.then[0].o.0, + re_rulea.then[0].g.0, ]; - let vals: Vec>> = keys + let vals: Vec> = keys .iter() .map(|local_name| { re_rulea .inst .as_ref() .get(&local_name) - .map(|global_name| trans.back(*global_name)) + .map(|global_name| trans.back(*global_name).unwrap().clone()) }) .collect(); assert_eq!( &vals, &[ None, - Some(Some(&"parent")), + Some("parent"), + None, None, None, - Some(Some(&"ancestor")), + Some("ancestor"), + None, None, ] ); let ifa = re_rulea.if_all; let then = re_rulea.then; - assert_ne!(ifa[0].property.0, then[0].property.0); // "parent" != "ancestor" - assert_eq!(ifa[0].subject.0, then[0].subject.0); // a == a - assert_eq!(ifa[0].object.0, then[0].object.0); // b == b + assert_ne!(ifa[0].p.0, then[0].p.0); // "parent" != "ancestor" + assert_eq!(ifa[0].s.0, then[0].s.0); // a == a + assert_eq!(ifa[0].o.0, then[0].o.0); // b == b } { let ruleb = Rule::<&str, &str> { if_all: vec![ - [Unbound("a"), Bound("ancestor"), Unbound("b")], - [Unbound("b"), Bound("ancestor"), Unbound("c")], + [Unbound("a"), Bound("ancestor"), Unbound("b"), Unbound("g")], + [Unbound("b"), Bound("ancestor"), Unbound("c"), Unbound("g")], ], - then: vec![[Unbound("a"), Bound("ancestor"), Unbound("c")]], + then: vec![[Unbound("a"), Bound("ancestor"), Unbound("c"), Unbound("g")]], }; let re_ruleb = ruleb.lower(&trans).unwrap(); let keys = [ - re_ruleb.if_all[0].subject.0, - re_ruleb.if_all[0].property.0, - re_ruleb.if_all[0].object.0, - re_ruleb.if_all[1].subject.0, - re_ruleb.if_all[1].property.0, - re_ruleb.if_all[1].object.0, - re_ruleb.then[0].subject.0, - re_ruleb.then[0].property.0, - re_ruleb.then[0].object.0, + re_ruleb.if_all[0].s.0, + re_ruleb.if_all[0].p.0, + re_ruleb.if_all[0].o.0, + re_ruleb.if_all[0].g.0, + re_ruleb.if_all[1].s.0, + re_ruleb.if_all[1].p.0, + re_ruleb.if_all[1].o.0, + re_ruleb.if_all[1].g.0, + re_ruleb.then[0].s.0, 
+ re_ruleb.then[0].p.0, + re_ruleb.then[0].o.0, + re_ruleb.then[0].g.0, ]; - let vals: Vec>> = keys + let vals: Vec> = keys .iter() .map(|local_name| { re_ruleb .inst .as_ref() .get(&local_name) - .map(|global_name| trans.back(*global_name)) + .map(|global_name| trans.back(*global_name).unwrap().clone()) }) .collect(); assert_eq!( &vals, &[ None, - Some(Some(&"ancestor")), + Some("ancestor"), + None, + None, None, + Some("ancestor"), None, - Some(Some(&"ancestor")), None, None, - Some(Some(&"ancestor")), + Some("ancestor"), + None, None, ] ); let ifa = re_ruleb.if_all; let then = re_ruleb.then; - assert_ne!(ifa[0].subject.0, ifa[1].subject.0); // a != b - assert_ne!(ifa[0].object.0, then[0].object.0); // b != c + assert_ne!(ifa[0].s.0, ifa[1].s.0); // a != b + assert_ne!(ifa[0].o.0, then[0].o.0); // b != c // "ancestor" == "ancestor" == "ancestor" - assert_eq!(ifa[0].property.0, ifa[1].property.0); - assert_eq!(ifa[1].property.0, then[0].property.0); + assert_eq!(ifa[0].p.0, ifa[1].p.0); + assert_eq!(ifa[1].p.0, then[0].p.0); - assert_eq!(ifa[0].subject.0, then[0].subject.0); // a == a - assert_eq!(ifa[1].object.0, then[0].object.0); // b == b + assert_eq!(ifa[0].s.0, then[0].s.0); // a == a + assert_eq!(ifa[1].o.0, then[0].o.0); // b == b } } @@ -357,13 +362,22 @@ mod test { fn lower_no_translation_err() { let trans = Translator::<&str>::from_iter(vec![]); - let r = Rule::create(vec![[Unbound("a"), Bound("unknown"), Unbound("b")]], vec![]).unwrap(); + let r = Rule::create( + vec![[Unbound("a"), Bound("unknown"), Unbound("b"), Unbound("g")]], + vec![], + ) + .unwrap(); let err = r.lower(&trans).unwrap_err(); assert_eq!(err, NoTranslation(&"unknown")); let r = Rule::<&str, &str>::create( vec![], - vec![[Bound("unknown"), Bound("unknown"), Bound("unknown")]], + vec![[ + Bound("unknown"), + Bound("unknown"), + Bound("unknown"), + Bound("unknown"), + ]], ) .unwrap(); let err = r.lower(&trans).unwrap_err(); @@ -372,10 +386,12 @@ mod test { #[test] fn create_invalid() { - Rule::<&str, &str>::create(vec![], vec![[Unbound("a"), Unbound("a"), Unbound("a")]]) - .unwrap_err(); + use Bound as B; + use Unbound as U; + + Rule::<&str, &str>::create(vec![], vec![[U("a"), U("a"), U("a"), U("a")]]).unwrap_err(); - // Its unfortunate that this one is illeagal but I have yet to find a way around the + // Its unfortunate that this one is illegal but I have yet to find a way around the // limitation. Can you figure out how to do this safely? // // if [super? claims [minor? mayclaim pred?]] @@ -387,23 +403,23 @@ mod test { // let ret = Rule::<&str, &str>::create( vec![ - // if [super? claims [minor? mayclaim pred?]] - [Unbound("super"), Bound("claims"), Unbound("claim1")], - [Unbound("claim1"), Bound("subject"), Unbound("minor")], - [Unbound("claim1"), Bound("predicate"), Bound("mayclaim")], - [Unbound("claim1"), Bound("object"), Unbound("pred")], - // and [minor? claims [s? pred? o?]] - [Unbound("minor"), Bound("claims"), Unbound("claim2")], - [Unbound("claim2"), Bound("subject"), Unbound("s")], - [Unbound("claim2"), Bound("predicate"), Unbound("pred")], - [Unbound("claim2"), Bound("object"), Unbound("o")], + // if [super? claims [minor? mayclaim pred?] g?] + [U("super"), B("claims"), U("claim1"), U("g")], + [U("claim1"), B("subject"), U("minor"), U("g")], + [U("claim1"), B("predicate"), B("mayclaim"), U("g")], + [U("claim1"), B("object"), U("pred"), U("g")], + // and [minor? claims [s? pred? o?] g?] 
+ [U("minor"), B("claims"), U("claim2"), U("g")], + [U("claim2"), B("subject"), U("s"), U("g")], + [U("claim2"), B("predicate"), U("pred"), U("g")], + [U("claim2"), B("object"), U("o"), U("g")], ], vec![ - // then [super? claims [s? pred? o?]] - [Unbound("super"), Bound("claims"), Unbound("claim3")], - [Unbound("claim3"), Bound("subject"), Unbound("s")], - [Unbound("claim3"), Bound("predicate"), Unbound("pred")], - [Unbound("claim3"), Bound("object"), Unbound("o")], + // then [super? claims [s? pred? o?] g?] + [U("super"), B("claims"), U("claim3"), U("g")], + [U("claim3"), B("subject"), U("s"), U("g")], + [U("claim3"), B("predicate"), U("pred"), U("g")], + [U("claim3"), B("object"), U("o"), U("g")], ], ); assert_eq!(ret, Err(InvalidRule::UnboundImplied("claim3"))); @@ -451,7 +467,7 @@ mod test { #[test] fn serde() { #[derive(Debug, serde::Serialize, serde::Deserialize, PartialEq, Eq)] - enum RdfNode { + enum Term { Blank(String), Iri(String), Literal { @@ -460,6 +476,7 @@ mod test { #[serde(skip_serializing_if = "Option::is_none")] language: Option, }, + DefaultGraph, } let jsonrule = serde_json::json![{ @@ -468,11 +485,13 @@ mod test { { "Unbound": "pig" }, { "Bound": { "Iri": "https://example.com/Ability" } }, { "Bound": { "Iri": "https://example.com/Flight" } }, + { "Bound": "DefaultGraph" }, ], [ { "Unbound": "pig" }, { "Bound": { "Iri": "http://www.w3.org/1999/02/22-rdf-syntax-ns#type" } }, { "Bound": { "Iri": "https://example.com/Pig" } }, + { "Bound": "DefaultGraph" }, ], ], "then": [ @@ -487,45 +506,49 @@ mod test { }, }, }, + { "Bound": "DefaultGraph" }, ], ], }]; - let rule = Rule:: { + let rule = Rule:: { if_all: vec![ [ Entity::Unbound("pig".to_string()), - Entity::Bound(RdfNode::Iri("https://example.com/Ability".to_string())), - Entity::Bound(RdfNode::Iri("https://example.com/Flight".to_string())), + Entity::Bound(Term::Iri("https://example.com/Ability".to_string())), + Entity::Bound(Term::Iri("https://example.com/Flight".to_string())), + Entity::Bound(Term::DefaultGraph), ], [ Entity::Unbound("pig".to_string()), - Entity::Bound(RdfNode::Iri( + Entity::Bound(Term::Iri( "http://www.w3.org/1999/02/22-rdf-syntax-ns#type".to_string(), )), - Entity::Bound(RdfNode::Iri("https://example.com/Pig".to_string())), + Entity::Bound(Term::Iri("https://example.com/Pig".to_string())), + Entity::Bound(Term::DefaultGraph), ], ], then: vec![[ - Entity::Bound(RdfNode::Iri("did:dock:bddap".to_string())), - Entity::Bound(RdfNode::Iri( + Entity::Bound(Term::Iri("did:dock:bddap".to_string())), + Entity::Bound(Term::Iri( "http://xmlns.com/foaf/spec/#term_firstName".to_string(), )), - Entity::Bound(RdfNode::Literal { + Entity::Bound(Term::Literal { value: "Gorgadon".to_string(), datatype: "http://www.w3.org/1999/02/22-rdf-syntax-ns#PlainLiteral".to_string(), language: None, }), + Entity::Bound(Term::DefaultGraph), ]], }; assert_eq!( - &serde_json::from_value::>(jsonrule.clone()).unwrap(), + &serde_json::from_value::>(jsonrule.clone()).unwrap(), &rule ); assert_eq!( &jsonrule, - &serde_json::to_value::>(rule).unwrap() + &serde_json::to_value::>(rule).unwrap() ); } } diff --git a/src/translator.rs b/src/translator.rs index 4c47460..5954841 100644 --- a/src/translator.rs +++ b/src/translator.rs @@ -1,52 +1,38 @@ //! The reasoner itself does not process RDF tuples directly. Instead the reasoner operates on //! abstract entities. The only requirement for these entities is that they have some total //! ordering (the reasoner needs to keep indices of entity relationships for efficient joins). -//! 
The reasoner represents these entities as u32. RDF entities are usually expressed as some -//! other type e.g. String. This module provides a way to generates and stores a mapping from -//! some type T to u32. The mapping is bijective (it goes both ways) so after reasoning is +//! The reasoner represents these entities as usize. RDF entities are usually expressed as some +//! other type e.g. String. This module provides a way to generate and store a mapping from +//! some type T to usize. The mapping is bijective (it goes both ways) so after reasoning is //! performed, the results can be converted back to the original format with entities of type T. use core::borrow::Borrow; -use core::convert::TryInto; use core::iter::FromIterator; -/// bijective mapping from some type T to u32 +/// bijective mapping from some type T to usize #[derive(Debug)] pub struct Translator { - /// represents both u32 -> T and T -> u32 + /// represents both usize -> T and T -> usize widdershins: Box<[T]>, } impl Translator { /// lookup the entity representing t - pub fn forward(&self, t: &impl Borrow) -> Option { - debug_assert!( - TryInto::::try_into(self.widdershins.len().saturating_sub(1)).is_ok(), - "too many entities to represent as u32" - ); - self.widdershins - .binary_search(t.borrow()) - .ok() - .map(|a: usize| a as u32) + pub fn forward(&self, t: &impl Borrow) -> Option { + self.widdershins.binary_search(t.borrow()).ok() } /// lookup the t that the entity represents - pub fn back(&self, a: u32) -> Option<&T> { - self.widdershins.get(a as usize) + pub fn back(&self, a: usize) -> Option<&T> { + self.widdershins.get(a) } } -// TODO: The user of this impl probably won't expect an implementaion of FromIterator to panic -// This impl panics if there are too many elements to index with a u32. impl FromIterator for Translator { fn from_iter>(src: I) -> Self { let mut table: Vec = src.into_iter().collect(); table.sort_unstable(); table.dedup(); - assert!( - TryInto::::try_into(table.len().saturating_sub(1)).is_ok(), - "too many entities to represent as u32" - ); Self { widdershins: table.into_boxed_slice(), } diff --git a/src/validate.rs b/src/validate.rs index c36aa92..5552865 100644 --- a/src/validate.rs +++ b/src/validate.rs @@ -1,5 +1,5 @@ -use crate::prove::BadRuleApplication; -use crate::{Claim, Rule, RuleApplication}; +use crate::prove::{BadRuleApplication, RuleApplication}; +use crate::rule::Rule; use alloc::collections::BTreeSet; /// Check is a proof is well-formed according to a ruleset. 
Returns the set of assumptions used by @@ -29,10 +29,15 @@ use alloc::collections::BTreeSet; /// // (?a, is, awesome) ∧ (?a, score, ?s) -> (?a score, awesome) /// let awesome_score_axiom = Rule::create( /// vec![ -/// [Unbound("a"), Bound("is"), Bound("awesome")], // if someone is awesome -/// [Unbound("a"), Bound("score"), Unbound("s")], // and they have some score +/// // if someone is awesome +/// [Unbound("a"), Bound("is"), Bound("awesome"), Bound("default_graph")], +/// // and they have some score +/// [Unbound("a"), Bound("score"), Unbound("s"), Bound("default_graph")], +/// ], +/// vec![ +/// // then they must have an awesome score +/// [Unbound("a"), Bound("score"), Bound("awesome"), Bound("default_graph")] /// ], -/// vec![[Unbound("a"), Bound("score"), Bound("awesome")]], // then they must have an awesome score /// )?; /// /// let proof = vec![ @@ -48,8 +53,8 @@ use alloc::collections::BTreeSet; /// &proof, /// ).map_err(|e| format!("{:?}", e))?; /// -/// // Now we know that under the given rules, if all RDF triples in `assumed` are true, then all -/// // RDF triples in `implied` are also true. +/// // Now we know that under the given rules, if all quads in `assumed` are true, then all +/// // quads in `implied` are also true. /// # Ok(()) /// # } /// ``` @@ -57,8 +62,8 @@ pub fn validate( rules: &[Rule], proof: &[RuleApplication], ) -> Result, Invalid> { - let mut implied: BTreeSet> = BTreeSet::new(); - let mut assumed: BTreeSet> = BTreeSet::new(); + let mut implied: BTreeSet<[Bound; 4]> = BTreeSet::new(); + let mut assumed: BTreeSet<[Bound; 4]> = BTreeSet::new(); for app in proof { let rule = rules.get(app.rule_index).ok_or(Invalid::NoSuchRule)?; for assumption in app.assumptions_when_applied(rule)? { @@ -85,8 +90,8 @@ pub struct Valid { feature = "serde", serde(bound(serialize = "Bound: Ord", deserialize = "Bound: Ord")) )] - pub assumed: BTreeSet>, - pub implied: BTreeSet>, + pub assumed: BTreeSet<[Bound; 4]>, + pub implied: BTreeSet<[Bound; 4]>, } #[derive(Debug, PartialEq, Eq)] @@ -107,22 +112,29 @@ impl From for Invalid { #[cfg(test)] mod test { use super::*; - use crate::common::{decl_rules, Bound, Unbound}; + use crate::common::decl_rules; use crate::prove::prove; + use crate::rule::Entity::{Bound as B, Unbound as U}; #[test] fn irrelevant_facts_ignored() { - let facts: Vec> = vec![["tacos", "are", "tasty"], ["nachos", "are", "tasty"]]; + let facts: Vec<[&str; 4]> = vec![ + ["tacos", "are", "tasty", "default_graph"], + ["nachos", "are", "tasty", "default_graph"], + ]; let rules = decl_rules::<&str, &str>(&[[ - &[[Bound("nachos"), Bound("are"), Bound("tasty")]], - &[[Bound("nachos"), Bound("are"), Bound("food")]], + &[[B("nachos"), B("are"), B("tasty"), B("default_graph")]], + &[[B("nachos"), B("are"), B("food"), B("default_graph")]], ]]); - let composite_claims = vec![["nachos", "are", "food"]]; - let proof = prove(&facts, &composite_claims, &rules).unwrap(); - let Valid { assumed, implied } = validate(&rules, &proof).unwrap(); + let composite_claims = vec![["nachos", "are", "food", "default_graph"]]; + let proof = prove::<&str, &str>(&facts, &composite_claims, &rules).unwrap(); + let Valid { assumed, implied } = validate::<&str, &str>(&rules, &proof).unwrap(); assert_eq!( &assumed, - &[["nachos", "are", "tasty"]].iter().cloned().collect() + &[["nachos", "are", "tasty", "default_graph"]] + .iter() + .cloned() + .collect() ); for claim in composite_claims { assert!(implied.contains(&claim)); @@ -131,30 +143,30 @@ mod test { #[test] fn bad_rule_application() { - let facts: 
Vec> = vec![["a", "a", "a"]]; + let facts: Vec<[&str; 4]> = vec![["a", "a", "a", "a"]]; let rules_v1 = decl_rules::<&str, &str>(&[[ - &[[Unbound("a"), Bound("a"), Bound("a")]], - &[[Bound("b"), Bound("b"), Bound("b")]], + &[[U("a"), B("a"), B("a"), B("a")]], + &[[B("b"), B("b"), B("b"), B("b")]], ]]); let rules_v2 = decl_rules::<&str, &str>(&[[ - &[[Bound("a"), Bound("a"), Bound("a")]], - &[[Bound("b"), Bound("b"), Bound("b")]], + &[[B("a"), B("a"), B("a"), B("a")]], + &[[B("b"), B("b"), B("b"), B("b")]], ]]); - let composite_claims = vec![["b", "b", "b"]]; - let proof = prove(&facts, &composite_claims, &rules_v1).unwrap(); - let err = validate(&rules_v2, &proof).unwrap_err(); + let composite_claims = vec![["b", "b", "b", "b"]]; + let proof = prove::<&str, &str>(&facts, &composite_claims, &rules_v1).unwrap(); + let err = validate::<&str, &str>(&rules_v2, &proof).unwrap_err(); assert_eq!(err, Invalid::BadRuleApplication); } #[test] fn no_such_rule() { - let facts: Vec> = vec![["a", "a", "a"]]; + let facts: Vec<[&str; 4]> = vec![["a", "a", "a", "a"]]; let rules = decl_rules::<&str, &str>(&[[ - &[[Bound("a"), Bound("a"), Bound("a")]], - &[[Bound("b"), Bound("b"), Bound("b")]], + &[[B("a"), B("a"), B("a"), B("a")]], + &[[B("b"), B("b"), B("b"), B("b")]], ]]); - let composite_claims = vec![["b", "b", "b"]]; - let proof = prove(&facts, &composite_claims, &rules).unwrap(); + let composite_claims = vec![["b", "b", "b", "b"]]; + let proof = prove::<&str, &str>(&facts, &composite_claims, &rules).unwrap(); let err = validate::<&str, &str>(&[], &proof).unwrap_err(); assert_eq!(err, Invalid::NoSuchRule); } @@ -194,37 +206,38 @@ mod test { // and (bob alergyFree true) // therefore (bob mayEat beans) + // all the following rules operate on the default graph let rules = decl_rules(&[ [ &[ - [Bound("andrew"), Bound("claims"), Unbound("c")], - [Unbound("c"), Bound("subject"), Unbound("s")], - [Unbound("c"), Bound("property"), Unbound("p")], - [Unbound("c"), Bound("object"), Unbound("o")], + [B("andrew"), B("claims"), U("c"), B("default_graph")], + [U("c"), B("subject"), U("s"), B("default_graph")], + [U("c"), B("property"), U("p"), B("default_graph")], + [U("c"), B("object"), U("o"), B("default_graph")], ], - &[[Unbound("s"), Unbound("p"), Unbound("o")]], + &[[U("s"), U("p"), U("o"), B("default_graph")]], ], [ - &[[Unbound("a"), Bound("favoriteFood"), Unbound("f")]], + &[[U("a"), B("favoriteFood"), U("f"), B("default_graph")]], &[ - [Unbound("a"), Bound("likes"), Unbound("f")], - [Unbound("f"), Bound("type"), Bound("food")], + [U("a"), B("likes"), U("f"), B("default_graph")], + [U("f"), B("type"), B("food"), B("default_graph")], ], ], [ &[ - [Unbound("f"), Bound("type"), Bound("food")], - [Unbound("a"), Bound("alergyFree"), Bound("true")], + [U("f"), B("type"), B("food"), B("default_graph")], + [U("a"), B("alergyFree"), B("true"), B("default_graph")], ], - &[[Unbound("a"), Bound("mayEat"), Unbound("f")]], + &[[U("a"), B("mayEat"), U("f"), B("default_graph")]], ], ]); - let facts: &[Claim<&str>] = &[ - ["alice", "favoriteFood", "beans"], - ["andrew", "claims", "_:claim1"], - ["_:claim1", "subject", "bob"], - ["_:claim1", "property", "alergyFree"], - ["_:claim1", "object", "true"], + let facts: &[[&str; 4]] = &[ + ["alice", "favoriteFood", "beans", "default_graph"], + ["andrew", "claims", "_:claim1", "default_graph"], + ["_:claim1", "subject", "bob", "default_graph"], + ["_:claim1", "property", "alergyFree", "default_graph"], + ["_:claim1", "object", "true", "default_graph"], ]; let manual_proof = 
decl_proof(&[ (1, &["alice", "beans"]), @@ -236,10 +249,10 @@ mod test { assert_eq!( implied, [ - ["alice", "likes", "beans"], - ["beans", "type", "food"], - ["bob", "alergyFree", "true"], - ["bob", "mayEat", "beans"] // this is the claim we wished to prove + ["alice", "likes", "beans", "default_graph"], + ["beans", "type", "food", "default_graph"], + ["bob", "alergyFree", "true", "default_graph"], + ["bob", "mayEat", "beans", "default_graph"] // this is the claim we wished to prove ] .iter() .cloned() diff --git a/src/vecset.rs b/src/vecset.rs index c081ba2..c1c8c56 100644 --- a/src/vecset.rs +++ b/src/vecset.rs @@ -1,15 +1,12 @@ use core::cmp::Ordering; /// A sorted Vec of unique elements. +#[derive(Default)] pub struct VecSet { sorted: Vec, } impl VecSet { - pub fn new() -> Self { - Self { sorted: Vec::new() } - } - /// Insert a new element while maintaining the ordering defined by the comparator function. /// If the element already exists in the set according to the comparator it will not be /// inserted. @@ -58,7 +55,7 @@ mod test { fn vecset() { let ordering = |a: &u8, b: &u8| -> Ordering { a.cmp(&b) }; let sub_ordering = |a: &u8, b: &u8| -> Ordering { (a / 2).cmp(&(b / 2)) }; - let mut vs: VecSet = VecSet::new(); + let mut vs: VecSet = VecSet::default(); for n in &[32u8, 5, 2, 2, 4, 3, 2, 23, 24, 253] { vs.insert(*n, ordering); }
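The `Reasoner` in this patch keeps six sorted permutation indices (`spog`, `posg`, `ospg`, `gspo`, `gpos`, `gosp`) so that any combination of bound and unbound slots in a query pattern can be answered with a single range scan over a prefix of one of those orderings. A minimal standalone sketch of that idea follows; it is not the crate's code, and the helper `range_gs` and the sample data are made up for illustration.

```rust
use std::cmp::Ordering;

// (s, p, o, g) encoded as opaque ids, mirroring Subj/Prop/Obje/Grap.
type Quad = (usize, usize, usize, usize);

/// Given `sorted`, a list of indices into `claims` kept sorted by the
/// (g, s, p, o) ordering, return the contiguous range of entries whose
/// graph and subject match the (g, s) prefix. This mirrors the trick used
/// by `VecSet::range` together with the `Gs` impl of `Indexed`.
fn range_gs<'a>(claims: &[Quad], sorted: &'a [usize], g: usize, s: usize) -> &'a [usize] {
    let key = |i: &usize| {
        let (qs, _qp, _qo, qg) = claims[*i];
        (qg, qs).cmp(&(g, s))
    };
    let start = sorted.partition_point(|i| key(i) == Ordering::Less);
    let end = sorted.partition_point(|i| key(i) != Ordering::Greater);
    &sorted[start..end]
}

fn main() {
    let claims = vec![(0, 1, 2, 9), (0, 1, 3, 9), (4, 1, 2, 9), (0, 1, 2, 8)];
    // one of the six permutation indices: claim ids sorted by (g, s, p, o)
    let mut gspo: Vec<usize> = (0..claims.len()).collect();
    gspo.sort_by_key(|&i| {
        let (s, p, o, g) = claims[i];
        (g, s, p, o)
    });
    // all claims in graph 9 with subject 0: claim ids 0 and 1
    let hits = range_gs(&claims, &gspo, 9, 0);
    assert_eq!(hits, &[0, 1][..]);
}
```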
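The reworked `ancestry` test reasons by repeatedly applying the rules and inserting any newly implied quads until a pass produces nothing new. Below is a simplified, self-contained sketch of that saturation loop with the two ancestry rules hard-coded over plain tuples; the names (`saturate`, the constants) are hypothetical, and the real reasoner instead binds rule variables through `Instantiations` and its indices.

```rust
use std::collections::BTreeSet;

// (s, p, o, g) as plain ids.
type Quad = (usize, usize, usize, usize);

/// Repeatedly apply `derive` and add its output to `facts` until a pass
/// yields nothing new (a fixpoint), then return the saturated set.
fn saturate(
    mut facts: BTreeSet<Quad>,
    derive: impl Fn(&BTreeSet<Quad>) -> BTreeSet<Quad>,
) -> BTreeSet<Quad> {
    loop {
        let new: Vec<Quad> = derive(&facts)
            .into_iter()
            .filter(|q| !facts.contains(q))
            .collect();
        if new.is_empty() {
            return facts;
        }
        facts.extend(new);
    }
}

fn main() {
    const PARENT: usize = 100;
    const ANCESTOR: usize = 101;
    const G: usize = 0; // stand-in for the default graph

    let facts: BTreeSet<Quad> = [(1, PARENT, 2, G), (2, PARENT, 3, G)].into_iter().collect();

    let saturated = saturate(facts, |fs| {
        let mut out = BTreeSet::new();
        // (?a parent ?b ?g) -> (?a ancestor ?b ?g)
        for &(a, p, b, g) in fs {
            if p == PARENT {
                out.insert((a, ANCESTOR, b, g));
            }
        }
        // (?a ancestor ?b ?g) and (?b ancestor ?c ?g) -> (?a ancestor ?c ?g)
        for &(a, p1, b, g1) in fs {
            for &(b2, p2, c, g2) in fs {
                if p1 == ANCESTOR && p2 == ANCESTOR && b == b2 && g1 == g2 {
                    out.insert((a, ANCESTOR, c, g1));
                }
            }
        }
        out
    });

    // the transitive conclusion only becomes derivable on the second pass
    assert!(saturated.contains(&(1, ANCESTOR, 3, G)));
}
```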
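The `Translator` change swaps `u32` ids for `usize` ones, but the underlying idea is unchanged: sort and deduplicate all terms once, then `binary_search` gives term to id and plain indexing gives id to term. A tiny standalone sketch of that bijection, not the crate's API:

```rust
/// Sorted, deduplicated table: a term's position is its id.
fn make_table(mut terms: Vec<&str>) -> Vec<&str> {
    terms.sort_unstable();
    terms.dedup();
    terms
}

fn main() {
    let table = make_table(vec!["ancestor", "parent", "n0", "parent"]);
    // term -> id
    let id = table.binary_search(&"parent").unwrap();
    // id -> term
    assert_eq!(table[id], "parent");
}
```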