Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

test: bsp delete multiple files #284

Merged
merged 6 commits into from
Dec 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 0 additions & 8 deletions pnpm-lock.yaml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 0 additions & 1 deletion test/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,6 @@
"postgres": "^3.4.5",
"strip-ansi": "^7.1.0",
"testcontainers": "10.13.0",
"tiny-invariant": "^1.3.3",
"tmp": "0.2.3",
"tsx": "4.19.0",
"yaml": "2.5.1"
Expand Down
4 changes: 2 additions & 2 deletions test/scripts/downloadPolkadot.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import fs from "node:fs";
import path from "node:path";
import invariant from "tiny-invariant";
import assert from "node:assert";

async function main() {
const binaries = ["polkadot", "polkadot-prepare-worker", "polkadot-execute-worker"];
Expand Down Expand Up @@ -46,7 +46,7 @@ async function main() {

function getVersionArg() {
const args = process.argv.slice(2);
invariant(
assert(
args.length > 0,
"No version provided. Usage: pnpm tsx scripts/downloadPolkadot.ts <version>"
);
Expand Down
6 changes: 3 additions & 3 deletions test/scripts/modifyPlainSpec.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import fs from "node:fs/promises";
import assert from "node:assert";
import { convertExponentials } from "@zombienet/utils";
import jsonBg from "json-bigint";
import invariant from "tiny-invariant";

const JSONbig = jsonBg({ useNativeBigInt: true });

Expand All @@ -14,8 +14,8 @@ if (args.length !== 2) {

const [inputPath, outputPath] = args;

invariant(inputPath, "Input path is required");
invariant(outputPath, "Output path is required");
assert(inputPath, "Input path is required");
assert(outputPath, "Output path is required");

process.stdout.write(`Reading from: ${inputPath} ...`);
const plainSpec = JSONbig.parse((await fs.readFile(inputPath)).toString());
Expand Down
251 changes: 251 additions & 0 deletions test/suites/integration/bsp/multiple-delete.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,251 @@
import assert, { strictEqual } from "node:assert";
import { bspKey, describeBspNet, shUser, sleep, type EnrichedBspApi } from "../../../util";

// Integration suite: a single BSP volunteers for several files, then requests and
// confirms stop-storing (deletion) for each of them.
// NOTE(review): the suite title "Single BSP Volunteering" looks copy-pasted from the
// volunteering suite — this file exercises multi-file deletion; confirm and rename.
describeBspNet("Single BSP Volunteering", ({ before, createBspApi, it, createUserApi }) => {
  let userApi: EnrichedBspApi;
  let bspApi: EnrichedBspApi;

  before(async () => {
    userApi = await createUserApi();
    bspApi = await createBspApi();
  });

  it("Network launches and can be queried", async () => {
    const userNodePeerId = await userApi.rpc.system.localPeerId();
    strictEqual(userNodePeerId.toString(), userApi.shConsts.NODE_INFOS.user.expectedPeerId);

    const bspNodePeerId = await bspApi.rpc.system.localPeerId();
    strictEqual(bspNodePeerId.toString(), userApi.shConsts.NODE_INFOS.bsp.expectedPeerId);
  });

  it("Volunteer for multiple files and delete them", async () => {
    const source = ["res/whatsup.jpg", "res/adolphus.jpg", "res/cloud.jpg"];
    const destination = ["test/whatsup.jpg", "test/adolphus.jpg", "test/cloud.jpg"];
    const bucketName = "something-3";

    const newBucketEventEvent = await userApi.createBucket(bucketName);
    const newBucketEventDataBlob =
      userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data;

    assert(newBucketEventDataBlob, "Event doesn't match Type");

    // Load every file into the user node's storage and build one storage-request
    // extrinsic per file so they can all be sealed in a single block.
    const files = [];
    const txs = [];
    for (let i = 0; i < source.length; i++) {
      const { fingerprint, file_size, location } =
        await userApi.rpc.storagehubclient.loadFileInStorage(
          source[i],
          destination[i],
          userApi.shConsts.NODE_INFOS.user.AddressId,
          newBucketEventDataBlob.bucketId
        );

      files.push({ fingerprint, file_size, location });
      txs.push(
        userApi.tx.fileSystem.issueStorageRequest(
          newBucketEventDataBlob.bucketId,
          location,
          fingerprint,
          file_size,
          userApi.shConsts.DUMMY_MSP_ID,
          [userApi.shConsts.NODE_INFOS.user.expectedPeerId]
        )
      );
    }

    await userApi.sealBlock(txs, shUser);

    // Get the new storage request events, making sure we have one per file.
    const storageRequestEvents = await userApi.assert.eventMany("fileSystem", "NewStorageRequest");
    strictEqual(storageRequestEvents.length, source.length);

    // Get the file keys from the storage request events.
    const fileKeys = storageRequestEvents.map((event) => {
      const dataBlob =
        userApi.events.fileSystem.NewStorageRequest.is(event.event) && event.event.data;
      assert(dataBlob, "Event doesn't match Type");
      return dataBlob.fileKey;
    });

    // Wait for the BSP to volunteer and finish downloading every file.
    await userApi.wait.bspVolunteer(source.length);
    for (const fileKey of fileKeys) {
      await bspApi.wait.bspFileStorageComplete(fileKey);
    }

    // Waiting for a confirmation of the first file to be stored.
    await sleep(500);
    await userApi.wait.bspStored(1);

    // Here we expect the 2 other files to be batched into one confirmation.
    await sleep(500);
    await userApi.wait.bspStored(1);

    // Request stop-storing for every file, batching all requests into one block.
    const stopStoringTxs = [];
    for (let i = 0; i < fileKeys.length; i++) {
      const inclusionForestProof = await bspApi.rpc.storagehubclient.generateForestProof(null, [
        fileKeys[i]
      ]);
      stopStoringTxs.push(
        userApi.tx.fileSystem.bspRequestStopStoring(
          fileKeys[i],
          newBucketEventDataBlob.bucketId,
          files[i].location,
          userApi.shConsts.NODE_INFOS.user.AddressId,
          files[i].fingerprint,
          files[i].file_size,
          false,
          inclusionForestProof.toString()
        )
      );
    }

    await userApi.sealBlock(stopStoringTxs, bspKey);

    await userApi.assert.eventMany("fileSystem", "BspRequestedToStopStoring");

    // Wait enough blocks for the deletion to be allowed.
    const currentBlock = await userApi.rpc.chain.getBlock();
    const currentBlockNumber = currentBlock.block.header.number.toNumber();
    const cooldown = currentBlockNumber + bspApi.consts.fileSystem.minWaitForStopStoring.toNumber();
    await userApi.block.skipTo(cooldown);

    // Confirm stop-storing one file per block: each confirmation changes the forest
    // root, so the proof must be regenerated before each confirmation.
    for (let i = 0; i < fileKeys.length; i++) {
      const inclusionForestProof = await bspApi.rpc.storagehubclient.generateForestProof(null, [
        fileKeys[i]
      ]);
      await userApi.sealBlock(
        userApi.tx.fileSystem.bspConfirmStopStoring(fileKeys[i], inclusionForestProof.toString()),
        bspKey
      );

      // Check for the confirm stopped storing event.
      await userApi.assert.eventPresent("fileSystem", "BspConfirmStoppedStoring");
    }
  });

  it(
    "Volunteer for multiple files and delete them (failing to batch when confirming)",
    { skip: "cannot store files again after they have been deleted once" },
    async () => {
      const source = ["res/whatsup.jpg", "res/adolphus.jpg", "res/cloud.jpg"];
      const destination = ["test/whatsup.jpg", "test/adolphus.jpg", "test/cloud.jpg"];
      const bucketName = "something-4";

      const newBucketEventEvent = await userApi.createBucket(bucketName);
      const newBucketEventDataBlob =
        userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data;

      assert(newBucketEventDataBlob, "Event doesn't match Type");

      // Load every file and build one storage-request extrinsic per file.
      const files = [];
      const txs = [];
      for (let i = 0; i < source.length; i++) {
        const { fingerprint, file_size, location } =
          await userApi.rpc.storagehubclient.loadFileInStorage(
            source[i],
            destination[i],
            userApi.shConsts.NODE_INFOS.user.AddressId,
            newBucketEventDataBlob.bucketId
          );

        files.push({ fingerprint, file_size, location });
        txs.push(
          userApi.tx.fileSystem.issueStorageRequest(
            newBucketEventDataBlob.bucketId,
            location,
            fingerprint,
            file_size,
            userApi.shConsts.DUMMY_MSP_ID,
            [userApi.shConsts.NODE_INFOS.user.expectedPeerId]
          )
        );
      }

      await userApi.sealBlock(txs, shUser);

      // Get the new storage request events, making sure we have one per file.
      const storageRequestEvents = await userApi.assert.eventMany(
        "fileSystem",
        "NewStorageRequest"
      );
      strictEqual(storageRequestEvents.length, source.length);

      // Get the file keys from the storage request events.
      const fileKeys = storageRequestEvents.map((event) => {
        const dataBlob =
          userApi.events.fileSystem.NewStorageRequest.is(event.event) && event.event.data;
        assert(dataBlob, "Event doesn't match Type");
        return dataBlob.fileKey;
      });

      // Wait for the BSP to volunteer and finish downloading every file.
      await userApi.wait.bspVolunteer(source.length);
      for (const fileKey of fileKeys) {
        await bspApi.wait.bspFileStorageComplete(fileKey);
      }

      // Waiting for a confirmation of the first file to be stored.
      await sleep(500);
      await userApi.wait.bspStored(1);

      // Here we expect the 2 other files to be batched into one confirmation.
      await sleep(500);
      await userApi.wait.bspStored(1);

      // Request stop-storing for every file, batching all requests into one block.
      const stopStoringTxs = [];
      for (let i = 0; i < fileKeys.length; i++) {
        const inclusionForestProof = await bspApi.rpc.storagehubclient.generateForestProof(null, [
          fileKeys[i]
        ]);
        stopStoringTxs.push(
          userApi.tx.fileSystem.bspRequestStopStoring(
            fileKeys[i],
            newBucketEventDataBlob.bucketId,
            files[i].location,
            userApi.shConsts.NODE_INFOS.user.AddressId,
            files[i].fingerprint,
            files[i].file_size,
            false,
            inclusionForestProof.toString()
          )
        );
      }

      await userApi.sealBlock(stopStoringTxs, bspKey);

      await userApi.assert.eventMany("fileSystem", "BspRequestedToStopStoring");

      // Wait enough blocks for the deletion to be allowed.
      const currentBlock = await userApi.rpc.chain.getBlock();
      const currentBlockNumber = currentBlock.block.header.number.toNumber();
      const cooldown =
        currentBlockNumber + bspApi.consts.fileSystem.minWaitForStopStoring.toNumber();
      await userApi.block.skipTo(cooldown);

      // Batching the delete confirmations should fail for extrinsics 2 and 3: the
      // proofs are all generated against the pre-confirmation forest root, so once
      // the first confirmation lands the remaining proofs are stale.
      const confirmStopStoringTxs = [];
      for (let i = 0; i < fileKeys.length; i++) {
        const inclusionForestProof = await bspApi.rpc.storagehubclient.generateForestProof(null, [
          fileKeys[i]
        ]);
        confirmStopStoringTxs.push(
          userApi.tx.fileSystem.bspConfirmStopStoring(fileKeys[i], inclusionForestProof.toString())
        );
      }

      await userApi.sealBlock(confirmStopStoringTxs, bspKey);

      // Check that only the first confirmation succeeded.
      const confirmStopStoringEvents = await userApi.assert.eventMany(
        "fileSystem",
        "BspConfirmStoppedStoring"
      );

      assert(
        confirmStopStoringEvents.length === 1,
        "two of the extrinsics should fail because of wrong inclusion proof"
      );
    }
  );
});
5 changes: 2 additions & 3 deletions test/suites/integration/bsp/storage-delete.test.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
import { strictEqual } from "node:assert";
import assert, { strictEqual } from "node:assert";
import { bspKey, describeBspNet, shUser, type EnrichedBspApi } from "../../../util";
import invariant from "tiny-invariant";

describeBspNet(
"BSPNet : stop storing file and other BSPs taking the relay",
Expand All @@ -13,7 +12,7 @@ describeBspNet(

before(async () => {
const launchResponse = await getLaunchResponse();
invariant(
assert(
launchResponse && "bspTwoRpcPort" in launchResponse && "bspThreeRpcPort" in launchResponse,
"BSPNet failed to initialise with required ports"
);
Expand Down
3 changes: 1 addition & 2 deletions test/suites/integration/bsp/submit-proofs.test.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import assert, { strictEqual } from "node:assert";
import invariant from "tiny-invariant";
import {
ShConsts,
bspThreeKey,
Expand All @@ -25,7 +24,7 @@ describeBspNet(

before(async () => {
const launchResponse = await getLaunchResponse();
invariant(
assert(
launchResponse && "bspTwoRpcPort" in launchResponse && "bspThreeRpcPort" in launchResponse,
"BSPNet failed to initialise with required ports"
);
Expand Down
Loading
Loading