// We need classical includes
#include <stdio.h>
// We need our declaration
#include "Frost.hpp"
// We need scoped pointer too
#include "ClassPath/include/Utils/ScopePtr.hpp"
// We need hex dump output
#include "ClassPath/include/Utils/Dump.hpp"
// We need encoding too
#include "ClassPath/include/Encoding/Encode.hpp"
// We need the folder scanner too
#include "ClassPath/include/File/ScanFolder.hpp"
// We need the chunker too
#include "ClassPath/include/File/TTTDChunker.hpp"
// We need compression too
#include "ClassPath/include/Streams/CompressStream.hpp"
#include "ClassPath/include/Compress/BSCLib.hpp"
// We need loggers too
#include "ClassPath/include/Logger/Logger.hpp"
// We need StringMap too
#include "ClassPath/include/Hash/StringMap.hpp"
// The global option map
Strings::StringMap optionsMap;
// The warning log that's displayed on output
Strings::StringArray warningLog;
// Error code that's returned to bail out of int functions
const int BailOut = 26748;
namespace Frost
{
bool wasBackingUp = false, backupWorked = false, dumpTimeRequired = false, exitRequired = false;
bool safeIndex = false;
int dumpLevel = 0; // This is the verbosity level
unsigned int previousRevID = 0;
#ifdef _POSIX
void asyncProcess(int signal)
{
static const char stopping[] = "\n| Stopping, please wait... |\n";
switch (signal)
{
case SIGUSR2: dumpTimeRequired = true; return;
case SIGINT: exitRequired = true; write(2, stopping, sizeof(stopping) - 1); fsync(2); return; // -1 so the trailing NUL isn't written
default: return;
}
}
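// Note the handler above only sets flags and calls write()/fsync(), which are
// async-signal-safe; anything heavier (allocation, stdio, logging) is deferred
// to the processing loop, which polls dumpTimeRequired and exitRequired.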
#endif
void debugMem(const uint8 * buffer, const uint32 len, const String & title = "")
{
if (dumpLevel < 2) return;
String out;
Utils::hexDump(out, buffer, len, 16, true, false);
fprintf(stdout, "%s%s\n", (const char*)title, (const char*)out);
}
// This will be used later on when i18n'ing the software
#ifdef __GNUC__
__attribute__((format_arg(1)))
#endif
const char * __trans__(const char * format)
{ // Single-threaded conversion here (not thread-safe), which is a shame
static String translated;
translated = format;
return (const char*)translated;
}
String TRANS(const String & value) { return __trans__(value); }
void derivePassword(KeyFactory::KeyT & pwKey, const String & password)
{
// We need to derive the low-entropy password to build a Hash out of it, and use that to decrypt the private key
// we have generated earlier.
KeyFactory::PWKeyDerivFuncT hash;
// Concatenate the password multiple times until it fits the required input size
MemoryBlock inputPW(KeyFactory::BigHashT::DigestSize);
inputPW.stripTo(0);
while (inputPW.getSize() < KeyFactory::BigHashT::DigestSize)
inputPW.Append(password, password.getLength() + 1); // Include the NUL terminator to differentiate "a" from "aa" or "aaa", etc.
hash.Hash(inputPW.getConstBuffer(), inputPW.getSize());
hash.Finalize(pwKey);
}
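// A minimal usage sketch of the derivation above (hypothetical caller, shown
// for illustration only):
//   KeyFactory::KeyT pwKey;
//   derivePassword(pwKey, userPassword);
//   // pwKey now holds the PWKeyDerivFuncT digest of the repeated password and
//   // serves as the symmetric key that wraps the vault's private key below.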
String KeyFactory::loadPrivateKey(const String & fileVault, const MemoryBlock & cipherMasterKey, const String & password, const String & ID)
{
File::Info vault(fileVault, true);
if (!vault.doesExist()) return TRANS("Key vault file does not exist");
// Check the permission for the file
#if defined(_POSIX)
if (vault.getPermission() != 0600) return TRANS("Key vault file permissions are bad, expecting 0600");
#endif
Strings::FastString keyVaultContent = vault.getContent();
if (!keyVaultContent) return TRANS("Unable to read the key vault file");
// Parse the file to find out the ID in the list
String keySizeAndID = keyVaultContent.splitUpTo("\n");
String encKey = keyVaultContent.splitUpTo("\n");
String keyID = keySizeAndID.fromFirst(" ");
while (keyID != ID && keyVaultContent.getLength()) // Stop scanning once the vault content is exhausted
{
keySizeAndID = keyVaultContent.splitUpTo("\n");
encKey = keyVaultContent.splitUpTo("\n");
keyID = keySizeAndID.fromFirst(" ");
}
if (keyID != ID) return TRANS("Could not find a key with the specified ID: ") + ID;
debugMem(cipherMasterKey.getConstBuffer(), cipherMasterKey.getSize(), "Ciphered master key");
debugMem(keyVaultContent, keyVaultContent.getLength(), "Base85 content");
// We need to load the ciphered private key out of the fileVault for our ID
int encryptedKeySize = keySizeAndID;
Utils::ScopePtr<MemoryBlock> cipherKey(MemoryBlock::fromBase85(encKey, encKey.getLength()));
if (!cipherKey) return TRANS("Bad format for the key vault");
debugMem(cipherKey->getConstBuffer(), cipherKey->getSize(), "Encrypted content key");
// Then try to decode it with the given password
KeyT derivedPassword;
derivePassword(derivedPassword, password);
debugMem(derivedPassword, ArrSz(derivedPassword), "Password key");
// Then create the block to decrypt
SymmetricT sym;
sym.setKey(derivedPassword, (SymmetricT::BlockSize)ArrSz(derivedPassword), 0, (SymmetricT::BlockSize)ArrSz(derivedPassword));
MemoryBlock decKey( (uint32)((encryptedKeySize + (ArrSz(derivedPassword) - 1) ) / ArrSz(derivedPassword)) * ArrSz(derivedPassword) );
MemoryBlock clearKey(decKey.getSize());
sym.Decrypt(cipherKey->getConstBuffer(), clearKey.getBuffer(), cipherKey->getSize()); // ECB mode used for a single block anyway
debugMem(clearKey.getConstBuffer(), clearKey.getSize(), "Encryption key");
// And finally decode the cipherMasterKey to the master key.
AsymmetricT::PrivateKey key;
if (!key.Import(clearKey.getConstBuffer(), encryptedKeySize, 0)) return TRANS("Bad key from the key vault"); // Bad key
AsymmetricT asym;
if (!asym.Decrypt(cipherMasterKey.getConstBuffer(), cipherMasterKey.getSize(), masterKey, ArrSz(masterKey), key))
return TRANS("Can't decrypt the master key with the given key vault. Did you try with the wrong remote ?");
debugMem(masterKey, ArrSz(masterKey), "Master key");
return "";
}
String KeyFactory::createMasterKeyForFileVault(MemoryBlock & cipherMasterKey, const String & fileVault, const String & password, const String & ID)
{
// Check for file existence first.
File::Info vault(fileVault, true);
if (vault.doesExist())
{
Strings::FastString keyVaultContent = vault.getContent();
if (!keyVaultContent) return TRANS("Unable to read the existing key vault file");
// Parse the file to find out the ID in the list
int count = 1;
String keySizeAndID = keyVaultContent.splitUpTo("\n");
String encKey = keyVaultContent.splitUpTo("\n");
String keyID = keySizeAndID.fromFirst(" ");
while (keyID != ID && keyVaultContent.getLength()) // Stop scanning once the vault content is exhausted
{
keySizeAndID = keyVaultContent.splitUpTo("\n");
encKey = keyVaultContent.splitUpTo("\n");
keyID = keySizeAndID.fromFirst(" ");
count++;
}
if (keyID == ID) return TRANS("This ID already exists in the key vault: ") + fileVault + String("[")+ count + String("] => ") + ID;
}
File::Info parentFolder(vault.getParentFolder());
if (parentFolder.doesExist() && !parentFolder.isDir()) return TRANS("The parent folder for the key vault file exists but it's not a directory: ") + fileVault;
// Generate a lot of random data, and that'll become the master key.
{
uint8 randomData[2 * BigHashT::DigestSize];
Random::fillBlock(randomData, ArrSz(randomData), true);
// Create the master key
BigHashT hash;
hash.Start(); hash.Hash(randomData, ArrSz(randomData)); hash.Finalize(masterKey);
debugMem(masterKey, ArrSz(masterKey), "Master key");
}
// Then we need to generate an asymmetric key pair, and export it
AsymmetricT asym;
AsymmetricT::PrivateKey key;
if (!asym.Generate(key)) return TRANS("Failed to generate a private key");
// Export the key
MemoryBlock exportedKey(key.getRequiredArraySize());
if (!key.Export(exportedKey.getBuffer(), exportedKey.getSize())) return TRANS("Failed to export the private key");
debugMem(exportedKey.getConstBuffer(), exportedKey.getSize(), "EC_IES Private key");
// Encrypt the master key now
if (!cipherMasterKey.ensureSize(asym.getCiphertextLength(ArrSz(masterKey)), true)) return TRANS("Failed to allocate memory for the ciphered master key");
if (!asym.Encrypt(masterKey, ArrSz(masterKey), cipherMasterKey.getBuffer(), cipherMasterKey.getSize())) return TRANS("Failed to encrypt the master key");
debugMem(cipherMasterKey.getConstBuffer(), cipherMasterKey.getSize(), "Ciphered master key");
// Derive the password key
KeyT pwKey;
derivePassword(pwKey, password);
debugMem(pwKey, ArrSz(pwKey), "Password key");
// Then create the block to encrypt
MemoryBlock encKey( (uint32)((exportedKey.getSize() + (ArrSz(pwKey) - 1) ) / ArrSz(pwKey)) * ArrSz(pwKey) );
MemoryBlock cipherKey(encKey.getSize());
// Fill the key
memcpy(encKey.getBuffer(), exportedKey.getConstBuffer(), exportedKey.getSize());
// Pad the tail with random data (we don't care about it, since it's dropped on import)
Random::fillBlock(encKey.getBuffer() + exportedKey.getSize(), encKey.getSize() - exportedKey.getSize());
debugMem(encKey.getConstBuffer(), encKey.getSize(), "Encryption key");
SymmetricT sym;
sym.setKey(pwKey, (SymmetricT::BlockSize)ArrSz(pwKey), 0, (SymmetricT::BlockSize)ArrSz(pwKey));
sym.Encrypt(encKey.getConstBuffer(), cipherKey.getBuffer(), encKey.getSize()); // ECB mode used for a single block anyway
debugMem(cipherKey.getConstBuffer(), cipherKey.getSize(), "Encrypted content key");
// And finally create the output key vault with this
if (!parentFolder.doesExist() && !parentFolder.makeDir(true)) return TRANS("Can't create the parent folder for the key vault file");
Utils::ScopePtr<MemoryBlock> base85Encoded(cipherKey.toBase85());
debugMem(base85Encoded->getConstBuffer(), base85Encoded->getSize(), "Base85 Encrypted content key");
if (!vault.setContent(String::Print("%d %s\n%s\n", (int)exportedKey.getSize(), (const char*)ID, (const char*)String(base85Encoded->getConstBuffer(), base85Encoded->getSize())), File::Info::Append)) return TRANS("Can't set the key vault file content");
if (!vault.setPermission(0600)) return TRANS("Can't set the key vault file permission to 0600");
return "";
}
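// For reference, each vault entry written above (see the setContent call)
// spans two text lines: a "<exportedKeySize> <ID>" header line, followed by
// the Base85-encoded, password-encrypted private key. loadPrivateKey scans
// these line pairs to find the entry matching the requested ID.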
namespace DatabaseModel { String databaseURL = ""; }
#define MapAs(X, Y, Offset) (X*)(Y + Offset)
namespace FileFormat
{
struct _CondScopeProfiler
{
uint32 startTime;
const char * name;
_CondScopeProfiler(const char * name, const bool force = false) : startTime(dumpTimeRequired || force ? Time::getTimeWithBase(65536) : 0), name(name) { }
~_CondScopeProfiler() { if (dumpTimeRequired) { uint32 duration = Time::getTimeWithBase(65536) - startTime; fprintf(stderr, "Time: %s => %.3fs\n", name, duration / 65536.0); } }
};
#define CondScopeProfiler ::Frost::FileFormat::_CondScopeProfiler profile (__FUNCTION__)
template <int instanceID>
struct _AccScopeProfiler
{
uint32 & getAccTime() { static uint32 accTime; return accTime; }
uint32 & getCount() { static uint32 count; return count; }
void flush() { fprintf(stderr, "Time: %s => avg %.3fs over %u runs (total: %.3fs)\n", name, getAccTime() / (getCount() * 65536.0), getCount(), getAccTime() / 65536.0); getCount() = getAccTime() = 0; }
uint32 startTime;
const char * name;
_AccScopeProfiler(const char * name) : startTime(dumpTimeRequired ? Time::getTimeWithBase(65536) : 0), name(name) { }
~_AccScopeProfiler() { if (dumpTimeRequired && startTime) { uint32 duration = Time::getTimeWithBase(65536) - startTime; getAccTime() += duration; getCount()++; } }
};
#define AccScopeProfiler(X) ::Frost::FileFormat::_AccScopeProfiler<X> profile (__FUNCTION__)
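// Typical usage of the profilers above: drop "CondScopeProfiler;" at the top
// of a function to time a single call, or "AccScopeProfiler(N);" (with a
// unique N per call site) to accumulate an average over many calls. Both only
// measure when a time dump was requested (dumpTimeRequired), so they cost
// nothing in normal runs.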
// Start a new revision for this backup file
bool IndexFile::startNewRevision(const unsigned rev)
{
const uint32 revision = rev ? rev : catalog->revision + 1;
if (readOnly) return false;
fileTree.revision = revision;
metadata.Reset();
if (!rev) metadata.Append(String::Print(TRANS("Revision %u created on %s"), revision, (const char*)Time::LocalTime::Now().toDate()));
return true;
}
// Resize the chunk index map
bool IndexFile::resizeChunkIndexMap()
{
if (!chunkIndices) return true;
return chunkIndices->resize(); //@todo use a generator to save memory while growing the table
}
// Append a chunk to this index file
bool IndexFile::appendChunk(Chunk & chunk, const uint32 forceUID)
{
if (readOnly) return false;
if (!forceUID) chunk.UID = ++maxChunkID;
AccScopeProfiler(1);
// local.chunks.insertSorted(chunk);
// consolidated.chunks.insertSorted(chunk);
uint32 chunkIndex = (uint32)consolidated.chunks.getSize();
consolidated.chunks.Append(chunk); // Should be O(1)
return chunkIndices->storeValue(chunk.checksum, chunkIndex); // This too
}
// Find a chunk based on its checksum
uint32 IndexFile::findChunk(Chunk & chunk) const
{
AccScopeProfiler(2);
uint32 * pos = chunkIndices->getValue(chunk.checksum);
if (!pos) return -1;
return consolidated.chunks[*pos].UID;
// return consolidated.findChunk(chunk);
}
// Append a multichunk to this file
bool IndexFile::appendMultichunk(Multichunk * mchunk, ChunkList * list)
{
if (readOnly || !mchunk || !list) return false;
// Make sure it does not exist already
mchunk->listID = maxChunkListID + 1;
list->UID = maxChunkListID + 1;
if (multichunks.storeValue(mchunk->UID, mchunk) && chunkList.storeValue(list->UID, list))
{
maxChunkListID++;
return true;
}
return false;
}
bool IndexFile::appendFileItem(FileTree::Item * item, ChunkList * list)
{
if (readOnly || !item || !list) return false;
// Make sure it does not exist already
list->UID = maxChunkListID + 1;
item->fixed->chunkListID = list->UID;
CondScopeProfiler;
fileTree.items.Append(item);
if (chunkList.storeValue(list->UID, list))
{
maxChunkListID++;
return true;
}
return false;
}
/** Dump the current information for all items in this index */
String IndexFile::dumpIndex(uint32 rev) const
{
rev = rev == 0 ? getCurrentRevision() : rev;
String ret = String::Print(TRANS("Revision: %u\n=>Header object\n"), rev);
ret += header->dump();
ret += TRANS("\n=> Catalog object\n");
// Start with the catalog
const Catalog * cat = getCatalogForRevision(rev);
if (!cat) return ret + TRANS("Catalog not found, stopping\n");
ret += cat->dump();
// Now deal with metadata
ret += TRANS("\n=> Metadata\n");
MetaData met;
if (cat->optionMetadata.fileOffset() && Load(met, cat->optionMetadata))
ret += met.dump();
// Then filter arguments
ret += TRANS("\n=> Filter arguments\n");
FilterArguments fa;
if (cat->optionFilterArg.fileOffset() && Load(fa, cat->optionFilterArg))
ret += fa.dump();
// Then with the fileTree
ret += TRANS("\n=> File tree\n");
FileTree ft(rev, true);
if (!Load(ft, cat->fileTree)) return ret += TRANS("File tree not found, stopping\n");
ret += ft.dump();
// Then the chunk lists
ret += TRANS("\n=> Chunk lists\n");
ChunkList cl;
Offset chunkListOffset = cat->chunkLists;
ret += String::Print(" ChunkList count: %u\n", cat->chunkListsCount);
for (uint32 i = 0; i < cat->chunkListsCount; i++)
{
if (Load(cl, chunkListOffset)) ret += cl.dump();
chunkListOffset.fileOffset(chunkListOffset.fileOffset() + cl.getSize());
}
// Read all previous multichunks now
ret += TRANS("\n=> Multichunks\n");
const Multichunk * mc;
Offset mcOffset = cat->multichunks;
ret += String::Print(" Multichunks count: %u\n", cat->multichunksCount);
for (uint32 i = 0; i < cat->multichunksCount; i++)
{
if (Map(mc, mcOffset)) ret += mc->dump();
mcOffset.fileOffset(mcOffset.fileOffset() + mc->getSize());
}
// Then dump the chunks array
ret += TRANS("\n=> Chunks\n");
Chunks chunks;
if (LoadRO(chunks, cat->chunks)) ret += chunks.dump();
return ret;
}
template <typename T>
static uint32 getListSize(T & list)
{
uint32 ret = 0;
typename T::IterT iter = list.getFirstIterator();
while (iter.isValid())
{
ret += (*iter)->getSize();
++iter;
}
return ret;
}
// In a signal handler we are not supposed to allocate or call non-async-signal-safe functions, so this must be called from the processing loop instead.
String IndexFile::dumpMemStat() const
{
uint64 total = 0, current = 0;
String ret = String::Print("Header size: %llu bytes\n", (current = header->getSize())); total += current;
ret += String::Print("Catalog size: %llu bytes\n", (current = catalog->getSize())); total += current;
ret += String::Print("Consolidated chunks size: %llu bytes\n", (current = consolidated.getSize())); total += current;
ret += String::Print("Chunks index table size: %llu bytes\n", (current = chunkIndices->getMemUsage())); total += current;
ret += String::Print("Readonly chunks list size: %llu bytes\n", (current = getListSize(chunkListRO))); total += current;
ret += String::Print("Chunks list size: %llu bytes\n", (current = getListSize(chunkList))); total += current;
ret += String::Print("Multichunks size: %llu bytes\n", (current = getListSize(multichunks))); total += current;
ret += String::Print("Readonly multichunks size: %llu bytes\n", (current = getListSize(multichunksRO))); total += current;
ret += String::Print("FilterArg size: %llu bytes\n", (current = arguments.getSize())); total += current;
ret += String::Print("Metadata size: %llu bytes\n", (current = metadata.getSize())); total += current;
ret += String::Print("FileTree size: %llu bytes\n", (current = fileTree.getSize())); total += current;
ret += String::Print("Readonly fileTree size: %llu bytes\n", (current = fileTreeRO.getSize())); total += current;
ret += String::Print("Total size: %llu bytes\n", total);
return ret;
}
// Create a new file from scratch.
String IndexFile::createNew(const String & filePath, const Utils::MemoryBlock & cipheredMasterKey, const String & backupPath)
{
File::Info info(filePath, true);
if (info.doesExist()) return TRANS("File already exists: ") + filePath;
if (cipheredMasterKey.getSize() != MainHeader::CipheredMasterKeySize) return TRANS("Invalid ciphered master key format");
file = new Stream::MemoryMappedFileStream(info.getFullPath(), true);
if (!file) return TRANS("Out of memory");
// Compute the size required for the metadata and filter arguments and header
metadata.info.Clear();
metadata.Append(backupPath);
metadata.Append(TRANS("Initial backup started on ") + Time::LocalTime::Now().toDate());
uint64 size = MainHeader::getSize();
if (!file->map(0, size)) return TRANS("Could not allocate file space for creation. Is disk full?");
// Ok, create the buffers now for this file
uint8 * filePtr = file->getBuffer();
if (!filePtr) return TRANS("Failed to get a pointer on the mapped area");
header = *MapAs(MainHeader, filePtr, 0);
new(header) MainHeader(); // Force construction of the object
catalog = new Catalog(0); // This is required for previous linking
memcpy(header->cipheredMasterKey, cipheredMasterKey.getConstBuffer(), ArrSz(header->cipheredMasterKey));
// Ok, header is written, let's unmap the area
readOnly = false;
maxChunkID = 0; maxChunkListID = 0; maxMultichunkID = 0; prevRevisionMaxChunkID = 0;
fileTree.revision = 1;
chunkIndices = new ChunkIndexMap(65535, &consolidated.chunks);
return "";
}
// Load a file from the given storage
String IndexFile::readFile(const String & filePath, const bool readWrite)
{
File::Info info(filePath, true);
if (!info.doesExist()) return TRANS("File does not exist: ") + filePath;
file = new Stream::MemoryMappedFileStream(info.getFullPath(), readWrite);
if (!file) return TRANS("Out of memory");
// Check if we can map the complete file (right now, it's much easier this way)
if (!file->map()) return TRANS("Could not open the given file (permission error ?): ") + filePath;
readOnly = !readWrite;
// Ok, create the buffers now for this file
uint8 * filePtr = file->getBuffer();
if (!filePtr) return TRANS("Failed to get a pointer on the mapped area");
header = *MapAs(MainHeader, filePtr, 0);
if (!header->isCorrect(file->fullSize())) return TRANS("Given index format not correct");
uint64 catalogOffset = header->catalogOffset.fileOffset();
if (!catalogOffset) catalogOffset = file->fullSize() - Catalog::getSize();
// Get the offset to the catalog for reading
catalog = *MapAs(Catalog, filePtr, catalogOffset);
if (!catalog->isCorrect(file->fullSize(), catalogOffset))
return TRANS("Catalog in file is corrupted.");
// Now we have a catalog, let's extract all the data we need
// Fuse the chunks
maxChunkID = 0;
consolidated.Clear();
chunkIndices = 0;
maxChunkListID = 0;
multichunksRO.clearTable();
multichunks.clearTable();
maxMultichunkID = 0;
arguments.arguments.Clear();
metadata.info.Clear();
Catalog * c = catalog;
while (c)
{
if (dumpLevel > 1) c->dump();
Chunks chunk(c->revision);
if (!chunk.loadReadOnly(filePtr + c->chunks.fileOffset(), file->fullSize() - c->chunks.fileOffset())) return String::Print(TRANS("Could not read the chunks for revision %u"), c->revision);
if (chunk.revision != c->revision) return String::Print(TRANS("Unexpected chunks revision %u for catalog revision %u"), chunk.revision, c->revision);
// Insert all chunks in the consolidated array (this can take some time)
for (size_t i = 0; i < chunk.chunks.getSize(); i++)
{
if (chunk.chunks[i].UID > maxChunkID) maxChunkID = chunk.chunks[i].UID;
// if (readWrite) consolidated.chunks.insertSorted(chunk.chunks[i]);
// else
consolidated.chunks.Append(chunk.chunks[i]); // Not sorted, we'll sort them later on
}
// Read all chunk lists now
uint64 chunkListOffset = c->chunkLists.fileOffset();
for (uint32 i = 0; i < c->chunkListsCount; i++)
{
ChunkList * cl = new ChunkList();
if (!cl) return TRANS("Out of memory");
if (!cl->load(filePtr + chunkListOffset, file->fullSize() - chunkListOffset)) return TRANS("Could not load chunk list");
if (!chunkListRO.storeValue(cl->UID, cl)) return String::Print(TRANS("Chunk list with UID %u already exists"), cl->UID);
if (cl->UID > maxChunkListID) maxChunkListID = cl->UID;
chunkListOffset += cl->getSize();
}
// Read all previous multichunks now
uint64 multichunkOffset = c->multichunks.fileOffset();
for (uint32 i = 0; i < c->multichunksCount; i++)
{
Multichunk * mc = MapAs(Multichunk, filePtr, multichunkOffset);
if (!mc->isCorrect(file->fullSize(), file->fullSize() - multichunkOffset)) return String::Print(TRANS("Invalid %u-th multichunk in revision %u"), i, c->revision);
if (mc->UID > maxMultichunkID) maxMultichunkID = mc->UID;
multichunksRO.storeValue(mc->UID, mc);
multichunkOffset += mc->getSize();
}
// Read filter arguments
if (!arguments.arguments.getSize() && c->optionFilterArg.fileOffset())
{
if (!arguments.load(filePtr + c->optionFilterArg.fileOffset(), file->fullSize() - c->optionFilterArg.fileOffset()))
return String::Print(TRANS("Could not read the filters' argument for revision %u"), c->revision);
if (!arguments.isCorrect(file->fullSize(), c->optionFilterArg.fileOffset()))
return String::Print(TRANS("Bad filters' arguments for revision %u"), c->revision);
}
// Read metadata
if (!metadata.info.getSize() && c->optionMetadata.fileOffset())
{
if (!metadata.load(filePtr + c->optionMetadata.fileOffset(), file->fullSize() - c->optionMetadata.fileOffset()))
return String::Print(TRANS("Could not read the metadata for revision %u"), c->revision);
if (!metadata.isCorrect(file->fullSize(), c->optionMetadata.fileOffset()))
return String::Print(TRANS("Bad metadata for revision %u"), c->revision);
}
c = c->previous.fileOffset() ? MapAs(Catalog, filePtr, c->previous.fileOffset()) : 0;
}
// Read the last filetree (that's the only required for now)
fileTree.Clear();
fileTreeRO.Clear();
if (!fileTreeRO.load(filePtr + catalog->fileTree.fileOffset(), file->fullSize() - catalog->fileTree.fileOffset()))
return String::Print(TRANS("Could not load the file tree for revision %u"), catalog->revision);
ChunkUIDSorter sorter;
if (!readWrite) Container::Algorithms<Container::PlainOldData<Chunk>::Array>::sortContainer(consolidated.chunks, sorter); // This is only using UID to sort
// else Container::Algorithms<Container::PlainOldData<Chunk>::Array>::sortContainer(consolidated.chunks, consolidated.chunks[0]); // This is using size and checksum to sort
else
{
chunkIndices = new ChunkIndexMap(consolidated.chunks.getSize() * 2, &consolidated.chunks);
// Insert all chunks index in the map
for (size_t i = 0; i < consolidated.chunks.getSize(); i++)
{
if (!chunkIndices->storeValue(consolidated.chunks.getElementAtUncheckedPosition(i).checksum, i))
return String::Print(TRANS("Could not insert the chunk at pos %u with UID: %u"), (uint32)i, consolidated.chunks.getElementAtUncheckedPosition(i).UID);
}
prevRevisionMaxChunkID = maxMultichunkID;
}
// Ok, done loading this file
return "";
}
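// The on-disk layout this loader walks, informally:
//   [MainHeader][rev 1 data][rev 1 Catalog][rev 2 data][rev 2 Catalog]...
// The header points to the most recent catalog (with a fallback assuming it
// sits at the very end of the file), and each Catalog links to its
// predecessor through the 'previous' offset, so revisions are traversed
// newest to oldest.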
const Chunk * IndexFile::findChunk(const uint32 uid) const
{
size_t pos = 0;
CondScopeProfiler;
Chunk item(uid);
if (readOnly)
{ // The consolidated array is sorted by UID, so we can do a O(log N) search here
ChunkUIDSorter sorter;
pos = Container::Algorithms<Container::PlainOldData<Chunk>::Array>::searchContainer(consolidated.chunks, sorter, item);
if (pos == consolidated.chunks.getSize() || consolidated.chunks.getElementAtPosition(pos).UID != uid) return 0;
}
else
{
// This is going to be very slow O(N)
pos = consolidated.chunks.indexOf(item);
if (pos == consolidated.chunks.getSize()) return 0;
}
return &consolidated.chunks.getElementAtPosition(pos);
}
// Close the file (and make sure mapping is actually correct)
String IndexFile::close()
{
if (!file || readOnly || (fileTree.items.getSize() == 0 && !metadata.modified))
{
file = 0; catalog = 0; header = 0; chunkIndices = 0;
fileTree.Clear(); fileTreeRO.Clear();
metadata.Reset(); arguments.Reset();
consolidated.Clear(); prevRevisionMaxChunkID = 0; maxChunkID = 0;
chunkListRO.clearTable(); chunkList.clearTable(); maxChunkListID = 0;
multichunks.clearTable(); multichunksRO.clearTable(); maxMultichunkID = 0;
return ""; // Nothing to do or no modifications done
}
Chunks local(0);
// O(n) operation over a potentially huge list but, hopefully, it stays in cache most of the time.
// Note that this temporarily duplicates the memory used by all the new chunks.
for (size_t i = 0; i < consolidated.chunks.getSize(); i++)
{
if (consolidated.chunks.getElementAtUncheckedPosition(i).multichunkID > prevRevisionMaxChunkID)
local.chunks.Append(consolidated.chunks.getElementAtUncheckedPosition(i));
}
// Get a coarse approximation of the required size for the file expansion required
uint64 requiredAdditionalSize = fileTree.getSize() + (arguments.modified ? arguments.getSize() : 0) + (metadata.modified ? metadata.getSize() : 0) + multichunks.getSize() * Multichunk::getSize()
+ local.getSize() + Catalog::getSize();
// We need to iterate the chunklists to know their size
ChunkLists::IterT cl = chunkList.getFirstIterator();
while (cl.isValid()) { requiredAdditionalSize += (*cl)->getSize(); ++cl; }
uint64 initialSize = file->fullSize();
uint64 initialCatalog = header->catalogOffset.fileOffset();
if (!initialCatalog && initialSize > header->getSize()) initialCatalog = initialSize - Catalog::getSize();
// Make sure we can allocate such size now on file
Offset prevOptMetadata = catalog->optionMetadata, prevFilterArg = catalog->optionFilterArg;
if (!file->map(0, file->fullSize() + requiredAdditionalSize))
return String::Print(TRANS("Cannot allocate %llu more bytes for the index file, is disk full?"), requiredAdditionalSize);
uint8 * filePtr = file->getBuffer();
// From this point on, the previous mappings are no longer valid, so we must not refer to them
//===========================================================================================================
uint32 prevRev = initialCatalog ? (*MapAs(Catalog, filePtr, initialCatalog)).revision : 0;
// Ok, start by writing all the new information
Catalog cat(prevRev + 1);
uint64 wo = initialSize;
cat.chunks.fileOffset(wo);
// Write the new chunk array
local.revision = prevRev + 1; // Fix the revision
local.write(filePtr + wo); wo += local.getSize();
// Write the chunk list
cat.chunkLists.fileOffset(wo);
cat.chunkListsCount = chunkList.getSize();
{
ChunkLists::IterT iter = chunkList.getFirstIterator();
while (iter.isValid())
{
(*iter)->write(filePtr + wo); wo += (*iter)->getSize();
++iter;
}
}
// Write the multichunk list
cat.multichunks.fileOffset(wo);
cat.multichunksCount = multichunks.getSize();
{
Multichunks::IterT iter = multichunks.getFirstIterator();
while (iter.isValid())
{
(*iter)->write(filePtr + wo); wo += (*iter)->getSize();
++iter;
}
}
// We need to write the file tree too
cat.fileTree.fileOffset(wo);
fileTree.write(filePtr + wo); wo += fileTree.getSize();
// Check if we need to write the arguments
if (arguments.modified)
{
cat.optionFilterArg.fileOffset(wo);
arguments.write(filePtr + wo); wo += arguments.getSize();
} else cat.optionFilterArg = prevFilterArg;
// Check if we need to write the metadata
if (metadata.modified)
{
cat.optionMetadata.fileOffset(wo);
metadata.write(filePtr + wo); wo += metadata.getSize();
} else cat.optionMetadata = prevOptMetadata;
cat.previous.fileOffset(initialCatalog);
// Now we can write the catalog
if (wo + cat.getSize() != file->fullSize()) return TRANS("Invalid file size computation");
cat.write(filePtr + wo);
file->unmap(true);
file = 0;
return "";
}
// Get the file base name for this multichunk
String Multichunk::getFileName() const
{
String ret; uint32 outSize = (uint32)(ArrSz(checksum) * 2);
if (!Encoding::encodeBase16(checksum, ArrSz(checksum), (uint8*)ret.Alloc(ArrSz(checksum)*2), outSize)) return "";
ret.releaseLock((int)outSize);
ret += ".#";
return ret;
}
Utils::OwnPtr<FileTree> IndexFile::getFileTree(const uint32 revision)
{
// Check easy first, with no destruction
if (!revision || !file) return 0;
if (!readOnly && revision == fileTree.revision) return fileTree;
if (revision == fileTreeRO.revision) return fileTreeRO;
if (revision > fileTree.revision && revision > fileTreeRO.revision) return 0;
// Ok, need to extract the other revisions
uint8 * filePtr = file->getBuffer();
Catalog * c = catalog;
while (c)
{
if (c->revision == revision)
{
Utils::OwnPtr<FileTree> ft(new FileTree(revision));
if (!ft->load(filePtr + c->fileTree.fileOffset(), file->fullSize() - c->fileTree.fileOffset())) return 0;
return ft;
}
c = MapAs(Catalog, filePtr, c->previous.fileOffset());
}
return 0;
}
MetaData IndexFile::getFirstMetaData()
{
const Catalog * c = catalog;
// Find the first catalog
while (c && c->previous.fileOffset()) Map(c, c->previous);
// Then extract the metadata from it
MetaData ret;
// Any error below simply leads to returning the (empty) metadata object
if (c && c->optionMetadata.fileOffset())
Load(ret, c->optionMetadata.fileOffset());
return ret;
}
}
#undef MapAs
namespace Helpers
{
CompressorToUse compressor;
// The entropy threshold
double entropyThreshold = 1.0;
// Excluded file list if found
String excludedFilePath;
// Included file list if found
String includedFilePath;
// The index file we are using
FileFormat::IndexFile indexFile;
// Binary-to-text helper: Base85 when 'base' is true, Base16 otherwise
String fromBinary(const uint8 * data, const uint32 size, const bool base)
{
String ret;
uint32 outSize = base ? (size * 5 + 3) / 4 : size * 2;
if (base)
{
if (!Encoding::encodeBase85(data, size, (uint8*)ret.Alloc(outSize), outSize)) return "";
} else if (!Encoding::encodeBase16(data, size, (uint8*)ret.Alloc(outSize), outSize)) return "";
ret.releaseLock((int)outSize);
return ret;
}
// Text-to-binary helper: Base85 when 'base' is true, Base16 otherwise
bool toBinary(const String & src, uint8 * data, uint32 & size, const bool base)
{
return base ? Encoding::decodeBase85((const unsigned char*)src, src.getLength(), data, size)
: Encoding::decodeBase16((const unsigned char*)src, src.getLength(), data, size);
}
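// Size note for the helpers above: Base85 expands N bytes into at most
// ceil(5 * N / 4) characters, hence the (size * 5 + 3) / 4 allocation, while
// Base16 always needs exactly 2 * N characters.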
// Encrypt a block in AES counter mode
bool AESCounterEncrypt(const KeyFactory::KeyT & nonceRandom, const ::Stream::InputStream & input, ::Stream::OutputStream & output)
{
KeyFactory::KeyT nonce = {0}, key = {0}, salt = {0}, plainText = {0}, cipherText = {0};
getKeyFactory().createNewKey(key);
getKeyFactory().getCurrentSalt(salt);
// Write the salt to the output stream
if (!output.write(salt)) return false;
getKeyFactory().createNewNonce(nonceRandom);
Crypto::OSSL_AES cipher;
cipher.setKey(key, (Crypto::BaseSymCrypt::BlockSize)ArrSz(key), 0, (Crypto::BaseSymCrypt::BlockSize)ArrSz(key));
for (uint64 i = 0; i < input.fullSize(); i += ArrSz(nonce))
{
// Increment the nonce including the counter
getKeyFactory().incrementNonce(nonce);
// Read the data
uint64 inputSize = input.read(plainText, (uint64)ArrSz(plainText));
if (inputSize == (uint64)-1) return false;
// Generate the keystream block for this counter value (CTR mode)
if (!Crypto::CTR_BlockProcess(cipher, nonce, salt)) return false;
// XOR the plaintext with the keystream to encrypt
Crypto::Xor(cipherText, plainText, salt, (size_t)inputSize);
if (output.write(cipherText, inputSize) != inputSize) return false;
}
return true;
}
// Decrypt a given block with AES counter mode.
bool AESCounterDecrypt(const KeyFactory::KeyT & nonceRandom, const ::Stream::InputStream & input, ::Stream::OutputStream & output)
{
// Get the salt from the system
KeyFactory::KeyT nonce = {0}, key = {0}, salt = {0}, plainText = {0}, cipherText = {0};
if (!input.read(salt)) return false;
getKeyFactory().setCurrentSalt(salt);
getKeyFactory().deriveNewKey(key);
getKeyFactory().createNewNonce(nonceRandom);
Crypto::OSSL_AES cipher;
cipher.setKey(key, (Crypto::BaseSymCrypt::BlockSize)ArrSz(key), 0, (Crypto::BaseSymCrypt::BlockSize)ArrSz(key));
memset(key, 0, ArrSz(key));
for (uint64 i = ArrSz(salt); i < input.fullSize(); i += ArrSz(nonce))
{
// Increment the nonce including the counter
getKeyFactory().incrementNonce(nonce);
// Read the data
uint64 inputSize = input.read(cipherText, (uint64)ArrSz(cipherText));
if (inputSize == (uint64)-1) return false;
// Generate the keystream block for this counter value (CTR encrypts even when decrypting)
if (!Crypto::CTR_BlockProcess(cipher, nonce, salt)) return false;
// XOR the ciphertext with the keystream to decrypt
Crypto::Xor(plainText, cipherText, salt, (size_t)inputSize);
if (output.write(plainText, inputSize) != inputSize) return false;
}
return true;
}
// Encrypt or decrypt using AES counter mode.
bool AESCounterProcess(const KeyFactory::KeyT & key, const KeyFactory::KeyT & nonceRandom, const ::Stream::InputStream & input, ::Stream::OutputStream & output, ProgressCallback & callback, uint8 * inputHash, uint8 * outputHash)
{
Crypto::OSSL_AES cipher;
cipher.setKey(key, (Crypto::BaseSymCrypt::BlockSize)ArrSz(key), 0, (Crypto::BaseSymCrypt::BlockSize)ArrSz(key));
KeyFactory::KeyT nonce = {}, inputData = {}, tmp = {}, outputData = {};
memcpy(&nonce[0], &nonceRandom[0], 8);
Crypto::OSSL_SHA256 hash;
hash.Start();
callback.progressed(inputHash ? ProgressCallback::Backup : ProgressCallback::Restore, "Processing: " DEFAULT_INDEX, 0, input.fullSize(), 1, 1, ProgressCallback::KeepLine);
uint64 counter = 0;
for (uint64 i = 0; i < input.fullSize(); i += ArrSz(nonce))
{
// Increment the nonce including the counter
counter++;
memcpy(&nonce[8], &counter, sizeof(counter));
// Read the data
uint64 inputSize = input.read(inputData, (uint64)ArrSz(inputData));
if (inputSize == (uint64)-1)
return callback.warn(inputHash ? ProgressCallback::Backup : ProgressCallback::Restore, DEFAULT_INDEX, "Could not read from file") && false;
callback.progressed(inputHash ? ProgressCallback::Backup : ProgressCallback::Restore, "Processing: " DEFAULT_INDEX, i, input.fullSize(), 1, 1, ProgressCallback::KeepLine);
// Hash the data
if (inputHash) hash.Hash(inputData, inputSize);
// And encrypt the data (yes, even for decrypting)
if (!Crypto::CTR_BlockProcess(cipher, nonce, tmp))
return callback.warn(inputHash ? ProgressCallback::Backup : ProgressCallback::Restore, DEFAULT_INDEX, "Could not encrypt or decrypt data") && false;
// Encrypt or decrypt the data
Crypto::Xor(outputData, inputData, tmp, (size_t)inputSize);
// Hash the output data if requested
if (outputHash) hash.Hash(outputData, inputSize);
if (output.write(outputData, inputSize) != inputSize)
return callback.warn(inputHash ? ProgressCallback::Backup : ProgressCallback::Restore, DEFAULT_INDEX, "Could not write to file") && false;
}
// Then store the hash back at the right position
if (inputHash) hash.Finalize(inputHash);
if (outputHash) hash.Finalize(outputHash);
callback.progressed(inputHash ? ProgressCallback::Backup : ProgressCallback::Restore, "Processing: " DEFAULT_INDEX, input.fullSize(), input.fullSize(), 1, 1, ProgressCallback::FlushLine);
// Ok, done
return true;
}
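// All three AES-CTR routines above share the same scheme: for each block i,
//   keystream[i] = AES_Encrypt(key, nonce || counter_i)
//   output[i]    = input[i] XOR keystream[i]
// Since XOR is its own inverse, the very same block process both encrypts and
// decrypts; only the optional input/output hashing distinguishes the backup
// and restore paths in AESCounterProcess.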
// Ensure the index file is available or recreate if not
String ensureValidIndexFile(const String & encryptedIndexPath, const String & localIndexPath, const KeyFactory::KeyT & key, ProgressCallback & callback, const bool forceDecryption)
{
File::Info encFile(encryptedIndexPath, true);
File::Info decFile(localIndexPath, true);
if (!encFile.doesExist() && forceDecryption) return TRANS("Encrypted file does not exist :") + encryptedIndexPath;
if (!encFile.doesExist())
{
if (!decFile.doesExist()) return TRANS("Both the encrypted and the cached index files are missing");
return ""; // Nothing we can do here
}
// Ok, now we need to decrypt the file or use the existing cached file
if (!forceDecryption)
{
if (decFile.modification == encFile.modification && encFile.size == (decFile.size + sizeof(FileFormat::CipheredIndexHeader)))
return ""; // Use the existing cached file (don't try to decrypt first)
}
// Ok, here, we must decrypt the index file so let's process!
callback.progressed(ProgressCallback::Restore, "Decrypting: " DEFAULT_INDEX, 0, encFile.size, 1, 1, ProgressCallback::KeepLine);
Stream::InputFileStream input(encFile.getFullPath());
Stream::OutputFileStream output(decFile.getFullPath());
FileFormat::CipheredIndexHeader indexHeader;
if (input.read(&indexHeader, sizeof(indexHeader)) != sizeof(indexHeader)) return TRANS("Could not read header in encrypted file: ") + encryptedIndexPath;
if (!indexHeader.isValid()) return TRANS("Invalid header from encrypted index");
// Extract the nonce from it now
KeyFactory::KeyT nonce = {}, hash = {};
memcpy(nonce, indexHeader.nonce, ArrSz(indexHeader.nonce));
// Then decrypt the file
if (!AESCounterProcess(key, nonce, input, output, callback, 0, hash)) return TRANS("Error while decrypting the index file: ") + encryptedIndexPath;
// Assert it's valid
if (memcmp(hash, indexHeader.hash, sizeof(hash))) return TRANS("The hash of the decrypted index does not match the input file.");
// Ok, done
return "";
}
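// The cache shortcut above treats the local index as fresh when its
// modification time matches the encrypted file's and the sizes differ by
// exactly sizeof(FileFormat::CipheredIndexHeader); anything else triggers a
// fresh decryption, which is then verified against the hash stored in the
// ciphered index header.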
static String getFilterArgument(CompressorToUse actualComp)
{
if (actualComp == Default) actualComp = compressor;
const char * compressorName[] = { "none", "zLib", "BSC" };
return String::Print("%d:%s:AES_CTR", File::MultiChunk::MaximumSize, compressorName[actualComp]);
}
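// As an illustration, with BSC selected this produces a string of the form
// "<MaximumSize>:BSC:AES_CTR", e.g. "1048576:BSC:AES_CTR" if
// File::MultiChunk::MaximumSize were 1 MiB (the actual value depends on the
// build); getFilterArgumentIndex below deduplicates these strings by index.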
static uint16 getFilterArgumentIndex(CompressorToUse actualComp, FileFormat::IndexFile * idxFile = 0)
{
const String & filterArg = getFilterArgument(actualComp);
FileFormat::IndexFile & idx = idxFile ? *idxFile : indexFile;
uint16 index = idx.getFilterArguments().getArgumentIndex(filterArg);
if (index == idx.getFilterArguments().arguments.getSize())
return idx.getFilterArguments().appendArgument(filterArg);
return index;
}
typedef Utils::ScopePtr<FileFormat::ChunkList> & ChunkListT;
bool closeMultiChunkBin(String & chunkPath, File::MultiChunk & multiChunk, uint64 * totalOutSize, ProgressCallback & callback, CompressorToUse actualComp, KeyFactory::KeyT & chunkHash)
{
bool worthTelling = multiChunk.getSize() > 2*1024*1024;
if (worthTelling && !callback.progressed(ProgressCallback::Backup, TRANS("Closing multichunk"), 0, 0, 0, 0, ProgressCallback::KeepLine))
return false;
// We need this for the nonce
multiChunk.getChecksum(chunkHash);
const String & multiChunkHash = Helpers::fromBinary(chunkHash, ArrSz(chunkHash), false);
// Then filter the multichunk, compress it and encrypt it
::Stream::OutputMemStream compressedStream;
if (worthTelling && !callback.progressed(ProgressCallback::Backup, TRANS("Compressing multichunk"), 0, 0, 0, 0, ProgressCallback::KeepLine))
return false;
if (actualComp == Default) actualComp = compressor;
switch (actualComp)
{
case ZLib:
{ // Compress the data
Compression::ZLib * zlib = new Compression::ZLib;
zlib->setCompressionFactor(1.0f);
// It owns the pointer
::Stream::CompressOutputStream compressor(compressedStream, zlib);
if (!multiChunk.writeHeaderTo(compressor)) return false;
if (!multiChunk.writeDataTo(compressor)) return false;
break;
}
case BSC:
{ // Compress the data
::Stream::CompressOutputStream compressor(compressedStream, new Compression::BSCLib);
if (!multiChunk.writeHeaderTo(compressor)) return false;
if (!multiChunk.writeDataTo(compressor)) return false;
break;
}
case None:
{ // Avoid compressing the data
if (!multiChunk.writeHeaderTo(compressedStream)) return false;
if (!multiChunk.writeDataTo(compressedStream)) return false;
break;
}
default:
return false;
}