Skip to content

Commit

Permalink
Merge pull request #1460 from ardriveapp/PE-4892-refactor-the-progress-update-uploading-files
Browse files Browse the repository at this point in the history

PE-4892: refactor the progress update uploading files
  • Loading branch information
thiagocarvalhodev authored Nov 7, 2023
2 parents dc05ee8 + 93023f1 commit 9fcbc1b
Show file tree
Hide file tree
Showing 17 changed files with 489 additions and 621 deletions.
8 changes: 8 additions & 0 deletions lib/blocs/drive_detail/drive_detail_cubit.dart
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ import 'dart:async';

import 'package:ardrive/authentication/ardrive_auth.dart';
import 'package:ardrive/blocs/blocs.dart';
import 'package:ardrive/core/activity_tracker.dart';
import 'package:ardrive/entities/constants.dart';
import 'package:ardrive/models/models.dart';
import 'package:ardrive/pages/pages.dart';
Expand All @@ -24,6 +25,7 @@ class DriveDetailCubit extends Cubit<DriveDetailState> {
final DriveDao _driveDao;
final ConfigService _configService;
final ArDriveAuth _auth;
final ActivityTracker _activityTracker;

StreamSubscription? _folderSubscription;
final _defaultAvailableRowsPerPage = [25, 50, 75, 100];
Expand All @@ -41,8 +43,10 @@ class DriveDetailCubit extends Cubit<DriveDetailState> {
required ProfileCubit profileCubit,
required DriveDao driveDao,
required ConfigService configService,
required ActivityTracker activityTracker,
required ArDriveAuth auth,
}) : _profileCubit = profileCubit,
_activityTracker = activityTracker,
_driveDao = driveDao,
_auth = auth,
_configService = configService,
Expand Down Expand Up @@ -113,6 +117,10 @@ class DriveDetailCubit extends Cubit<DriveDetailState> {
),
_profileCubit.stream.startWith(ProfileCheckingAvailability()),
(drive, folderContents, _) async {
if (_activityTracker.isUploading) {
return;
}

final state = this.state is DriveDetailLoadSuccess
? this.state as DriveDetailLoadSuccess
: null;
Expand Down
2 changes: 2 additions & 0 deletions lib/blocs/drives/drives_cubit.dart
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ import 'dart:async';

import 'package:ardrive/authentication/ardrive_auth.dart';
import 'package:ardrive/blocs/blocs.dart';
import 'package:ardrive/core/activity_tracker.dart';
import 'package:ardrive/models/models.dart';
import 'package:ardrive/utils/user_utils.dart';
import 'package:ardrive_utils/ardrive_utils.dart';
Expand All @@ -26,6 +27,7 @@ class DrivesCubit extends Cubit<DrivesState> {
this.initialSelectedDriveId,
required ProfileCubit profileCubit,
required DriveDao driveDao,
required ActivityTracker activityTracker,
}) : _profileCubit = profileCubit,
_driveDao = driveDao,
_auth = auth,
Expand Down
203 changes: 48 additions & 155 deletions lib/blocs/upload/upload_cubit.dart
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import 'package:ardrive/blocs/blocs.dart';
import 'package:ardrive/blocs/upload/limits.dart';
import 'package:ardrive/blocs/upload/models/models.dart';
import 'package:ardrive/blocs/upload/upload_file_checker.dart';
import 'package:ardrive/core/activity_tracker.dart';
import 'package:ardrive/core/upload/cost_calculator.dart';
import 'package:ardrive/core/upload/uploader.dart';
import 'package:ardrive/entities/file_entity.dart';
Expand Down Expand Up @@ -45,6 +46,7 @@ class UploadCubit extends Cubit<UploadState> {
final UploadFileChecker _uploadFileChecker;
final ArDriveAuth _auth;
final ArDriveUploadPreparationManager _arDriveUploadManager;
final ActivityTracker _activityTracker;

late bool uploadFolders;
late Drive _targetDrive;
Expand Down Expand Up @@ -104,6 +106,7 @@ class UploadCubit extends Cubit<UploadState> {
required UploadFileChecker uploadFileChecker,
required ArDriveAuth auth,
required ArDriveUploadPreparationManager arDriveUploadManager,
required ActivityTracker activityTracker,
this.folder,
this.uploadFolders = false,
}) : _profileCubit = profileCubit,
Expand All @@ -114,6 +117,7 @@ class UploadCubit extends Cubit<UploadState> {
_pst = pst,
_auth = auth,
_arDriveUploadManager = arDriveUploadManager,
_activityTracker = activityTracker,
super(UploadPreparationInProgress());

Future<void> startUploadPreparation({
Expand Down Expand Up @@ -569,6 +573,8 @@ class UploadCubit extends Cubit<UploadState> {
entities.add((fileMetadata, file.ioFile));
}

_activityTracker.setUploading(true);

final uploadController = await ardriveUploader.uploadEntities(
entities: entities,
wallet: _auth.currentUser.wallet,
Expand Down Expand Up @@ -598,133 +604,20 @@ class UploadCubit extends Cubit<UploadState> {
},
);

uploadController.onCompleteTask((task) {
_saveEntityOnDB(task);
});

uploadController.onDone(
(tasks) async {
logger.d('Upload finished');
logger.d('Upload folders and files finished');

if (tasks.any((element) => element.status == UploadStatus.failed)) {
logger.e('Error uploading');
// if any of the files failed, we should throw an error
addError(Exception('Error uploading'));
}

final tasksWithSuccess = tasks
.where((element) => element.status == UploadStatus.complete)
.toList();

try {
final List<ARFSFolderUploadMetatadata> foldersMetadata = [];
final List<ARFSFileUploadMetadata> filesMetadata = [];

for (var metadata
in tasksWithSuccess.expand((element) => element.content ?? [])) {
if (metadata is ARFSFolderUploadMetatadata) {
foldersMetadata.add(metadata);
} else if (metadata is ARFSFileUploadMetadata) {
filesMetadata.add(metadata);
}
}

for (var metadata in foldersMetadata) {
final revisionAction = conflictingFolders.contains(metadata.name)
? RevisionAction.uploadNewVersion
: RevisionAction.create;

final entity = FolderEntity(
driveId: metadata.driveId,
id: metadata.id,
name: metadata.name,
parentFolderId: metadata.parentFolderId,
);

if (metadata.metadataTxId == null) {
logger.e('Metadata tx id is null');
throw Exception('Metadata tx id is null');
}

entity.txId = metadata.metadataTxId!;

final folderPath = foldersByPath.values
.firstWhere((element) =>
element.name == metadata.name &&
element.parentFolderId == metadata.parentFolderId)
.path;

await _driveDao.transaction(() async {
await _driveDao.createFolder(
driveId: _targetDrive.id,
parentFolderId: metadata.parentFolderId,
folderName: metadata.name,
path: folderPath,
folderId: metadata.id,
);
await _driveDao.insertFolderRevision(
entity.toRevisionCompanion(
performedAction: revisionAction,
),
);
});
}

logger.d('Files metadata: ${filesMetadata.length}');

for (var file in filesMetadata) {
final revisionAction = conflictingFiles.values.contains(file.id)
? RevisionAction.uploadNewVersion
: RevisionAction.create;

logger.d('File id: ${file.id}');
logger
.d('Reusing id? ${conflictingFiles.values.contains(file.id)}');

final entity = FileEntity(
dataContentType: file.dataContentType,
dataTxId: file.dataTxId,
driveId: file.driveId,
id: file.id,
lastModifiedDate: file.lastModifiedDate,
name: file.name,
parentFolderId: file.parentFolderId,
size: file.size,
);

if (file.metadataTxId == null) {
logger.e('Metadata tx id is null');
throw Exception('Metadata tx id is null');
}

entity.txId = file.metadataTxId!;

if (revisionAction == RevisionAction.uploadNewVersion) {
final existingFile = await _driveDao
.fileById(driveId: driveId, fileId: file.id)
.getSingle();

final filePath = existingFile.path;
await _driveDao.writeFileEntity(entity, filePath);
await _driveDao.insertFileRevision(
entity.toRevisionCompanion(
performedAction: revisionAction,
),
);
} else {
logger.d(files.first.getIdentifier());
final parentFolderPath = (await _driveDao
.folderById(
driveId: driveId, folderId: file.parentFolderId)
.getSingle())
.path;
await _driveDao.writeFileEntity(entity, parentFolderPath);
await _driveDao.insertFileRevision(
entity.toRevisionCompanion(
performedAction: revisionAction,
),
);
}
}
} catch (e) {
logger.e('Error saving folder', e);
}
emit(UploadComplete());

unawaited(_profileCubit.refreshBalance());
Expand Down Expand Up @@ -774,6 +667,8 @@ class UploadCubit extends Cubit<UploadState> {
uploadFiles.add((args, file.ioFile));
}

_activityTracker.setUploading(true);

/// Creates the uploader and starts the upload.
final uploadController = await ardriveUploader.uploadFiles(
files: uploadFiles,
Expand Down Expand Up @@ -810,26 +705,22 @@ class UploadCubit extends Cubit<UploadState> {
(tasks) async {
logger.d('Upload finished');

for (var task in tasks) {
logger.d('Task status: ${task.status}');
}

if (tasks.any((element) => element.status == UploadStatus.failed)) {
// if any of the files failed, we should throw an error
logger.e('Error uploading');
// if any of the files failed, we should throw an error
addError(Exception('Error uploading'));
}

logger.d('Saving files on database');

for (var task in tasks
.where((element) => element.status == UploadStatus.complete)) {
await _saveEntityOnDB(task);
}

unawaited(_profileCubit.refreshBalance());

// all files are uploaded
emit(UploadComplete());
},
);

uploadController.onCompleteTask((task) {
unawaited(_saveEntityOnDB(task));
});
}

Future<void> _verifyIfUploadContainsLargeFilesUsingTurbo() async {
Expand All @@ -856,7 +747,7 @@ class UploadCubit extends Cubit<UploadState> {
if (metadata is ARFSFileUploadMetadata) {
final fileMetadata = metadata;

final revisionAction = conflictingFiles.containsKey(fileMetadata.name)
final revisionAction = conflictingFiles.values.contains(metadata.id)
? RevisionAction.uploadNewVersion
: RevisionAction.create;

Expand All @@ -879,7 +770,7 @@ class UploadCubit extends Cubit<UploadState> {

entity.txId = fileMetadata.metadataTxId!;

await _driveDao.transaction(() async {
_driveDao.transaction(() async {
// If path is a blob from drag and drop, use file name. Else use the path field from folder upload
// TODO: Changed this logic. PLEASE REVIEW IT.
final filePath = '${_targetFolder.path}/${metadata.name}';
Expand All @@ -892,43 +783,45 @@ class UploadCubit extends Cubit<UploadState> {
);
});
} else if (metadata is ARFSFolderUploadMetatadata) {
final folderMetadata = metadata;

final revisionAction =
conflictingFolders.contains(folderMetadata.name)
? RevisionAction.uploadNewVersion
: RevisionAction.create;
final revisionAction = conflictingFolders.contains(metadata.name)
? RevisionAction.uploadNewVersion
: RevisionAction.create;

final entity = FolderEntity(
driveId: folderMetadata.driveId,
id: folderMetadata.id,
name: folderMetadata.name,
parentFolderId: folderMetadata.parentFolderId,
driveId: metadata.driveId,
id: metadata.id,
name: metadata.name,
parentFolderId: metadata.parentFolderId,
);

await _driveDao.transaction(() async {
final id = await _driveDao.createFolder(
driveId: _targetDrive.id,
parentFolderId: folderMetadata.parentFolderId,
folderName: folderMetadata.name,
path: '${_targetFolder.path}/${metadata.name}',
folderId: folderMetadata.id,
);
if (metadata.metadataTxId == null) {
logger.e('Metadata tx id is null');
throw Exception('Metadata tx id is null');
}

logger.d('Folder created with id: $id');
entity.txId = metadata.metadataTxId!;

entity.txId = metadata.metadataTxId!;
final folderPath = foldersByPath.values
.firstWhere((element) =>
element.name == metadata.name &&
element.parentFolderId == metadata.parentFolderId)
.path;

await _driveDao.transaction(() async {
await _driveDao.createFolder(
driveId: _targetDrive.id,
parentFolderId: metadata.parentFolderId,
folderName: metadata.name,
path: folderPath,
folderId: metadata.id,
);
await _driveDao.insertFolderRevision(
entity.toRevisionCompanion(
performedAction: revisionAction,
),
);
});
}

// all files are uploaded
emit(UploadComplete());
}
}
}
Expand Down
2 changes: 1 addition & 1 deletion lib/blocs/upload/upload_state.dart
Original file line number Diff line number Diff line change
Expand Up @@ -227,7 +227,7 @@ class UploadInProgressUsingNewUploader extends UploadState {
});

@override
List<Object?> get props => [progress, totalProgress, equatableBust];
List<Object?> get props => [equatableBust];
}

class UploadFailure extends UploadState {
Expand Down
Loading

0 comments on commit 9fcbc1b

Please sign in to comment.