From 49c2157cc4cba633f55ff87bc9012cb845643adf Mon Sep 17 00:00:00 2001
From: Prakash Senthil Vel <23444145+prakashsvmx@users.noreply.github.com>
Date: Tue, 4 Jun 2024 09:06:09 +0530
Subject: [PATCH] refactor `composeObject` api to ts (#1291)
---
README.md | 2 +-
docs/API.md | 12 +-
examples/compose-object-test-example.js | 136 -----------
examples/compose-object-test-example.mjs | 103 ++++++++
.../{compose-object.js => compose-object.mjs} | 14 +-
src/internal/client.ts | 203 +++++++++++++++-
src/internal/type.ts | 9 +
src/internal/xml-parser.ts | 5 +
src/minio.d.ts | 17 +-
src/minio.js | 228 +-----------------
src/transformers.js | 8 -
src/xml-parsers.js | 20 --
tests/unit/test.js | 28 ++-
13 files changed, 343 insertions(+), 442 deletions(-)
delete mode 100644 examples/compose-object-test-example.js
create mode 100644 examples/compose-object-test-example.mjs
rename examples/{compose-object.js => compose-object.mjs} (86%)
diff --git a/README.md b/README.md
index 9cde67a9..87b0c639 100644
--- a/README.md
+++ b/README.md
@@ -237,7 +237,7 @@ The complete API Reference is available here:
- [remove-object-tagging.mjs](https://github.com/minio/minio-js/blob/master/examples/remove-object-tagging.js)
- [set-object-legal-hold.mjs](https://github.com/minio/minio-js/blob/master/examples/set-object-legalhold.mjs)
- [get-object-legal-hold.mjs](https://github.com/minio/minio-js/blob/master/examples/get-object-legal-hold.mjs)
-- [compose-object.js](https://github.com/minio/minio-js/blob/master/examples/compose-object.js)
+- [compose-object.mjs](https://github.com/minio/minio-js/blob/master/examples/compose-object.mjs)
- [select-object-content.mjs](https://github.com/minio/minio-js/blob/master/examples/select-object-content.mjs)
#### Presigned Operations
diff --git a/docs/API.md b/docs/API.md
index 53233b1b..d4e96de9 100644
--- a/docs/API.md
+++ b/docs/API.md
@@ -1483,7 +1483,7 @@ const legalholdStatus = await minioClient.setObjectLegalHold('bucketName', 'obje
-### composeObject(destObjConfig, sourceObjectList [, callback])
+### composeObject(destObjConfig, sourceObjectList)
Compose an object from parts
@@ -1493,7 +1493,6 @@ Compose an object from parts
| ------------------ | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `destObjConfig` | _object_ | Destination Object configuration of the type [CopyDestinationOptions](https://github.com/minio/minio-js/blob/master/src/helpers.js) |
| `sourceObjectList` | _object[]_ | Array of object(parts) source to compose into an object. Each part configuration should be of type [CopySourceOptions](https://github.com/minio/minio-js/blob/master/src/helpers.js) |
-| `callback(err)` | _function_ | Callback function is called with non `null` value in case of error. If no callback is passed, a `Promise` is returned. |
**Example 1**
@@ -1527,14 +1526,7 @@ const destOption = new minio.CopyDestinationOptions({
})
//using Promise style.
-const composePromise = minioClient.composeObject(destOption, sourceList)
-composePromise
- .then((result) => {
- console.log('Success...')
- })
- .catch((e) => {
- console.log('error', e)
- })
+await minioClient.composeObject(destOption, sourceList)
```
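Since the callback overload was removed, failures now surface as promise rejections. A minimal error-handling sketch, reusing the `destOption` and `sourceList` from the example above:

```js
// Minimal sketch: assumes `minioClient`, `destOption` and `sourceList`
// are set up as in the example above.
try {
  await minioClient.composeObject(destOption, sourceList)
  console.log('Success...')
} catch (e) {
  console.log('error', e)
}
```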
diff --git a/examples/compose-object-test-example.js b/examples/compose-object-test-example.js
deleted file mode 100644
index 8beb0324..00000000
--- a/examples/compose-object-test-example.js
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * MinIO Javascript Library for Amazon S3 Compatible Cloud Storage, (C) 2021 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
-// are dummy values, please replace them with original values.
-import fs from 'node:fs'
-import os from 'node:os'
-
-import * as Minio from 'minio'
-import splitFile from 'split-file'
-
-const s3Client = new Minio.Client({
- endPoint: 's3.amazonaws.com',
- accessKey: 'YOUR-ACCESSKEYID',
- secretKey: 'YOUR-SECRETACCESSKEY',
-})
-
-const oneMB = 1024 * 1024
-
-// Create a bucket prior to running: mc mb local/source-bucket
-function sampleRunComposeObject() {
- const tmpDir = os.tmpdir()
-
- const bucketName = 'source-bucket'
- // generate 100 MB buffer and write to a file.
- const local100mbFileToBeSplitAndComposed = Buffer.alloc(100 * oneMB, 0)
-
- const composedObjName = '_100-mb-file-to-test-compose'
- const tmpSubDir = `${tmpDir}/compose`
- const fileToSplit = `${tmpSubDir}/${composedObjName}`
- const partObjNameList = []
-
- fs.mkdir(tmpSubDir, { recursive: true }, function (err) {
- if (err) {
- console.log(err)
- } else {
- console.log('New Temp directory successfully created.')
- }
- })
-
- try {
- fs.writeFileSync(fileToSplit, local100mbFileToBeSplitAndComposed)
- console.log('Written 100 MB File ')
- // 100 MB split into 26 MB part size. ( just to test unequal parts ). But change as required.
-
- splitFile
- .splitFileBySize(fileToSplit, 26 * oneMB)
- .then((names) => {
- console.log('Split and write 100 MB File(s) ', names)
- const putPartRequests = names.map((partFileName) => {
- const partObjName = partFileName.slice((tmpSubDir + '/').length)
- partObjNameList.push(partObjName)
- return s3Client.fPutObject(bucketName, partObjName, partFileName, {})
- })
-
- Promise.all(putPartRequests)
- .then(() => {
- console.log('Uploaded part Files: ', names)
- const sourcePartObjList = partObjNameList.map((partObjName) => {
- return new Minio.CopySourceOptions({
- Bucket: bucketName,
- Object: partObjName,
- })
- })
-
- const destObjConfig = new Minio.CopyDestinationOptions({
- Bucket: bucketName,
- Object: composedObjName,
- })
-
- s3Client
- .composeObject(destObjConfig, sourcePartObjList)
- .then(() => {
- console.log('Composed to a single file: ', composedObjName)
-
- /** Begin Clean up ***/
- // To verify that the parts are uploaded properly, comment the below code blocks and verify
- const sourcePartObjList = partObjNameList.map((partObjName) => {
- return s3Client.removeObject(bucketName, partObjName)
- })
-
- Promise.all(sourcePartObjList)
- .then(() => {
- console.log('Removed source parts: ')
-
- // Uncomment to remove the composed object itself. commented for verification.
- /*
- s3Client.removeObject(bucketName, composedObjName).then(()=>{
- console.log("Clean up: Removed the composed Object ")
- }).catch(()=>{
- console.log("Error removing composed object", er)
- })
- */
- })
- .catch((er) => {
- console.log('Error removing parts used in composing', er)
- })
-
- /** End Clean up **/
-
- // Clean up generated parts locally
- fs.rmSync(tmpSubDir, { recursive: true, force: true })
- console.log('Clean up temp parts directory : ')
- })
- .catch((e) => {
- console.log('Error Composing parts into an object', e)
- })
- })
- .catch((e) => {
- console.log('Error Uploading parts ', e)
- })
- })
- .catch((e) => {
- // this is a client error not related to compose object
- console.log('Error Splitting files into parts ', e)
- })
- } catch (err) {
- // this is a client error not related to compose object
- console.log('Error Creating local files ', err)
- }
-}
-
-sampleRunComposeObject()
diff --git a/examples/compose-object-test-example.mjs b/examples/compose-object-test-example.mjs
new file mode 100644
index 00000000..8b6194a1
--- /dev/null
+++ b/examples/compose-object-test-example.mjs
@@ -0,0 +1,103 @@
+/*
+ * MinIO Javascript Library for Amazon S3 Compatible Cloud Storage, (C) 2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+// are dummy values, please replace them with original values.
+import fs from 'node:fs'
+import os from 'node:os'
+
+import * as Minio from 'minio'
+import splitFile from 'split-file'
+
+const s3Client = new Minio.Client({
+ endPoint: 's3.amazonaws.com',
+ accessKey: 'YOUR-ACCESSKEYID',
+ secretKey: 'YOUR-SECRETACCESSKEY',
+})
+
+const bucketName = 'my-bucketname'
+const composedObjName = 'my-objectname'
+
+const oneMB = 1024 * 1024
+
+// Create a bucket prior to running: mc mb local/my-bucketname
+const sampleRunComposeObject = async () => {
+ const tmpDir = os.tmpdir()
+
+ // generate 100 MB buffer and write to a file.
+ const local100mbFileToBeSplitAndComposed = Buffer.alloc(100 * oneMB, 0)
+
+ const tmpSubDir = `${tmpDir}/compose`
+ const fileToSplit = `${tmpSubDir}/${composedObjName}`
+ const partObjNameList = []
+
+  // Create the temp directory synchronously so it is guaranteed to exist
+  // before writeFileSync runs below (the async fs.mkdir callback fires too late).
+  fs.mkdirSync(tmpSubDir, { recursive: true })
+  console.log('New temp directory successfully created.')
+
+ try {
+ fs.writeFileSync(fileToSplit, local100mbFileToBeSplitAndComposed)
+    console.log('Wrote 100 MB file.')
+    // Split the 100 MB file into 26 MB parts (intentionally unequal, to exercise uneven part sizes); change as required.
+
+ const names = await splitFile.splitFileBySize(fileToSplit, 26 * oneMB)
+
+ console.log('Split and write 100 MB File(s) ', names)
+ const putPartRequests = names.map((partFileName) => {
+ const partObjName = partFileName.slice((tmpSubDir + '/').length)
+ partObjNameList.push(partObjName)
+ return s3Client.fPutObject(bucketName, partObjName, partFileName, {})
+ })
+ await Promise.all(putPartRequests)
+
+ console.log('Uploaded part Files: ', names)
+ const sourcePartObjList = partObjNameList.map((partObjName) => {
+ return new Minio.CopySourceOptions({
+ Bucket: bucketName,
+ Object: partObjName,
+ })
+ })
+
+ const destObjConfig = new Minio.CopyDestinationOptions({
+ Bucket: bucketName,
+ Object: composedObjName,
+ })
+
+ try {
+ const result = await s3Client.composeObject(destObjConfig, sourcePartObjList)
+ console.log(result)
+ console.log('Composed to a single file: ', composedObjName)
+ } catch (err) {
+      console.log('Error in composeObject: ', err.message)
+ } finally {
+      console.log('Removing source parts...')
+      await Promise.all(partObjNameList.map((partObjName) => s3Client.removeObject(bucketName, partObjName)))
+ // Clean up generated parts locally
+ fs.rmSync(tmpSubDir, { recursive: true, force: true })
+      console.log('Cleaned up temp parts directory.')
+ }
+ } catch (e) {
+ console.log('Error Creating local files ', e)
+ }
+}
+
+sampleRunComposeObject()
diff --git a/examples/compose-object.js b/examples/compose-object.mjs
similarity index 86%
rename from examples/compose-object.js
rename to examples/compose-object.mjs
index b8b08d6f..fa953051 100644
--- a/examples/compose-object.js
+++ b/examples/compose-object.mjs
@@ -25,7 +25,8 @@ const s3Client = new Minio.Client({
secretKey: 'YOUR-SECRETACCESSKEY',
})
-const bucketName = 'source-bucket'
+const bucketName = 'my-bucketname'
+const objectName = 'my-objectname'
const sourceList = [
new Minio.CopySourceOptions({
@@ -51,7 +52,7 @@ const sourceList = [
const destOption = new Minio.CopyDestinationOptions({
Bucket: bucketName,
- Object: '100MB.zip',
+ Object: objectName,
/** Other possible options */
/* Encryption:{
type:Helpers.ENCRYPTION_TYPES.KMS,
@@ -67,11 +68,4 @@ const destOption = new Minio.CopyDestinationOptions({
*/
})
-const composePromise = s3Client.composeObject(destOption, sourceList)
-composePromise
- .then((result) => {
- console.log('ComposeObject Success...', result)
- })
- .catch((e) => {
- console.log('composeObject Promise Error', e)
- })
+await s3Client.composeObject(destOption, sourceList)
diff --git a/src/internal/client.ts b/src/internal/client.ts
index decc795d..89433f73 100644
--- a/src/internal/client.ts
+++ b/src/internal/client.ts
@@ -29,6 +29,7 @@ import { fsp, streamPromise } from './async.ts'
import { CopyConditions } from './copy-conditions.ts'
import { Extensions } from './extensions.ts'
import {
+ calculateEvenSplits,
extractMetadata,
getContentLength,
getSourceVersionId,
@@ -50,6 +51,8 @@ import {
isValidPrefix,
isVirtualHostStyle,
makeDateLong,
+ PART_CONSTRAINTS,
+ partsRequired,
prependXAMZMeta,
readableStream,
sanitizeETag,
@@ -103,16 +106,18 @@ import type {
Tags,
Transport,
UploadedObjectInfo,
+ UploadPartConfig,
VersionIdentificator,
} from './type.ts'
import type { ListMultipartResult, UploadedPart } from './xml-parser.ts'
-import * as xmlParsers from './xml-parser.ts'
import {
parseCompleteMultipart,
parseInitiateMultipart,
parseObjectLegalHoldConfig,
parseSelectObjectContentResponse,
+ uploadPartParser,
} from './xml-parser.ts'
+import * as xmlParsers from './xml-parser.ts'
const xml = new xml2js.Builder({ renderOpts: { pretty: false }, headless: true })
@@ -2347,6 +2352,7 @@ export class TypedClient {
const body = await readAsString(res)
return xmlParsers.parseLifecycleConfig(body)
}
+
async setBucketEncryption(bucketName: string, encryptionConfig?: EncryptionConfig): Promise<void> {
if (!isValidBucketName(bucketName)) {
throw new errors.InvalidBucketNameError('Invalid bucket name: ' + bucketName)
@@ -2600,4 +2606,199 @@ export class TypedClient {
const [source, dest] = allArgs as [CopySourceOptions, CopyDestinationOptions]
return await this.copyObjectV2(source, dest)
}
+
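+  /**
+   * Internal helper for composeObject: performs an UploadPartCopy request
+   * (PUT with uploadId/partNumber query; the x-amz-copy-source headers are
+   * supplied by the caller) and returns the part's sanitized ETag.
+   */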
+ async uploadPart(partConfig: {
+ bucketName: string
+ objectName: string
+ uploadID: string
+ partNumber: number
+ headers: RequestHeaders
+ }) {
+ const { bucketName, objectName, uploadID, partNumber, headers } = partConfig
+
+ const method = 'PUT'
+ const query = `uploadId=${uploadID}&partNumber=${partNumber}`
+ const requestOptions = { method, bucketName, objectName: objectName, query, headers }
+
+ const res = await this.makeRequestAsync(requestOptions)
+ const body = await readAsString(res)
+ const partRes = uploadPartParser(body)
+
+ return {
+ etag: sanitizeETag(partRes.ETag),
+ key: objectName,
+ part: partNumber,
+ }
+ }
+
+ async composeObject(
+ destObjConfig: CopyDestinationOptions,
+ sourceObjList: CopySourceOptions[],
+  ): Promise<boolean | CopyObjectResult> {
+ const sourceFilesLength = sourceObjList.length
+
+ if (!Array.isArray(sourceObjList)) {
+      throw new errors.InvalidArgumentError('sourceConfig should be an array of CopySourceOptions')
+ }
+ if (!(destObjConfig instanceof CopyDestinationOptions)) {
+      throw new errors.InvalidArgumentError('destConfig should be of type CopyDestinationOptions')
+ }
+
+ if (sourceFilesLength < 1 || sourceFilesLength > PART_CONSTRAINTS.MAX_PARTS_COUNT) {
+ throw new errors.InvalidArgumentError(
+ `"There must be as least one and up to ${PART_CONSTRAINTS.MAX_PARTS_COUNT} source objects.`,
+ )
+ }
+
+ for (let i = 0; i < sourceFilesLength; i++) {
+ const sObj = sourceObjList[i] as CopySourceOptions
+ if (!sObj.validate()) {
+ return false
+ }
+ }
+
+ if (!(destObjConfig as CopyDestinationOptions).validate()) {
+ return false
+ }
+
+ const getStatOptions = (srcConfig: CopySourceOptions) => {
+ let statOpts = {}
+ if (!_.isEmpty(srcConfig.VersionID)) {
+ statOpts = {
+ versionId: srcConfig.VersionID,
+ }
+ }
+ return statOpts
+ }
+ const srcObjectSizes: number[] = []
+ let totalSize = 0
+ let totalParts = 0
+
+ const sourceObjStats = sourceObjList.map((srcItem) =>
+ this.statObject(srcItem.Bucket, srcItem.Object, getStatOptions(srcItem)),
+ )
+
+ const srcObjectInfos = await Promise.all(sourceObjStats)
+
+ const validatedStats = srcObjectInfos.map((resItemStat, index) => {
+ const srcConfig: CopySourceOptions | undefined = sourceObjList[index]
+
+ let srcCopySize = resItemStat.size
+ // Check if a segment is specified, and if so, is the
+ // segment within object bounds?
+ if (srcConfig && srcConfig.MatchRange) {
+ // Since range is specified,
+ // 0 <= src.srcStart <= src.srcEnd
+ // so only invalid case to check is:
+ const srcStart = srcConfig.Start
+ const srcEnd = srcConfig.End
+ if (srcEnd >= srcCopySize || srcStart < 0) {
+ throw new errors.InvalidArgumentError(
+ `CopySrcOptions ${index} has invalid segment-to-copy [${srcStart}, ${srcEnd}] (size is ${srcCopySize})`,
+ )
+ }
+ srcCopySize = srcEnd - srcStart + 1
+ }
+
+ // Only the last source may be less than `absMinPartSize`
+ if (srcCopySize < PART_CONSTRAINTS.ABS_MIN_PART_SIZE && index < sourceFilesLength - 1) {
+ throw new errors.InvalidArgumentError(
+ `CopySrcOptions ${index} is too small (${srcCopySize}) and it is not the last part.`,
+ )
+ }
+
+ // Is data to copy too large?
+ totalSize += srcCopySize
+ if (totalSize > PART_CONSTRAINTS.MAX_MULTIPART_PUT_OBJECT_SIZE) {
+ throw new errors.InvalidArgumentError(`Cannot compose an object of size ${totalSize} (> 5TiB)`)
+ }
+
+ // record source size
+ srcObjectSizes[index] = srcCopySize
+
+ // calculate parts needed for current source
+ totalParts += partsRequired(srcCopySize)
+ // Do we need more parts than we are allowed?
+ if (totalParts > PART_CONSTRAINTS.MAX_PARTS_COUNT) {
+ throw new errors.InvalidArgumentError(
+ `Your proposed compose object requires more than ${PART_CONSTRAINTS.MAX_PARTS_COUNT} parts`,
+ )
+ }
+
+ return resItemStat
+ })
+
+ if ((totalParts === 1 && totalSize <= PART_CONSTRAINTS.MAX_PART_SIZE) || totalSize === 0) {
+ return await this.copyObject(sourceObjList[0] as CopySourceOptions, destObjConfig) // use copyObjectV2
+ }
+
+ // preserve etag to avoid modification of object while copying.
+ for (let i = 0; i < sourceFilesLength; i++) {
+ ;(sourceObjList[i] as CopySourceOptions).MatchETag = (validatedStats[i] as BucketItemStat).etag
+ }
+
+ const splitPartSizeList = validatedStats.map((resItemStat, idx) => {
+ return calculateEvenSplits(srcObjectSizes[idx] as number, sourceObjList[idx] as CopySourceOptions)
+ })
+
+ const getUploadPartConfigList = (uploadId: string) => {
+ const uploadPartConfigList: UploadPartConfig[] = []
+
+ splitPartSizeList.forEach((splitSize, splitIndex: number) => {
+ if (splitSize) {
+ const { startIndex: startIdx, endIndex: endIdx, objInfo: objConfig } = splitSize
+
+ const partIndex = splitIndex + 1 // part index starts from 1.
+ const totalUploads = Array.from(startIdx)
+
+ const headers = (sourceObjList[splitIndex] as CopySourceOptions).getHeaders()
+
+ totalUploads.forEach((splitStart, upldCtrIdx) => {
+ const splitEnd = endIdx[upldCtrIdx]
+
+ const sourceObj = `${objConfig.Bucket}/${objConfig.Object}`
+ headers['x-amz-copy-source'] = `${sourceObj}`
+ headers['x-amz-copy-source-range'] = `bytes=${splitStart}-${splitEnd}`
+
+ const uploadPartConfig = {
+ bucketName: destObjConfig.Bucket,
+ objectName: destObjConfig.Object,
+ uploadID: uploadId,
+ partNumber: partIndex,
+ headers: headers,
+ sourceObj: sourceObj,
+ }
+
+ uploadPartConfigList.push(uploadPartConfig)
+ })
+ }
+ })
+
+ return uploadPartConfigList
+ }
+
+ const uploadAllParts = async (uploadList: UploadPartConfig[]) => {
+ const partUploads = uploadList.map(async (item) => {
+ return this.uploadPart(item)
+ })
+ // Process results here if needed
+ return await Promise.all(partUploads)
+ }
+
+ const performUploadParts = async (uploadId: string) => {
+ const uploadList = getUploadPartConfigList(uploadId)
+ const partsRes = await uploadAllParts(uploadList)
+ return partsRes.map((partCopy) => ({ etag: partCopy.etag, part: partCopy.part }))
+ }
+
+ const newUploadHeaders = destObjConfig.getHeaders()
+
+ const uploadId = await this.initiateNewMultipartUpload(destObjConfig.Bucket, destObjConfig.Object, newUploadHeaders)
+ try {
+ const partsDone = await performUploadParts(uploadId)
+ return await this.completeMultipartUpload(destObjConfig.Bucket, destObjConfig.Object, uploadId, partsDone)
+    } catch (err) {
+      // Clean up the incomplete upload, then surface the original error to the caller.
+      await this.abortMultipartUpload(destObjConfig.Bucket, destObjConfig.Object, uploadId)
+      throw err
+    }
+ }
}
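For reference, the `MatchRange`/`Start`/`End` validation above means callers can compose from byte ranges of each source. A minimal usage sketch against the new promise API; bucket and object names are placeholders:

```js
// Sketch only: assumes the named buckets/objects exist and that every
// non-final range is at least ABS_MIN_PART_SIZE (5 MiB).
import * as Minio from 'minio'

const s3Client = new Minio.Client({
  endPoint: 's3.amazonaws.com',
  accessKey: 'YOUR-ACCESSKEYID',
  secretKey: 'YOUR-SECRETACCESSKEY',
})

const sources = [
  // End is inclusive: copied size = End - Start + 1 (first 10 MiB here).
  new Minio.CopySourceOptions({
    Bucket: 'my-bucketname',
    Object: 'part-a',
    MatchRange: true,
    Start: 0,
    End: 10 * 1024 * 1024 - 1,
  }),
  new Minio.CopySourceOptions({ Bucket: 'my-bucketname', Object: 'part-b' }),
]

const dest = new Minio.CopyDestinationOptions({ Bucket: 'my-bucketname', Object: 'composed-object' })

await s3Client.composeObject(dest, sources)
```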
diff --git a/src/internal/type.ts b/src/internal/type.ts
index d2f0a193..786f2512 100644
--- a/src/internal/type.ts
+++ b/src/internal/type.ts
@@ -443,3 +443,12 @@ export type BucketVersioningConfiguration = {
ExcludedPrefixes?: ExcludedPrefix[]
ExcludeFolders?: boolean
}
+
+export type UploadPartConfig = {
+ bucketName: string
+ objectName: string
+ uploadID: string
+ partNumber: number
+ headers: RequestHeaders
+ sourceObj: string
+}
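Each `UploadPartConfig` describes one ranged UploadPartCopy request built by `composeObject`. An illustrative literal (values are placeholders) matching the construction in client.ts above:

```js
// Illustrative only — mirrors how composeObject populates this type.
const uploadPartConfig = {
  bucketName: 'dest-bucket',
  objectName: 'composed-object',
  uploadID: 'example-upload-id',
  partNumber: 1, // part numbers start at 1
  headers: {
    'x-amz-copy-source': 'source-bucket/part-a',
    'x-amz-copy-source-range': 'bytes=0-10485759', // inclusive byte range
  },
  sourceObj: 'source-bucket/part-a',
}
```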
diff --git a/src/internal/xml-parser.ts b/src/internal/xml-parser.ts
index 8b2fc107..bf27d163 100644
--- a/src/internal/xml-parser.ts
+++ b/src/internal/xml-parser.ts
@@ -599,3 +599,8 @@ export function parseCopyObject(xml: string): CopyObjectResultV1 {
return result
}
+export function uploadPartParser(xml: string) {
+ const xmlObj = parseXml(xml)
+ const respEl = xmlObj.CopyPartResult
+ return respEl
+}
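`uploadPartParser` returns the `CopyPartResult` element of an UploadPartCopy response. A sketch of the round trip (response body shape per the S3 API; values illustrative):

```js
// Example UploadPartCopy response body:
const xml = `<CopyPartResult>
  <LastModified>2024-06-04T00:00:00.000Z</LastModified>
  <ETag>"9b2cf535f27731c974343645a3985328"</ETag>
</CopyPartResult>`

// uploadPartParser(xml) yields the parsed element, roughly
// { LastModified: '...', ETag: '"9b2c..."' }; uploadPart() then strips
// the surrounding quotes from the ETag via sanitizeETag().
```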
diff --git a/src/minio.d.ts b/src/minio.d.ts
index 9ec36cf3..67cf6632 100644
--- a/src/minio.d.ts
+++ b/src/minio.d.ts
@@ -1,13 +1,7 @@
// imported from https://github.com/DefinitelyTyped/DefinitelyTyped/blob/93cfb0ec069731dcdfc31464788613f7cddb8192/types/minio/index.d.ts
/* eslint-disable @typescript-eslint/no-explicit-any */
-import type {
- CopyDestinationOptions,
- CopySourceOptions,
- LEGAL_HOLD_STATUS,
- RETENTION_MODES,
- RETENTION_VALIDITY_UNITS,
-} from './helpers.ts'
+import type { LEGAL_HOLD_STATUS, RETENTION_MODES, RETENTION_VALIDITY_UNITS } from './helpers.ts'
import type { ClientOptions, NoResultCallback, RemoveOptions } from './internal/client.ts'
import { TypedClient } from './internal/client.ts'
import { CopyConditions } from './internal/copy-conditions.ts'
@@ -147,15 +141,6 @@ export class Client extends TypedClient {
listObjectsV2(bucketName: string, prefix?: string, recursive?: boolean, startAfter?: string): BucketStream<BucketItem>
- removeIncompleteUpload(bucketName: string, objectName: string, callback: NoResultCallback): void
-  removeIncompleteUpload(bucketName: string, objectName: string): Promise<void>
- composeObject(
- destObjConfig: CopyDestinationOptions,
- sourceObjList: CopySourceOptions[],
-    callback: ResultCallback<SourceObjectStats>,
- ): void
-  composeObject(destObjConfig: CopyDestinationOptions, sourceObjList: CopySourceOptions[]): Promise<SourceObjectStats>
-
// Presigned operations
presignedUrl(httpMethod: string, bucketName: string, objectName: string, callback: ResultCallback<string>): void
presignedUrl(
diff --git a/src/minio.js b/src/minio.js
index f2f0816d..3a5f08a2 100644
--- a/src/minio.js
+++ b/src/minio.js
@@ -16,18 +16,14 @@
import * as Stream from 'node:stream'
-import async from 'async'
-import _ from 'lodash'
import * as querystring from 'query-string'
import xml2js from 'xml2js'
import * as errors from './errors.ts'
-import { CopyDestinationOptions } from './helpers.ts'
import { callbackify } from './internal/callbackify.js'
import { TypedClient } from './internal/client.ts'
import { CopyConditions } from './internal/copy-conditions.ts'
import {
- calculateEvenSplits,
getScope,
isBoolean,
isFunction,
@@ -39,10 +35,7 @@ import {
isValidObjectName,
isValidPrefix,
makeDateLong,
- PART_CONSTRAINTS,
- partsRequired,
pipesetup,
- sanitizeETag,
uriEscape,
} from './internal/helper.ts'
import { PostPolicy } from './internal/post-policy.ts'
@@ -623,225 +616,6 @@ export class Client extends TypedClient {
return listener
}
-
- /**
- * Internal method to upload a part during compose object.
- * @param partConfig __object__ contains the following.
- * bucketName __string__
- * objectName __string__
- * uploadID __string__
- * partNumber __number__
- * headers __object__
- * @param cb called with null incase of error.
- */
- uploadPartCopy(partConfig, cb) {
- const { bucketName, objectName, uploadID, partNumber, headers } = partConfig
-
- const method = 'PUT'
- let query = `uploadId=${uploadID}&partNumber=${partNumber}`
- const requestOptions = { method, bucketName, objectName: objectName, query, headers }
- return this.makeRequest(requestOptions, '', [200], '', true, (e, response) => {
- let partCopyResult = Buffer.from('')
- if (e) {
- return cb(e)
- }
- pipesetup(response, transformers.uploadPartTransformer())
- .on('data', (data) => {
- partCopyResult = data
- })
- .on('error', cb)
- .on('end', () => {
- let uploadPartCopyRes = {
- etag: sanitizeETag(partCopyResult.ETag),
- key: objectName,
- part: partNumber,
- }
-
- cb(null, uploadPartCopyRes)
- })
- })
- }
-
- composeObject(destObjConfig = {}, sourceObjList = [], cb) {
- const me = this // many async flows. so store the ref.
- const sourceFilesLength = sourceObjList.length
-
- if (!Array.isArray(sourceObjList)) {
- throw new errors.InvalidArgumentError('sourceConfig should an array of CopySourceOptions ')
- }
- if (!(destObjConfig instanceof CopyDestinationOptions)) {
- throw new errors.InvalidArgumentError('destConfig should of type CopyDestinationOptions ')
- }
-
- if (sourceFilesLength < 1 || sourceFilesLength > PART_CONSTRAINTS.MAX_PARTS_COUNT) {
- throw new errors.InvalidArgumentError(
- `"There must be as least one and up to ${PART_CONSTRAINTS.MAX_PARTS_COUNT} source objects.`,
- )
- }
-
- if (!isFunction(cb)) {
- throw new TypeError('callback should be of type "function"')
- }
-
- for (let i = 0; i < sourceFilesLength; i++) {
- if (!sourceObjList[i].validate()) {
- return false
- }
- }
-
- if (!destObjConfig.validate()) {
- return false
- }
-
- const getStatOptions = (srcConfig) => {
- let statOpts = {}
- if (!_.isEmpty(srcConfig.VersionID)) {
- statOpts = {
- versionId: srcConfig.VersionID,
- }
- }
- return statOpts
- }
- const srcObjectSizes = []
- let totalSize = 0
- let totalParts = 0
-
- const sourceObjStats = sourceObjList.map((srcItem) =>
- me.statObject(srcItem.Bucket, srcItem.Object, getStatOptions(srcItem)),
- )
-
- return Promise.all(sourceObjStats)
- .then((srcObjectInfos) => {
- const validatedStats = srcObjectInfos.map((resItemStat, index) => {
- const srcConfig = sourceObjList[index]
-
- let srcCopySize = resItemStat.size
- // Check if a segment is specified, and if so, is the
- // segment within object bounds?
- if (srcConfig.MatchRange) {
- // Since range is specified,
- // 0 <= src.srcStart <= src.srcEnd
- // so only invalid case to check is:
- const srcStart = srcConfig.Start
- const srcEnd = srcConfig.End
- if (srcEnd >= srcCopySize || srcStart < 0) {
- throw new errors.InvalidArgumentError(
- `CopySrcOptions ${index} has invalid segment-to-copy [${srcStart}, ${srcEnd}] (size is ${srcCopySize})`,
- )
- }
- srcCopySize = srcEnd - srcStart + 1
- }
-
- // Only the last source may be less than `absMinPartSize`
- if (srcCopySize < PART_CONSTRAINTS.ABS_MIN_PART_SIZE && index < sourceFilesLength - 1) {
- throw new errors.InvalidArgumentError(
- `CopySrcOptions ${index} is too small (${srcCopySize}) and it is not the last part.`,
- )
- }
-
- // Is data to copy too large?
- totalSize += srcCopySize
- if (totalSize > PART_CONSTRAINTS.MAX_MULTIPART_PUT_OBJECT_SIZE) {
- throw new errors.InvalidArgumentError(`Cannot compose an object of size ${totalSize} (> 5TiB)`)
- }
-
- // record source size
- srcObjectSizes[index] = srcCopySize
-
- // calculate parts needed for current source
- totalParts += partsRequired(srcCopySize)
- // Do we need more parts than we are allowed?
- if (totalParts > PART_CONSTRAINTS.MAX_PARTS_COUNT) {
- throw new errors.InvalidArgumentError(
- `Your proposed compose object requires more than ${PART_CONSTRAINTS.MAX_PARTS_COUNT} parts`,
- )
- }
-
- return resItemStat
- })
-
- if ((totalParts === 1 && totalSize <= PART_CONSTRAINTS.MAX_PART_SIZE) || totalSize === 0) {
- return this.copyObject(sourceObjList[0], destObjConfig, cb) // use copyObjectV2
- }
-
- // preserve etag to avoid modification of object while copying.
- for (let i = 0; i < sourceFilesLength; i++) {
- sourceObjList[i].MatchETag = validatedStats[i].etag
- }
-
- const splitPartSizeList = validatedStats.map((resItemStat, idx) => {
- const calSize = calculateEvenSplits(srcObjectSizes[idx], sourceObjList[idx])
- return calSize
- })
-
- function getUploadPartConfigList(uploadId) {
- const uploadPartConfigList = []
-
- splitPartSizeList.forEach((splitSize, splitIndex) => {
- const { startIndex: startIdx, endIndex: endIdx, objInfo: objConfig } = splitSize
-
- let partIndex = splitIndex + 1 // part index starts from 1.
- const totalUploads = Array.from(startIdx)
-
- const headers = sourceObjList[splitIndex].getHeaders()
-
- totalUploads.forEach((splitStart, upldCtrIdx) => {
- let splitEnd = endIdx[upldCtrIdx]
-
- const sourceObj = `${objConfig.Bucket}/${objConfig.Object}`
- headers['x-amz-copy-source'] = `${sourceObj}`
- headers['x-amz-copy-source-range'] = `bytes=${splitStart}-${splitEnd}`
-
- const uploadPartConfig = {
- bucketName: destObjConfig.Bucket,
- objectName: destObjConfig.Object,
- uploadID: uploadId,
- partNumber: partIndex,
- headers: headers,
- sourceObj: sourceObj,
- }
-
- uploadPartConfigList.push(uploadPartConfig)
- })
- })
-
- return uploadPartConfigList
- }
-
- const performUploadParts = (uploadId) => {
- const uploadList = getUploadPartConfigList(uploadId)
-
- async.map(uploadList, me.uploadPartCopy.bind(me), (err, res) => {
- if (err) {
- this.abortMultipartUpload(destObjConfig.Bucket, destObjConfig.Object, uploadId).then(
- () => cb(),
- (err) => cb(err),
- )
- return
- }
- const partsDone = res.map((partCopy) => ({ etag: partCopy.etag, part: partCopy.part }))
- return me.completeMultipartUpload(destObjConfig.Bucket, destObjConfig.Object, uploadId, partsDone).then(
- (result) => cb(null, result),
- (err) => cb(err),
- )
- })
- }
-
- const newUploadHeaders = destObjConfig.getHeaders()
-
- me.initiateNewMultipartUpload(destObjConfig.Bucket, destObjConfig.Object, newUploadHeaders).then(
- (uploadId) => {
- performUploadParts(uploadId)
- },
- (err) => {
- cb(err, null)
- },
- )
- })
- .catch((error) => {
- cb(error, null)
- })
- }
}
Client.prototype.presignedUrl = promisify(Client.prototype.presignedUrl)
@@ -851,7 +625,6 @@ Client.prototype.presignedPostPolicy = promisify(Client.prototype.presignedPostP
Client.prototype.getBucketNotification = promisify(Client.prototype.getBucketNotification)
Client.prototype.setBucketNotification = promisify(Client.prototype.setBucketNotification)
Client.prototype.removeAllBucketNotification = promisify(Client.prototype.removeAllBucketNotification)
-Client.prototype.composeObject = promisify(Client.prototype.composeObject)
// refactored API use promise internally
Client.prototype.makeBucket = callbackify(Client.prototype.makeBucket)
@@ -896,3 +669,4 @@ Client.prototype.getObjectRetention = callbackify(Client.prototype.getObjectRete
Client.prototype.removeObjects = callbackify(Client.prototype.removeObjects)
Client.prototype.removeIncompleteUpload = callbackify(Client.prototype.removeIncompleteUpload)
Client.prototype.copyObject = callbackify(Client.prototype.copyObject)
+Client.prototype.composeObject = callbackify(Client.prototype.composeObject)
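Since `composeObject` now goes through `callbackify` instead of `promisify`, the callback calling style keeps working. A minimal sketch, assuming `client`, `destOption` and `sourceList` as in the examples:

```js
// Callback style is still supported via the callbackified wrapper.
client.composeObject(destOption, sourceList, (err, result) => {
  if (err) {
    return console.log('composeObject error', err)
  }
  console.log('ComposeObject Success...', result)
})
```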
diff --git a/src/transformers.js b/src/transformers.js
index aa883dd8..452e75e3 100644
--- a/src/transformers.js
+++ b/src/transformers.js
@@ -114,11 +114,3 @@ export function getListObjectsV2WithMetadataTransformer() {
export function getBucketNotificationTransformer() {
return getConcater(xmlParsers.parseBucketNotification)
}
-
-export function objectLegalHoldTransformer() {
- return getConcater(xmlParsers.parseObjectLegalHoldConfig)
-}
-
-export function uploadPartTransformer() {
- return getConcater(xmlParsers.uploadPartParser)
-}
diff --git a/src/xml-parsers.js b/src/xml-parsers.js
index 522edc30..b6110a1f 100644
--- a/src/xml-parsers.js
+++ b/src/xml-parsers.js
@@ -271,23 +271,3 @@ export function parseListObjectsV2WithMetadata(xml) {
}
return result
}
-
-export function parseObjectLegalHoldConfig(xml) {
- const xmlObj = parseXml(xml)
- return xmlObj.LegalHold
-}
-
-export function uploadPartParser(xml) {
- const xmlObj = parseXml(xml)
- const respEl = xmlObj.CopyPartResult
- return respEl
-}
-
-export function removeObjectsParser(xml) {
- const xmlObj = parseXml(xml)
- if (xmlObj.DeleteResult && xmlObj.DeleteResult.Error) {
- // return errors as array always. as the response is object in case of single object passed in removeObjects
- return toArray(xmlObj.DeleteResult.Error)
- }
- return []
-}
diff --git a/tests/unit/test.js b/tests/unit/test.js
index c604e114..62713234 100644
--- a/tests/unit/test.js
+++ b/tests/unit/test.js
@@ -1622,30 +1622,32 @@ describe('Client', function () {
describe('Compose Object APIs', () => {
describe('composeObject(destObjConfig, sourceObjectList,cb)', () => {
- it('should fail on null destination config', (done) => {
+ it('should fail on null destination config', async () => {
try {
- client.composeObject(null, function () {})
- } catch (e) {
- done()
+ await client.composeObject(null)
+ } catch (err) {
+ return
}
+    throw new Error('composeObject should throw on null destination config')
})
-
- it('should fail on no array source config', (done) => {
+ it('should fail on no array source config', async () => {
try {
const destOptions = new CopyDestinationOptions({ Bucket: 'test-bucket', Object: 'test-object' })
- client.composeObject(destOptions, 'non-array', function () {})
- } catch (e) {
- done()
+ await client.composeObject(destOptions, 'non-array')
+ } catch (err) {
+ return
}
+    throw new Error('composeObject should throw on non-array source config')
})
- it('should fail on null source config', (done) => {
+ it('should fail on null source config', async () => {
try {
const destOptions = new CopyDestinationOptions({ Bucket: 'test-bucket', Object: 'test-object' })
- client.composeObject(destOptions, null, function () {})
- } catch (e) {
- done()
+ await client.composeObject(destOptions, null)
+ } catch (err) {
+ return
}
+    throw new Error('composeObject should throw on null source config')
})
})
})