Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Bug]: The address and port in the Minio configuration section of the configuration do not work #466

Open
dafei1288 opened this issue Nov 21, 2024 · 2 comments

Comments

@dafei1288
Copy link

Current Behavior

root@0954a0f727c6:/milvus/bin# cat configs/backup.yaml

# Configures the system log output.
log:
  level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
  console: true # whether print log to console
  file:
    rootPath: "logs/backup.log"

http:
  simpleResponse: true

# milvus proxy address, compatible to milvus.yaml
milvus:
  address: localhost
  port: 19530
  authorizationEnabled: false
  # tls mode values [0, 1, 2]
  # 0 is close, 1 is one-way authentication, 2 is two-way authentication.
  tlsMode: 0
  user: "root"
  password: "Milvus"

# Related configuration of minio, which is responsible for data persistence for Milvus.
minio:
  # Milvus storage configs, make them the same with milvus config
  storageType: "minio" # support storage type: local, minio, s3, aws, gcp, ali(aliyun), azure, tc(tencent)
  Address: minio # Address of MinIO/S3
  port: 9000   # Port of MinIO/S3
  accessKeyID: minioadmin  # accessKeyID of MinIO/S3
  secretAccessKey: minioadmin # MinIO/S3 encryption string
  useSSL: false # Access to MinIO/S3 with SSL
  useIAM: false
  iamEndpoint: ""
  bucketName: "a-bucket" # Milvus Bucket name in MinIO/S3, make it the same as your milvus instance
  rootPath: "files" # Milvus storage root path in MinIO/S3, make it the same as your milvus instance

  # Backup storage configs, the storage you want to put the backup data
  # backupStorageType: "minio" # support storage type: local, minio, s3, aws, gcp, ali(aliyun), azure, tc(tencent)
  # backupAddress: "minio" # Address of MinIO/S3
  # backupPort: 9000   # Port of MinIO/S3
  backupAccessKeyID: minioadmin  # accessKeyID of MinIO/S3
  backupSecretAccessKey: minioadmin # MinIO/S3 encryption string
  backupBucketName: "a-bucket" # Bucket name to store backup data. Backup data will store to backupBucketName/backupRootPath
  backupRootPath: "backup" # Rootpath to store backup data. Backup data will store to backupBucketName/backupRootPath

  # If you need to back up or restore data between two different storage systems, direct client-side copying is not supported. 
  # Set this option to true to enable data transfer through Milvus Backup.
  # Note: This option will be automatically set to true if `minio.storageType` and `minio.backupStorageType` differ.
  # However, if they are the same but belong to different services, you must manually set this option to `true`.
  # crossStorage: "false"
  
backup:
  maxSegmentGroupSize: 2G

  parallelism: 
    # collection level parallelism to backup
    backupCollection: 4
    # thread pool to copy data. reduce it if blocks your storage's network bandwidth
    copydata: 128
    # Collection level parallelism to restore
    restoreCollection: 2
  
  # keep temporary files during restore, only use to debug 
  keepTempFiles: false
  
  # Pause GC during backup through Milvus Http API. 
  gcPause:
    enable: true
    seconds: 7200
    address: http://minioadmin:9091

root@0954a0f727c6:/milvus/bin# ./milvus-backup create -c merged_embedd_vector_1 -n mybak
0.4.26 (Built on 2024-10-30T09:28:19Z from Git SHA dd45150)
config:backup.yaml
[2024/11/21 07:13:04.235 +00:00] [INFO] [logutil/logutil.go:165] ["Log directory"] [configDir=]
[2024/11/21 07:13:04.235 +00:00] [INFO] [logutil/logutil.go:166] ["Set log file to "] [path=logs/backup.log]
[2024/11/21 07:13:04.237 +00:00] [INFO] [core/backup_impl_create_backup.go:32] ["receive CreateBackupRequest"] [requestId=0cdcf9a7-a7d8-11ef-872d-0242c0a81004] [backupName=mybak] [collections="[merged_embedd_vector_1]"] [databaseCollections=] [async=false] [force=false] [metaOnly=false]
[2024/11/21 07:13:04.237 +00:00] [INFO] [core/backup_context.go:117] ["{Base:0xc0005b2dc0 MaxSegmentGroupSize:2147483648 BackupCollectionParallelism:4 BackupCopyDataParallelism:128 RestoreParallelism:2 KeepTempFiles:false GcPauseEnable:true GcPauseSeconds:7200 GcPauseAddress:http://minioadmin:9091}"]
[2024/11/21 07:13:04.237 +00:00] [INFO] [core/backup_context.go:118] ["{Base:0xc0005b2dc0 Enabled:true DebugMode:false SimpleResponse:true}"]
[2024/11/21 07:13:04.237 +00:00] [ERROR] [core/backup_context.go:212] ["failed to initial storage client"] [error="Endpoint: minio:9000:9000 does not follow ip address or domain name standards."] [stack="github.com/zilliztech/milvus-backup/core.(*BackupContext).getBackupStorageClient\n\t/home/runner/work/milvus-backup/milvus-backup/core/backup_context.go:212\ngithub.com/zilliztech/milvus-backup/core.(*BackupContext).CreateBackup\n\t/home/runner/work/milvus-backup/milvus-backup/core/backup_impl_create_backup.go:59\ngithub.com/zilliztech/milvus-backup/cmd.glob..func3\n\t/home/runner/work/milvus-backup/milvus-backup/cmd/create.go:61\ngithub.com/spf13/cobra.(*Command).execute\n\t/home/runner/go/pkg/mod/github.com/spf13/[email protected]/command.go:876\ngithub.com/spf13/cobra.(*Command).ExecuteC\n\t/home/runner/go/pkg/mod/github.com/spf13/[email protected]/command.go:990\ngithub.com/spf13/cobra.(*Command).Execute\n\t/home/runner/go/pkg/mod/github.com/spf13/[email protected]/command.go:918\ngithub.com/zilliztech/milvus-backup/cmd.Execute\n\t/home/runner/work/milvus-backup/milvus-backup/cmd/root.go:35\nmain.main\n\t/home/runner/work/milvus-backup/milvus-backup/main.go:24\nruntime.main\n\t/opt/hostedtoolcache/go/1.18.10/x64/src/runtime/proc.go:250"]
panic: Endpoint: minio:9000:9000 does not follow ip address or domain name standards.

goroutine 1 [running]:
github.com/zilliztech/milvus-backup/core.(*BackupContext).getBackupStorageClient(0xc00032d880)
/home/runner/work/milvus-backup/milvus-backup/core/backup_context.go:213 +0x653
github.com/zilliztech/milvus-backup/core.(*BackupContext).CreateBackup(0xc00032d880, {0x150f928?, 0xc00013c000}, 0xc0001dd320)
/home/runner/work/milvus-backup/milvus-backup/core/backup_impl_create_backup.go:59 +0x985
github.com/zilliztech/milvus-backup/cmd.glob..func3(0x2582060?, {0x12e1ba0?, 0x4?, 0x4?})
/home/runner/work/milvus-backup/milvus-backup/cmd/create.go:61 +0x6ff
github.com/spf13/cobra.(*Command).execute(0x2582060, {0xc0006296c0, 0x4, 0x4})
/home/runner/go/pkg/mod/github.com/spf13/[email protected]/command.go:876 +0x67b
github.com/spf13/cobra.(*Command).ExecuteC(0x2582ce0)
/home/runner/go/pkg/mod/github.com/spf13/[email protected]/command.go:990 +0x3b4
github.com/spf13/cobra.(*Command).Execute(...)
/home/runner/go/pkg/mod/github.com/spf13/[email protected]/command.go:918
github.com/zilliztech/milvus-backup/cmd.Execute()
/home/runner/work/milvus-backup/milvus-backup/cmd/root.go:35 +0xbf
main.main()
/home/runner/work/milvus-backup/milvus-backup/main.go:24 +0x4a
root@0954a0f727c6:/milvus/bin#

Expected Behavior

No response

Steps To Reproduce

1. 0.4.26 (Built on 2024-10-30T09:28:19Z from Git SHA dd4515017a2d2f7c9493070fe321156a6c9126d4)

Environment

No response

Anything else?

No response

@wayblink
Copy link
Contributor

minio:
  # Milvus storage configs, make them the same with milvus config
  storageType: "minio" # support storage type: local, minio, s3, aws, gcp, ali(aliyun), azure, tc(tencent)
  Address: minio 

The 'Address' should be an IP address or domain name, not 'minio'

@dafei1288
Copy link
Author

minio:
  # Milvus storage configs, make them the same with milvus config
  storageType: "minio" # support storage type: local, minio, s3, aws, gcp, ali(aliyun), azure, tc(tencent)
  Address: minio 

The 'Address' should be an IP address or domain name, not 'minio'

Thank you for your reply. My operating environment is in Docker, and minio is the container name. Do I also need to change it to IP? Docker-compose.yml as follows

version: '3.5'

services:
  etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.5
    environment:
      - ETCD_AUTO_COMPACTION_MODE=revision
      - ETCD_AUTO_COMPACTION_RETENTION=1000
      - ETCD_QUOTA_BACKEND_BYTES=4294967296
      - ETCD_SNAPSHOT_COUNT=50000
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
    healthcheck:
      test: ["CMD", "etcdctl", "endpoint", "health"]
      interval: 30s
      timeout: 20s
      retries: 3

  minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    environment:
      MINIO_ACCESS_KEY: minioadmin
      MINIO_SECRET_KEY: minioadmin
    ports:
      - "9001:9001"
      - "9000:9000"
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/minio:/minio_data
    command: minio server /minio_data --console-address ":9001"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3

  standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.4.4
    command: ["milvus", "run", "standalone"]
    security_opt:
    - seccomp:unconfined
    environment:
      ETCD_ENDPOINTS: etcd:2379
      MINIO_ADDRESS: minio:9000
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
      interval: 30s
      start_period: 90s
      timeout: 20s
      retries: 3
    ports:
      - "19530:19530"
      - "9091:9091"
    depends_on:
      - "etcd"
      - "minio"

  attu:
    image: 'zilliz/attu:latest'
    environment:
      - 'MILVUS_URL=http://standalone:19530'
      # - HOST_URL=a
    ports:
      - '8131:3000'
    depends_on:
      - standalone


networks:
  default:
    name: milvus

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

2 participants