diff --git a/.github/workflows/candidate-upgrade.yml b/.github/workflows/candidate-upgrade.yml
deleted file mode 100644
index c06b67da..00000000
--- a/.github/workflows/candidate-upgrade.yml
+++ /dev/null
@@ -1,62 +0,0 @@
-name: Upgrade a q/stable cluster to r/candidate
-on:
- # Allows you to run this workflow manually from the Actions tab
- workflow_dispatch: null
-
-jobs:
- # a2b upgrade implies a/stable -> b/candidate release upgrade.
- q2r-upgrade-test:
- name: Test quincy/stable to reef/candidate upgrades
- runs-on: ubuntu-22.04
- steps:
-
- - name: Checkout code
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: Copy utils
- run: cp tests/scripts/actionutils.sh $HOME
-
- - name: Clear FORWARD firewall rules
- run: ~/actionutils.sh cleaript
-
- - name: Free disk
- run: ~/actionutils.sh free_runner_disk
-
- - name: Install dependencies
- run: ~/actionutils.sh setup_lxd
-
- - name: Create containers with loopback devices
- run: ~/actionutils.sh create_containers
-
- - name: Install quincy stable from store
- run: ~/actionutils.sh install_store quincy/stable
-
- - name: Bootstrap
- run: ~/actionutils.sh bootstrap_head
-
- - name: Setup cluster
- run: ~/actionutils.sh cluster_nodes
-
- - name: Add 3 OSDs
- run: |
- for c in node-wrk0 node-wrk1 node-wrk2 ; do
- ~/actionutils.sh add_osd_to_node $c
- done
- ~/actionutils.sh headexec wait_for_osds 3
-
- - name: Enable RGW
- run: ~/actionutils.sh headexec enable_rgw
-
- - name: Exercise RGW
- run: ~/actionutils.sh headexec testrgw
-
- - name: Upgrade to candidate
- run: ~/actionutils.sh refresh_snap reef/candidate
-
- - name: Wait until 3 OSDs are up
- run: ~/actionutils.sh headexec wait_for_osds 3
-
- - name: Exercise RGW again
- run: ~/actionutils.sh headexec testrgw
diff --git a/.github/workflows/q2q-candidate-upgrade.yml b/.github/workflows/q2q-candidate-upgrade.yml
index 9963d7ec..f4454fac 100644
--- a/.github/workflows/q2q-candidate-upgrade.yml
+++ b/.github/workflows/q2q-candidate-upgrade.yml
@@ -25,7 +25,11 @@ jobs:
run: ~/actionutils.sh free_runner_disk
- name: Install dependencies
- run: ~/actionutils.sh setup_lxd
+ run: |
+ # boto3 for appS3 test script.
+ sudo python -m pip install --upgrade pip
+ sudo pip install boto3
+ ~/actionutils.sh setup_lxd
- name: Create containers with loopback devices
run: ~/actionutils.sh create_containers
@@ -50,7 +54,7 @@ jobs:
run: ~/actionutils.sh headexec enable_rgw
- name: Exercise RGW
- run: ~/actionutils.sh headexec testrgw
+ run: ~/actionutils.sh headexec testrgw_old
- name: Upgrade to candidate
run: ~/actionutils.sh refresh_snap quincy/candidate
@@ -59,4 +63,4 @@ jobs:
run: ~/actionutils.sh headexec wait_for_osds 3
- name: Exercise RGW again
- run: ~/actionutils.sh headexec testrgw
+ run: ~/actionutils.sh testrgw_on_headnode
diff --git a/.github/workflows/q2r-candidate-upgrade.yaml b/.github/workflows/q2r-candidate-upgrade.yaml
index c06b67da..99f85476 100644
--- a/.github/workflows/q2r-candidate-upgrade.yaml
+++ b/.github/workflows/q2r-candidate-upgrade.yaml
@@ -25,7 +25,11 @@ jobs:
run: ~/actionutils.sh free_runner_disk
- name: Install dependencies
- run: ~/actionutils.sh setup_lxd
+ run: |
+ # boto3 for appS3 test script.
+ sudo python -m pip install --upgrade pip
+ sudo pip install boto3
+ ~/actionutils.sh setup_lxd
- name: Create containers with loopback devices
run: ~/actionutils.sh create_containers
@@ -50,7 +54,7 @@ jobs:
run: ~/actionutils.sh headexec enable_rgw
- name: Exercise RGW
- run: ~/actionutils.sh headexec testrgw
+ run: ~/actionutils.sh headexec testrgw_old
- name: Upgrade to candidate
run: ~/actionutils.sh refresh_snap reef/candidate
@@ -59,4 +63,4 @@ jobs:
run: ~/actionutils.sh headexec wait_for_osds 3
- name: Exercise RGW again
- run: ~/actionutils.sh headexec testrgw
+ run: ~/actionutils.sh testrgw_on_headnode
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index eb454cee..acc269b8 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -39,6 +39,27 @@ jobs:
path: "*.snap"
retention-days: 5
+ lint-check:
+ name: Lint check help scripts
+ runs-on: ubuntu-22.04
+ env:
+ SNAPCRAFT_BUILD_ENVIRONMENT: "lxd"
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Install dependencies
+ run: |
+ # Python script dependencies
+ sudo python -m pip install --upgrade pip
+ sudo pip install flake8 pep8-naming
+
+ - name: Lint check help scripts.
+ run: |
+ flake8 ./scripts/ --count --show-source --statistics
+
unit-tests:
name: Run Unit tests
runs-on: ubuntu-22.04
@@ -81,6 +102,12 @@ jobs:
- name: Free disk
run: ~/actionutils.sh free_runner_disk
+ - name: Install dependencies
+ run: |
+ # Python script dependencies
+ sudo python -m pip install --upgrade pip
+ sudo pip install boto3
+
- name: Install and setup
run: |
~/actionutils.sh install_microceph
@@ -209,7 +236,11 @@ jobs:
run: ~/actionutils.sh free_runner_disk
- name: Install dependencies
- run: ~/actionutils.sh setup_lxd
+ run: |
+ # Python script dependencies
+ sudo python -m pip install --upgrade pip
+ sudo pip install boto3
+ ~/actionutils.sh setup_lxd
- name: Create containers with loopback devices
run: ~/actionutils.sh create_containers
@@ -272,6 +303,13 @@ jobs:
- name: Enable services on wrk1
run: ~/actionutils.sh headexec enable_services node-wrk1
+ - name: Enable RGW
+ run: ~/actionutils.sh headexec enable_rgw
+
+ - name: Exercise RGW
+ run: |
+ ~/actionutils.sh testrgw_on_headnode
+
- name: Test remove node wrk3
run: |
set -uex
@@ -304,6 +342,12 @@ jobs:
- name: Copy utils
run: cp tests/scripts/actionutils.sh $HOME
+ - name: Install dependencies
+ run: |
+ # Python script dependencies
+ sudo python -m pip install --upgrade pip
+ sudo pip install boto3
+
- name: Clear FORWARD firewall rules
run: ~/actionutils.sh cleaript
@@ -360,7 +404,11 @@ jobs:
run: ~/actionutils.sh free_runner_disk
- name: Install dependencies
- run: ~/actionutils.sh setup_lxd
+ run: |
+ # Python script dependencies
+ sudo python -m pip install --upgrade pip
+ sudo pip install boto3
+ ~/actionutils.sh setup_lxd
- name: Create containers with loopback devices
run: ~/actionutils.sh create_containers
@@ -384,8 +432,8 @@ jobs:
- name: Enable RGW
run: ~/actionutils.sh headexec enable_rgw
- - name: Exercise RGW
- run: ~/actionutils.sh headexec testrgw
+ - name: Exercise RGW before upgrade
+ run: ~/actionutils.sh headexec testrgw_old
- name: Install local build
run: ~/actionutils.sh install_multinode
@@ -394,5 +442,6 @@ jobs:
run: ~/actionutils.sh headexec wait_for_osds 3
- name: Exercise RGW again
- run: ~/actionutils.sh headexec testrgw
-
+ run: |
+ ~/actionutils.sh testrgw_on_headnode
+
diff --git a/docs/.custom_wordlist.txt b/docs/.custom_wordlist.txt
index 64bd65fa..d97de59f 100644
--- a/docs/.custom_wordlist.txt
+++ b/docs/.custom_wordlist.txt
@@ -47,6 +47,7 @@ Pre
mds
mon
rgw
+radosgw
rbd
RBD
MgrReports
@@ -64,3 +65,4 @@ Noout
Unsetting
cephfs
filesystems
+json
diff --git a/docs/how-to/s3-user.rst b/docs/how-to/s3-user.rst
new file mode 100644
index 00000000..8d32c33b
--- /dev/null
+++ b/docs/how-to/s3-user.rst
@@ -0,0 +1,106 @@
+Manage S3 users on MicroCeph
+=============================
+
+MicroCeph provides an easy to use interface for creating, viewing and deleting s3 users for interfacing with the RGW endpoint.
+This enables smooth and easy access to Object Storage.
+
+.. list-table:: Supported s3-user operations
+ :widths: 30 70
+ :header-rows: 1
+
+ * - Operation
+ - Description
+ * - create
+ - Create provided s3 (radosgw) user with optionally provided access-key and secret
+ * - delete
+ - Delete provided s3 (radosgw) user
+ * - get
+ - Fetch key information of the provided s3 (radosgw) user
+ * - list
+ - List all s3 (radosgw) users
+.. note:: Users can additionally provide the --json flag to the create and get commands to dump a more detailed, machine-readable output.
+
+1. Create an S3 user (optionally provide --access-key --secret and --json)
+
+ .. code-block:: shell
+
+ $ sudo microceph s3-user create newTestUser --access-key=ThisIsAccessKey --secret=ThisIsSecret --json
+ {
+ "user_id": "newTestUser",
+ "display_name": "newTestUser",
+ "email": "",
+ "suspended": 0,
+ "max_buckets": 1000,
+ "subusers": [],
+ "keys": [
+ {
+ "user": "newTestUser",
+ "access_key": "ThisIsAccessKey",
+ "secret_key": "ThisIsSecret"
+ }
+ ],
+ "swift_keys": [],
+ "caps": [],
+ "op_mask": "read, write, delete",
+ "default_placement": "",
+ "default_storage_class": "",
+ "placement_tags": [],
+ "bucket_quota": {
+ "enabled": false,
+ "check_on_raw": false,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": -1
+ },
+ "user_quota": {
+ "enabled": false,
+ "check_on_raw": false,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": -1
+ },
+ "temp_url_keys": [],
+ "type": "rgw",
+ "mfa_ids": []
+ }
+
+2. List all s3 users :
+
+ .. code-block:: shell
+
+ $ sudo microceph s3-user list
+ +---+-------------+
+ | # | NAME |
+ +---+-------------+
+ | 1 | newTestUser |
+ +---+-------------+
+ | 2 | testUser |
+ +---+-------------+
+
+3. Get details of an s3 user (optionally use --json flag to get complete details):
+
+ .. code-block:: shell
+
+ $ sudo microceph s3-user get testUser
+ +----------+----------------------+---------+
+ | NAME | ACCESS KEY | SECRET |
+ +----------+----------------------+---------+
+ | testUser | ThisIsAccessKey | ThisIsSecret |
+ +----------+----------------------+---------+
+
+4. Delete an s3 user:
+
+ .. code-block:: shell
+
+ $ sudo microceph s3-user delete newTestUser
+ $ sudo microceph s3-user list
+ +---+----------+
+ | # | NAME |
+ +---+----------+
+ | 1 | testUser |
+ +---+----------+
+
+ .. warning:: All related buckets and objects should be deleted before the user is deleted.
+
+For more fine-tuned user management use the `radosgw-admin CLI <https://docs.ceph.com/en/latest/man/8/radosgw-admin/>`_
+
diff --git a/microceph/api/client.go b/microceph/api/client.go
new file mode 100644
index 00000000..41ddf5c8
--- /dev/null
+++ b/microceph/api/client.go
@@ -0,0 +1,8 @@
+package api
+
+import "github.com/canonical/microcluster/rest"
+
+// Top level client API
+var clientCmd = rest.Endpoint{
+ Path: "client",
+}
diff --git a/microceph/api/client_configs.go b/microceph/api/client_configs.go
index 34cd7e57..03f8305a 100644
--- a/microceph/api/client_configs.go
+++ b/microceph/api/client_configs.go
@@ -18,11 +18,6 @@ import (
"github.com/canonical/microcluster/state"
)
-// Top level client API
-var clientCmd = rest.Endpoint{
- Path: "client",
-}
-
// client configs API
var clientConfigsCmd = rest.Endpoint{
Path: "client/configs",
diff --git a/microceph/api/client_s3.go b/microceph/api/client_s3.go
new file mode 100644
index 00000000..dfa6dd94
--- /dev/null
+++ b/microceph/api/client_s3.go
@@ -0,0 +1,79 @@
+package api
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/canonical/lxd/lxd/response"
+ "github.com/canonical/microceph/microceph/api/types"
+ "github.com/canonical/microceph/microceph/ceph"
+ "github.com/canonical/microcluster/rest"
+ "github.com/canonical/microcluster/state"
+)
+
+// /1.0/resources endpoint.
+var clientS3Cmd = rest.Endpoint{
+ Path: "client/s3",
+ Get: rest.EndpointAction{Handler: cmdClientS3Get, ProxyTarget: true},
+ Put: rest.EndpointAction{Handler: cmdClientS3Put, ProxyTarget: true},
+ Delete: rest.EndpointAction{Handler: cmdClientS3Delete, ProxyTarget: true},
+}
+
+func cmdClientS3Get(s *state.State, r *http.Request) response.Response {
+ var err error
+ var req types.S3User
+
+ err = json.NewDecoder(r.Body).Decode(&req)
+ if err != nil {
+ return response.InternalError(err)
+ }
+
+ // If a user name is passed.
+ if len(req.Name) > 0 {
+ getOutput, err := ceph.GetS3User(req)
+ if err != nil {
+ return response.SmartError(err)
+ }
+ return response.SyncResponse(true, getOutput)
+ } else {
+ listOutput, err := ceph.ListS3Users()
+ if err != nil {
+ return response.SmartError(err)
+ }
+ return response.SyncResponse(true, listOutput)
+ }
+}
+
+func cmdClientS3Put(s *state.State, r *http.Request) response.Response {
+ var err error
+ var req types.S3User
+
+ err = json.NewDecoder(r.Body).Decode(&req)
+ if err != nil {
+ return response.InternalError(err)
+ }
+
+ output, err := ceph.CreateS3User(req)
+ if err != nil {
+ return response.SmartError(err)
+ }
+
+ return response.SyncResponse(true, output)
+}
+
+func cmdClientS3Delete(s *state.State, r *http.Request) response.Response {
+ var err error
+ var req types.S3User
+
+ err = json.NewDecoder(r.Body).Decode(&req)
+ if err != nil {
+ return response.InternalError(err)
+ }
+
+ err = ceph.DeleteS3User(req.Name)
+ if err != nil {
+ return response.SmartError(err)
+ }
+
+ return response.EmptySyncResponse
+}
diff --git a/microceph/api/endpoints.go b/microceph/api/endpoints.go
index 845fc417..427e0730 100644
--- a/microceph/api/endpoints.go
+++ b/microceph/api/endpoints.go
@@ -11,7 +11,6 @@ var Endpoints = []rest.Endpoint{
disksDelCmd,
resourcesCmd,
servicesCmd,
- rgwServiceCmd,
configsCmd,
restartServiceCmd,
mdsServiceCmd,
@@ -21,4 +20,5 @@ var Endpoints = []rest.Endpoint{
clientCmd,
clientConfigsCmd,
clientConfigsKeyCmd,
+ clientS3Cmd,
}
diff --git a/microceph/api/types/s3.go b/microceph/api/types/s3.go
new file mode 100644
index 00000000..c61f1945
--- /dev/null
+++ b/microceph/api/types/s3.go
@@ -0,0 +1,9 @@
+// Package types provides shared types and structs.
+package types
+
+// S3User holds the name, access key and secret key required for exposing an S3 user.
+type S3User struct {
+ Name string `json:"name" yaml:"name"`
+ Key string `json:"key" yaml:"key"`
+ Secret string `json:"secret" yaml:"secret"`
+}
\ No newline at end of file
diff --git a/microceph/ceph/rgw_s3.go b/microceph/ceph/rgw_s3.go
new file mode 100644
index 00000000..3c549110
--- /dev/null
+++ b/microceph/ceph/rgw_s3.go
@@ -0,0 +1,78 @@
+package ceph
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/canonical/microceph/microceph/api/types"
+)
+
+func CreateS3User(user types.S3User) (string, error) {
+ args := []string{
+ "user",
+ "create",
+ fmt.Sprintf("--uid=%s", user.Name),
+ fmt.Sprintf("--display-name=%s", user.Name),
+ }
+
+ if len(user.Key) > 0 {
+ args = append(args, fmt.Sprintf("--access-key=%s", user.Key))
+ }
+
+ if len(user.Secret) > 0 {
+ args = append(args, fmt.Sprintf("--secret=%s", user.Secret))
+ }
+
+ output, err := processExec.RunCommand("radosgw-admin", args...)
+ if err != nil {
+ return "", err
+ }
+
+ return output, nil
+}
+
+func GetS3User(user types.S3User) (string, error) {
+ args := []string{
+ "user",
+ "info",
+ fmt.Sprintf("--uid=%s", user.Name),
+ }
+
+ output, err := processExec.RunCommand("radosgw-admin", args...)
+ if err != nil {
+ return "", err
+ }
+
+ return output, nil
+}
+
+func ListS3Users() ([]string, error) {
+ args := []string{
+ "user",
+ "list",
+ }
+
+ output, err := processExec.RunCommand("radosgw-admin", args...)
+ if err != nil {
+ return []string{}, err
+ }
+
+ ret := []string{}
+ json.Unmarshal([]byte(output), &ret)
+ return ret, nil
+}
+
+func DeleteS3User(name string) error {
+ args := []string{
+ "user",
+ "rm",
+ fmt.Sprintf("--uid=%s", name),
+ }
+
+ _, err := processExec.RunCommand("radosgw-admin", args...)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/microceph/client/s3.go b/microceph/client/s3.go
new file mode 100644
index 00000000..9a7947b1
--- /dev/null
+++ b/microceph/client/s3.go
@@ -0,0 +1,69 @@
+// Package client provides a full Go API client.
+package client
+
+import (
+ "context"
+ "time"
+
+ "github.com/canonical/lxd/shared/api"
+ "github.com/canonical/lxd/shared/logger"
+ "github.com/canonical/microceph/microceph/api/types"
+ "github.com/canonical/microcluster/client"
+)
+
+func GetS3User(ctx context.Context, c *client.Client, user *types.S3User) (string, error) {
+ queryCtx, cancel := context.WithTimeout(ctx, time.Second*60)
+ defer cancel()
+
+ ret := ""
+ err := c.Query(queryCtx, "GET", api.NewURL().Path("client", "s3"), user, &ret)
+ if err != nil {
+ logger.Error(err.Error())
+ return ret, err
+ }
+
+ return ret, nil
+}
+
+func ListS3Users(ctx context.Context, c *client.Client) ([]string, error) {
+ queryCtx, cancel := context.WithTimeout(ctx, time.Second*60)
+ defer cancel()
+
+ ret := []string{} // List of usernames
+ // GET request with no user name fetches all users.
+ err := c.Query(queryCtx, "GET", api.NewURL().Path("client", "s3"), &types.S3User{Name: ""}, &ret)
+ if err != nil {
+ logger.Error(err.Error())
+ return ret, err
+ }
+
+ return ret, nil
+}
+
+func CreateS3User(ctx context.Context, c *client.Client, user *types.S3User) (string, error) {
+ queryCtx, cancel := context.WithTimeout(ctx, time.Second*60)
+ defer cancel()
+
+ ret := ""
+ err := c.Query(queryCtx, "PUT", api.NewURL().Path("client", "s3"), user, &ret)
+ if err != nil {
+ logger.Error(err.Error())
+ return ret, err
+ }
+
+ return ret, nil
+}
+
+func DeleteS3User(ctx context.Context, c *client.Client, user *types.S3User) error {
+ queryCtx, cancel := context.WithTimeout(ctx, time.Second*60)
+ defer cancel()
+
+ ret := types.S3User{}
+ err := c.Query(queryCtx, "DELETE", api.NewURL().Path("client", "s3"), user, &ret)
+ if err != nil {
+ logger.Error(err.Error())
+ return err
+ }
+
+ return nil
+}
diff --git a/microceph/cmd/microceph/client.go b/microceph/cmd/microceph/client.go
index a8940fd1..37a42371 100644
--- a/microceph/cmd/microceph/client.go
+++ b/microceph/cmd/microceph/client.go
@@ -6,6 +6,7 @@ import (
type cmdClient struct {
common *CmdControl
+ client *cmdClient
}
func (c *cmdClient) Command() *cobra.Command {
@@ -18,6 +19,10 @@ func (c *cmdClient) Command() *cobra.Command {
clientConfigCmd := cmdClientConfig{common: c.common, client: c}
cmd.AddCommand(clientConfigCmd.Command())
+ // S3 Subcommand
+ clientS3Cmd := cmdClientS3{common: c.common, client: c.client}
+ cmd.AddCommand(clientS3Cmd.Command())
+
// Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706
cmd.Args = cobra.NoArgs
cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() }
diff --git a/microceph/cmd/microceph/client_s3.go b/microceph/cmd/microceph/client_s3.go
new file mode 100644
index 00000000..cfe5d74d
--- /dev/null
+++ b/microceph/cmd/microceph/client_s3.go
@@ -0,0 +1,276 @@
+package main
+
+import (
+ "context"
+ "fmt"
+
+ lxdCmd "github.com/canonical/lxd/shared/cmd"
+ "github.com/canonical/microceph/microceph/api/types"
+ "github.com/canonical/microceph/microceph/client"
+ "github.com/canonical/microcluster/microcluster"
+ "github.com/spf13/cobra"
+ "github.com/tidwall/gjson"
+)
+
+type cmdClientS3 struct {
+ common *CmdControl
+ client *cmdClient
+}
+
+type cmdClientS3Get struct {
+ common *CmdControl
+ client *cmdClient
+ s3 *cmdClientS3
+ jsonOutput bool
+}
+
+type cmdClientS3Create struct {
+ common *CmdControl
+ client *cmdClient
+ s3 *cmdClientS3
+ accessKey string
+ secret string
+ jsonOutput bool
+}
+
+type cmdClientS3Delete struct {
+ common *CmdControl
+ client *cmdClient
+ s3 *cmdClientS3
+}
+
+type cmdClientS3List struct {
+ common *CmdControl
+ client *cmdClient
+ s3 *cmdClientS3
+}
+
+// parent s3 command handle
+func (c *cmdClientS3) Command() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "s3",
+ Short: "Manage S3 users for Object storage",
+ }
+
+ // Create
+ s3CreateCmd := cmdClientS3Create{common: c.common, client: c.client, s3: c}
+ cmd.AddCommand(s3CreateCmd.Command())
+
+ // Delete
+ s3DeleteCmd := cmdClientS3Delete{common: c.common, client: c.client, s3: c}
+ cmd.AddCommand(s3DeleteCmd.Command())
+
+ // Get
+ s3GetCmd := cmdClientS3Get{common: c.common, client: c.client, s3: c}
+ cmd.AddCommand(s3GetCmd.Command())
+
+ // List
+ s3ListCmd := cmdClientS3List{common: c.common, client: c.client, s3: c}
+ cmd.AddCommand(s3ListCmd.Command())
+
+ // Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706
+ cmd.Args = cobra.NoArgs
+ cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() }
+
+ return cmd
+}
+
+// s3 Get command handle
+func (c *cmdClientS3Get) Command() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "get ",
+ Short: "Fetch details of an existing S3 user",
+ RunE: c.Run,
+ }
+
+ cmd.Flags().BoolVar(&c.jsonOutput, "json", false, "Provide output in json format")
+ return cmd
+}
+
+func (c *cmdClientS3Get) Run(cmd *cobra.Command, args []string) error {
+ // Get should be called with a single name param.
+ if len(args) != 1 {
+ return cmd.Help()
+ }
+
+ m, err := microcluster.App(context.Background(), microcluster.Args{StateDir: c.common.FlagStateDir, Verbose: c.common.FlagLogVerbose, Debug: c.common.FlagLogDebug})
+ if err != nil {
+ return fmt.Errorf("unable to fetch S3 user: %w", err)
+ }
+
+ cli, err := m.LocalClient()
+ if err != nil {
+ return err
+ }
+
+ input := &types.S3User{Name: args[0]}
+ user, err := client.GetS3User(context.Background(), cli, input)
+ if err != nil {
+ return err
+ }
+
+ err = renderOutput(user, c.jsonOutput)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// s3 create command handle
+func (c *cmdClientS3Create) Command() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "create <name>",
+ Short: "Create a new S3 user",
+ RunE: c.Run,
+ }
+
+ cmd.Flags().StringVar(&c.accessKey, "access-key", "", "custom access-key for new S3 user.")
+ cmd.Flags().StringVar(&c.secret, "secret", "", "custom secret for new S3 user.")
+ cmd.Flags().BoolVar(&c.jsonOutput, "json", false, "Provide output in json format")
+ return cmd
+}
+
+func (c *cmdClientS3Create) Run(cmd *cobra.Command, args []string) error {
+ // Get should be called with a single name param.
+ if len(args) != 1 {
+ return cmd.Help()
+ }
+
+ m, err := microcluster.App(context.Background(), microcluster.Args{StateDir: c.common.FlagStateDir, Verbose: c.common.FlagLogVerbose, Debug: c.common.FlagLogDebug})
+ if err != nil {
+ return fmt.Errorf("unable to create S3 user: %w", err)
+ }
+
+ cli, err := m.LocalClient()
+ if err != nil {
+ return err
+ }
+
+ // Create a user with given keys.
+ input := &types.S3User{
+ Name: args[0],
+ Key: c.accessKey,
+ Secret: c.secret,
+ }
+ user, err := client.CreateS3User(context.Background(), cli, input)
+ if err != nil {
+ return err
+ }
+
+ err = renderOutput(user, c.jsonOutput)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// s3 delete command handle
+func (c *cmdClientS3Delete) Command() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "delete <name>",
+ Short: "Delete an existing S3 user",
+ RunE: c.Run,
+ }
+ return cmd
+}
+
+func (c *cmdClientS3Delete) Run(cmd *cobra.Command, args []string) error {
+ // Get should be called with a single name param.
+ if len(args) != 1 {
+ return cmd.Help()
+ }
+
+ m, err := microcluster.App(context.Background(), microcluster.Args{StateDir: c.common.FlagStateDir, Verbose: c.common.FlagLogVerbose, Debug: c.common.FlagLogDebug})
+ if err != nil {
+ return fmt.Errorf("unable to delete S3 user: %w", err)
+ }
+
+ cli, err := m.LocalClient()
+ if err != nil {
+ return err
+ }
+
+ err = client.DeleteS3User(context.Background(), cli, &types.S3User{Name: args[0]})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// s3 list command handle
+func (c *cmdClientS3List) Command() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "list",
+ Short: "List all existing S3 users",
+ RunE: c.Run,
+ }
+
+ return cmd
+}
+
+func (c *cmdClientS3List) Run(cmd *cobra.Command, args []string) error {
+ // Should not be called with any params
+ if len(args) > 1 {
+ return cmd.Help()
+ }
+
+ m, err := microcluster.App(context.Background(), microcluster.Args{StateDir: c.common.FlagStateDir, Verbose: c.common.FlagLogVerbose, Debug: c.common.FlagLogDebug})
+ if err != nil {
+ return fmt.Errorf("unable to list S3 users: %w", err)
+ }
+
+ cli, err := m.LocalClient()
+ if err != nil {
+ return err
+ }
+
+ users, err := client.ListS3Users(context.Background(), cli)
+ if err != nil {
+ return err
+ }
+
+ data := make([][]string, len(users))
+ for i := range users {
+ data[i] = []string{fmt.Sprintf("%d", i+1), users[i]}
+ }
+
+ header := []string{"#", "Name"}
+ err = lxdCmd.RenderTable(lxdCmd.TableFormatTable, header, data, users)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func renderOutput(output string, isJson bool) error {
+ if isJson {
+ fmt.Print(output)
+ } else {
+ user := types.S3User{
+ Name: gjson.Get(output, "keys.0.user").Str,
+ Key: gjson.Get(output, "keys.0.access_key").Str,
+ Secret: gjson.Get(output, "keys.0.secret_key").Str,
+ }
+ err := renderSingleS3User(user)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func renderSingleS3User(user types.S3User) error {
+ data := make([][]string, 1)
+ data[0] = []string{user.Name, user.Key, user.Secret}
+
+ header := []string{"Name", "Access Key", "Secret"}
+ err := lxdCmd.RenderTable(lxdCmd.TableFormatTable, header, data, user)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/microceph/cmd/microceph/s3-user.go b/microceph/cmd/microceph/s3-user.go
new file mode 100644
index 00000000..06ab7d0f
--- /dev/null
+++ b/microceph/cmd/microceph/s3-user.go
@@ -0,0 +1 @@
+package main
diff --git a/scripts/appS3.py b/scripts/appS3.py
new file mode 100644
index 00000000..e298d993
--- /dev/null
+++ b/scripts/appS3.py
@@ -0,0 +1,99 @@
+import string
+import random
+import boto3
+import json
+import argparse
+
+
+def app_handle(args):
+ keys_path = args.keys
+ endpoint = args.endpoint
+
+ # Fetch Auth
+ with open(keys_path, 'r') as keys_file:
+ keys_dict = json.load(keys_file)
+
+ # Create Boto3 Client
+ keys = keys_dict["keys"][0]
+ client = boto3.resource("s3", verify=False,
+ endpoint_url=endpoint,
+ aws_access_key_id=keys["access_key"],
+ aws_secret_access_key=keys["secret_key"])
+
+ # Perform IO
+ objects = []
+ bucket_name = "test-bucket"
+ client.Bucket(bucket_name).create()
+ for i in range(args.obj_num):
+ object_name = "test-object"+rand_str(4)
+ data = str(rand_str(random.randint(10, 30)))*1024*1024
+ primary_object_one = client.Object(
+ bucket_name,
+ object_name
+ )
+ primary_object_one.put(Body=data)
+ object_size = primary_object_one.content_length/(1024*1024)
+ # Store for cleanup.
+ objects.append(
+ (object_name, object_size)
+ )
+ # Print object IO summary:
+ print(
+ "Object #{}: {}/{} -> Size: {}MB"
+ .format(i, bucket_name, object_name, object_size)
+ )
+
+ # Print Summary
+ print(
+ "IO Summary: Object Count {}, Total Size {}MB"
+ .format(args.obj_num, sum(size for _, size in objects))
+ )
+
+ # Cleanup (if asked for)
+ if not args.no_delete:
+ print("Performing Cleanup")
+ for obj, size in objects:
+ client.Object(bucket_name, obj).delete()
+ client.Bucket(bucket_name).delete()
+
+
+def rand_str(length: int):
+ return "".join(
+ random.choices(string.ascii_uppercase + string.digits, k=length)
+ )
+
+
+if __name__ == "__main__":
+ argparse = argparse.ArgumentParser(
+ description="An application which uses S3 for storage",
+ epilog="Ex: python3 appS3.py --keys keys.txt",
+ )
+
+ argparse.add_argument(
+ "endpoint",
+ type=str,
+ help="Provide RGW endpoint to talk to.",
+ )
+ argparse.add_argument(
+ "keys",
+ type=str,
+ help="Provide JSON file generated from Ceph RGW Admin.",
+ )
+ argparse.add_argument(
+ "--obj-num",
+ type=int,
+ default=1,
+ help="Number of objects to upload to S3.",
+ )
+ argparse.add_argument(
+ "--no-delete",
+ action="store_true",
+ help="Setting this to true would not cleanup the pushed objects.",
+ )
+ argparse.set_defaults(func=app_handle)
+
+ # Parse the args.
+ args = argparse.parse_args()
+
+ # Call the subcommand.
+ args.func(args)
diff --git a/tests/scripts/actionutils.sh b/tests/scripts/actionutils.sh
index 1e4f1394..c5e1270a 100755
--- a/tests/scripts/actionutils.sh
+++ b/tests/scripts/actionutils.sh
@@ -303,8 +303,14 @@ function free_runner_disk() {
sudo docker rmi $(docker images -q)
}
+function install_boto3() {
+ # Python script dependencies
+ sudo apt update && sudo apt install python3-pip
+ sudo pip3 install boto3
+}
-function testrgw() {
+# uses pre S3 user management methods for upgrade scenarios.
+function testrgw_old() {
set -eu
sudo microceph.ceph status
sudo systemctl status snap.microceph.rgw
@@ -320,6 +326,30 @@ function testrgw() {
( curl -s http://localhost/testbucket/test.txt | grep -F hello-radosgw ) || return -1
}
+function testrgw() {
+ set -eux
+
+ sudo microceph client s3 create testUser --json > keys.json
+ sudo python3 ./scripts/appS3.py http://localhost:80 keys.json --obj-num 2
+
+ # cleanup
+ sudo microceph client s3 delete testUser
+ rm keys.json
+}
+
+function testrgw_on_headnode() {
+ set -eux
+ local container="${1?missing}"
+ local nw=$(get_lxd_network public)
+ gw=$(echo "$nw" | cut -d/ -f1)
+ lxc exec $container -- sh -c "microceph client s3 create testUser --json" > keys.json
+ sudo python3 ./scripts/appS3.py http://${gw}0:80 keys.json --obj-num 2
+
+ # cleanup
+ lxc exec $container -- sh -c "microceph client s3 delete testUser"
+ rm keys.json
+}
+
function enable_services() {
local node="${1?missing}"
for s in mon mds mgr ; do