Skip to content
This repository has been archived by the owner on Dec 15, 2022. It is now read-only.

Commit

Permalink
Merge pull request #2 from jkneubuh/feature/unsensible
Browse files Browse the repository at this point in the history
Feature/unsensible
  • Loading branch information
mbwhite authored Jul 19, 2022
2 parents db0555d + 53881ac commit 5ea1f58
Show file tree
Hide file tree
Showing 3 changed files with 198 additions and 111 deletions.
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -50,8 +50,8 @@ just kind
This will deploy the Fabric Operator and the Fabric Operations console via two Ansible Playbooks, and some configuration variables.


Creation of the operator - `ansible-playbook ./infrastructure/01-operator-install.yml`
Creation of the console - `ansible-playbook ./infrastructure/02-console-install.yml`
- Creation of the operator: `ansible-playbook ./infrastructure/01-operator-install.yml`
- Creation of the console: `ansible-playbook ./infrastructure/02-console-install.yml`

The configuration file is `vars.yml`

Expand Down
148 changes: 148 additions & 0 deletions infrastructure/kind_with_nginx.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
#!/bin/bash
#
# Copyright contributors to the Hyperledgendary Full Stack Asset Transfer project
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Abort on the first failing command, and treat a failure in any stage of a
# pipeline as a failure of the whole pipeline.
set -eo pipefail
# Trace each command as it executes (helpful when reading CI logs).
set -x

#
# Recreate a local KIND cluster and prepare it for use: delete any existing
# cluster of the same name, create a fresh one, install the Nginx ingress
# controller, and apply the CoreDNS wildcard override.
#
# Arguments:
#   $1 - name of the kind cluster
#
function kind_with_nginx() {
  local cluster_name=$1

  # Quote the name everywhere so values containing spaces are passed intact.
  delete_cluster "$cluster_name"

  create_cluster "$cluster_name"

  start_nginx

  apply_coredns_override
}


#
# Delete a kind cluster if it exists.
#
# Arguments:
#   $1 - name of the kind cluster to delete
#
function delete_cluster() {
  local cluster_name=$1

  # Quoted so a name with unusual characters is passed as a single argument.
  kind delete cluster --name "$cluster_name"
}


#
# Create a local KIND cluster.
#
# Arguments:
#   $1 - name of the kind cluster to create
#
function create_cluster() {
  local cluster_name=$1

  # Single control-plane node, labelled for ingress, with ports 80/443
  # mapped to the host so the ingress controller is reachable locally.
  cat << EOF | kind create cluster --name "$cluster_name" --config=-
---
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"
  extraPortMappings:
  - containerPort: 80
    hostPort: 80
    protocol: TCP
  - containerPort: 443
    hostPort: 443
    protocol: TCP
EOF

  #
  # Work around a bug in KIND where DNS is not always resolved correctly on machines with IPv6
  #
  # Scope the node listing to this cluster: a bare 'kind get nodes' only
  # inspects the default "kind" cluster and would miss custom cluster names.
  for node in $(kind get nodes --name "$cluster_name");
  do
    docker exec "$node" sysctl net.ipv4.conf.all.route_localnet=1;
  done
}


#
# Install an Nginx ingress controller bound to port 80 and 443.
# ssl_passthrough mode is enabled for TLS termination at the Fabric node endpoints.
#
function start_nginx() {
  kubectl apply -k https://github.com/hyperledger-labs/fabric-operator.git/config/ingress/kind

  # Brief pause so the controller pod exists before 'kubectl wait' looks for it.
  sleep 10

  # Block until the ingress controller reports Ready, or give up after 2m.
  local -a wait_args=(
    --namespace ingress-nginx
    --for=condition=ready pod
    --selector=app.kubernetes.io/component=controller
    --timeout=2m
  )
  kubectl wait "${wait_args[@]}"
}


#
# Override Core DNS with a wildcard matcher for the "*.localho.st" domain, binding to the
# IP address of the Nginx ingress controller on the kubernetes internal network. Effectively this
# "steals" the domain name for *.localho.st, directing traffic to the Nginx load balancer, rather
# than to the loopback interface at 127.0.0.1.
#
function apply_coredns_override() {
  # jsonpath extracts the cluster IP directly, avoiding a hard runtime
  # dependency on jq. 'local' keeps the variable out of the global scope;
  # declaration and assignment are split so a failed kubectl call is not
  # masked by the 'local' builtin's exit status.
  local CLUSTER_IP
  CLUSTER_IP=$(kubectl -n ingress-nginx get svc ingress-nginx-controller -o jsonpath='{.spec.clusterIP}')

  cat << EOF | kubectl apply -f -
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        rewrite name regex (.*)\.localho\.st host.ingress.internal
        hosts {
            ${CLUSTER_IP} host.ingress.internal
            fallthrough
        }
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
EOF

  # Restart CoreDNS so the new Corefile is picked up immediately.
  kubectl -n kube-system rollout restart deployment/coredns
}


kind_with_nginx $1
157 changes: 48 additions & 109 deletions justfile
Original file line number Diff line number Diff line change
@@ -1,4 +1,21 @@
# Apache-2.0
#
# Copyright contributors to the Hyperledgendary Full Stack Asset Transfer project
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


# Main justfile to run all the scripts
#
Expand All @@ -18,133 +35,55 @@ bootstrap:
#!/bin/bash



cluster_name := "kind"

cluster_name := "kind"

# Starts a local KIND Kubernetes cluster
# Installs Nginx ingress controller
# Adds a DNS override in kube DNS for *.localho.st -> Nginx LB IP
kind:
#!/bin/bash
set -eo pipefail

set -x


#
# Create a local KIND cluster
#
cat << EOF | kind create cluster --name {{cluster_name}} --config=-
---
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
kubeadmConfigPatches:
- |
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
extraPortMappings:
- containerPort: 80
hostPort: 80
protocol: TCP
- containerPort: 443
hostPort: 443
protocol: TCP
EOF


#
# Work around a bug in KIND where DNS is not always resolved correctly on machines with IPv6
#
for node in $(kind get nodes);
do
docker exec "$node" sysctl net.ipv4.conf.all.route_localnet=1;
done


#
# Install an Nginx ingress controller bound to port 80 and 443.
# ssl_passthrough mode is enabled for TLS termination at the Fabric node endpoints.
#
kubectl apply -k https://github.com/hyperledger-labs/fabric-operator.git/config/ingress/kind
sleep 10
kubectl wait --namespace ingress-nginx \
--for=condition=ready pod \
--selector=app.kubernetes.io/component=controller \
--timeout=2m


#
# Override Core DNS with a wildcard matcher for the "*.localho.st" domain, binding to the
# IP address of the Nginx ingress controller on the kubernetes internal network. Effectively this
# "steals" the domain name for *.localho.st, directing traffic to the Nginx load balancer, rather
# than to the loopback interface at 127.0.0.1.
#
CLUSTER_IP=$(kubectl -n ingress-nginx get svc ingress-nginx-controller -o json | jq -r .spec.clusterIP)

cat << EOF | kubectl apply -f -
---
kind: ConfigMap
apiVersion: v1
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
rewrite name regex (.*)\.localho\.st host.ingress.internal
hosts {
${CLUSTER_IP} host.ingress.internal
fallthrough
}
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
EOF

kubectl -n kube-system rollout restart deployment/coredns

infrastructure/kind_with_nginx.sh {{cluster_name}}

unkind:
#!/bin/bash
kind delete cluster --name {{cluster_name}}


# Installs and configures a sample Fabric Network
network:
sample-network: console
#!/bin/bash
set -ex -o pipefail

docker run --rm -u $(id -u) -v ${HOME}/.kube/:/home/ibp-user/.kube/ -v ${CWDIR}/infrastructure/fabric_network_playbooks:/playbooks -v ${CWDIR}/_cfg:/_cfg --network=host ofs-ansible:latest ansible-playbook /playbooks/00-complete.yml
docker run \
--rm \
-u $(id -u) \
-v ${HOME}/.kube/:/home/ibp-user/.kube/ \
-v ${CWDIR}/infrastructure/fabric_network_playbooks:/playbooks \
-v ${CWDIR}/_cfg:/_cfg \
--network=host \
ofs-ansible:latest \
ansible-playbook /playbooks/00-complete.yml


# Install the operations console
# Install the operations console and fabric-operator
console: operator
#!/bin/bash
set -ex -o pipefail

docker run --rm -v ${HOME}/.kube/:/home/ibp-user/.kube/ -v $(pwd)/infrastructure/operator_console_playbooks:/playbooks --network=host ofs-ansible:latest ansible-playbook /playbooks/01-operator-install.yml
docker run --rm -v ${HOME}/.kube/:/home/ibp-user/.kube/ -v $(pwd)/infrastructure/operator_console_playbooks:/playbooks --network=host ofs-ansible:latest ansible-playbook /playbooks/02-console-install.yml
docker run \
--rm \
-v ${HOME}/.kube/:/home/ibp-user/.kube/ \
-v $(pwd)/infrastructure/operator_console_playbooks:/playbooks \
--network=host \
ofs-ansible:latest \
ansible-playbook /playbooks/01-operator-install.yml

docker run \
--rm \
-v ${HOME}/.kube/:/home/ibp-user/.kube/ \
-v $(pwd)/infrastructure/operator_console_playbooks:/playbooks \
--network=host \
ofs-ansible:latest \
ansible-playbook /playbooks/02-console-install.yml

AUTH=$(curl -X POST https://fabricinfra-hlf-console-console.localho.st:443/ak/api/v2/permissions/keys -u admin:password -k -H 'Content-Type: application/json' -d '{"roles": ["writer", "manager"],"description": "newkey"}')
KEY=$(echo $AUTH | jq .api_key | tr -d '"')
Expand All @@ -161,7 +100,7 @@ console: operator
cat ${CWDIR}/_cfg/auth-vars.yml


# Just install the operator
# Just install the fabric-operator
operator:
#!/bin/bash
set -ex -o pipefail
Expand All @@ -172,5 +111,5 @@ operator:
-v $(pwd)/infrastructure/operator_console_playbooks:/playbooks \
--network=host \
ofs-ansible:latest \
ansible-playbook /playbooks/01-operator-install.yml
ansible-playbook /playbooks/01-operator-install.yml

0 comments on commit 5ea1f58

Please sign in to comment.