Commit c3a9ae7
Slight improvements.
Gerrit91 committed Oct 28, 2024
1 parent b046c19 commit c3a9ae7
Showing 6 changed files with 83 additions and 58 deletions.
59 changes: 37 additions & 22 deletions pkg/controllers/loadbalancer/config/cilium_config.go
@@ -21,57 +21,61 @@ import (
)

type ciliumConfig struct {
cfg *baseConfig
base *baseConfig
client client.Client
k8sClient clientset.Interface
}

func newCiliumConfig(cfg *baseConfig, k8sClient clientset.Interface) *ciliumConfig {
return &ciliumConfig{cfg: cfg, k8sClient: k8sClient}
func newCiliumConfig(base *baseConfig, c client.Client, k8sClient clientset.Interface) *ciliumConfig {
return &ciliumConfig{base: base, client: c, k8sClient: k8sClient}
}

func (cfg *ciliumConfig) WriteCRs(ctx context.Context, c client.Client) error {
err := cfg.writeCiliumBGPPeeringPolicies(ctx, c)
func (c *ciliumConfig) WriteCRs(ctx context.Context) error {
err := c.writeCiliumBGPPeeringPolicies(ctx)
if err != nil {
return fmt.Errorf("failed to write ciliumbgppeeringpolicy resources %w", err)
}

err = cfg.writeCiliumLoadBalancerIPPools(ctx, c)
err = c.writeCiliumLoadBalancerIPPools(ctx)
if err != nil {
return fmt.Errorf("failed to write ciliumloadbalancerippool resources %w", err)
}

err = cfg.writeNodeAnnotations(ctx)
err = c.writeNodeAnnotations(ctx)
if err != nil {
return fmt.Errorf("failed to write node annotations %w", err)
}

return nil
}

func (cfg *ciliumConfig) writeCiliumBGPPeeringPolicies(ctx context.Context, c client.Client) error {
func (c *ciliumConfig) writeCiliumBGPPeeringPolicies(ctx context.Context) error {
existingPolicies := ciliumv2alpha1.CiliumBGPPeeringPolicyList{}
err := c.List(ctx, &existingPolicies)
err := c.client.List(ctx, &existingPolicies)
if err != nil {
return err
}

for _, existingPolicy := range existingPolicies.Items {
existingPolicy := existingPolicy
found := false
for _, peer := range cfg.cfg.Peers {

for _, peer := range c.base.Peers {
if fmt.Sprintf("%d", peer.ASN) == existingPolicy.Name {
found = true
break
}
}

if !found {
err := c.Delete(ctx, &existingPolicy)
err := c.client.Delete(ctx, &existingPolicy)
if err != nil {
return err
}
}
}

for _, peer := range cfg.cfg.Peers {
for _, peer := range c.base.Peers {
bgpPeeringPolicy := &ciliumv2alpha1.CiliumBGPPeeringPolicy{
TypeMeta: metav1.TypeMeta{
APIVersion: ciliumv2alpha1.CustomResourceDefinitionGroup + "/" + ciliumv2alpha1.CustomResourceDefinitionVersion,
@@ -81,7 +85,8 @@ func (cfg *ciliumConfig) writeCiliumBGPPeeringPolicies(ctx context.Context, c cl
Name: fmt.Sprintf("%d", peer.ASN),
},
}
res, err := controllerutil.CreateOrUpdate(ctx, c, bgpPeeringPolicy, func() error {

res, err := controllerutil.CreateOrUpdate(ctx, c.client, bgpPeeringPolicy, func() error {
bgpPeeringPolicy.Spec = ciliumv2alpha1.CiliumBGPPeeringPolicySpec{
NodeSelector: convertNodeSelector(&peer.NodeSelector),
VirtualRouters: []ciliumv2alpha1.CiliumBGPVirtualRouter{
@@ -113,6 +118,7 @@ func (cfg *ciliumConfig) writeCiliumBGPPeeringPolicies(ctx context.Context, c cl
if err != nil {
return err
}

if res != controllerutil.OperationResultNone {
klog.Infof("bgppeer: %v", res)
}
@@ -121,30 +127,33 @@ func (cfg *ciliumConfig) writeCiliumBGPPeeringPolicies(ctx context.Context, c cl
return nil
}

func (cfg *ciliumConfig) writeCiliumLoadBalancerIPPools(ctx context.Context, c client.Client) error {
func (c *ciliumConfig) writeCiliumLoadBalancerIPPools(ctx context.Context) error {
existingPools := ciliumv2alpha1.CiliumLoadBalancerIPPoolList{}
err := c.List(ctx, &existingPools)
err := c.client.List(ctx, &existingPools)
if err != nil {
return err
}

for _, existingPool := range existingPools.Items {
existingPool := existingPool
found := false
for _, pool := range cfg.cfg.AddressPools {

for _, pool := range c.base.AddressPools {
if pool.Name == existingPool.Name {
found = true
break
}
}

if !found {
err := c.Delete(ctx, &existingPool)
err := c.client.Delete(ctx, &existingPool)
if err != nil {
return err
}
}
}

for _, pool := range cfg.cfg.AddressPools {
for _, pool := range c.base.AddressPools {
ipPool := &ciliumv2alpha1.CiliumLoadBalancerIPPool{
TypeMeta: metav1.TypeMeta{
APIVersion: ciliumv2alpha1.CustomResourceDefinitionGroup + "/" + ciliumv2alpha1.CustomResourceDefinitionVersion,
@@ -154,7 +163,8 @@ func (cfg *ciliumConfig) writeCiliumLoadBalancerIPPools(ctx context.Context, c c
Name: pool.Name,
},
}
res, err := controllerutil.CreateOrUpdate(ctx, c, ipPool, func() error {

res, err := controllerutil.CreateOrUpdate(ctx, c.client, ipPool, func() error {
cidrs := make([]ciliumv2alpha1.CiliumLoadBalancerIPPoolIPBlock, 0)
for _, cidr := range pool.CIDRs {
ipPoolBlock := ciliumv2alpha1.CiliumLoadBalancerIPPoolIPBlock{
@@ -170,6 +180,7 @@ func (cfg *ciliumConfig) writeCiliumLoadBalancerIPPools(ctx context.Context, c c
if err != nil {
return err
}

if res != controllerutil.OperationResultNone {
klog.Infof("ipaddresspool: %v", res)
}
@@ -178,25 +189,29 @@ func (cfg *ciliumConfig) writeCiliumLoadBalancerIPPools(ctx context.Context, c c
return nil
}

func (cfg *ciliumConfig) writeNodeAnnotations(ctx context.Context) error {
nodes, err := kubernetes.GetNodes(ctx, cfg.k8sClient)
func (c *ciliumConfig) writeNodeAnnotations(ctx context.Context) error {
nodes, err := kubernetes.GetNodes(ctx, c.k8sClient)
if err != nil {
return fmt.Errorf("failed to write node annotations: %w", err)
}

backoff := wait.Backoff{
Steps: 20,
Duration: 50 * time.Millisecond,
Jitter: 1.0,
}

for _, n := range nodes {
asn, err := getASNFromNodeLabels(n)
if err != nil {
return fmt.Errorf("failed to write node annotations for node %s: %w", n.Name, err)
}

annotations := map[string]string{
fmt.Sprintf("cilium.io/bgp-virtual-router.%d", asn): "router-id=127.0.0.1",
}
err = kubernetes.UpdateNodeAnnotationsWithBackoff(ctx, cfg.k8sClient, n.Name, annotations, backoff)

err = kubernetes.UpdateNodeAnnotationsWithBackoff(ctx, c.k8sClient, n.Name, annotations, backoff)
if err != nil {
return fmt.Errorf("failed to write node annotations for node %s: %w", n.Name, err)
}
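
For context, a minimal sketch of how the refactored type is now wired up: the controller-runtime client is injected once through the constructor, so WriteCRs only takes a context. The helper applyCiliumConfig and its surrounding wiring are illustrative, not part of this commit.

package config

import (
	"context"
	"fmt"

	clientset "k8s.io/client-go/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// applyCiliumConfig is an illustrative helper (not part of this commit) that
// shows the new wiring: the controller-runtime client is stored on the struct
// at construction time, so WriteCRs no longer needs it as a parameter.
func applyCiliumConfig(ctx context.Context, base *baseConfig, c client.Client, k8s clientset.Interface) error {
	cilium := newCiliumConfig(base, c, k8s)

	if err := cilium.WriteCRs(ctx); err != nil {
		return fmt.Errorf("failed to apply cilium load balancer configuration: %w", err)
	}

	return nil
}
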
10 changes: 5 additions & 5 deletions pkg/controllers/loadbalancer/config/cilium_config_test.go
@@ -40,7 +40,7 @@ func TestCiliumConfig(t *testing.T) {
nodes: []v1.Node{},
wantErr: nil,
want: &ciliumConfig{
cfg: &baseConfig{
base: &baseConfig{
AddressPools: addressPools{
{
Name: "internet-ephemeral",
@@ -83,7 +83,7 @@ func TestCiliumConfig(t *testing.T) {
nodes: []v1.Node{},
wantErr: nil,
want: &ciliumConfig{
cfg: &baseConfig{
base: &baseConfig{
AddressPools: addressPools{
{
Name: "internet-ephemeral",
@@ -137,7 +137,7 @@ func TestCiliumConfig(t *testing.T) {
nodes: []v1.Node{},
wantErr: nil,
want: &ciliumConfig{
cfg: &baseConfig{
base: &baseConfig{
AddressPools: addressPools{
{
Name: "internet-ephemeral",
@@ -239,7 +239,7 @@ func TestCiliumConfig(t *testing.T) {
nodes: []v1.Node{},
wantErr: nil,
want: &ciliumConfig{
cfg: &baseConfig{
base: &baseConfig{
AddressPools: addressPools{
{
Name: "internet-ephemeral",
@@ -298,7 +298,7 @@ func TestCiliumConfig(t *testing.T) {
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
cfg, err := New("cilium", tt.ips, tt.nws, tt.nodes, nil)
cfg, err := New("cilium", tt.ips, tt.nws, tt.nodes, nil, nil)
if diff := cmp.Diff(err, tt.wantErr); diff != "" {
t.Errorf("error = %v", diff)
return
10 changes: 5 additions & 5 deletions pkg/controllers/loadbalancer/config/config.go
@@ -16,27 +16,27 @@ import (
)

type LoadBalancerConfig interface {
WriteCRs(ctx context.Context, c client.Client) error
WriteCRs(ctx context.Context) error
}

type baseConfig struct {
Peers []*peer `json:"peers,omitempty" yaml:"peers,omitempty"`
AddressPools addressPools `json:"address-pools,omitempty" yaml:"address-pools,omitempty"`
}

func New(loadBalancerType string, ips []*models.V1IPResponse, nws sets.Set[string], nodes []v1.Node, k8sClientSet clientset.Interface) (LoadBalancerConfig, error) {
func New(loadBalancerType string, ips []*models.V1IPResponse, nws sets.Set[string], nodes []v1.Node, c client.Client, k8sClientSet clientset.Interface) (LoadBalancerConfig, error) {
bc, err := newBaseConfig(ips, nws, nodes)
if err != nil {
return nil, err
}

switch loadBalancerType {
case "metallb":
return newMetalLBConfig(bc), nil
return newMetalLBConfig(bc, c), nil
case "cilium":
return newCiliumConfig(bc, k8sClientSet), nil
return newCiliumConfig(bc, c, k8sClientSet), nil
default:
return newMetalLBConfig(bc), nil
return newMetalLBConfig(bc, c), nil
}
}

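
A usage sketch of the updated New signature: callers now hand over both the controller-runtime client and the clientset up front, and the returned LoadBalancerConfig writes its CRs with just a context. The helper reconcileLoadBalancerConfig is illustrative only, and the metal-go models import path is assumed to match the repository's existing imports.

package config

import (
	"context"
	"fmt"

	"github.com/metal-stack/metal-go/api/models"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// reconcileLoadBalancerConfig is an illustrative helper (not part of this
// commit): it builds the config with both clients and then applies the CRs
// using only a context, matching the new LoadBalancerConfig interface.
func reconcileLoadBalancerConfig(ctx context.Context, lbType string, ips []*models.V1IPResponse, nws sets.Set[string], nodes []v1.Node, c client.Client, k8s clientset.Interface) error {
	cfg, err := New(lbType, ips, nws, nodes, c, k8s)
	if err != nil {
		return fmt.Errorf("failed to build load balancer config: %w", err)
	}

	return cfg.WriteCRs(ctx)
}
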