
Commit 182791c — Enable frontend NLB (#4126)

1 parent 66556cd

17 files changed: +2434 −79 lines

Diff for: controllers/gateway/gateway_controller.go (+2 −1)

```diff
@@ -3,6 +3,7 @@ package gateway
 import (
 	"context"
 	"fmt"
+
 	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
@@ -242,7 +243,7 @@ func (r *gatewayReconciler) reconcileUpdate(ctx context.Context, gw *gwv1.Gatewa
 }
 
 func (r *gatewayReconciler) deployModel(ctx context.Context, gw *gwv1.Gateway, stack core.Stack) error {
-	if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, r.controllerName); err != nil {
+	if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, r.controllerName, nil); err != nil {
 		var requeueNeededAfter *runtime.RequeueNeededAfter
 		if errors.As(err, &requeueNeededAfter) {
 			return err
```
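The gateway reconciler (like the service reconciler below) simply passes `nil` for the new final `Deploy` argument, since only the ingress path provisions a frontend NLB. A minimal sketch of the widened deployer contract implied by the three `Deploy` call sites in this commit — the package paths, the metrics-collector type, and the parameter name are assumptions, not shown in this diff:

```go
// Sketch only: inferred from the Deploy call sites in this commit.
// The real interface lives in the controller's deploy package.
package deploy

import (
	"context"

	lbcmetrics "sigs.k8s.io/aws-load-balancer-controller/pkg/metrics/lbc" // assumed path
	"sigs.k8s.io/aws-load-balancer-controller/pkg/model/core"             // assumed path
)

type StackDeployer interface {
	// Deploy reconciles the resource stack. frontendNlbTargetGroupDesiredState
	// is non-nil only for the ingress path, which may provision a frontend NLB.
	Deploy(ctx context.Context, stack core.Stack, metricsCollector lbcmetrics.MetricCollector,
		controllerName string, frontendNlbTargetGroupDesiredState *core.FrontendNlbTargetGroupDesiredState) error
}
```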

Diff for: controllers/ingress/group_controller.go (+70 −15)

```diff
@@ -151,7 +151,7 @@ func (r *groupReconciler) reconcile(ctx context.Context, req reconcile.Request)
 		return errmetrics.NewErrorWithMetrics(controllerName, "add_group_finalizer_error", err, r.metricsCollector)
 	}
 
-	_, lb, err := r.buildAndDeployModel(ctx, ingGroup)
+	_, lb, frontendNlb, err := r.buildAndDeployModel(ctx, ingGroup)
 	if err != nil {
 		return err
 	}
@@ -164,7 +164,14 @@ func (r *groupReconciler) reconcile(ctx context.Context, req reconcile.Request)
 		if statusErr != nil {
 			return
 		}
-		statusErr = r.updateIngressGroupStatus(ctx, ingGroup, lbDNS)
+		var frontendNlbDNS string
+		if frontendNlb != nil {
+			frontendNlbDNS, statusErr = frontendNlb.DNSName().Resolve(ctx)
+			if statusErr != nil {
+				return
+			}
+		}
+		statusErr = r.updateIngressGroupStatus(ctx, ingGroup, lbDNS, frontendNlbDNS)
 		if statusErr != nil {
 			r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedUpdateStatus,
 				fmt.Sprintf("Failed update status due to %v", statusErr))
@@ -191,38 +198,40 @@ func (r *groupReconciler) reconcile(ctx context.Context, req reconcile.Request)
 	return nil
 }
 
-func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingress.Group) (core.Stack, *elbv2model.LoadBalancer, error) {
+func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingress.Group) (core.Stack, *elbv2model.LoadBalancer, *elbv2model.LoadBalancer, error) {
 	var stack core.Stack
 	var lb *elbv2model.LoadBalancer
 	var secrets []types.NamespacedName
 	var backendSGRequired bool
 	var err error
+	var frontendNlbTargetGroupDesiredState *core.FrontendNlbTargetGroupDesiredState
+	var frontendNlb *elbv2model.LoadBalancer
 	buildModelFn := func() {
-		stack, lb, secrets, backendSGRequired, err = r.modelBuilder.Build(ctx, ingGroup, r.metricsCollector)
+		stack, lb, secrets, backendSGRequired, frontendNlbTargetGroupDesiredState, frontendNlb, err = r.modelBuilder.Build(ctx, ingGroup, r.metricsCollector)
 	}
 	r.metricsCollector.ObserveControllerReconcileLatency(controllerName, "build_model", buildModelFn)
 	if err != nil {
 		r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedBuildModel, fmt.Sprintf("Failed build model due to %v", err))
-		return nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "build_model_error", err, r.metricsCollector)
+		return nil, nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "build_model_error", err, r.metricsCollector)
 	}
 	stackJSON, err := r.stackMarshaller.Marshal(stack)
 	if err != nil {
 		r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedBuildModel, fmt.Sprintf("Failed build model due to %v", err))
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	r.logger.Info("successfully built model", "model", stackJSON)
 
 	deployModelFn := func() {
-		err = r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "ingress")
+		err = r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "ingress", frontendNlbTargetGroupDesiredState)
 	}
 	r.metricsCollector.ObserveControllerReconcileLatency(controllerName, "deploy_model", deployModelFn)
 	if err != nil {
 		var requeueNeededAfter *runtime.RequeueNeededAfter
 		if errors.As(err, &requeueNeededAfter) {
-			return nil, nil, err
+			return nil, nil, nil, err
 		}
 		r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedDeployModel, fmt.Sprintf("Failed deploy model due to %v", err))
-		return nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "deploy_model_error", err, r.metricsCollector)
+		return nil, nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "deploy_model_error", err, r.metricsCollector)
 	}
 	r.logger.Info("successfully deployed model", "ingressGroup", ingGroup.ID)
 	r.secretsManager.MonitorSecrets(ingGroup.ID.String(), secrets)
@@ -232,9 +241,9 @@ func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingr
 		inactiveResources = append(inactiveResources, k8s.ToSliceOfNamespacedNames(ingGroup.Members)...)
 	}
 	if err := r.backendSGProvider.Release(ctx, networkingpkg.ResourceTypeIngress, inactiveResources); err != nil {
-		return nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "release_auto_generated_backend_sg_error", err, r.metricsCollector)
+		return nil, nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "release_auto_generated_backend_sg_error", err, r.metricsCollector)
 	}
-	return stack, lb, nil
+	return stack, lb, frontendNlb, nil
 }
 
 func (r *groupReconciler) recordIngressGroupEvent(_ context.Context, ingGroup ingress.Group, eventType string, reason string, message string) {
@@ -243,29 +252,41 @@ func (r *groupReconciler) recordIngressGroupEvent(_ context.Context, ingGroup in
 	}
 }
 
-func (r *groupReconciler) updateIngressGroupStatus(ctx context.Context, ingGroup ingress.Group, lbDNS string) error {
+func (r *groupReconciler) updateIngressGroupStatus(ctx context.Context, ingGroup ingress.Group, lbDNS string, frontendNLBDNS string) error {
 	for _, member := range ingGroup.Members {
-		if err := r.updateIngressStatus(ctx, lbDNS, member.Ing); err != nil {
+		if err := r.updateIngressStatus(ctx, lbDNS, frontendNLBDNS, member.Ing); err != nil {
 			return err
 		}
 	}
 	return nil
 }
 
-func (r *groupReconciler) updateIngressStatus(ctx context.Context, lbDNS string, ing *networking.Ingress) error {
+func (r *groupReconciler) updateIngressStatus(ctx context.Context, lbDNS string, frontendNlbDNS string, ing *networking.Ingress) error {
+	ingOld := ing.DeepCopy()
 	if len(ing.Status.LoadBalancer.Ingress) != 1 ||
 		ing.Status.LoadBalancer.Ingress[0].IP != "" ||
 		ing.Status.LoadBalancer.Ingress[0].Hostname != lbDNS {
-		ingOld := ing.DeepCopy()
 		ing.Status.LoadBalancer.Ingress = []networking.IngressLoadBalancerIngress{
 			{
 				Hostname: lbDNS,
 			},
 		}
+	}
+
+	// Ensure frontendNLBDNS is appended if it is not already added
+	if frontendNlbDNS != "" && !hasFrontendNlbHostName(ing.Status.LoadBalancer.Ingress, frontendNlbDNS) {
+		ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, networking.IngressLoadBalancerIngress{
+			Hostname: frontendNlbDNS,
+		})
+	}
+
+	if !isIngressStatusEqual(ingOld.Status.LoadBalancer.Ingress, ing.Status.LoadBalancer.Ingress) {
 		if err := r.k8sClient.Status().Patch(ctx, ing, client.MergeFrom(ingOld)); err != nil {
 			return errors.Wrapf(err, "failed to update ingress status: %v", k8s.NamespacedName(ing))
 		}
 	}
+
 	return nil
 }
 
@@ -387,3 +408,37 @@ func isResourceKindAvailable(resList *metav1.APIResourceList, kind string) bool
 	}
 	return false
 }
+
+func isIngressStatusEqual(a, b []networking.IngressLoadBalancerIngress) bool {
+	if len(a) != len(b) {
+		return false
+	}
+
+	setA := make(map[string]struct{}, len(a))
+	setB := make(map[string]struct{}, len(b))
+
+	for _, ingress := range a {
+		setA[ingress.Hostname] = struct{}{}
+	}
+
+	for _, ingress := range b {
+		setB[ingress.Hostname] = struct{}{}
+	}
+
+	for key := range setA {
+		if _, exists := setB[key]; !exists {
+			return false
+		}
+	}
+	return true
+}
+
+func hasFrontendNlbHostName(ingressList []networking.IngressLoadBalancerIngress, frontendNlbDNS string) bool {
+	for _, ingress := range ingressList {
+		if ingress.Hostname == frontendNlbDNS {
+			return true
+		}
+	}
+	return false
+}
```
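The status patch is now guarded by `isIngressStatusEqual`, which compares hostname sets rather than slices, so reordering of existing entries never triggers a spurious patch, and `hasFrontendNlbHostName` keeps repeated reconciles from appending the NLB hostname twice. A standalone sketch of that set-based comparison, with simplified types and placeholder DNS names:

```go
package main

import "fmt"

// entry stands in for networking.IngressLoadBalancerIngress, reduced to the
// one field the new helpers inspect.
type entry struct{ Hostname string }

// isStatusEqual mirrors isIngressStatusEqual above: equal length plus set
// membership on hostnames, so entry order is irrelevant.
func isStatusEqual(a, b []entry) bool {
	if len(a) != len(b) {
		return false
	}
	seen := make(map[string]struct{}, len(a))
	for _, e := range a {
		seen[e.Hostname] = struct{}{}
	}
	for _, e := range b {
		if _, ok := seen[e.Hostname]; !ok {
			return false
		}
	}
	return true
}

func main() {
	// Placeholder hostnames; real values come from the provisioned ALB and NLB.
	alb := entry{Hostname: "k8s-group-alb.us-west-2.elb.amazonaws.com"}
	nlb := entry{Hostname: "k8s-group-nlb.elb.us-west-2.amazonaws.com"}

	fmt.Println(isStatusEqual([]entry{alb, nlb}, []entry{nlb, alb})) // true: order-insensitive
	fmt.Println(isStatusEqual([]entry{alb}, []entry{alb, nlb}))      // false: NLB entry missing
}
```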

Diff for: controllers/service/service_controller.go (+1 −1)

```diff
@@ -152,7 +152,7 @@ func (r *serviceReconciler) buildModel(ctx context.Context, svc *corev1.Service)
 }
 
 func (r *serviceReconciler) deployModel(ctx context.Context, svc *corev1.Service, stack core.Stack) error {
-	if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "service"); err != nil {
+	if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "service", nil); err != nil {
 		var requeueNeededAfter *runtime.RequeueNeededAfter
 		if errors.As(err, &requeueNeededAfter) {
 			return err
```

Diff for: docs/guide/ingress/annotations.md (+128)

````diff
@@ -63,6 +63,19 @@ You can add annotations to kubernetes Ingress and Service objects to customize t
 | [alb.ingress.kubernetes.io/listener-attributes.${Protocol}-${Port}](#listener-attributes) | stringMap |N/A| Ingress |Merge|
 | [alb.ingress.kubernetes.io/minimum-load-balancer-capacity](#load-balancer-capacity-reservation) | stringMap |N/A| Ingress | Exclusive |
 | [alb.ingress.kubernetes.io/ipam-ipv4-pool-id](#ipam-ipv4-pool-id) | string |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/enable-frontend-nlb](#enable-frontend-nlb) | boolean |false| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/frontend-nlb-scheme](#frontend-nlb-scheme) | internal \| internet-facing |internal| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/frontend-nlb-subnets](#frontend-nlb-subnets) | stringList |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/frontend-nlb-security-groups](#frontend-nlb-security-groups) | stringList |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping](#frontend-nlb-listener-port-mapping) | stringMap |N/A| Ingress | Merge |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-port](#frontend-nlb-healthcheck-port) | integer \| traffic-port |traffic-port| Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-protocol](#frontend-nlb-healthcheck-protocol) | HTTP \| HTTPS |HTTP| Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path](#frontend-nlb-healthcheck-path) | string |/| Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-interval-seconds](#frontend-nlb-healthcheck-interval-seconds) | integer |15| Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-timeout-seconds](#frontend-nlb-healthcheck-timeout-seconds) | integer |5| Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-healthy-threshold-count](#frontend-nlb-healthcheck-healthy-threshold-count) | integer |3| Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-unhealthy-threshold-count](#frontend-nlb-healthcheck-unhealthy-threshold-count) | integer |3| Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-success-codes](#frontend-nlb-healthcheck-success-codes) | string |200| Ingress | N/A |
 
 ## IngressGroup
 IngressGroup feature enables you to group multiple Ingress resources together.
@@ -1024,3 +1037,118 @@ Load balancer capacity unit reservation can be configured via following annotati
 - disable shield protection
 ```alb.ingress.kubernetes.io/shield-advanced-protection: 'false'
 ```
+
+
+## Enable frontend NLB
+When this option is set to true, the controller will automatically provision a Network Load Balancer and register the Application Load Balancer as its target. Additional annotations are available to customize the NLB configuration, including options for scheme, security groups, subnets, and health checks. The Ingress resource will have two status entries, one for the NLB DNS and one for the ALB DNS. This allows users to combine the benefits of NLB and ALB in a single solution, leveraging NLB features like static IP addresses and PrivateLink while retaining the rich routing capabilities of ALB.
+
+!!!warning
+    - If you need to change the ALB [scheme](#scheme), make sure to disable this feature first. Changing the scheme creates a new ALB, which could interfere with the current configuration.
+    - If you create the Ingress and enable this feature at the same time, provisioning the NLB and registering the ALB as its target can take 3-4 minutes to complete.
+
+- <a name="enable-frontend-nlb">`alb.ingress.kubernetes.io/enable-frontend-nlb`</a> enables frontend Network Load Balancer functionality.
+
+    !!!example
+        - Enable frontend NLB
+        ```
+        alb.ingress.kubernetes.io/enable-frontend-nlb: "true"
+        ```
+
+- <a name="frontend-nlb-scheme">`alb.ingress.kubernetes.io/frontend-nlb-scheme`</a> specifies the scheme for the Network Load Balancer.
+
+    !!!example
+        - Set NLB scheme to internet-facing
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-scheme: internet-facing
+        ```
+
+- <a name="frontend-nlb-subnets">`alb.ingress.kubernetes.io/frontend-nlb-subnets`</a> specifies the subnets for the Network Load Balancer.
+
+    !!!example
+        - Specify subnets for the NLB
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-subnets: subnet-xxxx1,subnet-xxxx2
+        ```
+
+- <a name="frontend-nlb-security-groups">`alb.ingress.kubernetes.io/frontend-nlb-security-groups`</a> specifies the security groups for the Network Load Balancer.
+
+    !!!example
+        - Specify security groups for the NLB
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-security-groups: sg-xxxx1,sg-xxxx2
+        ```
+
+- <a name="frontend-nlb-listener-port-mapping">`alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping`</a> specifies the port mapping configuration for the Network Load Balancer listeners.
+
+    !!!note "Default"
+        - The port defaults to match the ALB listener port, based on whether [`alb.ingress.kubernetes.io/listen-ports`](#listen-ports) is specified.
+
+    !!!example
+        - Forward TCP traffic from NLB:80 to ALB:443
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping: 80=443
+        ```
+
+- <a name="frontend-nlb-healthcheck-port">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-port`</a> specifies the port used for health checks.
+
+    !!!example
+        - Set health check port
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-port: traffic-port
+        ```
+
+- <a name="frontend-nlb-healthcheck-protocol">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-protocol`</a> specifies the protocol used for health checks.
+
+    !!!example
+        - Set health check protocol
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-protocol: HTTP
+        ```
+
+- <a name="frontend-nlb-healthcheck-path">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path`</a> specifies the destination path for health checks.
+
+    !!!example
+        - Set health check path
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path: /health
+        ```
+
+- <a name="frontend-nlb-healthcheck-interval-seconds">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-interval-seconds`</a> specifies the interval between consecutive health checks.
+
+    !!!example
+        - Set health check interval
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-interval-seconds: '15'
+        ```
+
+- <a name="frontend-nlb-healthcheck-timeout-seconds">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-timeout-seconds`</a> specifies the target group health check timeout.
+
+    !!!example
+        - Set health check timeout
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-timeout-seconds: '5'
+        ```
+
+- <a name="frontend-nlb-healthcheck-healthy-threshold-count">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-healthy-threshold-count`</a> specifies the number of consecutive health check successes required before a target is considered healthy.
+
+    !!!example
+        - Set healthy threshold count
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-healthy-threshold-count: '3'
+        ```
+
+- <a name="frontend-nlb-healthcheck-unhealthy-threshold-count">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-unhealthy-threshold-count`</a> specifies the number of consecutive health check failures required before a target is considered unhealthy.
+
+    !!!example
+        - Set unhealthy threshold count
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-unhealthy-threshold-count: '3'
+        ```
+
+- <a name="frontend-nlb-healthcheck-success-codes">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-success-codes`</a> specifies the HTTP status codes that indicate a successful health check.
+
+    !!!example
+        - Set success codes for health check
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-success-codes: '200'
+        ```
````
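Taken together, the new annotations compose in the same way as the per-annotation examples above. An illustrative combination (subnet IDs and the health check path are placeholders) that fronts the ALB with an internet-facing NLB:

```
alb.ingress.kubernetes.io/enable-frontend-nlb: "true"
alb.ingress.kubernetes.io/frontend-nlb-scheme: internet-facing
alb.ingress.kubernetes.io/frontend-nlb-subnets: subnet-xxxx1,subnet-xxxx2
alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping: 80=80
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path: /health
```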
