Implement support for configuring the connection pool of inbound clusters via the Sidecar API (istio#47313)

* update API to HEAD to get new connection pool fields -- note we'll need to remove the replace directive before merge

* Implement support for ConnectionPoolSettings on the inbound cluster configured via the Sidecar API (see istio/api#2961). This PR implements the following precedence for picking connection pool config, highest to lowest (a minimal sketch appears after this list):
  1. per-port ConnectionPool from the Sidecar
  2. top-level InboundConnectionPool from the Sidecar
  3. per-port TrafficPolicy.ConnectionPool from the DestinationRule
  4. top-level TrafficPolicy.ConnectionPool from the DestinationRule
  (Items 3 and 4 are unchanged from before this PR.) This PR does not change how client-side connection pools are configured, which remains DestinationRule-only.

* point to latest revision of the istio/api change

* remove one useless test case, and format a few things

* simplify if statement logic

* remove the replace directive and update to the 1.20 release of the API repo; restore test file I messed up during rebase

* Add release notes that mirror those from istio/api; fix linter error

* roll back go mod changes as they weren't needed anyway -- yay caches 🙃

* include port name+number in the warning about HTTP settings on TCP ports
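For readers skimming the diff, here is a minimal sketch of the precedence order described in the second bullet above. It is illustrative only and not part of this commit: the function name and parameters are made up, while the real logic lives in `SidecarScope.InboundConnectionPoolForPort` and `ClusterBuilder.buildInboundCluster` below.

```go
package example

import (
	networking "istio.io/api/networking/v1alpha3"
)

// effectiveInboundConnectionPool is a hypothetical helper that mirrors the
// precedence implemented by this PR (highest to lowest). It only illustrates
// the selection order; it is not the code added by the commit.
func effectiveInboundConnectionPool(
	sidecarPerPort, sidecarTopLevel, drPerPort, drTopLevel *networking.ConnectionPoolSettings,
) *networking.ConnectionPoolSettings {
	switch {
	case sidecarPerPort != nil: // 1) per-port ConnectionPool on the Sidecar ingress listener
		return sidecarPerPort
	case sidecarTopLevel != nil: // 2) top-level InboundConnectionPool on the Sidecar
		return sidecarTopLevel
	case drPerPort != nil: // 3) per-port TrafficPolicy.ConnectionPool from the DestinationRule
		return drPerPort
	default: // 4) top-level TrafficPolicy.ConnectionPool from the DestinationRule (may be nil)
		return drTopLevel
	}
}
```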
ZackButcher authored Oct 27, 2023
1 parent 7945294 commit 4ab1fb7
Showing 7 changed files with 506 additions and 39 deletions.
20 changes: 20 additions & 0 deletions pilot/pkg/model/sidecar.go
@@ -549,6 +549,26 @@ func (sc *SidecarScope) HasIngressListener() bool {
return true
}

// InboundConnectionPoolForPort returns the connection pool settings for a specific inbound port. If there's not a
// setting for that specific port, then the settings at the Sidecar resource are returned. If neither exist,
// then nil is returned so the caller can decide what values to fall back on.
func (sc *SidecarScope) InboundConnectionPoolForPort(port int) *networking.ConnectionPoolSettings {
if sc == nil || sc.Sidecar == nil {
return nil
}

for _, in := range sc.Sidecar.Ingress {
if int(in.Port.Number) == port {
if in.GetConnectionPool() != nil {
return in.ConnectionPool
}
}
}

// if set, it'll be non-nil and have values (guaranteed by validation); or if unset it'll be nil
return sc.Sidecar.GetInboundConnectionPool()
}

// Services returns the list of services imported by this egress listener
func (ilw *IstioEgressListenerWrapper) Services() []*Service {
return ilw.services
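Not part of the patch: a hypothetical caller-side sketch of the new accessor, showing how it layers Sidecar-supplied settings over a DestinationRule-derived policy. The helper name is made up; the real call site is in cluster_builder.go further down.

```go
package example

import (
	networking "istio.io/api/networking/v1alpha3"
	"istio.io/istio/pilot/pkg/model"
)

// applySidecarInboundPool is an illustrative helper (not in this commit). It
// overlays Sidecar-supplied connection pool settings, when present, on top of
// whatever policy was derived from a DestinationRule. InboundConnectionPoolForPort
// handles a nil scope, so this also works for proxies without a Sidecar resource.
func applySidecarInboundPool(scope *model.SidecarScope, port int, policy *networking.TrafficPolicy) *networking.TrafficPolicy {
	if pool := scope.InboundConnectionPoolForPort(port); pool != nil {
		if policy == nil {
			// No DestinationRule-derived policy; start from an empty one, as buildInboundCluster does.
			policy = &networking.TrafficPolicy{}
		}
		policy.ConnectionPool = pool
	}
	return policy
}
```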
159 changes: 159 additions & 0 deletions pilot/pkg/model/sidecar_test.go
@@ -19,7 +19,9 @@ import (
"fmt"
"reflect"
"strconv"
"strings"
"testing"
"time"

"github.com/golang/protobuf/ptypes/wrappers"
"google.golang.org/protobuf/types/known/durationpb"
@@ -2515,6 +2517,163 @@ outboundTrafficPolicy:
}
}

func TestInboundConnectionPoolForPort(t *testing.T) {
connectionPoolSettings := &networking.ConnectionPoolSettings{
Http: &networking.ConnectionPoolSettings_HTTPSettings{
Http1MaxPendingRequests: 1024,
Http2MaxRequests: 1024,
MaxRequestsPerConnection: 1024,
MaxRetries: 1024,
IdleTimeout: durationpb.New(5 * time.Second),
H2UpgradePolicy: networking.ConnectionPoolSettings_HTTPSettings_UPGRADE,
},
Tcp: &networking.ConnectionPoolSettings_TCPSettings{
MaxConnections: 1024,
ConnectTimeout: durationpb.New(6 * time.Second),
TcpKeepalive: &networking.ConnectionPoolSettings_TCPSettings_TcpKeepalive{
Probes: 3,
Time: durationpb.New(7 * time.Second),
Interval: durationpb.New(8 * time.Second),
},
MaxConnectionDuration: durationpb.New(9 * time.Second),
},
}

overrideConnectionPool := &networking.ConnectionPoolSettings{
Http: &networking.ConnectionPoolSettings_HTTPSettings{
Http1MaxPendingRequests: 1,
Http2MaxRequests: 2,
MaxRequestsPerConnection: 3,
MaxRetries: 4,
IdleTimeout: durationpb.New(1 * time.Second),
H2UpgradePolicy: networking.ConnectionPoolSettings_HTTPSettings_DO_NOT_UPGRADE,
},
}

tests := map[string]struct {
sidecar *networking.Sidecar
// port to settings map
want map[int]*networking.ConnectionPoolSettings
}{
"no settings": {
sidecar: &networking.Sidecar{},
want: map[int]*networking.ConnectionPoolSettings{
22: nil,
80: nil,
443: nil,
},
},
"no settings multiple ports": {
sidecar: &networking.Sidecar{
Ingress: []*networking.IstioIngressListener{
{
Port: &networking.SidecarPort{
Number: 80,
Protocol: "HTTP",
Name: "http",
},
},
{
Port: &networking.SidecarPort{
Number: 443,
Protocol: "HTTPS",
Name: "https",
},
},
},
},
want: map[int]*networking.ConnectionPoolSettings{
22: nil,
80: nil,
443: nil,
},
},
"single port with settings": {
sidecar: &networking.Sidecar{
Ingress: []*networking.IstioIngressListener{
{
Port: &networking.SidecarPort{
Number: 80,
Protocol: "HTTP",
Name: "http",
},
ConnectionPool: connectionPoolSettings,
},
},
},
want: map[int]*networking.ConnectionPoolSettings{
22: nil,
80: connectionPoolSettings,
443: nil,
},
},
"top level settings": {
sidecar: &networking.Sidecar{
InboundConnectionPool: connectionPoolSettings,
Ingress: []*networking.IstioIngressListener{
{
Port: &networking.SidecarPort{
Number: 80,
Protocol: "HTTP",
Name: "http",
},
},
},
},
want: map[int]*networking.ConnectionPoolSettings{
// with a default setting on the sidecar, we'll return it for any port we're asked about
22: connectionPoolSettings,
80: connectionPoolSettings,
443: connectionPoolSettings,
},
},
"port settings override top level": {
sidecar: &networking.Sidecar{
InboundConnectionPool: connectionPoolSettings,
Ingress: []*networking.IstioIngressListener{
{
Port: &networking.SidecarPort{
Number: 80,
Protocol: "HTTP",
Name: "http",
},
ConnectionPool: overrideConnectionPool,
},
},
},
want: map[int]*networking.ConnectionPoolSettings{
// with a default setting on the sidecar, we'll return it for any port we're asked about
22: connectionPoolSettings,
80: overrideConnectionPool,
443: connectionPoolSettings,
},
},
}
for name, tt := range tests {
t.Run(name, func(t *testing.T) {
ps := NewPushContext()
ps.Mesh = mesh.DefaultMeshConfig()

sidecar := &config.Config{
Meta: config.Meta{
GroupVersionKind: gvk.Sidecar,
Name: "sidecar",
Namespace: strings.Replace(name, " ", "-", -1),
},
Spec: tt.sidecar,
}
scope := ConvertToSidecarScope(ps, sidecar, sidecar.Namespace)

for port, expected := range tt.want {
actual := scope.InboundConnectionPoolForPort(port)
if !reflect.DeepEqual(actual, expected) {
t.Errorf("for port %d, wanted %#v but got: %#v", port, expected, actual)
}
}
})
}
}

func BenchmarkConvertIstioListenerToWrapper(b *testing.B) {
b.Run("small-exact", func(b *testing.B) {
benchmarkConvertIstioListenerToWrapper(b, 10, 3, "", false)
10 changes: 10 additions & 0 deletions pilot/pkg/networking/core/v1alpha3/cluster_builder.go
@@ -386,6 +386,16 @@ func (cb *ClusterBuilder) buildInboundCluster(clusterPort int, bind string,
util.AddConfigInfoMetadata(localCluster.cluster.Metadata, cfg.Meta)
}
}
// If there's a connection pool set on the Sidecar then override any settings derived from the DestinationRule
// with those set by Sidecar resource. This allows the user to resolve any ambiguity, e.g. in the case that
// multiple services are listening on the same port.
if sidecarConnPool := proxy.SidecarScope.InboundConnectionPoolForPort(clusterPort); sidecarConnPool != nil {
if opts.policy == nil {
// There was no destination rule, so no inbound traffic policy; we'll create a default
opts.policy = &networking.TrafficPolicy{}
}
opts.policy.ConnectionPool = sidecarConnPool
}
cb.applyTrafficPolicy(opts)

if bind != LocalhostAddress && bind != LocalhostIPv6Address {
