From c052ff4665dd6fa35ef276e36fd5956e4a3ac3ce Mon Sep 17 00:00:00 2001 From: Daria Anton Date: Mon, 20 Apr 2026 17:11:08 +0200 Subject: [PATCH 1/4] add rate limiting via socket Rate limiting via socket Rate limiting via socket --- jobs/haproxy/templates/haproxy.config.erb | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/jobs/haproxy/templates/haproxy.config.erb b/jobs/haproxy/templates/haproxy.config.erb index 07c14cf5..83125f4c 100644 --- a/jobs/haproxy/templates/haproxy.config.erb +++ b/jobs/haproxy/templates/haproxy.config.erb @@ -319,6 +319,14 @@ global <%- if backend_match_http_protocol && backends.length == 2 -%> set-var proc.h2_alpn_tag str(h2) <%- end -%> + <%- if_p("ha_proxy.connections_rate_limit.table_size", "ha_proxy.connections_rate_limit.window_size") do -%> + <%- if_p("ha_proxy.connections_rate_limit.connections") do |connections| -%> + set-var proc.conn_rate_limit int(<%= connections %>) + <%- end -%> + <%- if_p("ha_proxy.connections_rate_limit.block") do |block| -%> + set-var proc.conn_rate_limit_enabled bool(<%= block ? 
1 : 0 %>) + <%- end -%> + <%- end -%> <%- if p("ha_proxy.always_allow_body_http10") %> h1-accept-payload-with-any-method <%- end %> @@ -432,11 +440,7 @@ frontend http-in tcp-request connection reject if layer4_block <%- if_p("ha_proxy.connections_rate_limit.table_size", "ha_proxy.connections_rate_limit.window_size") do -%> tcp-request connection track-sc0 src table st_tcp_conn_rate - <%- if_p("ha_proxy.connections_rate_limit.block", "ha_proxy.connections_rate_limit.connections") do |block, connections| -%> - <%-if block -%> - tcp-request connection reject if { sc_conn_rate(0) gt <%= connections %> } - <%- end -%> - <%- end -%> + tcp-request connection reject if { var(proc.conn_rate_limit_enabled) -m bool } { sc_conn_rate(0),sub(proc.conn_rate_limit) gt 0 } <%- end -%> <%- if_p("ha_proxy.requests_rate_limit.table_size", "ha_proxy.requests_rate_limit.window_size") do -%> http-request track-sc1 src table st_http_req_rate @@ -565,12 +569,8 @@ frontend https-in acl layer4_block src -f /var/vcap/jobs/haproxy/config/blocklist_cidrs_tcp.txt tcp-request connection reject if layer4_block <%- if_p("ha_proxy.connections_rate_limit.table_size", "ha_proxy.connections_rate_limit.window_size") do -%> - tcp-request connection track-sc0 src table st_tcp_conn_rate - <%- if_p("ha_proxy.connections_rate_limit.block", "ha_proxy.connections_rate_limit.connections") do |block, connections| -%> - <%-if block -%> - tcp-request connection reject if { sc_conn_rate(0) gt <%= connections %> } - <%- end -%> - <%- end -%> + tcp-request connection track-sc0 src table st_tcp_conn_rate + tcp-request connection reject if { var(proc.conn_rate_limit_enabled) -m bool } { sc_conn_rate(0),sub(proc.conn_rate_limit) gt 0 } <%- end -%> <%- if_p("ha_proxy.requests_rate_limit.table_size", "ha_proxy.requests_rate_limit.window_size") do -%> http-request track-sc1 src table st_http_req_rate From ce5ae8b92649486c2ca1f4ceeea401a514457cf1 Mon Sep 17 00:00:00 2001 From: Daria Anton Date: Wed, 22 Apr 2026 10:31:27 
+0200 Subject: [PATCH 2/4] Add acceptance test --- acceptance-tests/socket_rate_limit_test.go | 67 ++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 acceptance-tests/socket_rate_limit_test.go diff --git a/acceptance-tests/socket_rate_limit_test.go b/acceptance-tests/socket_rate_limit_test.go new file mode 100644 index 00000000..340f434d --- /dev/null +++ b/acceptance-tests/socket_rate_limit_test.go @@ -0,0 +1,67 @@ +package acceptance_tests + +import ( + "fmt" + "net/http" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Socket Rate Limiting", func() { + It("enforces socket rate limits as configured", func() { + socketLimit := 3 + opsfileSocketRateLimit := fmt.Sprintf(`--- +- type: replace + path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/socket_rate_limit?/sockets + value: %d +- type: replace + path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/socket_rate_limit/window_size? + value: 10s +- type: replace + path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/socket_rate_limit/table_size? + value: 100 +- type: replace + path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/socket_rate_limit/block? 
+ value: true +`, socketLimit) + + haproxyBackendPort := 12000 + haproxyInfo, _ := deployHAProxy(baseManifestVars{ + haproxyBackendPort: haproxyBackendPort, + haproxyBackendServers: []string{"127.0.0.1"}, + deploymentName: deploymentNameForTestNode(), + }, []string{opsfileSocketRateLimit}, map[string]interface{}{}, true) + + closeLocalServer, localPort := startDefaultTestServer() + defer closeLocalServer() + + closeTunnel := setupTunnelFromHaproxyToTestServer(haproxyInfo, haproxyBackendPort, localPort) + defer closeTunnel() + + testRequestCount := int(float64(socketLimit) * 1.5) + firstFailure := -1 + successfulRequestCount := 0 + for i := 0; i < testRequestCount; i++ { + rt := &http.Transport{ + DisableKeepAlives: true, + } + client := &http.Client{Transport: rt} + resp, err := client.Get(fmt.Sprintf("http://%s/foo", haproxyInfo.PublicIP)) + if err == nil { + if resp.StatusCode == 200 { + successfulRequestCount++ + continue + } + } + if firstFailure == -1 { + firstFailure = i + } + } + + By("The first socket should fail after we've sent the amount of requests specified in the Socket Rate Limit") + Expect(firstFailure).To(Equal(socketLimit)) + By("The total amount of successful sockets per time window should equal the Socket Rate Limit") + Expect(successfulRequestCount).To(Equal(socketLimit)) + }) +}) From 1fbc6d44082e4a79cd63a3d7bb97b600b6574b60 Mon Sep 17 00:00:00 2001 From: M Rizwan Shaik Date: Wed, 22 Apr 2026 11:10:36 +0200 Subject: [PATCH 3/4] update rate-limiting docs --- docs/rate_limiting.md | 71 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/docs/rate_limiting.md b/docs/rate_limiting.md index 2a948bef..006009f1 100644 --- a/docs/rate_limiting.md +++ b/docs/rate_limiting.md @@ -16,6 +16,8 @@ Both groups contain the (roughly) same attributes : - `table_size`: Size of the stick table in which the IPs and counters are stored. - `block`: Whether or not to block connections. 
If `block` is disabled (or not provided), incoming requests/connections will still be tracked in the respective stick-tables, but will not be denied. +> **Note for `connections_rate_limit`:** The `block` flag and `connections` threshold are stored as HAProxy process-level variables (`proc.conn_rate_limit_enabled` and `proc.conn_rate_limit`). The `tcp-request connection reject` rule is always present in the config as long as `table_size` and `window_size` are configured — enforcement is controlled entirely at runtime via these variables. Their initial values are set from the BOSH manifest at startup, but they can be adjusted at runtime without reloading HAProxy via the stats socket. See [Runtime adjustment via stats socket](#runtime-adjustment-of-connections_rate_limit-via-stats-socket) for details. + ## Effects of Rate Limiting Once a rate-limit is reached, haproxy-boshrelease will no longer proxy incoming request from the rate-limited client IP to a backend. Depending on the type of rate limiting, haproxy will respond with one of the following: @@ -119,3 +121,72 @@ $ echo "show table st_http_req_rate" | socat /var/vcap/sys/run/haproxy/stats.soc ``` > Please note you will likely need 'sudo' permission to run socat. + +## Runtime adjustment of connections_rate_limit via stats socket + +The `connections_rate_limit.block` flag and `connections_rate_limit.connections` threshold are stored as HAProxy process-level variables and can be changed at runtime without a reload. This requires `ha_proxy.master_cli_enable: true` or `ha_proxy.stats_enable: true`. + +The socket is located at `/var/vcap/sys/run/haproxy/stats.sock`. You will likely need `sudo` to access it. + +> **Note:** The `tcp-request connection reject` rule is always present in the config as long as `connections_rate_limit.table_size` and `connections_rate_limit.window_size` are set. Enforcement is controlled entirely at runtime via `proc.conn_rate_limit_enabled` and `proc.conn_rate_limit`. 
Setting `connections_rate_limit.connections` and `connections_rate_limit.block` in the manifest only sets their **initial values** at startup — they can be freely overridden via socket without a reload. + +### Inspect current variable values + +```bash +echo "get var proc.conn_rate_limit" | socat stdio /var/vcap/sys/run/haproxy/stats.sock +# => proc.conn_rate_limit: type=sint value=<100> + +echo "get var proc.conn_rate_limit_enabled" | socat stdio /var/vcap/sys/run/haproxy/stats.sock +# => proc.conn_rate_limit_enabled: type=bool value=<1> +``` + +### Enable or disable blocking at runtime + +```bash +# Enable blocking (equivalent to setting block: true in the manifest) +echo "experimental-mode on; set var proc.conn_rate_limit_enabled bool(true)" | socat stdio /var/vcap/sys/run/haproxy/stats.sock + +# Disable blocking without reloading (equivalent to setting block: false in the manifest) +echo "experimental-mode on; set var proc.conn_rate_limit_enabled bool(false)" | socat stdio /var/vcap/sys/run/haproxy/stats.sock +``` + +### Adjust the connections threshold at runtime + +```bash +# Allow up to 100 connections per window (equivalent to setting connections: 100 in the manifest) +echo "experimental-mode on; set var proc.conn_rate_limit int(100)" | socat stdio /var/vcap/sys/run/haproxy/stats.sock +``` + +### Combine enable + threshold change in one step + +```bash +echo "experimental-mode on; set var proc.conn_rate_limit int(100); set var proc.conn_rate_limit_enabled bool(true)" | socat stdio /var/vcap/sys/run/haproxy/stats.sock +``` + +### Inspect current stick-table entries + +```bash +echo "show table st_tcp_conn_rate" | socat stdio /var/vcap/sys/run/haproxy/stats.sock +# => # table: st_tcp_conn_rate, type: ipv6, size:1048576, used:2 +# => 0x...: key=::ffff:203.0.113.42 use=0 exp=8123 shard=0 conn_rate(10000)=5 + +# Show only IPs with an active connection rate +echo "show table st_tcp_conn_rate data.conn_rate gt 0" | socat stdio /var/vcap/sys/run/haproxy/stats.sock 
+ +# Find the IP with the highest connection rate +echo "show table st_tcp_conn_rate" | socat stdio /var/vcap/sys/run/haproxy/stats.sock | sort -t= -k6 -rn | head -1 +``` + +### Clear an IP from the stick table (unblock a specific client) + +> **Note:** IPs are stored as IPv6-mapped IPv4 addresses. Always prefix IPv4 addresses with `::ffff:`. + +```bash +# Remove a specific IP entry (only works when the entry is not actively in use) +echo "clear table st_tcp_conn_rate key ::ffff:203.0.113.42" | socat stdio /var/vcap/sys/run/haproxy/stats.sock + +# Clear all entries from the table +echo "clear table st_tcp_conn_rate" | socat stdio /var/vcap/sys/run/haproxy/stats.sock +``` + +> **Note:** Runtime changes to `proc.conn_rate_limit` and `proc.conn_rate_limit_enabled` are lost on HAProxy reload or restart. The values will be re-initialized from the BOSH manifest properties (`connections_rate_limit.connections` and `connections_rate_limit.block`) on next startup. From d946c87398b77ba2514b94b3e67113b5dcef47c0 Mon Sep 17 00:00:00 2001 From: M Rizwan Shaik Date: Wed, 22 Apr 2026 11:14:01 +0200 Subject: [PATCH 4/4] update spec tests for rate_limiting via socket add acceptance test for connection rate limit via socket Co-authored-by: Dariquest fix indentation in config enhance acceptance tests optimize acceptance for connection rate limiting via socket add root permission to socat --- acceptance-tests/rate_limit_test.go | 250 ++++++++++++++++++ acceptance-tests/socket_rate_limit_test.go | 67 ----- jobs/haproxy/templates/haproxy.config.erb | 4 +- .../haproxy_config/rate_limit_spec.rb | 66 ++++- 4 files changed, 311 insertions(+), 76 deletions(-) delete mode 100644 acceptance-tests/socket_rate_limit_test.go diff --git a/acceptance-tests/rate_limit_test.go b/acceptance-tests/rate_limit_test.go index 18c74765..72c719ba 100644 --- a/acceptance-tests/rate_limit_test.go +++ b/acceptance-tests/rate_limit_test.go @@ -3,11 +3,21 @@ package acceptance_tests import ( "fmt" "net/http" + 
"strings" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) +const haproxySocketPath = "/var/vcap/sys/run/haproxy/stats.sock" + +func runHAProxySocketCommand(haproxyInfo haproxyInfo, command string) string { + cmd := fmt.Sprintf(`echo "%s" | sudo socat stdio %s`, command, haproxySocketPath) + stdout, _, err := runOnRemote(haproxyInfo.SSHUser, haproxyInfo.PublicIP, haproxyInfo.SSHPrivateKey, cmd) + Expect(err).NotTo(HaveOccurred()) + return strings.TrimSpace(stdout) +} + var _ = Describe("Rate-Limiting", func() { It("Connections/Requests aren't blocked when block config isn't set", func() { rateLimit := 5 @@ -165,6 +175,246 @@ var _ = Describe("Rate-Limiting", func() { Expect(successfulRequestCount).To(Equal(connLimit)) }) + It("Connection Based Limiting works via manifest and can be overridden at runtime via socket", func() { + connLimit := 5 + opsfileConnectionsRateLimit := fmt.Sprintf(`--- +- type: replace + path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/connections_rate_limit?/connections + value: %d +- type: replace + path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/connections_rate_limit/window_size? + value: 100s +- type: replace + path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/connections_rate_limit/table_size? + value: 100 +- type: replace + path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/connections_rate_limit/block? 
+ value: true +`, connLimit) + haproxyBackendPort := 12000 + haproxyInfo, _ := deployHAProxy(baseManifestVars{ + haproxyBackendPort: haproxyBackendPort, + haproxyBackendServers: []string{"127.0.0.1"}, + deploymentName: deploymentNameForTestNode(), + }, []string{opsfileConnectionsRateLimit}, map[string]interface{}{}, true) + + closeLocalServer, localPort := startDefaultTestServer() + defer closeLocalServer() + + closeTunnel := setupTunnelFromHaproxyToTestServer(haproxyInfo, haproxyBackendPort, localPort) + defer closeTunnel() + + By("Verifying proc.conn_rate_limit is initialised from manifest value") + output := runHAProxySocketCommand(haproxyInfo, "get var proc.conn_rate_limit") + Expect(output).To(ContainSubstring(fmt.Sprintf("value=<%d>", connLimit))) + + By("Verifying proc.conn_rate_limit_enabled is initialised as true from manifest block: true") + output = runHAProxySocketCommand(haproxyInfo, "get var proc.conn_rate_limit_enabled") + Expect(output).To(ContainSubstring("value=<1>")) + + By("Verifying connections are blocked after exceeding the manifest-configured limit") + testRequestCount := int(float64(connLimit) * 1.5) + firstFailure := -1 + successfulRequestCount := 0 + for i := 0; i < testRequestCount; i++ { + rt := &http.Transport{DisableKeepAlives: true} + client := &http.Client{Transport: rt} + resp, err := client.Get(fmt.Sprintf("http://%s/foo", haproxyInfo.PublicIP)) + if err == nil && resp.StatusCode == 200 { + resp.Body.Close() + successfulRequestCount++ + continue + } + if err == nil { + resp.Body.Close() + } + if firstFailure == -1 { + firstFailure = i + } + } + Expect(firstFailure).To(Equal(connLimit)) + Expect(successfulRequestCount).To(Equal(connLimit)) + + By("Clearing stick table before overriding limit") + runHAProxySocketCommand(haproxyInfo, "clear table st_tcp_conn_rate") + + By("Overriding the limit at runtime via socket to a higher value") + newLimit := connLimit * 3 + runHAProxySocketCommand(haproxyInfo, fmt.Sprintf("experimental-mode 
on; set var proc.conn_rate_limit int(%d)", newLimit)) + + By("Verifying the override is reflected via get var") + output = runHAProxySocketCommand(haproxyInfo, "get var proc.conn_rate_limit") + Expect(output).To(ContainSubstring(fmt.Sprintf("value=<%d>", newLimit))) + + By("Verifying connections are allowed up to the new higher socket-configured limit") + testRequestCount = int(float64(newLimit) * 1.5) + firstFailure = -1 + successfulRequestCount = 0 + for i := 0; i < testRequestCount; i++ { + rt := &http.Transport{DisableKeepAlives: true} + client := &http.Client{Transport: rt} + resp, err := client.Get(fmt.Sprintf("http://%s/foo", haproxyInfo.PublicIP)) + if err == nil && resp.StatusCode == 200 { + resp.Body.Close() + successfulRequestCount++ + continue + } + if err == nil { + resp.Body.Close() + } + if firstFailure == -1 { + firstFailure = i + } + } + Expect(firstFailure).To(Equal(newLimit)) + Expect(successfulRequestCount).To(Equal(newLimit)) + }) + + It("Connection Based Limiting can be enabled and disabled at runtime via socket with manifest block false", func() { + connLimit := 5 + // block: false in manifest, no connections property — both limit and enablement come via socket + opsfileConnectionsRateLimit := `--- +- type: replace + path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/connections_rate_limit?/window_size + value: 100s +- type: replace + path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/connections_rate_limit/table_size? + value: 100 +- type: replace + path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/connections_rate_limit/block? 
+ value: false +` + haproxyBackendPort := 12000 + haproxyInfo, _ := deployHAProxy(baseManifestVars{ + haproxyBackendPort: haproxyBackendPort, + haproxyBackendServers: []string{"127.0.0.1"}, + deploymentName: deploymentNameForTestNode(), + }, []string{opsfileConnectionsRateLimit}, map[string]interface{}{}, true) + + closeLocalServer, localPort := startDefaultTestServer() + defer closeLocalServer() + + closeTunnel := setupTunnelFromHaproxyToTestServer(haproxyInfo, haproxyBackendPort, localPort) + defer closeTunnel() + + By("Verifying proc.conn_rate_limit_enabled is initialised as false from manifest block: false") + output := runHAProxySocketCommand(haproxyInfo, "get var proc.conn_rate_limit_enabled") + Expect(output).To(ContainSubstring("value=<0>")) + + By("Setting conn_rate_limit and enabling blocking via socket") + runHAProxySocketCommand(haproxyInfo, fmt.Sprintf("experimental-mode on; set var proc.conn_rate_limit int(%d)", connLimit)) + runHAProxySocketCommand(haproxyInfo, "experimental-mode on; set var proc.conn_rate_limit_enabled bool(true)") + + By("Verifying proc.conn_rate_limit_enabled is now true") + output = runHAProxySocketCommand(haproxyInfo, "get var proc.conn_rate_limit_enabled") + Expect(output).To(ContainSubstring("value=<1>")) + + By("Verifying connections are blocked after exceeding the limit") + testRequestCount := int(float64(connLimit) * 1.5) + firstFailure := -1 + successfulRequestCount := 0 + for i := 0; i < testRequestCount; i++ { + rt := &http.Transport{DisableKeepAlives: true} + client := &http.Client{Transport: rt} + resp, err := client.Get(fmt.Sprintf("http://%s/foo", haproxyInfo.PublicIP)) + if err == nil && resp.StatusCode == 200 { + resp.Body.Close() + successfulRequestCount++ + continue + } + if err == nil { + resp.Body.Close() + } + if firstFailure == -1 { + firstFailure = i + } + } + Expect(firstFailure).To(Equal(connLimit)) + Expect(successfulRequestCount).To(Equal(connLimit)) + + By("Disabling blocking at runtime via socket") + 
runHAProxySocketCommand(haproxyInfo, "experimental-mode on; set var proc.conn_rate_limit_enabled bool(false)") + + By("Verifying proc.conn_rate_limit_enabled is now false") + output = runHAProxySocketCommand(haproxyInfo, "get var proc.conn_rate_limit_enabled") + Expect(output).To(ContainSubstring("value=<0>")) + + By("Clearing stick table to reset counters") + runHAProxySocketCommand(haproxyInfo, "clear table st_tcp_conn_rate") + + By("Verifying all connections are now allowed after disabling via socket") + for i := 0; i < testRequestCount; i++ { + rt := &http.Transport{DisableKeepAlives: true} + client := &http.Client{Transport: rt} + resp, err := client.Get(fmt.Sprintf("http://%s/foo", haproxyInfo.PublicIP)) + Expect(err).NotTo(HaveOccurred()) + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + resp.Body.Close() + } + }) + + It("Connection Based Limiting works when limit is set entirely via socket without manifest connections property", func() { + connLimit := 5 + // Only table_size and window_size are set — no connections or block in manifest + opsfileConnectionsRateLimit := `--- +- type: replace + path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/connections_rate_limit?/window_size + value: 100s +- type: replace + path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/connections_rate_limit/table_size? 
+ value: 100 +` + haproxyBackendPort := 12000 + haproxyInfo, _ := deployHAProxy(baseManifestVars{ + haproxyBackendPort: haproxyBackendPort, + haproxyBackendServers: []string{"127.0.0.1"}, + deploymentName: deploymentNameForTestNode(), + }, []string{opsfileConnectionsRateLimit}, map[string]interface{}{}, true) + + closeLocalServer, localPort := startDefaultTestServer() + defer closeLocalServer() + + closeTunnel := setupTunnelFromHaproxyToTestServer(haproxyInfo, haproxyBackendPort, localPort) + defer closeTunnel() + + By("Setting conn_rate_limit and enabling blocking via socket") + runHAProxySocketCommand(haproxyInfo, fmt.Sprintf("experimental-mode on; set var proc.conn_rate_limit int(%d)", connLimit)) + runHAProxySocketCommand(haproxyInfo, "experimental-mode on; set var proc.conn_rate_limit_enabled bool(true)") + + By("Verifying proc.conn_rate_limit is set correctly via socket") + output := runHAProxySocketCommand(haproxyInfo, "get var proc.conn_rate_limit") + Expect(output).To(ContainSubstring(fmt.Sprintf("value=<%d>", connLimit))) + + By("Verifying proc.conn_rate_limit_enabled is set correctly via socket") + output = runHAProxySocketCommand(haproxyInfo, "get var proc.conn_rate_limit_enabled") + Expect(output).To(ContainSubstring("value=<1>")) + + By("Verifying connections are blocked after exceeding the socket-configured limit") + testRequestCount := int(float64(connLimit) * 1.5) + firstFailure := -1 + successfulRequestCount := 0 + for i := 0; i < testRequestCount; i++ { + rt := &http.Transport{DisableKeepAlives: true} + client := &http.Client{Transport: rt} + resp, err := client.Get(fmt.Sprintf("http://%s/foo", haproxyInfo.PublicIP)) + if err == nil && resp.StatusCode == 200 { + resp.Body.Close() + successfulRequestCount++ + continue + } + if err == nil { + resp.Body.Close() + } + if firstFailure == -1 { + firstFailure = i + } + } + Expect(firstFailure).To(Equal(connLimit)) + Expect(successfulRequestCount).To(Equal(connLimit)) + }) +}) + +var _ = 
Describe("Rate-Limiting Both Types", func() { It("Both types of rate limiting work in parallel", func() { requestLimit := 5 connLimit := 6 // needs to be higher than request limit for this test diff --git a/acceptance-tests/socket_rate_limit_test.go b/acceptance-tests/socket_rate_limit_test.go deleted file mode 100644 index 340f434d..00000000 --- a/acceptance-tests/socket_rate_limit_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package acceptance_tests - -import ( - "fmt" - "net/http" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var _ = Describe("Socket Rate Limiting", func() { - It("enforces socket rate limits as configured", func() { - socketLimit := 3 - opsfileSocketRateLimit := fmt.Sprintf(`--- -- type: replace - path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/socket_rate_limit?/sockets - value: %d -- type: replace - path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/socket_rate_limit/window_size? - value: 10s -- type: replace - path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/socket_rate_limit/table_size? - value: 100 -- type: replace - path: /instance_groups/name=haproxy/jobs/name=haproxy/properties/ha_proxy/socket_rate_limit/block? 
- value: true -`, socketLimit) - - haproxyBackendPort := 12000 - haproxyInfo, _ := deployHAProxy(baseManifestVars{ - haproxyBackendPort: haproxyBackendPort, - haproxyBackendServers: []string{"127.0.0.1"}, - deploymentName: deploymentNameForTestNode(), - }, []string{opsfileSocketRateLimit}, map[string]interface{}{}, true) - - closeLocalServer, localPort := startDefaultTestServer() - defer closeLocalServer() - - closeTunnel := setupTunnelFromHaproxyToTestServer(haproxyInfo, haproxyBackendPort, localPort) - defer closeTunnel() - - testRequestCount := int(float64(socketLimit) * 1.5) - firstFailure := -1 - successfulRequestCount := 0 - for i := 0; i < testRequestCount; i++ { - rt := &http.Transport{ - DisableKeepAlives: true, - } - client := &http.Client{Transport: rt} - resp, err := client.Get(fmt.Sprintf("http://%s/foo", haproxyInfo.PublicIP)) - if err == nil { - if resp.StatusCode == 200 { - successfulRequestCount++ - continue - } - } - if firstFailure == -1 { - firstFailure = i - } - } - - By("The first socket should fail after we've sent the amount of requests specified in the Socket Rate Limit") - Expect(firstFailure).To(Equal(socketLimit)) - By("The total amount of successful sockets per time window should equal the Socket Rate Limit") - Expect(successfulRequestCount).To(Equal(socketLimit)) - }) -}) diff --git a/jobs/haproxy/templates/haproxy.config.erb b/jobs/haproxy/templates/haproxy.config.erb index 83125f4c..057dadea 100644 --- a/jobs/haproxy/templates/haproxy.config.erb +++ b/jobs/haproxy/templates/haproxy.config.erb @@ -569,8 +569,8 @@ frontend https-in acl layer4_block src -f /var/vcap/jobs/haproxy/config/blocklist_cidrs_tcp.txt tcp-request connection reject if layer4_block <%- if_p("ha_proxy.connections_rate_limit.table_size", "ha_proxy.connections_rate_limit.window_size") do -%> - tcp-request connection track-sc0 src table st_tcp_conn_rate - tcp-request connection reject if { var(proc.conn_rate_limit_enabled) -m bool } { 
sc_conn_rate(0),sub(proc.conn_rate_limit) gt 0 } + tcp-request connection track-sc0 src table st_tcp_conn_rate + tcp-request connection reject if { var(proc.conn_rate_limit_enabled) -m bool } { sc_conn_rate(0),sub(proc.conn_rate_limit) gt 0 } <%- end -%> <%- if_p("ha_proxy.requests_rate_limit.table_size", "ha_proxy.requests_rate_limit.window_size") do -%> http-request track-sc1 src table st_http_req_rate diff --git a/spec/haproxy/templates/haproxy_config/rate_limit_spec.rb b/spec/haproxy/templates/haproxy_config/rate_limit_spec.rb index 5baddf8d..6fc7a959 100644 --- a/spec/haproxy/templates/haproxy_config/rate_limit_spec.rb +++ b/spec/haproxy/templates/haproxy_config/rate_limit_spec.rb @@ -9,6 +9,7 @@ let(:frontend_http) { haproxy_conf['frontend http-in'] } let(:frontend_https) { haproxy_conf['frontend https-in'] } + let(:global) { haproxy_conf['global'] } let(:properties) { {} } @@ -88,16 +89,67 @@ expect(frontend_https).to include('tcp-request connection track-sc0 src table st_tcp_conn_rate') end - context 'when "connections" and "block" are also provided' do + it 'always adds tcp-request connection reject rule without requiring connections property' do + expect(frontend_http).to include('tcp-request connection reject if { var(proc.conn_rate_limit_enabled) -m bool } { sc_conn_rate(0),sub(proc.conn_rate_limit) gt 0 }') + expect(frontend_https).to include('tcp-request connection reject if { var(proc.conn_rate_limit_enabled) -m bool } { sc_conn_rate(0),sub(proc.conn_rate_limit) gt 0 }') + end + + it 'does not set proc.conn_rate_limit in global when connections is not provided' do + expect(global).not_to include('set-var proc.conn_rate_limit') + end + + it 'does not set proc.conn_rate_limit_enabled in global when block is not provided' do + expect(global).not_to include('set-var proc.conn_rate_limit_enabled') + end + + context 'when "connections" is provided' do let(:properties) do - temp_properties.deep_merge({ 'connections_rate_limit' => { 'connections' => '5', 
'block' => 'true' } }) + temp_properties.deep_merge({ 'connections_rate_limit' => { 'connections' => '100' } }) end - it 'adds http-request deny condition to http-in and https-in frontends' do - expect(frontend_http).to include('tcp-request connection reject if { sc_conn_rate(0) gt 5 }') - expect(frontend_http).to include('tcp-request connection track-sc0 src table st_tcp_conn_rate') - expect(frontend_https).to include('tcp-request connection reject if { sc_conn_rate(0) gt 5 }') - expect(frontend_https).to include('tcp-request connection track-sc0 src table st_tcp_conn_rate') + it 'sets proc.conn_rate_limit in global section' do + expect(global).to include('set-var proc.conn_rate_limit int(100)') + end + + it 'still adds tcp-request connection reject rule in both frontends' do + expect(frontend_http).to include('tcp-request connection reject if { var(proc.conn_rate_limit_enabled) -m bool } { sc_conn_rate(0),sub(proc.conn_rate_limit) gt 0 }') + expect(frontend_https).to include('tcp-request connection reject if { var(proc.conn_rate_limit_enabled) -m bool } { sc_conn_rate(0),sub(proc.conn_rate_limit) gt 0 }') + end + end + + context 'when "block" is set to true' do + let(:properties) do + temp_properties.deep_merge({ 'connections_rate_limit' => { 'block' => true } }) + end + + it 'sets proc.conn_rate_limit_enabled to bool(1) in global section' do + expect(global).to include('set-var proc.conn_rate_limit_enabled bool(1)') + end + end + + context 'when "block" is set to false' do + let(:properties) do + temp_properties.deep_merge({ 'connections_rate_limit' => { 'block' => false } }) + end + + it 'sets proc.conn_rate_limit_enabled to bool(0) in global section' do + expect(global).to include('set-var proc.conn_rate_limit_enabled bool(0)') + end + end + + context 'when both "connections" and "block" are provided' do + let(:properties) do + temp_properties.deep_merge({ 'connections_rate_limit' => { 'connections' => '50', 'block' => true } }) + end + + it 'sets both proc 
vars in global section' do + expect(global).to include('set-var proc.conn_rate_limit int(50)') + expect(global).to include('set-var proc.conn_rate_limit_enabled bool(1)') + end + + it 'adds reject rule controlled by proc vars in both frontends' do + expect(frontend_http).to include('tcp-request connection reject if { var(proc.conn_rate_limit_enabled) -m bool } { sc_conn_rate(0),sub(proc.conn_rate_limit) gt 0 }') + expect(frontend_https).to include('tcp-request connection reject if { var(proc.conn_rate_limit_enabled) -m bool } { sc_conn_rate(0),sub(proc.conn_rate_limit) gt 0 }') end end end