I have a Kubernetes cluster with the dashboard exposed on a node port.
If I go to the node:port combination directly, everything works well: quick response in the browser.
But since the dashboard may run on different nodes, I installed HAProxy and use it to reach the dashboard on one of the two nodes through a single domain name. In this case the response is very slow.
Here is the HAProxy log:
Jul 11 04:44:21 kube-master haproxy[34570]: 192.168.35.95:53413 [11/Jul/2022:04:44:11.409] manager_http http_be/kube-node-1 0/10000/0/12/10012 200 1115 - - ---- 5/5/3/1/+1 0/0 "GET /api/v1/replicaset/kubernetes-dashboard?itemsPerPage=10&page=1&sortBy=d,creationTimestamp HTTP/1.1"
Jul 11 04:44:21 kube-master haproxy[34570]: 192.168.35.95:53414 [11/Jul/2022:04:44:11.414] manager_http http_be/kube-node-1 0/10001/0/7/10008 200 703 - - ---- 4/4/2/0/+1 0/0 "GET /api/v1/pod/kubernetes-dashboard?itemsPerPage=10&page=1&sortBy=d,creationTimestamp HTTP/1.1"
Jul 11 04:44:21 kube-master haproxy[34570]: 192.168.35.95:53412 [11/Jul/2022:04:44:11.468] manager_http http_be/kube-node-1 0/10002/0/8/10010 200 313 - - ---- 3/3/1/0/+1 0/0 "GET /api/v1/job/kubernetes-dashboard?itemsPerPage=10&page=1&sortBy=d,creationTimestamp HTTP/1.1"
Jul 11 04:44:25 kube-master haproxy[34570]: 192.168.35.95:53415 [11/Jul/2022:04:44:15.342] manager_http http_be/kube-node-1 0/10002/1/21/10024 200 313 - - ---- 1/1/0/0/+1 0/0 "GET /api/v1/job/kubernetes-dashboard?itemsPerPage=10&page=1&sortBy=d,creationTimestamp HTTP/1.1"
Jul 11 04:52:22 kube-master haproxy[34570]: 192.168.35.95:53656 [11/Jul/2022:04:52:22.898] manager_http http_be/kube-node-1 0/0/0/6/7 200 313 - - ---- 2/2/1/0/0 0/0 "GET /api/v1/job/kubernetes-dashboard?itemsPerPage=10&page=1&sortBy=d,creationTimestamp HTTP/1.1"
Jul 11 04:52:24 kube-master haproxy[34570]: 192.168.35.95:53657 [11/Jul/2022:04:52:24.496] manager_http http_be/kube-node-1 0/0/0/15/15 200 313 - - ---- 3/3/2/0/0 0/0 "GET /api/v1/cronjob/kubernetes-dashboard?itemsPerPage=10&page=1&sortBy=d,creationTimestamp HTTP/1.1"
Jul 11 04:52:26 kube-master haproxy[34570]: 192.168.35.95:53658 [11/Jul/2022:04:52:26.174] manager_http http_be/kube-node-1 0/0/0/2/2 200 260 - - ---- 4/4/3/0/0 0/0 "GET /api/v1/login/status HTTP/1.1"
Jul 11 04:52:29 kube-master haproxy[34570]: 192.168.35.95:53659 [11/Jul/2022:04:52:29.509] manager_http http_be/kube-node-1 0/0/1/22/23 200 313 - - ---- 5/5/4/0/0 0/0 "GET /api/v1/cronjob/kubernetes-dashboard?itemsPerPage=10&page=1&sortBy=d,creationTimestamp HTTP/1.1"
Jul 11 04:52:30 kube-master haproxy[34570]: 192.168.35.95:53660 [11/Jul/2022:04:52:30.626] manager_http http_be/kube-node-1 0/0/1/10/11 200 717 - - ---- 6/6/5/0/0 0/0 "GET /api/v1/namespace HTTP/1.1"
Jul 11 04:52:32 kube-master haproxy[34570]: 192.168.35.95:53655 [11/Jul/2022:04:52:22.898] manager_http http_be/kube-node-1 0/10002/1/3/10007 200 717 - - ---- 6/6/4/0/+1 0/0 "GET /api/v1/namespace HTTP/1.1"
Jul 11 04:52:34 kube-master haproxy[34570]: 192.168.35.95:53656 [11/Jul/2022:04:52:24.442] manager_http http_be/kube-node-1 0/10002/1/1/10004 200 260 - - ---- 5/5/3/0/+1 0/0 "GET /api/v1/login/status HTTP/1.1"
Jul 11 04:52:36 kube-master haproxy[34570]: 192.168.35.95:53657 [11/Jul/2022:04:52:26.165] manager_http http_be/kube-node-1 0/10003/1/2/10007 200 1951 - - ---- 4/4/2/0/+1 0/0 "GET /854.fb9a5a284d546e93.js HTTP/1.1"
Jul 11 04:52:37 kube-master haproxy[34570]: 192.168.35.95:53658 [11/Jul/2022:04:52:27.891] manager_http http_be/kube-node-1 0/10001/1/12/10014 200 717 - - ---- 4/4/2/0/+1 0/0 "GET /api/v1/namespace HTTP/1.1"
Jul 11 04:52:40 kube-master haproxy[34570]: 192.168.35.95:53659 [11/Jul/2022:04:52:30.620] manager_http http_be/kube-node-1 0/10000/1/15/10016 200 313 - - ---- 3/3/1/0/+1 0/0 "GET /api/v1/cronjob/kubernetes-dashboard?itemsPerPage=10&page=1&sortBy=d,creationTimestamp HTTP/1.1"
Jul 11 04:52:46 kube-master haproxy[34570]: 192.168.35.95:53657 [11/Jul/2022:04:52:36.262] manager_http http_be/kube-node-1 0/10002/1/19/10022 200 313 - - ---- 1/1/0/0/+1 0/0 "GET /api/v1/daemonset/kubernetes-dashboard?itemsPerPage=10&page=1&sortBy=d,creationTimestamp HTTP/1.1"
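If I read HAProxy's httplog format correctly, the five slash-separated timers are request / queue wait / connect / server response / total (TR/Tw/Tc/Tr/Ta), and the fifth counter field is the retry count. One of the slow lines then breaks down roughly like this (my annotation, not HAProxy output):

0/10000/0/12/10012   request received in 0 ms, 10000 ms reported as queue/wait time, 0 ms to connect, 12 ms server response, 10012 ms total
5/5/3/1/+1           actconn/feconn/beconn/srv_conn/retries; the '+' prefix means the request was redispatched to another server after a failed connection attempt

The fast lines (for example 0/0/0/6/7 with a plain 0 in the retries position) show neither the 10-second wait nor the redispatch, and that 10000 ms happens to match the timeout connect 10s in the configuration below.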
And here is my configuration:
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events. This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #    file. A line like the following can be added to
    #    /etc/sysconfig/syslog
    #
    #    local2.*    /var/log/haproxy.log
    #
    # log 127.0.0.1 local2
    log /dev/log local0

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

    # utilize system-wide crypto-policies
    ssl-default-bind-ciphers PROFILE=SYSTEM
    ssl-default-server-ciphers PROFILE=SYSTEM

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option                  http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

# Enable HAProxy stats
listen stats
    bind :9000
    stats uri /stats
    stats refresh 10000ms

frontend manager_http
    bind *:80
    default_backend http_be

backend http_be
    server kube-node-1 192.168.35.61:30924
    server kube-node-2 192.168.35.62:30924
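For reference, the backend above defines no health checks, so if one of the two NodePorts silently drops connections, a request sent to it has to wait out the connect timeout before retries/option redispatch can move it to the other node. A minimal sketch of the same backend with TCP health checks enabled (the inter/fall/rise values are arbitrary examples, not something from my setup) would be:

backend http_be
    server kube-node-1 192.168.35.61:30924 check inter 2s fall 3 rise 2
    server kube-node-2 192.168.35.62:30924 check inter 2s fall 3 rise 2

With plain check HAProxy performs a TCP connect check against the NodePort; option httpchk would turn it into an HTTP check.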
I use plain HTTP to reach the dashboard. Can anyone tell me what is wrong here and how I can set up HAProxy to get a quick response?