Compare commits
21 commits
65de2adc47
150373a819
91fbebd25e
f889527cc7
1ba52cb8fd
c42d85a1d5
923261a527
f797d909cf
d010200de7
5e645198a7
27ab196460
b90d855736
9fef25c49d
a331343f71
8705a80ca8
edef83ffd3
be43cb93b4
f0b618c712
b15afb8c06
144fbdd18b
08f953b7ef
2 changed files with 133 additions and 38 deletions
README_GO.md — 79 changed lines
```diff
@@ -15,14 +15,85 @@ export HAPROXY_TEMPLATE="./haproxy.tmpl"
 {{- range .backends }}
 backend {{ .Name }}
-mode tcp
-balance leastconn
-cookie {{ .Name }} insert indirect nocache
+mode {{ .Mode }}
+balance {{ .Balance }}
+{{- if .CookieName }}
+cookie {{ .CookieName }} {{ .CookieFlags }}
+{{- end }}
+{{- if .HealthCheck }}
+option httpchk GET {{ .HealthCheck }}
+http-check expect status 200
+{{- end }}
+{{- $backend := . }}
 {{- range .Servers }}
-server {{ .Name }} {{ .Address }}:{{ .Port }} check cookie {{ .Cookie }}
+server {{ .Name }} {{ .Address }}:{{ .Port }}{{ if $backend.HealthCheck }} check{{ end }}{{ if $backend.CookieName }} cookie {{ .Cookie }}{{ end }}{{ if $backend.ServerOptions }} {{ $backend.ServerOptions }}{{ end }}
 {{- end }}
 {{- end }}
```

The remaining README_GO.md changes add the following new sections.
# Systemd Service and Timer

```
[Unit]
Description=HAProxy Config Generator
After=network.target

[Service]
Type=oneshot
Environment=KUBERNETES_HOST=https://10.0.20.7:6443
Environment=KUBERNETES_TOKEN=eyJhbGciOi...
Environment=KUBERNETES_VERIFYSSL=false
Environment=HAPROXY_TEMPLATE=/etc/haproxy/haproxy.tmpl
ExecStart=/bin/bash /etc/haproxy/haproxy-generator.sh
```

Timer for Systemd

```
[Unit]
Description=Run HAProxy Config Generator every minute

[Timer]
OnBootSec=1min
OnUnitActiveSec=1min
Unit=haproxy-generator.service
Persistent=true

[Install]
WantedBy=timers.target
```
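A minimal installation sketch, assuming the two units above are saved as `haproxy-generator.service` and `haproxy-generator.timer` under `/etc/systemd/system/` (the service name follows the `Unit=` reference in the timer; the timer file name is an assumption):

```bash
# Assumed file names; adjust if the units live elsewhere
sudo cp haproxy-generator.service haproxy-generator.timer /etc/systemd/system/

# Pick up the new units and start the timer (runs at boot and then every minute)
sudo systemctl daemon-reload
sudo systemctl enable --now haproxy-generator.timer

# Verify the schedule and inspect the last generator run
systemctl list-timers haproxy-generator.timer
journalctl -u haproxy-generator.service -n 20
```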
# Reload Script (post start)

Copy the script to /etc/haproxy/haproxy-generator.sh:

```
#!/bin/bash

/usr/local/bin/haproxy-generator > /etc/haproxy/haproxy.cfg.new
DIFF=$(diff /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.new)
/sbin/haproxy -f /etc/haproxy/haproxy.cfg.new -c
VALID=$?

if [ "$DIFF" != "" ] && [ $VALID -eq 0 ]
then
    mv /etc/haproxy/haproxy.cfg.new /etc/haproxy/haproxy.cfg
    /usr/sbin/service haproxy restart
fi
```
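The script only replaces the active configuration when `diff` reports a change and the HAProxy syntax check passes. A hedged sketch of a first manual run (the `touch` matters only if `/etc/haproxy/haproxy.cfg` does not exist yet, because `diff` against a missing file leaves `$DIFF` empty and the new file is never moved into place):

```bash
# One-off manual test of the reload script
sudo touch /etc/haproxy/haproxy.cfg                 # ensure the diff target exists on a fresh host
sudo bash /etc/haproxy/haproxy-generator.sh         # generate, validate, and swap in the config
sudo /sbin/haproxy -c -f /etc/haproxy/haproxy.cfg   # re-check the now-active configuration
```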
# Service Annotations

| Annotation key | Description | Type | Example value |
|---|---|---|---|
| `haproxy/enabled` | **Enables** inclusion of the service in the HAProxy configuration | `bool` | `"true"` |
| `haproxy/mode` | Operating mode of the backend (`tcp` or `http`) | `string` | `"tcp"` / `"http"` |
| `haproxy/cookie-name` | Name of the cookie used for session persistence | `string` | `"SRVCOOKIE"` |
| `haproxy/cookie-flags` | Additional cookie options for HAProxy | `string` | `"insert indirect nocache"` |
| `haproxy/health-check` | Disables health checks when set to `"false"` | `bool` | `"false"` |
| `haproxy/server-options` | Additional options for the individual `server` lines in HAProxy | `string` | `"ssl verify none"` |
| `haproxy/port` | Use only **this port** of the service when several ports are defined | `int` | `"8080"` |
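As an illustration only (not part of this change), the annotations could be attached to an existing Service with `kubectl`; the Service name `my-service` and the values are placeholders:

```bash
kubectl annotate service my-service \
  haproxy/enabled="true" \
  haproxy/mode="http" \
  haproxy/cookie-name="SRVCOOKIE" \
  haproxy/cookie-flags="insert indirect nocache" \
  haproxy/port="8080" \
  --overwrite
```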
main.go — 90 changed lines
```diff
@@ -28,9 +28,14 @@ type BackendServer struct {
 }
 
 type Backend struct {
     Name string
     Balance string
-    Servers []BackendServer
+    Mode string
+    CookieName string
+    CookieFlags string
+    HealthCheck string
+    ServerOptions string
+    Servers []BackendServer
 }
 
 func getEnv(key, fallback string) string {
@@ -117,8 +122,8 @@ func main() {
 type Service struct {
     Metadata struct {
         Name string `json:"name"`
         Namespace string `json:"namespace"`
         Annotations map[string]string `json:"annotations"`
     } `json:"metadata"`
     Spec struct {
@@ -132,16 +137,13 @@ func main() {
 type EndpointSubsetAddress struct {
     IP string `json:"ip"`
 }
 
 type EndpointSubsetPort struct {
     Port int `json:"port"`
 }
 
 type EndpointSubset struct {
     Addresses []EndpointSubsetAddress `json:"addresses"`
     Ports []EndpointSubsetPort `json:"ports"`
 }
 
 type Endpoint struct {
     Metadata struct {
         Name string `json:"name"`
@@ -179,45 +181,67 @@ func main() {
 backends := []Backend{}
 
 for _, svc := range services {
-    if svc.Spec.Type != "ClusterIP" {
-        continue
-    }
-
-    if val, ok := svc.Metadata.Annotations["haproxy/enabled"]; !ok || val != "true" {
+    ann := svc.Metadata.Annotations
+    if val, ok := ann["haproxy/enabled"]; !ok || val != "true" {
         continue
     }
 
     key := svc.Metadata.Namespace + "/" + svc.Metadata.Name
     ep, found := endpointMap[key]
-    if !found || len(ep.Subsets) == 0 {
-        continue
-    }
 
     b := Backend{
         Name: "SRV_" + strings.ReplaceAll(svc.Metadata.Name, " ", "-"),
         Balance: "leastconn",
+        Mode: "tcp",
+        HealthCheck: "",
+    }
+
+    if val, ok := ann["haproxy/mode"]; ok && val != "" {
+        b.Mode = val
+    }
+    if val, ok := ann["haproxy/cookie-name"]; ok && val != "" {
+        b.CookieName = val
+    }
+    if val, ok := ann["haproxy/cookie-flags"]; ok && val != "" {
+        b.CookieFlags = val
+    }
+    if val, ok := ann["haproxy/health-check"]; ok && val != "" {
+        b.HealthCheck = val
+    }
+    if val, ok := ann["haproxy/server-options"]; ok && val != "" {
+        b.ServerOptions = val
+    }
+
+    targetPort := 0
+    if val, ok := ann["haproxy/port"]; ok && val != "" {
+        if p, err := strconv.Atoi(val); err == nil {
+            targetPort = p
+        }
     }
 
     servers := []BackendServer{}
-    serverIndex := 1
-    for _, subset := range ep.Subsets {
-        for _, addr := range subset.Addresses {
-            for _, port := range subset.Ports {
-                cookie := hashString(fmt.Sprintf("%s-%s-%d", svc.Metadata.Name, addr.IP, port.Port))
-                serverName := fmt.Sprintf("%s_%d", svc.Metadata.Name, serverIndex)
-                serverIndex++
-
-                servers = append(servers, BackendServer{
-                    Name: serverName,
-                    Address: addr.IP,
-                    Port: port.Port,
-                    Cookie: cookie,
-                })
-            }
-        }
-    }
-    b.Servers = servers
+    if found && len(ep.Subsets) > 0 {
+        for _, subset := range ep.Subsets {
+            for _, addr := range subset.Addresses {
+                for _, port := range subset.Ports {
+                    if targetPort != 0 && port.Port != targetPort {
+                        continue
+                    }
+                    cookie := hashString(fmt.Sprintf("%s-%s-%d", svc.Metadata.Name, addr.IP, port.Port))
+                    serverName := fmt.Sprintf("%s_%d", svc.Metadata.Name, cookie)
+                    servers = append(servers, BackendServer{
+                        Name: serverName,
+                        Address: addr.IP,
+                        Port: port.Port,
+                        Cookie: cookie,
+                    })
+                }
+            }
+        }
+    }
+
+    b.Servers = servers
     backends = append(backends, b)
 }
 
@@ -225,11 +249,11 @@ func main() {
 if err != nil {
     log.Fatalf("Failed to get absolute path: %v", err)
 }
 
 tmpl, err := getTemplate(tmplAbsPath)
 if err != nil {
     log.Fatalf("Failed to parse template: %v", err)
 }
 
 err = tmpl.Execute(os.Stdout, map[string]interface{}{
     "backends": backends,
 })
```
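For a quick local check of the generator itself, the environment variables from the systemd unit can be supplied inline; the values below are the placeholders from the unit file, and the binary path matches the reload script:

```bash
# Render the HAProxy configuration to stdout without touching /etc/haproxy
KUBERNETES_HOST="https://10.0.20.7:6443" \
KUBERNETES_TOKEN="eyJhbGciOi..." \
KUBERNETES_VERIFYSSL="false" \
HAPROXY_TEMPLATE="./haproxy.tmpl" \
/usr/local/bin/haproxy-generator
```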