Mirror of https://github.com/k3s-io/k3s.git (synced 2024-06-07 19:41:36 +00:00)
2f5ee914f9
In k3s today, the Kubernetes API and the /v1-k3s API are combined into one HTTP server. In rke2 we are running unmodified, non-embedded Kubernetes, and as such it is preferred to run the Kubernetes API and the /v1-k3s API on different ports. The /v1-k3s API port is called the SupervisorPort in the code. To support this separation of ports, a new shim was added on the client in the pkg/agent/proxy package that launches two load balancers instead of just one: one load balancer for 6443 (the Kubernetes API) and the other for 9345 (the supervisor port).
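To make the shape of that shim concrete, here is a minimal, self-contained sketch of the idea: two independent local TCP proxies, one per port. This is not the actual pkg/agent/proxy code; the backend address server.example, the single fixed upstream, and all names below are illustrative placeholders (the real agent load balancer tracks multiple server addresses and updates them as the cluster changes).

package main

import (
	"io"
	"log"
	"net"
)

// forward proxies one accepted connection to the backend address,
// copying bytes in both directions until either side closes.
func forward(conn net.Conn, backend string) {
	defer conn.Close()
	upstream, err := net.Dial("tcp", backend)
	if err != nil {
		log.Printf("dial %s: %v", backend, err)
		return
	}
	defer upstream.Close()
	go func() {
		io.Copy(upstream, conn) // client -> server
		upstream.Close()
	}()
	io.Copy(conn, upstream) // server -> client
}

// listenAndProxy is one "load balancer": it accepts on a local address
// and forwards every connection to a backend.
func listenAndProxy(local, backend string) {
	l, err := net.Listen("tcp", local)
	if err != nil {
		log.Fatalf("listen %s: %v", local, err)
	}
	for {
		conn, err := l.Accept()
		if err != nil {
			log.Printf("accept: %v", err)
			continue
		}
		go forward(conn, backend)
	}
}

func main() {
	// Two load balancers instead of one: the Kubernetes API port and the
	// supervisor port are proxied independently of each other.
	go listenAndProxy("127.0.0.1:6443", "server.example:6443") // Kubernetes API
	listenAndProxy("127.0.0.1:9345", "server.example:9345")    // /v1-k3s supervisor API
}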
82 lines · 2.0 KiB · Go
package cluster

import (
	"context"
	"crypto/tls"
	"net"
	"net/http"
	"path/filepath"

	"github.com/rancher/dynamiclistener"
	"github.com/rancher/dynamiclistener/factory"
	"github.com/rancher/dynamiclistener/storage/file"
	"github.com/rancher/dynamiclistener/storage/kubernetes"
	"github.com/rancher/dynamiclistener/storage/memory"
	"github.com/rancher/k3s/pkg/daemons/config"
	"github.com/rancher/wrangler-api/pkg/generated/controllers/core"
	"github.com/sirupsen/logrus"
)

// newListener builds the HTTPS listener for the supervisor port. It wraps a
// plain TCP listener with dynamiclistener, which generates and re-signs the
// serving certificate on the fly (for example, when new SANs are added)
// using the cluster's server CA.
func (c *Cluster) newListener(ctx context.Context) (net.Listener, http.Handler, error) {
	tcp, err := dynamiclistener.NewTCPListener(c.config.BindAddress, c.config.SupervisorPort)
	if err != nil {
		return nil, nil, err
	}

	cert, key, err := factory.LoadCerts(c.runtime.ServerCA, c.runtime.ServerCAKey)
	if err != nil {
		return nil, nil, err
	}

	storage := tlsStorage(ctx, c.config.DataDir, c.runtime)
	return dynamiclistener.NewListener(tcp, storage, cert, key, dynamiclistener.Config{
		CN:           "k3s",
		Organization: []string{"k3s"},
		TLSConfig: tls.Config{
			// Request (but do not require) a client certificate; actual
			// authentication is handled further up the handler chain.
			ClientAuth: tls.RequestClientCert,
		},
		SANs: append(c.config.SANs, "localhost", "kubernetes", "kubernetes.default", "kubernetes.default.svc."+c.config.ClusterDomain),
	})
}

// startClusterAndHTTPS starts the supervisor HTTPS server: it creates the
// listener, assembles the handler chain, initializes the cluster database
// (which may wrap both the listener and the handler), and then serves until
// the context is canceled.
func (c *Cluster) startClusterAndHTTPS(ctx context.Context) error {
	l, handler, err := c.newListener(ctx)
	if err != nil {
		return err
	}

	handler, err = c.getHandler(handler)
	if err != nil {
		return err
	}

	l, handler, err = c.initClusterDB(ctx, l, handler)
	if err != nil {
		return err
	}

	server := http.Server{
		Handler: handler,
	}

	// Serve blocks until the listener fails or the server is shut down;
	// either way the supervisor cannot continue, so exit.
	go func() {
		err := server.Serve(l)
		logrus.Fatalf("server stopped: %v", err)
	}()

	// Shut the server down when the context is canceled.
	go func() {
		<-ctx.Done()
		server.Shutdown(context.Background())
	}()

	return nil
}

// tlsStorage layers the certificate stores used by dynamiclistener: the
// canonical copy lives in the k3s-serving secret in the kube-system
// namespace, fronted by an in-memory cache that is itself backed by a JSON
// file on disk, so certificates survive restarts even before the apiserver
// is reachable.
func tlsStorage(ctx context.Context, dataDir string, runtime *config.ControlRuntime) dynamiclistener.TLSStorage {
	fileStorage := file.New(filepath.Join(dataDir, "tls/dynamic-cert.json"))
	cache := memory.NewBacked(fileStorage)
	return kubernetes.New(ctx, func() *core.Factory {
		return runtime.Core
	}, "kube-system", "k3s-serving", cache)
}
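The serve-and-shutdown shape of startClusterAndHTTPS is worth pulling out on its own. Below is a minimal, runnable sketch of the same pattern using only the standard library; the listener address, handler, and timeout are placeholders. One detail it makes explicit: http.Server.Serve always returns a non-nil error, and after Shutdown it returns http.ErrServerClosed, which a caller may want to treat as a clean stop rather than a fatal failure.

package main

import (
	"context"
	"log"
	"net"
	"net/http"
	"time"
)

// serveUntilCanceled mirrors the shape of startClusterAndHTTPS: serve on a
// goroutine, shut the server down when ctx is canceled.
func serveUntilCanceled(ctx context.Context, l net.Listener, handler http.Handler) {
	server := http.Server{Handler: handler}

	go func() {
		// Serve always returns a non-nil error. http.ErrServerClosed
		// signals a clean Shutdown; anything else is a real failure.
		if err := server.Serve(l); err != http.ErrServerClosed {
			log.Fatalf("server stopped: %v", err)
		}
	}()

	go func() {
		<-ctx.Done()
		server.Shutdown(context.Background())
	}()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	l, err := net.Listen("tcp", "127.0.0.1:0") // any free port
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("serving on %s", l.Addr())

	serveUntilCanceled(ctx, l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok\n"))
	}))

	<-ctx.Done()
	time.Sleep(100 * time.Millisecond) // give Shutdown a moment to complete
}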