sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g" /etc/sysctl.conf sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g" /etc/sysctl.conf sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g" /etc/sysctl.conf sed -i "s#^net.ipv6.conf.all.disable_ipv6.*#net.ipv6.conf.all.disable_ipv6=1#g" /etc/sysctl.conf sed -i "s#^net.ipv6.conf.default.disable_ipv6.*#net.ipv6.conf.default.disable_ipv6=1#g" /etc/sysctl.conf sed -i "s#^net.ipv6.conf.lo.disable_ipv6.*#net.ipv6.conf.lo.disable_ipv6=1#g" /etc/sysctl.conf sed -i "s#^net.ipv6.conf.all.forwarding.*#net.ipv6.conf.all.forwarding=1#g" /etc/sysctl.conf
Without these settings, the kubeadm preflight checks fail with:

[ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables contents are not set to 1
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Typha is disabled.
  typha_service_name: "none"
  # Configure the backend to use.
  calico_backend: "bird"
  # Configure the MTU to use
  veth_mtu: "1440"
  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "mtu": __CNI_MTU__,
          "ipam": {
            "type": "calico-ipam"
          },
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        },
        {
          "type": "bandwidth",
          "capabilities": {"bandwidth": true}
        }
      ]
    }

---
# Source: calico/templates/kdd-crds.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
rules:
  # Nodes are watched to monitor for deletions.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - watch
      - list
      - get
  # Pods are queried to check for existence.
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
  # IPAM resources are manipulated when nodes are deleted.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
    verbs:
      - list
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
    verbs:
      - get
      - list
      - create
      - update
      - delete
  # Needs access to update clusterinformations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - clusterinformations
    verbs:
      - get
      - create
      - update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
  - kind: ServiceAccount
    name: calico-kube-controllers
    namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
      # Used to discover Typhas.
      - get
  # Pod CIDR auto-detection on kubeadm needs access to config maps.
  - apiGroups: [""]
    resources:
      - configmaps
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch
      # Calico stores some configuration information in node annotations.
      - update
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
  # Used by Calico for policy information.
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
      - serviceaccounts
    verbs:
      - list
      - watch
  # The CNI plugin patches pods/status.
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - patch
  # Calico monitors various CRDs for config.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - felixconfigurations
      - bgppeers
      - globalbgpconfigs
      - bgpconfigurations
      - ippools
      - ipamblocks
      - globalnetworkpolicies
      - globalnetworksets
      - networkpolicies
      - networksets
      - clusterinformations
      - hostendpoints
      - blockaffinities
    verbs:
      - get
      - list
      - watch
  # Calico must create and update some CRDs on startup.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
      - felixconfigurations
      - clusterinformations
    verbs:
      - create
      - update
  # Calico stores some configuration information on the node.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  # These permissions are only required for upgrade from v2.6, and can
  # be removed after upgrade or on fresh installations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - bgpconfigurations
      - bgppeers
    verbs:
      - create
      - update
  # These permissions are required for Calico CNI to perform IPAM allocations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
    verbs:
      - get
      - list
      - create
      - update
      - delete
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ipamconfigs
    verbs:
      - get
  # Block affinities must also be watchable by confd for route aggregation.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
    verbs:
      - watch
  # The Calico IPAM migration needs to get daemonsets. These permissions can be
  # removed if not upgrading from an installation using host-local IPAM.
  - apiGroups: ["apps"]
    resources:
      - daemonsets
    verbs:
      - get
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # This, along with the CriticalAddonsOnly toleration below,
        # marks the pod as a critical add-on, ensuring it gets
        # priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
        # This container performs upgrade from host-local IPAM to calico-ipam.
        # It can be deleted if this is a fresh installation, or if you have already
        # upgraded to use calico-ipam.
        - name: upgrade-ipam
          image: calico/cni:v3.13.1
          command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
          env:
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
          volumeMounts:
            - mountPath: /var/lib/cni/networks
              name: host-local-net-dir
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
          securityContext:
            privileged: true
        # This container installs the CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v3.13.1
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
          securityContext:
            privileged: true
        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
        # to communicate with Felix over the Policy Sync API.
        - name: flexvol-driver
          image: calico/pod2daemon-flexvol:v3.13.1
          volumeMounts:
            - name: flexvol-driver-host
              mountPath: /host/driver
          securityContext:
            privileged: true
      containers:
        # Runs calico-node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v3.13.1
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
            # Set based on the k8s node name.
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            # - name: CALICO_IPV4POOL_CIDR
            #   value: "192.168.0.0/16"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            exec:
              command:
                - /bin/calico-node
                - -felix-live
                - -bird-live
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
                - /bin/calico-node
                - -felix-ready
                - -bird-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - name: policysync
              mountPath: /var/run/nodeagent
      volumes:
        # Used by calico-node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Mount in the directory for host-local IPAM allocations. This is
        # used when upgrading from host-local to calico-ipam, and can be removed
        # if not using the upgrade-ipam init container.
        - name: host-local-net-dir
          hostPath:
            path: /var/lib/cni/networks
        # Used to create per-pod Unix Domain Sockets
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent
        # Used to install Flex Volume Driver
        - name: flexvol-driver-host
          hostPath:
            type: DirectoryOrCreate
            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system
---
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      priorityClassName: system-cluster-critical
      containers:
        - name: calico-kube-controllers
          image: calico/kube-controllers:v3.13.1
          env:
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: node
            - name: DATASTORE_TYPE
              value: kubernetes
          readinessProbe:
            exec:
              command:
                - /usr/bin/check-status
                - -r
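With all of the pieces above saved into a single file (assuming the name calico.yaml here), the manifest is applied like any other and the rollout can be watched:

kubectl apply -f calico.yaml
kubectl get pods -n kube-system -w   # wait for calico-node and calico-kube-controllers to reach Running
kubectl get nodes                    # nodes should report Ready once the CNI is up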
error: the server doesn't have a resource type "calico-kube-controllers-544658cf79-bv5mj"
If you hit this exception, the fix is simple:
Solution: try restarting the kubelet service, then wait a moment; it should recover.
systemctl restart kubelet
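After the restart, give kubelet a few seconds and verify that it is healthy and that the pod can be queried again (note the explicit pods resource type in the query):

systemctl status kubelet
kubectl get pods -n kube-system | grep calico-kube-controllers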
Run the join command (on the worker nodes)
Note: be sure to copy this command from the output printed after kubeadm init succeeds.
kubeadm join 192.168.142.138:6443 --token 8yvi2m.466nhemzvyqcxkny \
    --discovery-token-ca-cert-hash sha256:99a5fdcb4af4dd9c2ee40e48ba420d7630676a77a3c0f2445c260921fdcaf83a

# If running the join command on the two worker nodes fails with:
# [ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables contents are not set to 1
# [preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
# To see the stack trace of this error execute with --v=5 or higher
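The worker nodes need the same kernel settings that were applied on the master earlier. A minimal fix on each failing node, assuming the br_netfilter module is available:

modprobe br_netfilter
echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
# then re-run the kubeadm join command above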
---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.0.0
spec:
  persistence:
    storageClass: ""          # If there is not a default StorageClass in your cluster, you need to specify an existing StorageClass here.
  authentication:
    jwtSecret: ""             # Keep the jwtSecret consistent with the host cluster. Retrieve the jwtSecret by executing "kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret" on the host cluster.
  etcd:
    monitoring: true          # Whether to enable etcd monitoring dashboard installation. You have to create a secret for etcd before you enable it.
    endpointIps: 10.12.0.9    # etcd cluster EndpointIps, it can be a bunch of IPs here.
    port: 2379                # etcd port.
    tlsEnable: true
  common:
    mysqlVolumeSize: 20Gi     # MySQL PVC size.
    minioVolumeSize: 20Gi     # Minio PVC size.
    etcdVolumeSize: 20Gi      # etcd PVC size.
    openldapVolumeSize: 2Gi   # openldap PVC size.
    redisVolumSize: 2Gi       # Redis PVC size.
    es:                       # Storage backend for logging, events and auditing.
      # elasticsearchMasterReplicas: 1   # Total number of master nodes; even numbers are not allowed.
      # elasticsearchDataReplicas: 1     # Total number of data nodes.
      elasticsearchMasterVolumeSize: 4Gi   # Volume size of Elasticsearch master nodes.
      elasticsearchDataVolumeSize: 20Gi    # Volume size of Elasticsearch data nodes.
      logMaxAge: 7            # Log retention time in built-in Elasticsearch; it is 7 days by default.
      elkPrefix: logstash     # The string making up index names. The index name will be formatted as ks-<elk_prefix>-log.
  console:
    enableMultiLogin: true    # Enable/disable multiple sign-on, which allows an account to be used by different users at the same time.
    port: 30880
  alerting:                   # (CPU: 0.3 Core, Memory: 300 MiB) Whether to install KubeSphere alerting system. It enables users to customize alerting policies to send messages to receivers in time, with different time intervals and alerting levels to choose from.
    enabled: true
  auditing:                   # Whether to install KubeSphere audit log system. It provides a security-relevant chronological set of records, recording the sequence of activities happening on the platform, initiated by different tenants.
    enabled: true
  devops:                     # (CPU: 0.47 Core, Memory: 8.6 G) Whether to install KubeSphere DevOps System. It provides an out-of-the-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image.
    enabled: true
    jenkinsMemoryLim: 2Gi       # Jenkins memory limit.
    jenkinsMemoryReq: 1500Mi    # Jenkins memory request.
    jenkinsVolumeSize: 8Gi      # Jenkins volume size.
    jenkinsJavaOpts_Xms: 512m   # The following three fields are JVM parameters.
    jenkinsJavaOpts_Xmx: 512m
    jenkinsJavaOpts_MaxRAM: 2g
  events:                     # Whether to install KubeSphere events system. It provides a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters.
    enabled: true
    ruler:
      enabled: true
      replicas: 2
  logging:                    # (CPU: 57 m, Memory: 2.76 G) Whether to install KubeSphere logging system. Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd.
    enabled: true
    logsidecarReplicas: 2
  metrics_server:             # (CPU: 56 m, Memory: 44.35 MiB) Whether to install metrics-server. It enables HPA (Horizontal Pod Autoscaler).
    enabled: false
  monitoring:
    # prometheusReplicas: 1   # Prometheus replicas are responsible for monitoring different segments of the data source and provide high availability.
    prometheusMemoryRequest: 400Mi   # Prometheus request memory.
    prometheusVolumeSize: 20Gi       # Prometheus PVC size.
    # alertmanagerReplicas: 1        # AlertManager replicas.
  multicluster:
    clusterRole: none         # host | member | none  # You can install a solo cluster, or specify it as the role of a host or member cluster.
  networkpolicy:              # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods). Make sure that the CNI network plugin used by the cluster supports NetworkPolicy; plugins that do include Calico, Cilium, Kube-router, Romana and Weave Net.
    enabled: true
  notification:               # Email notification support for the legacy alerting system; should be enabled/disabled together with the above alerting option.
    enabled: true
  openpitrix:                 # (2 Core, 3.6 G) Whether to install KubeSphere Application Store. It provides an application store for Helm-based applications, and offers application lifecycle management.
    enabled: true
  servicemesh:                # (0.3 Core, 300 MiB) Whether to install KubeSphere Service Mesh (Istio-based). It provides fine-grained traffic management, observability and tracing, and offers visualization for traffic topology.
    enabled: true
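Per the KubeSphere v3.0.0 installation docs, this ClusterConfiguration is applied together with the installer manifest, and installation progress can then be followed in the ks-installer log (assuming the file above is saved as cluster-configuration.yaml):

kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml
# follow the installer output until "Welcome to KubeSphere!" is printed
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f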