diff --git a/charts/tidb-cluster/templates/scripts/_start_pd.sh.tpl b/charts/tidb-cluster/templates/scripts/_start_pd.sh.tpl index 36273d8693..b5058aef65 100644 --- a/charts/tidb-cluster/templates/scripts/_start_pd.sh.tpl +++ b/charts/tidb-cluster/templates/scripts/_start_pd.sh.tpl @@ -30,7 +30,7 @@ fi # the general form of variable PEER_SERVICE_NAME is: "-pd-peer" cluster_name=`echo ${PEER_SERVICE_NAME} | sed 's/-pd-peer//'` -domain="${HOSTNAME}.${PEER_SERVICE_NAME}.${NAMESPACE}.svc" +domain="${POD_NAME}.${PEER_SERVICE_NAME}.${NAMESPACE}.svc" discovery_url="${cluster_name}-discovery.${NAMESPACE}.svc:10261" encoded_domain_url=`echo ${domain}:2380 | base64 | tr "\n" " " | sed "s/ //g"` @@ -57,7 +57,7 @@ while true; do done ARGS="--data-dir=/var/lib/pd \ ---name=${HOSTNAME} \ +--name=${POD_NAME} \ --peer-urls=http://0.0.0.0:2380 \ --advertise-peer-urls=http://${domain}:2380 \ --client-urls=http://0.0.0.0:2379 \ diff --git a/charts/tidb-cluster/templates/scripts/_start_tikv.sh.tpl b/charts/tidb-cluster/templates/scripts/_start_tikv.sh.tpl index 06bf6c9253..e161d01923 100644 --- a/charts/tidb-cluster/templates/scripts/_start_tikv.sh.tpl +++ b/charts/tidb-cluster/templates/scripts/_start_tikv.sh.tpl @@ -29,7 +29,7 @@ then fi ARGS="--pd=${CLUSTER_NAME}-pd:2379 \ ---advertise-addr=${HOSTNAME}.${HEADLESS_SERVICE_NAME}.${NAMESPACE}.svc:20160 \ +--advertise-addr=${POD_NAME}.${HEADLESS_SERVICE_NAME}.${NAMESPACE}.svc:20160 \ --addr=0.0.0.0:20160 \ --status-addr=0.0.0.0:20180 \ --data-dir=/var/lib/tikv \ diff --git a/charts/tidb-cluster/templates/tidb-cluster.yaml b/charts/tidb-cluster/templates/tidb-cluster.yaml index 3d3aaf493f..f7f0b3e3e9 100644 --- a/charts/tidb-cluster/templates/tidb-cluster.yaml +++ b/charts/tidb-cluster/templates/tidb-cluster.yaml @@ -49,6 +49,7 @@ spec: podSecurityContext: {{ toYaml .Values.pd.podSecurityContext | indent 6}} {{- end }} + hostNetwork: {{ .Values.pd.hostNetwork }} tikv: replicas: {{ .Values.tikv.replicas }} image: {{ .Values.tikv.image }} 
@@ -76,6 +77,7 @@ spec: {{ toYaml .Values.tikv.podSecurityContext | indent 6}} {{- end }} maxFailoverCount: {{ .Values.tikv.maxFailoverCount | default 3 }} + hostNetwork: {{ .Values.tikv.hostNetwork }} tidb: replicas: {{ .Values.tidb.replicas }} image: {{ .Values.tidb.image }} @@ -99,6 +101,7 @@ spec: podSecurityContext: {{ toYaml .Values.tidb.podSecurityContext | indent 6}} {{- end }} + hostNetwork: {{ .Values.tidb.hostNetwork }} binlogEnabled: {{ .Values.binlog.pump.create | default false }} maxFailoverCount: {{ .Values.tidb.maxFailoverCount | default 3 }} separateSlowLog: {{ .Values.tidb.separateSlowLog | default false }} diff --git a/charts/tidb-cluster/values.yaml b/charts/tidb-cluster/values.yaml index 0088c0213e..d56d8ca406 100644 --- a/charts/tidb-cluster/values.yaml +++ b/charts/tidb-cluster/values.yaml @@ -158,6 +158,10 @@ pd: # refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod podSecurityContext: {} + # Use the host's network namespace if enabled. + # Default to false. + hostNetwork: false + tikv: # Please refer to https://github.com/tikv/tikv/blob/master/etc/config-template.toml for the default # tikv configurations (change to the tags of your tikv version), @@ -242,6 +246,10 @@ tikv: # refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod podSecurityContext: {} + # Use the host's network namespace if enabled. + # Default to false. + hostNetwork: false + tidb: # Please refer to https://github.com/pingcap/tidb/blob/master/config/config.toml.example for the default # tidb configurations(change to the tags of your tidb version), @@ -303,6 +311,10 @@ tidb: # refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod podSecurityContext: {} + # Use the host's network namespace if enabled. + # Default to false. 
+ hostNetwork: false + maxFailoverCount: 3 service: type: NodePort diff --git a/pkg/apis/pingcap.com/v1alpha1/types.go b/pkg/apis/pingcap.com/v1alpha1/types.go index 0e49b261f6..8958a82257 100644 --- a/pkg/apis/pingcap.com/v1alpha1/types.go +++ b/pkg/apis/pingcap.com/v1alpha1/types.go @@ -158,6 +158,7 @@ type PodAttributesSpec struct { Tolerations []corev1.Toleration `json:"tolerations,omitempty"` Annotations map[string]string `json:"annotations,omitempty"` PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` + HostNetwork bool `json:"hostNetwork,omitempty"` } // Service represent service type used in TidbCluster diff --git a/pkg/manager/member/pd_member_manager.go b/pkg/manager/member/pd_member_manager.go index 9f96fb317b..e69f8faf40 100644 --- a/pkg/manager/member/pd_member_manager.go +++ b/pkg/manager/member/pd_member_manager.go @@ -484,6 +484,11 @@ func (pmm *pdMemberManager) getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster) } } + dnsPolicy := corev1.DNSClusterFirst // same as k8s defaults + if tc.Spec.PD.HostNetwork { + dnsPolicy = corev1.DNSClusterFirstWithHostNet + } + pdSet := &apps.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: setName, @@ -503,6 +508,8 @@ func (pmm *pdMemberManager) getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster) SchedulerName: tc.Spec.SchedulerName, Affinity: tc.Spec.PD.Affinity, NodeSelector: tc.Spec.PD.NodeSelector, + HostNetwork: tc.Spec.PD.HostNetwork, + DNSPolicy: dnsPolicy, Containers: []corev1.Container{ { Name: v1alpha1.PDMemberType.String(), @@ -532,6 +539,14 @@ func (pmm *pdMemberManager) getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster) }, }, }, + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, { Name: "PEER_SERVICE_NAME", Value: controller.PDPeerMemberName(tcName), diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go index 9e42234fe4..e6de192766 
100644 --- a/pkg/manager/member/tidb_member_manager.go +++ b/pkg/manager/member/tidb_member_manager.go @@ -329,6 +329,11 @@ func (tmm *tidbMemberManager) getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbClust }, }) + dnsPolicy := corev1.DNSClusterFirst // same as k8s defaults + if tc.Spec.TiDB.HostNetwork { + dnsPolicy = corev1.DNSClusterFirstWithHostNet + } + tidbLabel := label.New().Instance(instanceName).TiDB() podAnnotations := CombineAnnotations(controller.AnnProm(10080), tc.Spec.TiDB.Annotations) tidbSet := &apps.StatefulSet{ @@ -355,6 +360,8 @@ func (tmm *tidbMemberManager) getNewTiDBSetForTidbClust Tolerations: tc.Spec.TiDB.Tolerations, Volumes: vols, SecurityContext: tc.Spec.TiDB.PodSecurityContext, + HostNetwork: tc.Spec.TiDB.HostNetwork, + DNSPolicy: dnsPolicy, }, }, ServiceName: controller.TiDBPeerMemberName(tcName), diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go index c5ee40b8a0..46c093cf63 100644 --- a/pkg/manager/member/tikv_member_manager.go +++ b/pkg/manager/member/tikv_member_manager.go @@ -320,6 +320,11 @@ func (tkmm *tikvMemberManager) getNewSetForTidbCluster(tc *v1alpha1.TidbCluster) storageClassName = controller.DefaultStorageClassName } + dnsPolicy := corev1.DNSClusterFirst // same as k8s defaults + if tc.Spec.TiKV.HostNetwork { + dnsPolicy = corev1.DNSClusterFirstWithHostNet + } + tikvset := &apps.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: setName, @@ -339,6 +344,8 @@ func (tkmm *tikvMemberManager) getNewSetForTidbCluster(tc *v1alpha1.TidbCluster) SchedulerName: tc.Spec.SchedulerName, Affinity: tc.Spec.TiKV.Affinity, NodeSelector: tc.Spec.TiKV.NodeSelector, + HostNetwork: tc.Spec.TiKV.HostNetwork, + DNSPolicy: dnsPolicy, Containers: []corev1.Container{ { Name: v1alpha1.TiKVMemberType.String(), @@ -366,6 +373,14 @@ func (tkmm *tikvMemberManager) getNewSetForTidbCluster(tc *v1alpha1.TidbCluster) }, }, }, + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: 
&corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, { Name: "CLUSTER_NAME", Value: tcName, diff --git a/tests/cluster_info.go b/tests/cluster_info.go index 853e3c14c1..e2acced3f4 100644 --- a/tests/cluster_info.go +++ b/tests/cluster_info.go @@ -33,6 +33,17 @@ func (tc *TidbClusterConfig) ScaleTiDB(replicas uint) *TidbClusterConfig { return tc } +func (tc *TidbClusterConfig) RunInHost(flag bool) *TidbClusterConfig { + val := "false" + if flag { + val = "true" + } + tc.set("pd.hostNetwork", val) + tc.set("tikv.hostNetwork", val) + tc.set("tidb.hostNetwork", val) + return tc +} + func (tc *TidbClusterConfig) UpgradePD(image string) *TidbClusterConfig { tc.PDImage = image return tc diff --git a/tests/cmd/e2e/main.go b/tests/cmd/e2e/main.go index a2cf73670f..63c185a814 100644 --- a/tests/cmd/e2e/main.go +++ b/tests/cmd/e2e/main.go @@ -115,6 +115,16 @@ func main() { UpdateTiDBTokenLimit(cfg.TiDBTokenLimit) oa.UpgradeTidbClusterOrDie(cluster1) oa.CheckTidbClusterStatusOrDie(cluster1) + + // switch to host network + cluster1.RunInHost(true) + oa.UpgradeTidbClusterOrDie(cluster1) + oa.CheckTidbClusterStatusOrDie(cluster1) + + // switch to pod network + cluster1.RunInHost(false) + oa.UpgradeTidbClusterOrDie(cluster1) + oa.CheckTidbClusterStatusOrDie(cluster1) } fn2 := func(wg *sync.WaitGroup) { defer wg.Done()