@@ -11020,7 +11020,7 @@ string
- Name of the PD Micro Service
+Name of the PD microservice
|
@@ -11068,7 +11068,7 @@ ServiceSpec
(Optional)
- Service defines a Kubernetes service of PD Micro Service cluster.
+ Service defines a Kubernetes service of PD microservice cluster.
Optional: Defaults to .spec.services in favor of backward compatibility
|
@@ -11096,7 +11096,7 @@ PDConfigWraper
(Optional)
- Config is the Configuration of pd Micro Service servers
+Config is the Configuration of PD microservice servers
|
@@ -11145,7 +11145,7 @@ string
(Optional)
- The storageClassName of the persistent volume for PD Micro Service log storage.
+ The storageClassName of the persistent volume for PD microservice log storage.
Defaults to Kubernetes default storage class.
|
@@ -11160,7 +11160,7 @@ Defaults to Kubernetes default storage class.
(Optional)
- StorageVolumes configure additional storage for PD Micro Service pods.
+StorageVolumes configure additional storage for PD microservice pods.
|
@@ -11182,7 +11182,7 @@ int
TidbClusterStatus)
-
PDMSStatus is PD Micro Service Status
+PDMSStatus is PD microservice status
diff --git a/pkg/apis/pingcap/v1alpha1/openapi_generated.go b/pkg/apis/pingcap/v1alpha1/openapi_generated.go
index 9b7d3e28c47..488bd301d81 100644
--- a/pkg/apis/pingcap/v1alpha1/openapi_generated.go
+++ b/pkg/apis/pingcap/v1alpha1/openapi_generated.go
@@ -5592,7 +5592,7 @@ func schema_pkg_apis_pingcap_v1alpha1_PDMSSpec(ref common.ReferenceCallback) com
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
- Description: "PDMSSpec contains details of PD Micro Service",
+ Description: "PDMSSpec contains details of PD microservice",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"version": {
@@ -5933,7 +5933,7 @@ func schema_pkg_apis_pingcap_v1alpha1_PDMSSpec(ref common.ReferenceCallback) com
},
"name": {
SchemaProps: spec.SchemaProps{
- Description: "Name of the PD Micro Service",
+ Description: "Name of the PD microservice",
Default: "",
Type: []string{"string"},
Format: "",
@@ -5963,7 +5963,7 @@ func schema_pkg_apis_pingcap_v1alpha1_PDMSSpec(ref common.ReferenceCallback) com
},
"service": {
SchemaProps: spec.SchemaProps{
- Description: "Service defines a Kubernetes service of PD Micro Service cluster. Optional: Defaults to `.spec.services` in favor of backward compatibility",
+ Description: "Service defines a Kubernetes service of PD microservice cluster. Optional: Defaults to `.spec.services` in favor of backward compatibility",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec"),
},
},
@@ -5976,7 +5976,7 @@ func schema_pkg_apis_pingcap_v1alpha1_PDMSSpec(ref common.ReferenceCallback) com
},
"config": {
SchemaProps: spec.SchemaProps{
- Description: "Config is the Configuration of pd Micro Service servers",
+ Description: "Config is the Configuration of PD microservice servers",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDConfigWraper"),
},
},
@@ -6003,14 +6003,14 @@ func schema_pkg_apis_pingcap_v1alpha1_PDMSSpec(ref common.ReferenceCallback) com
},
"storageClassName": {
SchemaProps: spec.SchemaProps{
- Description: "The storageClassName of the persistent volume for PD Micro Service log storage. Defaults to Kubernetes default storage class.",
+ Description: "The storageClassName of the persistent volume for PD microservice log storage. Defaults to Kubernetes default storage class.",
Type: []string{"string"},
Format: "",
},
},
"storageVolumes": {
SchemaProps: spec.SchemaProps{
- Description: "StorageVolumes configure additional storage for PD Micro Service pods.",
+ Description: "StorageVolumes configure additional storage for PD microservice pods.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
diff --git a/pkg/apis/pingcap/v1alpha1/tidbcluster.go b/pkg/apis/pingcap/v1alpha1/tidbcluster.go
index fb0b7f75a5b..189f32743ee 100644
--- a/pkg/apis/pingcap/v1alpha1/tidbcluster.go
+++ b/pkg/apis/pingcap/v1alpha1/tidbcluster.go
@@ -98,7 +98,7 @@ func (tc *TidbCluster) PDVersion() string {
return getImageVersion(tc.PDImage())
}
-// PDMSImage return the image used by specified PD MicroService.
+// PDMSImage returns the image used by the specified PD microservice.
//
// If PDMS isn't specified, return empty string.
func (tc *TidbCluster) PDMSImage(spec *PDMSSpec) string {
@@ -127,7 +127,7 @@ func (tc *TidbCluster) PDMSImage(spec *PDMSSpec) string {
return image
}
-// PDMSVersion return the image version used by specified PD MicroService.
+// PDMSVersion returns the image version used by the specified PD microservice.
//
// If PDMS isn't specified, return empty string.
func (tc *TidbCluster) PDMSVersion(name string) string {
diff --git a/pkg/apis/pingcap/v1alpha1/types.go b/pkg/apis/pingcap/v1alpha1/types.go
index 278f90efd4f..efc11c2f82c 100644
--- a/pkg/apis/pingcap/v1alpha1/types.go
+++ b/pkg/apis/pingcap/v1alpha1/types.go
@@ -600,12 +600,12 @@ type PDSpec struct {
}
// +k8s:openapi-gen=true
-// PDMSSpec contains details of PD Micro Service
+// PDMSSpec contains details of PD microservice
type PDMSSpec struct {
ComponentSpec `json:",inline"`
corev1.ResourceRequirements `json:",inline"`
- // Name of the PD Micro Service
+ // Name of the PD microservice
// +kubebuilder:validation:Enum:="tso";"scheduling"
Name string `json:"name"`
@@ -621,7 +621,7 @@ type PDMSSpec struct {
// +optional
BaseImage *string `json:"baseImage"`
- // Service defines a Kubernetes service of PD Micro Service cluster.
+ // Service defines a Kubernetes service of PD microservice cluster.
// Optional: Defaults to `.spec.services` in favor of backward compatibility
// +optional
Service *ServiceSpec `json:"service,omitempty"`
@@ -632,7 +632,7 @@ type PDMSSpec struct {
// +optional
MaxFailoverCount *int32 `json:"maxFailoverCount,omitempty"`
- // Config is the Configuration of pd Micro Service servers
+ // Config is the Configuration of PD microservice servers
// +optional
// +kubebuilder:validation:Schemaless
// +kubebuilder:validation:XPreserveUnknownFields
@@ -652,12 +652,12 @@ type PDMSSpec struct {
// +kubebuilder:validation:Enum:="";"v1"
StartUpScriptVersion string `json:"startUpScriptVersion,omitempty"`
- // The storageClassName of the persistent volume for PD Micro Service log storage.
+ // The storageClassName of the persistent volume for PD microservice log storage.
// Defaults to Kubernetes default storage class.
// +optional
StorageClassName *string `json:"storageClassName,omitempty"`
- // StorageVolumes configure additional storage for PD Micro Service pods.
+ // StorageVolumes configure additional storage for PD microservice pods.
// +optional
StorageVolumes []StorageVolume `json:"storageVolumes,omitempty"`
@@ -1522,7 +1522,7 @@ type PDStatus struct {
VolReplaceInProgress bool `json:"volReplaceInProgress,omitempty"`
}
-// PDMSStatus is PD Micro Service Status
+// PDMSStatus is PD microservice status
type PDMSStatus struct {
Name string `json:"name,omitempty"`
// +optional
diff --git a/pkg/manager/member/pd_member_manager.go b/pkg/manager/member/pd_member_manager.go
index f4d14d9fe6f..77526dd8856 100644
--- a/pkg/manager/member/pd_member_manager.go
+++ b/pkg/manager/member/pd_member_manager.go
@@ -83,7 +83,7 @@ func (m *pdMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
if (tc.Spec.PD.Mode == "ms" && tc.Spec.PDMS == nil) ||
(tc.Spec.PDMS != nil && tc.Spec.PD.Mode != "ms") {
- klog.Infof("tidbcluster: [%s/%s]'s enable micro service failed, please check `PD.Mode` and `PDMS`", tc.GetNamespace(), tc.GetName())
+ klog.Infof("tidbcluster: [%s/%s]'s enable microservice failed, please check `PD.Mode` and `PDMS`", tc.GetNamespace(), tc.GetName())
}
// skip sync if pd is suspended
diff --git a/pkg/manager/member/pd_ms_member_manager.go b/pkg/manager/member/pd_ms_member_manager.go
index 9db67ca571a..9f1993991ab 100644
--- a/pkg/manager/member/pd_ms_member_manager.go
+++ b/pkg/manager/member/pd_ms_member_manager.go
@@ -56,14 +56,14 @@ func NewPDMSMemberManager(dependencies *controller.Dependencies, pdMSScaler Scal
}
}
-// Sync for all PD Micro Service components.
+// Sync for all PD microservice components.
func (m *pdMSMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
// Need to start PD API
if tc.Spec.PDMS != nil && tc.Spec.PD == nil {
- klog.Infof("PD Micro Service is enabled, but PD is not enabled, skip syncing PD Micro Service")
+ klog.Infof("PD microservice is enabled, but PD is not enabled, skip syncing PD microservice")
return nil
}
- // remove all micro service components if PDMS is not enabled
+ // remove all microservice components if PDMS is not enabled
// PDMS need to be enabled when PD.Mode is ms && PDMS is not nil
if tc.Spec.PDMS == nil || (tc.Spec.PD != nil && tc.Spec.PD.Mode != "ms") {
for _, comp := range tc.Status.PDMS {
@@ -95,7 +95,7 @@ func (m *pdMSMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
return nil
}
- // init PD Micro Service status
+ // init PD microservice status
if tc.Status.PDMS == nil {
tc.Status.PDMS = make(map[string]*v1alpha1.PDMSStatus)
}
@@ -115,10 +115,10 @@ func (m *pdMSMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
return nil
}
-// syncSingleService for single PD Micro Service components.
+// syncSingleService for single PD microservice components.
func (m *pdMSMemberManager) syncSingleService(tc *v1alpha1.TidbCluster, curSpec *v1alpha1.PDMSSpec) error {
curService := curSpec.Name
- // Skip sync if PD Micro Service is suspended
+ // Skip sync if PD microservice is suspended
componentMemberType := v1alpha1.PDMSMemberType(curService)
needSuspend, err := m.suspender.SuspendComponent(tc, componentMemberType)
if err != nil {
@@ -129,17 +129,17 @@ func (m *pdMSMemberManager) syncSingleService(tc *v1alpha1.TidbCluster, curSpec
return nil
}
- // Sync PD Micro Service
+ // Sync PD microservice
if err := m.syncPDMSService(tc, curSpec); err != nil {
return err
}
- // Sync PD Micro Service Headless Service
+ // Sync PD microservice headless service
if err := m.syncPDMSHeadlessService(tc, curSpec); err != nil {
return err
}
- // Sync PD Micro Service StatefulSet
+ // Sync PD microservice StatefulSet
return m.syncPDMSStatefulSet(tc, curSpec)
}
diff --git a/pkg/manager/member/pd_ms_scaler.go b/pkg/manager/member/pd_ms_scaler.go
index 90b6b5dfe6f..165130f0cc5 100644
--- a/pkg/manager/member/pd_ms_scaler.go
+++ b/pkg/manager/member/pd_ms_scaler.go
@@ -28,7 +28,7 @@ type pdMSScaler struct {
generalScaler
}
-// NewPDMSScaler returns a PD Micro Service Scaler.
+// NewPDMSScaler returns a PD microservice scaler.
func NewPDMSScaler(deps *controller.Dependencies) *pdMSScaler {
return &pdMSScaler{generalScaler: generalScaler{deps: deps}}
}
diff --git a/pkg/manager/member/pd_ms_upgrader.go b/pkg/manager/member/pd_ms_upgrader.go
index 3c99ad64999..d9b72378a2d 100644
--- a/pkg/manager/member/pd_ms_upgrader.go
+++ b/pkg/manager/member/pd_ms_upgrader.go
@@ -31,7 +31,7 @@ type pdMSUpgrader struct {
deps *controller.Dependencies
}
-// NewPDMSUpgrader returns a PD Micro Service Upgrader
+// NewPDMSUpgrader returns a PD microservice upgrader.
func NewPDMSUpgrader(deps *controller.Dependencies) Upgrader {
return &pdMSUpgrader{
deps: deps,
@@ -129,7 +129,7 @@ func (u *pdMSUpgrader) gracefulUpgrade(tc *v1alpha1.TidbCluster, oldSet *apps.St
func (u *pdMSUpgrader) upgradePDMSPod(tc *v1alpha1.TidbCluster, ordinal int32, newSet *apps.StatefulSet, curService string) error {
// Only support after `8.3.0` to keep compatibility.
- if check, err := pdMSSupportMicroServicesWithName.Check(tc.PDMSVersion(curService)); check && err == nil {
+ if check, err := pdMSSupportMicroservicesWithName.Check(tc.PDMSVersion(curService)); check && err == nil {
ns := tc.GetNamespace()
tcName := tc.GetName()
upgradePDMSName := PDMSName(tcName, ordinal, tc.Namespace, tc.Spec.ClusterDomain, tc.Spec.AcrossK8s, curService)
@@ -206,9 +206,9 @@ func choosePDMSToTransferFromMembers(tc *v1alpha1.TidbCluster, newSet *apps.Stat
return targetName
}
-// PDMSSupportMicroServicesWithName returns true if the given version of PDMS supports microservices with name.
+// pdMSSupportMicroservicesWithName checks if the given version of PDMS supports microservices with name.
// related https://github.com/tikv/pd/pull/8157.
-var pdMSSupportMicroServicesWithName, _ = cmpver.NewConstraint(cmpver.GreaterOrEqual, "v8.3.0")
+var pdMSSupportMicroservicesWithName, _ = cmpver.NewConstraint(cmpver.GreaterOrEqual, "v8.3.0")
type fakePDMSUpgrader struct{}
diff --git a/pkg/manager/member/pvc_resizer.go b/pkg/manager/member/pvc_resizer.go
index 295de0a629c..cedee4963d4 100644
--- a/pkg/manager/member/pvc_resizer.go
+++ b/pkg/manager/member/pvc_resizer.go
@@ -203,7 +203,7 @@ func (p *pvcResizer) buildContextForTC(tc *v1alpha1.TidbCluster, status v1alpha1
ctx.desiredVolumeQuantity[v1alpha1.GetStorageVolumeName("", v1alpha1.PDMemberType)] = quantity
}
storageVolumes = tc.Spec.PD.StorageVolumes
- // PD Micro Service
+ // PD microservice
case v1alpha1.PDMSTSOMemberType, v1alpha1.PDMSSchedulingMemberType:
pdmsRequirement := util.MustNewRequirement(label.ComponentLabelKey, selection.Equals, []string{label.PDMSLabel(comp.String())})
ctx.selector = selector.Add(*pdmsRequirement)
diff --git a/pkg/manager/member/startscript/v2/common.go b/pkg/manager/member/startscript/v2/common.go
index 455becce1e6..ed8823c6601 100644
--- a/pkg/manager/member/startscript/v2/common.go
+++ b/pkg/manager/member/startscript/v2/common.go
@@ -42,7 +42,7 @@ fi
`
dnsAwaitPart = "<>"
- pdEnableMicroService = "<>"
+ pdEnableMicroservice = "<>"
componentCommonWaitForDnsIpMatchScript = `
elapseTime=0
diff --git a/pkg/manager/member/startscript/v2/pd_start_script.go b/pkg/manager/member/startscript/v2/pd_start_script.go
index 8cfe4bb2dcd..27e2d4969c3 100644
--- a/pkg/manager/member/startscript/v2/pd_start_script.go
+++ b/pkg/manager/member/startscript/v2/pd_start_script.go
@@ -42,7 +42,7 @@ type PDStartScriptModel struct {
PDInitWaitTime int
}
-// PDMSStartScriptModel contain fields for rendering PD Micro Service start script
+// PDMSStartScriptModel contains fields for rendering the PD microservice start script
type PDMSStartScriptModel struct {
PDStartTimeout int
PDInitWaitTime int
@@ -112,7 +112,7 @@ func RenderPDStartScript(tc *v1alpha1.TidbCluster) (string, error) {
componentCommonScript +
replacePdStartScriptCustomPorts(
replacePdStartScriptDnsAwaitPart(waitForDnsNameIpMatchOnStartup,
- enableMicroServiceModeDynamic(mode, pdStartScript)))),
+ enableMicroserviceModeDynamic(mode, pdStartScript)))),
)
return renderTemplateFunc(pdStartScriptTpl, m)
@@ -138,7 +138,7 @@ func renderPDMSStartScript(tc *v1alpha1.TidbCluster, name string) (string, error
m.PDMSDomain = m.PDMSDomain + "." + tc.Spec.ClusterDomain
}
- if check, err := pdMSSupportMicroServicesWithName.Check(tc.PDMSVersion(name)); check && err == nil {
+ if check, err := pdMSSupportMicroservicesWithName.Check(tc.PDMSVersion(name)); check && err == nil {
m.PDMSName = "${PDMS_POD_NAME}"
if tc.Spec.ClusterDomain != "" {
m.PDMSName = m.PDMSDomain
@@ -185,7 +185,7 @@ func renderPDMSStartScript(tc *v1alpha1.TidbCluster, name string) (string, error
componentCommonScript +
replacePdStartScriptCustomPorts(
replacePdStartScriptDnsAwaitPart(waitForDnsNameIpMatchOnStartup,
- enableMicroServiceModeDynamic(name, pdmsStartScriptTplText)))))
+ enableMicroserviceModeDynamic(name, pdmsStartScriptTplText)))))
return renderTemplateFunc(msStartScriptTpl, m)
}
@@ -202,7 +202,7 @@ sleep initWaitTime
nsLookupCmd="dig ${componentDomain} A ${componentDomain} AAAA +search +short"
` + componentCommonWaitForDnsIpMatchScript
- pdEnableMicroServiceSubScript = "services"
+ pdEnableMicroserviceSubScript = "services"
pdWaitForDnsOnlySubScript = `
@@ -241,7 +241,7 @@ done
PD_POD_NAME=${POD_NAME:-$HOSTNAME}
PD_DOMAIN={{ .PDDomain }}` +
dnsAwaitPart + `
-ARGS="` + pdEnableMicroService + `--data-dir={{ .DataDir }} \
+ARGS="` + pdEnableMicroservice + `--data-dir={{ .DataDir }} \
--name={{ .PDName }} \
--peer-urls={{ .PeerURL }} \
--advertise-peer-urls={{ .AdvertisePeerURL }} \
@@ -295,7 +295,7 @@ PD_DOMAIN={{ .PDMSDomain }}` +
dnsAwaitPart + `
{{- if .AcrossK8s -}} {{ template "AcrossK8sSubscript" . }} {{- end }}
-ARGS="` + pdEnableMicroService + `--listen-addr={{ .ListenAddr }} \
+ARGS="` + pdEnableMicroservice + `--listen-addr={{ .ListenAddr }} \
--advertise-listen-addr={{ .AdvertiseListenAddr }} \
--backend-endpoints={{ .PDAddresses }} \
--config=/etc/pd/pd.toml \
@@ -330,15 +330,15 @@ func replacePdStartScriptDnsAwaitPart(withLocalIpMatch bool, startScript string)
// - for `PD API` service, startParams should be `api`
// - for `TSO` and `Scheduling`, startParams should be `tso` and `scheduling` respectively.
// NOTICE: in `8.3.0` we have supported `name` start parameter, so we will pass `tso name=${PDMS_POD_NAME}` to startParams.
-func enableMicroServiceModeDynamic(startParams string, startScript string) string {
+func enableMicroserviceModeDynamic(startParams string, startScript string) string {
if startParams != "" {
- return strings.ReplaceAll(startScript, pdEnableMicroService, fmt.Sprintf(" %s %s ", pdEnableMicroServiceSubScript, startParams))
+ return strings.ReplaceAll(startScript, pdEnableMicroservice, fmt.Sprintf(" %s %s ", pdEnableMicroserviceSubScript, startParams))
} else {
// for original `PD`, should be empty.
- return strings.ReplaceAll(startScript, pdEnableMicroService, "")
+ return strings.ReplaceAll(startScript, pdEnableMicroservice, "")
}
}
-// PDMSSupportMicroServicesWithName returns true if the given version of PDMS supports microservices with name.
+// pdMSSupportMicroservicesWithName checks if the given version of PDMS supports microservices with name.
// related https://github.com/tikv/pd/pull/8461.
-var pdMSSupportMicroServicesWithName, _ = cmpver.NewConstraint(cmpver.GreaterOrEqual, "v8.3.0")
+var pdMSSupportMicroservicesWithName, _ = cmpver.NewConstraint(cmpver.GreaterOrEqual, "v8.3.0")
diff --git a/pkg/manager/member/startscript/v2/pd_start_script_test.go b/pkg/manager/member/startscript/v2/pd_start_script_test.go
index 703cd994eda..4b61c941d50 100644
--- a/pkg/manager/member/startscript/v2/pd_start_script_test.go
+++ b/pkg/manager/member/startscript/v2/pd_start_script_test.go
@@ -922,19 +922,19 @@ func TestPDMSWithName(t *testing.T) {
for _, spec := range tc.Spec.PDMS {
spec.Image = "pingcap/pd:v8.2.0"
}
- check, err := pdMSSupportMicroServicesWithName.Check(tc.PDMSVersion("tso"))
+ check, err := pdMSSupportMicroservicesWithName.Check(tc.PDMSVersion("tso"))
re.Nil(err)
re.False(check)
for _, spec := range tc.Spec.PDMS {
spec.Image = "pingcap/pd:v8.3.0"
}
- check, err = pdMSSupportMicroServicesWithName.Check(tc.PDMSVersion("tso"))
+ check, err = pdMSSupportMicroservicesWithName.Check(tc.PDMSVersion("tso"))
re.Nil(err)
re.True(check)
for _, spec := range tc.Spec.PDMS {
spec.Image = "pingcap/pd:v9.1.0"
}
- check, err = pdMSSupportMicroServicesWithName.Check(tc.PDMSVersion("tso"))
+ check, err = pdMSSupportMicroservicesWithName.Check(tc.PDMSVersion("tso"))
re.Nil(err)
re.True(check)
}
diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go
index 58e20aaafbd..61de2ea74b4 100644
--- a/pkg/manager/member/tikv_member_manager.go
+++ b/pkg/manager/member/tikv_member_manager.go
@@ -112,12 +112,12 @@ func (m *tikvMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
if tc.Spec.PD != nil && !tc.PDIsAvailable() {
return controller.RequeueErrorf("TidbCluster: [%s/%s], waiting for PD cluster running", ns, tcName)
}
- // Check if all PD Micro Services are available
+ // Check if all PD microservices are available
if tc.Spec.PDMS != nil && (tc.Spec.PD != nil && tc.Spec.PD.Mode == "ms") {
for _, pdms := range tc.Spec.PDMS {
if cli := controller.GetPDMSClient(m.deps.PDControl, tc, pdms.Name); cli == nil {
return controller.RequeueErrorf("PDMS component %s for TidbCluster: [%s/%s], "+
- "waiting for PD micro service cluster running", pdms.Name, ns, tcName)
+ "waiting for PD microservice cluster running", pdms.Name, ns, tcName)
}
}
}
diff --git a/pkg/pdapi/pdapi.go b/pkg/pdapi/pdapi.go
index 4a633d6906d..25f24bed385 100644
--- a/pkg/pdapi/pdapi.go
+++ b/pkg/pdapi/pdapi.go
@@ -94,9 +94,9 @@ type PDClient interface {
GetAutoscalingPlans(strategy Strategy) ([]Plan, error)
// GetRecoveringMark return the pd recovering mark
GetRecoveringMark() (bool, error)
- // GetMSMembers returns all PDMS members service-addr from cluster by specific Micro Service
+ // GetMSMembers returns all PDMS members service-addr from cluster by specific microservice
GetMSMembers(service string) ([]string, error)
- // GetMSPrimary returns the primary PDMS member service-addr from cluster by specific Micro Service
+ // GetMSPrimary returns the primary PDMS member service-addr from cluster by specific microservice
GetMSPrimary(service string) (string, error)
}
@@ -116,8 +116,8 @@ var (
evictLeaderSchedulerConfigPrefix = "pd/api/v1/scheduler-config/evict-leader-scheduler/list"
autoscalingPrefix = "autoscaling"
recoveringMarkPrefix = "pd/api/v1/admin/cluster/markers/snapshot-recovering"
- // Micro Service
- MicroServicePrefix = "pd/api/v2/ms"
+ // microservice
+ MicroservicePrefix = "pd/api/v2/ms"
)
// pdClient is default implementation of PDClient
@@ -200,7 +200,7 @@ type MembersInfo struct {
EtcdLeader *pdpb.Member `json:"etcd_leader,omitempty"`
}
-// ServiceRegistryEntry is the registry entry of PD Micro Service
+// ServiceRegistryEntry is the registry entry of a PD microservice.
type ServiceRegistryEntry struct {
ServiceAddr string `json:"service-addr"`
Version string `json:"version"`
@@ -326,7 +326,7 @@ func (c *pdClient) GetMembers() (*MembersInfo, error) {
}
func (c *pdClient) GetMSMembers(service string) ([]string, error) {
- apiURL := fmt.Sprintf("%s/%s/members/%s", c.url, MicroServicePrefix, service)
+ apiURL := fmt.Sprintf("%s/%s/members/%s", c.url, MicroservicePrefix, service)
body, err := httputil.GetBodyOK(c.httpClient, apiURL)
if err != nil {
return nil, err
@@ -344,7 +344,7 @@ func (c *pdClient) GetMSMembers(service string) ([]string, error) {
}
func (c *pdClient) GetMSPrimary(service string) (string, error) {
- apiURL := fmt.Sprintf("%s/%s/primary/%s", c.url, MicroServicePrefix, service)
+ apiURL := fmt.Sprintf("%s/%s/primary/%s", c.url, MicroservicePrefix, service)
body, err := httputil.GetBodyOK(c.httpClient, apiURL)
if err != nil {
return "", err