Skip to content

Commit

Permalink
Merge branch 'master' into pr3236
Browse files Browse the repository at this point in the history
  • Loading branch information
shikanime authored Nov 14, 2024
2 parents 5eff77d + 470b53c commit 47c542e
Show file tree
Hide file tree
Showing 188 changed files with 8,911 additions and 3,344 deletions.
63 changes: 63 additions & 0 deletions .github/workflows/backport-pr.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
name: Link-Backport-PR-Issue

on:
  pull_request:
    types: [opened]
    branches:
      - master
      - "v*"

jobs:
  check-backport:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Check if PR is a backport
        env:
          # SECURITY: PR titles are attacker-controlled. Pass them through env
          # instead of interpolating ${{ }} directly into the script, which
          # would allow shell injection.
          PR_TITLE: ${{ github.event.pull_request.title }}
        run: |
          if [[ "$PR_TITLE" =~ "backport #" ]]; then
            echo "BACKPORT=true" >> $GITHUB_ENV
          else
            echo "BACKPORT=false" >> $GITHUB_ENV
          fi

      - name: Extract backport branch and issue number
        if: env.BACKPORT == 'true'
        env:
          # SECURITY: PR bodies are attacker-controlled as well; mediate
          # through env for the same reason as the title above.
          PR_BODY: ${{ github.event.pull_request.body }}
        run: |
          # Extract branch from the target branch of the PR
          BRANCH="${{ github.event.pull_request.base.ref }}"
          BRANCH=${BRANCH%.x} # Remove the '.x' suffix
          BRANCH=$(echo "${BRANCH}" | sed 's/\./\\./g') # Escape periods
          echo "BRANCH=$BRANCH" >> $GITHUB_ENV
          # Extract issue number from the PR description
          ORIGINAL_ISSUE_NUMBER=$(echo "$PR_BODY" | grep -oE 'issues/[0-9]+' | cut -d'/' -f2)
          echo "ORIGINAL_ISSUE_NUMBER=$ORIGINAL_ISSUE_NUMBER" >> $GITHUB_ENV

      - name: Get the original issue
        if: env.BACKPORT == 'true'
        id: original-issue
        # NOTE(review): the action version was mangled by the page scrape
        # ("[email protected]"); v2.x matches the action's documented usage —
        # TODO confirm against the repository history.
        uses: octokit/request-action@v2.x
        with:
          route: GET /repos/longhorn/longhorn/issues/${{ env.ORIGINAL_ISSUE_NUMBER }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: URL encode the original issue title
        if: env.BACKPORT == 'true'
        env:
          # SECURITY: issue titles are attacker-controlled; read them via
          # process.env rather than splicing into the `node -e` source, which
          # would allow JavaScript injection.
          ISSUE_TITLE: ${{ fromJson(steps.original-issue.outputs.data).title }}
        run: echo "ORIGINAL_ISSUE_TITLE=$(node -e 'console.log(encodeURIComponent(process.env.ISSUE_TITLE))')" >> $GITHUB_ENV

      - name: Find corresponding backport issue number
        if: env.BACKPORT == 'true'
        run: |
          BACKPORT_ISSUE_NUMBER=$(curl -s "https://api.github.com/search/issues?q=repo:longhorn/longhorn+is:open+is:issue+in:title+${{ env.BRANCH }}+${{ env.ORIGINAL_ISSUE_TITLE }}" | jq .items[0].number)
          echo "BACKPORT_ISSUE_NUMBER=$BACKPORT_ISSUE_NUMBER" >> $GITHUB_ENV

      - name: Link the PR with the corresponding backport issue
        if: env.BACKPORT == 'true'
        run: |
          # Relate the pull request to the backport issue
          gh issue comment --repo longhorn/longhorn ${{ env.BACKPORT_ISSUE_NUMBER }} --body "Related PR: ${{ github.event.pull_request.html_url }}"
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
10 changes: 1 addition & 9 deletions app/daemon.go
Original file line number Diff line number Diff line change
Expand Up @@ -254,15 +254,7 @@ func startManager(c *cli.Context) error {

server := api.NewServer(m, wsc)
router := http.Handler(api.NewRouter(server))
router = util.FilteredLoggingHandler(map[string]struct{}{
"/v1/apiversions": {},
"/v1/schemas": {},
"/v1/settings": {},
"/v1/volumes": {},
"/v1/nodes": {},
"/v1/engineimages": {},
"/v1/events": {},
}, os.Stdout, router)
router = util.FilteredLoggingHandler(os.Stdout, router)
router = handlers.ProxyHeaders(router)

listen := types.GetAPIServerAddressFromIP(currentIP)
Expand Down
35 changes: 23 additions & 12 deletions app/driver.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,6 @@ func DeployDriverCmd() cli.Command {
Name: FlagCSIAttacherImage,
Usage: "Specify CSI attacher image",
EnvVar: EnvCSIAttacherImage,
Value: csi.DefaultCSIAttacherImage,
},
cli.IntFlag{
Name: FlagCSIAttacherReplicaCount,
Expand All @@ -88,7 +87,6 @@ func DeployDriverCmd() cli.Command {
Name: FlagCSIProvisionerImage,
Usage: "Specify CSI provisioner image",
EnvVar: EnvCSIProvisionerImage,
Value: csi.DefaultCSIProvisionerImage,
},
cli.IntFlag{
Name: FlagCSIProvisionerReplicaCount,
Expand All @@ -100,7 +98,6 @@ func DeployDriverCmd() cli.Command {
Name: FlagCSIResizerImage,
Usage: "Specify CSI resizer image",
EnvVar: EnvCSIResizerImage,
Value: csi.DefaultCSIResizerImage,
},
cli.IntFlag{
Name: FlagCSIResizerReplicaCount,
Expand All @@ -112,7 +109,6 @@ func DeployDriverCmd() cli.Command {
Name: FlagCSISnapshotterImage,
Usage: "Specify CSI snapshotter image",
EnvVar: EnvCSISnapshotterImage,
Value: csi.DefaultCSISnapshotterImage,
},
cli.IntFlag{
Name: FlagCSISnapshotterReplicaCount,
Expand All @@ -124,36 +120,51 @@ func DeployDriverCmd() cli.Command {
Name: FlagCSINodeDriverRegistrarImage,
Usage: "Specify CSI node-driver-registrar image",
EnvVar: EnvCSINodeDriverRegistrarImage,
Value: csi.DefaultCSINodeDriverRegistrarImage,
},
cli.StringFlag{
Name: FlagCSILivenessProbeImage,
Usage: "Specify CSI liveness probe image",
EnvVar: EnvCSILivenessProbeImage,
Value: csi.DefaultCSILivenessProbeImage,
},
cli.StringFlag{
Name: FlagKubeConfig,
Usage: "Specify path to kube config (optional)",
},
},
Action: func(c *cli.Context) {
if err := validateFlags(c); err != nil {
logrus.Fatalf("Error validating flags: %v", err)
}

if err := deployDriver(c); err != nil {
logrus.Fatalf("Error deploying driver: %v", err)
}
},
}
}

// validateFlags ensures every required deploy-driver flag carries a
// non-empty value (whether supplied on the command line or via its
// environment variable), returning an error that names the first empty
// flag it encounters.
func validateFlags(c *cli.Context) error {
	required := []string{
		FlagManagerImage,
		FlagManagerURL,
		FlagCSIAttacherImage,
		FlagCSIProvisionerImage,
		FlagCSIResizerImage,
		FlagCSISnapshotterImage,
		FlagCSINodeDriverRegistrarImage,
		FlagCSILivenessProbeImage,
	}

	for _, name := range required {
		if value := c.String(name); value == "" {
			return fmt.Errorf("%q cannot be empty", name)
		}
	}

	return nil
}

func deployDriver(c *cli.Context) error {
managerImage := c.String(FlagManagerImage)
if managerImage == "" {
return fmt.Errorf("require %v", FlagManagerImage)
}
managerURL := c.String(FlagManagerURL)
if managerURL == "" {
return fmt.Errorf("require %v", FlagManagerURL)
}

config, err := clientcmd.BuildConfigFromFlags("", c.String(FlagKubeConfig))
if err != nil {
Expand Down
6 changes: 5 additions & 1 deletion app/post_upgrade.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,11 @@ func postUpgrade(c *cli.Context) error {
if err != nil {
return errors.Wrap(err, "failed to create event broadcaster")
}
defer eventBroadcaster.Shutdown()
defer func() {
eventBroadcaster.Shutdown()
// Allow a little time for the event to flush, but not greatly delay response to the calling job.
time.Sleep(5 * time.Second)
}()

scheme := runtime.NewScheme()
if err := longhorn.SchemeBuilder.AddToScheme(scheme); err != nil {
Expand Down
8 changes: 7 additions & 1 deletion app/pre_upgrade.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
package app

import (
"time"

"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
Expand Down Expand Up @@ -61,7 +63,11 @@ func preUpgrade(c *cli.Context) error {
if err != nil {
return errors.Wrap(err, "failed to create event broadcaster")
}
defer eventBroadcaster.Shutdown()
defer func() {
eventBroadcaster.Shutdown()
// Allow a little time for the event to flush, but not greatly delay response to the calling job.
time.Sleep(5 * time.Second)
}()

scheme := runtime.NewScheme()
if err := longhorn.SchemeBuilder.AddToScheme(scheme); err != nil {
Expand Down
105 changes: 101 additions & 4 deletions controller/node_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
"github.com/pkg/errors"
"github.com/rancher/lasso/pkg/log"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
Expand All @@ -25,6 +26,8 @@ import (
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"

iscsiutil "github.com/longhorn/go-iscsi-helper/util"

lhexec "github.com/longhorn/go-common-libs/exec"
lhio "github.com/longhorn/go-common-libs/io"
lhns "github.com/longhorn/go-common-libs/ns"
Expand All @@ -49,6 +52,7 @@ const (
unknownDiskID = "UNKNOWN_DISKID"

kernelConfigFilePathPrefix = "/host/boot/config-"
kernelConfigGzFilePath = "/proc/config.gz"

snapshotChangeEventQueueMax = 1048576
)
Expand Down Expand Up @@ -956,6 +960,9 @@ func (nc *NodeController) syncPackagesInstalled(kubeNode *corev1.Node, node *lon
pipeFlag := false

switch {
case strings.Contains(osImage, "talos"):
nc.syncPackagesInstalledTalosLinux(node, namespaces)
return
case strings.Contains(osImage, "ubuntu"):
fallthrough
case strings.Contains(osImage, "debian"):
Expand Down Expand Up @@ -1032,6 +1039,82 @@ func (nc *NodeController) syncPackagesInstalled(kubeNode *corev1.Node, node *lon
fmt.Sprintf("All required packages %v are installed on node %v", packages, node.Name))
}

// syncPackagesInstalledTalosLinux checks that the packages Longhorn requires
// are present on a Talos Linux node — cryptsetup/dmsetup probed directly,
// nfs-common probed via dpkg in the kubelet's namespaces, and iscsiadm probed
// in the iscsid process's namespaces — and records the aggregate result in
// the node's RequiredPackages condition (mutating node.Status.Conditions in
// place). If a namespace executor cannot be created, the condition is set to
// false with a NamespaceExecutorErr reason and the remaining checks are
// skipped.
func (nc *NodeController) syncPackagesInstalledTalosLinux(node *longhorn.Node, namespaces []lhtypes.Namespace) {
// validateCommand pairs the probe binary with the arguments used to test
// for a package's presence.
type validateCommand struct {
binary string
args []string
}

// Accumulates per-binary probe results across all namespace checks below.
packagesIsInstalled := map[string]bool{}

// Helper function to validate packages within a namespace and update node
// status if there is an error. Returns false only when the namespace
// executor itself cannot be created; a failing probe merely marks the
// package as not installed.
validatePackages := func(process string, binaryToValidateCommand map[string]validateCommand) (ok bool) {
nsexec, err := lhns.NewNamespaceExecutor(process, lhtypes.HostProcDirectory, namespaces)
if err != nil {
node.Status.Conditions = types.SetCondition(
node.Status.Conditions, longhorn.NodeConditionTypeRequiredPackages, longhorn.ConditionStatusFalse,
string(longhorn.NodeConditionReasonNamespaceExecutorErr), fmt.Sprintf("Failed to get namespace executor: %v", err.Error()),
)
return false
}

for binary, command := range binaryToValidateCommand {
_, err := nsexec.Execute(nil, command.binary, command.args, lhtypes.ExecuteDefaultTimeout)
if err != nil {
nc.logger.WithError(err).Debugf("Package %v is not found in node %v", binary, node.Name)
packagesIsInstalled[binary] = false
} else {
packagesIsInstalled[binary] = true
}
}
return true
}

// The validation commands by process.
hostPackageToValidateCmd := map[string]validateCommand{
"cryptsetup": {binary: "cryptsetup", args: []string{"--version"}},
"dmsetup": {binary: "dmsetup", args: []string{"--version"}},
}
kubeletPackageToValidateCmd := map[string]validateCommand{
"nfs-common": {binary: "dpkg", args: []string{"-s", "nfs-common"}},
}
iscsiPackageToValidateCmd := map[string]validateCommand{
"iscsiadm": {binary: "iscsiadm", args: []string{"--version"}},
}

// Check each set of packages; return immediately if there is an error.
// (Short-circuit evaluation stops at the first executor failure, whose
// condition update has already been applied inside validatePackages.)
if !validatePackages(lhtypes.ProcessNone, hostPackageToValidateCmd) ||
!validatePackages(lhns.GetDefaultProcessName(), kubeletPackageToValidateCmd) ||
!validatePackages(iscsiutil.ISCSIdProcess, iscsiPackageToValidateCmd) {
return
}

// Organize the installed and not installed packages.
installedPackages := []string{}
notInstalledPackages := []string{}
for binary, isInstalled := range packagesIsInstalled {
if isInstalled {
installedPackages = append(installedPackages, binary)
} else {
notInstalledPackages = append(notInstalledPackages, binary)
}
}

// Update node condition based on packages installed status.
if len(notInstalledPackages) > 0 {
node.Status.Conditions = types.SetCondition(
node.Status.Conditions, longhorn.NodeConditionTypeRequiredPackages, longhorn.ConditionStatusFalse,
string(longhorn.NodeConditionReasonPackagesNotInstalled), fmt.Sprintf("Missing packages: %v", notInstalledPackages),
)
} else {
node.Status.Conditions = types.SetCondition(
node.Status.Conditions, longhorn.NodeConditionTypeRequiredPackages, longhorn.ConditionStatusTrue,
"", fmt.Sprintf("All required packages %v are installed on node %v", installedPackages, node.Name),
)
}
}

func (nc *NodeController) syncMultipathd(node *longhorn.Node, namespaces []lhtypes.Namespace) {
nsexec, err := lhns.NewNamespaceExecutor(lhtypes.ProcessNone, lhtypes.HostProcDirectory, namespaces)
if err != nil {
Expand Down Expand Up @@ -1066,7 +1149,7 @@ func (nc *NodeController) checkKernelModulesLoaded(kubeNode *corev1.Node, node *
return
}

notLoadedModules, err := checkModulesLoadedByConfigFile(nc.logger, notFoundModulesUsingkmod, kubeNode.Status.NodeInfo.KernelVersion)
notLoadedModules, err := checkModulesLoadedByConfigFile(nc.logger, notFoundModulesUsingkmod, kubeNode.Status.NodeInfo.KernelVersion, namespaces)
if err != nil {
node.Status.Conditions = types.SetCondition(node.Status.Conditions, longhorn.NodeConditionTypeKernelModulesLoaded, longhorn.ConditionStatusFalse,
string(longhorn.NodeConditionReasonCheckKernelConfigFailed),
Expand Down Expand Up @@ -1101,12 +1184,26 @@ func checkModulesLoadedUsingkmod(modules map[string]string) (map[string]string,
return notFoundModules, nil
}

func checkModulesLoadedByConfigFile(log *logrus.Entry, modules map[string]string, kernelVersion string) ([]string, error) {
func checkModulesLoadedByConfigFile(log *logrus.Entry, modules map[string]string, kernelVersion string, namespaces []lhtypes.Namespace) ([]string, error) {
kernelConfigPath := kernelConfigFilePathPrefix + kernelVersion
kernelConfigContent, err := lhio.ReadFileContent(kernelConfigPath)
if err != nil {
return nil, err
if !errors.Is(err, unix.ENOENT) {
return nil, err
}

// If the kernel config file is not found, try to get it from the host proc directory
nsexec, err := lhns.NewNamespaceExecutor(lhns.GetDefaultProcessName(), lhtypes.HostProcDirectory, namespaces)
if err != nil {
return nil, err
}

kernelConfigContent, err = nsexec.Execute(nil, "zcat", []string{kernelConfigGzFilePath}, lhtypes.ExecuteDefaultTimeout)
if err != nil {
return nil, err
}
}

kernelConfigMap := getKernelModuleConfigMap(kernelConfigContent)

notLoadedModules := []string{}
Expand Down Expand Up @@ -1174,7 +1271,7 @@ func getModulesConfigsList(modulesMap map[string]string, needModules bool) []str
}

func (nc *NodeController) syncNFSClientVersion(kubeNode *corev1.Node, node *longhorn.Node, namespaces []lhtypes.Namespace) {
notLoadedModules, err := checkModulesLoadedByConfigFile(nc.logger, nfsClientVersions, kubeNode.Status.NodeInfo.KernelVersion)
notLoadedModules, err := checkModulesLoadedByConfigFile(nc.logger, nfsClientVersions, kubeNode.Status.NodeInfo.KernelVersion, namespaces)
if err != nil {
node.Status.Conditions = types.SetCondition(node.Status.Conditions, longhorn.NodeConditionTypeNFSClientInstalled, longhorn.ConditionStatusFalse,
string(longhorn.NodeConditionReasonCheckKernelConfigFailed),
Expand Down
Loading

0 comments on commit 47c542e

Please sign in to comment.