diff --git a/_run/kube/provider.yaml b/_run/kube/provider.yaml index 57371ebfe..1052c5283 100644 --- a/_run/kube/provider.yaml +++ b/_run/kube/provider.yaml @@ -11,3 +11,8 @@ attributes: value: true - key: capabilities/storage/2/class value: beta2 + - key: capabilities/storage/3/class + value: ram + - key: capabilities/storage/3/persistent + value: false + diff --git a/cluster/kube/builder/deployment.go b/cluster/kube/builder/deployment.go index f337a36b1..5769d76a2 100644 --- a/cluster/kube/builder/deployment.go +++ b/cluster/kube/builder/deployment.go @@ -54,6 +54,7 @@ func (b *deployment) Create() (*appsv1.Deployment, error) { // nolint:golint,unp AutomountServiceAccountToken: &falseValue, Containers: []corev1.Container{b.container()}, ImagePullSecrets: b.imagePullSecrets(), + Volumes: b.volumes(), }, }, }, diff --git a/cluster/kube/builder/statefulset.go b/cluster/kube/builder/statefulset.go index c5fcda50f..0b43be118 100644 --- a/cluster/kube/builder/statefulset.go +++ b/cluster/kube/builder/statefulset.go @@ -54,6 +54,7 @@ func (b *statefulSet) Create() (*appsv1.StatefulSet, error) { // nolint:golint,u AutomountServiceAccountToken: &falseValue, Containers: []corev1.Container{b.container()}, ImagePullSecrets: b.imagePullSecrets(), + Volumes: b.volumes(), }, }, VolumeClaimTemplates: b.persistentVolumeClaims(), diff --git a/cluster/kube/builder/workload.go b/cluster/kube/builder/workload.go index 48683f286..ad3d0879c 100644 --- a/cluster/kube/builder/workload.go +++ b/cluster/kube/builder/workload.go @@ -154,6 +154,42 @@ func (b *Workload) container() corev1.Container { return kcontainer } +// Return RAM volumes +func (b *Workload) volumes() []corev1.Volume { + var volumes []corev1.Volume // nolint:prealloc + + service := &b.deployment.ManifestGroup().Services[b.serviceIdx] + + for _, storage := range service.Resources.Storage { + + // Only RAM volumes + sclass, ok := storage.Attributes.Find(sdl.StorageAttributeClass).AsString() + if !ok || sclass != 
sdl.StorageClassRAM { + continue + } + + // No persistent volumes + persistent, ok := storage.Attributes.Find(sdl.StorageAttributePersistent).AsBool() + if !ok || persistent { + continue + } + + size := resource.NewQuantity(storage.Quantity.Val.Int64(), resource.DecimalSI).DeepCopy() + + volumes = append(volumes, corev1.Volume{ + Name: fmt.Sprintf("%s-%s", service.Name, storage.Name), + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + SizeLimit: &size, + }, + }, + }) + } + + return volumes +} + func (b *Workload) persistentVolumeClaims() []corev1.PersistentVolumeClaim { var pvcs []corev1.PersistentVolumeClaim // nolint:prealloc diff --git a/go.mod b/go.mod index 00ca87ded..409bdb218 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,7 @@ require ( github.com/gorilla/context v1.1.1 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 + github.com/gyuho/linux-inspect v0.0.0-20180929231013-a492bfc5f12a github.com/jaypipes/ghw v0.12.0 github.com/moby/term v0.5.0 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 70d7ab7bc..0af2fda9b 100644 --- a/go.sum +++ b/go.sum @@ -953,6 +953,8 @@ github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/gyuho/linux-inspect v0.0.0-20180929231013-a492bfc5f12a h1:rhA92KsUcMmncbNtK/0qfF8oLhVbohNkPrfPuNO2og0= +github.com/gyuho/linux-inspect v0.0.0-20180929231013-a492bfc5f12a/go.mod h1:u2l1k3IAT/RZKOAwDCcFBlG2CLl9+JE/6hWy6KBLJa0= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod 
h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul-template v0.25.0/go.mod h1:/vUsrJvDuuQHcxEw0zik+YXTS7ZKWZjQeaQhshBmfH0= diff --git a/integration/deployment_update_test.go b/integration/deployment_update_test.go index 071a66137..c4fb21db3 100644 --- a/integration/deployment_update_test.go +++ b/integration/deployment_update_test.go @@ -257,5 +257,4 @@ func (s *E2EDeploymentUpdate) TestE2ELeaseShell() { lID, 99, false, false, "notaservice", "/bin/echo", "/foo") require.Error(s.T(), err) require.Regexp(s.T(), ".*no such service exists with that name.*", err.Error()) - } diff --git a/integration/e2e_test.go b/integration/e2e_test.go index f345087dc..3a545086f 100644 --- a/integration/e2e_test.go +++ b/integration/e2e_test.go @@ -579,6 +579,7 @@ func TestIntegrationTestSuite(t *testing.T) { suite.Run(t, new(E2EPersistentStorageDefault)) suite.Run(t, new(E2EPersistentStorageBeta2)) suite.Run(t, new(E2EPersistentStorageDeploymentUpdate)) + // suite.Run(t, new(E2EStorageClassRam)) suite.Run(t, new(E2EMigrateHostname)) suite.Run(t, new(E2EJWTServer)) suite.Run(t, new(E2ECustomCurrency)) diff --git a/integration/storageclassram_test.go b/integration/storageclassram_test.go new file mode 100644 index 000000000..f04d8f37b --- /dev/null +++ b/integration/storageclassram_test.go @@ -0,0 +1,128 @@ +//go:build e2e + +package integration + +import ( + "context" + "fmt" + "path/filepath" + "time" + + "github.com/cosmos/cosmos-sdk/client/flags" + sdktest "github.com/cosmos/cosmos-sdk/testutil" + "github.com/gyuho/linux-inspect/df" + + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" + mtypes "github.com/akash-network/akash-api/go/node/market/v1beta4" + clitestutil "github.com/akash-network/node/testutil/cli" + deploycli "github.com/akash-network/node/x/deployment/client/cli" + mcli "github.com/akash-network/node/x/market/client/cli" + + ptestutil "github.com/akash-network/provider/testutil/provider" +) + 
+type E2EStorageClassRam struct { + IntegrationTestSuite +} + +func (s *E2EStorageClassRam) TestRAM() { + deploymentPath, err := filepath.Abs("../testdata/deployment/deployment-v2-storage-ram.yaml") + s.Require().NoError(err) + + deploymentID := dtypes.DeploymentID{ + Owner: s.keyTenant.GetAddress().String(), + DSeq: uint64(100), + } + + // Create Deployments + res, err := deploycli.TxCreateDeploymentExec( + s.validator.ClientCtx, + s.keyTenant.GetAddress(), + deploymentPath, + cliGlobalFlags(fmt.Sprintf("--dseq=%v", deploymentID.DSeq))..., + ) + s.Require().NoError(err) + s.Require().NoError(s.waitForBlocksCommitted(7)) + clitestutil.ValidateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes()) + + bidID := mtypes.MakeBidID( + mtypes.MakeOrderID(dtypes.MakeGroupID(deploymentID, 1), 1), + s.keyProvider.GetAddress(), + ) + + _, err = mcli.QueryBidExec(s.validator.ClientCtx, bidID) + s.Require().NoError(err) + + _, err = mcli.TxCreateLeaseExec( + s.validator.ClientCtx, + bidID, + s.keyTenant.GetAddress(), + cliGlobalFlags()..., + ) + s.Require().NoError(err) + s.Require().NoError(s.waitForBlocksCommitted(2)) + clitestutil.ValidateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes()) + + lid := bidID.LeaseID() + + // Send Manifest to Provider ---------------------------------------------- + _, err = ptestutil.TestSendManifest( + s.validator.ClientCtx.WithOutputFormat("json"), + lid.BidID(), + deploymentPath, + fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()), + fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir), + ) + s.Require().NoError(err) + s.Require().NoError(s.waitForBlocksCommitted(2)) + + extraArgs := []string{ + fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()), + fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir), + } + + logged := make(map[string]struct{}) + + cmd := `df --all --sync --block-size=1024 
--output=source,target,fstype,file,itotal,iavail,iused,ipcent,size,avail,used,pcent` + var out sdktest.BufferWriter + leaseShellCtx, cancel := context.WithTimeout(s.ctx, 2*time.Minute) + defer cancel() + + // Loop until we get a shell or the context times out + for { + select { + case <-leaseShellCtx.Done(): + // s.T().Fatalf("context is done while trying to run lease-shell: %v", leaseShellCtx.Err()) + return + default: + } + out, err = ptestutil.TestLeaseShell(leaseShellCtx, s.validator.ClientCtx.WithOutputFormat("json"), extraArgs, lid, 0, false, false, "web", cmd) + if err != nil { + _, hasBeenLogged := logged[err.Error()] + if !hasBeenLogged { + // Don't spam an error message in a test, that is very annoying + s.T().Logf("encountered %v, waiting before next attempt", err) + logged[err.Error()] = struct{}{} + } + time.Sleep(2000 * time.Millisecond) + continue // Try again until the context times out + } + s.Require().NotNil(s.T(), out) + break + } + + dfRes, err := df.Parse(out.String()) + s.Require().NoError(err) + + var found *df.Row + + for i := range dfRes { + if dfRes[i].MountedOn == "/dev/shm" { + found = &dfRes[i] + break + } + } + + s.Require().NotNil(found) + s.Require().Equal(int64(65536), found.TotalBlocks) +} diff --git a/integration/test_helpers.go b/integration/test_helpers.go index e794b43e0..2ded90c48 100644 --- a/integration/test_helpers.go +++ b/integration/test_helpers.go @@ -30,6 +30,10 @@ attributes: value: true - key: capabilities/storage/2/class value: beta2 + - key: capabilities/storage/3/persistent + value: false + - key: capabilities/storage/3/class + value: ram ` ) diff --git a/script/usd_pricing_oracle.sh b/script/usd_pricing_oracle.sh index 71da020fa..291733b77 100755 --- a/script/usd_pricing_oracle.sh +++ b/script/usd_pricing_oracle.sh @@ -50,6 +50,7 @@ STORAGE_USD_SCALE[default]=0.02 STORAGE_USD_SCALE[beta1]=0.02 STORAGE_USD_SCALE[beta2]=0.03 STORAGE_USD_SCALE[beta3]=0.04 +STORAGE_USD_SCALE[ram]=0.02 # ram storage class is for tmp 
disks like /dev/shm, making the assumption for now that pricing is the same as for regular RAM # used later for validation MAX_INT64=9223372036854775807 diff --git a/testdata/deployment/deployment-v2-storage-ram.yaml b/testdata/deployment/deployment-v2-storage-ram.yaml new file mode 100644 index 000000000..01c5a5b8e --- /dev/null +++ b/testdata/deployment/deployment-v2-storage-ram.yaml @@ -0,0 +1,41 @@ +--- +version: "2.0" +services: + web: + image: ghcr.io/ovrclk/e2e-test + expose: + - port: 8080 + as: 80 + to: + - global: true + accept: + - webdistest.localhost +# params: +# storage: +# shm: +# mount: /dev/shm +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + - size: "512Mi" +# - name: shm +# size: "256Mi" +# attributes: +# class: ram + placement: + global: + pricing: + web: + denom: uakt + amount: 10 +deployment: + web: + global: + profile: web + count: 1