diff --git a/client/client.go b/client/client.go index 5308b97b..f103d802 100644 --- a/client/client.go +++ b/client/client.go @@ -807,6 +807,8 @@ type Destination interface { // - The target does not exist in any targets // - Metadata cannot be generated for the downloaded data // - Generated metadata does not match local metadata for the given file +// - Size of the download does not match if the reported size is known and +// incorrect func (c *Client) Download(name string, dest Destination) (err error) { // delete dest if there is an error defer func() { diff --git a/client/client_test.go b/client/client_test.go index c583e2f1..327b1371 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -31,15 +31,19 @@ import ( func Test(t *testing.T) { TestingT(t) } type ClientSuite struct { - store tuf.LocalStore - repo *tuf.Repo - local LocalStore - remote *fakeRemoteStore - expiredTime time.Time - keyIDs map[string][]string + store tuf.LocalStore + repo *tuf.Repo + local LocalStore + remote RemoteStore + expiredTime time.Time + keyIDs map[string][]string + useFileStore bool + // Only used with FileStore + tmpDir string } -var _ = Suite(&ClientSuite{}) +var _ = Suite(&ClientSuite{useFileStore: false}) +var _ = Suite(&ClientSuite{useFileStore: true}) func newFakeRemoteStore() *fakeRemoteStore { return &fakeRemoteStore{ @@ -69,6 +73,66 @@ func (f *fakeRemoteStore) get(name string, store map[string]*fakeFile) (io.ReadC return file, file.size, nil } +// These are helper methods for manipulating the internals of the Stores +// because the set/delete methods are not part of the Interface, we need to +// switch on the underlying implementation. +// Also readMeta method is convenience for ease of testing. 
+func (s *ClientSuite) setRemoteMeta(path string, data []byte) error { + switch impl := s.remote.(type) { + case *fakeRemoteStore: + impl.meta[path] = newFakeFile(data) + return nil + case *FileRemoteStore: + return impl.addMeta(path, data) + default: + return fmt.Errorf("non-supported RemoteStore, got %+v", impl) + } +} + +func (s *ClientSuite) setRemoteTarget(path string, data []byte) error { + switch impl := s.remote.(type) { + case *fakeRemoteStore: + impl.targets[path] = newFakeFile(data) + return nil + case *FileRemoteStore: + return impl.addTarget(path, data) + default: + return fmt.Errorf("non-supported RemoteStore, got %+v", impl) + } +} + +func (s *ClientSuite) deleteMeta(path string) error { + switch impl := s.remote.(type) { + case *fakeRemoteStore: + delete(impl.meta, path) + return nil + case *FileRemoteStore: + return impl.deleteMeta(path) + default: + return fmt.Errorf("non-supported RemoteStore, got %+v", impl) + } +} + +func (s *ClientSuite) deleteTarget(path string) error { + switch impl := s.remote.(type) { + case *fakeRemoteStore: + delete(impl.targets, path) + return nil + case *FileRemoteStore: + return impl.deleteTarget(path) + default: + return fmt.Errorf("non-supported RemoteStore, got %+v", impl) + } +} + +func (s *ClientSuite) readMeta(name string) ([]byte, error) { + stream, _, err := s.remote.GetMeta(name) + if err != nil { + return nil, err + } + return io.ReadAll(stream) +} + func newFakeFile(b []byte) *fakeFile { return &fakeFile{buf: bytes.NewReader(b), size: int64(len(b))} } @@ -118,15 +182,28 @@ func (s *ClientSuite) SetUpTest(c *C) { c.Assert(s.repo.Commit(), IsNil) // create a remote store containing valid repo files - s.remote = newFakeRemoteStore() + if s.useFileStore { + s.remote, s.tmpDir, err = newTestFileStoreFS() + if err != nil { + c.Fatalf("failed to create new FileStore: %v", err) + } + } else { + s.remote = newFakeRemoteStore() + } s.syncRemote(c) for path, data := range targetFiles { - s.remote.targets[path] = 
newFakeFile(data) + s.setRemoteTarget(path, data) } s.expiredTime = time.Now().Add(time.Hour) } +func (s *ClientSuite) TearDownTest(c *C) { + if s.tmpDir != "" { + rmrf(s.tmpDir, c.Logf) + } +} + func (s *ClientSuite) genKey(c *C, role string) []string { ids, err := s.repo.GenKey(role) c.Assert(err, IsNil) @@ -163,7 +240,9 @@ func (s *ClientSuite) syncRemote(c *C) { meta, err := s.store.GetMeta() c.Assert(err, IsNil) for name, data := range meta { - s.remote.meta[name] = newFakeFile(data) + if err := s.setRemoteMeta(name, data); err != nil { + panic(fmt.Sprintf("setMetadata failed: %v", err)) + } } } @@ -252,7 +331,7 @@ func (s *ClientSuite) TestInitAllowsExpired(c *C) { c.Assert(s.repo.Commit(), IsNil) s.syncRemote(c) client := NewClient(MemoryLocalStore(), s.remote) - bytes, err := io.ReadAll(s.remote.meta["root.json"]) + bytes, err := s.readMeta("root.json") c.Assert(err, IsNil) s.withMetaExpired(func() { c.Assert(client.Init(bytes), IsNil) @@ -261,7 +340,7 @@ func (s *ClientSuite) TestInitAllowsExpired(c *C) { func (s *ClientSuite) TestInit(c *C) { client := NewClient(MemoryLocalStore(), s.remote) - bytes, err := io.ReadAll(s.remote.meta["root.json"]) + bytes, err := s.readMeta("root.json") c.Assert(err, IsNil) dataSigned := &data.Signed{} c.Assert(json.Unmarshal(bytes, dataSigned), IsNil) @@ -299,11 +378,11 @@ func (s *ClientSuite) TestFirstUpdate(c *C) { func (s *ClientSuite) TestMissingRemoteMetadata(c *C) { client := s.newClient(c) - delete(s.remote.meta, "targets.json") + s.deleteMeta("targets.json") _, err := client.Update() c.Assert(err, Equals, ErrMissingRemoteMetadata{"targets.json"}) - delete(s.remote.meta, "timestamp.json") + s.deleteMeta("timestamp.json") _, err = client.Update() c.Assert(err, Equals, ErrMissingRemoteMetadata{"timestamp.json"}) } @@ -774,7 +853,7 @@ func (s *ClientSuite) TestLocalExpired(c *C) { } func (s *ClientSuite) TestTimestampTooLarge(c *C) { - s.remote.meta["timestamp.json"] = newFakeFile(make([]byte, 
defaultTimestampDownloadLimit+1)) + s.setRemoteMeta("timestamp.json", make([]byte, defaultTimestampDownloadLimit+1)) _, err := s.newClient(c).Update() c.Assert(err, Equals, ErrMetaTooLarge{"timestamp.json", defaultTimestampDownloadLimit + 1, defaultTimestampDownloadLimit}) } @@ -901,8 +980,8 @@ func (s *ClientSuite) TestUpdateMixAndMatchAttack(c *C) { client := s.updatedClient(c) // grab the remote targets.json - oldTargets, ok := s.remote.meta["targets.json"] - if !ok { + oldTargets, err := s.readMeta("targets.json") + if err != nil { c.Fatal("missing remote targets.json") } @@ -912,22 +991,22 @@ func (s *ClientSuite) TestUpdateMixAndMatchAttack(c *C) { c.Assert(s.repo.Timestamp(), IsNil) c.Assert(s.repo.Commit(), IsNil) s.syncRemote(c) - newTargets, ok := s.remote.meta["targets.json"] - if !ok { + newTargets, err := s.readMeta("targets.json") + if err != nil { c.Fatal("missing remote targets.json") } - s.remote.meta["targets.json"] = oldTargets + s.setRemoteMeta("targets.json", oldTargets) // check update returns ErrWrongSize for targets.json - _, err := client.Update() - c.Assert(err, DeepEquals, ErrWrongSize{"targets.json", oldTargets.size, newTargets.size}) + _, err = client.Update() + c.Assert(err, DeepEquals, ErrWrongSize{"targets.json", int64(len(oldTargets)), int64(len(newTargets))}) // do the same but keep the size the same c.Assert(s.repo.RemoveTargetWithExpires("foo.txt", expires), IsNil) c.Assert(s.repo.Snapshot(), IsNil) c.Assert(s.repo.Timestamp(), IsNil) s.syncRemote(c) - s.remote.meta["targets.json"] = oldTargets + s.setRemoteMeta("targets.json", oldTargets) // check update returns ErrWrongHash _, err = client.Update() @@ -938,8 +1017,8 @@ func (s *ClientSuite) TestUpdateReplayAttack(c *C) { client := s.updatedClient(c) // grab the remote timestamp.json - oldTimestamp, ok := s.remote.meta["timestamp.json"] - if !ok { + oldTimestamp, err := s.readMeta("timestamp.json") + if err != nil { c.Fatal("missing remote timestamp.json") } @@ -948,12 +1027,12 
@@ func (s *ClientSuite) TestUpdateReplayAttack(c *C) { c.Assert(version > 0, Equals, true) c.Assert(s.repo.Timestamp(), IsNil) s.syncRemote(c) - _, err := client.Update() + _, err = client.Update() c.Assert(err, IsNil) c.Assert(client.timestampVer > version, Equals, true) // replace remote timestamp.json with the old one - s.remote.meta["timestamp.json"] = oldTimestamp + s.setRemoteMeta("timestamp.json", oldTimestamp) // check update returns ErrLowVersion _, err = client.Update() @@ -970,8 +1049,8 @@ func (s *ClientSuite) TestUpdateForkTimestamp(c *C) { client := s.updatedClient(c) // grab the remote timestamp.json - oldTimestamp, ok := s.remote.meta["timestamp.json"] - if !ok { + oldTimestamp, err := s.readMeta("timestamp.json") + if err != nil { c.Fatal("missing remote timestamp.json") } @@ -980,13 +1059,13 @@ func (s *ClientSuite) TestUpdateForkTimestamp(c *C) { c.Assert(version > 0, Equals, true) c.Assert(s.repo.Timestamp(), IsNil) s.syncRemote(c) - _, err := client.Update() + _, err = client.Update() c.Assert(err, IsNil) newVersion := client.timestampVer c.Assert(newVersion > version, Equals, true) // generate a new, different timestamp with the *same version* - s.remote.meta["timestamp.json"] = oldTimestamp + s.setRemoteMeta("timestamp.json", oldTimestamp) c.Assert(s.repo.Timestamp(), IsNil) c.Assert(client.timestampVer, Equals, newVersion) // double-check: same version? 
s.syncRemote(c) @@ -1098,7 +1177,7 @@ func (s *ClientSuite) TestDownloadUnknownTarget(c *C) { func (s *ClientSuite) TestDownloadNoExist(c *C) { client := s.updatedClient(c) - delete(s.remote.targets, "foo.txt") + s.deleteTarget("foo.txt") var dest testDestination c.Assert(client.Download("foo.txt", &dest), Equals, ErrNotFound{"foo.txt"}) c.Assert(dest.deleted, Equals, true) @@ -1117,29 +1196,24 @@ func (s *ClientSuite) TestDownloadOK(c *C) { func (s *ClientSuite) TestDownloadWrongSize(c *C) { client := s.updatedClient(c) - remoteFile := &fakeFile{buf: bytes.NewReader([]byte("wrong-size")), size: 10} - s.remote.targets["foo.txt"] = remoteFile + // Update with a file that's incorrect size. + s.setRemoteTarget("foo.txt", []byte("wrong-size")) var dest testDestination c.Assert(client.Download("foo.txt", &dest), DeepEquals, ErrWrongSize{"foo.txt", 10, 3}) - c.Assert(remoteFile.bytesRead, Equals, 0) c.Assert(dest.deleted, Equals, true) } func (s *ClientSuite) TestDownloadTargetTooLong(c *C) { client := s.updatedClient(c) - remoteFile := s.remote.targets["foo.txt"] - remoteFile.buf = bytes.NewReader([]byte("foo-ooo")) + s.setRemoteTarget("foo.txt", []byte("foo-ooo")) var dest testDestination - c.Assert(client.Download("foo.txt", &dest), IsNil) - c.Assert(remoteFile.bytesRead, Equals, 3) - c.Assert(dest.deleted, Equals, false) - c.Assert(dest.String(), Equals, "foo") + c.Assert(client.Download("foo.txt", &dest), DeepEquals, ErrWrongSize{"foo.txt", 7, 3}) + c.Assert(dest.deleted, Equals, true) } func (s *ClientSuite) TestDownloadTargetTooShort(c *C) { client := s.updatedClient(c) - remoteFile := s.remote.targets["foo.txt"] - remoteFile.buf = bytes.NewReader([]byte("fo")) + s.setRemoteTarget("foo.txt", []byte("fo")) var dest testDestination c.Assert(client.Download("foo.txt", &dest), DeepEquals, ErrWrongSize{"foo.txt", 2, 3}) c.Assert(dest.deleted, Equals, true) @@ -1147,8 +1221,7 @@ func (s *ClientSuite) TestDownloadTargetTooShort(c *C) { func (s *ClientSuite) 
TestDownloadTargetCorruptData(c *C) { client := s.updatedClient(c) - remoteFile := s.remote.targets["foo.txt"] - remoteFile.buf = bytes.NewReader([]byte("corrupt")) + s.setRemoteTarget("foo.txt", []byte("ooo")) var dest testDestination assertWrongHash(c, client.Download("foo.txt", &dest)) c.Assert(dest.deleted, Equals, true) diff --git a/client/file_store.go b/client/file_store.go new file mode 100644 index 00000000..520bbe73 --- /dev/null +++ b/client/file_store.go @@ -0,0 +1,90 @@ +package client + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/fs" +) + +// FileRemoteStore provides a RemoteStore interface compatible +// implementation that can be used where the RemoteStore is backed by a +// fs.FS. This is useful for example in air-gapped environments where there's no +// possibility to make outbound network connections. +// By having this be a fs.FS instead of directories allows the repository to +// be backed by something that's not persisted to disk. +func NewFileRemoteStore(fsys fs.FS, targetDir string) (*FileRemoteStore, error) { + if fsys == nil { + return nil, errors.New("nil fs.FS") + } + t := targetDir + if t == "" { + t = "targets" + } + // Make sure directory exists + d, err := fsys.Open(t) + if err != nil { + return nil, fmt.Errorf("failed to open targets directory %s: %w", t, err) + } + fi, err := d.Stat() + if err != nil { + return nil, fmt.Errorf("failed to stat targets directory %s: %w", t, err) + } + if !fi.IsDir() { + return nil, fmt.Errorf("targets directory not a directory %s", t) + } + + fsysT, err := fs.Sub(fsys, t) + if err != nil { + return nil, fmt.Errorf("failed to open targets directory %s: %w", t, err) + } + return &FileRemoteStore{fsys: fsys, targetDir: fsysT}, nil +} + +type FileRemoteStore struct { + // Meta directory fs + fsys fs.FS + // Target directory fs. 
+ targetDir fs.FS + // In order to be able to make write operations (create, delete) we can't + // use fs.FS for it (it's read only), so we have to know the underlying + // directory that add/delete test methods can use. This is only necessary + // for testing purposes. + testDir string +} + +func (f *FileRemoteStore) GetMeta(name string) (io.ReadCloser, int64, error) { + rc, b, err := f.get(f.fsys, name) + return handleErrors(name, rc, b, err) +} + +func (f *FileRemoteStore) GetTarget(name string) (io.ReadCloser, int64, error) { + rc, b, err := f.get(f.targetDir, name) + return handleErrors(name, rc, b, err) +} + +func (f *FileRemoteStore) get(fsys fs.FS, s string) (io.ReadCloser, int64, error) { + if !fs.ValidPath(s) { + return nil, 0, fmt.Errorf("invalid path %s", s) + } + + b, err := fs.ReadFile(fsys, s) + if err != nil { + return nil, -1, err + } + return io.NopCloser(bytes.NewReader(b)), int64(len(b)), nil +} + +// handleErrors converts NotFound errors to something that TUF knows how to +// handle properly. For example, when looking for n+1 root files, this is a +// signal that it will stop looking. 
+func handleErrors(name string, rc io.ReadCloser, b int64, err error) (io.ReadCloser, int64, error) { + if err == nil { + return rc, b, err + } + if errors.Is(err, fs.ErrNotExist) { + return rc, b, ErrNotFound{name} + } + return rc, b, err +} diff --git a/client/file_store_test.go b/client/file_store_test.go new file mode 100644 index 00000000..95e02e38 --- /dev/null +++ b/client/file_store_test.go @@ -0,0 +1,197 @@ +package client + +import ( + "bytes" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + "testing" +) + +const targetsDir = "targets" + +func TestCreates(t *testing.T) { + runningWindows := false + if runtime.GOOS == "windows" { + runningWindows = true + } + tmpDir := t.TempDir() + defer os.RemoveAll(tmpDir) + dir := filepath.Join(tmpDir, "repository") + os.Mkdir(dir, os.ModePerm) + os.Mkdir(filepath.Join(dir, "targets"), os.ModePerm) + if !runningWindows { + targetDirThatIsFile := filepath.Join(dir, "targets-that-isfile") + f, err := os.Create(targetDirThatIsFile) + if err != nil { + t.Fatalf("failed to create file: %s: %v", targetDirThatIsFile, err) + } + defer f.Close() + } + t.Cleanup(func() { rmrf(dir, t.Logf) }) + t.Cleanup(func() { rmrf(tmpDir, t.Logf) }) + + tests := []struct { + name string + fsys fs.FS + td string + wantErr string + doNotRunOnWindows bool + }{{ + name: "nil, error", + wantErr: "nil fs.FS", + }, { + name: "missing targets directory", + fsys: os.DirFS(dir), + td: "targets-not-there", + wantErr: "failed to open targets directory targets-not-there", + }, { + name: "targets directory is not a file", + fsys: os.DirFS(dir), + td: "targets-that-isfile", + wantErr: "targets directory not a directory targets-that-isfile", + doNotRunOnWindows: true, + }, { + name: "works, explicit targets", + fsys: os.DirFS(dir), + td: "targets", + }, { + name: "works, explicit targets", + fsys: os.DirFS(dir), + td: "targets", + }} + + for _, tc := range tests { + if tc.doNotRunOnWindows { + t.Skip("Can't figure out how to make this work 
on windows") + } + _, err := NewFileRemoteStore(tc.fsys, tc.td) + if tc.wantErr != "" && err == nil { + t.Errorf("%q wanted error %s, got none", tc.name, tc.wantErr) + } else if tc.wantErr == "" && err != nil { + t.Errorf("%q did not want error, got: %v", tc.name, err) + } else if err != nil && !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("%q wanted error %s but got: %s", tc.name, tc.wantErr, err) + } + } +} + +func TestBasicOps(t *testing.T) { + metas := map[string][]byte{ + "root.json": []byte("root"), + "snapshot.json": []byte("snapshot"), + "timestamp": []byte("timestamp"), + } + + fsys, dir, err := newTestFileStoreFS() + if err != nil { + t.Fatalf("Failed to create test FileStore") + } + t.Cleanup(func() { rmrf(dir, t.Logf) }) + defer os.RemoveAll(dir) + + // Add targets and metas and check them. + for k, v := range targetFiles { + if err := fsys.addTarget(k, v); err != nil { + t.Errorf("failed to add target %s: %v", k, err) + } + rc, size, err := fsys.GetTarget(k) + if err != nil { + t.Errorf("failed to GetTarget %s: %v", k, err) + } + if size != int64(len(v)) { + t.Errorf("unexpected size returned for GetTarget: %s want %d got %d", k, len(v), size) + } + got, err := io.ReadAll(rc) + if err != nil { + t.Errorf("failed to ReadAll returned ReadCloser %s: %v", k, err) + } + if !bytes.Equal(v, got) { + t.Errorf("Read unexpected bytes, want: %s got: %s", string(k), string(got)) + } + } + for k, v := range metas { + if err := fsys.addMeta(k, v); err != nil { + t.Errorf("failed to add meta %s %v", k, err) + } + rc, size, err := fsys.GetMeta(k) + if err != nil { + t.Errorf("failed to GetMeta %s: %v", k, err) + } + if size != int64(len(v)) { + t.Errorf("unexpected size returned for GetMeta: %s want %d got %d", k, len(v), size) + } + got, err := io.ReadAll(rc) + if err != nil { + t.Errorf("failed to ReadAll returned ReadCloser %s: %v", k, err) + } + if !bytes.Equal(v, got) { + t.Errorf("Read unexpected bytes, want: %s got: %s", string(k), string(got)) + } + 
} +} + +// Test helper methods +func (f *FileRemoteStore) addMeta(name string, data []byte) error { + return os.WriteFile(filepath.Join(f.testDir, name), data, os.ModePerm) +} + +func (f *FileRemoteStore) addTarget(name string, data []byte) error { + fname := filepath.Join(f.testDir, targetsDir, name) + err := os.WriteFile(fname, data, os.ModePerm) + return err +} + +func (f *FileRemoteStore) deleteMeta(name string) error { + return os.Remove(filepath.Join(f.testDir, name)) +} + +func (f *FileRemoteStore) deleteTarget(name string) error { + return os.Remove(filepath.Join(f.testDir, targetsDir, name)) +} + +func newTestFileStoreFS() (*FileRemoteStore, string, error) { + tmpDir := os.TempDir() + tufDir := filepath.Join(tmpDir, "tuf-file-store-test") + // Clean it in case there is cruft left around + os.RemoveAll(tufDir) + os.Mkdir(tufDir, os.ModePerm) + os.Mkdir(filepath.Join(tufDir, targetsDir), os.ModePerm) + fs, err := NewFileRemoteStore(os.DirFS(tufDir), targetsDir) + fs.testDir = tufDir + return fs, tufDir, err +} + +// goes through a dir and removes everything. This is to work around: +// https://github.com/golang/go/issues/51442 +func rmrf(dir string, logger func(string, ...interface{})) { + if dir == "" { + logger("cowardly refusing to remove a not fully specified dir") + return + } + logger("Removing %s", dir) + d, err := os.Open(dir) + if err != nil { + logger("Failed to open %s: %v", dir, err) + return + } + defer d.Close() + // -1 means give me everything, we don't have that many entries, so + // fine here. + names, err := d.Readdirnames(-1) + if err != nil { + logger("Failed to Readdirnames %s: %v", dir, err) + return + } + for _, name := range names { + toRemove := filepath.Join(dir, name) + err = os.RemoveAll(toRemove) + if err != nil { + logger("Failed to RemoveAll %s: %v", toRemove, err) + // Do not want to fail here, just keep doing the best we can + } + } +}