From 82fc7c90cd463973ba712d91503efb5ccd36fc9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Min=C3=A1=C5=99?= Date: Fri, 22 Jul 2016 11:30:16 +0200 Subject: [PATCH] Added unit tests for repository and blobdescriptorservice MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Michal Minář --- .../server/blobdescriptorservice_test.go | 492 +++++++++++ pkg/dockerregistry/server/digestcache_test.go | 12 - .../server/repositorymiddleware_test.go | 782 ++++++++++++++++++ pkg/dockerregistry/testutil/util.go | 198 +++++ 4 files changed, 1472 insertions(+), 12 deletions(-) create mode 100644 pkg/dockerregistry/server/blobdescriptorservice_test.go create mode 100644 pkg/dockerregistry/server/repositorymiddleware_test.go create mode 100644 pkg/dockerregistry/testutil/util.go diff --git a/pkg/dockerregistry/server/blobdescriptorservice_test.go b/pkg/dockerregistry/server/blobdescriptorservice_test.go new file mode 100644 index 000000000000..068e6cea8475 --- /dev/null +++ b/pkg/dockerregistry/server/blobdescriptorservice_test.go @@ -0,0 +1,492 @@ +package server + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "sync" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + registryauth "github.com/docker/distribution/registry/auth" + "github.com/docker/distribution/registry/handlers" + "github.com/docker/distribution/registry/middleware/registry" + "github.com/docker/distribution/registry/storage" + + registrytest "github.com/openshift/origin/pkg/dockerregistry/testutil" + "k8s.io/kubernetes/pkg/client/restclient" + kclient 
"k8s.io/kubernetes/pkg/client/unversioned" + ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient" + + osclient "github.com/openshift/origin/pkg/client" + "github.com/openshift/origin/pkg/client/testclient" + imagetest "github.com/openshift/origin/pkg/image/admission/testutil" +) + +// TestBlobDescriptorServiceIsApplied ensures that blobDescriptorService middleware gets applied. +// It relies on the fact that blobDescriptorService requires higher levels to set repository object on given +// context. If the object isn't given, its method will err out. +func TestBlobDescriptorServiceIsApplied(t *testing.T) { + ctx := context.Background() + + // don't do any authorization check + installFakeAccessController(t) + m := fakeBlobDescriptorService(t) + // to make other unit tests working + defer m.changeUnsetRepository(false) + + testImage, err := registrytest.NewImageForManifest(t, "user/app", registrytest.SampleImageManifestSchema1, true) + if err != nil { + t.Fatal(err) + } + testImageStream := testNewImageStreamObject("user", "app", "latest", testImage.Name) + client := &testclient.Fake{} + client.AddReactor("get", "imagestreams", imagetest.GetFakeImageStreamGetHandler(t, *testImageStream)) + client.AddReactor("get", "images", getFakeImageGetHandler(t, *testImage)) + + // TODO: get rid of those nasty global vars + backupRegistryClient := DefaultRegistryClient + DefaultRegistryClient = makeFakeRegistryClient(client, ktestclient.NewSimpleFake()) + defer func() { + // set it back once this test finishes to make other unit tests working + DefaultRegistryClient = backupRegistryClient + }() + + app := handlers.NewApp(ctx, &configuration.Configuration{ + Loglevel: "debug", + Auth: map[string]configuration.Parameters{ + fakeAuthorizerName: {"realm": fakeAuthorizerName}, + }, + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + "cache": configuration.Parameters{ + "blobdescriptor": "inmemory", + }, + "delete": 
configuration.Parameters{ + "enabled": true, + }, + }, + Middleware: map[string][]configuration.Middleware{ + "registry": {{Name: "openshift"}}, + "repository": {{Name: "openshift"}}, + "storage": {{Name: "openshift"}}, + }, + }) + server := httptest.NewServer(app) + router := v2.Router() + + serverURL, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("error parsing server url: %v", err) + } + os.Setenv("DOCKER_REGISTRY_URL", serverURL.Host) + + desc := registrytest.UploadTestBlob(t, serverURL, "user/app") + + for _, tc := range []struct { + name string + method string + endpoint string + vars []string + unsetRepository bool + expectedStatus int + expectedMethodInvocations map[string]int + }{ + { + name: "get blob with repository unset", + method: http.MethodGet, + endpoint: v2.RouteNameBlob, + vars: []string{ + "name", "user/app", + "digest", desc.Digest.String(), + }, + unsetRepository: true, + expectedStatus: http.StatusInternalServerError, + expectedMethodInvocations: map[string]int{"Stat": 1}, + }, + + { + name: "get blob", + method: http.MethodGet, + endpoint: v2.RouteNameBlob, + vars: []string{ + "name", "user/app", + "digest", desc.Digest.String(), + }, + expectedStatus: http.StatusOK, + // 1st stat is invoked in (*distribution/registry/handlers.blobHandler).GetBlob() as a + // check of blob existence + // 2nd stat happens in (*errorBlobStore).ServeBlob() invoked by the same GetBlob handler + // 3rd stat is done by (*blobServiceListener).ServeBlob once the blob serving is finished; + // it may happen with a slight delay after the blob was served + expectedMethodInvocations: map[string]int{"Stat": 3}, + }, + + { + name: "stat blob with repository unset", + method: http.MethodHead, + endpoint: v2.RouteNameBlob, + vars: []string{ + "name", "user/app", + "digest", desc.Digest.String(), + }, + unsetRepository: true, + expectedStatus: http.StatusInternalServerError, + expectedMethodInvocations: map[string]int{"Stat": 1}, + }, + + { + name: "stat blob", + 
method: http.MethodHead, + endpoint: v2.RouteNameBlob, + vars: []string{ + "name", "user/app", + "digest", desc.Digest.String(), + }, + expectedStatus: http.StatusOK, + // 1st stat is invoked in (*distribution/registry/handlers.blobHandler).GetBlob() as a + // check of blob existence + // 2nd stat happens in (*errorBlobStore).ServeBlob() invoked by the same GetBlob handler + // 3rd stat is done by (*blobServiceListener).ServeBlob once the blob serving is finished; + // it may happen with a slight delay after the blob was served + expectedMethodInvocations: map[string]int{"Stat": 3}, + }, + + { + name: "delete blob with repository unset", + method: http.MethodDelete, + endpoint: v2.RouteNameBlob, + vars: []string{ + "name", "user/app", + "digest", desc.Digest.String(), + }, + unsetRepository: true, + expectedStatus: http.StatusInternalServerError, + expectedMethodInvocations: map[string]int{"Stat": 1}, + }, + + { + name: "delete blob", + method: http.MethodDelete, + endpoint: v2.RouteNameBlob, + vars: []string{ + "name", "user/app", + "digest", desc.Digest.String(), + }, + expectedStatus: http.StatusAccepted, + expectedMethodInvocations: map[string]int{"Stat": 1, "Clear": 1}, + }, + + { + name: "get manifest with repository unset", + method: http.MethodGet, + endpoint: v2.RouteNameManifest, + vars: []string{ + "name", "user/app", + "reference", "latest", + }, + unsetRepository: true, + // succeed because blob store is not involved + expectedStatus: http.StatusOK, + // manifest is retrieved from etcd + expectedMethodInvocations: map[string]int{"Stat": 0}, + }, + + { + name: "get manifest", + method: http.MethodGet, + endpoint: v2.RouteNameManifest, + vars: []string{ + "name", "user/app", + "reference", "latest", + }, + expectedStatus: http.StatusOK, + // manifest is retrieved from etcd + expectedMethodInvocations: map[string]int{"Stat": 0}, + }, + + { + name: "delete manifest with repository unset", + method: http.MethodDelete, + endpoint: v2.RouteNameManifest, + 
vars: []string{ + "name", "user/app", + "reference", testImage.Name, + }, + unsetRepository: true, + expectedStatus: http.StatusInternalServerError, + // we don't allow to delete manifests from etcd; in this case, we attempt to delete layer link + expectedMethodInvocations: map[string]int{"Stat": 1}, + }, + + { + name: "delete manifest", + method: http.MethodDelete, + endpoint: v2.RouteNameManifest, + vars: []string{ + "name", "user/app", + "reference", testImage.Name, + }, + expectedStatus: http.StatusNotFound, + // we don't allow to delete manifests from etcd; in this case, we attempt to delete layer link + expectedMethodInvocations: map[string]int{"Stat": 1}, + }, + } { + m.clearStats() + m.changeUnsetRepository(tc.unsetRepository) + + route := router.GetRoute(tc.endpoint).Host(serverURL.Host) + u, err := route.URL(tc.vars...) + if err != nil { + t.Errorf("[%s] failed to build route: %v", tc.name, err) + continue + } + + req, err := http.NewRequest(tc.method, u.String(), nil) + if err != nil { + t.Errorf("[%s] failed to make request: %v", tc.name, err) + } + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + t.Errorf("[%s] failed to do the request: %v", tc.name, err) + continue + } + defer resp.Body.Close() + + if resp.StatusCode != tc.expectedStatus { + t.Errorf("[%s] unexpected status code: %v != %v", tc.name, resp.StatusCode, tc.expectedStatus) + } + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted { + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("[%s] failed to read body: %v", tc.name, err) + } else if len(content) > 0 { + errs := errcode.Errors{} + err := errs.UnmarshalJSON(content) + if err != nil { + t.Logf("[%s] failed to parse body as error: %v", tc.name, err) + t.Logf("[%s] received body: %v", tc.name, string(content)) + } else { + t.Logf("[%s] received errors: %#+v", tc.name, errs) + } + } + } + + stats, err := m.getStats(tc.expectedMethodInvocations, time.Second*5) + 
if err != nil { + t.Errorf("[%s] failed to get stats: %v", tc.name, err) + } + for method, exp := range tc.expectedMethodInvocations { + invoked := stats[method] + if invoked != exp { + t.Errorf("[%s] unexpected number of invocations of method %q: %v != %v", tc.name, method, invoked, exp) + } + } + for method, invoked := range stats { + if _, ok := tc.expectedMethodInvocations[method]; !ok { + t.Errorf("[%s] unexpected method %q invoked %d times", tc.name, method, invoked) + } + } + } +} + +type testBlobDescriptorManager struct { + mu sync.Mutex + cond *sync.Cond + stats map[string]int + unsetRepository bool +} + +// NewTestBlobDescriptorManager allows to control blobDescriptorService and collects statistics of called +// methods. +func NewTestBlobDescriptorManager() *testBlobDescriptorManager { + m := &testBlobDescriptorManager{ + stats: make(map[string]int), + } + m.cond = sync.NewCond(&m.mu) + return m +} + +func (m *testBlobDescriptorManager) clearStats() { + m.mu.Lock() + defer m.mu.Unlock() + + for k := range m.stats { + delete(m.stats, k) + } +} + +func (m *testBlobDescriptorManager) methodInvoked(methodName string) int { + m.mu.Lock() + defer m.mu.Unlock() + + newCount := m.stats[methodName] + 1 + m.stats[methodName] = newCount + m.cond.Signal() + + return newCount +} + +// unsetRepository returns true if the testBlobDescriptorService should unset repository from context before +// passing down the call +func (m *testBlobDescriptorManager) getUnsetRepository() bool { + m.mu.Lock() + defer m.mu.Unlock() + + return m.unsetRepository +} + +// changeUnsetRepository allows to configure whether the testBlobDescriptorService should unset repository +// from context before passing down the call +func (m *testBlobDescriptorManager) changeUnsetRepository(unset bool) { + m.mu.Lock() + defer m.mu.Unlock() + + m.unsetRepository = unset +} + +// getStats waits until blob descriptor service's methods are called specified number of times and returns +// collected numbers 
of invocations per each method watched. An error will be returned if a given timeout is +// reached without satisfying minimum limit.s +func (m *testBlobDescriptorManager) getStats(minimumLimits map[string]int, timeout time.Duration) (map[string]int, error) { + m.mu.Lock() + defer m.mu.Unlock() + + var err error + end := time.Now().Add(timeout) + + if len(minimumLimits) > 0 { + Loop: + for !statsGreaterThanOrEqual(m.stats, minimumLimits) { + c := make(chan struct{}) + go func() { m.cond.Wait(); c <- struct{}{} }() + + now := time.Now() + select { + case <-time.After(end.Sub(now)): + err = fmt.Errorf("timeout while waiting on expected stats") + break Loop + case <-c: + continue Loop + } + } + } + + stats := make(map[string]int) + for k, v := range m.stats { + stats[k] = v + } + + return stats, err +} + +func statsGreaterThanOrEqual(stats, minimumLimits map[string]int) bool { + for key, val := range minimumLimits { + if val > stats[key] { + return false + } + } + return true +} + +// fakeBlobDescriptorService installs a fake blob descriptor on top of blobDescriptorService that collects +// stats of method invocations. unsetRepository commands the controller to remove repository object from +// context passed down to blobDescriptorService if true. 
+func fakeBlobDescriptorService(t *testing.T) *testBlobDescriptorManager { + m := NewTestBlobDescriptorManager() + middleware.RegisterOptions(storage.BlobDescriptorServiceFactory(&testBlobDescriptorServiceFactory{t: t, m: m})) + return m +} + +type testBlobDescriptorServiceFactory struct { + t *testing.T + m *testBlobDescriptorManager +} + +func (bf *testBlobDescriptorServiceFactory) BlobAccessController(svc distribution.BlobDescriptorService) distribution.BlobDescriptorService { + if _, ok := svc.(*blobDescriptorService); !ok { + svc = (&blobDescriptorServiceFactory{}).BlobAccessController(svc) + } + return &testBlobDescriptorService{BlobDescriptorService: svc, t: bf.t, m: bf.m} +} + +type testBlobDescriptorService struct { + distribution.BlobDescriptorService + t *testing.T + m *testBlobDescriptorManager +} + +func (bs *testBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + bs.m.methodInvoked("Stat") + if bs.m.getUnsetRepository() { + bs.t.Logf("unsetting repository from the context") + ctx = WithRepository(ctx, nil) + } + + return bs.BlobDescriptorService.Stat(ctx, dgst) +} +func (bs *testBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { + bs.m.methodInvoked("Clear") + if bs.m.getUnsetRepository() { + bs.t.Logf("unsetting repository from the context") + ctx = WithRepository(ctx, nil) + } + return bs.BlobDescriptorService.Clear(ctx, dgst) +} + +const fakeAuthorizerName = "fake" + +// installFakeAccessController installs an authorizer that allows access anywhere to anybody. 
+func installFakeAccessController(t *testing.T) { + registryauth.Register(fakeAuthorizerName, registryauth.InitFunc( + func(options map[string]interface{}) (registryauth.AccessController, error) { + t.Log("instantiating fake access controller") + return &fakeAccessController{t: t}, nil + })) +} + +type fakeAccessController struct { + t *testing.T +} + +var _ registryauth.AccessController = &fakeAccessController{} + +func (f *fakeAccessController) Authorized(ctx context.Context, access ...registryauth.Access) (context.Context, error) { + for _, access := range access { + f.t.Logf("fake authorizer: authorizing access to %s:%s:%s", access.Resource.Type, access.Resource.Name, access.Action) + } + + ctx = WithAuthPerformed(ctx) + return ctx, nil +} + +func makeFakeRegistryClient(client osclient.Interface, kClient kclient.Interface) RegistryClient { + return &fakeRegistryClient{ + client: client, + kClient: kClient, + } +} + +type fakeRegistryClient struct { + client osclient.Interface + kClient kclient.Interface +} + +func (f *fakeRegistryClient) Clients() (osclient.Interface, kclient.Interface, error) { + return f.client, f.kClient, nil +} +func (f *fakeRegistryClient) SafeClientConfig() restclient.Config { + return (®istryClient{}).SafeClientConfig() +} diff --git a/pkg/dockerregistry/server/digestcache_test.go b/pkg/dockerregistry/server/digestcache_test.go index 9489581dd52a..75da3a2c27ed 100644 --- a/pkg/dockerregistry/server/digestcache_test.go +++ b/pkg/dockerregistry/server/digestcache_test.go @@ -18,8 +18,6 @@ const ( ttl8m = time.Minute * 8 ) -var stale = time.Unix(0, 0) - func TestRepositoryBucketAdd(t *testing.T) { now := time.Now() clock := util.NewFakeClock(now) @@ -193,7 +191,6 @@ func TestRepositoryBucketAdd(t *testing.T) { }, { repository: "apple", - evictOn: stale, }, { repository: "pear", @@ -223,15 +220,12 @@ func TestRepositoryBucketAdd(t *testing.T) { entries: []bucketEntry{ { repository: "orange", - evictOn: stale, }, { repository: "apple", - 
evictOn: stale, }, { repository: "pear", - evictOn: stale, }, }, expectedEntries: []bucketEntry{ @@ -249,7 +243,6 @@ func TestRepositoryBucketAdd(t *testing.T) { entries: []bucketEntry{ { repository: "melon", - evictOn: stale, }, { repository: "orange", @@ -265,7 +258,6 @@ func TestRepositoryBucketAdd(t *testing.T) { }, { repository: "plum", - evictOn: stale, }, }, expectedEntries: []bucketEntry{ @@ -475,7 +467,6 @@ func TestRepositoryBucketRemove(t *testing.T) { entries: []bucketEntry{ { repository: "orange", - evictOn: stale, }, { repository: "apple", @@ -489,7 +480,6 @@ func TestRepositoryBucketRemove(t *testing.T) { expectedEntries: []bucketEntry{ { repository: "orange", - evictOn: stale, }, { repository: "apple", @@ -551,7 +541,6 @@ func TestRepositoryBucketRemove(t *testing.T) { } func TestRepositoryBucketCopy(t *testing.T) { - stale := time.Unix(0, 0) now := time.Now() clock := util.NewFakeClock(now) @@ -571,7 +560,6 @@ func TestRepositoryBucketCopy(t *testing.T) { entries: []bucketEntry{ { repository: "1", - evictOn: stale, }, }, expectedRepos: []string{}, diff --git a/pkg/dockerregistry/server/repositorymiddleware_test.go b/pkg/dockerregistry/server/repositorymiddleware_test.go new file mode 100644 index 000000000000..2ccbdd19b355 --- /dev/null +++ b/pkg/dockerregistry/server/repositorymiddleware_test.go @@ -0,0 +1,782 @@ +package server + +import ( + "fmt" + "io" + "reflect" + "strings" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/memory" + 
"github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/libtrust" + + kapi "k8s.io/kubernetes/pkg/api" + kerrors "k8s.io/kubernetes/pkg/api/errors" + ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/diff" + + "github.com/openshift/origin/pkg/client" + "github.com/openshift/origin/pkg/client/testclient" + registrytest "github.com/openshift/origin/pkg/dockerregistry/testutil" + imagetest "github.com/openshift/origin/pkg/image/admission/testutil" + imageapi "github.com/openshift/origin/pkg/image/api" +) + +const ( + // testImageLayerCount says how many layers to generate per image + testImageLayerCount = 2 + testBlobRepositoryCacheTTL = time.Millisecond * 500 +) + +func TestRepositoryBlobStat(t *testing.T) { + quotaEnforcing = "aEnforcingConfig{} + + ctx := context.Background() + // this driver holds all the testing blobs in memory during the whole test run + driver := inmemory.New() + // generate two images and store their blobs in the driver + testImages, err := populateTestStorage(t, driver, true, 1, map[string]int{"nm/is:latest": 1, "nm/repo:missing-layer-links": 1}, nil) + if err != nil { + t.Fatal(err) + } + // generate an image and store its blobs in the driver; the resulting image will lack managed by openshift + // annotation + testImages, err = populateTestStorage(t, driver, false, 1, map[string]int{"nm/unmanaged:missing-layer-links": 1}, testImages) + if err != nil { + t.Fatal(err) + } + + // remove layer repository links from two of the above images; keep the uploaded blobs in the global + // blostore though + for _, name := range []string{"nm/repo:missing-layer-links", "nm/unmanaged:missing-layer-links"} { + repoName := strings.Split(name, ":")[0] + for _, layer := range testImages[name][0].DockerImageLayers { + dgst 
:= digest.Digest(layer.Name) + alg, hex := dgst.Algorithm(), dgst.Hex() + err := driver.Delete(ctx, fmt.Sprintf("/docker/registry/v2/repositories/%s/_layers/%s/%s", repoName, alg, hex)) + if err != nil { + t.Fatalf("failed to delete layer link %q from repository %q: %v", layer.Name, repoName, err) + } + } + } + + // generate random images without storing its blobs in the driver + etcdOnlyImages := map[string]*imageapi.Image{} + for _, d := range []struct { + name string + managed bool + }{{"nm/is", true}, {"registry.org:5000/user/app", false}} { + img, err := registrytest.NewImageForManifest(t, d.name, registrytest.SampleImageManifestSchema1, d.managed) + if err != nil { + t.Fatal(err) + } + etcdOnlyImages[d.name] = img + } + + for _, tc := range []struct { + name string + stat string + images []imageapi.Image + imageStreams []imageapi.ImageStream + pullthrough bool + skipAuth bool + deferredErrors deferredErrors + expectedDescriptor distribution.Descriptor + expectedError error + expectedActions []clientAction + }{ + { + name: "local stat", + stat: "nm/is@" + testImages["nm/is:latest"][0].DockerImageLayers[0].Name, + imageStreams: []imageapi.ImageStream{{ObjectMeta: kapi.ObjectMeta{Namespace: "nm", Name: "is"}}}, + expectedDescriptor: testNewDescriptorForLayer(testImages["nm/is:latest"][0].DockerImageLayers[0]), + }, + + { + name: "blob only tagged in image stream", + stat: "nm/repo@" + testImages["nm/repo:missing-layer-links"][0].DockerImageLayers[1].Name, + images: []imageapi.Image{*testImages["nm/repo:missing-layer-links"][0]}, + imageStreams: []imageapi.ImageStream{ + { + ObjectMeta: kapi.ObjectMeta{ + Namespace: "nm", + Name: "repo", + }, + Status: imageapi.ImageStreamStatus{ + Tags: map[string]imageapi.TagEventList{ + "latest": { + Items: []imageapi.TagEvent{ + { + Image: testImages["nm/repo:missing-layer-links"][0].Name, + }, + }, + }, + }, + }, + }, + }, + expectedDescriptor: 
testNewDescriptorForLayer(testImages["nm/repo:missing-layer-links"][0].DockerImageLayers[1]), + expectedActions: []clientAction{{"get", "imagestreams"}, {"get", "images"}}, + }, + + { + name: "blob referenced only by not managed image with pullthrough on", + stat: "nm/unmanaged@" + testImages["nm/unmanaged:missing-layer-links"][0].DockerImageLayers[1].Name, + images: []imageapi.Image{*testImages["nm/unmanaged:missing-layer-links"][0]}, + imageStreams: []imageapi.ImageStream{ + { + ObjectMeta: kapi.ObjectMeta{ + Namespace: "nm", + Name: "unmanaged", + }, + Status: imageapi.ImageStreamStatus{ + Tags: map[string]imageapi.TagEventList{ + "latest": { + Items: []imageapi.TagEvent{ + { + Image: testImages["nm/unmanaged:missing-layer-links"][0].Name, + }, + }, + }, + }, + }, + }, + }, + pullthrough: true, + expectedDescriptor: testNewDescriptorForLayer(testImages["nm/unmanaged:missing-layer-links"][0].DockerImageLayers[1]), + expectedActions: []clientAction{{"get", "imagestreams"}, {"get", "images"}}, + }, + + { + // TODO: this should err out because of missing image stream. + // Unfortunately, it's not the case. Until we start storing layer links in etcd, we depend on + // local layer links. 
+ name: "layer link present while image stream not found", + stat: "nm/is@" + testImages["nm/is:latest"][0].DockerImageLayers[0].Name, + images: []imageapi.Image{*testImages["nm/is:latest"][0]}, + expectedDescriptor: testNewDescriptorForLayer(testImages["nm/is:latest"][0].DockerImageLayers[0]), + }, + + { + name: "blob only tagged by not managed image with pullthrough off", + stat: "nm/repo@" + testImages["nm/unmanaged:missing-layer-links"][0].DockerImageLayers[1].Name, + images: []imageapi.Image{*testImages["nm/unmanaged:missing-layer-links"][0]}, + imageStreams: []imageapi.ImageStream{ + { + ObjectMeta: kapi.ObjectMeta{ + Namespace: "nm", + Name: "repo", + }, + Status: imageapi.ImageStreamStatus{ + Tags: map[string]imageapi.TagEventList{ + "latest": { + Items: []imageapi.TagEvent{ + { + Image: testImages["nm/unmanaged:missing-layer-links"][0].DockerImageLayers[1].Name, + }, + }, + }, + }, + }, + }, + }, + expectedError: distribution.ErrBlobUnknown, + expectedActions: []clientAction{{"get", "imagestreams"}, {"get", "images"}}, + }, + + { + name: "blob not stored locally but referred in image stream", + stat: "nm/is@" + etcdOnlyImages["nm/is"].DockerImageLayers[1].Name, + images: []imageapi.Image{*etcdOnlyImages["nm/is"]}, + imageStreams: []imageapi.ImageStream{ + { + ObjectMeta: kapi.ObjectMeta{ + Namespace: "nm", + Name: "is", + }, + Status: imageapi.ImageStreamStatus{ + Tags: map[string]imageapi.TagEventList{ + "latest": { + Items: []imageapi.TagEvent{ + { + Image: etcdOnlyImages["nm/is"].Name, + }, + }, + }, + }, + }, + }, + }, + expectedError: distribution.ErrBlobUnknown, + }, + + { + name: "blob does not exist", + stat: "nm/repo@" + etcdOnlyImages["nm/is"].DockerImageLayers[0].Name, + images: []imageapi.Image{*testImages["nm/is:latest"][0]}, + imageStreams: []imageapi.ImageStream{ + { + ObjectMeta: kapi.ObjectMeta{ + Namespace: "nm", + Name: "repo", + }, + Status: imageapi.ImageStreamStatus{ + Tags: map[string]imageapi.TagEventList{ + "latest": { + Items: 
[]imageapi.TagEvent{ + { + Image: testImages["nm/is:latest"][0].Name, + }, + }, + }, + }, + }, + }, + }, + expectedError: distribution.ErrBlobUnknown, + }, + + { + name: "auth not performed", + stat: "nm/is@" + testImages["nm/is:latest"][0].DockerImageLayers[0].Name, + imageStreams: []imageapi.ImageStream{{ObjectMeta: kapi.ObjectMeta{Namespace: "nm", Name: "is"}}}, + skipAuth: true, + expectedError: fmt.Errorf("openshift.auth.completed missing from context"), + }, + + { + name: "deferred error", + stat: "nm/is@" + testImages["nm/is:latest"][0].DockerImageLayers[0].Name, + imageStreams: []imageapi.ImageStream{{ObjectMeta: kapi.ObjectMeta{Namespace: "nm", Name: "is"}}}, + deferredErrors: deferredErrors{"nm/is": ErrOpenShiftAccessDenied}, + expectedError: ErrOpenShiftAccessDenied, + }, + } { + ref, err := reference.Parse(tc.stat) + if err != nil { + t.Errorf("[%s] failed to parse blob reference %q: %v", tc.name, tc.stat, err) + continue + } + canonical, ok := ref.(reference.Canonical) + if !ok { + t.Errorf("[%s] not a canonical reference %q", tc.name, ref.String()) + continue + } + + cachedLayers, err = newDigestToRepositoryCache(defaultDigestToRepositoryCacheSize) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + if !tc.skipAuth { + ctx = WithAuthPerformed(ctx) + } + if tc.deferredErrors != nil { + ctx = WithDeferredErrors(ctx, tc.deferredErrors) + } + + client := &testclient.Fake{} + client.AddReactor("get", "imagestreams", imagetest.GetFakeImageStreamGetHandler(t, tc.imageStreams...)) + client.AddReactor("get", "images", getFakeImageGetHandler(t, tc.images...)) + + reg, err := newTestRegistry(ctx, client, driver, defaultBlobRepositoryCacheTTL, tc.pullthrough, true) + if err != nil { + t.Errorf("[%s] unexpected error: %v", tc.name, err) + continue + } + + repo, err := reg.Repository(ctx, canonical) + if err != nil { + t.Errorf("[%s] unexpected error: %v", tc.name, err) + continue + } + + desc, err := repo.Blobs(ctx).Stat(ctx, canonical.Digest()) 
+ if err != nil && tc.expectedError == nil { + t.Errorf("[%s] got unexpected stat error: %v", tc.name, err) + continue + } + if err == nil && tc.expectedError != nil { + t.Errorf("[%s] got unexpected non-error", tc.name) + continue + } + if !reflect.DeepEqual(err, tc.expectedError) { + t.Errorf("[%s] got unexpected error: %s", tc.name, diff.ObjectGoPrintDiff(err, tc.expectedError)) + continue + } + if tc.expectedError == nil && !reflect.DeepEqual(desc, tc.expectedDescriptor) { + t.Errorf("[%s] got unexpected descriptor: %s", tc.name, diff.ObjectGoPrintDiff(desc, tc.expectedDescriptor)) + } + + compareActions(t, tc.name, client.Actions(), tc.expectedActions) + } +} + +func TestRepositoryBlobStatCacheEviction(t *testing.T) { + const blobRepoCacheTTL = time.Millisecond * 500 + + quotaEnforcing = "aEnforcingConfig{} + ctx := WithAuthPerformed(context.Background()) + + // this driver holds all the testing blobs in memory during the whole test run + driver := inmemory.New() + // generate two images and store their blobs in the driver + testImages, err := populateTestStorage(t, driver, true, 1, map[string]int{"nm/is:latest": 1}, nil) + if err != nil { + t.Fatal(err) + } + testImage := testImages["nm/is:latest"][0] + testImageStream := testNewImageStreamObject("nm", "is", "latest", testImage.Name) + + blob1Desc := testNewDescriptorForLayer(testImage.DockerImageLayers[0]) + blob1Dgst := blob1Desc.Digest + blob2Desc := testNewDescriptorForLayer(testImage.DockerImageLayers[1]) + blob2Dgst := blob2Desc.Digest + + // remove repo layer repo link of the image's second blob + alg, hex := blob2Dgst.Algorithm(), blob2Dgst.Hex() + err = driver.Delete(ctx, fmt.Sprintf("/docker/registry/v2/repositories/%s/_layers/%s/%s", "nm/is", alg, hex)) + + cachedLayers, err = newDigestToRepositoryCache(defaultDigestToRepositoryCacheSize) + if err != nil { + t.Fatal(err) + } + + client := &testclient.Fake{} + client.AddReactor("get", "imagestreams", imagetest.GetFakeImageStreamGetHandler(t, 
*testImageStream)) + client.AddReactor("get", "images", getFakeImageGetHandler(t, *testImage)) + + reg, err := newTestRegistry(ctx, client, driver, blobRepoCacheTTL, false, false) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + ref, err := reference.ParseNamed("nm/is") + if err != nil { + t.Errorf("failed to parse blob reference %q: %v", "nm/is", err) + } + + repo, err := reg.Repository(ctx, ref) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // hit the layer repo link - cache the result + desc, err := repo.Blobs(ctx).Stat(ctx, blob1Dgst) + if err != nil { + t.Fatalf("got unexpected stat error: %v", err) + } + if !reflect.DeepEqual(desc, blob1Desc) { + t.Fatalf("got unexpected descriptor: %#+v != %#+v", desc, blob1Desc) + } + + compareActions(t, "no actions expected", client.Actions(), []clientAction{}) + + // remove layer repo link, delete the association from cache as well + err = repo.Blobs(ctx).Delete(ctx, blob1Dgst) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + // query etcd + desc, err = repo.Blobs(ctx).Stat(ctx, blob1Dgst) + if err != nil { + t.Fatalf("got unexpected stat error: %v", err) + } + if !reflect.DeepEqual(desc, blob1Desc) { + t.Fatalf("got unexpected descriptor: %#+v != %#+v", desc, blob1Desc) + } + + expectedActions := []clientAction{{"get", "imagestreams"}, {"get", "images"}} + compareActions(t, "1st roundtrip to etcd", client.Actions(), expectedActions) + + // remove the underlying blob + vacuum := storage.NewVacuum(ctx, driver) + err = vacuum.RemoveBlob(blob1Dgst.String()) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + // fail because the blob isn't stored locally + desc, err = repo.Blobs(ctx).Stat(ctx, blob1Dgst) + if err == nil { + t.Fatalf("got unexpected non error: %v", err) + } + if err != distribution.ErrBlobUnknown { + t.Fatalf("got unexpected error: %#+v", err) + } + + // cache hit - don't query etcd + desc, err = repo.Blobs(ctx).Stat(ctx, blob2Dgst) + 
if err != nil { + t.Fatalf("got unexpected stat error: %v", err) + } + if !reflect.DeepEqual(desc, blob2Desc) { + t.Fatalf("got unexpected descriptor: %#+v != %#+v", desc, blob2Desc) + } + + compareActions(t, "no etcd query", client.Actions(), expectedActions) + + lastStatTimestamp := time.Now() + + // hit the cache + desc, err = repo.Blobs(ctx).Stat(ctx, blob2Dgst) + if err != nil { + t.Fatalf("got unexpected stat error: %v", err) + } + if !reflect.DeepEqual(desc, blob2Desc) { + t.Fatalf("got unexpected descriptor: %#+v != %#+v", desc, blob2Desc) + } + + // cache hit - no additional etcd query + compareActions(t, "no roundrip to etcd", client.Actions(), expectedActions) + + t.Logf("sleeping %s while waiting for eviction of blob %q from cache", blobRepoCacheTTL.String(), blob2Dgst.String()) + time.Sleep(blobRepoCacheTTL - (time.Now().Sub(lastStatTimestamp))) + + desc, err = repo.Blobs(ctx).Stat(ctx, blob2Dgst) + if err != nil { + t.Fatalf("got unexpected stat error: %v", err) + } + if !reflect.DeepEqual(desc, blob2Desc) { + t.Fatalf("got unexpected descriptor: %#+v != %#+v", desc, blob2Desc) + } + + expectedActions = append(expectedActions, []clientAction{{"get", "imagestreams"}, {"get", "images"}}...) 
+ compareActions(t, "2nd roundtrip to etcd", client.Actions(), expectedActions) + + err = vacuum.RemoveBlob(blob2Dgst.String()) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + // fail because the blob isn't stored locally + desc, err = repo.Blobs(ctx).Stat(ctx, blob2Dgst) + if err == nil { + t.Fatalf("got unexpected non error: %v", err) + } + if err != distribution.ErrBlobUnknown { + t.Fatalf("got unexpected error: %#+v", err) + } +} + +type clientAction struct { + verb string + resource string +} + +func getFakeImageGetHandler(t *testing.T, iss ...imageapi.Image) ktestclient.ReactionFunc { + return func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) { + switch a := action.(type) { + case ktestclient.GetAction: + for _, is := range iss { + if a.GetName() == is.Name { + t.Logf("images get handler: returning image %s", is.Name) + return true, &is, nil + } + } + + err := kerrors.NewNotFound(kapi.Resource("images"), a.GetName()) + t.Logf("image get handler: %v", err) + return true, nil, err + } + return false, nil, nil + } +} + +func storeTestImage( + ctx context.Context, + reg distribution.Namespace, + imageReference reference.NamedTagged, + schemaVersion int, + managedByOpenShift bool, +) (*imageapi.Image, error) { + repo, err := reg.Repository(ctx, imageReference) + if err != nil { + return nil, fmt.Errorf("unexpected error getting repo %q: %v", imageReference.Name(), err) + } + + var ( + m distribution.Manifest + m1 schema1.Manifest + ) + switch schemaVersion { + case 1: + m1 = schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageReference.Name(), + Tag: imageReference.Tag(), + } + case 2: + // TODO + fallthrough + default: + return nil, fmt.Errorf("unsupported manifest version %d", schemaVersion) + } + + for i := 0; i < testImageLayerCount; i++ { + rs, ds, err := registrytest.CreateRandomTarFile() + if err != nil { + return nil, fmt.Errorf("unexpected error generating test layer 
file: %v", err) + } + dgst := digest.Digest(ds) + + wr, err := repo.Blobs(ctx).Create(ctx) + if err != nil { + return nil, fmt.Errorf("unexpected error creating test upload: %v", err) + } + defer wr.Close() + + n, err := io.Copy(wr, rs) + if err != nil { + return nil, fmt.Errorf("unexpected error copying to upload: %v", err) + } + + if schemaVersion == 1 { + m1.FSLayers = append(m1.FSLayers, schema1.FSLayer{BlobSum: dgst}) + m1.History = append(m1.History, schema1.History{V1Compatibility: fmt.Sprintf(`{"size":%d}`, n)}) + } // TODO v2 + + if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst, MediaType: schema1.MediaTypeManifestLayer}); err != nil { + return nil, fmt.Errorf("unexpected error finishing upload: %v", err) + } + } + + var dgst digest.Digest + var payload []byte + + if schemaVersion == 1 { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("unexpected error generating private key: %v", err) + } + + m, err = schema1.Sign(&m1, pk) + if err != nil { + return nil, fmt.Errorf("error signing manifest: %v", err) + } + + _, payload, err = m.Payload() + if err != nil { + return nil, fmt.Errorf("error getting payload %#v", err) + } + + dgst = digest.FromBytes(payload) + } //TODO v2 + + image := &imageapi.Image{ + ObjectMeta: kapi.ObjectMeta{ + Name: dgst.String(), + }, + DockerImageManifest: string(payload), + DockerImageReference: imageReference.Name() + "@" + dgst.String(), + } + + if managedByOpenShift { + image.Annotations = map[string]string{imageapi.ManagedByOpenShiftAnnotation: "true"} + } + + if schemaVersion == 1 { + signedManifest := m.(*schema1.SignedManifest) + signatures, err := signedManifest.Signatures() + if err != nil { + return nil, err + } + + for _, signDigest := range signatures { + image.DockerImageSignatures = append(image.DockerImageSignatures, signDigest) + } + } + + err = imageapi.ImageWithMetadata(image) + if err != nil { + return nil, fmt.Errorf("failed to fill image with metadata: 
%v", err) + } + + return image, nil +} + +func populateTestStorage( + t *testing.T, + driver driver.StorageDriver, + setManagedByOpenShift bool, + schemaVersion int, + repoImages map[string]int, + testImages map[string][]*imageapi.Image, +) (map[string][]*imageapi.Image, error) { + ctx := context.Background() + reg, err := storage.NewRegistry(ctx, driver) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + result := make(map[string][]*imageapi.Image) + for key, value := range testImages { + images := make([]*imageapi.Image, len(value)) + copy(images, value) + result[key] = images + } + + for imageReference := range repoImages { + parsed, err := reference.Parse(imageReference) + if err != nil { + t.Fatalf("failed to parse reference %q: %v", imageReference, err) + } + namedTagged, ok := parsed.(reference.NamedTagged) + if !ok { + t.Fatalf("expected NamedTagged reference, not %T", parsed) + } + + imageCount := repoImages[imageReference] + + for i := 0; i < imageCount; i++ { + img, err := storeTestImage(ctx, reg, namedTagged, schemaVersion, setManagedByOpenShift) + if err != nil { + t.Fatal(err) + } + arr := result[imageReference] + t.Logf("created image %s@%s image with layers:", namedTagged.Name(), img.Name) + for _, l := range img.DockerImageLayers { + t.Logf(" %s of size %d", l.Name, l.LayerSize) + } + result[imageReference] = append(arr, img) + } + } + + return result, nil +} + +func newTestRegistry( + ctx context.Context, + osClient client.Interface, + storageDriver driver.StorageDriver, + blobrepositorycachettl time.Duration, + pullthrough bool, + useBlobDescriptorCacheProvider bool, +) (*testRegistry, error) { + if storageDriver == nil { + storageDriver = inmemory.New() + } + dockerStorageDriver = storageDriver + + opts := []storage.RegistryOption{ + storage.BlobDescriptorServiceFactory(&blobDescriptorServiceFactory{}), + storage.EnableDelete, + storage.EnableRedirect, + } + if useBlobDescriptorCacheProvider { + cacheProvider := 
cache.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()) + opts = append(opts, storage.BlobDescriptorCacheProvider(cacheProvider)) + } + + reg, err := storage.NewRegistry(ctx, dockerStorageDriver, opts...) + if err != nil { + return nil, err + } + dockerRegistry = reg + + return &testRegistry{ + Namespace: dockerRegistry, + osClient: osClient, + blobrepositorycachettl: blobrepositorycachettl, + pullthrough: pullthrough, + }, nil +} + +type testRegistry struct { + distribution.Namespace + osClient client.Interface + pullthrough bool + blobrepositorycachettl time.Duration +} + +var _ distribution.Namespace = &testRegistry{} + +func (r *testRegistry) Repository(ctx context.Context, ref reference.Named) (distribution.Repository, error) { + repo, err := r.Namespace.Repository(ctx, ref) + if err != nil { + return nil, err + } + + kFakeClient := ktestclient.NewSimpleFake() + + parts := strings.SplitN(ref.Name(), "/", 3) + if len(parts) != 2 { + return nil, fmt.Errorf("failed to parse repository name %q", ref.Name()) + } + + return &repository{ + Repository: repo, + + ctx: ctx, + quotaClient: kFakeClient, + limitClient: kFakeClient, + registryOSClient: r.osClient, + registryAddr: "localhost:5000", + namespace: parts[0], + name: parts[1], + blobrepositorycachettl: r.blobrepositorycachettl, + cachedLayers: cachedLayers, + pullthrough: r.pullthrough, + }, nil +} + +func testNewImageStreamObject(namespace, name, tag, imageName string) *imageapi.ImageStream { + return &imageapi.ImageStream{ + ObjectMeta: kapi.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Status: imageapi.ImageStreamStatus{ + Tags: map[string]imageapi.TagEventList{ + tag: { + Items: []imageapi.TagEvent{ + { + Image: imageName, + }, + }, + }, + }, + }, + } +} + +func testNewDescriptorForLayer(layer imageapi.ImageLayer) distribution.Descriptor { + return distribution.Descriptor{ + Digest: digest.Digest(layer.Name), + MediaType: "application/octet-stream", + Size: 
layer.LayerSize, + } +} + +func compareActions(t *testing.T, testCaseName string, actions []ktestclient.Action, expectedActions []clientAction) { + for i, action := range actions { + if i >= len(expectedActions) { + t.Errorf("[%s] got unexpected client action: %#+v", testCaseName, action) + continue + } + expected := expectedActions[i] + if !action.Matches(expected.verb, expected.resource) { + t.Errorf("[%s] expected client action %s[%s], got instead: %#+v", testCaseName, expected.verb, expected.resource, action) + } + } + for i := len(actions); i < len(expectedActions); i++ { + expected := expectedActions[i] + t.Errorf("[%s] expected action %s[%s] did not happen", testCaseName, expected.verb, expected.resource) + } +} diff --git a/pkg/dockerregistry/testutil/util.go b/pkg/dockerregistry/testutil/util.go new file mode 100644 index 000000000000..5db417e935b2 --- /dev/null +++ b/pkg/dockerregistry/testutil/util.go @@ -0,0 +1,198 @@ +package testutil + +import ( + "archive/tar" + "bytes" + "crypto/rand" + "encoding/json" + "fmt" + "io" + mrand "math/rand" + "net/url" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + distclient "github.com/docker/distribution/registry/client" + + kapi "k8s.io/kubernetes/pkg/api" + + imageapi "github.com/openshift/origin/pkg/image/api" +) + +func NewImageForManifest(t *testing.T, repoName string, rawManifest string, managedByOpenShift bool) (*imageapi.Image, error) { + var versioned manifest.Versioned + if err := json.Unmarshal([]byte(rawManifest), &versioned); err != nil { + return nil, err + } + + _, desc, err := distribution.UnmarshalManifest(versioned.MediaType, []byte(rawManifest)) + if err != nil { + return nil, err + } + + annotations := make(map[string]string) + if managedByOpenShift { 
+ annotations[imageapi.ManagedByOpenShiftAnnotation] = "true" + } + + img := &imageapi.Image{ + ObjectMeta: kapi.ObjectMeta{ + Name: desc.Digest.String(), + Annotations: annotations, + }, + DockerImageReference: fmt.Sprintf("localhost:5000/%s@%s", repoName, desc.Digest.String()), + DockerImageManifest: string(rawManifest), + } + + if err := imageapi.ImageWithMetadata(img); err != nil { + return nil, err + } + + return img, nil +} + +// UploadTestBlob generates a random tar file and uploads it to the given repository. +func UploadTestBlob(t *testing.T, serverURL *url.URL, repoName string) distribution.Descriptor { + rs, ds, err := CreateRandomTarFile() + if err != nil { + t.Fatalf("unexpected error generating test layer file: %v", err) + } + dgst := digest.Digest(ds) + + ctx := context.Background() + ref, err := reference.ParseNamed(repoName) + if err != nil { + t.Fatal(err) + } + t.Logf("server url: %s", serverURL.String()) + repo, err := distclient.NewRepository(ctx, ref, serverURL.String(), nil) + if err != nil { + t.Fatalf("failed to get repository %q: %v", repoName, err) + } + blobs := repo.Blobs(ctx) + wr, err := blobs.Create(ctx) + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(wr, rs) + if err != nil { + t.Fatalf("unexpected error copying to upload: %v", err) + } + desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + t.Fatal(err) + } + + t.Logf("uploaded generated layer of size %d with digest %q\n", n, dgst.String()) + + return desc +} + +// CreateRandomTarFile creates a random tarfile, returning it as an io.ReadSeeker along with its digest. An +// error is returned if there is a problem generating valid content. Inspired by +// github.com/docker/distribution/testutil/tarfile.go. +func CreateRandomTarFile() (rs io.ReadSeeker, dgst digest.Digest, err error) { + nFiles := 2 + target := &bytes.Buffer{} + wr := tar.NewWriter(target) + + // Perturb this on each iteration of the loop below. 
+ header := &tar.Header{ + Mode: 0644, + ModTime: time.Now(), + Typeflag: tar.TypeReg, + Uname: "randocalrissian", + Gname: "cloudcity", + AccessTime: time.Now(), + ChangeTime: time.Now(), + } + + for fileNumber := 0; fileNumber < nFiles; fileNumber++ { + fileSize := mrand.Int63n(1<<9) + 1<<9 + + header.Name = fmt.Sprint(fileNumber) + header.Size = fileSize + + if err := wr.WriteHeader(header); err != nil { + return nil, "", err + } + + randomData := make([]byte, fileSize) + + // Fill up the buffer with some random data. + n, err := rand.Read(randomData) + + if n != len(randomData) { + return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData)) + } + + if err != nil { + return nil, "", err + } + + nn, err := io.Copy(wr, bytes.NewReader(randomData)) + if nn != fileSize { + return nil, "", fmt.Errorf("short copy writing random file to tar") + } + + if err != nil { + return nil, "", err + } + + if err := wr.Flush(); err != nil { + return nil, "", err + } + } + + if err := wr.Close(); err != nil { + return nil, "", err + } + + dgst = digest.FromBytes(target.Bytes()) + + return bytes.NewReader(target.Bytes()), dgst, nil +} + +const SampleImageManifestSchema1 = `{ + "schemaVersion": 1, + "name": "nm/is", + "tag": "latest", + "architecture": "", + "fsLayers": [ + { + "blobSum": "sha256:b2c5513bd934a7efb412c0dd965600b8cb00575b585eaff1cb980b69037fe6cd" + }, + { + "blobSum": "sha256:2dde6f11a89463bf20dba3b47d8b3b6de7cdcc19e50634e95a18dd95c278768d" + } + ], + "history": [ + { + "v1Compatibility": "{\"size\":18407936}" + }, + { + "v1Compatibility": "{\"size\":19387392}" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "5HTY:A24B:L6PG:TQ3G:GMAK:QGKZ:ICD4:S7ZJ:P5JX:UTMP:XZLK:ZXVH", + "kty": "EC", + "x": "j5YnDSyrVIt3NquUKvcZIpbfeD8HLZ7BVBFL4WutRBM", + "y": "PBgFAZ3nNakYN3H9enhrdUrQ_HPYzb8oX5rtJxJo1Y8" + }, + "alg": "ES256" + }, + "signature": 
"1rXiEmWnf9eL7m7Wy3K4l25-Zv2XXl5GgqhM_yjT0ujPmTn0uwfHcCWlweHa9gput3sECj507eQyGpBOF5rD6Q", + "protected": "eyJmb3JtYXRMZW5ndGgiOjQ4NSwiZm9ybWF0VGFpbCI6IkNuMCIsInRpbWUiOiIyMDE2LTA3LTI2VDExOjQ2OjQ2WiJ9" + } + ] +}`