diff --git a/pkg/cmd/dockerregistry/dockerregistry.go b/pkg/cmd/dockerregistry/dockerregistry.go index 5126d0997b33..885f708b9cf1 100644 --- a/pkg/cmd/dockerregistry/dockerregistry.go +++ b/pkg/cmd/dockerregistry/dockerregistry.go @@ -3,14 +3,17 @@ package dockerregistry import ( "crypto/tls" "crypto/x509" + "flag" "fmt" "io" "io/ioutil" "net/http" "os" + "strings" "time" log "github.com/Sirupsen/logrus" + "github.com/docker/go-units" gorillahandlers "github.com/gorilla/handlers" "github.com/Sirupsen/logrus/formatters/logstash" @@ -19,8 +22,10 @@ import ( "github.com/docker/distribution/health" "github.com/docker/distribution/registry/auth" "github.com/docker/distribution/registry/handlers" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver/factory" "github.com/docker/distribution/uuid" - "github.com/docker/distribution/version" + distversion "github.com/docker/distribution/version" _ "github.com/docker/distribution/registry/auth/htpasswd" _ "github.com/docker/distribution/registry/auth/token" @@ -35,7 +40,7 @@ import ( _ "github.com/docker/distribution/registry/storage/driver/s3-aws" _ "github.com/docker/distribution/registry/storage/driver/swift" - "strings" + kubeversion "k8s.io/kubernetes/pkg/version" "github.com/openshift/origin/pkg/cmd/server/crypto" "github.com/openshift/origin/pkg/cmd/util/clientcmd" @@ -43,10 +48,96 @@ import ( "github.com/openshift/origin/pkg/dockerregistry/server/api" "github.com/openshift/origin/pkg/dockerregistry/server/audit" registryconfig "github.com/openshift/origin/pkg/dockerregistry/server/configuration" + "github.com/openshift/origin/pkg/dockerregistry/server/prune" + "github.com/openshift/origin/pkg/version" ) +var 
pruneMode = flag.String("prune", "", "prune blobs from the storage and exit (check, delete)") + +func versionFields() log.Fields { + return log.Fields{ + "distribution_version": distversion.Version, + "kubernetes_version": kubeversion.Get(), + "openshift_version": version.Get(), + } +} + +// ExecutePruner runs the pruner. +func ExecutePruner(configFile io.Reader, dryRun bool) { + config, _, err := registryconfig.Parse(configFile) + if err != nil { + log.Fatalf("error parsing configuration file: %s", err) + } + + // A lot of installations have the 'debug' log level in their config files, + // but it's too verbose for pruning. Therefore we ignore it, but we still + // respect overrides using environment variables. + config.Loglevel = "" + config.Log.Level = configuration.Loglevel(os.Getenv("REGISTRY_LOG_LEVEL")) + if len(config.Log.Level) == 0 { + config.Log.Level = "warning" + } + + ctx := context.Background() + ctx, err = configureLogging(ctx, config) + if err != nil { + log.Fatalf("error configuring logging: %s", err) + } + + startPrune := "start prune" + var registryOptions []storage.RegistryOption + if dryRun { + startPrune += " (dry-run mode)" + } else { + registryOptions = append(registryOptions, storage.EnableDelete) + } + log.WithFields(versionFields()).Info(startPrune) + + registryClient := server.NewRegistryClient(clientcmd.NewConfig().BindToFile()) + + storageDriver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters()) + if err != nil { + log.Fatalf("error creating storage driver: %s", err) + } + + registry, err := storage.NewRegistry(ctx, storageDriver, registryOptions...) 
+ if err != nil { + log.Fatalf("error creating registry: %s", err) + } + + stats, err := prune.Prune(ctx, storageDriver, registry, registryClient, dryRun) + if err != nil { + log.Error(err) + } + if dryRun { + fmt.Printf("Would delete %d blobs\n", stats.Blobs) + fmt.Printf("Would free up %s of disk space\n", units.BytesSize(float64(stats.DiskSpace))) + fmt.Println("Use -prune=delete to actually delete the data") + } else { + fmt.Printf("Deleted %d blobs\n", stats.Blobs) + fmt.Printf("Freed up %s of disk space\n", units.BytesSize(float64(stats.DiskSpace))) + } + if err != nil { + os.Exit(1) + } +} + // Execute runs the Docker registry. func Execute(configFile io.Reader) { + if len(*pruneMode) != 0 { + var dryRun bool + switch *pruneMode { + case "delete": + dryRun = false + case "check": + dryRun = true + default: + log.Fatal("invalid value for the -prune option") + } + ExecutePruner(configFile, dryRun) + return + } + dockerConfig, extraConfig, err := registryconfig.Parse(configFile) if err != nil { log.Fatalf("error parsing configuration file: %s", err) @@ -64,7 +155,7 @@ func Execute(configFile io.Reader) { registryClient := server.NewRegistryClient(clientcmd.NewConfig().BindToFile()) ctx = server.WithRegistryClient(ctx, registryClient) - log.Infof("version=%s", version.Version) + log.WithFields(versionFields()).Info("start registry") // inject a logger into the uuid library. warns us if there is a problem // with uuid generation under low entropy. 
uuid.Loggerf = context.GetLogger(ctx).Warnf diff --git a/pkg/dockerregistry/server/errorblobstore.go b/pkg/dockerregistry/server/errorblobstore.go index 3db686f65dc7..c56eb240a501 100644 --- a/pkg/dockerregistry/server/errorblobstore.go +++ b/pkg/dockerregistry/server/errorblobstore.go @@ -151,7 +151,8 @@ func (f statCrossMountCreateOptions) Apply(v interface{}) error { if err != nil { context.GetLogger(f.ctx).Infof("cannot mount blob %s from repository %s: %v - disabling cross-repo mount", opts.Mount.From.Digest().String(), - opts.Mount.From.Name()) + opts.Mount.From.Name(), + err) opts.Mount.ShouldMount = false return nil } diff --git a/pkg/dockerregistry/server/prune/prune.go b/pkg/dockerregistry/server/prune/prune.go new file mode 100644 index 000000000000..f9dec5edef7c --- /dev/null +++ b/pkg/dockerregistry/server/prune/prune.go @@ -0,0 +1,200 @@ +package prune + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver" + + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openshift/origin/pkg/dockerregistry/server" + imageapi "github.com/openshift/origin/pkg/image/apis/image" +) + +func imageStreamHasManifestDigest(is *imageapi.ImageStream, dgst digest.Digest) bool { + for _, tagEventList := range is.Status.Tags { + for _, tagEvent := range tagEventList.Items { + if tagEvent.Image == string(dgst) { + return true + } + } + } + return false +} + +// Summary is cumulative information about what was pruned. 
+type Summary struct { + Blobs int + DiskSpace int64 +} + +// Prune removes blobs which are not used by Images in OpenShift. +// +// On error, the Summary will contain what was deleted so far. +// +// TODO(dmage): remove layer links to a blob if the blob is removed or it doesn't belong to the ImageStream. +// TODO(dmage): keep young blobs (docker/distribution#2297). +func Prune(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, registryClient server.RegistryClient, dryRun bool) (Summary, error) { + logger := context.GetLogger(ctx) + + repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator) + if !ok { + return Summary{}, fmt.Errorf("unable to convert Namespace to RepositoryEnumerator") + } + + oc, _, err := registryClient.Clients() + if err != nil { + return Summary{}, fmt.Errorf("error getting clients: %v", err) + } + + imageList, err := oc.Images().List(metav1.ListOptions{}) + if err != nil { + return Summary{}, fmt.Errorf("error listing images: %v", err) + } + + inuse := make(map[string]string) + for _, image := range imageList.Items { + // Keep the manifest. + inuse[image.Name] = image.DockerImageReference + + // Keep the config for a schema 2 manifest. + if image.DockerImageManifestMediaType == schema2.MediaTypeManifest { + inuse[image.DockerImageMetadata.ID] = image.DockerImageReference + } + + // Keep image layers. 
+ for _, layer := range image.DockerImageLayers { + inuse[layer.Name] = image.DockerImageReference + } + } + + var stats Summary + + var reposToDelete []string + err = repositoryEnumerator.Enumerate(ctx, func(repoName string) error { + logger.Debugln("Processing repository", repoName) + + named, err := reference.WithName(repoName) + if err != nil { + return fmt.Errorf("failed to parse the repo name %s: %v", repoName, err) + } + + ref, err := imageapi.ParseDockerImageReference(repoName) + if err != nil { + return fmt.Errorf("failed to parse the image reference %s: %v", repoName, err) + } + + is, err := oc.ImageStreams(ref.Namespace).Get(ref.Name, metav1.GetOptions{}) + if kerrors.IsNotFound(err) { + logger.Printf("The image stream %s/%s is not found, will remove the whole repository", ref.Namespace, ref.Name) + + // We cannot delete the repository at this point, because it would break Enumerate. + reposToDelete = append(reposToDelete, repoName) + + return nil + } else if err != nil { + return fmt.Errorf("failed to get the image stream %s: %v", repoName, err) + } + + repository, err := registry.Repository(ctx, named) + if err != nil { + return err + } + + manifestService, err := repository.Manifests(ctx) + if err != nil { + return err + } + + manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator) + if !ok { + return fmt.Errorf("unable to convert ManifestService into ManifestEnumerator") + } + + err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { + if _, ok := inuse[string(dgst)]; ok && imageStreamHasManifestDigest(is, dgst) { + logger.Debugf("Keeping the manifest link %s@%s", repoName, dgst) + return nil + } + + if dryRun { + logger.Printf("Would delete manifest link: %s@%s", repoName, dgst) + return nil + } + + logger.Printf("Deleting manifest link: %s@%s", repoName, dgst) + if err := manifestService.Delete(ctx, dgst); err != nil { + return fmt.Errorf("failed to delete the manifest link %s@%s: %v", repoName, dgst, err) + } + 
+ return nil + }) + if e, ok := err.(driver.PathNotFoundError); ok { + logger.Printf("Skipped manifest link pruning for the repository %s: %v", repoName, e) + } else if err != nil { + return fmt.Errorf("failed to prune manifest links in the repository %s: %v", repoName, err) + } + + return nil + }) + if e, ok := err.(driver.PathNotFoundError); ok { + logger.Warnf("No repositories found: %v", e) + return stats, nil + } else if err != nil { + return stats, err + } + + vacuum := storage.NewVacuum(ctx, storageDriver) + + logger.Debugln("Removing repositories") + for _, repoName := range reposToDelete { + if dryRun { + logger.Printf("Would delete repository: %s", repoName) + continue + } + + if err = vacuum.RemoveRepository(repoName); err != nil { + return stats, fmt.Errorf("unable to remove the repository %s: %v", repoName, err) + } + } + + logger.Debugln("Processing blobs") + blobStatter := registry.BlobStatter() + err = registry.Blobs().Enumerate(ctx, func(dgst digest.Digest) error { + if imageReference, ok := inuse[string(dgst)]; ok { + logger.Debugf("Keeping the blob %s (it belongs to the image %s)", dgst, imageReference) + return nil + } + + desc, err := blobStatter.Stat(ctx, dgst) + if err != nil { + return err + } + + stats.Blobs++ + stats.DiskSpace += desc.Size + + if dryRun { + logger.Printf("Would delete blob: %s", dgst) + return nil + } + + if err := vacuum.RemoveBlob(string(dgst)); err != nil { + return fmt.Errorf("failed to delete the blob %s: %v", dgst, err) + } + + return nil + }) + if e, ok := err.(driver.PathNotFoundError); ok { + logger.Warnf("No blobs found: %v", e) + return stats, nil + } + return stats, err +} diff --git a/test/extended/imageapis/limitrange_admission.go b/test/extended/imageapis/limitrange_admission.go index eba1778342db..d9a343c39dcf 100644 --- a/test/extended/imageapis/limitrange_admission.go +++ b/test/extended/imageapis/limitrange_admission.go @@ -184,10 +184,14 @@ var _ = g.Describe("[Feature:ImageQuota] Image limit 
range", func() { g.It(fmt.Sprintf("should deny an import of a repository exceeding limit on %s resource", imageapi.ResourceImageStreamTags), func() { oc.SetOutputDir(exutil.TestContext.OutputDir) - defer tearDown(oc) maxBulkImport, err := getMaxImagesBulkImportedPerRepository() - o.Expect(err).NotTo(o.HaveOccurred()) + if err != nil { + g.Skip(err.Error()) + return + } + + defer tearDown(oc) s1tag2Image, err := buildAndPushTestImagesTo(oc, "src1st", "tag", maxBulkImport+1) s2tag2Image, err := buildAndPushTestImagesTo(oc, "src2nd", "t", 2) @@ -235,7 +239,7 @@ func buildAndPushTestImagesTo(oc *exutil.CLI, isName string, tagPrefix string, n for i := 1; i <= numberOfImages; i++ { tag := fmt.Sprintf("%s%d", tagPrefix, i) - dgst, err := imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, isName, tag, imageSize, 2, g.GinkgoWriter, true) + dgst, _, err := imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, isName, tag, imageSize, 2, g.GinkgoWriter, true, true) if err != nil { return nil, err } @@ -309,7 +313,7 @@ func bumpLimit(oc *exutil.CLI, resourceName kapi.ResourceName, limit string) (ka func getMaxImagesBulkImportedPerRepository() (int, error) { max := os.Getenv("MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY") if len(max) == 0 { - return 0, fmt.Errorf("MAX_IMAGES_BULK_IMAGES_IMPORTED_PER_REPOSITORY needs to be set") + return 0, fmt.Errorf("MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY is not set") } return strconv.Atoi(max) } diff --git a/test/extended/imageapis/quota_admission.go b/test/extended/imageapis/quota_admission.go index fda559a03fe3..a8d78452cfde 100644 --- a/test/extended/imageapis/quota_admission.go +++ b/test/extended/imageapis/quota_admission.go @@ -60,25 +60,25 @@ var _ = g.Describe("[Feature:ImageQuota] Image resource quota", func() { o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image exceeding quota %v", quota)) - _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "first", "refused", imageSize, 1, 
outSink, false) + _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "first", "refused", imageSize, 1, outSink, false, true) o.Expect(err).NotTo(o.HaveOccurred()) quota, err = bumpQuota(oc, imageapi.ResourceImageStreams, 1) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image below quota %v", quota)) - _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "first", "tag1", imageSize, 1, outSink, true) + _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "first", "tag1", imageSize, 1, outSink, true, true) o.Expect(err).NotTo(o.HaveOccurred()) used, err := waitForResourceQuotaSync(oc, quotaName, quota) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(assertQuotasEqual(used, quota)).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image to existing image stream %v", quota)) - _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "first", "tag2", imageSize, 1, outSink, true) + _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "first", "tag2", imageSize, 1, outSink, true, true) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image exceeding quota %v", quota)) - _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "second", "refused", imageSize, 1, outSink, false) + _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "second", "refused", imageSize, 1, outSink, false, true) quota, err = bumpQuota(oc, imageapi.ResourceImageStreams, 2) o.Expect(err).NotTo(o.HaveOccurred()) @@ -86,14 +86,14 @@ var _ = g.Describe("[Feature:ImageQuota] Image resource quota", func() { o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image below quota %v", quota)) - _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "second", "tag1", imageSize, 1, outSink, true) + _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "second", "tag1", imageSize, 1, outSink, true, true) 
o.Expect(err).NotTo(o.HaveOccurred()) used, err = waitForResourceQuotaSync(oc, quotaName, quota) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(assertQuotasEqual(used, quota)).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image exceeding quota %v", quota)) - _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "third", "refused", imageSize, 1, outSink, false) + _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "third", "refused", imageSize, 1, outSink, false, true) o.Expect(err).NotTo(o.HaveOccurred()) g.By("deleting first image stream") @@ -110,7 +110,7 @@ var _ = g.Describe("[Feature:ImageQuota] Image resource quota", func() { o.Expect(assertQuotasEqual(used, kapi.ResourceList{imageapi.ResourceImageStreams: resource.MustParse("1")})).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("trying to push image below quota %v", quota)) - _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "third", "tag", imageSize, 1, outSink, true) + _, _, err = imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, "third", "tag", imageSize, 1, outSink, true, true) o.Expect(err).NotTo(o.HaveOccurred()) used, err = waitForResourceQuotaSync(oc, quotaName, quota) o.Expect(err).NotTo(o.HaveOccurred()) diff --git a/test/extended/images/hardprune.go b/test/extended/images/hardprune.go new file mode 100644 index 000000000000..47ec668680a6 --- /dev/null +++ b/test/extended/images/hardprune.go @@ -0,0 +1,361 @@ +package images + +import ( + "fmt" + "strings" + + g "github.com/onsi/ginkgo" + o "github.com/onsi/gomega" + + "github.com/docker/distribution/manifest/schema2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + imageapi "github.com/openshift/origin/pkg/image/apis/image" + registryutil "github.com/openshift/origin/test/extended/registry/util" + exutil "github.com/openshift/origin/test/extended/util" + testutil "github.com/openshift/origin/test/util" +) + 
+var _ = g.Describe("[Feature:ImagePrune] Image hard prune", func() { + defer g.GinkgoRecover() + var oc = exutil.NewCLI("prune-images", exutil.KubeConfigPath()) + var originalAcceptSchema2 *bool + + g.JustBeforeEach(func() { + if originalAcceptSchema2 == nil { + accepts, err := registryutil.DoesRegistryAcceptSchema2(oc) + o.Expect(err).NotTo(o.HaveOccurred()) + originalAcceptSchema2 = &accepts + } + + readOnly := false + acceptSchema2 := true + err := registryutil.ConfigureRegistry(oc, + registryutil.RegistryConfiguration{ + ReadOnly: &readOnly, + AcceptSchema2: &acceptSchema2, + }) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace())) + o.Expect(err).NotTo(o.HaveOccurred()) + + defer func(ns string) { oc.SetNamespace(ns) }(oc.Namespace()) + g.By(fmt.Sprintf("give a user %s a right to prune images with %s role", oc.Username(), "system:image-pruner")) + err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "system:image-pruner", oc.Username()).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + err = oc.AsAdmin().SetNamespace(metav1.NamespaceDefault).Run("adm"). 
+ Args("policy", "add-cluster-role-to-user", "system:image-pruner", + fmt.Sprintf("system:serviceaccount:%s:registry", metav1.NamespaceDefault)).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + // make sure to remove all unreferenced blobs from the storage + _, err = RunHardPrune(oc, false) + o.Expect(err).NotTo(o.HaveOccurred()) + }) + + g.AfterEach(func() { + readOnly := false + err := registryutil.ConfigureRegistry(oc, + registryutil.RegistryConfiguration{ + ReadOnly: &readOnly, + AcceptSchema2: originalAcceptSchema2, + }) + o.Expect(err).NotTo(o.HaveOccurred()) + }) + + mergeOrSetExpectedDeletions := func(expected, new *RegistryStorageFiles, merge bool) *RegistryStorageFiles { + if !merge { + return new + } + for _, repo := range new.Repos { + expected.Repos = append(expected.Repos, repo) + } + for name, links := range new.ManifestLinks { + expected.ManifestLinks.Add(name, links...) + } + for name, links := range new.LayerLinks { + expected.LayerLinks.Add(name, links...) + } + for _, blob := range new.Blobs { + expected.Blobs = append(expected.Blobs, blob) + } + return expected + } + + testHardPrune := func(dryRun bool) { + oc.SetOutputDir(exutil.TestContext.OutputDir) + outSink := g.GinkgoWriter + registryURL, err := registryutil.GetDockerRegistryURL(oc) + o.Expect(err).NotTo(o.HaveOccurred()) + + cleanUp := NewCleanUpContainer(oc) + defer cleanUp.Run() + + dClient, err := testutil.NewDockerClient() + o.Expect(err).NotTo(o.HaveOccurred()) + + baseImg1, imageId, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "a", "latest", testImageSize, 2, outSink, true, false) + o.Expect(err).NotTo(o.HaveOccurred()) + cleanUp.AddImage(baseImg1, imageId, "") + baseImg1Spec := fmt.Sprintf("%s/%s/a:latest", registryURL, oc.Namespace()) + + baseImg2, imageId, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "b", "latest", testImageSize, 2, outSink, true, false) + o.Expect(err).NotTo(o.HaveOccurred()) + cleanUp.AddImage(baseImg2, imageId, "") + baseImg2Spec := 
fmt.Sprintf("%s/%s/b:latest", registryURL, oc.Namespace()) + + baseImg3, imageId, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "c", "latest", testImageSize, 2, outSink, true, false) + o.Expect(err).NotTo(o.HaveOccurred()) + cleanUp.AddImage(baseImg3, imageId, "") + baseImg3Spec := fmt.Sprintf("%s/%s/c:latest", registryURL, oc.Namespace()) + + baseImg4, imageId, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "a", "img4", testImageSize, 2, outSink, true, false) + o.Expect(err).NotTo(o.HaveOccurred()) + cleanUp.AddImage(baseImg4, imageId, "") + + childImg1, imageId, err := BuildAndPushChildImage(oc, dClient, baseImg1Spec, "c", "latest", 1, outSink, true) + o.Expect(err).NotTo(o.HaveOccurred()) + cleanUp.AddImage(childImg1, "", "") + childImg2, imageId, err := BuildAndPushChildImage(oc, dClient, baseImg2Spec, "b", "latest", 1, outSink, true) + o.Expect(err).NotTo(o.HaveOccurred()) + cleanUp.AddImage(childImg2, "", "") + childImg3, imageId, err := BuildAndPushChildImage(oc, dClient, baseImg3Spec, "c", "latest", 1, outSink, true) + o.Expect(err).NotTo(o.HaveOccurred()) + cleanUp.AddImage(childImg3, "", "") + + err = oc.Run("tag").Args("--source=istag", "a:latest", "a-tagged:latest").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + imgs := map[string]*imageapi.Image{} + for _, imgName := range []string{baseImg1, baseImg2, baseImg3, baseImg4, childImg1, childImg2, childImg3} { + img, err := oc.AsAdmin().Client().Images().Get(imgName, metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + imgs[imgName] = img + o.Expect(img.DockerImageManifestMediaType).To(o.Equal(schema2.MediaTypeManifest)) + } + + // this shouldn't delete anything + deleted, err := RunHardPrune(oc, dryRun) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(deleted.Len()).To(o.Equal(0)) + + /* TODO: use a persistent storage for the registry to preserve data across re-deployments + readOnly := true + err = registryutil.ConfigureRegistry(oc, 
registryutil.RegistryConfiguration{ReadOnly: &readOnly}) + o.Expect(err).NotTo(o.HaveOccurred()) + */ + + /* imageName | parent | layers | imagestreams + * ---------- | -------- | ------ | ------------ + * baseImg1 | | 1 2 | a a-tagged + * baseImg2 | | 4 5 | b + * baseImg3 | | 7 8 | c + * baseImg4 | | 11 12 | a + * childImg1 | baseImg1 | 1 2 3 | c + * childImg2 | baseImg2 | 4 5 6 | b + * childImg3 | baseImg3 | 7 8 9 | c + */ + + err = oc.AsAdmin().Client().ImageStreamTags(oc.Namespace()).Delete("a", "latest") + o.Expect(err).NotTo(o.HaveOccurred()) + deleted, err = RunHardPrune(oc, dryRun) + o.Expect(err).NotTo(o.HaveOccurred()) + expectedDeletions := &RegistryStorageFiles{ + /* TODO: reenable once we delete layer links as well + LayerLinks: RepoLinks{oc.Namespace()+"/a": []string{ + imgs[baseImg1].DockerImageMetadata.ID, + imgs[baseImg1].DockerImageLayers[0].Name, + imgs[baseImg1].DockerImageLayers[1].Name, + }}, + */ + ManifestLinks: RepoLinks{oc.Namespace() + "/a": []string{baseImg1}}, + } + err = AssertDeletedStorageFiles(deleted, expectedDeletions) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = oc.AsAdmin().Client().Images().Delete(childImg1) + o.Expect(err).NotTo(o.HaveOccurred()) + // The repository a-tagged will not be removed even though it has no tags anymore. + // For the repository to be removed, the image stream itself needs to be deleted. 
+ err = oc.AsAdmin().Client().ImageStreamTags(oc.Namespace()).Delete("a-tagged", "latest") + o.Expect(err).NotTo(o.HaveOccurred()) + deleted, err = RunHardPrune(oc, dryRun) + o.Expect(err).NotTo(o.HaveOccurred()) + expectedDeletions = mergeOrSetExpectedDeletions(expectedDeletions, + &RegistryStorageFiles{ + /* TODO: reenable once we delete layer links as well + LayerLinks: RepoLinks{oc.Namespace()+"/c": []string{ + imgs[childImg1].DockerImageMetadata.ID, + imgs[childImg1].DockerImageLayers[0].Name, + }}, + */ + ManifestLinks: RepoLinks{oc.Namespace() + "/c": []string{childImg1}}, + Blobs: []string{ + childImg1, // manifest blob + imgs[childImg1].DockerImageMetadata.ID, // manifest config + imgs[childImg1].DockerImageLayers[0].Name, + }, + }, + dryRun) + err = AssertDeletedStorageFiles(deleted, expectedDeletions) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = oc.AsAdmin().Client().Images().Delete(baseImg1) + o.Expect(err).NotTo(o.HaveOccurred()) + deleted, err = RunHardPrune(oc, dryRun) + o.Expect(err).NotTo(o.HaveOccurred()) + expectedDeletions = mergeOrSetExpectedDeletions(expectedDeletions, + &RegistryStorageFiles{ + Blobs: []string{ + baseImg1, // manifest blob + imgs[baseImg1].DockerImageMetadata.ID, // manifest config + imgs[baseImg1].DockerImageLayers[0].Name, + imgs[baseImg1].DockerImageLayers[1].Name, + }, + }, + dryRun) + err = AssertDeletedStorageFiles(deleted, expectedDeletions) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = oc.AsAdmin().Client().Images().Delete(childImg2) + o.Expect(err).NotTo(o.HaveOccurred()) + deleted, err = RunHardPrune(oc, dryRun) + o.Expect(err).NotTo(o.HaveOccurred()) + expectedDeletions = mergeOrSetExpectedDeletions(expectedDeletions, + &RegistryStorageFiles{ + /* TODO: reenable once we delete layer links as well + LayerLinks: RepoLinks{oc.Namespace()+"/b": []string{ + imgs[childImg2].DockerImageMetadata.ID, + imgs[childImg2].DockerImageLayers[0].Name, + }}, + */ + ManifestLinks: RepoLinks{oc.Namespace() + "/b": 
[]string{childImg2}}, + Blobs: []string{ + childImg2, // manifest blob + imgs[childImg2].DockerImageMetadata.ID, // manifest config + imgs[childImg2].DockerImageLayers[0].Name, + }, + }, + dryRun) + err = AssertDeletedStorageFiles(deleted, expectedDeletions) + o.Expect(err).NotTo(o.HaveOccurred()) + + // untag both baseImg2 and childImg2 + err = oc.AsAdmin().Client().ImageStreams(oc.Namespace()).Delete("b") + o.Expect(err).NotTo(o.HaveOccurred()) + delete(expectedDeletions.ManifestLinks, oc.Namespace()+"/b") + err = oc.AsAdmin().Client().Images().Delete(baseImg2) + o.Expect(err).NotTo(o.HaveOccurred()) + deleted, err = RunHardPrune(oc, dryRun) + o.Expect(err).NotTo(o.HaveOccurred()) + expectedDeletions = mergeOrSetExpectedDeletions(expectedDeletions, + &RegistryStorageFiles{ + /* TODO: reenable once we delete layer links as well + LayerLinks: RepoLinks{oc.Namespace()+"/b": []string{ + imgs[baseImg2].DockerImageMetadata.ID, + imgs[baseImg2].DockerImageLayers[0].Name, + imgs[baseImg2].DockerImageLayers[1].Name, + }}, + */ + Repos: []string{oc.Namespace() + "/b"}, + Blobs: []string{ + baseImg2, // manifest blob + imgs[baseImg2].DockerImageMetadata.ID, // manifest config + imgs[baseImg2].DockerImageLayers[0].Name, + imgs[baseImg2].DockerImageLayers[1].Name, + }, + }, + dryRun) + err = AssertDeletedStorageFiles(deleted, expectedDeletions) + o.Expect(err).NotTo(o.HaveOccurred()) + + /* updated is/image table + * imageName | parent | layers | imagestreams + * ---------- | -------- | ------ | ------------ + * baseImg3 | | 7 8 | c + * baseImg4 | | 11 12 | a + * childImg3 | baseImg3 | 7 8 9 | c + */ + + // delete baseImg3 using soft prune + output, err := oc.WithoutNamespace().Run("adm").Args( + "prune", "images", "--keep-tag-revisions=1", "--keep-younger-than=0").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(output).To(o.ContainSubstring(baseImg3)) + o.Expect(output).To(o.ContainSubstring(imgs[baseImg3].DockerImageMetadata.ID)) + for _, layer := range 
imgs[baseImg3].DockerImageLayers { + o.Expect(output).To(o.ContainSubstring(layer.Name)) + } + o.Expect(output).NotTo(o.ContainSubstring(baseImg4)) + o.Expect(output).NotTo(o.ContainSubstring(childImg3)) + + // there should be nothing left for hard pruner to delete + deleted, err = RunHardPrune(oc, dryRun) + o.Expect(err).NotTo(o.HaveOccurred()) + if !dryRun { + expectedDeletions = &RegistryStorageFiles{} + } + err = AssertDeletedStorageFiles(deleted, expectedDeletions) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = oc.AsAdmin().Client().Images().Delete(childImg3) + o.Expect(err).NotTo(o.HaveOccurred()) + deleted, err = RunHardPrune(oc, dryRun) + o.Expect(err).NotTo(o.HaveOccurred()) + expectedDeletions = mergeOrSetExpectedDeletions(expectedDeletions, + &RegistryStorageFiles{ + /* TODO: reenable once we delete layer links as well + LayerLinks: RepoLinks{oc.Namespace()+"/b": []string{ + imgs[baseImg2].DockerImageMetadata.ID, + imgs[baseImg2].DockerImageLayers[0].Name, + imgs[baseImg2].DockerImageLayers[1].Name, + }}, + */ + ManifestLinks: RepoLinks{oc.Namespace() + "/c": []string{childImg3}}, + Blobs: []string{ + childImg3, + imgs[childImg3].DockerImageMetadata.ID, // manifest config + imgs[childImg3].DockerImageLayers[0].Name, + }, + }, + dryRun) + err = AssertDeletedStorageFiles(deleted, expectedDeletions) + o.Expect(err).NotTo(o.HaveOccurred()) + + /* updated is/image table + * imageName | parent | layers | imagestreams + * ---------- | -------- | ------ | ------------ + * baseImg3 | | 7 8 | c + * baseImg4 | | 11 12 | a + */ + + assertImageBlobsPresent := func(present bool, img *imageapi.Image) { + for _, layer := range img.DockerImageLayers { + o.Expect(pathExistsInRegistry(oc, strings.Split(blobToPath("", layer.Name), "/")...)). + To(o.Equal(present)) + } + o.Expect(pathExistsInRegistry(oc, strings.Split(blobToPath("", img.DockerImageMetadata.ID), "/")...)). 
+ To(o.Equal(present)) + o.Expect(pathExistsInRegistry(oc, strings.Split(blobToPath("", img.Name), "/")...)). + To(o.Equal(present)) + } + + for _, img := range []string{baseImg1, childImg1, baseImg2, childImg2} { + assertImageBlobsPresent(dryRun, imgs[img]) + } + for _, img := range []string{baseImg3, baseImg4} { + assertImageBlobsPresent(true, imgs[img]) + } + } + + g.It("should show orphaned blob deletions in dry-run mode", func() { + testHardPrune(true) + }) + + g.It("should delete orphaned blobs", func() { + testHardPrune(false) + }) + +}) diff --git a/test/extended/images/helper.go b/test/extended/images/helper.go index 43f220bc1025..1bbf40de4e19 100644 --- a/test/extended/images/helper.go +++ b/test/extended/images/helper.go @@ -3,6 +3,7 @@ package images import ( "bytes" cryptorand "crypto/rand" + "crypto/tls" "fmt" "io" "io/ioutil" @@ -14,14 +15,21 @@ import ( "strings" "time" - "github.com/docker/distribution/digest" dockerclient "github.com/fsouza/go-dockerclient" + g "github.com/onsi/ginkgo" + + "github.com/docker/distribution/digest" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kerrors "k8s.io/apimachinery/pkg/util/errors" + knet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/pkg/client/retry" "github.com/openshift/origin/pkg/client" imageapi "github.com/openshift/origin/pkg/image/apis/image" + registryutil "github.com/openshift/origin/test/extended/registry/util" exutil "github.com/openshift/origin/test/extended/util" testutil "github.com/openshift/origin/test/util" ) @@ -30,10 +38,92 @@ const ( // There are coefficients used to multiply layer data size to get a rough size of uploaded blob. 
layerSizeMultiplierForDocker18 = 2.0 layerSizeMultiplierForLatestDocker = 0.8 + defaultLayerSize = 1024 digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") digestSha256EmptyTar = digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") + + dockerRegistryBinary = "dockerregistry" + registryGCLauncherScript = `#!/bin/sh +bin="$(which %[1]s 2>/dev/null)" +if [ -z "${bin}" -a -e "/usr/bin/%[1]s" ]; then + bin="/usr/bin/%[1]s" +elif [ -z "${bin}" -a -e "/%[1]s" ]; then + bin="/%[1]s" +fi +export REGISTRY_LOG_LEVEL=info +exec "${bin}" -prune=%[2]s +` ) +// RepoLinks maps digests of layer links to a repository +type RepoLinks map[string][]string + +func (rl RepoLinks) Add(repo string, links ...string) { + rl[repo] = append(rl[repo], links...) +} + +type RegistryStorageFiles struct { + Repos []string + ManifestLinks RepoLinks + LayerLinks RepoLinks + Blobs []string +} + +// ToPaths returns a list of paths of files contained in _sfs_ corresponding to their location in registry +// pod's storage under _root_ directory. +func (sfs *RegistryStorageFiles) ToPaths(root string) []string { + result := []string{} + if sfs == nil { + return result + } + for _, repo := range sfs.Repos { + result = append(result, repoToPath(root, repo)) + } + for repo, links := range sfs.ManifestLinks { + for _, link := range links { + result = append(result, repoLinkToPath(root, "manifest", repo, link)) + } + } + for repo, links := range sfs.LayerLinks { + for _, link := range links { + result = append(result, repoLinkToPath(root, "layer", repo, link)) + } + } + for _, blob := range sfs.Blobs { + result = append(result, blobToPath(root, blob)) + } + return result +} + +// Len returns a number of files contained in the sfs container. 
+func (sfs *RegistryStorageFiles) Len() int { + if sfs == nil { + return 0 + } + count := len(sfs.Blobs) + len(sfs.Repos) + for _, links := range sfs.ManifestLinks { + count += len(links) + } + for _, links := range sfs.LayerLinks { + count += len(links) + } + return count +} + +func repoToPath(root, repository string) string { + return path.Join(root, fmt.Sprintf("repositories/%s", repository)) +} +func repoLinkToPath(root, fileType, repository, dgst string) string { + d := digest.Digest(dgst) + return path.Join(root, fmt.Sprintf("repositories/%s/_%ss/%s/%s/link", + repository, fileType, d.Algorithm(), d.Hex())) +} +func blobToPath(root, dgst string) string { + d := digest.Digest(dgst) + return path.Join(root, fmt.Sprintf("blobs/%s/%s/%s/data", + d.Algorithm(), d.Hex()[0:2], d.Hex())) +} + var ( pushDeniedErrorMessages []string = []string{ // docker < 1.10 @@ -143,6 +233,7 @@ func BuildAndPushImageOfSizeWithBuilder( // Docker daemon directly. Built image is stored as an image stream tag :. If shouldSucceed is // false, a push is expected to fail with a denied error. Note the size is only approximate. Resulting image // size will be different depending on used compression algorithm and metadata overhead. +// Returned is an image digest, its ID (docker daemon's internal representation) and an error if any. 
func BuildAndPushImageOfSizeWithDocker( oc *exutil.CLI, dClient *dockerclient.Client, @@ -151,14 +242,85 @@ func BuildAndPushImageOfSizeWithDocker( numberOfLayers int, outSink io.Writer, shouldSucceed bool, -) (imageDigest string, err error) { - registryURL, err := GetDockerRegistryURL(oc) + removeBuiltImage bool, +) (string, string, error) { + imageName, image, err := buildImageOfSizeWithDocker( + oc, + dClient, + "scratch", + name, tag, + size, + numberOfLayers, + outSink) if err != nil { - return "", err + return "", "", err + } + + digest, err := pushImageWithDocker( + oc, + dClient, + image, + imageName, tag, + outSink, + shouldSucceed, + removeBuiltImage) + if err != nil { + return "", "", err + } + + return digest, image.ID, nil +} + +// BuildAndPushChildImage tries to build and push an image of given name and number of layers. It instructs +// Docker daemon directly. Built image is stored as an image stream tag :. +// Returned is an image digest, its ID (docker daemon's internal representation) and an error if any. 
+func BuildAndPushChildImage( + oc *exutil.CLI, + dClient *dockerclient.Client, + parent string, + name, tag string, + numberOfNewLayers int, + outSink io.Writer, + removeBuiltImage bool, +) (string, string, error) { + imageName, image, err := buildImageOfSizeWithDocker( + oc, dClient, + parent, name, tag, + defaultLayerSize, + numberOfNewLayers, + outSink) + if err != nil { + return "", "", err + } + + digest, err := pushImageWithDocker( + oc, dClient, + image, imageName, tag, + outSink, + true, + removeBuiltImage) + if err != nil { + return "", "", err + } + + return digest, image.ID, nil +} + +func buildImageOfSizeWithDocker( + oc *exutil.CLI, + dClient *dockerclient.Client, + parent, name, tag string, + size uint64, + numberOfLayers int, + outSink io.Writer, +) (string, *dockerclient.Image, error) { + registryURL, err := registryutil.GetDockerRegistryURL(oc) + if err != nil { + return "", nil, err } tempDir, err := ioutil.TempDir("", "name-build") if err != nil { - return "", err + return "", nil, err } dataSize := calculateRoughDataSize(oc.Stdout(), size, numberOfLayers) @@ -168,12 +330,12 @@ func BuildAndPushImageOfSizeWithDocker( for i := 1; i <= numberOfLayers; i++ { blobName := fmt.Sprintf("data%d", i) if err := createRandomBlob(path.Join(tempDir, blobName), dataSize); err != nil { - return "", err + return "", nil, err } lines[i] = fmt.Sprintf("COPY %s /%s", blobName, blobName) } if err := ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte(strings.Join(lines, "\n")+"\n"), 0644); err != nil { - return "", err + return "", nil, err } imageName := fmt.Sprintf("%s/%s/%s", registryURL, oc.Namespace(), name) @@ -187,28 +349,49 @@ func BuildAndPushImageOfSizeWithDocker( OutputStream: outSink, }) if err != nil { - return "", fmt.Errorf("failed to build %q image: %v", taggedName, err) + return "", nil, fmt.Errorf("failed to build %q image: %v", taggedName, err) } image, err := dClient.InspectImage(taggedName) if err != nil { - return + return "", nil, err + 
} + + return imageName, image, nil +} + +func pushImageWithDocker( + oc *exutil.CLI, + dClient *dockerclient.Client, + image *dockerclient.Image, + name, tag string, + outSink io.Writer, + shouldSucceed bool, + removeBuiltImage bool, +) (string, error) { + if removeBuiltImage { + defer dClient.RemoveImageExtended(image.ID, dockerclient.RemoveImageOptions{Force: true}) } - defer dClient.RemoveImageExtended(image.ID, dockerclient.RemoveImageOptions{Force: true}) + var imageDigest string if len(image.RepoDigests) == 1 { imageDigest = image.RepoDigests[0] } out, err := oc.Run("whoami").Args("-t").Output() if err != nil { - return + return "", err } token := strings.TrimSpace(out) + registryURL, err := registryutil.GetDockerRegistryURL(oc) + if err != nil { + return "", err + } + var buf bytes.Buffer err = dClient.PushImage(dockerclient.PushImageOptions{ - Name: imageName, + Name: name, Tag: tag, Registry: registryURL, OutputStream: &buf, @@ -221,42 +404,30 @@ func BuildAndPushImageOfSizeWithDocker( out = buf.String() outSink.Write([]byte(out)) - if shouldSucceed { - if err != nil { - return "", fmt.Errorf("Got unexpected push error: %v", err) + if !shouldSucceed { + if err == nil { + return "", fmt.Errorf("Push unexpectedly succeeded") } - if len(imageDigest) == 0 { - match := rePushedImageDigest.FindStringSubmatch(out) - if len(match) < 2 { - return imageDigest, fmt.Errorf("Failed to parse digest") - } - imageDigest = match[1] + if !reExpectedDeniedError.MatchString(err.Error()) { + return "", fmt.Errorf("Failed to match expected %q in: %q", reExpectedDeniedError.String(), err.Error()) } - return - } - if err == nil { - return "", fmt.Errorf("Push unexpectedly succeeded") - } - if !reExpectedDeniedError.MatchString(err.Error()) { - return "", fmt.Errorf("Failed to match expected %q in: %q", reExpectedDeniedError.String(), err.Error()) + // push failed with expected error -> no results + return "", nil } - return "", nil -} - -// GetDockerRegistryURL returns a cluster 
URL of internal docker registry if available. -func GetDockerRegistryURL(oc *exutil.CLI) (string, error) { - svc, err := oc.AdminKubeClient().Core().Services("default").Get("docker-registry", metav1.GetOptions{}) if err != nil { - return "", err + return "", fmt.Errorf("Got unexpected push error: %v", err) } - url := svc.Spec.ClusterIP - for _, p := range svc.Spec.Ports { - url = fmt.Sprintf("%s:%d", url, p.Port) - break + if len(imageDigest) == 0 { + match := rePushedImageDigest.FindStringSubmatch(out) + if len(match) < 2 { + return imageDigest, fmt.Errorf("Failed to parse digest") + } + imageDigest = match[1] } - return url, nil + + return imageDigest, nil } // createRandomBlob creates a random data with bytes from `letters` in order to let docker take advantage of @@ -344,11 +515,7 @@ func MirrorBlobInRegistry(oc *exutil.CLI, dgst digest.Digest, repository string, if presentGlobally || inRepository { return fmt.Errorf("blob %q is already present in the registry", dgst.String()) } - registryURL, err := GetDockerRegistryURL(oc) - if err != nil { - return err - } - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/v2/%s/blobs/%s", registryURL, repository, dgst.String()), nil) + registryURL, err := registryutil.GetDockerRegistryURL(oc) if err != nil { return err } @@ -356,12 +523,37 @@ func MirrorBlobInRegistry(oc *exutil.CLI, dgst digest.Digest, repository string, if err != nil { return err } - req.Header.Set("range", "bytes=0-1") - req.Header.Set("Authorization", "Bearer "+token) - c := http.Client{} - resp, err := c.Do(req) - if err != nil { - return err + + c := http.Client{ + Transport: knet.SetTransportDefaults(&http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }), + } + + peekAtBlob := func(schema string) (*http.Request, *http.Response, error) { + req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s/v2/%s/blobs/%s", schema, registryURL, repository, dgst.String()), nil) + if err != nil { + return nil, nil, err + } + 
req.Header.Set("range", "bytes=0-1") + req.Header.Set("Authorization", "Bearer "+token) + resp, err := c.Do(req) + if err != nil { + fmt.Fprintf(g.GinkgoWriter, "failed to %s %s: %v (%#+v)\n", req.Method, req.URL, err, err) + return nil, nil, err + } + return req, resp, nil + } + + var ( + req *http.Request + resp *http.Response + getErr error + ) + if req, resp, getErr = peekAtBlob("https"); getErr != nil { + if req, resp, getErr = peekAtBlob("http"); getErr != nil { + return getErr + } } defer resp.Body.Close() @@ -382,7 +574,8 @@ func IsEmptyDigest(dgst digest.Digest) bool { func pathExistsInRegistry(oc *exutil.CLI, pthComponents ...string) (bool, error) { pth := path.Join(append([]string{"/registry/docker/registry/v2"}, pthComponents...)...) - cmd := fmt.Sprintf("[ -e %s ] && echo exists || echo missing", pth) + cmd := fmt.Sprintf("test -e '%s' && echo exists || echo missing", pth) + defer func(ns string) { oc.SetNamespace(ns) }(oc.Namespace()) out, err := oc.SetNamespace(metav1.NamespaceDefault).AsAdmin().Run("rsh").Args( "dc/docker-registry", "/bin/sh", "-c", cmd).Output() if err != nil { @@ -423,3 +616,207 @@ func IsBlobStoredInRegistry( } return present, presentInRepository, err } + +// RunHardPrune executes into a docker-registry pod and runs a garbage collector. The docker-registry is +// assumed to be in a read-only mode and using filesystem as a storage driver. It returns lists of deleted +// files. 
+func RunHardPrune(oc *exutil.CLI, dryRun bool) (*RegistryStorageFiles, error) { + pod, err := registryutil.GetRegistryPod(oc.AsAdmin().KubeClient().Core()) + if err != nil { + return nil, err + } + + defer func(ns string) { oc.SetNamespace(ns) }(oc.Namespace()) + output, err := oc.AsAdmin().SetNamespace(metav1.NamespaceDefault).Run("env").Args("--list", "dc/docker-registry").Output() + if err != nil { + return nil, err + } + + deleted := &RegistryStorageFiles{ + Repos: []string{}, + ManifestLinks: make(RepoLinks), + LayerLinks: make(RepoLinks), + Blobs: []string{}, + } + + err = wait.ExponentialBackoff(retry.DefaultBackoff, func() (bool, error) { + pruneType := "delete" + if dryRun { + pruneType = "check" + } + out, err := oc.SetNamespace(metav1.NamespaceDefault).AsAdmin(). + Run("exec").Args("--stdin", pod.Name, "--", "/bin/sh", "-s"). + InputString(fmt.Sprintf(registryGCLauncherScript, dockerRegistryBinary, pruneType)).Output() + if exitError, ok := err.(*exutil.ExitError); ok && strings.Contains(exitError.StdErr, "unable to upgrade connection") { + fmt.Fprintf(g.GinkgoWriter, "failed to execute into registry pod %s: %v\n", pod.Name, err) + return false, nil + } + output = out + return true, err + }) + if len(output) > 0 { + fmt.Fprintf(g.GinkgoWriter, "prune output: \n%s\n\n", output) + } + + if err != nil { + return nil, err + } + + const reCommon = `(?im)\bmsg="(?:would\s+)?delet(?:e|ing)\s+(?:the\s+)?` + var reDeleteRepository = regexp.MustCompile(reCommon + `repo(?:sitory)?:\s+` + + `(?:[^"]+/)?` /* root path of the repository */ + + `([^"/]+/[^"/]+)/?"` /* repository */) + var reDeleteRepositoryLink = regexp.MustCompile(reCommon + + `(manifest|layer)(?:\s+link)?:\s+` /* type of link's destination file */ + + `([^#@]+)[#@]` /* repository */ + + `([^"]+)"` /* digest */) + var reDeleteBlob = regexp.MustCompile(reCommon + `blob:\s+` + + `(?:[^"]+/)?` /* root path to the blob */ + + `([^":/]+)` /* digest algorithm */ + + `(?:/[^"/]{2}/|:)` /* directory whose 
name matches the first two characters of digest hex */ +
+		`([^":/]+)"` /* digest hex */)
+	var reDeletedBlobs = regexp.MustCompile(`(?im)^(?:would\s+)?deleted?\s+(\d+)\s+blobs$`)
+
+	for _, match := range reDeleteRepository.FindAllStringSubmatch(output, -1) {
+		deleted.Repos = append(deleted.Repos, match[1])
+	}
+	for _, match := range reDeleteRepositoryLink.FindAllStringSubmatch(output, -1) {
+		fileType, repository, digest := match[1], match[2], match[3]
+
+		switch strings.ToLower(fileType) {
+		case "manifest":
+			deleted.ManifestLinks.Add(repository, digest)
+		case "layer":
+			deleted.LayerLinks.Add(repository, digest)
+		default:
+			fmt.Fprintf(g.GinkgoWriter, "unrecognized type of deleted file: %s\n", match[1])
+			continue
+		}
+	}
+	for _, match := range reDeleteBlob.FindAllStringSubmatch(output, -1) {
+		deleted.Blobs = append(deleted.Blobs, fmt.Sprintf("%s:%s", match[1], match[2]))
+	}
+
+	match := reDeletedBlobs.FindStringSubmatch(output)
+	if match == nil {
+		return nil, fmt.Errorf("missing the number of deleted blobs in the output")
+	}
+
+	deletedBlobCount, err := strconv.Atoi(match[1])
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse deleted number of blobs %q: %v", match[1], err)
+	}
+	if deletedBlobCount != len(deleted.Blobs) {
+		return nil, fmt.Errorf("numbers of deleted blobs doesn't match %d != %d", len(deleted.Blobs), deletedBlobCount)
+	}
+
+	return deleted, nil
+}
+
+// AssertDeletedStorageFiles compares lists of deleted files against expected. An error will be generated for
+// each entry present in just one of these sets.
+func AssertDeletedStorageFiles(deleted, expected *RegistryStorageFiles) error {
+	var errors []error
+	deletedSet := sets.NewString(deleted.ToPaths("")...)
+	expectedPaths := sets.NewString(expected.ToPaths("")...)
+	verifiedSet := sets.NewString()
+
+	for pth := range expectedPaths {
+		if deletedSet.Has(pth) {
+			verifiedSet.Insert(pth)
+		} else {
+			errors = append(errors, fmt.Errorf("expected path %s was not deleted", pth))
+		}
+	}
+	for pth := range deletedSet {
+		if !expectedPaths.Has(pth) {
+			errors = append(errors, fmt.Errorf("path %s got unexpectedly deleted", pth))
+		}
+	}
+
+	return kerrors.NewAggregate(errors)
+}
+
+// CleanUpContainer holds names of image names, docker image IDs, imagestreamtags and imagestreams that shall
+// be deleted at the end of the test.
+type CleanUpContainer struct {
+	OC *exutil.CLI
+
+	imageNames sets.String
+	imageIDs   sets.String
+	isTags     sets.String
+	isNames    sets.String
+}
+
+// NewCleanUpContainer creates a new instance of CleanUpContainer.
+func NewCleanUpContainer(oc *exutil.CLI) *CleanUpContainer {
+	return &CleanUpContainer{
+		OC:         oc,
+		imageNames: sets.NewString(),
+		imageIDs:   sets.NewString(),
+		isTags:     sets.NewString(),
+		isNames:    sets.NewString(),
+	}
+}
+
+// AddImage marks given image name, docker image id and imagestreamtag as candidates for deletion.
+func (c *CleanUpContainer) AddImage(name, id, isTag string) {
+	if len(name) > 0 {
+		c.imageNames.Insert(name)
+	}
+	if len(id) > 0 {
+		c.imageIDs.Insert(id)
+	}
+	if len(isTag) > 0 {
+		c.isTags.Insert(isTag)
+	}
+}
+
+// AddImageStream marks the given image stream name for removal.
+func (c *CleanUpContainer) AddImageStream(isName string) {
+	c.isNames.Insert(isName)
+}
+
+// Run deletes all the marked objects.
+func (c *CleanUpContainer) Run() { + for image := range c.imageNames { + err := c.OC.AsAdmin().Client().Images().Delete(image) + if err != nil { + fmt.Fprintf(g.GinkgoWriter, "clean up of image %q failed: %v\n", image, err) + } + } + for isName := range c.isNames { + err := c.OC.AsAdmin().Client().ImageStreams(c.OC.Namespace()).Delete(isName) + if err != nil { + fmt.Fprintf(g.GinkgoWriter, "clean up of image stream %q failed: %v\n", isName, err) + } + } + for isTag := range c.isTags { + parts := strings.SplitN(isTag, ":", 2) + if len(parts) != 2 { + fmt.Fprintf(g.GinkgoWriter, "cannot remove invalid istag %q", isTag) + continue + } + err := c.OC.Client().ImageStreamTags(c.OC.Namespace()).Delete(parts[0], parts[1]) + if err != nil { + fmt.Fprintf(g.GinkgoWriter, "clean up of image stream tag %q failed: %v\n", isTag, err) + } + } + + if len(c.imageIDs) == 0 { + return + } + + dClient, err := testutil.NewDockerClient() + if err != nil { + fmt.Fprintf(g.GinkgoWriter, "failed to create a new docker client: %v\n", err) + return + } + + for id := range c.imageIDs { + err := dClient.RemoveImageExtended(id, dockerclient.RemoveImageOptions{Force: true}) + if err != nil { + fmt.Fprintf(g.GinkgoWriter, "failed to remove image %q: %v\n", id, err) + } + } +} diff --git a/test/extended/images/prune.go b/test/extended/images/prune.go index 5173dea01eee..f48eb1d50e71 100644 --- a/test/extended/images/prune.go +++ b/test/extended/images/prune.go @@ -2,9 +2,7 @@ package images import ( "fmt" - "regexp" "sort" - "strconv" "strings" "time" @@ -17,8 +15,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - dockerregistryserver "github.com/openshift/origin/pkg/dockerregistry/server" imageapi "github.com/openshift/origin/pkg/image/apis/image" + registryutil "github.com/openshift/origin/test/extended/registry/util" exutil "github.com/openshift/origin/test/extended/util" testutil "github.com/openshift/origin/test/util" ) @@ 
-30,19 +28,15 @@ const ( externalImageReference = "docker.io/openshift/origin-release:golang-1.4" ) -type cleanUpContainer struct { - imageNames []string - isNames []string -} - var _ = g.Describe("[Feature:ImagePrune] Image prune", func() { defer g.GinkgoRecover() var oc = exutil.NewCLI("prune-images", exutil.KubeConfigPath()) + var originalAcceptSchema2 *bool g.JustBeforeEach(func() { if originalAcceptSchema2 == nil { - accepts, err := doesRegistryAcceptSchema2(oc) + accepts, err := registryutil.DoesRegistryAcceptSchema2(oc) o.Expect(err).NotTo(o.HaveOccurred()) originalAcceptSchema2 = &accepts } @@ -59,14 +53,14 @@ var _ = g.Describe("[Feature:ImagePrune] Image prune", func() { g.JustBeforeEach(func() { if *originalAcceptSchema2 { g.By("ensure the registry does not accept schema 2") - err := ensureRegistryAcceptsSchema2(oc, false) + err := registryutil.EnsureRegistryAcceptsSchema2(oc, false) o.Expect(err).NotTo(o.HaveOccurred()) } }) g.AfterEach(func() { if *originalAcceptSchema2 { - err := ensureRegistryAcceptsSchema2(oc, true) + err := registryutil.EnsureRegistryAcceptsSchema2(oc, true) o.Expect(err).NotTo(o.HaveOccurred()) } }) @@ -78,14 +72,14 @@ var _ = g.Describe("[Feature:ImagePrune] Image prune", func() { g.JustBeforeEach(func() { if !*originalAcceptSchema2 { g.By("ensure the registry accepts schema 2") - err := ensureRegistryAcceptsSchema2(oc, true) + err := registryutil.EnsureRegistryAcceptsSchema2(oc, true) o.Expect(err).NotTo(o.HaveOccurred()) } }) g.AfterEach(func() { if !*originalAcceptSchema2 { - err := ensureRegistryAcceptsSchema2(oc, false) + err := registryutil.EnsureRegistryAcceptsSchema2(oc, false) o.Expect(err).NotTo(o.HaveOccurred()) } }) @@ -94,9 +88,17 @@ var _ = g.Describe("[Feature:ImagePrune] Image prune", func() { }) g.Describe("with default --all flag", func() { + g.JustBeforeEach(func() { + if !*originalAcceptSchema2 { + g.By("ensure the registry accepts schema 2") + err := registryutil.EnsureRegistryAcceptsSchema2(oc, true) + 
o.Expect(err).NotTo(o.HaveOccurred()) + } + }) + g.AfterEach(func() { if !*originalAcceptSchema2 { - err := ensureRegistryAcceptsSchema2(oc, false) + err := registryutil.EnsureRegistryAcceptsSchema2(oc, false) o.Expect(err).NotTo(o.HaveOccurred()) } }) @@ -105,9 +107,17 @@ var _ = g.Describe("[Feature:ImagePrune] Image prune", func() { }) g.Describe("with --all=false flag", func() { + g.JustBeforeEach(func() { + if !*originalAcceptSchema2 { + g.By("ensure the registry accepts schema 2") + err := registryutil.EnsureRegistryAcceptsSchema2(oc, true) + o.Expect(err).NotTo(o.HaveOccurred()) + } + }) + g.AfterEach(func() { if !*originalAcceptSchema2 { - err := ensureRegistryAcceptsSchema2(oc, false) + err := registryutil.EnsureRegistryAcceptsSchema2(oc, false) o.Expect(err).NotTo(o.HaveOccurred()) } }) @@ -133,23 +143,23 @@ func testPruneImages(oc *exutil.CLI, schemaVersion int) { oc.SetOutputDir(exutil.TestContext.OutputDir) outSink := g.GinkgoWriter - cleanUp := cleanUpContainer{} - defer tearDownPruneImagesTest(oc, &cleanUp) + cleanUp := NewCleanUpContainer(oc) + defer cleanUp.Run() dClient, err := testutil.NewDockerClient() o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("build two images using Docker and push them as schema %d", schemaVersion)) - imgPruneName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, isName, "latest", testImageSize, 2, outSink, true) + imgPruneName, _, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, isName, "latest", testImageSize, 2, outSink, true, true) o.Expect(err).NotTo(o.HaveOccurred()) - cleanUp.imageNames = append(cleanUp.imageNames, imgPruneName) - cleanUp.isNames = append(cleanUp.isNames, isName) - pruneSize, err := getRegistryStorageSize(oc) + cleanUp.AddImage(imgPruneName, "", "") + cleanUp.AddImageStream(isName) + pruneSize, err := registryutil.GetRegistryStorageSize(oc) o.Expect(err).NotTo(o.HaveOccurred()) - imgKeepName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, isName, "latest", testImageSize, 2, 
outSink, true) + imgKeepName, _, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, isName, "latest", testImageSize, 2, outSink, true, true) o.Expect(err).NotTo(o.HaveOccurred()) - cleanUp.imageNames = append(cleanUp.imageNames, imgKeepName) - keepSize, err := getRegistryStorageSize(oc) + cleanUp.AddImage(imgKeepName, "", "") + keepSize, err := registryutil.GetRegistryStorageSize(oc) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(pruneSize < keepSize).To(o.BeTrue()) @@ -185,7 +195,7 @@ func testPruneImages(oc *exutil.CLI, schemaVersion int) { } } - noConfirmSize, err := getRegistryStorageSize(oc) + noConfirmSize, err := registryutil.GetRegistryStorageSize(oc) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(noConfirmSize).To(o.Equal(keepSize)) @@ -221,7 +231,7 @@ func testPruneImages(oc *exutil.CLI, schemaVersion int) { o.Expect(inRepository).To(o.BeTrue()) } - confirmSize, err := getRegistryStorageSize(oc) + confirmSize, err := registryutil.GetRegistryStorageSize(oc) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("confirming storage size: sizeOfKeepImage=%d <= sizeAfterPrune=%d < beforePruneSize=%d", imgKeep.DockerImageMetadata.Size, confirmSize, keepSize)) o.Expect(confirmSize >= imgKeep.DockerImageMetadata.Size).To(o.BeTrue()) @@ -231,23 +241,23 @@ func testPruneImages(oc *exutil.CLI, schemaVersion int) { } func testPruneAllImages(oc *exutil.CLI, setAllImagesToFalse bool, schemaVersion int) { - isName := "prune" + isName := fmt.Sprintf("prune-schema%d-all-images-%t", schemaVersion, setAllImagesToFalse) repository := oc.Namespace() + "/" + isName oc.SetOutputDir(exutil.TestContext.OutputDir) outSink := g.GinkgoWriter - cleanUp := cleanUpContainer{} - defer tearDownPruneImagesTest(oc, &cleanUp) + cleanUp := NewCleanUpContainer(oc) + defer cleanUp.Run() dClient, err := testutil.NewDockerClient() o.Expect(err).NotTo(o.HaveOccurred()) g.By("build one image using Docker and push it") - managedImageName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, 
isName, "latest", testImageSize, 2, outSink, true) + managedImageName, _, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, isName, "latest", testImageSize, 2, outSink, true, true) o.Expect(err).NotTo(o.HaveOccurred()) - cleanUp.imageNames = append(cleanUp.imageNames, managedImageName) - cleanUp.isNames = append(cleanUp.isNames, isName) + cleanUp.AddImage(managedImageName, "", "") + cleanUp.AddImageStream(isName) o.Expect(err).NotTo(o.HaveOccurred()) managedImage, err := oc.AsAdmin().Client().Images().Get(managedImageName, metav1.GetOptions{}) @@ -255,8 +265,8 @@ func testPruneAllImages(oc *exutil.CLI, setAllImagesToFalse bool, schemaVersion externalImage, blobdgst, err := importImageAndMirrorItsSmallestBlob(oc, externalImageReference, "origin-release:latest") o.Expect(err).NotTo(o.HaveOccurred()) - cleanUp.imageNames = append(cleanUp.imageNames, externalImage.Name) - cleanUp.isNames = append(cleanUp.isNames, "origin-release") + cleanUp.AddImage(externalImage.Name, "", "") + cleanUp.AddImageStream("origin-release") checkAdminPruneOutput := func(output string, dryRun bool) { o.Expect(output).To(o.ContainSubstring(managedImage.Name)) @@ -315,88 +325,6 @@ func testPruneAllImages(oc *exutil.CLI, setAllImagesToFalse bool, schemaVersion checkAdminPruneOutput(output, false) } -func tearDownPruneImagesTest(oc *exutil.CLI, cleanUp *cleanUpContainer) { - for _, image := range cleanUp.imageNames { - err := oc.AsAdmin().Client().Images().Delete(image) - if err != nil { - fmt.Fprintf(g.GinkgoWriter, "clean up of image %q failed: %v\n", image, err) - } - } - for _, isName := range cleanUp.isNames { - err := oc.AsAdmin().Client().ImageStreams(oc.Namespace()).Delete(isName) - if err != nil { - fmt.Fprintf(g.GinkgoWriter, "clean up of image stream %q failed: %v\n", isName, err) - } - } -} - -func getRegistryStorageSize(oc *exutil.CLI) (int64, error) { - ns := oc.Namespace() - defer oc.SetNamespace(ns) - out, err := 
oc.SetNamespace(metav1.NamespaceDefault).AsAdmin().Run("rsh").Args("dc/docker-registry", "du", "--bytes", "--summarize", "/registry/docker/registry").Output() - if err != nil { - return 0, err - } - m := regexp.MustCompile(`^\d+`).FindString(out) - if len(m) == 0 { - return 0, fmt.Errorf("failed to parse du output: %s", out) - } - - size, err := strconv.ParseInt(m, 10, 64) - if err != nil { - return 0, fmt.Errorf("failed to parse du output: %s", m) - } - - return size, nil -} - -func doesRegistryAcceptSchema2(oc *exutil.CLI) (bool, error) { - ns := oc.Namespace() - defer oc.SetNamespace(ns) - env, err := oc.SetNamespace(metav1.NamespaceDefault).AsAdmin().Run("env").Args("dc/docker-registry", "--list").Output() - if err != nil { - return false, err - } - - return strings.Contains(env, fmt.Sprintf("%s=true", dockerregistryserver.AcceptSchema2EnvVar)), nil -} - -// ensureRegistryAcceptsSchema2 checks whether the registry is configured to accept manifests V2 schema 2 or -// not. If the result doesn't match given accept argument, registry's deployment config is updated accordingly -// and the function blocks until the registry is re-deployed and ready for new requests. 
-func ensureRegistryAcceptsSchema2(oc *exutil.CLI, accept bool) error { - ns := oc.Namespace() - oc = oc.SetNamespace(metav1.NamespaceDefault).AsAdmin() - defer oc.SetNamespace(ns) - env, err := oc.Run("env").Args("dc/docker-registry", "--list").Output() - if err != nil { - return err - } - - value := fmt.Sprintf("%s=%t", dockerregistryserver.AcceptSchema2EnvVar, accept) - if strings.Contains(env, value) { - if accept { - g.By("docker-registry is already configured to accept schema 2") - } else { - g.By("docker-registry is already configured to refuse schema 2") - } - return nil - } - - dc, err := oc.Client().DeploymentConfigs(metav1.NamespaceDefault).Get("docker-registry", metav1.GetOptions{}) - if err != nil { - return err - } - - g.By("configuring Docker registry to accept schema 2") - err = oc.Run("env").Args("dc/docker-registry", value).Execute() - if err != nil { - return fmt.Errorf("failed to update registry environment: %v", err) - } - - return exutil.WaitForDeploymentConfig(oc.AdminKubeClient(), oc.AdminClient(), metav1.NamespaceDefault, "docker-registry", dc.Status.LatestVersion+1, oc) -} - type byLayerSize []imageapi.ImageLayer func (bls byLayerSize) Len() int { return len(bls) } diff --git a/test/extended/registry/registry.go b/test/extended/registry/registry.go index 9ae8b6ca4039..c892dcf239f3 100644 --- a/test/extended/registry/registry.go +++ b/test/extended/registry/registry.go @@ -1,7 +1,6 @@ package registry import ( - "fmt" "time" g "github.com/onsi/ginkgo" @@ -15,6 +14,7 @@ import ( regclient "github.com/openshift/origin/pkg/dockerregistry" imageapi "github.com/openshift/origin/pkg/image/apis/image" imagesutil "github.com/openshift/origin/test/extended/images" + registryutil "github.com/openshift/origin/test/extended/registry/util" exutil "github.com/openshift/origin/test/extended/util" testutil "github.com/openshift/origin/test/util" ) @@ -29,15 +29,10 @@ var _ = 
g.Describe("[Conformance][registry][migration] manifest migration from e defer g.GinkgoRecover() var oc = exutil.NewCLI("registry-migration", exutil.KubeConfigPath()) - // needs to be run at the top of each It; cannot be run in AfterEach which is run after the project - // is destroyed - tearDown := func(oc *exutil.CLI) { - deleteTestImages(oc) - } - g.It("registry can get access to manifest [local]", func() { oc.SetOutputDir(exutil.TestContext.OutputDir) - defer tearDown(oc) + cleanUp := imagesutil.NewCleanUpContainer(oc) + defer cleanUp.Run() g.By("set up policy for registry to have anonymous access to images") err := oc.Run("policy").Args("add-role-to-user", "registry-viewer", "system:anonymous").Execute() @@ -46,12 +41,13 @@ var _ = g.Describe("[Conformance][registry][migration] manifest migration from e dClient, err := testutil.NewDockerClient() o.Expect(err).NotTo(o.HaveOccurred()) - registryURL, err := imagesutil.GetDockerRegistryURL(oc) + registryURL, err := registryutil.GetDockerRegistryURL(oc) o.Expect(err).NotTo(o.HaveOccurred()) g.By("pushing image...") - imageDigest, err := imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, repoName, tagName, imageSize, 1, g.GinkgoWriter, true) + imageDigest, _, err := imagesutil.BuildAndPushImageOfSizeWithDocker(oc, dClient, repoName, tagName, imageSize, 1, g.GinkgoWriter, true, true) o.Expect(err).NotTo(o.HaveOccurred()) + cleanUp.AddImage(imageDigest, "", "") g.By("checking that the image converted...") image, err := oc.AsAdmin().Client().Images().Get(imageDigest, metav1.GetOptions{}) @@ -139,20 +135,3 @@ func waitForImageUpdate(oc *exutil.CLI, image *imageapi.Image) error { return (image.ResourceVersion < newImage.ResourceVersion), nil }) } - -// deleteTestImages deletes test images built in current and shared -// namespaces. It also deletes shared projects. 
-func deleteTestImages(oc *exutil.CLI) { - g.By(fmt.Sprintf("Deleting images and image streams in project %q", oc.Namespace())) - iss, err := oc.AdminClient().ImageStreams(oc.Namespace()).List(metav1.ListOptions{}) - if err != nil { - return - } - for _, is := range iss.Items { - for _, history := range is.Status.Tags { - for i := range history.Items { - oc.AdminClient().Images().Delete(history.Items[i].Image) - } - } - } -} diff --git a/test/extended/registry/signature.go b/test/extended/registry/signature.go index 34c418c4e63e..5b323a46e595 100644 --- a/test/extended/registry/signature.go +++ b/test/extended/registry/signature.go @@ -6,7 +6,7 @@ import ( g "github.com/onsi/ginkgo" o "github.com/onsi/gomega" - imagesutil "github.com/openshift/origin/test/extended/images" + registryutil "github.com/openshift/origin/test/extended/registry/util" exutil "github.com/openshift/origin/test/extended/util" e2e "k8s.io/kubernetes/test/e2e/framework" @@ -33,7 +33,7 @@ var _ = g.Describe("[imageapis][registry] image signature workflow", func() { o.Expect(err).NotTo(o.HaveOccurred()) g.By("looking up the openshift registry URL") - registryURL, err := imagesutil.GetDockerRegistryURL(oc) + registryURL, err := registryutil.GetDockerRegistryURL(oc) signerImage := fmt.Sprintf("%s/%s/signer:latest", registryURL, oc.Namespace()) signedImage := fmt.Sprintf("%s/%s/signed:latest", registryURL, oc.Namespace()) o.Expect(err).NotTo(o.HaveOccurred()) diff --git a/test/extended/registry/util/util.go b/test/extended/registry/util/util.go new file mode 100644 index 000000000000..a6c3b6542a19 --- /dev/null +++ b/test/extended/registry/util/util.go @@ -0,0 +1,172 @@ +package images + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + + g "github.com/onsi/ginkgo" + //o "github.com/onsi/gomega" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + kapiv1 
"k8s.io/kubernetes/pkg/api/v1" + kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1" + + dockerregistryserver "github.com/openshift/origin/pkg/dockerregistry/server" + exutil "github.com/openshift/origin/test/extended/util" +) + +const ( + readOnlyEnvVar = "REGISTRY_STORAGE_MAINTENANCE_READONLY" + defaultAcceptSchema2 = true +) + +// GetDockerRegistryURL returns a cluster URL of internal docker registry if available. +func GetDockerRegistryURL(oc *exutil.CLI) (string, error) { + svc, err := oc.AdminKubeClient().Core().Services("default").Get("docker-registry", metav1.GetOptions{}) + if err != nil { + return "", err + } + url := svc.Spec.ClusterIP + for _, p := range svc.Spec.Ports { + url = fmt.Sprintf("%s:%d", url, p.Port) + break + } + return url, nil +} + +// GetRegistryStorageSize returns a number of bytes occupied by registry's data on its filesystem. +func GetRegistryStorageSize(oc *exutil.CLI) (int64, error) { + defer func(ns string) { oc.SetNamespace(ns) }(oc.Namespace()) + out, err := oc.SetNamespace(metav1.NamespaceDefault).AsAdmin().Run("rsh").Args( + "dc/docker-registry", "du", "--bytes", "--summarize", "/registry/docker/registry").Output() + if err != nil { + return 0, err + } + m := regexp.MustCompile(`^\d+`).FindString(out) + if len(m) == 0 { + return 0, fmt.Errorf("failed to parse du output: %s", out) + } + + size, err := strconv.ParseInt(m, 10, 64) + if err != nil { + return 0, fmt.Errorf("failed to parse du output: %s", m) + } + + return size, nil +} + +// DoesRegistryAcceptSchema2 returns true if the integrated registry is configured to accept manifest V2 +// schema 2. 
+func DoesRegistryAcceptSchema2(oc *exutil.CLI) (bool, error) {
+	// Query the deployment config's environment in the "default" namespace,
+	// restoring the caller's namespace on return.
+	defer func(ns string) { oc.SetNamespace(ns) }(oc.Namespace())
+	env, err := oc.SetNamespace(metav1.NamespaceDefault).AsAdmin().Run("env").Args("dc/docker-registry", "--list").Output()
+	if err != nil {
+		return defaultAcceptSchema2, err
+	}
+
+	// Only trust an explicit setting; anything other than "=true" counts as false.
+	if strings.Contains(env, fmt.Sprintf("%s=", dockerregistryserver.AcceptSchema2EnvVar)) {
+		return strings.Contains(env, fmt.Sprintf("%s=true", dockerregistryserver.AcceptSchema2EnvVar)), nil
+	}
+
+	return defaultAcceptSchema2, nil
+}
+
+// RegistryConfiguration holds desired configuration options for the integrated registry. *nil* stands for
+// "no change".
+type RegistryConfiguration struct {
+	ReadOnly      *bool
+	AcceptSchema2 *bool
+}
+
+// byAgeDesc sorts pods by creation timestamp, newest first.
+type byAgeDesc []kapiv1.Pod
+
+func (ba byAgeDesc) Len() int      { return len(ba) }
+func (ba byAgeDesc) Swap(i, j int) { ba[i], ba[j] = ba[j], ba[i] }
+func (ba byAgeDesc) Less(i, j int) bool {
+	// j created before i means i is younger, so i sorts first.
+	return ba[j].CreationTimestamp.Before(ba[i].CreationTimestamp)
+}
+
+// GetRegistryPod returns the youngest registry pod deployed.
+func GetRegistryPod(podsGetter kcoreclient.PodsGetter) (*kapiv1.Pod, error) {
+	podList, err := podsGetter.Pods(metav1.NamespaceDefault).List(metav1.ListOptions{
+		LabelSelector: labels.SelectorFromSet(labels.Set{"deploymentconfig": "docker-registry"}).String(),
+	})
+	if err != nil {
+		return nil, err
+	}
+	if len(podList.Items) == 0 {
+		return nil, fmt.Errorf("failed to find any docker-registry pod")
+	}
+
+	sort.Sort(byAgeDesc(podList.Items))
+
+	return &podList.Items[0], nil
+}
+
+// ConfigureRegistry re-deploys the registry pod if its configuration doesn't match the desiredState. The
+// function blocks until the registry is ready.
+func ConfigureRegistry(oc *exutil.CLI, desiredState RegistryConfiguration) error {
+	// Operate on the "default" namespace and restore the caller's namespace
+	// on return.
+	defer func(ns string) { oc.SetNamespace(ns) }(oc.Namespace())
+	oc = oc.SetNamespace(metav1.NamespaceDefault).AsAdmin()
+	env, err := oc.Run("env").Args("dc/docker-registry", "--list").Output()
+	if err != nil {
+		return err
+	}
+
+	var envOverrides []string
+
+	if desiredState.AcceptSchema2 != nil {
+		// The variable may be unset; assume the default unless the non-default
+		// value is explicitly present in the environment listing.
+		current := defaultAcceptSchema2
+		if strings.Contains(env, fmt.Sprintf("%s=%t", dockerregistryserver.AcceptSchema2EnvVar, !defaultAcceptSchema2)) {
+			current = !defaultAcceptSchema2
+		}
+		if current != *desiredState.AcceptSchema2 {
+			// Renamed from `new` to avoid shadowing the builtin.
+			override := fmt.Sprintf("%s=%t", dockerregistryserver.AcceptSchema2EnvVar, *desiredState.AcceptSchema2)
+			envOverrides = append(envOverrides, override)
+		}
+	}
+	if desiredState.ReadOnly != nil {
+		value := fmt.Sprintf("%s=%s", readOnlyEnvVar, makeReadonlyEnvValue(*desiredState.ReadOnly))
+		if !strings.Contains(env, value) {
+			envOverrides = append(envOverrides, value)
+		}
+	}
+	if len(envOverrides) == 0 {
+		g.By("docker-registry is already in the desired state of configuration")
+		return nil
+	}
+
+	dc, err := oc.Client().DeploymentConfigs(metav1.NamespaceDefault).Get("docker-registry", metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	// Setting env below triggers a new rollout; wait for the next version.
+	waitForVersion := dc.Status.LatestVersion + 1
+
+	err = oc.Run("env").Args(append([]string{"dc/docker-registry"}, envOverrides...)...).Execute()
+	if err != nil {
+		// Report the overrides we attempted to apply; the original passed
+		// &waitForVersion to %s, which printed a pointer address.
+		return fmt.Errorf("failed to update registry's environment with %s: %v", strings.Join(envOverrides, " "), err)
+	}
+	return exutil.WaitForDeploymentConfig(
+		oc.AdminKubeClient(),
+		oc.AdminClient(),
+		metav1.NamespaceDefault,
+		"docker-registry",
+		waitForVersion,
+		oc)
+}
+
+// EnsureRegistryAcceptsSchema2 checks whether the registry is configured to accept manifests V2 schema 2 or
+// not.
If the result doesn't match given accept argument, registry's deployment config will be updated
+// accordingly and the function will block until the registry has been re-deployed and ready for new
+// requests.
+func EnsureRegistryAcceptsSchema2(oc *exutil.CLI, accept bool) error {
+	return ConfigureRegistry(oc, RegistryConfiguration{AcceptSchema2: &accept})
+}
+
+// makeReadonlyEnvValue renders the JSON payload stored in readOnlyEnvVar,
+// e.g. {"enabled":true}.
+func makeReadonlyEnvValue(on bool) string {
+	return fmt.Sprintf(`{"enabled":%t}`, on)
+}
diff --git a/test/extended/util/cli.go b/test/extended/util/cli.go
index f88e970584d2..5088c2fe1696 100644
--- a/test/extended/util/cli.go
+++ b/test/extended/util/cli.go
@@ -134,9 +134,9 @@ func (c *CLI) SetNamespace(ns string) *CLI {
 }
 
 // WithoutNamespace instructs the command should be invoked without adding --namespace parameter
-func (c *CLI) WithoutNamespace() *CLI {
+func (c CLI) WithoutNamespace() *CLI {
 	c.withoutNamespace = true
-	return c
+	return &c
 }
 
 // SetOutputDir change the default output directory for temporary files