Addressed most of vrothberg comments #4

Merged 1 commit on Sep 12, 2019
70 changes: 36 additions & 34 deletions copy/copy.go
@@ -19,6 +19,7 @@ import (
"github.com/containers/image/pkg/blobinfocache"
"github.com/containers/ocicrypt"
encconfig "github.com/containers/ocicrypt/config"
ociencspec "github.com/containers/ocicrypt/spec"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"

"github.com/containers/image/pkg/compression"
@@ -41,24 +42,24 @@ type digestingReader struct {
expectedDigest digest.Digest
validationFailed bool
validationSucceeded bool
skipValidation bool
validateDigests bool
}

var (
// ErrDecryptParamsMissing is returned when necessary decryption parameters are missing
ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present")
)

// maxParallelDownloads is used to limit the maxmimum number of parallel
// downloads. Let's follow Firefox by limiting it to 6.
var maxParallelDownloads = 6
// maxParallelDownloads is used to limit the maxmimum number of parallel
// downloads. Let's follow Firefox by limiting it to 6.
maxParallelDownloads = 6
)

// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
// (neither is set if EOF is never reached).
func newDigestingReader(source io.Reader, expectedDigest digest.Digest, skipValidation bool) (*digestingReader, error) {
func newDigestingReader(source io.Reader, expectedDigest digest.Digest, validateDigests bool) (*digestingReader, error) {
var digester digest.Digester
if !skipValidation {
if validateDigests {
if err := expectedDigest.Validate(); err != nil {
return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
}
@@ -73,13 +74,13 @@ func newDigestingReader(source io.Reader, expectedDigest digest.Digest, skipVali
digester: digester,
expectedDigest: expectedDigest,
validationFailed: false,
skipValidation: skipValidation,
validateDigests: validateDigests,
}, nil
}

func (d *digestingReader) Read(p []byte) (int, error) {
n, err := d.source.Read(p)
if !d.skipValidation {
if d.validateDigests {
if n > 0 {
if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil {
// Coverage: This should not happen, the hash.Hash interface requires
@@ -89,7 +90,7 @@ func (d *digestingReader) Read(p []byte) (int, error) {
}
}
}
if err == io.EOF && !d.skipValidation {
if err == io.EOF && d.validateDigests {
actualDigest := d.digester.Digest()
if actualDigest != d.expectedDigest {
d.validationFailed = true
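A quick orientation note on the flag flip above: the boolean is now positive (validateDigests) rather than negative (skipValidation), so callers ask for validation explicitly. Below is a minimal sketch of how the wrapper behaves, written in the style of copy_test.go and assuming it sits in the copy package; exampleDigestValidation is a hypothetical helper, not part of the PR.

```go
package copy

import (
	"bytes"
	"io"
	"io/ioutil"

	"github.com/opencontainers/go-digest"
)

// exampleDigestValidation wraps a stream so that, once EOF is reached,
// the bytes that passed through are checked against the expected digest.
func exampleDigestValidation() error {
	payload := []byte("hello world")
	expected := digest.FromBytes(payload) // sha256 digest of the payload

	// validateDigests=true: the reader hashes everything it forwards.
	reader, err := newDigestingReader(bytes.NewReader(payload), expected, true)
	if err != nil {
		return err
	}
	if _, err := io.Copy(ioutil.Discard, reader); err != nil {
		return err // a digest mismatch surfaces here as a non-EOF error
	}
	// reader.validationSucceeded is now true; a corrupted stream would have
	// set reader.validationFailed instead.
	return nil
}
```

With validateDigests=false (used later when no digest is known yet), the reader is a pure pass-through and neither flag is ever set.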
@@ -141,11 +142,14 @@ type Options struct {
// manifest MIME type of image set by user. "" is default and means use autodetection to determine the manifest MIME type
ForceManifestMIMEType string
CheckAuthorization bool
// If non-nil indicates that image should be encrypted.
// If EncryptConfig is non-nil, it indicates that an image should be encrypted.
// The encryption options are derived from how the EncryptConfig object was constructed.
EncryptConfig *encconfig.EncryptConfig
// EncryptLayers represents the list of layers to encrypt.
// If nil, don't encrypt any layers
// If nil, don't encrypt any layers.
// If non-nil and len==0, denotes encrypt all layers.
// integers in the slice represent 0-indexed layer indices, with support for negative
// indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer.
EncryptLayers *[]int
}
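For context, here is a hypothetical caller-side sketch of the two new fields. encCfg, srcCtx, destCtx, srcRef, destRef, policyContext and ctx are assumed to exist and are not shown, and building the *encconfig.EncryptConfig itself is left to ocicrypt's configuration helpers.

```go
// Encrypt only the last (top-most) layer while copying. A nil EncryptLayers
// means "encrypt nothing"; a non-nil empty slice means "encrypt every layer".
encryptLast := []int{-1} // negative indices count from the end

if _, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
	SourceCtx:      srcCtx,
	DestinationCtx: destCtx,
	EncryptConfig:  encCfg,       // *encconfig.EncryptConfig prepared via ocicrypt
	EncryptLayers:  &encryptLast, // see the field comment above
}); err != nil {
	return err
}
```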

@@ -316,11 +320,9 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli

// Set up encryption structs
var (
//ec *encconfig.EncryptConfig
dc *encconfig.DecryptConfig
)
if options.SourceCtx.CryptoConfig != nil {
//ec = options.SourceCtx.CryptoConfig.EncryptConfig
dc = options.SourceCtx.CryptoConfig.DecryptConfig
}

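The decryption side mirrors this: the private-key material travels on the source types.SystemContext, and only its DecryptConfig is consulted here. A hedged sketch of that wiring, assuming the CryptoConfig field used in this diff and a decCfg (*encconfig.DecryptConfig) built elsewhere from the caller's keys:

```go
srcCtx := &types.SystemContext{
	CryptoConfig: &encconfig.CryptoConfig{
		DecryptConfig: decCfg, // EncryptConfig is ignored on the source side
	},
}
```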
@@ -542,14 +544,19 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// Create layer Encryption map
encLayerBitmap := map[int]bool{}
var encryptAll bool
encryptLayers := ic.encryptLayers != nil
if ic.encryptLayers != nil {
encryptAll = len(*ic.encryptLayers) == 0
totalLayers := len(srcInfos)
for _, l := range *ic.encryptLayers {
// if layer is negative, it is reverse indexed.
encLayerBitmap[(totalLayers+l)%totalLayers] = true
}

if encryptAll {
for i := 0; i < len(srcInfos); i++ {
encLayerBitmap[i] = true
}
}
}

func() { // A scope for defer
@@ -558,8 +565,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {

for i, srcLayer := range srcInfos {
copySemaphore.Acquire(ctx, 1)
toEncrypt := encryptLayers && (encryptAll || encLayerBitmap[i])
go copyLayerHelper(i, srcLayer, toEncrypt, progressPool)
go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool)
}

// Wait for all layers to be copied
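The selection logic above reduces to a small pure function. The following standalone sketch (a hypothetical helper, not part of the PR) reproduces it, including the (totalLayers+l)%totalLayers trick that maps negative indices onto layers counted from the end; note the trick assumes entries stay within [-totalLayers, totalLayers).

```go
package main

import "fmt"

// layersToEncrypt mirrors the bitmap construction in copyLayers: a nil spec
// encrypts nothing, a non-nil empty spec encrypts everything, and negative
// entries index from the end of the layer list.
func layersToEncrypt(totalLayers int, spec *[]int) map[int]bool {
	selected := map[int]bool{}
	if spec == nil {
		return selected
	}
	if len(*spec) == 0 {
		for i := 0; i < totalLayers; i++ {
			selected[i] = true
		}
		return selected
	}
	for _, l := range *spec {
		selected[(totalLayers+l)%totalLayers] = true // -1 -> last layer, etc.
	}
	return selected
}

func main() {
	spec := []int{0, -1}
	fmt.Println(layersToEncrypt(5, &spec)) // map[0:true 4:true]
}
```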
@@ -739,28 +745,26 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// the symmetric key with the provided private keys. If we fail, we will
// not allow the image to be provisioned.
if ic.checkAuthorization {
if srcInfo.MediaType == manifest.DockerV2Schema2LayerGzipEncMediaType ||
srcInfo.MediaType == manifest.DockerV2Schema2LayerEncMediaType {
if srcInfo.MediaType == ociencspec.MediaTypeLayerGzipEnc ||
srcInfo.MediaType == ociencspec.MediaTypeLayerEnc {

if ic.decryptConfig == nil {
return types.BlobInfo{}, "", errors.New("Necessary DecryptParameters not present")
}

dc := ic.decryptConfig

newDesc := ocispec.Descriptor{
Annotations: srcInfo.Annotations,
}

_, _, err := ocicrypt.DecryptLayer(dc, nil, newDesc, true)
_, _, err := ocicrypt.DecryptLayer(ic.decryptConfig, nil, newDesc, true)
if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "Image authentication failed for the digest %+v", srcInfo.Digest)
}
}
}

cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
// Diffs are needed if we are encrypting an image
// Diffs are needed if we are encrypting an image
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || ic.encryptConfig != nil

// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
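In other words, the CheckAuthorization path above is a dry-run key unwrap: no layer bytes are fetched, because passing a nil reader with unwrapOnly=true asks ocicrypt only to unwrap the wrapped layer key from the descriptor annotations using the configured private keys. A fragment-style sketch (srcInfo and decryptConfig are assumed, as in the surrounding code):

```go
// Probe whether any of our private keys can unwrap this layer's key.
desc := ocispec.Descriptor{
	Annotations: srcInfo.Annotations, // carries the org.opencontainers.image.enc.* entries
}
if _, _, err := ocicrypt.DecryptLayer(decryptConfig, nil, desc, true); err != nil {
	// No key matched: the caller is not authorized to use this image.
	return err
}
```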
@@ -894,39 +898,37 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr

var decrypted bool
var err error
if srcInfo.MediaType == manifest.DockerV2Schema2LayerGzipEncMediaType ||
srcInfo.MediaType == manifest.DockerV2Schema2LayerEncMediaType {
if srcInfo.MediaType == ociencspec.MediaTypeLayerGzipEnc ||
srcInfo.MediaType == ociencspec.MediaTypeLayerEnc {

if c.decryptConfig == nil {
return types.BlobInfo{}, ErrDecryptParamsMissing
}

dc := c.decryptConfig

newDesc := ocispec.Descriptor{
Annotations: srcInfo.Annotations,
}

var d digest.Digest
srcStream, d, err = ocicrypt.DecryptLayer(dc, srcStream, newDesc, false)
srcStream, d, err = ocicrypt.DecryptLayer(c.decryptConfig, srcStream, newDesc, false)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "Error decrypting layer %s", srcInfo.Digest)
}

srcInfo.Digest = d
srcInfo.Size = -1
switch srcInfo.MediaType {
case manifest.DockerV2Schema2LayerGzipEncMediaType:
case ociencspec.MediaTypeLayerGzipEnc:
srcInfo.MediaType = ocispec.MediaTypeImageLayerGzip
case manifest.DockerV2Schema2LayerEncMediaType:
case ociencspec.MediaTypeLayerEnc:
srcInfo.MediaType = ocispec.MediaTypeImageLayer
}
decrypted = true
}

skipDigestValidation := srcInfo.Digest == ""
validateDigest := srcInfo.Digest != ""

digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest, skipDigestValidation)
digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest, validateDigest)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
}
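After decryption the blob no longer matches its manifest entry, so the code above substitutes the digest returned by ocicrypt, resets the size to -1, and maps the encrypted media types back to the plain OCI ones; this is also why the Docker-specific +enc constants are removed from manifest/manifest.go further down, in favor of the ocicrypt spec constants. A standalone sketch of that mapping (the encryption path at the end of this file applies the inverse):

```go
package main

import (
	"fmt"

	ociencspec "github.com/containers/ocicrypt/spec"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// decryptedMediaType returns the media type a layer reports once its
// encryption envelope has been stripped.
func decryptedMediaType(mt string) string {
	switch mt {
	case ociencspec.MediaTypeLayerGzipEnc:
		return ocispec.MediaTypeImageLayerGzip // ...tar+gzip
	case ociencspec.MediaTypeLayerEnc:
		return ocispec.MediaTypeImageLayer // ...tar
	default:
		return mt // not an encrypted layer; leave it untouched
	}
}

func main() {
	fmt.Println(decryptedMediaType(ociencspec.MediaTypeLayerGzipEnc))
}
```

Digest validation is then only requested when a digest is actually known, which is what the validateDigest := srcInfo.Digest != "" line above captures.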
@@ -993,9 +995,9 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
if toEncrypt {
switch srcInfo.MediaType {
case manifest.DockerV2Schema2LayerMediaType, ocispec.MediaTypeImageLayerGzip:
encryptMediaType = manifest.DockerV2Schema2LayerGzipEncMediaType
encryptMediaType = ociencspec.MediaTypeLayerGzipEnc
case ocispec.MediaTypeImageLayer:
encryptMediaType = manifest.DockerV2Schema2LayerEncMediaType
encryptMediaType = ociencspec.MediaTypeLayerEnc
}

if encryptMediaType != "" && c.encryptConfig != nil {
8 changes: 4 additions & 4 deletions copy/copy_test.go
@@ -26,7 +26,7 @@ func TestNewDigestingReader(t *testing.T) {
"sha256:0", // Invalid hex value
"sha256:01", // Invalid length of hex value
} {
_, err := newDigestingReader(source, input, false)
_, err := newDigestingReader(source, input, true)
assert.Error(t, err, input.String())
}
}
@@ -43,7 +43,7 @@ func TestDigestingReaderRead(t *testing.T) {
// Valid input
for _, c := range cases {
source := bytes.NewReader(c.input)
reader, err := newDigestingReader(source, c.digest, false)
reader, err := newDigestingReader(source, c.digest, true)
require.NoError(t, err, c.digest.String())
dest := bytes.Buffer{}
n, err := io.Copy(&dest, reader)
@@ -56,7 +56,7 @@ func TestDigestingReaderRead(t *testing.T) {
// Modified input
for _, c := range cases {
source := bytes.NewReader(bytes.Join([][]byte{c.input, []byte("x")}, nil))
reader, err := newDigestingReader(source, c.digest, false)
reader, err := newDigestingReader(source, c.digest, true)
require.NoError(t, err, c.digest.String())
dest := bytes.Buffer{}
_, err = io.Copy(&dest, reader)
@@ -67,7 +67,7 @@ func TestDigestingReaderRead(t *testing.T) {
// Truncated input
for _, c := range cases {
source := bytes.NewReader(c.input)
reader, err := newDigestingReader(source, c.digest, false)
reader, err := newDigestingReader(source, c.digest, true)
require.NoError(t, err, c.digest.String())
if len(c.input) != 0 {
dest := bytes.Buffer{}
4 changes: 0 additions & 4 deletions manifest/manifest.go
@@ -24,10 +24,6 @@ const (
DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
// DockerV2Schema2LayerGzipEncMediaType is MIME type used for encrypted compressed layers.
DockerV2Schema2LayerGzipEncMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip+enc"
// DockerV2Schema2LayerEncMediaType is MIME type used for encrypted layers.
DockerV2Schema2LayerEncMediaType = "application/vnd.docker.image.rootfs.diff.tar+enc"
// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
2 changes: 1 addition & 1 deletion vendor.conf
@@ -51,4 +51,4 @@ github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
gopkg.in/square/go-jose.v2 8254d6c783765f38c8675fae4427a1fe73fbd09d
github.com/fullsailor/pkcs7 8306686428a5fe132eac8cb7c4848af725098bd4
github.com/containers/ocicrypt master
github.com/containers/ocicrypt 4e484bab285f4f32e3533ba8bb425dadc22d6e5c