Merge pull request #2406 from mtrmac/opencontainers-upgrade-prereq
Allow using recent opencontainers/go-digest
rhatdan authored May 14, 2024
2 parents 19bd1d4 + b71a3e3 commit 6caf212
Showing 15 changed files with 35 additions and 35 deletions.
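For context on the rename driving these diffs: in opencontainers/go-digest, Encoded() returns the hex-encoded portion of a digest (everything after the "algorithm:" prefix), and Hex() is an older, deprecated alias for the same value, which recent releases of the library discourage. A minimal sketch of the API, assuming go-digest v1.0.0 or later:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Canonical is the sha256 algorithm; FromString hashes the input and
	// returns a Digest of the form "sha256:<hex>".
	d := digest.Canonical.FromString("hello world")

	fmt.Println(d.String())    // sha256:b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9
	fmt.Println(d.Algorithm()) // sha256
	fmt.Println(d.Encoded())   // b94d27b9... (just the hex part, formerly returned by Hex())
}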
12 changes: 6 additions & 6 deletions docker/internal/tarfile/writer.go
@@ -164,7 +164,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
return fmt.Errorf("marshaling layer config: %w", err)
}
delete(layerConfig, "layer_id")
- layerID := digest.Canonical.FromBytes(b).Hex()
+ layerID := digest.Canonical.FromBytes(b).Encoded()
layerConfig["id"] = layerID

configBytes, err := json.Marshal(layerConfig)
@@ -309,26 +309,26 @@ func (w *Writer) Close() error {
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
- if err := configDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
+ if err := configDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
return "", err
}
- return configDigest.Hex() + ".json", nil
+ return configDigest.Encoded() + ".json", nil
}

// physicalLayerPath returns a path we choose for storing a layer with the specified digest
// (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) {
- if err := layerDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
+ if err := layerDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
return "", err
}
- // Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
+ // Note that this can't be e.g. filepath.Join(l.Digest.Encoded(), legacyLayerFileName); due to the way
// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
// in the root of the tarball.
- return layerDigest.Hex() + ".tar", nil
+ return layerDigest.Encoded() + ".tar", nil
}

type tarFI struct {
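The validate-before-use pattern in configPath and physicalLayerPath above matters because Encoded() (like the old Hex()) panics on a malformed digest instead of returning an error. A minimal standalone sketch of the same guard; safeConfigPath is an illustrative name, not part of this package:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// safeConfigPath mirrors the validate-then-Encoded guard used above.
func safeConfigPath(d digest.Digest) (string, error) {
	// Validate first: Encoded() panics on input with no "algorithm:" prefix,
	// and an unvalidated value could also yield unexpected file names.
	if err := d.Validate(); err != nil {
		return "", err
	}
	return d.Encoded() + ".json", nil
}

func main() {
	good := digest.Canonical.FromString("some config JSON")
	p, err := safeConfigPath(good)
	fmt.Println(p, err) // <64 hex chars>.json <nil>

	_, err = safeConfigPath(digest.Digest("not-a-digest"))
	fmt.Println(err) // rejected by Validate instead of panicking
}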
4 changes: 2 additions & 2 deletions docker/registries_d.go
@@ -288,10 +288,10 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
// base is not nil from the caller
// NOTE: Keep this in sync with docs/signature-protocols.md!
func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) {
- if err := manifestDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+ if err := manifestDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
return nil, err
}
sigURL := *base
- sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
+ sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1)
return &sigURL, nil
}
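For illustration, lookasideStorageURL keys signatures by algorithm and encoded digest plus a 1-based index. A hedged sketch of the same formatting, with a made-up base URL standing in for the registries.d configuration:

package main

import (
	"fmt"
	"net/url"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Hypothetical lookaside base; the real value comes from registries.d.
	base, err := url.Parse("https://sigstore.example.com/library/busybox")
	if err != nil {
		panic(err)
	}
	manifestDigest := digest.Canonical.FromString("fake manifest body")

	sigURL := *base
	// Same format as lookasideStorageURL above: path@algorithm=encoded/signature-N.
	sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d",
		sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), 0+1)
	fmt.Println(sigURL.String())
	// https://sigstore.example.com/library/busybox@sha256=<64 hex chars>/signature-1
}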
2 changes: 1 addition & 1 deletion internal/image/docker_schema2.go
@@ -366,7 +366,7 @@ func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string)
if err := blobDigest.Validate(); err != nil {
return "", err
}
- parts := append([]string{blobDigest.Hex()}, others...)
+ parts := append([]string{blobDigest.Encoded()}, others...)
v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
return hex.EncodeToString(v1IDHash[:]), nil
}
2 changes: 1 addition & 1 deletion internal/manifest/testdata/oci1.index.zstd-selection.json
@@ -52,7 +52,7 @@
},
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
- "digest": "sha256:gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg",
+ "digest": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
"size": 772,
"annotations": {
"io.github.containers.compression.zstd": "true"
2 changes: 1 addition & 1 deletion manifest/docker_schema1.go
@@ -342,5 +342,5 @@ func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) {
if err != nil {
return "", err
}
- return digest.FromBytes(image).Hex(), nil
+ return digest.FromBytes(image).Encoded(), nil
}
2 changes: 1 addition & 1 deletion manifest/docker_schema2.go
@@ -295,7 +295,7 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) {
if err := m.ConfigDescriptor.Digest.Validate(); err != nil {
return "", err
}
- return m.ConfigDescriptor.Digest.Hex(), nil
+ return m.ConfigDescriptor.Digest.Encoded(), nil
}

// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
2 changes: 1 addition & 1 deletion manifest/oci.go
@@ -260,7 +260,7 @@ func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
if err := m.Config.Digest.Validate(); err != nil {
return "", err
}
- return m.Config.Digest.Hex(), nil
+ return m.Config.Digest.Encoded(), nil
}

// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
4 changes: 2 additions & 2 deletions oci/layout/oci_delete_test.go
@@ -318,15 +318,15 @@ func loadFixture(t *testing.T, fixtureName string) string {
func assertBlobExists(t *testing.T, blobsDir string, blobDigest string) {
digest, err := digest.Parse(blobDigest)
require.NoError(t, err)
- blobPath := filepath.Join(blobsDir, digest.Algorithm().String(), digest.Hex())
+ blobPath := filepath.Join(blobsDir, digest.Algorithm().String(), digest.Encoded())
_, err = os.Stat(blobPath)
require.NoError(t, err)
}

func assertBlobDoesNotExist(t *testing.T, blobsDir string, blobDigest string) {
digest, err := digest.Parse(blobDigest)
require.NoError(t, err)
- blobPath := filepath.Join(blobsDir, digest.Algorithm().String(), digest.Hex())
+ blobPath := filepath.Join(blobsDir, digest.Algorithm().String(), digest.Encoded())
_, err = os.Stat(blobPath)
require.True(t, os.IsNotExist(err))
}
2 changes: 1 addition & 1 deletion oci/layout/oci_transport.go
@@ -256,5 +256,5 @@ func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (st
} else {
blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir)
}
- return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
+ return filepath.Join(blobDir, digest.Algorithm().String(), digest.Encoded()), nil
}
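The effect of blobPath is that each blob lives at blobs/<algorithm>/<encoded digest> under the layout directory (or under a shared blob directory when one is configured). A small sketch under that assumption; the layout directory is invented for the example:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Stand-in for ref.dir; "blobs" matches the OCI image-layout blobs directory.
	layoutDir := "/var/lib/example/oci-layout"
	d := digest.Canonical.FromString("layer contents")

	blobPath := filepath.Join(layoutDir, "blobs", d.Algorithm().String(), d.Encoded())
	fmt.Println(blobPath)
	// /var/lib/example/oci-layout/blobs/sha256/<64 hex chars>
}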
16 changes: 8 additions & 8 deletions ostree/ostree_dest.go
@@ -164,7 +164,7 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream
return private.UploadedBlob{}, err
}

- hash := blobDigest.Hex()
+ hash := blobDigest.Encoded()
d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
}
@@ -282,8 +282,8 @@ func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest,
func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.

- ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
- destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
+ ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
+ destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Encoded(), "root")
if err := ensureDirectoryExists(destinationPath); err != nil {
return err
}
@@ -323,7 +323,7 @@ func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle,
}

func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
- ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+ ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
destinationPath := filepath.Dir(blob.BlobPath)

return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
@@ -348,10 +348,10 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
d.repo = repo
}

- if err := info.Digest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, so validate explicitly.
+ if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return false, private.ReusedBlob{}, err
}
- branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
+ branch := fmt.Sprintf("ociimage/%s", info.Digest.Encoded())

found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
@@ -479,7 +479,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return err
}
- hash := layer.Digest.Hex()
+ hash := layer.Digest.Encoded()
if err = checkLayer(hash); err != nil {
return err
}
@@ -488,7 +488,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return err
}
- hash := layer.BlobSum.Hex()
+ hash := layer.BlobSum.Encoded()
if err = checkLayer(hash); err != nil {
return err
}
6 changes: 3 additions & 3 deletions ostree/ostree_src.go
@@ -289,7 +289,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return nil, -1, err
}
- blob := info.Digest.Hex()
+ blob := info.Digest.Encoded()

// Ensure s.compressed is initialized. It is build by LayerInfosForCopy.
if s.compressed == nil {
@@ -301,7 +301,7 @@
}
compressedBlob, isCompressed := s.compressed[info.Digest]
if isCompressed {
- blob = compressedBlob.Hex()
+ blob = compressedBlob.Encoded()
}
branch := fmt.Sprintf("ociimage/%s", blob)

@@ -424,7 +424,7 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges
layerBlobs := man.LayerInfos()

for _, layerBlob := range layerBlobs {
- branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
+ branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Encoded())
found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
return nil, err
2 changes: 1 addition & 1 deletion sif/src.go
@@ -111,7 +111,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
History: []imgspecv1.History{
{
Created: &created,
- CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator),
+ CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Encoded(), os.PathSeparator),
Comment: "imported from SIF, uuid: " + sifImg.ID(),
},
{
8 changes: 4 additions & 4 deletions storage/storage_dest.go
@@ -564,7 +564,7 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
}
// ordinaryImageID is a digest of a config, which is a JSON value.
// To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON.
- tocImageID := digest.FromString("@With TOC:" + tocIDInput).Hex()
+ tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded()
logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID)
return tocImageID
}
@@ -651,11 +651,11 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig
defer s.lock.Unlock()

if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found {
- return "@TOC=" + d.Hex(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
+ return "@TOC=" + d.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
}

if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found {
- return d.Hex(), true // This looks like chain IDs, and it uses the traditional value.
+ return d.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
}
return "", false
}
@@ -731,7 +731,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si

id := layerIDComponent
if !layerIDComponentStandalone || parentLayer != "" {
- id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Hex()
+ id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded()
}
if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
// There's already a layer that should have the right contents, just reuse it.
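In commitLayer above, layer IDs are chained: unless a layer is the first and its ID component stands alone, the storage ID is the canonical digest of "<parent ID>+<this layer's ID component>", so every layer's ID depends on all of its ancestors. A rough sketch of that chaining; the helper name and inputs are illustrative only:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// chainLayerID mimics the derivation in commitLayer above: the first layer may
// use its ID component directly, descendants hash "<parentID>+<component>".
func chainLayerID(parentID, layerIDComponent string) string {
	if parentID == "" {
		return layerIDComponent
	}
	return digest.Canonical.FromString(parentID + "+" + layerIDComponent).Encoded()
}

func main() {
	// Hypothetical per-layer ID components (normally diff IDs or "@TOC=..." values).
	c1 := digest.Canonical.FromString("layer 1 diff").Encoded()
	c2 := digest.Canonical.FromString("layer 2 diff").Encoded()

	id1 := chainLayerID("", c1)
	id2 := chainLayerID(id1, c2)
	fmt.Println(id1)
	fmt.Println(id2) // depends on id1, so changing any earlier layer changes every later ID
}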
4 changes: 2 additions & 2 deletions storage/storage_test.go
@@ -437,8 +437,8 @@ func TestWriteRead(t *testing.T) {
manifest = strings.ReplaceAll(manifest, "%ch", config.compressedDigest.String())
manifest = strings.ReplaceAll(manifest, "%ls", fmt.Sprintf("%d", layer.compressedSize))
manifest = strings.ReplaceAll(manifest, "%cs", fmt.Sprintf("%d", config.compressedSize))
- manifest = strings.ReplaceAll(manifest, "%li", layer.compressedDigest.Hex())
- manifest = strings.ReplaceAll(manifest, "%ci", config.compressedDigest.Hex())
+ manifest = strings.ReplaceAll(manifest, "%li", layer.compressedDigest.Encoded())
+ manifest = strings.ReplaceAll(manifest, "%ci", config.compressedDigest.Encoded())
t.Logf("this manifest is %q", manifest)
err = dest.PutManifest(context.Background(), []byte(manifest), nil)
require.NoError(t, err)
2 changes: 1 addition & 1 deletion tarball/tarball_src.go
@@ -117,7 +117,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System

history = append(history, imgspecv1.History{
Created: &blobTime,
- CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Hex(), os.PathSeparator),
+ CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Encoded(), os.PathSeparator),
Comment: comment,
})
// Use the mtime of the most recently modified file as the image's creation time.
