diff --git a/docker/docker_image_dest.go b/docker/docker_image_dest.go
index 877d11b738..9ba7ff4c47 100644
--- a/docker/docker_image_dest.go
+++ b/docker/docker_image_dest.go
@@ -344,35 +344,24 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 	}
 
 	// Then try reusing blobs from other locations.
-	candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute)
+	candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, blobinfocache.CandidateLocations2Options{
+		CanSubstitute:           options.CanSubstitute,
+		PossibleManifestFormats: options.PossibleManifestFormats,
+		RequiredCompression:     options.RequiredCompression,
+	})
 	for _, candidate := range candidates {
-		var err error
-		compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
-		if err != nil {
-			logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err)
-			continue
-		}
 		var candidateRepo reference.Named
 		if !candidate.UnknownLocation {
+			var err error
 			candidateRepo, err = parseBICLocationReference(candidate.Location)
 			if err != nil {
 				logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
 				continue
 			}
 		}
-		if !impl.CandidateMatchesTryReusingBlobOptions(options, compressionAlgorithm) {
-			if !candidate.UnknownLocation {
-				logrus.Debugf("Ignoring candidate blob %s in %s, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(), candidateRepo.Name(),
-					optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
-			} else {
-				logrus.Debugf("Ignoring candidate blob %s with no known location, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(),
-					optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
-			}
-			continue
-		}
 		if !candidate.UnknownLocation {
-			if candidate.CompressorName != blobinfocache.Uncompressed {
-				logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
+			if candidate.CompressionAlgorithm != nil {
+				logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressionAlgorithm.Name(), candidateRepo.Name())
 			} else {
 				logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name())
 			}
@@ -387,8 +376,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 				continue
 			}
 		} else {
-			if candidate.CompressorName != blobinfocache.Uncompressed {
-				logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressorName)
+			if candidate.CompressionAlgorithm != nil {
+				logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressionAlgorithm.Name())
 			} else {
 				logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String())
 			}
@@ -439,8 +428,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		return true, private.ReusedBlob{
 			Digest:               candidate.Digest,
 			Size:                 size,
-			CompressionOperation: compressionOperation,
-			CompressionAlgorithm: compressionAlgorithm}, nil
+			CompressionOperation: candidate.CompressionOperation,
+			CompressionAlgorithm: candidate.CompressionAlgorithm}, nil
 	}
 
 	return false, private.ReusedBlob{}, nil
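
With this change the docker destination no longer maps compressor names itself: CandidateLocations2 returns ready-to-use (CompressionOperation, CompressionAlgorithm) pairs, and a nil CompressionAlgorithm now unambiguously means "uncompressed", because unknown-compression candidates are filtered out inside the cache before they ever reach the transport. A minimal sketch of consuming a candidate under that contract, assuming code living inside the containers/image module (the internal packages are not importable from outside); describeCandidate is a hypothetical helper:

package example

import (
	"fmt"

	"github.com/containers/image/v5/internal/blobinfocache"
)

// describeCandidate relies on the invariant established by this patch:
// CandidateLocations2 never returns unknown compression, so a nil
// CompressionAlgorithm can only mean "uncompressed" (types.Decompress).
func describeCandidate(c blobinfocache.BICReplacementCandidate2) string {
	if c.CompressionAlgorithm == nil {
		return fmt.Sprintf("%s (uncompressed)", c.Digest)
	}
	return fmt.Sprintf("%s (compressed with %s)", c.Digest, c.CompressionAlgorithm.Name())
}
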
diff --git a/internal/blobinfocache/blobinfocache.go b/internal/blobinfocache/blobinfocache.go
index 2767c39507..893aa959d4 100644
--- a/internal/blobinfocache/blobinfocache.go
+++ b/internal/blobinfocache/blobinfocache.go
@@ -1,8 +1,6 @@
 package blobinfocache
 
 import (
-	"github.com/containers/image/v5/pkg/compression"
-	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	digest "github.com/opencontainers/go-digest"
 )
@@ -32,7 +30,7 @@ func (bic *v1OnlyBlobInfoCache) Close() {
 func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
 }
 
-func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 {
+func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 {
 	return nil
 }
 
@@ -48,23 +46,3 @@ func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.B
 	}
 	return candidates
 }
-
-// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm
-// values suitable for inclusion in a types.BlobInfo structure, based on the name of the
-// compression algorithm, or Uncompressed, or UnknownCompression.  This is typically used by
-// TryReusingBlob() implementations to set values in the BlobInfo structure that they return
-// upon success.
-func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compressiontypes.Algorithm, error) {
-	switch compressorName {
-	case Uncompressed:
-		return types.Decompress, nil, nil
-	case UnknownCompression:
-		return types.PreserveOriginal, nil, nil
-	default:
-		algo, err := compression.AlgorithmByName(compressorName)
-		if err == nil {
-			return types.Compress, &algo, nil
-		}
-		return types.PreserveOriginal, nil, err
-	}
-}
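
The deleted OperationAndAlgorithmForCompressor helper is not replaced one-for-one; the same mapping now happens inside pkg/blobinfocache/internal/prioritize.CandidateCompression (added later in this patch), before candidates ever leave the cache. For reference, the mapping it performed, in sketch form:

// blobinfocache.Uncompressed       -> (types.Decompress, nil)
// blobinfocache.UnknownCompression -> (types.PreserveOriginal, nil); v2 lookups now drop such candidates instead
// any other compressor name        -> (types.Compress, &algo) via compression.AlgorithmByName, or an error if unrecognized
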
diff --git a/internal/blobinfocache/types.go b/internal/blobinfocache/types.go
index 4d3858ab8d..c9e4aaa485 100644
--- a/internal/blobinfocache/types.go
+++ b/internal/blobinfocache/types.go
@@ -1,6 +1,7 @@
 package blobinfocache
 
 import (
+	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	digest "github.com/opencontainers/go-digest"
 )
@@ -35,19 +36,24 @@ type BlobInfoCache2 interface {
 	// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
 	// that could possibly be reused within the specified (transport scope) (if they still
 	// exist, which is not guaranteed).
-	//
-	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
-	// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
+	CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2
+}
+
+// CandidateLocations2Options are used in CandidateLocations2.
+type CandidateLocations2Options struct {
+	// If !CanSubstitute, the returned candidates will match the submitted digest exactly; if
+	// CanSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
 	// up variants of the blob which have the same uncompressed digest.
-	//
-	// The CompressorName fields in returned data must never be UnknownCompression.
-	CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2
+	CanSubstitute           bool
+	PossibleManifestFormats []string                    // If set, a set of possible manifest formats; at least one should support the reused layer
+	RequiredCompression     *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm
 }
 
 // BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
 type BICReplacementCandidate2 struct {
-	Digest          digest.Digest
-	CompressorName  string                     // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
-	UnknownLocation bool                       // is true when `Location` for this blob is not set
-	Location        types.BICLocationReference // not set if UnknownLocation is set to `true`
+	Digest               digest.Digest
+	CompressionOperation types.LayerCompression      // Either types.Decompress for uncompressed, or types.Compress for compressed
+	CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
+	UnknownLocation      bool                        // is true when `Location` for this blob is not set
+	Location             types.BICLocationReference  // not set if UnknownLocation is set to `true`
 }
diff --git a/internal/imagedestination/impl/helpers.go b/internal/imagedestination/impl/helpers.go
index 553569a030..9b42cfbec0 100644
--- a/internal/imagedestination/impl/helpers.go
+++ b/internal/imagedestination/impl/helpers.go
@@ -3,40 +3,13 @@ package impl
 import (
 	"github.com/containers/image/v5/internal/manifest"
 	"github.com/containers/image/v5/internal/private"
-	compression "github.com/containers/image/v5/pkg/compression/types"
-	"golang.org/x/exp/slices"
 )
 
-// CandidateMatchesTryReusingBlobOptions validates if compression is required by the caller while selecting a blob, if it is required
-// then function performs a match against the compression requested by the caller and compression of existing blob
-// (which can be nil to represent uncompressed or unknown)
-func CandidateMatchesTryReusingBlobOptions(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
-	if options.RequiredCompression != nil {
-		if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
-			// HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
-			// The caller must re-compress to build those annotations.
-			return false
-		}
-		if candidateCompression == nil ||
-			(options.RequiredCompression.Name() != candidateCompression.Name() && options.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
-			return false
-		}
-	}
-
-	// For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”;
-	// and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1),
-	// so don’t impose any restrictions if candidateCompression == nil
-	if options.PossibleManifestFormats != nil && candidateCompression != nil {
-		if !slices.ContainsFunc(options.PossibleManifestFormats, func(mt string) bool {
-			return manifest.MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression)
-		}) {
-			return false
-		}
-	}
-
-	return true
-}
-
+// OriginalCandidateMatchesTryReusingBlobOptions returns true if the original blob passed to TryReusingBlobWithOptions
+// is acceptable based on opts.
 func OriginalCandidateMatchesTryReusingBlobOptions(opts private.TryReusingBlobOptions) bool {
-	return CandidateMatchesTryReusingBlobOptions(opts, opts.OriginalCompression)
+	return manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
+		PossibleManifestFormats: opts.PossibleManifestFormats,
+		RequiredCompression:     opts.RequiredCompression,
+	}, opts.OriginalCompression)
 }
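
The surviving wrapper only vets the blob the caller already holds; substitute candidates are now vetted inside the cache. A sketch of the intended call pattern in a destination (options is the private.TryReusingBlobOptions passed to TryReusingBlobWithOptions):

if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
	// The original blob's compression is acceptable; try to reuse info.Digest
	// as-is before asking the BlobInfoCache for substitutes.
}
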
diff --git a/internal/imagedestination/impl/helpers_test.go b/internal/imagedestination/impl/helpers_test.go
deleted file mode 100644
index 93749b1bd5..0000000000
--- a/internal/imagedestination/impl/helpers_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package impl
-
-import (
-	"testing"
-
-	"github.com/containers/image/v5/internal/private"
-	"github.com/containers/image/v5/manifest"
-	"github.com/containers/image/v5/pkg/compression"
-	compressionTypes "github.com/containers/image/v5/pkg/compression/types"
-	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/stretchr/testify/assert"
-)
-
-func TestCandidateMatchesTryReusingBlobOptions(t *testing.T) {
-	cases := []struct {
-		requiredCompression     *compressionTypes.Algorithm
-		possibleManifestFormats []string
-		candidateCompression    *compressionTypes.Algorithm
-		result                  bool
-	}{
-		// RequiredCompression restrictions
-		{&compression.Zstd, nil, &compression.Zstd, true},
-		{&compression.Gzip, nil, &compression.Zstd, false},
-		{&compression.Zstd, nil, nil, false},
-		{nil, nil, &compression.Zstd, true},
-		// PossibleManifestFormats restrictions
-		{nil, []string{imgspecv1.MediaTypeImageManifest}, &compression.Zstd, true},
-		{nil, []string{manifest.DockerV2Schema2MediaType}, &compression.Zstd, false},
-		{nil, []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest}, &compression.Zstd, true},
-		{nil, nil, &compression.Zstd, true},
-		{nil, []string{imgspecv1.MediaTypeImageManifest}, &compression.Gzip, true},
-		{nil, []string{manifest.DockerV2Schema2MediaType}, &compression.Gzip, true},
-		{nil, []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest}, &compression.Gzip, true},
-		{nil, nil, &compression.Gzip, true},
-		// Some possible combinations (always 1 constraint not matching)
-		{&compression.Zstd, []string{manifest.DockerV2Schema2MediaType}, &compression.Zstd, false},
-		{&compression.Gzip, []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest}, &compression.Zstd, false},
-	}
-
-	for _, c := range cases {
-		opts := private.TryReusingBlobOptions{
-			RequiredCompression:     c.requiredCompression,
-			PossibleManifestFormats: c.possibleManifestFormats,
-		}
-		assert.Equal(t, c.result, CandidateMatchesTryReusingBlobOptions(opts, c.candidateCompression))
-	}
-}
diff --git a/internal/manifest/manifest.go b/internal/manifest/manifest.go
index c77db7522a..3556902e4b 100644
--- a/internal/manifest/manifest.go
+++ b/internal/manifest/manifest.go
@@ -7,6 +7,7 @@ import (
 	"github.com/containers/libtrust"
 	digest "github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/exp/slices"
 )
 
 // FIXME: Should we just use docker/distribution and docker/docker implementations directly?
@@ -192,3 +193,39 @@ func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes
 		return false
 	}
 }
+
+// ReuseConditions are an input to CandidateCompressionMatchesReuseConditions;
+// it is a struct to allow longer and better-documented field names.
+type ReuseConditions struct {
+	PossibleManifestFormats []string                    // If set, a set of possible manifest formats; at least one should support the reused layer
+	RequiredCompression     *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm
+}
+
+// CandidateCompressionMatchesReuseConditions returns true if a layer with candidateCompression
+// (which can be nil to represent uncompressed or unknown) matches reuseConditions.
+func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool {
+	if c.RequiredCompression != nil {
+		if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName {
+			// HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
+			// The caller must re-compress to build those annotations.
+			return false
+		}
+		if candidateCompression == nil ||
+			(c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
+			return false
+		}
+	}
+
+	// For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”;
+	// and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1),
+	// so don’t impose any restrictions if candidateCompression == nil
+	if c.PossibleManifestFormats != nil && candidateCompression != nil {
+		if !slices.ContainsFunc(c.PossibleManifestFormats, func(mt string) bool {
+			return MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression)
+		}) {
+			return false
+		}
+	}
+
+	return true
+}
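
A few illustrative calls to the relocated matcher, showing its three rules: required-compression matching, the zstd:chunked hack, and manifest-format support (a sketch; values chosen only for demonstration, written as if from another package inside this module):

conds := manifest.ReuseConditions{RequiredCompression: &compression.Zstd}
_ = manifest.CandidateCompressionMatchesReuseConditions(conds, &compression.Zstd) // true: names match

conds = manifest.ReuseConditions{RequiredCompression: &compression.ZstdChunked}
_ = manifest.CandidateCompressionMatchesReuseConditions(conds, &compression.ZstdChunked) // false: the zstd:chunked HACK always rejects

conds = manifest.ReuseConditions{PossibleManifestFormats: []string{manifest.DockerV2Schema2MediaType}}
_ = manifest.CandidateCompressionMatchesReuseConditions(conds, &compression.Zstd) // false: schema2 cannot carry zstd layers
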
diff --git a/internal/manifest/manifest_test.go b/internal/manifest/manifest_test.go
index 0b01549670..ae2cc32ca0 100644
--- a/internal/manifest/manifest_test.go
+++ b/internal/manifest/manifest_test.go
@@ -182,3 +182,38 @@ func TestMIMETypeSupportsCompressionAlgorithm(t *testing.T) {
 		}
 	}
 }
+
+func TestCandidateCompressionMatchesReuseConditions(t *testing.T) {
+	cases := []struct {
+		requiredCompression     *compression.Algorithm
+		possibleManifestFormats []string
+		candidateCompression    *compression.Algorithm
+		result                  bool
+	}{
+		// RequiredCompression restrictions
+		{&compression.Zstd, nil, &compression.Zstd, true},
+		{&compression.Gzip, nil, &compression.Zstd, false},
+		{&compression.Zstd, nil, nil, false},
+		{nil, nil, &compression.Zstd, true},
+		// PossibleManifestFormats restrictions
+		{nil, []string{imgspecv1.MediaTypeImageManifest}, &compression.Zstd, true},
+		{nil, []string{DockerV2Schema2MediaType}, &compression.Zstd, false},
+		{nil, []string{DockerV2Schema2MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest}, &compression.Zstd, true},
+		{nil, nil, &compression.Zstd, true},
+		{nil, []string{imgspecv1.MediaTypeImageManifest}, &compression.Gzip, true},
+		{nil, []string{DockerV2Schema2MediaType}, &compression.Gzip, true},
+		{nil, []string{DockerV2Schema2MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest}, &compression.Gzip, true},
+		{nil, nil, &compression.Gzip, true},
+		// Some possible combinations (always 1 constraint not matching)
+		{&compression.Zstd, []string{DockerV2Schema2MediaType}, &compression.Zstd, false},
+		{&compression.Gzip, []string{DockerV2Schema2MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest}, &compression.Zstd, false},
+	}
+
+	for _, c := range cases {
+		conds := ReuseConditions{
+			RequiredCompression:     c.requiredCompression,
+			PossibleManifestFormats: c.possibleManifestFormats,
+		}
+		assert.Equal(t, c.result, CandidateCompressionMatchesReuseConditions(conds, c.candidateCompression))
+	}
+}
diff --git a/pkg/blobinfocache/boltdb/boltdb.go b/pkg/blobinfocache/boltdb/boltdb.go
index 9a8fa22ab9..3d8382ecd4 100644
--- a/pkg/blobinfocache/boltdb/boltdb.go
+++ b/pkg/blobinfocache/boltdb/boltdb.go
@@ -300,9 +300,10 @@ func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope type
 // (which might be nil) with corresponding compression
 // info from compressionBucket (which might be nil), and returns the result of appending them
 // to candidates.
-// v2Output allows including candidates with unknown location, and filters out candidates
+// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates
 // with unknown compression.
-func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime {
+func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest,
+	v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime {
 	digestKey := []byte(digest.String())
 	compressorName := blobinfocache.UnknownCompression
 	if compressionBucket != nil {
@@ -312,9 +313,11 @@ func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 			compressorName = string(compressorNameValue)
 		}
 	}
-	if compressorName == blobinfocache.UnknownCompression && v2Output {
+	ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
+	if !ok {
 		return candidates
 	}
+
 	var b *bolt.Bucket
 	if scopeBucket != nil {
 		b = scopeBucket.Bucket(digestKey)
@@ -327,21 +330,23 @@ func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 			}
 			candidates = append(candidates, prioritize.CandidateWithTime{
 				Candidate: blobinfocache.BICReplacementCandidate2{
-					Digest:         digest,
-					CompressorName: compressorName,
-					Location:       types.BICLocationReference{Opaque: string(k)},
+					Digest:               digest,
+					CompressionOperation: compressionOp,
+					CompressionAlgorithm: compressionAlgo,
+					Location:             types.BICLocationReference{Opaque: string(k)},
 				},
 				LastSeen: t,
 			})
 			return nil
 		}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
-	} else if v2Output {
+	} else if v2Options != nil {
 		candidates = append(candidates, prioritize.CandidateWithTime{
 			Candidate: blobinfocache.BICReplacementCandidate2{
-				Digest:          digest,
-				CompressorName:  compressorName,
-				UnknownLocation: true,
-				Location:        types.BICLocationReference{Opaque: ""},
+				Digest:               digest,
+				CompressionOperation: compressionOp,
+				CompressionAlgorithm: compressionAlgo,
+				UnknownLocation:      true,
+				Location:             types.BICLocationReference{Opaque: ""},
 			},
 			LastSeen: time.Time{},
 		})
@@ -349,17 +354,17 @@ func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 	return candidates
 }
 
-// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused
-// within the specified (transport scope) (if they still exist, which is not guaranteed).
-//
-// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
-// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
-// uncompressed digest.
-func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
-	return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
+// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
+// that could possibly be reused within the specified (transport scope) (if they still
+// exist, which is not guaranteed).
+func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+	return bdc.candidateLocations(transport, scope, primaryDigest, options.CanSubstitute, &options)
 }
 
-func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
+// candidateLocations implements CandidateLocations / CandidateLocations2.
+// v2Options is not nil if the caller is CandidateLocations2.
+func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
+	v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
 	res := []prioritize.CandidateWithTime{}
 	var uncompressedDigestValue digest.Digest // = ""
 	if err := bdc.view(func(tx *bolt.Tx) error {
@@ -374,7 +379,7 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types
 		// and we don't want to fail just because of that
 		compressionBucket := tx.Bucket(digestCompressorBucket)
 
-		res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, v2Output)
+		res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, v2Options)
 		if canSubstitute {
 			if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
 				b := tx.Bucket(digestByUncompressedBucket)
@@ -387,7 +392,7 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types
 								return err
 							}
 							if d != primaryDigest && d != uncompressedDigestValue {
-								res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, v2Output)
+								res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, v2Options)
 							}
 							return nil
 						}); err != nil {
@@ -396,7 +401,7 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types
 					}
 				}
 				if uncompressedDigestValue != primaryDigest {
-					res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, v2Output)
+					res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, v2Options)
 				}
 			}
 		}
@@ -415,5 +420,5 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
 func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
-	return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
+	return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, nil))
 }
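
The v2Options-as-nil convention is meant to work the same way for any cache backend: pass nil for v1 CandidateLocations lookups, pass the caller's options for CandidateLocations2, and let prioritize.CandidateCompression make the accept/reject decision. A condensed sketch of the shared pattern (location and lastSeen stand in for backend-specific values):

ok, op, algo := prioritize.CandidateCompression(v2Options, digest, compressorName)
if !ok {
	return candidates // rejection reasons are already logged by CandidateCompression
}
candidates = append(candidates, prioritize.CandidateWithTime{
	Candidate: blobinfocache.BICReplacementCandidate2{
		Digest:               digest,
		CompressionOperation: op,
		CompressionAlgorithm: algo,
		Location:             location,
	},
	LastSeen: lastSeen,
})
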
diff --git a/pkg/blobinfocache/internal/prioritize/prioritize.go b/pkg/blobinfocache/internal/prioritize/prioritize.go
index cfae4fa9e4..97937ccc6f 100644
--- a/pkg/blobinfocache/internal/prioritize/prioritize.go
+++ b/pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -1,4 +1,4 @@
-// Package prioritize provides utilities for prioritizing locations in
+// Package prioritize provides utilities for filtering and prioritizing locations in
 // types.BlobInfoCache.CandidateLocations.
 package prioritize
 
@@ -6,7 +6,11 @@ import (
 	"time"
 
 	"github.com/containers/image/v5/internal/blobinfocache"
+	"github.com/containers/image/v5/internal/manifest"
+	"github.com/containers/image/v5/pkg/compression"
+	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
 	"golang.org/x/exp/slices"
 )
 
@@ -20,6 +24,53 @@ const replacementAttempts = 5
 // This is a heuristic/guess, and could well use a different value.
 const replacementUnknownLocationAttempts = 2
 
+// CandidateCompression returns (true, compressionOp, compressionAlgo) if a blob
+// with compressorName (which can be Uncompressed or UnknownCompression) is acceptable for a CandidateLocations* call with v2Options.
+//
+// v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known);
+// if not nil, the call is assumed to be CandidateLocations2.
+//
+// The (compressionOp, compressionAlgo) values are suitable for BICReplacementCandidate2.
+func CandidateCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, compressorName string) (bool, types.LayerCompression, *compression.Algorithm) {
+	if v2Options == nil {
+		return true, types.PreserveOriginal, nil // Anything goes. The (compressionOp, compressionAlgo) values are not used.
+	}
+
+	var op types.LayerCompression
+	var algo *compression.Algorithm
+	switch compressorName {
+	case blobinfocache.Uncompressed:
+		op = types.Decompress
+		algo = nil
+	case blobinfocache.UnknownCompression:
+		logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String())
+		return false, types.PreserveOriginal, nil // Not allowed with CandidateLocations2
+	default:
+		op = types.Compress
+		algo_, err := compression.AlgorithmByName(compressorName)
+		if err != nil {
+			logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v",
+				digest.String(), compressorName, err)
+			return false, types.PreserveOriginal, nil // The BICReplacementCandidate2.CompressionAlgorithm field is required
+		}
+		algo = &algo_
+	}
+	if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
+		PossibleManifestFormats: v2Options.PossibleManifestFormats,
+		RequiredCompression:     v2Options.RequiredCompression,
+	}, algo) {
+		requiredCompression := "nil"
+		if v2Options.RequiredCompression != nil {
+			requiredCompression = v2Options.RequiredCompression.Name()
+		}
+		logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v",
+			digest.String(), compressorName, requiredCompression, v2Options.PossibleManifestFormats)
+		return false, types.PreserveOriginal, nil
+	}
+
+	return true, op, algo
+}
+
 // CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
 type CandidateWithTime struct {
 	Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
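
Concretely, the new filter is expected to behave as follows (a sketch of outcomes, not a compiled test; someDigest is a placeholder):

opts := &blobinfocache.CandidateLocations2Options{} // no extra restrictions

ok, op, algo := prioritize.CandidateCompression(opts, someDigest, compressiontypes.GzipAlgorithmName)
// -> ok == true, op == types.Compress, algo.Name() == "gzip"

ok, op, algo = prioritize.CandidateCompression(opts, someDigest, blobinfocache.Uncompressed)
// -> ok == true, op == types.Decompress, algo == nil

ok, op, algo = prioritize.CandidateCompression(opts, someDigest, blobinfocache.UnknownCompression)
// -> ok == false: CandidateLocations2 never returns unknown compression

ok, op, algo = prioritize.CandidateCompression(nil, someDigest, "anything")
// -> ok == true, op == types.PreserveOriginal: v1 lookups accept everything
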
diff --git a/pkg/blobinfocache/internal/prioritize/prioritize_test.go b/pkg/blobinfocache/internal/prioritize/prioritize_test.go
index e0bee11840..1ec99a4d03 100644
--- a/pkg/blobinfocache/internal/prioritize/prioritize_test.go
+++ b/pkg/blobinfocache/internal/prioritize/prioritize_test.go
@@ -6,7 +6,7 @@ import (
 	"time"
 
 	"github.com/containers/image/v5/internal/blobinfocache"
-	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+	"github.com/containers/image/v5/pkg/compression"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
 	"github.com/stretchr/testify/assert"
@@ -23,33 +23,33 @@ const (
 var (
 	// inputReplacementCandidates contains a non-trivial candidateSortState shared among several tests below.
 	inputReplacementCandidates = []CandidateWithTime{
-		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A1"}, CompressorName: compressiontypes.XzAlgorithmName}, time.Unix(1, 0)},
-		{blobinfocache.BICReplacementCandidate2{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U2"}, CompressorName: compressiontypes.GzipAlgorithmName}, time.Unix(1, 1)},
-		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A2"}, CompressorName: blobinfocache.Uncompressed}, time.Unix(1, 1)},
-		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P1"}, CompressorName: blobinfocache.UnknownCompression}, time.Unix(1, 0)},
-		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B1"}, CompressorName: compressiontypes.Bzip2AlgorithmName}, time.Unix(1, 1)},
-		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P2"}, CompressorName: compressiontypes.GzipAlgorithmName}, time.Unix(1, 1)},
-		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B2"}, CompressorName: blobinfocache.Uncompressed}, time.Unix(2, 0)},
-		{blobinfocache.BICReplacementCandidate2{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U1"}, CompressorName: blobinfocache.UnknownCompression}, time.Unix(1, 0)},
-		{blobinfocache.BICReplacementCandidate2{Digest: digestUncompressed, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, time.Time{}},
-		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedA, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, time.Time{}},
-		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedB, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, time.Time{}},
-		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedPrimary, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, time.Time{}},
+		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A1"}, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Xz}, time.Unix(1, 0)},
+		{blobinfocache.BICReplacementCandidate2{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U2"}, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Gzip}, time.Unix(1, 1)},
+		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A2"}, CompressionOperation: types.Decompress, CompressionAlgorithm: nil}, time.Unix(1, 1)},
+		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P1"}}, time.Unix(1, 0)},
+		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B1"}, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Bzip2}, time.Unix(1, 1)},
+		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P2"}, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Gzip}, time.Unix(1, 1)},
+		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B2"}, CompressionOperation: types.Decompress, CompressionAlgorithm: nil}, time.Unix(2, 0)},
+		{blobinfocache.BICReplacementCandidate2{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U1"}}, time.Unix(1, 0)},
+		{blobinfocache.BICReplacementCandidate2{Digest: digestUncompressed, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}}, time.Time{}},
+		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedA, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}}, time.Time{}},
+		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedB, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}}, time.Time{}},
+		{blobinfocache.BICReplacementCandidate2{Digest: digestCompressedPrimary, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}}, time.Time{}},
 	}
 	// expectedReplacementCandidates is the fully-sorted, unlimited, result of prioritizing inputReplacementCandidates.
 	expectedReplacementCandidates = []blobinfocache.BICReplacementCandidate2{
-		{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P2"}, CompressorName: compressiontypes.GzipAlgorithmName},
-		{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P1"}, CompressorName: blobinfocache.UnknownCompression},
-		{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B2"}, CompressorName: blobinfocache.Uncompressed},
-		{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A2"}, CompressorName: blobinfocache.Uncompressed},
-		{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B1"}, CompressorName: compressiontypes.Bzip2AlgorithmName},
-		{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A1"}, CompressorName: compressiontypes.XzAlgorithmName},
-		{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U2"}, CompressorName: compressiontypes.GzipAlgorithmName},
-		{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U1"}, CompressorName: blobinfocache.UnknownCompression},
-		{Digest: digestCompressedPrimary, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression},
-		{Digest: digestCompressedA, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression},
-		{Digest: digestCompressedB, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression},
-		{Digest: digestUncompressed, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression},
+		{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P2"}, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Gzip},
+		{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P1"}},
+		{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B2"}, CompressionOperation: types.Decompress, CompressionAlgorithm: nil},
+		{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A2"}, CompressionOperation: types.Decompress, CompressionAlgorithm: nil},
+		{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B1"}, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Bzip2},
+		{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A1"}, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Xz},
+		{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U2"}, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Gzip},
+		{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U1"}},
+		{Digest: digestCompressedPrimary, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}},
+		{Digest: digestCompressedA, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}},
+		{Digest: digestCompressedB, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}},
+		{Digest: digestUncompressed, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}},
 	}
 )
 
@@ -71,8 +71,8 @@ func TestCandidateSortStateLess(t *testing.T) {
 	} {
 		for _, tms := range [][2]int64{{1, 2}, {2, 1}, {1, 1}} {
 			caseName := fmt.Sprintf("%s %v", c.name, tms)
-			c0 := CandidateWithTime{blobinfocache.BICReplacementCandidate2{Digest: c.d0, Location: types.BICLocationReference{Opaque: "L0"}, CompressorName: compressiontypes.GzipAlgorithmName}, time.Unix(tms[0], 0)}
-			c1 := CandidateWithTime{blobinfocache.BICReplacementCandidate2{Digest: c.d1, Location: types.BICLocationReference{Opaque: "L1"}, CompressorName: compressiontypes.ZstdAlgorithmName}, time.Unix(tms[1], 0)}
+			c0 := CandidateWithTime{blobinfocache.BICReplacementCandidate2{Digest: c.d0, Location: types.BICLocationReference{Opaque: "L0"}, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Gzip}, time.Unix(tms[0], 0)}
+			c1 := CandidateWithTime{blobinfocache.BICReplacementCandidate2{Digest: c.d1, Location: types.BICLocationReference{Opaque: "L1"}, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Zstd}, time.Unix(tms[1], 0)}
 			css := candidateSortState{
 				primaryDigest:      digestCompressedPrimary,
 				uncompressedDigest: digestUncompressed,
@@ -108,8 +108,8 @@ func TestCandidateSortStateLess(t *testing.T) {
 		{"any: t=1 == t=1, d=A < d=B", -1, p{digestCompressedA, 1}, p{digestCompressedB, 1}},
 		{"any: t=1 == t=1, d=A == d=A", 0, p{digestCompressedA, 1}, p{digestCompressedA, 1}},
 	} {
-		c0 := CandidateWithTime{blobinfocache.BICReplacementCandidate2{Digest: c.p0.d, Location: types.BICLocationReference{Opaque: "L0"}, CompressorName: compressiontypes.GzipAlgorithmName}, time.Unix(c.p0.t, 0)}
-		c1 := CandidateWithTime{blobinfocache.BICReplacementCandidate2{Digest: c.p1.d, Location: types.BICLocationReference{Opaque: "L1"}, CompressorName: compressiontypes.ZstdAlgorithmName}, time.Unix(c.p1.t, 0)}
+		c0 := CandidateWithTime{blobinfocache.BICReplacementCandidate2{Digest: c.p0.d, Location: types.BICLocationReference{Opaque: "L0"}, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Gzip}, time.Unix(c.p0.t, 0)}
+		c1 := CandidateWithTime{blobinfocache.BICReplacementCandidate2{Digest: c.p1.d, Location: types.BICLocationReference{Opaque: "L1"}, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Zstd}, time.Unix(c.p1.t, 0)}
 		css := candidateSortState{
 			primaryDigest:      digestCompressedPrimary,
 			uncompressedDigest: digestUncompressed,
diff --git a/pkg/blobinfocache/internal/test/test.go b/pkg/blobinfocache/internal/test/test.go
index c310bb6ae5..66d6d0c4a6 100644
--- a/pkg/blobinfocache/internal/test/test.go
+++ b/pkg/blobinfocache/internal/test/test.go
@@ -6,9 +6,14 @@ import (
 
 	"github.com/containers/image/v5/internal/blobinfocache"
 	"github.com/containers/image/v5/internal/testing/mocks"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/pkg/compression"
+	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 const (
@@ -16,12 +21,17 @@ const (
 	digestUncompressed        = digest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222")
 	digestCompressedA         = digest.Digest("sha256:3333333333333333333333333333333333333333333333333333333333333333")
 	digestCompressedB         = digest.Digest("sha256:4444444444444444444444444444444444444444444444444444444444444444")
-	digestUncompressedC       = digest.Digest("sha256:7777777777777777777777777777777777777777777777777777777777777777")
 	digestCompressedUnrelated = digest.Digest("sha256:5555555555555555555555555555555555555555555555555555555555555555")
-	compressorNameU           = "compressorName/U"
-	compressorNameA           = "compressorName/A"
-	compressorNameB           = "compressorName/B"
-	compressorNameCU          = "compressorName/CU"
+	compressorNameU           = blobinfocache.Uncompressed
+	compressorNameA           = compressiontypes.GzipAlgorithmName
+	compressorNameB           = compressiontypes.ZstdAlgorithmName
+	compressorNameCU          = compressiontypes.ZstdChunkedAlgorithmName
+
+	digestUnknownLocation       = digest.Digest("sha256:7777777777777777777777777777777777777777777777777777777777777777")
+	digestFilteringUncompressed = digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
+	digestGzip                  = digest.Digest("sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
+	digestZstd                  = digest.Digest("sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc")
+	digestZstdChunked           = digest.Digest("sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
 )
 
 // GenericCache runs an implementation-independent set of tests, given a
@@ -103,7 +113,10 @@ func testGenericRecordKnownLocations(t *testing.T, cache blobinfocache.BlobInfoC
 					{Digest: digest, Location: lr1},
 					{Digest: digest, Location: lr2},
 				}, cache.CandidateLocations(transport, scope, digest, false))
-				assert.Equal(t, []blobinfocache.BICReplacementCandidate2{}, cache.CandidateLocations2(transport, scope, digest, false))
+				res := cache.CandidateLocations2(transport, scope, digest, blobinfocache.CandidateLocations2Options{
+					CanSubstitute: false,
+				})
+				assert.Equal(t, []blobinfocache.BICReplacementCandidate2{}, res)
 			}
 		}
 	}
@@ -124,12 +137,47 @@ func assertCandidatesMatch(t *testing.T, scopeName string, expected []candidate,
 	assert.Equal(t, e, actual)
 }
 
+func assertCandidateMatches2(t *testing.T, expected, actual blobinfocache.BICReplacementCandidate2) {
+	// Verify actual.CompressionAlgorithm separately; assert.Equal would do a pointer comparison, and fail.
+	if expected.CompressionAlgorithm != nil {
+		require.NotNil(t, actual.CompressionAlgorithm)
+		assert.Equal(t, expected.CompressionAlgorithm.Name(), actual.CompressionAlgorithm.Name())
+	} else {
+		assert.Nil(t, actual.CompressionAlgorithm)
+	}
+	c := expected                                        // A shallow copy
+	c.CompressionAlgorithm = actual.CompressionAlgorithm // Already verified above
+
+	assert.Equal(t, c, actual)
+}
+
+func assertCandidatesMatch2Native(t *testing.T, expected, actual []blobinfocache.BICReplacementCandidate2) {
+	assert.Len(t, actual, len(expected))
+	for i := range expected {
+		assertCandidateMatches2(t, expected[i], actual[i])
+	}
+}
+
 func assertCandidatesMatch2(t *testing.T, scopeName string, expected []candidate, actual []blobinfocache.BICReplacementCandidate2) {
 	e := make([]blobinfocache.BICReplacementCandidate2, len(expected))
 	for i, ev := range expected {
-		e[i] = blobinfocache.BICReplacementCandidate2{Digest: ev.d, CompressorName: ev.cn, Location: types.BICLocationReference{Opaque: scopeName + ev.lr}}
+		op := types.Decompress
+		var algo *compressiontypes.Algorithm = nil
+		if ev.cn != blobinfocache.Uncompressed {
+			algo_, err := compression.AlgorithmByName(ev.cn)
+			require.NoError(t, err)
+			op = types.Compress
+			algo = &algo_
+		}
+		e[i] = blobinfocache.BICReplacementCandidate2{
+			Digest:               ev.d,
+			CompressionOperation: op,
+			CompressionAlgorithm: algo,
+			UnknownLocation:      false,
+			Location:             types.BICLocationReference{Opaque: scopeName + ev.lr},
+		}
 	}
-	assert.Equal(t, e, actual)
+	assertCandidatesMatch2Native(t, e, actual)
 }
 
 func testGenericCandidateLocations(t *testing.T, cache blobinfocache.BlobInfoCache2) {
@@ -201,7 +249,7 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa
 	cache.RecordDigestUncompressedPair(digestCompressedA, digestUncompressed)
 	cache.RecordDigestUncompressedPair(digestCompressedB, digestUncompressed)
 	cache.RecordDigestUncompressedPair(digestUncompressed, digestUncompressed)
-	digestNameSet := []struct {
+	digestNameSetPrioritization := []struct { // Used primarily to test prioritization
 		n string
 		d digest.Digest
 		m string
@@ -211,179 +259,280 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa
 		{"B", digestCompressedB, compressorNameB},
 		{"CU", digestCompressedUnrelated, compressorNameCU},
 	}
+	digestNameSetFiltering := []struct { // Used primarily to test filtering in CandidateLocations2Options
+		n string
+		d digest.Digest
+		m string
+	}{
+		{"gzip", digestGzip, compressiontypes.GzipAlgorithmName},
+		{"zstd", digestZstd, compressiontypes.ZstdAlgorithmName},
+		{"zstdChunked", digestZstdChunked, compressiontypes.ZstdChunkedAlgorithmName},
+	}
+	for _, e := range digestNameSetFiltering { // digestFilteringUncompressed exists only to allow the three entries to be considered as candidates
+		cache.RecordDigestUncompressedPair(e.d, digestFilteringUncompressed)
+	}
 
 	for scopeIndex, scopeName := range []string{"A", "B", "C"} { // Run the test in two different scopes to verify they don't affect each other.
 		scope := types.BICTransportScope{Opaque: scopeName}
 
 		// Nothing is known.
-		assert.Equal(t, []blobinfocache.BICReplacementCandidate2{}, cache.CandidateLocations2(transport, scope, digestUnknown, false))
-		assert.Equal(t, []blobinfocache.BICReplacementCandidate2{}, cache.CandidateLocations2(transport, scope, digestUnknown, true))
+		// -----------------
+		res := cache.CandidateLocations2(transport, scope, digestUnknown, blobinfocache.CandidateLocations2Options{
+			CanSubstitute: false,
+		})
+		assert.Equal(t, []blobinfocache.BICReplacementCandidate2{}, res)
+		res = cache.CandidateLocations2(transport, scope, digestUnknown, blobinfocache.CandidateLocations2Options{
+			CanSubstitute: true,
+		})
+		assert.Equal(t, []blobinfocache.BICReplacementCandidate2{}, res)
 
+		// Results with UnknownLocation
+		// ----------------------------
 		// If a record exists with compression without Location,
 		// then return a record without location and with `UnknownLocation: true`
-		cache.RecordDigestCompressorName(digestUncompressedC, "somecompression")
-		assert.Equal(t, []blobinfocache.BICReplacementCandidate2{
+		cache.RecordDigestCompressorName(digestUnknownLocation, compressiontypes.Bzip2AlgorithmName)
+		res = cache.CandidateLocations2(transport, scope, digestUnknownLocation, blobinfocache.CandidateLocations2Options{
+			CanSubstitute: true,
+		})
+		assertCandidatesMatch2Native(t, []blobinfocache.BICReplacementCandidate2{
 			{
-				Digest:          digestUncompressedC,
-				CompressorName:  "somecompression",
-				UnknownLocation: true,
-				Location:        types.BICLocationReference{Opaque: ""},
-			}}, cache.CandidateLocations2(transport, scope, digestUncompressedC, true))
+				Digest:               digestUnknownLocation,
+				CompressionOperation: types.Compress,
+				CompressionAlgorithm: &compression.Bzip2,
+				UnknownLocation:      true,
+				Location:             types.BICLocationReference{Opaque: ""},
+			}}, res)
 		// When another entry with scope and Location is set then it should be returned as it has higher
 		// priority.
-		cache.RecordKnownLocation(transport, scope, digestUncompressedC, types.BICLocationReference{Opaque: "somelocation"})
-		assert.Equal(t, []blobinfocache.BICReplacementCandidate2{
+		cache.RecordKnownLocation(transport, scope, digestUnknownLocation, types.BICLocationReference{Opaque: "somelocation"})
+		res = cache.CandidateLocations2(transport, scope, digestUnknownLocation, blobinfocache.CandidateLocations2Options{
+			CanSubstitute: true,
+		})
+		assertCandidatesMatch2Native(t, []blobinfocache.BICReplacementCandidate2{
 			{
-				Digest:          digestUncompressedC,
-				CompressorName:  "somecompression",
-				UnknownLocation: false,
-				Location:        types.BICLocationReference{Opaque: "somelocation"},
-			}}, cache.CandidateLocations2(transport, scope, digestUncompressedC, true))
+				Digest:               digestUnknownLocation,
+				CompressionOperation: types.Compress,
+				CompressionAlgorithm: &compression.Bzip2,
+				UnknownLocation:      false,
+				Location:             types.BICLocationReference{Opaque: "somelocation"},
+			}}, res)
+
+		// Tests of lookups / prioritization when compression is unknown
+		// -------------------------------------------------------------
 
 		// Record "2" entries before "1" entries; then results should sort "1" (more recent) before "2" (older)
 		for _, suffix := range []string{"2", "1"} {
-			for _, e := range digestNameSet {
+			for _, e := range digestNameSetPrioritization {
 				cache.RecordKnownLocation(transport, scope, e.d, types.BICLocationReference{Opaque: scopeName + e.n + suffix})
 			}
 		}
-
 		// Clear any "known" compression values, except on the first loop where they've never been set.
 		// This probably triggers “Compressor for blob with digest … previously recorded as …, now unknown” warnings here, for test purposes;
 		// that shouldn’t happen in real-world usage.
 		if scopeIndex != 0 {
-			for _, e := range digestNameSet {
+			for _, e := range digestNameSetPrioritization {
 				cache.RecordDigestCompressorName(e.d, blobinfocache.UnknownCompression)
 			}
 		}
 
 		// No substitutions allowed:
-		for _, e := range digestNameSet {
+		for _, e := range digestNameSetPrioritization {
 			assertCandidatesMatch(t, scopeName, []candidate{
-				{d: e.d, lr: e.n + "1"},
-				{d: e.d, lr: e.n + "2"},
+				{d: e.d, lr: e.n + "1"}, {d: e.d, lr: e.n + "2"},
 			}, cache.CandidateLocations(transport, scope, e.d, false))
-			assertCandidatesMatch2(t, scopeName, []candidate{}, cache.CandidateLocations2(transport, scope, e.d, false))
+			// Unknown compression -> no candidates
+			res := cache.CandidateLocations2(transport, scope, e.d, blobinfocache.CandidateLocations2Options{
+				CanSubstitute: false,
+			})
+			assertCandidatesMatch2(t, scopeName, []candidate{}, res)
 		}
 
 		// With substitutions: The original digest is always preferred, then other compressed, then the uncompressed one.
 		assertCandidatesMatch(t, scopeName, []candidate{
-			{d: digestCompressedA, lr: "A1"},
-			{d: digestCompressedA, lr: "A2"},
-			{d: digestCompressedB, lr: "B1"},
-			{d: digestCompressedB, lr: "B2"},
-			{d: digestUncompressed, lr: "U1"},
-			// Beyond the replacementAttempts limit: {d: digestUncompressed, cn: compressorNameCU, lr: "U2"},
+			{d: digestCompressedA, lr: "A1"}, {d: digestCompressedA, lr: "A2"},
+			{d: digestCompressedB, lr: "B1"}, {d: digestCompressedB, lr: "B2"},
+			{d: digestUncompressed, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"},
 		}, cache.CandidateLocations(transport, scope, digestCompressedA, true))
 		// Unknown compression -> no candidates
-		assertCandidatesMatch2(t, scopeName, []candidate{}, cache.CandidateLocations2(transport, scope, digestCompressedA, true))
+		res = cache.CandidateLocations2(transport, scope, digestCompressedA, blobinfocache.CandidateLocations2Options{
+			CanSubstitute: true,
+		})
+		assertCandidatesMatch2(t, scopeName, []candidate{}, res)
 
 		assertCandidatesMatch(t, scopeName, []candidate{
-			{d: digestCompressedB, lr: "B1"},
-			{d: digestCompressedB, lr: "B2"},
-			{d: digestCompressedA, lr: "A1"},
-			{d: digestCompressedA, lr: "A2"},
+			{d: digestCompressedB, lr: "B1"}, {d: digestCompressedB, lr: "B2"},
+			{d: digestCompressedA, lr: "A1"}, {d: digestCompressedA, lr: "A2"},
 			{d: digestUncompressed, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"},
 		}, cache.CandidateLocations(transport, scope, digestCompressedB, true))
 		// Unknown compression -> no candidates
-		assertCandidatesMatch2(t, scopeName, []candidate{}, cache.CandidateLocations2(transport, scope, digestCompressedB, true))
+		res = cache.CandidateLocations2(transport, scope, digestCompressedB, blobinfocache.CandidateLocations2Options{
+			CanSubstitute: true,
+		})
+		assertCandidatesMatch2(t, scopeName, []candidate{}, res)
 
 		assertCandidatesMatch(t, scopeName, []candidate{
-			{d: digestUncompressed, lr: "U1"},
-			{d: digestUncompressed, lr: "U2"},
-			// "1" entries were added after "2", and A/Bs are sorted in the reverse of digestNameSet order
+			{d: digestUncompressed, lr: "U1"}, {d: digestUncompressed, lr: "U2"},
+			// "1" entries were added after "2", and A/Bs are sorted in the reverse of digestNameSetPrioritization order
 			{d: digestCompressedB, lr: "B1"},
 			{d: digestCompressedA, lr: "A1"},
 			{d: digestCompressedB, lr: "B2"},
 			// Beyond the replacementAttempts limit: {d: digestCompressedA, lr: "A2"},
 		}, cache.CandidateLocations(transport, scope, digestUncompressed, true))
 		// Unknown compression -> no candidates
-		assertCandidatesMatch2(t, scopeName, []candidate{}, cache.CandidateLocations2(transport, scope, digestUncompressed, true))
+		res = cache.CandidateLocations2(transport, scope, digestUncompressed, blobinfocache.CandidateLocations2Options{
+			CanSubstitute: true,
+		})
+		assertCandidatesMatch2(t, scopeName, []candidate{}, res)
 
 		// Locations are known, but no relationships
 		assertCandidatesMatch(t, scopeName, []candidate{
-			{d: digestCompressedUnrelated, lr: "CU1"},
-			{d: digestCompressedUnrelated, lr: "CU2"},
+			{d: digestCompressedUnrelated, lr: "CU1"}, {d: digestCompressedUnrelated, lr: "CU2"},
 		}, cache.CandidateLocations(transport, scope, digestCompressedUnrelated, true))
 		// Unknown compression -> no candidates
-		assertCandidatesMatch2(t, scopeName, []candidate{}, cache.CandidateLocations2(transport, scope, digestCompressedUnrelated, true))
+		res = cache.CandidateLocations2(transport, scope, digestCompressedUnrelated, blobinfocache.CandidateLocations2Options{
+			CanSubstitute: true,
+		})
+		assertCandidatesMatch2(t, scopeName, []candidate{}, res)
+
+		// Tests of lookups / prioritization when compression is known
+		// ------------------------------------------------------------
 
 		// Set the "known" compression values
-		for _, e := range digestNameSet {
+		for _, e := range digestNameSetPrioritization {
 			cache.RecordDigestCompressorName(e.d, e.m)
 		}
 
 		// No substitutions allowed:
-		for _, e := range digestNameSet {
+		for _, e := range digestNameSetPrioritization {
 			assertCandidatesMatch(t, scopeName, []candidate{
-				{d: e.d, lr: e.n + "1"},
-				{d: e.d, lr: e.n + "2"},
+				{d: e.d, lr: e.n + "1"}, {d: e.d, lr: e.n + "2"},
 			}, cache.CandidateLocations(transport, scope, e.d, false))
+			res := cache.CandidateLocations2(transport, scope, e.d, blobinfocache.CandidateLocations2Options{
+				CanSubstitute: false,
+			})
 			assertCandidatesMatch2(t, scopeName, []candidate{
-				{d: e.d, cn: e.m, lr: e.n + "1"},
-				{d: e.d, cn: e.m, lr: e.n + "2"},
-			}, cache.CandidateLocations2(transport, scope, e.d, false))
+				{d: e.d, cn: e.m, lr: e.n + "1"}, {d: e.d, cn: e.m, lr: e.n + "2"},
+			}, res)
 		}
 
 		// With substitutions: The original digest is always preferred, then other compressed, then the uncompressed one.
 		assertCandidatesMatch(t, scopeName, []candidate{
-			{d: digestCompressedA, lr: "A1"},
-			{d: digestCompressedA, lr: "A2"},
-			{d: digestCompressedB, lr: "B1"},
-			{d: digestCompressedB, lr: "B2"},
-			{d: digestUncompressed, lr: "U1"},
-			// Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"},
+			{d: digestCompressedA, lr: "A1"}, {d: digestCompressedA, lr: "A2"},
+			{d: digestCompressedB, lr: "B1"}, {d: digestCompressedB, lr: "B2"},
+			{d: digestUncompressed, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"},
 		}, cache.CandidateLocations(transport, scope, digestCompressedA, true))
+		res = cache.CandidateLocations2(transport, scope, digestCompressedA, blobinfocache.CandidateLocations2Options{
+			CanSubstitute: true,
+		})
 		assertCandidatesMatch2(t, scopeName, []candidate{
-			{d: digestCompressedA, cn: compressorNameA, lr: "A1"},
-			{d: digestCompressedA, cn: compressorNameA, lr: "A2"},
-			{d: digestCompressedB, cn: compressorNameB, lr: "B1"},
-			{d: digestCompressedB, cn: compressorNameB, lr: "B2"},
-			{d: digestUncompressed, cn: compressorNameU, lr: "U1"},
-			// Beyond the replacementAttempts limit: {d: digestUncompressed, cn: compressorNameCU, lr: "U2"},
-		}, cache.CandidateLocations2(transport, scope, digestCompressedA, true))
+			{d: digestCompressedA, cn: compressorNameA, lr: "A1"}, {d: digestCompressedA, cn: compressorNameA, lr: "A2"},
+			{d: digestCompressedB, cn: compressorNameB, lr: "B1"}, {d: digestCompressedB, cn: compressorNameB, lr: "B2"},
+			{d: digestUncompressed, cn: compressorNameU, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, cn: compressorNameU, lr: "U2"},
+		}, res)
 
 		assertCandidatesMatch(t, scopeName, []candidate{
-			{d: digestCompressedB, lr: "B1"},
-			{d: digestCompressedB, lr: "B2"},
-			{d: digestCompressedA, lr: "A1"},
-			{d: digestCompressedA, lr: "A2"},
+			{d: digestCompressedB, lr: "B1"}, {d: digestCompressedB, lr: "B2"},
+			{d: digestCompressedA, lr: "A1"}, {d: digestCompressedA, lr: "A2"},
 			{d: digestUncompressed, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"},
 		}, cache.CandidateLocations(transport, scope, digestCompressedB, true))
+		res = cache.CandidateLocations2(transport, scope, digestCompressedB, blobinfocache.CandidateLocations2Options{
+			CanSubstitute: true,
+		})
 		assertCandidatesMatch2(t, scopeName, []candidate{
-			{d: digestCompressedB, cn: compressorNameB, lr: "B1"},
-			{d: digestCompressedB, cn: compressorNameB, lr: "B2"},
-			{d: digestCompressedA, cn: compressorNameA, lr: "A1"},
-			{d: digestCompressedA, cn: compressorNameA, lr: "A2"},
+			{d: digestCompressedB, cn: compressorNameB, lr: "B1"}, {d: digestCompressedB, cn: compressorNameB, lr: "B2"},
+			{d: digestCompressedA, cn: compressorNameA, lr: "A1"}, {d: digestCompressedA, cn: compressorNameA, lr: "A2"},
 			{d: digestUncompressed, cn: compressorNameU, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, cn: compressorNameU, lr: "U2"},
-		}, cache.CandidateLocations2(transport, scope, digestCompressedB, true))
+		}, res)
 
 		assertCandidatesMatch(t, scopeName, []candidate{
-			{d: digestUncompressed, lr: "U1"},
-			{d: digestUncompressed, lr: "U2"},
-			// "1" entries were added after "2", and A/Bs are sorted in the reverse of digestNameSet order
+			{d: digestUncompressed, lr: "U1"}, {d: digestUncompressed, lr: "U2"},
+			// "1" entries were added after "2", and A/Bs are sorted in the reverse of digestNameSetPrioritization order
 			{d: digestCompressedB, lr: "B1"},
 			{d: digestCompressedA, lr: "A1"},
 			{d: digestCompressedB, lr: "B2"},
 			// Beyond the replacementAttempts limit: {d: digestCompressedA, lr: "A2"},
 		}, cache.CandidateLocations(transport, scope, digestUncompressed, true))
+		res = cache.CandidateLocations2(transport, scope, digestUncompressed, blobinfocache.CandidateLocations2Options{
+			CanSubstitute: true,
+		})
 		assertCandidatesMatch2(t, scopeName, []candidate{
-			{d: digestUncompressed, cn: compressorNameU, lr: "U1"},
-			{d: digestUncompressed, cn: compressorNameU, lr: "U2"},
-			// "1" entries were added after "2", and A/Bs are sorted in the reverse of digestNameSet order
+			{d: digestUncompressed, cn: compressorNameU, lr: "U1"}, {d: digestUncompressed, cn: compressorNameU, lr: "U2"},
+			// "1" entries were added after "2", and A/Bs are sorted in the reverse of digestNameSetPrioritization order
 			{d: digestCompressedB, cn: compressorNameB, lr: "B1"},
 			{d: digestCompressedA, cn: compressorNameA, lr: "A1"},
 			{d: digestCompressedB, cn: compressorNameB, lr: "B2"},
 			// Beyond the replacementAttempts limit: {d: digestCompressedA, cn: compressorNameA, lr: "A2"},
-		}, cache.CandidateLocations2(transport, scope, digestUncompressed, true))
+		}, res)
 
 		// Locations are known, but no relationships
 		assertCandidatesMatch(t, scopeName, []candidate{
-			{d: digestCompressedUnrelated, lr: "CU1"},
-			{d: digestCompressedUnrelated, lr: "CU2"},
+			{d: digestCompressedUnrelated, lr: "CU1"}, {d: digestCompressedUnrelated, lr: "CU2"},
 		}, cache.CandidateLocations(transport, scope, digestCompressedUnrelated, true))
+		res = cache.CandidateLocations2(transport, scope, digestCompressedUnrelated, blobinfocache.CandidateLocations2Options{
+			CanSubstitute: true,
+		})
 		assertCandidatesMatch2(t, scopeName, []candidate{
-			{d: digestCompressedUnrelated, cn: compressorNameCU, lr: "CU1"},
-			{d: digestCompressedUnrelated, cn: compressorNameCU, lr: "CU2"},
-		}, cache.CandidateLocations2(transport, scope, digestCompressedUnrelated, true))
+			{d: digestCompressedUnrelated, cn: compressorNameCU, lr: "CU1"}, {d: digestCompressedUnrelated, cn: compressorNameCU, lr: "CU2"},
+		}, res)
+
+		// Tests of candidate filtering
+		// ----------------------------
+		for _, e := range digestNameSetFiltering {
+			cache.RecordKnownLocation(transport, scope, e.d, types.BICLocationReference{Opaque: scopeName + e.n})
+		}
+		for _, e := range digestNameSetFiltering {
+			cache.RecordDigestCompressorName(e.d, e.m)
+		}
+
+		// No filtering
+		res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{
+			CanSubstitute: true,
+		})
+		assertCandidatesMatch2(t, scopeName, []candidate{ // Sorted in the reverse of digestNameSetFiltering order
+			{d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, lr: "zstdChunked"},
+			{d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, lr: "zstd"},
+			{d: digestGzip, cn: compressiontypes.GzipAlgorithmName, lr: "gzip"},
+		}, res)
+
+		// Manifest format filters
+		res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{
+			CanSubstitute:           true,
+			PossibleManifestFormats: []string{manifest.DockerV2Schema2MediaType},
+		})
+		assertCandidatesMatch2(t, scopeName, []candidate{
+			{d: digestGzip, cn: compressiontypes.GzipAlgorithmName, lr: "gzip"},
+		}, res)
+		res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{
+			CanSubstitute:           true,
+			PossibleManifestFormats: []string{imgspecv1.MediaTypeImageManifest},
+		})
+		assertCandidatesMatch2(t, scopeName, []candidate{ // Sorted in the reverse of digestNameSetFiltering order
+			{d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, lr: "zstdChunked"},
+			{d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, lr: "zstd"},
+			{d: digestGzip, cn: compressiontypes.GzipAlgorithmName, lr: "gzip"},
+		}, res)
+
+		// Compression algorithm filters
+		res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{
+			CanSubstitute:       true,
+			RequiredCompression: &compression.Gzip,
+		})
+		assertCandidatesMatch2(t, scopeName, []candidate{
+			{d: digestGzip, cn: compressiontypes.GzipAlgorithmName, lr: "gzip"},
+		}, res)
+		res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{
+			CanSubstitute:       true,
+			RequiredCompression: &compression.ZstdChunked,
+		})
+		// Right now, zstd:chunked requests never match a candidate, see CandidateCompressionMatchesReuseConditions().
+		assertCandidatesMatch2(t, scopeName, []candidate{}, res)
+		res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{
+			CanSubstitute:       true,
+			RequiredCompression: &compression.Zstd,
+		})
+		assertCandidatesMatch2(t, scopeName, []candidate{ // When the user asks for zstd, zstd:chunked candidates are also acceptable.
+			{d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, lr: "zstdChunked"},
+			{d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, lr: "zstd"},
+		}, res)
 	}
 }
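
As a reading aid, the following is a minimal caller-side sketch of the options exercised by these tests. findGzipCandidates is a hypothetical helper, not part of this change, and the code assumes it is built inside the containers/image module, since internal/blobinfocache is not importable from outside:

    package example

    import (
    	"github.com/containers/image/v5/internal/blobinfocache"
    	"github.com/containers/image/v5/manifest"
    	"github.com/containers/image/v5/pkg/compression"
    	"github.com/containers/image/v5/types"
    	"github.com/opencontainers/go-digest"
    )

    // findGzipCandidates asks the cache for reuse candidates the way the tests above do,
    // restricted to what a schema2 manifest can express and to gzip compression.
    func findGzipCandidates(cache blobinfocache.BlobInfoCache2, transport types.ImageTransport,
    	scope types.BICTransportScope, uncompressed digest.Digest) []blobinfocache.BICReplacementCandidate2 {
    	return cache.CandidateLocations2(transport, scope, uncompressed, blobinfocache.CandidateLocations2Options{
    		CanSubstitute:           true,                                        // also consider recorded variants of uncompressed
    		PossibleManifestFormats: []string{manifest.DockerV2Schema2MediaType}, // schema2 cannot express zstd, so those candidates are dropped
    		RequiredCompression:     &compression.Gzip,                           // only gzip-compressed candidates survive
    	})
    }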
diff --git a/pkg/blobinfocache/memory/memory.go b/pkg/blobinfocache/memory/memory.go
index 16193db952..1185e9d45e 100644
--- a/pkg/blobinfocache/memory/memory.go
+++ b/pkg/blobinfocache/memory/memory.go
@@ -135,14 +135,17 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso
 
 // appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
 // with corresponding compression info from mem.compressors, and returns the result of appending
-// them to candidates. v2Output allows including candidates with unknown location, and filters out
-// candidates with unknown compression.
-func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime {
+// them to candidates.
+// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates
+// with unknown location, and filters out candidates with unknown compression.
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
+	v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime {
 	compressorName := blobinfocache.UnknownCompression
 	if v, ok := mem.compressors[digest]; ok {
 		compressorName = v
 	}
-	if compressorName == blobinfocache.UnknownCompression && v2Output {
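+	// prioritize.CandidateCompression decides whether this candidate survives the
+	// v2Options filters (for v1 callers nothing is filtered), and maps compressorName
+	// to the new operation/algorithm representation.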
+	ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
+	if !ok {
 		return candidates
 	}
 	locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
@@ -150,20 +153,22 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 		for l, t := range locations {
 			candidates = append(candidates, prioritize.CandidateWithTime{
 				Candidate: blobinfocache.BICReplacementCandidate2{
-					Digest:         digest,
-					CompressorName: compressorName,
-					Location:       l,
+					Digest:               digest,
+					CompressionOperation: compressionOp,
+					CompressionAlgorithm: compressionAlgo,
+					Location:             l,
 				},
 				LastSeen: t,
 			})
 		}
-	} else if v2Output {
+	} else if v2Options != nil {
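+		// No location is recorded for this digest; v2 callers still receive it as an
+		// unknown-location candidate, so substitution purely by digest remains possible.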
 		candidates = append(candidates, prioritize.CandidateWithTime{
 			Candidate: blobinfocache.BICReplacementCandidate2{
-				Digest:          digest,
-				CompressorName:  compressorName,
-				UnknownLocation: true,
-				Location:        types.BICLocationReference{Opaque: ""},
+				Digest:               digest,
+				CompressionOperation: compressionOp,
+				CompressionAlgorithm: compressionAlgo,
+				UnknownLocation:      true,
+				Location:             types.BICLocationReference{Opaque: ""},
 			},
 			LastSeen: time.Time{},
 		})
@@ -178,24 +183,24 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
 func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
-	return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
+	return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, nil))
 }
 
-// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused
-// within the specified (transport scope) (if they still exist, which is not guaranteed).
-//
-// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
-// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
-// uncompressed digest.
-func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
-	return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
+// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
+// that could possibly be reused within the specified (transport scope) (if they still
+// exist, which is not guaranteed).
+func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+	return mem.candidateLocations(transport, scope, primaryDigest, options.CanSubstitute, &options)
 }
 
-func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
+// candidateLocations implements CandidateLocations / CandidateLocations2.
+// v2Options is not nil if the caller is CandidateLocations2.
+func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
+	v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
 	mem.mutex.Lock()
 	defer mem.mutex.Unlock()
 	res := []prioritize.CandidateWithTime{}
-	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Output)
+	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Options)
 	var uncompressedDigest digest.Digest // = ""
 	if canSubstitute {
 		if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
@@ -203,12 +208,12 @@ func (mem *cache) candidateLocations(transport types.ImageTransport, scope types
 			if otherDigests != nil {
 				for _, d := range otherDigests.Values() {
 					if d != primaryDigest && d != uncompressedDigest {
-						res = mem.appendReplacementCandidates(res, transport, scope, d, v2Output)
+						res = mem.appendReplacementCandidates(res, transport, scope, d, v2Options)
 					}
 				}
 			}
 			if uncompressedDigest != primaryDigest {
-				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Output)
+				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Options)
 			}
 		}
 	}
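
prioritize.CandidateCompression is referenced by both cache implementations but defined outside this diff. The following is only a sketch of the contract implied by the call sites and the comments above (the RequiredCompression / PossibleManifestFormats checks are elided to a comment; the real helper lives in pkg/blobinfocache/internal/prioritize):

    package prioritize

    import (
    	"github.com/containers/image/v5/internal/blobinfocache"
    	"github.com/containers/image/v5/pkg/compression"
    	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
    	"github.com/containers/image/v5/types"
    	"github.com/opencontainers/go-digest"
    )

    // CandidateCompression (sketch): report whether a candidate with the recorded
    // compressorName may be returned for this lookup, and translate the name into
    // the (operation, algorithm) pair carried by BICReplacementCandidate2.
    func CandidateCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest,
    	compressorName string) (bool, types.LayerCompression, *compressiontypes.Algorithm) {
    	if v2Options == nil {
    		return true, types.PreserveOriginal, nil // v1 callers: nothing is filtered, compression data is unused
    	}
    	switch compressorName {
    	case blobinfocache.Uncompressed:
    		return true, types.Decompress, nil
    	case blobinfocache.UnknownCompression:
    		return false, types.PreserveOriginal, nil // v2 results must never report unknown compression
    	default:
    		algo, err := compression.AlgorithmByName(compressorName)
    		if err != nil {
    			return false, types.PreserveOriginal, nil
    		}
    		// The real helper additionally rejects candidates that violate
    		// v2Options.RequiredCompression or v2Options.PossibleManifestFormats
    		// (e.g. zstd candidates for a schema2-only manifest).
    		return true, types.Compress, &algo
    	}
    }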
diff --git a/pkg/blobinfocache/sqlite/sqlite.go b/pkg/blobinfocache/sqlite/sqlite.go
index d8bde2fa0e..a5be85a652 100644
--- a/pkg/blobinfocache/sqlite/sqlite.go
+++ b/pkg/blobinfocache/sqlite/sqlite.go
@@ -428,88 +428,86 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor
 }
 
 // appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest),
-// and returns the result of appending them to candidates. v2Output allows including candidates with unknown
-// location, and filters out candidates with unknown compression.
-func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) ([]prioritize.CandidateWithTime, error) {
-	var rows *sql.Rows
-	var err error
-	if v2Output {
-		rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
-			"ON KnownLocations.digest = DigestCompressors.digest "+
-			"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
-			transport.Name(), scope.Opaque, digest.String())
-	} else {
-		rows, err = tx.Query("SELECT location, time, IFNULL(compressor, ?) FROM KnownLocations "+
-			"LEFT JOIN DigestCompressors ON KnownLocations.digest = DigestCompressors.digest "+
-			"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
-			blobinfocache.UnknownCompression,
-			transport.Name(), scope.Opaque, digest.String())
+// and returns the result of appending them to candidates.
+// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates
+// with unknown location, and filters out candidates with unknown compression.
+func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
+	v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) {
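+	// Compression data is consulted only for CandidateLocations2 output, so the
+	// DigestCompressors lookup is skipped entirely for v1 callers.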
+	compressorName := blobinfocache.UnknownCompression
+	if v2Options != nil {
+		compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
+		if err != nil {
+			return nil, fmt.Errorf("scanning compressorName: %w", err)
+		}
+		if found {
+			compressorName = compressor
+		}
+	}
+	ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
+	if !ok {
+		return candidates, nil
 	}
+
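+	// The compression fields were resolved above, so a plain KnownLocations query
+	// replaces the previous JOIN against DigestCompressors.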
+	rows, err := tx.Query("SELECT location, time FROM KnownLocations "+
+		"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
+		transport.Name(), scope.Opaque, digest.String())
 	if err != nil {
 		return nil, fmt.Errorf("looking up candidate locations: %w", err)
 	}
 	defer rows.Close()
 
-	res := []prioritize.CandidateWithTime{}
+	rowAdded := false
 	for rows.Next() {
 		var location string
 		var time time.Time
-		var compressorName string
-		if err := rows.Scan(&location, &time, &compressorName); err != nil {
+		if err := rows.Scan(&location, &time); err != nil {
 			return nil, fmt.Errorf("scanning candidate: %w", err)
 		}
-		res = append(res, prioritize.CandidateWithTime{
+		candidates = append(candidates, prioritize.CandidateWithTime{
 			Candidate: blobinfocache.BICReplacementCandidate2{
-				Digest:         digest,
-				CompressorName: compressorName,
-				Location:       types.BICLocationReference{Opaque: location},
+				Digest:               digest,
+				CompressionOperation: compressionOp,
+				CompressionAlgorithm: compressionAlgo,
+				Location:             types.BICLocationReference{Opaque: location},
 			},
 			LastSeen: time,
 		})
+		rowAdded = true
 	}
 	if err := rows.Err(); err != nil {
 		return nil, fmt.Errorf("iterating through locations: %w", err)
 	}
 
-	if len(res) == 0 && v2Output {
-		compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
-		if err != nil {
-			return nil, fmt.Errorf("scanning compressorName: %w", err)
-		}
-		if found {
-			res = append(res, prioritize.CandidateWithTime{
-				Candidate: blobinfocache.BICReplacementCandidate2{
-					Digest:          digest,
-					CompressorName:  compressor,
-					UnknownLocation: true,
-					Location:        types.BICLocationReference{Opaque: ""},
-				},
-				LastSeen: time.Time{},
-			})
-		}
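+	// As in the memory cache: if no location rows matched, v2 callers still receive
+	// an unknown-location candidate for this digest.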
+	if !rowAdded && v2Options != nil {
+		candidates = append(candidates, prioritize.CandidateWithTime{
+			Candidate: blobinfocache.BICReplacementCandidate2{
+				Digest:               digest,
+				CompressionOperation: compressionOp,
+				CompressionAlgorithm: compressionAlgo,
+				UnknownLocation:      true,
+				Location:             types.BICLocationReference{Opaque: ""},
+			},
+			LastSeen: time.Time{},
+		})
 	}
-	candidates = append(candidates, res...)
 	return candidates, nil
 }
 
 // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
 // that could possibly be reused within the specified (transport scope) (if they still
 // exist, which is not guaranteed).
-//
-// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
-// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
-// up variants of the blob which have the same uncompressed digest.
-//
-// The CompressorName fields in returned data must never be UnknownCompression.
-func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
-	return sqc.candidateLocations(transport, scope, digest, canSubstitute, true)
+func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+	return sqc.candidateLocations(transport, scope, digest, options.CanSubstitute, &options)
 }
 
-func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
+// candidateLocations implements CandidateLocations / CandidateLocations2.
+// v2Options is not nil if the caller is CandidateLocations2.
+func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
+	v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
 	var uncompressedDigest digest.Digest // = ""
 	res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
 		res := []prioritize.CandidateWithTime{}
-		res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Output)
+		res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Options)
 		if err != nil {
 			return nil, err
 		}
@@ -538,7 +536,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
 					return nil, err
 				}
 				if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
-					res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Output)
+					res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
 					if err != nil {
 						return nil, err
 					}
@@ -549,7 +547,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
 			}
 
 			if uncompressedDigest != primaryDigest {
-				res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Output)
+				res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
 				if err != nil {
 					return nil, err
 				}
@@ -571,5 +569,5 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
 func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
-	return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, false))
+	return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, nil))
 }
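
To see the old and new lookup APIs side by side, a small self-contained walkthrough using the in-memory cache (again assuming it is built inside the containers/image module; the digest and location values are placeholders):

    package main

    import (
    	"fmt"

    	_ "github.com/containers/image/v5/docker" // registers the "docker" transport
    	"github.com/containers/image/v5/internal/blobinfocache"
    	"github.com/containers/image/v5/pkg/blobinfocache/memory"
    	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
    	"github.com/containers/image/v5/transports"
    	"github.com/containers/image/v5/types"
    	"github.com/opencontainers/go-digest"
    )

    func main() {
    	cache := memory.New().(blobinfocache.BlobInfoCache2) // the in-memory cache implements the v2 interface directly
    	transport := transports.Get("docker")
    	scope := types.BICTransportScope{Opaque: "registry.example.com"}
    	blob := digest.FromString("example layer")

    	cache.RecordKnownLocation(transport, scope, blob, types.BICLocationReference{Opaque: "example/repo"})
    	cache.RecordDigestCompressorName(blob, compressiontypes.GzipAlgorithmName)

    	// v1: plain locations, selected only by the canSubstitute boolean.
    	for _, c := range cache.CandidateLocations(transport, scope, blob, true) {
    		fmt.Println("v1:", c.Digest, c.Location.Opaque)
    	}
    	// v2: the options struct introduced by this change; candidates now carry
    	// CompressionOperation / CompressionAlgorithm instead of a compressor-name string.
    	for _, c := range cache.CandidateLocations2(transport, scope, blob, blobinfocache.CandidateLocations2Options{
    		CanSubstitute: true,
    	}) {
    		if c.CompressionAlgorithm != nil {
    			fmt.Println("v2:", c.Digest, c.Location.Opaque, c.CompressionAlgorithm.Name())
    		}
    	}
    }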