Draft: Changes from all commits (50 commits)
db617d4
docs: removed labs ref for ADD checksum and git
dvdksn Aug 1, 2023
4835d12
docs: troubleshoot for `mount proc:/proc (via /proc/self/fd/6), flags…
AkihiroSuda Jul 28, 2023
b45e8ae
docs/build-repro.md: fix description about squashing
AkihiroSuda Jun 30, 2023
6f9024e
filesync: write closer err discarded
crazy-max Aug 8, 2023
42c8b53
Add configurable keepalives
jacobwgillespie Mar 2, 2023
1c8171c
feat: return stable cache digests via progress
goller Feb 28, 2023
3d29883
refactor: use existing progress writer
goller Feb 28, 2023
2782482
feat(push): push multiple images in parallel
goller Mar 15, 2023
7e70406
feat(s3): add parallel push of layers to s3
goller Mar 16, 2023
f05f8e1
feat(s3): wait until all layers are uploaded
goller Mar 16, 2023
b5a8f39
fix: remove newline for linting
goller Mar 16, 2023
3cc4950
feat: add optional profiling when env vars are set
goller Apr 5, 2023
014775c
feat: add architecture for profiling tags
goller Apr 5, 2023
fe9fdac
fix(export): allow OCI worker to unlazy image output
goller Apr 12, 2023
999b2c4
perf: switch num concurrent layer fetch/push to 12
goller Apr 17, 2023
db20c05
perf(export): use pgzip to compress layers
goller Apr 18, 2023
12bf4e5
test: update expected digest with new compression
goller Apr 18, 2023
478ecfc
test: update tests to use pgzip writers
goller Apr 18, 2023
ae28474
feat: optionally turn off parallel gzip
goller Apr 18, 2023
0c8b318
feat: return index, config, and manifest
goller Apr 19, 2023
b4ed61d
test: update stable sha
goller Apr 19, 2023
53373ed
perf: use simd to accelerate SHA256 computation
goller Apr 28, 2023
04344cb
Background the cache metadata cleanup
jacobwgillespie May 1, 2023
01ddeca
Update expected digest
jacobwgillespie May 2, 2023
2c9a113
feat(load): add new lease tracking all image layers
goller May 4, 2023
32a2179
Reduce default auth credential expiration to 10 minutes
jacobwgillespie May 6, 2023
68e0315
fix(debug): check for nil payload
goller Jun 2, 2023
c49e44d
feat: log actives changes during debug debugScheduler
goller Jun 2, 2023
6092cbe
feat: log the vertex information for state failures
goller Jun 2, 2023
f083372
fix(provenance): check for nil and protect with lock
goller Jun 6, 2023
3ebe2c9
feat: selectively turn off mergeTo functionality
goller Jun 5, 2023
fb7ecd3
feat: add gpu support for oci worker
goller Jun 6, 2023
9d0cab6
refactor(load): return manifests via solve response
goller Jun 13, 2023
852e81a
Fix inconsistent graph state error
jacobwgillespie Jun 16, 2023
25a5068
fix: lock images to prevent race condition
goller Jun 29, 2023
c1e27ff
perf: use simd sha256 for cacheContext checksumming
goller Jun 29, 2023
d13f63b
fix: check snapshot labels to avoid panic
goller Jul 9, 2023
c7771c6
refactor: remove panic in dockerfile-frontend
goller Jul 13, 2023
9db0ff6
feat: respond to gRPC health status requests
goller Jul 14, 2023
cdf1ae7
feat: add buildctl health check
goller Jul 15, 2023
65cf5d4
Disable boltdb freelist sync
jacobwgillespie Jul 15, 2023
098561a
Remove NoFreelistSync
jacobwgillespie Jul 20, 2023
b6f728b
Enable NoFreelistSync for containerdmeta.db
jacobwgillespie Jul 20, 2023
4d1e763
fix: rollback boltdb transaction to stop deadlock
goller Jul 26, 2023
1cf5511
Use Depot stargz-snapshotter fix
jacobwgillespie Aug 8, 2023
baddaac
fix: close bolt metadata on shutdown
goller Aug 21, 2023
c225dbb
Make gRPC server terminate TLS
jacobwgillespie Aug 25, 2023
95fb1e5
feat: report status to depot API
goller Aug 22, 2023
1fb3c98
Background reporting status to API
jacobwgillespie Aug 31, 2023
f32d4e8
feat: add creator and stable digests to disk usage
goller Sep 1, 2023
451 changes: 308 additions & 143 deletions api/services/control/control.pb.go

Large diffs are not rendered by default.

9 changes: 8 additions & 1 deletion api/services/control/control.proto
@@ -35,7 +35,7 @@ message PruneRequest {
}

message DiskUsageRequest {
repeated string filter = 1;
repeated string filter = 1;
}

message DiskUsageResponse {
@@ -55,6 +55,8 @@ message UsageRecord {
string RecordType = 10;
bool Shared = 11;
repeated string Parents = 12;
repeated string stable_digests = 9998;
string creator_digest = 9999;
}

message SolveRequest {
@@ -124,6 +126,11 @@ message Vertex {
google.protobuf.Timestamp completed = 6 [(gogoproto.stdtime) = true ];
string error = 7; // typed errors?
pb.ProgressGroup progressGroup = 8;

string stableDigest = 9999 [
(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest",
(gogoproto.nullable) = false
];
}

message VertexStatus {
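Depot's additions use deliberately high field numbers (9998 and 9999) so they stay clear of upstream BuildKit's field assignments across rebases. A minimal sketch of reading the new usage-record fields back over the control API; the generated Go field names (`StableDigests`, `CreatorDigest`) and the socket path are assumptions inferred from this diff, not confirmed by it:

```go
package main

import (
	"context"
	"fmt"
	"log"

	controlapi "github.com/moby/buildkit/api/services/control"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial the buildkitd control socket (path is illustrative).
	conn, err := grpc.Dial("unix:///run/buildkit/buildkitd.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	c := controlapi.NewControlClient(conn)
	resp, err := c.DiskUsage(context.Background(), &controlapi.DiskUsageRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range resp.Record {
		// CreatorDigest ties a cache record to the vertex that created it;
		// StableDigests are the content-stable identifiers this PR adds.
		fmt.Println(r.ID, r.CreatorDigest, r.StableDigests)
	}
}
```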
17 changes: 17 additions & 0 deletions buf.gen.yaml
@@ -0,0 +1,17 @@
version: v1

managed:
enabled: true
go_package_prefix:
default: github.com/moby/buildkit/depot/api
except:
- buf.build/googleapis/googleapis
- buf.build/depot/buildkit

plugins:
- plugin: buf.build/protocolbuffers/go:v1.31.0
out: depot/api
opt: paths=source_relative
- plugin: buf.build/connectrpc/go:v1.11.1
out: depot/api
opt: paths=source_relative
3 changes: 3 additions & 0 deletions buf.work.yaml
@@ -0,0 +1,3 @@
version: v1
directories:
- depot/api
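Assuming standard buf behavior, the buf.work.yaml workspace plus the buf.gen.yaml above let a single `buf generate` run regenerate both the protobuf and Connect-Go stubs under depot/api, with source-relative output paths.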
4 changes: 2 additions & 2 deletions cache/blobs_linux.go
@@ -11,10 +11,10 @@ import (
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/mount"
"github.com/moby/buildkit/depot"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/overlay"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
@@ -53,7 +53,7 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
bufW := bufio.NewWriterSize(cw, 128*1024)
var labels map[string]string
if compressorFunc != nil {
dgstr := digest.SHA256.Digester()
dgstr := depot.NewFastDigester()
compressed, err := compressorFunc(bufW, mediaType)
if err != nil {
return emptyDesc, false, errors.Wrap(err, "failed to get compressed stream")
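`depot.NewFastDigester` itself is not shown in this PR. Given the "perf: use simd to accelerate SHA256 computation" commit and the `minio/sha256-simd` import added in cache/contenthash/filehash.go below, a plausible sketch is a `digest.Digester` backed by the SIMD hash; this is an assumption about the helper's shape, not its actual source:

```go
package depot

import (
	"hash"

	sha256 "github.com/minio/sha256-simd" // SIMD-accelerated SHA256
	digest "github.com/opencontainers/go-digest"
)

// NewFastDigester returns a digest.Digester whose underlying hash is the
// SIMD implementation; a drop-in replacement for digest.SHA256.Digester()
// and digest.Canonical.Digester().
func NewFastDigester() digest.Digester {
	return &fastDigester{h: sha256.New()}
}

type fastDigester struct {
	h hash.Hash
}

func (d *fastDigester) Hash() hash.Hash { return d.h }

func (d *fastDigester) Digest() digest.Digest {
	return digest.NewDigest(digest.SHA256, d.h)
}
```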
4 changes: 3 additions & 1 deletion cache/compression_nydus.go
@@ -4,10 +4,12 @@
package cache

import (
"compress/gzip"
"context"
"io"

// DEPOT: Using parallel gzip for faster image layer compression
gzip "github.com/klauspost/pgzip"

"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/moby/buildkit/cache/config"
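klauspost/pgzip intentionally mirrors the compress/gzip API, which is why an aliased import is the only change call sites need; the gain is that pgzip splits the stream into blocks and compresses them on multiple goroutines. A self-contained sketch of the swap with pgzip's optional tuning knob (the block size and worker count below are illustrative, not the values BuildKit uses):

```go
package main

import (
	"io"
	"os"
	"runtime"

	// Drop-in replacement for "compress/gzip".
	gzip "github.com/klauspost/pgzip"
)

func compress(dst io.Writer, src io.Reader) error {
	zw := gzip.NewWriter(dst)
	// Optional: 1 MiB blocks, one worker per CPU.
	if err := zw.SetConcurrency(1<<20, runtime.GOMAXPROCS(0)); err != nil {
		return err
	}
	if _, err := io.Copy(zw, src); err != nil {
		zw.Close()
		return err
	}
	return zw.Close() // flushes the gzip trailer
}

func main() {
	if err := compress(os.Stdout, os.Stdin); err != nil {
		os.Exit(1)
	}
}
```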
12 changes: 6 additions & 6 deletions cache/contenthash/checksum.go
@@ -3,7 +3,6 @@ package contenthash
import (
"bytes"
"context"
"crypto/sha256"
"io"
"os"
"path"
@@ -14,6 +13,7 @@ import (
iradix "github.com/hashicorp/go-immutable-radix"
"github.com/hashicorp/golang-lru/simplelru"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/depot"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/locker"
@@ -434,7 +434,7 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable,
return includedPaths[0].record.Digest, nil
}

digester := digest.Canonical.Digester()
digester := depot.NewFastDigester()
for i, w := range includedPaths {
if i != 0 {
digester.Hash().Write([]byte{0})
@@ -906,7 +906,7 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir

switch cr.Type {
case CacheRecordTypeDir:
h := sha256.New()
digester := depot.NewFastDigester()
next := append(k, 0)
iter := root.Iterator()
iter.SeekLowerBound(append(append([]byte{}, next...), 0))
@@ -916,14 +916,14 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
if !ok || !bytes.HasPrefix(subk, next) {
break
}
h.Write(bytes.TrimPrefix(subk, k))
digester.Hash().Write(bytes.TrimPrefix(subk, k))

subcr, _, err := cc.checksum(ctx, root, txn, m, subk, true)
if err != nil {
return nil, false, err
}

h.Write([]byte(subcr.Digest))
digester.Hash().Write([]byte(subcr.Digest))

if subcr.Type == CacheRecordTypeDir { // skip subfiles
next := append(subk, 0, 0xff)
@@ -932,7 +932,7 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
}
subk, _, ok = iter.Next()
}
dgst = digest.NewDigest(digest.SHA256, h)
dgst = digester.Digest()

default:
p := string(convertKeyToPath(bytes.TrimSuffix(k, []byte{0})))
2 changes: 1 addition & 1 deletion cache/contenthash/filehash.go
@@ -2,12 +2,12 @@ package contenthash

import (
"archive/tar"
"crypto/sha256"
"hash"
"os"
"path/filepath"
"time"

"github.com/minio/sha256-simd"
fstypes "github.com/tonistiigi/fsutil/types"
)

84 changes: 51 additions & 33 deletions cache/manager.go
@@ -1294,18 +1294,20 @@ func (cm *cacheManager) markShared(m map[string]*cacheUsageInfo) error {
}

type cacheUsageInfo struct {
refs int
parents []string
size int64
mutable bool
createdAt time.Time
usageCount int
lastUsedAt *time.Time
description string
doubleRef bool
recordType client.UsageRecordType
shared bool
parentChain []digest.Digest
refs int
parents []string
size int64
mutable bool
createdAt time.Time
usageCount int
lastUsedAt *time.Time
description string
doubleRef bool
recordType client.UsageRecordType
shared bool
parentChain []digest.Digest
stableDigests []string
creatorDigest string
}

func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
Expand All @@ -1329,16 +1331,18 @@ func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo)

usageCount, lastUsedAt := cr.getLastUsed()
c := &cacheUsageInfo{
refs: len(cr.refs),
mutable: cr.mutable,
size: cr.getSize(),
createdAt: cr.GetCreatedAt(),
usageCount: usageCount,
lastUsedAt: lastUsedAt,
description: cr.GetDescription(),
doubleRef: cr.equalImmutable != nil,
recordType: cr.GetRecordType(),
parentChain: cr.layerDigestChain(),
refs: len(cr.refs),
mutable: cr.mutable,
size: cr.getSize(),
createdAt: cr.GetCreatedAt(),
usageCount: usageCount,
lastUsedAt: lastUsedAt,
description: cr.GetDescription(),
doubleRef: cr.equalImmutable != nil,
recordType: cr.GetRecordType(),
parentChain: cr.layerDigestChain(),
stableDigests: cr.GetStringSlice("depot.stableDigests"),
creatorDigest: cr.GetString("depot.vertexDigest"),
}
if c.recordType == "" {
c.recordType = client.UsageRecordTypeRegular
@@ -1395,17 +1399,19 @@ func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo)
var du []*client.UsageInfo
for id, cr := range m {
c := &client.UsageInfo{
ID: id,
Mutable: cr.mutable,
InUse: cr.refs > 0,
Size: cr.size,
Parents: cr.parents,
CreatedAt: cr.createdAt,
Description: cr.description,
LastUsedAt: cr.lastUsedAt,
UsageCount: cr.usageCount,
RecordType: cr.recordType,
Shared: cr.shared,
ID: id,
Mutable: cr.mutable,
InUse: cr.refs > 0,
Size: cr.size,
Parents: cr.parents,
CreatedAt: cr.createdAt,
Description: cr.description,
LastUsedAt: cr.lastUsedAt,
UsageCount: cr.usageCount,
RecordType: cr.recordType,
Shared: cr.shared,
StableDigests: cr.stableDigests,
CreatorDigest: cr.creatorDigest,
}
if filter.Match(adaptUsageInfo(c)) {
du = append(du, c)
@@ -1475,6 +1481,18 @@ func WithDescription(descr string) RefOption {
}
}

func WithStableDigests(digests []string) RefOption {
return func(m *cacheMetadata) error {
return m.AppendStringSlice("depot.stableDigests", digests...)
}
}

func WithVertexDigest(digest string) RefOption {
return func(m *cacheMetadata) error {
return m.InsertIfNotExists("depot.vertexDigest", digest)
}
}

func WithRecordType(t client.UsageRecordType) RefOption {
return func(m *cacheMetadata) error {
return m.queueRecordType(t)
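Both helpers follow the existing RefOption pattern, so the solver can attach the Depot metadata when a ref is created. A hypothetical call site; everything here except the two new options is illustrative, since the real wiring is outside this diff:

```go
package cache

import "context"

// newRefWithDepotMetadata is a hypothetical example of attaching the
// stable digests and creator vertex digest at ref-creation time.
func newRefWithDepotMetadata(ctx context.Context, cm *cacheManager, parent ImmutableRef, stable []string, vertex string) (MutableRef, error) {
	return cm.New(ctx, parent, nil, // nil session.Group for brevity
		WithDescription("created by vertex "+vertex),
		WithStableDigests(stable),
		WithVertexDigest(vertex),
	)
}
```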
4 changes: 3 additions & 1 deletion cache/manager_test.go
@@ -3,7 +3,6 @@ package cache
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"encoding/binary"
"fmt"
@@ -18,6 +17,9 @@ import (
"testing"
"time"

// DEPOT: Using parallel gzip for faster image layer compression
gzip "github.com/klauspost/pgzip"

ctdcompression "github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/archive/tarheader"
"github.com/containerd/containerd/content"
19 changes: 16 additions & 3 deletions cache/metadata.go
@@ -72,6 +72,10 @@ type RefMetadata interface {
// generic getters/setters for external packages
GetString(string) string
SetString(key, val, index string) error
// AppendStringSlice will append the values to the existing slice; values are deduplicated.
AppendStringSlice(key string, values ...string) error
// InsertIfNotExists will insert the value if the key does not exist; otherwise, the value is ignored.
InsertIfNotExists(key, value string) error

GetExternal(string) ([]byte, error)
SetExternal(string, []byte) error
@@ -292,7 +296,7 @@ func (md *cacheMetadata) appendURLs(urls []string) error {
if len(urls) == 0 {
return nil
}
return md.appendStringSlice(keyURLs, urls...)
return md.AppendStringSlice(keyURLs, urls...)
}

func (md *cacheMetadata) getURLs() []string {
@@ -363,7 +367,7 @@ func (md *cacheMetadata) getSize() int64 {
}

func (md *cacheMetadata) appendImageRef(s string) error {
return md.appendStringSlice(keyImageRefs, s)
return md.AppendStringSlice(keyImageRefs, s)
}

func (md *cacheMetadata) getImageRefs() []string {
@@ -542,7 +546,7 @@ func (md *cacheMetadata) getInt64(key string) (int64, bool) {
return i, true
}

func (md *cacheMetadata) appendStringSlice(key string, values ...string) error {
func (md *cacheMetadata) AppendStringSlice(key string, values ...string) error {
return md.si.GetAndSetValue(key, func(v *metadata.Value) (*metadata.Value, error) {
var slice []string
if v != nil {
Expand Down Expand Up @@ -586,3 +590,12 @@ func (md *cacheMetadata) getStringSlice(key string) []string {
}
return s
}

func (md *cacheMetadata) InsertIfNotExists(key, value string) error {
return md.si.GetAndSetValue(key, func(v *metadata.Value) (*metadata.Value, error) {
if v != nil {
return nil, metadata.ErrSkipSetValue
}
return metadata.NewValue(value)
})
}
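The deduplication promised in AppendStringSlice's doc comment happens inside the `GetAndSetValue` callback, which this hunk truncates. A standalone model of the documented semantics, independent of the metadata store:

```go
package main

import "fmt"

// appendDeduped models AppendStringSlice: append values to the slice,
// skipping any value that is already present.
func appendDeduped(slice []string, values ...string) []string {
	seen := make(map[string]struct{}, len(slice))
	for _, v := range slice {
		seen[v] = struct{}{}
	}
	for _, v := range values {
		if _, ok := seen[v]; !ok {
			seen[v] = struct{}{}
			slice = append(slice, v)
		}
	}
	return slice
}

func main() {
	s := appendDeduped([]string{"sha256:a"}, "sha256:b", "sha256:a")
	fmt.Println(s) // [sha256:a sha256:b]
}
```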
6 changes: 4 additions & 2 deletions cache/refs.go
@@ -1098,8 +1098,10 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s
if err == nil { // usable as remote snapshot without unlazying.
defer func() {
// Remove tmp labels appended in this func
for k := range tmpLabels {
info.Labels[k] = ""
if info.Labels != nil {
for k := range tmpLabels {
info.Labels[k] = ""
}
}
if _, err := r.cm.Snapshotter.Update(ctx, info, tmpFields...); err != nil {
bklog.G(ctx).Warn(errors.Wrapf(err,
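The nil check matters because a snapshotter can return an Info whose Labels map was never initialized, and assigning into a nil map panics at runtime. A minimal reproduction of the failure mode this guard avoids (the label key is illustrative):

```go
package main

import "fmt"

func main() {
	var labels map[string]string // nil until created with make() or a literal
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r) // assignment to entry in nil map
		}
	}()
	labels["temp-label"] = "" // panics on a nil map
}
```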