Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
84 changes: 55 additions & 29 deletions image-fetcher/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,11 @@ import (
"fmt"
"os"
"runtime"
"time"

containerd "github.com/containerd/containerd/v2/client"
"github.com/containerd/containerd/v2/core/images"
"github.com/containerd/containerd/v2/core/leases"
"github.com/containerd/containerd/v2/pkg/namespaces"
"github.com/containerd/platforms"
)
Expand All @@ -23,6 +26,7 @@ const (
func main() {
if len(os.Args) < 2 {
fmt.Fprintf(os.Stderr, "Usage: %s <image-ref> [image-ref...]\n", os.Args[0])
fmt.Fprintf(os.Stderr, " %s --gc\n", os.Args[0])
fmt.Fprintf(os.Stderr, "Example: %s mcr.microsoft.com/oss/kubernetes/pause:3.9\n", os.Args[0])
os.Exit(1)
}
Expand All @@ -45,6 +49,15 @@ func main() {

ctx := namespaces.WithNamespace(context.Background(), ns)

if len(os.Args) == 2 && os.Args[1] == "--gc" {
if err := triggerGarbageCollection(ctx, client); err != nil {
fmt.Fprintf(os.Stderr, "Failed to trigger containerd GC: %v\n", err)
os.Exit(1)
}
fmt.Println("Triggered containerd GC")
return
}

failed := 0
for _, ref := range os.Args[1:] {
if err := fetchImage(ctx, client, ref); err != nil {
Expand All @@ -68,7 +81,7 @@ func main() {
// already-fetched content from the store and handles snapshotter resolution
// internally (namespace label → platform default).
func fetchImage(ctx context.Context, client *containerd.Client, ref string) error {
fetchOnly := os.Getenv("IMAGE_FETCH_ONLY") == "true"
//fetchOnly := os.Getenv("IMAGE_FETCH_ONLY") == "true"

fmt.Printf("Fetching %s ...\n", ref)

Expand All @@ -79,42 +92,55 @@ func fetchImage(ctx context.Context, client *containerd.Client, ref string) erro
}
platformMatcher := platforms.OnlyStrict(p)

imageMeta, err := client.Fetch(ctx, ref,
// imageMeta, err := client.Fetch(ctx, ref,
// containerd.WithPlatformMatcher(platformMatcher),
// )
// if err != nil {
// return fmt.Errorf("fetch failed: %w", err)
// }

// if fetchOnly {
// fmt.Printf("OK %s -> %s (fetched)\n", imageMeta.Name, imageMeta.Target.Digest)
// return nil
// }

// image := containerd.NewImage(client, imageMeta)

// size, err := image.Size(ctx)
// if err != nil {
// fmt.Fprintf(os.Stderr, "WARN %s: could not determine image size, skipping unpack: %v\n", ref, err)
// fmt.Printf("OK %s -> %s (fetched)\n", imageMeta.Name, imageMeta.Target.Digest)
// return nil
// }

// if size < pullSizeThreshold {
// We use pull here instead of unpack because some runtimes (e.g. containerd-shim-runsc-v1)
// require pull to trigger unpacking into the correct snapshotter based on the image's platform.
pullOpts := []containerd.RemoteOpt{
containerd.WithPlatformMatcher(platformMatcher),
)
if err != nil {
return fmt.Errorf("fetch failed: %w", err)
containerd.WithPullUnpack,
containerd.WithChildLabelMap(images.ChildGCLabelsFilterLayers),
}

if fetchOnly {
fmt.Printf("OK %s -> %s (fetched)\n", imageMeta.Name, imageMeta.Target.Digest)
return nil
imageMeta, err := client.Pull(ctx, ref, pullOpts...)
if err != nil {
return fmt.Errorf("pull failed: %w", err)
}
fmt.Printf("OK %s (pulled)\n", imageMeta.Name)
// } else {
// fmt.Printf("OK %s -> %s (fetched, %s)\n", imageMeta.Name, imageMeta.Target.Digest, formatSize(size))
// }

image := containerd.NewImage(client, imageMeta)
return nil
}

size, err := image.Size(ctx)
func triggerGarbageCollection(ctx context.Context, client *containerd.Client) error {
ls := client.LeasesService()
l, err := ls.Create(ctx, leases.WithRandomID(), leases.WithExpiration(time.Hour))
if err != nil {
fmt.Fprintf(os.Stderr, "WARN %s: could not determine image size, skipping unpack: %v\n", ref, err)
fmt.Printf("OK %s -> %s (fetched)\n", imageMeta.Name, imageMeta.Target.Digest)
return nil
}

if size < pullSizeThreshold {
// We use pull here instead of unpack because some runtimes (e.g. containerd-shim-runsc-v1)
// require pull to trigger unpacking into the correct snapshotter based on the image's platform.
if _, err := client.Pull(ctx, ref,
containerd.WithPlatformMatcher(platformMatcher),
containerd.WithPullUnpack,
); err != nil {
return fmt.Errorf("pull failed: %w", err)
}
fmt.Printf("OK %s -> %s (pulled, %s)\n", imageMeta.Name, imageMeta.Target.Digest, formatSize(size))
} else {
fmt.Printf("OK %s -> %s (fetched, %s)\n", imageMeta.Name, imageMeta.Target.Digest, formatSize(size))
return err
}

return nil
return ls.Delete(ctx, l, leases.SynchronousDelete)
}

func formatSize(bytes int64) string {
Expand Down
1 change: 1 addition & 0 deletions vhdbuilder/packer/install-dependencies.sh
Original file line number Diff line number Diff line change
Expand Up @@ -699,6 +699,7 @@ while IFS= read -r imageToBePulled; do
done <<< "$ContainerImages"
echo "Waiting for container image pulls to finish. PID: ${image_pids[@]}"
wait ${image_pids[@]}
Copy link

Copilot AI Mar 10, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

wait ${image_pids[@]} only returns the exit status of the last PID waited for, so failures in earlier background pulls can be missed (and with set -e the script would still proceed to run image-fetcher --gc). Consider iterating over PIDs (or looping wait -n until all jobs finish) and tracking any non-zero exit status so the build fails reliably before triggering GC.

Suggested change
wait ${image_pids[@]}
overall_status=0
for pid in "${image_pids[@]}"; do
wait "$pid"
status=$?
if [ "$status" -ne 0 ]; then
echo "Container image pull job with PID $pid failed with exit code $status" >&2
overall_status=$status
fi
done
if [ "$overall_status" -ne 0 ]; then
echo "One or more container image pulls failed; skipping image-fetcher GC and exiting with code $overall_status" >&2
exit "$overall_status"
fi

Copilot uses AI. Check for mistakes.
/opt/azure/containers/image-fetcher --gc
capture_benchmark "${SCRIPT_NAME}_caching_container_images"

retagAKSNodeCAWatcher() {
Expand Down
Loading