Add initial OCI container support #172

Merged · 1 commit · Dec 15, 2023

24 changes: 24 additions & 0 deletions README.md
@@ -173,6 +173,30 @@ To include your own signing keys in the EDK2 build and capsule update, make
sure the option `hardware.nvidia-jetpack.firmware.uefi.capsuleAuthentication.enable`
is turned on and each signing key option is set.

### OCI Container Support

You can run OCI containers with jetpack-nixos by enabling the following NixOS options:

```nix
{
  virtualisation.podman.enable = true;
  virtualisation.podman.enableNvidia = true;
}
```

To run a container with access to NVIDIA hardware, you must specify a device to
pass through to the container in the [CDI](https://github.com/cncf-tags/container-device-interface/blob/main/SPEC.md#overview)
format. By default, a single device of kind "nvidia.com/gpu" named "all" is set
up. To use this device, pass `--device=nvidia.com/gpu=all` when starting your
container. If you need to configure additional CDI devices on the NixOS host,
note that the path /var/run/cdi/jetpack-nixos.yaml is reserved by jetpack-nixos.
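
For example, a hypothetical declarative setup using the NixOS
`virtualisation.oci-containers` module might look like the following (the image
name is only a placeholder):

```nix
{
  virtualisation.oci-containers = {
    backend = "podman";
    containers.gpu-example = {
      # Placeholder image; substitute the L4T-based image you actually use.
      image = "nvcr.io/nvidia/l4t-base:r35.2.1";
      # Pass the CDI device described above through to the container.
      extraOptions = [ "--device=nvidia.com/gpu=all" ];
    };
  };
}
```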

As of December 2023, Docker does not have a released version that supports the
CDI specification, so Podman is recommended for running containers on Jetson
devices. Docker is expected to gain experimental CDI support in its version 25
release.

## Additional Links

Much of this is inspired by the great work done by [OpenEmbedded for Tegra](https://github.com/OE4T).
1 change: 1 addition & 0 deletions UPGRADE_CHECKLIST.md
@@ -10,6 +10,7 @@
- [ ] Compare files from `unpackedDebs` before and after
- [ ] Ensure the soc variants in `modules/flash-script.nix` match those in jetson_board_spec.cfg from BSP
- [ ] Ensure logic in ota-utils/ota_helpers.func matches nvidia-l4t-init/opt/nvidia/nv-l4t-bootloader-config.sh
- [ ] Run `nix build .#genL4tJson` and copy output to `pkgs/containers/l4t.json`

### Testing
- [ ] Run `nix flake check`
21 changes: 18 additions & 3 deletions default.nix
@@ -29,13 +29,20 @@ let
mv Linux_for_Tegra $out
'';

# Just for convenience. Unused
# Here for convenience, to see what is in upstream Jetpack
unpackedDebs = pkgs.runCommand "unpackedDebs" { nativeBuildInputs = [ dpkg ]; } ''
mkdir -p $out
${lib.concatStringsSep "\n" (lib.mapAttrsToList (n: p: "echo Unpacking ${n}; dpkg -x ${p.src} $out/${n}") debs.common)}
${lib.concatStringsSep "\n" (lib.mapAttrsToList (n: p: "echo Unpacking ${n}; dpkg -x ${p.src} $out/${n}") debs.t234)}
'';

# Also just for convenience,
unpackedDebsFilenames = pkgs.runCommand "unpackedDebsFilenames" { nativeBuildInputs = [ dpkg ]; } ''
mkdir -p $out
${lib.concatStringsSep "\n" (lib.mapAttrsToList (n: p: "echo Extracting file list from ${n}; dpkg --fsys-tarfile ${p.src} | tar --list > $out/${n}") debs.common)}
${lib.concatStringsSep "\n" (lib.mapAttrsToList (n: p: "echo Extracting file list from ${n}; dpkg --fsys-tarfile ${p.src} | tar --list > $out/${n}") debs.t234)}
'';

inherit (pkgsAarch64.callPackages ./pkgs/uefi-firmware { inherit l4tVersion; })
edk2-jetson uefi-firmware;

@@ -117,11 +124,12 @@ let
otaUtils = callPackage ./pkgs/ota-utils {
inherit tegra-eeprom-tool l4tVersion;
};
in rec {
in
rec {
inherit jetpackVersion l4tVersion cudaVersion;

# Just for convenience
inherit bspSrc debs unpackedDebs;
inherit bspSrc debs unpackedDebs unpackedDebsFilenames;

inherit cudaPackages samples;
inherit flash-tools;
@@ -164,6 +172,13 @@ in rec {
};
};

l4tCsv = callPackage ./pkgs/containers/l4t-csv.nix { inherit bspSrc; };
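# Generates the JSON consumed by pkgs/containers/deps.nix; per UPGRADE_CHECKLIST.md,
# copy its output to pkgs/containers/l4t.json when updating the BSP.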
genL4tJson = runCommand "l4t.json" { nativeBuildInputs = [ python3 ]; } ''
python3 ${./pkgs/containers/gen_l4t_json.py} ${l4tCsv} ${unpackedDebsFilenames} > $out
'';
containerDeps = callPackage ./pkgs/containers/deps.nix { inherit debs; };
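# nvidia-ctk is used by the nvidia-cdi-generate service in modules/default.nix
# to generate the CDI spec for the container runtimes.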
nvidia-ctk = callPackage ./pkgs/containers/nvidia-ctk.nix { };

flashScripts = lib.mapAttrs' (n: c: lib.nameValuePair "flash-${n}" c.flashScript) devicePkgs;
initrdFlashScripts = lib.mapAttrs' (n: c: lib.nameValuePair "initrd-flash-${n}" c.initrdFlashScript) devicePkgs;
uefiCapsuleUpdates = lib.mapAttrs' (n: c: lib.nameValuePair "uefi-capsule-update-${n}" c.uefiCapsuleUpdate) devicePkgs;
37 changes: 36 additions & 1 deletion modules/default.nix
@@ -18,6 +18,8 @@ let
name = "tee-supplicant-plugins";
paths = cfg.firmware.optee.supplicantPlugins;
};

nvidiaContainerRuntimeActive = with config.virtualisation; (docker.enable && docker.enableNvidia) || (podman.enable && podman.enableNvidia);
in
{
imports = [
@@ -79,7 +81,14 @@ in
};

config = mkIf cfg.enable {
nixpkgs.overlays = [ (import ../overlay.nix) ];
assertions = [{
assertion = (config.virtualisation.docker.enable && config.virtualisation.docker.enableNvidia) -> lib.versionAtLeast config.virtualisation.docker.package.version "25";
message = "Docker version < 25 does not support CDI";
}];

nixpkgs.overlays = [
(import ../overlay.nix)
];

boot.kernelPackages =
if cfg.kernel.realtime
@@ -226,6 +235,32 @@ in
otaUtils # Tools for UEFI capsule updates
];

systemd.tmpfiles.rules = lib.optional nvidiaContainerRuntimeActive "d /var/run/cdi 0755 root root - -";

systemd.services.nvidia-cdi-generate = {
enable = nvidiaContainerRuntimeActive;
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStart =
let
exe = "${pkgs.nvidia-jetpack.nvidia-ctk}/bin/nvidia-ctk";
in
toString [
exe
"cdi"
"generate"
"--nvidia-ctk-path=${exe}" # it is odd that this is needed, should be the same as /proc/self/exe?
"--driver-root=${pkgs.nvidia-jetpack.containerDeps}" # the root where nvidia libs will be resolved from
"--dev-root=/" # the root where chardevs will be resolved from
"--mode=csv"
"--csv.file=${pkgs.nvidia-jetpack.l4tCsv}"
"--output=/var/run/cdi/jetpack-nixos" # a yaml file extension is added by the nvidia-ctk tool
];
};
wantedBy = [ "multi-user.target" ];
};

# Used by libEGL_nvidia.so.0
environment.etc."egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
};
24 changes: 24 additions & 0 deletions pkgs/containers/deps.nix
@@ -0,0 +1,24 @@
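# Assemble a "driver root" containing only the files listed in l4t.json, unpacked
# from the NVIDIA debs. nvidia-ctk resolves libraries from here (via --driver-root
# in modules/default.nix) when generating the CDI spec.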
{ lib
, runCommand
, dpkg
, debs
}:

runCommand "container-deps" { nativeBuildInputs = [ dpkg ]; }
(lib.concatStringsSep "\n"
(lib.mapAttrsToList
(deb: debFiles:
(if builtins.hasAttr deb debs.t234 then ''
echo Unpacking ${deb}; dpkg -x ${debs.t234.${deb}.src} debs
'' else ''
echo Unpacking ${deb}; dpkg -x ${debs.common.${deb}.src} debs
'') + (lib.concatStringsSep "\n" (map
(file: ''
if [[ -f debs${file} ]]; then
install -D --target-directory=$out${builtins.dirOf file} debs${file}
else
echo "WARNING: file ${file} not found in deb ${deb}"
fi
'')
debFiles)))
(lib.importJSON ./l4t.json)))
59 changes: 59 additions & 0 deletions pkgs/containers/gen_l4t_json.py
@@ -0,0 +1,59 @@
#!/usr/bin/env nix-shell
#!nix-shell -i python -p python3

import json
import os.path
import sys

# Run like: ./gen_l4t_json.py ./l4t.csv ./unpackedDebsFilenames


def main():
    l4tcsv_filename = sys.argv[1]
    filelist_dir = sys.argv[2]

    l4tfiles = []  # Files we need to extract
    with open(l4tcsv_filename, "r") as l4tcsv:
        for line in l4tcsv:
            filetype, filename = line.split(",")
            filetype = filetype.strip()
            filename = filename.strip()

            if filetype in ["lib", "sym"]:
                l4tfiles.append(filename)
            elif filetype in ["dev", "dir"]:
                # Nothing to extract
                pass
            else:
                raise Exception(f"Don't know how to handle filetype {filetype}")

    output = {}
    for fn in os.listdir(filelist_dir):
        fullpath = os.path.join(filelist_dir, fn)
        if not os.path.isfile(fullpath):
            raise Exception(f"Don't know how to handle {fullpath}")

        files_needed = []
        with open(fullpath, "r") as filelist:
            for debfilename in filelist:
                # Strip the leading "." so the path is absolute, matching l4t.csv
                debfilename = debfilename[1:].strip()

                # Skip directories
                if debfilename.endswith("/"):
                    continue

                # Naive O(n^2) matching used here
                if (debfilename in l4tfiles) or any(
                    debfilename.startswith(l4tdir) for l4tdir in l4tfiles
                ):
                    files_needed.append(debfilename)

        if len(files_needed) > 0:
            output[fn] = files_needed

    print(json.dumps(output, sort_keys=True, indent=2, separators=(",", ": ")))


if __name__ == "__main__":
    main()
5 changes: 5 additions & 0 deletions pkgs/containers/l4t-csv.nix
@@ -0,0 +1,5 @@
{ bspSrc, runCommand }:
runCommand "l4t.csv" { } ''
tar -xf "${bspSrc}/nv_tegra/config.tbz2"
install etc/nvidia-container-runtime/host-files-for-container.d/l4t.csv $out
''