{ pkgs, lib, config, ... }:
{
  xdg.desktopEntries.ollama = {
    name = "Ollama";
    type = "Application";
    icon = ../icons/ollama.png;
    exec =
      let
        inherit (lib) getExe getExe';
        notify-send = "${getExe' pkgs.libnotify "notify-send"} -a \"Ollama\"";
        systemctl = getExe' pkgs.systemd "systemctl";
        podman = getExe pkgs.podman;
      in
      "${pkgs.writeShellScript "ollama" ''
        set -euo pipefail

        # Notify the user of the failure (waiting for the notification to be
        # dismissed) and abort.
        exit_error() {
          ${notify-send} -w "Failure" "$1"
          exit 1
        }

        # Succeed once podman reports the container's health check as passing.
        container_checks() {
          if [ "$(${podman} inspect -f '{{.State.Health.Status}}' ollama)" == "healthy" ]; then
            return 0
          else
            return 1
          fi
        }

        ${notify-send} "Launching Ollama.." "Please be patient."
        ${systemctl} --user start podman-ollama

        # Poll the health check every 2 seconds, nudging the user every 10
        # attempts and giving up after 60 (roughly two minutes).
        checks=0
        until container_checks; do
          sleep 2
          checks=$((checks + 1))
          if [ $((checks % 10)) -eq 0 ]; then
            ${notify-send} "Launching.."
          fi
          if [ $checks -ge 60 ]; then
            ${systemctl} --no-block --user stop podman-ollama.target
            exit_error "Failed to launch!"
          fi
        done

        ${notify-send} "Ollama serving on port 11434."
      ''}";
  };

  services.podman = {
    containers.ollama =
      let
        username = config.mainUser;
      in
      {
        image = "docker.io/ollama/ollama:latest";
        devices = [ "nvidia.com/gpu=all" ];
        autoStart = false;
        autoUpdate = "registry";
        network = [ "ollama" ];
        ports = [ "11434:11434" ];
        volumes = [ "/home/${username}/.local/share/ollama:/models" ];
        environment.OLLAMA_MODELS = "/models";
        # Health check: the container is healthy once something is listening
        # on port 11434 inside it.
        extraPodmanArgs = [
          "--health-cmd"
          (lib.escapeShellArg "bash -c 'cat < /dev/null > /dev/tcp/localhost/11434'")
        ];
      };

    networks.ollama = {
      subnet = "192.168.10.0/24";
      gateway = "192.168.10.1";
    };
  };
}
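# Rough usage sketch (assumptions: this module is imported into a Home Manager
# configuration that defines the custom `mainUser` option, and the host exposes
# the nvidia CDI devices referenced above). After launching the "Ollama" desktop
# entry and the health check passes, the service can be verified manually, e.g.:
#   podman inspect -f '{{.State.Health.Status}}' ollama   # should print "healthy"
#   curl http://localhost:11434/api/tags                   # lists downloaded models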