From 712888d9e8f34c8058acd735a98115cacdcee4d1 Mon Sep 17 00:00:00 2001 From: Derock Date: Mon, 15 Jan 2024 19:24:44 -0500 Subject: [PATCH] containers page progress --- package.json | 5 +- pnpm-lock.yaml | 94 +- .../project/[id]/(home)/layout.tsx | 2 + .../project/[id]/ProjectLayout.tsx | 6 +- .../service/[serviceId]/containers/page.tsx | 104 + .../[id]/service/[serviceId]/layout.tsx | 45 +- src/components/SidebarNav.tsx | 19 +- src/components/ui/checkbox.tsx | 30 + src/server/api/middleware/logger.ts | 28 + src/server/api/middleware/project.ts | 9 +- src/server/api/middleware/service.ts | 53 + src/server/api/routers/projects/deploy.ts | 3 + .../routers/projects/service/containers.ts | 162 + .../api/routers/projects/service/index.ts | 3 + src/server/api/trpc.ts | 5 +- src/server/docker/docker.ts | 7 + src/server/docker/stack.ts | 17 +- src/server/docker/types.d.ts | 9794 +++++++++++++++++ src/server/utils/logger.ts | 8 +- 19 files changed, 10375 insertions(+), 19 deletions(-) create mode 100644 src/app/(dashboard)/project/[id]/service/[serviceId]/containers/page.tsx create mode 100644 src/components/ui/checkbox.tsx create mode 100644 src/server/api/middleware/logger.ts create mode 100644 src/server/api/middleware/service.ts create mode 100644 src/server/api/routers/projects/service/containers.ts create mode 100644 src/server/docker/types.d.ts diff --git a/package.json b/package.json index f934686..ff5b031 100644 --- a/package.json +++ b/package.json @@ -13,13 +13,15 @@ "dev:run": "node --enable-source-maps dist/server.js", "lint": "next lint", "start": "node -r tsconfig", - "fetch-compose-types": "curl -s https://raw.githubusercontent.com/compose-spec/compose-spec/master/schema/compose-spec.json | json2ts > src/server/docker/compose.d.ts" + "fetch-compose-types": "curl -s https://raw.githubusercontent.com/compose-spec/compose-spec/master/schema/compose-spec.json | json2ts > src/server/docker/compose.d.ts", + "fetch-docker-types": "openapi-typescript 
https://docs.docker.com/reference/engine/v1.43.yaml -o ./src/server/docker/types.d.ts" }, "dependencies": { "@hookform/resolvers": "^3.3.4", "@mantine/form": "^7.4.0", "@nicktomlin/codemirror-lang-yaml-lite": "^0.0.3", "@prisma/migrate": "^5.7.1", + "@radix-ui/react-checkbox": "^1.0.4", "@radix-ui/react-dialog": "^1.0.5", "@radix-ui/react-dropdown-menu": "^2.0.6", "@radix-ui/react-icons": "^1.3.0", @@ -91,6 +93,7 @@ "eslint-config-next": "^14.0.4", "json-schema-to-typescript": "^13.1.1", "npm-run-all": "^4.1.5", + "openapi-typescript": "^5.4.1", "postcss": "^8.4.32", "prettier": "^3.1.1", "prettier-plugin-tailwindcss": "^0.5.10", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 4ff452a..f922da0 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -17,6 +17,9 @@ dependencies: '@prisma/migrate': specifier: ^5.7.1 version: 5.7.1(@prisma/generator-helper@5.7.1)(@prisma/internals@5.7.1) + '@radix-ui/react-checkbox': + specifier: ^1.0.4 + version: 1.0.4(@types/react-dom@18.2.18)(@types/react@18.2.46)(react-dom@18.2.0)(react@18.2.0) '@radix-ui/react-dialog': specifier: ^1.0.5 version: 1.0.5(@types/react-dom@18.2.18)(@types/react@18.2.46)(react-dom@18.2.0)(react@18.2.0) @@ -226,6 +229,9 @@ devDependencies: npm-run-all: specifier: ^4.1.5 version: 4.1.5 + openapi-typescript: + specifier: ^5.4.1 + version: 5.4.1 postcss: specifier: ^8.4.32 version: 8.4.32 @@ -1075,6 +1081,11 @@ packages: engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dev: true + /@fastify/busboy@2.1.0: + resolution: {integrity: sha512-+KpH+QxZU7O4675t3mnkQKcZZg56u+K/Ct2K+N2AZYNVK8kyeo/bI18tI8aPm3tvNNRyTWfj6s5tnGNlcbQRsA==} + engines: {node: '>=14'} + dev: true + /@floating-ui/core@1.5.2: resolution: {integrity: sha512-Ii3MrfY/GAIN3OhXNzpCKaLxHQfJF9qvwq/kEJYdqDxeIHa01K8sldugal6TmeeXl+WMvhv9cnVzUTaFFJF09A==} dependencies: @@ -1579,6 +1590,34 @@ packages: react-dom: 18.2.0(react@18.2.0) dev: false + 
/@radix-ui/react-checkbox@1.0.4(@types/react-dom@18.2.18)(@types/react@18.2.46)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-CBuGQa52aAYnADZVt/KBQzXrwx6TqnlwtcIPGtVt5JkkzQwMOLJjPukimhfKEr4GQNd43C+djUh5Ikopj8pSLg==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 + react-dom: ^16.8 || ^17.0 || ^18.0 + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + dependencies: + '@babel/runtime': 7.23.7 + '@radix-ui/primitive': 1.0.1 + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.46)(react@18.2.0) + '@radix-ui/react-context': 1.0.1(@types/react@18.2.46)(react@18.2.0) + '@radix-ui/react-presence': 1.0.1(@types/react-dom@18.2.18)(@types/react@18.2.46)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.46)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.46)(react@18.2.0) + '@radix-ui/react-use-previous': 1.0.1(@types/react@18.2.46)(react@18.2.0) + '@radix-ui/react-use-size': 1.0.1(@types/react@18.2.46)(react@18.2.0) + '@types/react': 18.2.46 + '@types/react-dom': 18.2.18 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: false + /@radix-ui/react-collection@1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.46)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-3SzW+0PW7yBBoQlT8wNcGtaxaD0XSu0uLUFgrtHY08Acx05TaHaOmVLR73c0j/cqpDy53KBMO7s0dx2wmOIDIA==} peerDependencies: @@ -2095,6 +2134,20 @@ packages: react: 18.2.0 dev: false + /@radix-ui/react-use-previous@1.0.1(@types/react@18.2.46)(react@18.2.0): + resolution: {integrity: sha512-cV5La9DPwiQ7S0gf/0qiD6YgNqM5Fk97Kdrlc5yBcrF3jyEZQwm7vYFqMo4IfeHgJXsRaMvLABFtd0OVEmZhDw==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 + peerDependenciesMeta: + '@types/react': + optional: true + dependencies: + '@babel/runtime': 7.23.7 + '@types/react': 18.2.46 + 
react: 18.2.0 + dev: false + /@radix-ui/react-use-rect@1.0.1(@types/react@18.2.46)(react@18.2.0): resolution: {integrity: sha512-Cq5DLuSiuYVKNU8orzJMbl15TXilTnJKUCltMVQg53BQOF1/C5toAaGrowkgksdBQ9H+SRL23g0HDmg9tvmxXw==} peerDependencies: @@ -5019,6 +5072,10 @@ packages: define-properties: 1.2.1 dev: true + /globalyzer@0.1.0: + resolution: {integrity: sha512-40oNTM9UfG6aBmuKxk/giHn5nQ8RVz/SS4Ir6zgzOv9/qC3kKZ9v4etGTcJbEl/NyVQH7FGU7d+X1egr57Md2Q==} + dev: true + /globby@11.1.0: resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==} engines: {node: '>=10'} @@ -5047,6 +5104,10 @@ packages: - supports-color dev: true + /globrex@0.1.2: + resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==} + dev: true + /gopd@1.0.1: resolution: {integrity: sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==} dependencies: @@ -5909,7 +5970,6 @@ packages: resolution: {integrity: sha512-jSCU7/VB1loIWBZe14aEYHU/+1UMEHoaO7qxCOVJOw9GgH72VAWppxNcjU+x9a2k3GSIBXNKxXQFqRvvZ7vr3A==} engines: {node: '>=10.0.0'} hasBin: true - dev: false /mimic-fn@2.1.0: resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==} @@ -6354,6 +6414,19 @@ packages: resolution: {integrity: sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw==} dev: false + /openapi-typescript@5.4.1: + resolution: {integrity: sha512-AGB2QiZPz4rE7zIwV3dRHtoUC/CWHhUjuzGXvtmMQN2AFV8xCTLKcZUHLcdPQmt/83i22nRE7+TxXOXkK+gf4Q==} + engines: {node: '>= 14.0.0'} + hasBin: true + dependencies: + js-yaml: 4.1.0 + mime: 3.0.0 + prettier: 2.8.8 + tiny-glob: 0.2.9 + undici: 5.28.2 + yargs-parser: 21.1.1 + dev: true + /optionator@0.9.3: resolution: {integrity: sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==} engines: {node: '>= 
0.8.0'} @@ -7624,6 +7697,13 @@ packages: next-tick: 1.1.0 dev: true + /tiny-glob@0.2.9: + resolution: {integrity: sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg==} + dependencies: + globalyzer: 0.1.0 + globrex: 0.1.2 + dev: true + /tiny-invariant@1.3.1: resolution: {integrity: sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw==} dev: false @@ -7889,6 +7969,13 @@ packages: /undici-types@5.26.5: resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} + /undici@5.28.2: + resolution: {integrity: sha512-wh1pHJHnUeQV5Xa8/kyQhO7WFa8M34l026L5P/+2TYiakvGy5Rdc8jWZVyG7ieht/0WgJLEd3kcU5gKx+6GC8w==} + engines: {node: '>=14.0'} + dependencies: + '@fastify/busboy': 2.1.0 + dev: true + /unenv@1.8.0: resolution: {integrity: sha512-uIGbdCWZfhRRmyKj1UioCepQ0jpq638j/Cf0xFTn4zD1nGJ2lSdzYHLzfdXN791oo/0juUiSWW1fBklXMTsuqg==} dependencies: @@ -8183,6 +8270,11 @@ packages: resolution: {integrity: sha512-8aAvwVUSHpfEqTQ4w/KMlf3HcRdt50E5ODIQJBw1fQ5RL34xabzxtUlzTXVqc4rkZsPbvrXKWnABCD7kWSmocA==} engines: {node: '>= 14'} + /yargs-parser@21.1.1: + resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} + engines: {node: '>=12'} + dev: true + /yocto-queue@0.1.0: resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} engines: {node: '>=10'} diff --git a/src/app/(dashboard)/project/[id]/(home)/layout.tsx b/src/app/(dashboard)/project/[id]/(home)/layout.tsx index f6a6adb..e55f4b9 100644 --- a/src/app/(dashboard)/project/[id]/(home)/layout.tsx +++ b/src/app/(dashboard)/project/[id]/(home)/layout.tsx @@ -1,5 +1,6 @@ "use client"; +import { HomeIcon } from "lucide-react"; import { SettingsHeader } from "~/app/(dashboard)/settings/SettingsHeader"; import { SidebarNav } from "~/components/SidebarNav"; import { Separator } 
from "~/components/ui/separator"; @@ -10,6 +11,7 @@ const sidebarNavItems = [ title: "Home", description: "Quick overview of all containers for this project.", href: "/", + icon: HomeIcon, }, { title: "Sessions", diff --git a/src/app/(dashboard)/project/[id]/ProjectLayout.tsx b/src/app/(dashboard)/project/[id]/ProjectLayout.tsx index 07b0203..f52bdf2 100644 --- a/src/app/(dashboard)/project/[id]/ProjectLayout.tsx +++ b/src/app/(dashboard)/project/[id]/ProjectLayout.tsx @@ -17,6 +17,7 @@ export function ProjectLayout(props: { }) { const params = useParams(); const projectPath = `/project/${params.id as string}`; + const servicePath = `${projectPath}/service/${params.serviceId as string}`; const project = api.projects.get.useQuery( { projectId: props.project.id }, @@ -33,9 +34,9 @@ export function ProjectLayout(props: { ).length ?? 0; const selectedService = - typeof params.serviceid === "string" + typeof params.serviceId === "string" ? project.data.services.find((service) => - [service.id, service.name].includes(params.serviceid as string), + [service.id, service.name].includes(params.serviceId as string), ) : undefined; @@ -44,6 +45,7 @@ export function ProjectLayout(props: { data={{ ...project.data, path: projectPath, + servicePath, selectedService, }} > diff --git a/src/app/(dashboard)/project/[id]/service/[serviceId]/containers/page.tsx b/src/app/(dashboard)/project/[id]/service/[serviceId]/containers/page.tsx new file mode 100644 index 0000000..12a17a1 --- /dev/null +++ b/src/app/(dashboard)/project/[id]/service/[serviceId]/containers/page.tsx @@ -0,0 +1,104 @@ +"use client"; + +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuLabel, + DropdownMenuTrigger, +} from "@radix-ui/react-dropdown-menu"; +import { ClipboardIcon } from "lucide-react"; +import { FaGear } from "react-icons/fa6"; +import { toast } from "sonner"; +import { Button } from "~/components/ui/button"; +import { + Table, + TableBody, + TableCell, + TableHead, + 
TableHeader, + TableRow, +} from "~/components/ui/table"; +import { api } from "~/trpc/react"; +import { useProject } from "../../../_context/ProjectContext"; + +export default function Containers() { + const project = useProject(); + + const containers = api.projects.services.containers.useQuery({ + serviceId: project.selectedService!.id, + projectId: project.id, + }); + + return ( + + + + Container ID + Type + Status + Actions + + + + {containers.data?.containers.map((container) => ( + + { + if (!navigator.clipboard || !window.isSecureContext) { + return toast.error( + "Cannot copy to clipboard when not using HTTPS.", + ); + } + + navigator.clipboard + .writeText(container.containerId) + .then(() => { + toast.success("Copied to clipboard."); + }) + .catch((err) => { + console.error(err); + toast.error("Failed to copy to clipboard"); + }); + }} + > + {container.containerId?.substring(0, 8) ?? "N/A (deploying)"} + {container.containerId && ( + + )} + + Deployed (updated) + {container.error} + + + + + + + Actions + test + + + + + ))} + + {project.services.length === 0 && ( + + + No services + + + )} + +
+ ); +} diff --git a/src/app/(dashboard)/project/[id]/service/[serviceId]/layout.tsx b/src/app/(dashboard)/project/[id]/service/[serviceId]/layout.tsx index 71ae5e6..32911e0 100644 --- a/src/app/(dashboard)/project/[id]/service/[serviceId]/layout.tsx +++ b/src/app/(dashboard)/project/[id]/service/[serviceId]/layout.tsx @@ -1,6 +1,15 @@ "use client"; -import { BoxesIcon, CloudyIcon, CodeIcon, HomeIcon } from "lucide-react"; +import { + BoxesIcon, + CloudyIcon, + CodeIcon, + ContainerIcon, + GlobeIcon, + HomeIcon, + SaveAllIcon, + ServerCogIcon, +} from "lucide-react"; import { SidebarNav, type SidebarNavProps } from "~/components/SidebarNav"; import { useProject } from "../../_context/ProjectContext"; @@ -29,12 +38,42 @@ const sidebarNavItems = [ href: "/deployments", icon: CloudyIcon, }, + + { + type: "divider", + title: "Build Settings", + }, + { title: "Source", description: "Source settings", href: "/source", icon: CodeIcon, }, + { + title: "Domains", + description: "Domain settings", + href: "/domains", + icon: GlobeIcon, + }, + { + title: "Environment", + description: "Environment settings", + href: "/environment", + icon: ContainerIcon, + }, + { + title: "Volumes", + description: "Volume settings", + href: "/volumes", + icon: SaveAllIcon, + }, + { + title: "Advanced", + description: "Advanced settings", + href: "/replication", + icon: ServerCogIcon, + }, ] as const; export default function ProjectHomeLayout({ @@ -45,7 +84,7 @@ export default function ProjectHomeLayout({ const project = useProject(); const items = sidebarNavItems.map((item) => ({ ...item, - href: "href" in item ? `${project.path}${item.href}` : undefined, + href: "href" in item ? `${project.servicePath}${item.href}` : undefined, })) as SidebarNavProps["items"]; return ( @@ -54,7 +93,7 @@ export default function ProjectHomeLayout({ -
+
{/* */} {/* */} {children} diff --git a/src/components/SidebarNav.tsx b/src/components/SidebarNav.tsx index e34416c..35d199d 100644 --- a/src/components/SidebarNav.tsx +++ b/src/components/SidebarNav.tsx @@ -35,10 +35,15 @@ export function SidebarNav({ className, items, ...props }: SidebarNavProps) { )} {...props} > - {items.map((item, i) => - item.type === "divider" ? ( + {items.map((item, i) => { + const isActive = + item.type !== "divider" + ? pathname === item.href.replace(/\/$/, "") + : false; + + return item.type === "divider" ? (

{item.title} @@ -49,21 +54,21 @@ export function SidebarNav({ className, items, ...props }: SidebarNavProps) { href={item.href} className={cn( buttonVariants({ variant: "ghost" }), - pathname === item.href.replace(/\/$/, "") + isActive ? "bg-muted hover:bg-muted" : "hover:bg-transparent hover:underline", "justify-start", )} > {item.icon && ( -

+
)} {item.title} - ), - )} + ); + })} ); } diff --git a/src/components/ui/checkbox.tsx b/src/components/ui/checkbox.tsx new file mode 100644 index 0000000..7b1b46d --- /dev/null +++ b/src/components/ui/checkbox.tsx @@ -0,0 +1,30 @@ +"use client" + +import * as React from "react" +import * as CheckboxPrimitive from "@radix-ui/react-checkbox" +import { CheckIcon } from "@radix-ui/react-icons" + +import { cn } from "~/utils/utils.ts" + +const Checkbox = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + + + + + +)) +Checkbox.displayName = CheckboxPrimitive.Root.displayName + +export { Checkbox } diff --git a/src/server/api/middleware/logger.ts b/src/server/api/middleware/logger.ts new file mode 100644 index 0000000..6b2b078 --- /dev/null +++ b/src/server/api/middleware/logger.ts @@ -0,0 +1,28 @@ +import { experimental_standaloneMiddleware } from "@trpc/server"; +import chalk from "chalk"; +import logger from "~/server/utils/logger"; + +const log = logger.child({ module: "trpc:server" }); +export const loggerMiddleware = experimental_standaloneMiddleware().create( + async ({ type, path, next }) => { + const result = await next(); + + if (result.ok === false) { + if (result.error.code === "INTERNAL_SERVER_ERROR") { + log.error( + `Internal server error on ${chalk.red(type)}: ${chalk.red(path)}`, + result.error, + ); + } else { + log.warn( + `${result.error.code} on ${chalk.yellow(type)}: ${chalk.yellow( + path, + )}`, + result.error, + ); + } + } + + return result; + }, +); diff --git a/src/server/api/middleware/project.ts b/src/server/api/middleware/project.ts index 6f2d3fa..4fbd010 100644 --- a/src/server/api/middleware/project.ts +++ b/src/server/api/middleware/project.ts @@ -3,6 +3,13 @@ import { eq, or } from "drizzle-orm"; import { type db } from "~/server/db"; import { projects } from "~/server/db/schema"; +export type BasicProjectDetails = { + id: string; + friendlyName: string; + internalName: string; 
+ createdAt: number; +}; + export const projectMiddleware = experimental_standaloneMiddleware<{ ctx: { db: typeof db }; input: { projectId: string }; @@ -38,7 +45,7 @@ export const projectMiddleware = experimental_standaloneMiddleware<{ return next({ ctx: { - project: project, + project: project as BasicProjectDetails, }, }); }); diff --git a/src/server/api/middleware/service.ts b/src/server/api/middleware/service.ts new file mode 100644 index 0000000..be55b0e --- /dev/null +++ b/src/server/api/middleware/service.ts @@ -0,0 +1,53 @@ +import { TRPCError, experimental_standaloneMiddleware } from "@trpc/server"; +import { and, eq, or } from "drizzle-orm"; +import { type db } from "~/server/db"; +import { service } from "~/server/db/schema"; +import { type BasicProjectDetails } from "./project"; + +export const serviceMiddleware = experimental_standaloneMiddleware<{ + ctx: { db: typeof db; project: BasicProjectDetails }; + input: { serviceId: string }; +}>().create(async ({ ctx, input, next }) => { + if (typeof input.serviceId != "string") { + throw new TRPCError({ + code: "INTERNAL_SERVER_ERROR", + message: "Expected a service ID or internal name.", + }); + } + + if (typeof ctx.project?.id != "string") { + throw new TRPCError({ + code: "INTERNAL_SERVER_ERROR", + message: + "Expected a project ID. 
(maybe projectMiddleware is not being used?)", + }); + } + + const [serviceDetails] = await ctx.db + .select({ + id: service.id, + name: service.name, + createdAt: service.createdAt, + }) + .from(service) + .where( + and( + eq(service.projectId, ctx.project.id), + or(eq(service.name, input.serviceId), eq(service.id, input.serviceId)), + ), + ) + .limit(1); + + if (!serviceDetails) + throw new TRPCError({ + code: "NOT_FOUND", + message: + "Service not found or insufficient permissions: " + input.serviceId, + }); + + return next({ + ctx: { + service: serviceDetails, + }, + }); +}); diff --git a/src/server/api/routers/projects/deploy.ts b/src/server/api/routers/projects/deploy.ts index 05bec08..9328938 100644 --- a/src/server/api/routers/projects/deploy.ts +++ b/src/server/api/routers/projects/deploy.ts @@ -2,6 +2,7 @@ import { eq } from "drizzle-orm"; import { z } from "zod"; import { service } from "~/server/db/schema"; import { buildDockerStackFile } from "~/server/docker/stack"; +import logger from "~/server/utils/logger"; import { projectMiddleware } from "../../middleware/project"; import { authenticatedProcedure } from "../../trpc"; @@ -33,6 +34,8 @@ export const deployProject = authenticatedProcedure }); const dockerStackFile = await buildDockerStackFile(services); + logger.debug("deploying stack", { dockerStackFile }); + const response = await ctx.docker.cli( ["stack", "deploy", "--compose-file", "-", ctx.project.internalName], { diff --git a/src/server/api/routers/projects/service/containers.ts b/src/server/api/routers/projects/service/containers.ts new file mode 100644 index 0000000..fb73b70 --- /dev/null +++ b/src/server/api/routers/projects/service/containers.ts @@ -0,0 +1,162 @@ +import assert from "assert"; +import { z } from "zod"; +import { projectMiddleware } from "~/server/api/middleware/project"; +import { serviceMiddleware } from "~/server/api/middleware/service"; +import { authenticatedProcedure } from "~/server/api/trpc"; +import { type paths as 
DockerAPITypes } from "~/server/docker/types"; + +const getServiceContainersOutput = z.object({ + replication: z.object({ + running: z.number(), + desired: z.number(), + }), + + containers: z.array( + z.object({ + status: z + .string({ + description: + "Same as [].Status https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList", + }) + .optional(), + state: z + .string({ + description: + "Same as [].State https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList", + }) + .optional(), + taskState: z.enum([ + "complete", + "new", + "allocated", + "pending", + "assigned", + "accepted", + "preparing", + "ready", + "starting", + "running", + "shutdown", + "failed", + "rejected", + "remove", + "orphaned", + ]), + + containerId: z.string(), + containerCreatedAt: z.number(), + taskUpdatedAt: z.number(), + + error: z.string().optional(), + node: z.string().optional(), + }), + ), +}); + +export const getServiceContainers = authenticatedProcedure + .meta({ + openapi: { + method: "GET", + path: "/api/projects/:projectId/services/:serviceId/containers", + summary: "Get service containers", + }, + }) + .input( + z.object({ + projectId: z.string(), + serviceId: z.string(), + }), + ) + .output(getServiceContainersOutput) + // .output(z.unknown()) + .use(projectMiddleware) + .use(serviceMiddleware) + .query(async ({ ctx, input }) => { + // get docker service stats + const service = (await ctx.docker + .getService(`${ctx.project.internalName}_${ctx.service.name}`) + .inspect()) as DockerAPITypes["/services/{id}"]["get"]["responses"]["200"]["schema"]; + + assert(service.ID, "Unable to retrieve service ID."); + + // list all the containers related to this service + const containersPromise = ctx.docker.listContainers({ + all: true, + filters: { + label: [`com.docker.swarm.service.id=${service.ID}`], + }, + }); + + // and find the current task ID for this service + const tasksPromise = ctx.docker.listTasks({ + filters: { + service: 
[service.ID], + }, + }) as Promise< + DockerAPITypes["/tasks"]["get"]["responses"]["200"]["schema"] + >; + + // and list all nodes + const nodesPromise = ctx.docker.listNodes() as Promise< + DockerAPITypes["/nodes"]["get"]["responses"]["200"]["schema"] + >; + + const [containers, tasks, nodes] = await Promise.all([ + containersPromise, + tasksPromise, + nodesPromise, + ]); + + // format stats + const formatted = { + serviceId: service.ID, + + replication: { + running: service.Spec?.Mode?.Replicated?.Replicas ?? 0, + desired: service.Spec?.Mode?.Replicated?.Replicas ?? 0, + }, + + containers: tasks + .sort((a, b) => { + // order in descending order of creation + if (a.CreatedAt && b.CreatedAt) { + return ( + new Date(b.CreatedAt).getTime() - new Date(a.CreatedAt).getTime() + ); + } else { + return 0; + } + }) + .map((task) => { + // find the associated container + const container = containers.find( + (container) => + container.Id === task.Status?.ContainerStatus?.ContainerID, + ); + + const taskUpdatedAt = new Date(task.UpdatedAt ?? 0).getTime(); + const containerCreatedAt = new Date( + container?.Created ?? 0, + ).getTime(); + + return { + status: container?.Status, + state: container?.State, + taskState: task.Status?.State, + + containerId: task.Status?.ContainerStatus?.ContainerID ?? 
"", + containerCreatedAt, + taskUpdatedAt, + + error: task.Status?.Err, + node: nodes.find((node) => node.ID === task.NodeID)?.Description + ?.Hostname, + }; + }), + }; + + // return formatted; + + // I don't feel like writing a lot of assert's because for some reason all the types are `| undefined` and I don't know why + return getServiceContainersOutput.parse(formatted); + }); diff --git a/src/server/api/routers/projects/service/index.ts b/src/server/api/routers/projects/service/index.ts index b47cace..e909605 100644 --- a/src/server/api/routers/projects/service/index.ts +++ b/src/server/api/routers/projects/service/index.ts @@ -8,8 +8,11 @@ import { authenticatedProcedure, createTRPCRouter } from "~/server/api/trpc"; import { service } from "~/server/db/schema"; import { ServiceSource } from "~/server/db/types"; import { zDockerName } from "~/server/utils/zod"; +import { getServiceContainers } from "./containers"; export const serviceRouter = createTRPCRouter({ + containers: getServiceContainers, + create: authenticatedProcedure .meta({ openapi: { diff --git a/src/server/api/trpc.ts b/src/server/api/trpc.ts index 84da704..a18fc9e 100644 --- a/src/server/api/trpc.ts +++ b/src/server/api/trpc.ts @@ -17,6 +17,7 @@ import { Session } from "../auth/Session"; import { getDockerInstance } from "../docker"; import { type Docker } from "../docker/docker"; import logger from "../utils/logger"; +import { loggerMiddleware } from "./middleware/logger"; // import { OpenApiMeta, generateOpenApiDocument } from "trpc-openapi"; export type ExtendedRequest = IncomingMessage & { @@ -163,7 +164,7 @@ export const createTRPCRouter = t.router; * guarantee that a user querying is authorized, but you can still access user session data if they * are logged in. 
*/ -export const publicProcedure = t.procedure; +export const publicProcedure = t.procedure.use(loggerMiddleware); /** * Authenticated procedure @@ -171,7 +172,7 @@ export const publicProcedure = t.procedure; * This is the base piece you use to build new queries and mutations on your tRPC API. It guarantees * that a user querying is authorized, and you can access user session data. */ -export const authenticatedProcedure = t.procedure.use( +export const authenticatedProcedure = t.procedure.use(loggerMiddleware).use( t.middleware(({ ctx, next }) => { if (!ctx.session) { throw new TRPCError({ diff --git a/src/server/docker/docker.ts b/src/server/docker/docker.ts index 1e2cab8..d95e92c 100644 --- a/src/server/docker/docker.ts +++ b/src/server/docker/docker.ts @@ -70,4 +70,11 @@ export class Docker extends Dockerode { }); }); } + + // /** + // * Lists all containers on all nodes. + // */ + // public async listContainersOnAllNodes() { + // this.listTasks + // } } diff --git a/src/server/docker/stack.ts b/src/server/docker/stack.ts index 9d861b5..5b30824 100644 --- a/src/server/docker/stack.ts +++ b/src/server/docker/stack.ts @@ -155,6 +155,21 @@ export async function buildDockerStackFile( return { version: "3.8", - services: swarmServices, + services: cleanObject(swarmServices), }; } + +/** + * Small utility function to clean out keys with null value from an object. + * Useful because sometimes docker will treat `null` as '', causing issues. + */ +export function cleanObject>(obj: T): T { + for (const key in obj) { + if (obj[key] === null) delete obj[key]; + if (typeof obj[key] === "object") + // @ts-expect-error - idk how to type this any better + obj[key] = cleanObject(obj[key] as Record) as unknown; + } + + return obj; +} diff --git a/src/server/docker/types.d.ts b/src/server/docker/types.d.ts new file mode 100644 index 0000000..b53e1a0 --- /dev/null +++ b/src/server/docker/types.d.ts @@ -0,0 +1,9794 @@ +/** + * This file was auto-generated by openapi-typescript. 
+ * Do not make direct changes to the file. + */ + +export interface paths { + "/containers/json": { + /** + * Returns a list of containers. For details on the format, see the + * [inspect endpoint](#operation/ContainerInspect). + * + * Note that it uses a different, smaller representation of a container + * than inspecting a single container. For example, the list of linked + * containers is not propagated . + */ + get: operations["ContainerList"]; + }; + "/containers/create": { + post: operations["ContainerCreate"]; + }; + "/containers/{id}/json": { + /** Return low-level information about a container. */ + get: operations["ContainerInspect"]; + }; + "/containers/{id}/top": { + /** + * On Unix systems, this is done by running the `ps` command. This endpoint + * is not supported on Windows. + */ + get: operations["ContainerTop"]; + }; + "/containers/{id}/logs": { + /** + * Get `stdout` and `stderr` logs from a container. + * + * Note: This endpoint works only for containers with the `json-file` or + * `journald` logging driver. + */ + get: operations["ContainerLogs"]; + }; + "/containers/{id}/changes": { + /** + * Returns which files in a container's filesystem have been added, deleted, + * or modified. The `Kind` of modification can be one of: + * + * - `0`: Modified ("C") + * - `1`: Added ("A") + * - `2`: Deleted ("D") + */ + get: operations["ContainerChanges"]; + }; + "/containers/{id}/export": { + /** Export the contents of a container as a tarball. */ + get: operations["ContainerExport"]; + }; + "/containers/{id}/stats": { + /** + * This endpoint returns a live stream of a container’s resource usage + * statistics. + * + * The `precpu_stats` is the CPU statistic of the *previous* read, and is + * used to calculate the CPU usage percentage. It is not an exact copy + * of the `cpu_stats` field. 
+ * + * If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + * nil then for compatibility with older daemons the length of the + * corresponding `cpu_usage.percpu_usage` array should be used. + * + * On a cgroup v2 host, the following fields are not set + * * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * * `cpu_stats`: `cpu_usage.percpu_usage` + * * `memory_stats`: `max_usage` and `failcnt` + * Also, `memory_stats.stats` fields are incompatible with cgroup v1. + * + * To calculate the values shown by the `stats` command of the docker cli tool + * the following formulas can be used: + * * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * * available_memory = `memory_stats.limit` + * * Memory usage % = `(used_memory / available_memory) * 100.0` + * * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + */ + get: operations["ContainerStats"]; + }; + "/containers/{id}/resize": { + /** Resize the TTY for a container. */ + post: operations["ContainerResize"]; + }; + "/containers/{id}/start": { + post: operations["ContainerStart"]; + }; + "/containers/{id}/stop": { + post: operations["ContainerStop"]; + }; + "/containers/{id}/restart": { + post: operations["ContainerRestart"]; + }; + "/containers/{id}/kill": { + /** + * Send a POSIX signal to a container, defaulting to killing to the + * container. + */ + post: operations["ContainerKill"]; + }; + "/containers/{id}/update": { + /** + * Change various configuration options of a container without having to + * recreate it. 
+ */ + post: operations["ContainerUpdate"]; + }; + "/containers/{id}/rename": { + post: operations["ContainerRename"]; + }; + "/containers/{id}/pause": { + /** + * Use the freezer cgroup to suspend all processes in a container. + * + * Traditionally, when suspending a process the `SIGSTOP` signal is used, + * which is observable by the process being suspended. With the freezer + * cgroup the process is unaware, and unable to capture, that it is being + * suspended, and subsequently resumed. + */ + post: operations["ContainerPause"]; + }; + "/containers/{id}/unpause": { + /** Resume a container which has been paused. */ + post: operations["ContainerUnpause"]; + }; + "/containers/{id}/attach": { + /** + * Attach to a container to read its output or send it input. You can attach + * to the same container multiple times and you can reattach to containers + * that have been detached. + * + * Either the `stream` or `logs` parameter must be `true` for this endpoint + * to do anything. + * + * See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + * for more details. + * + * ### Hijacking + * + * This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + * and `stderr` on the same socket. + * + * This is the response from the daemon for an attach request: + * + * ``` + * HTTP/1.1 200 OK + * Content-Type: application/vnd.docker.raw-stream + * + * [STREAM] + * ``` + * + * After the headers and two new lines, the TCP connection can now be used + * for raw, bidirectional communication between the client and server. + * + * To hint potential proxies about connection hijacking, the Docker client + * can also optionally send connection upgrade headers. 
+ * + * For example, the client sends this request to upgrade the connection: + * + * ``` + * POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + * Upgrade: tcp + * Connection: Upgrade + * ``` + * + * The Docker daemon will respond with a `101 UPGRADED` response, and will + * similarly follow with the raw stream: + * + * ``` + * HTTP/1.1 101 UPGRADED + * Content-Type: application/vnd.docker.raw-stream + * Connection: Upgrade + * Upgrade: tcp + * + * [STREAM] + * ``` + * + * ### Stream format + * + * When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + * the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + * and the stream over the hijacked connected is multiplexed to separate out + * `stdout` and `stderr`. The stream consists of a series of frames, each + * containing a header and a payload. + * + * The header contains the information which the stream writes (`stdout` or + * `stderr`). It also contains the size of the associated frame encoded in + * the last four bytes (`uint32`). + * + * It is encoded on the first eight bytes like this: + * + * ```go + * header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + * ``` + * + * `STREAM_TYPE` can be: + * + * - 0: `stdin` (is written on `stdout`) + * - 1: `stdout` + * - 2: `stderr` + * + * `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + * encoded as big endian. + * + * Following the header is the payload, which is the specified number of + * bytes of `STREAM_TYPE`. + * + * The simplest way to implement this protocol is the following: + * + * 1. Read 8 bytes. + * 2. Choose `stdout` or `stderr` depending on the first byte. + * 3. Extract the frame size from the last four bytes. + * 4. Read the extracted size and output it on the correct output. + * 5. Goto 1. 
+ * + * ### Stream format when using a TTY + * + * When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), + * the stream is not multiplexed. The data exchanged over the hijacked + * connection is simply the raw data from the process PTY and client's + * `stdin`. + */ + post: operations["ContainerAttach"]; + }; + "/containers/{id}/attach/ws": { + get: operations["ContainerAttachWebsocket"]; + }; + "/containers/{id}/wait": { + /** Block until a container stops, then returns the exit code. */ + post: operations["ContainerWait"]; + }; + "/containers/{id}": { + delete: operations["ContainerDelete"]; + }; + "/containers/{id}/archive": { + /** Get a tar archive of a resource in the filesystem of container id. */ + get: operations["ContainerArchive"]; + /** + * Upload a tar archive to be extracted to a path in the filesystem of container id. + * `path` parameter is asserted to be a directory. If it exists as a file, 400 error + * will be returned with message "not a directory". + */ + put: operations["PutContainerArchive"]; + /** + * A response header `X-Docker-Container-Path-Stat` is returned, containing + * a base64 - encoded JSON object with some filesystem header information + * about the path. + */ + head: operations["ContainerArchiveInfo"]; + }; + "/containers/prune": { + post: operations["ContainerPrune"]; + }; + "/images/json": { + /** Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image. */ + get: operations["ImageList"]; + }; + "/build": { + /** + * Build an image from a tar archive with a `Dockerfile` in it. + * + * The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). 
+ * + * The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + * + * The build is canceled if the client drops the connection by quitting or being killed. + */ + post: operations["ImageBuild"]; + }; + "/build/prune": { + post: operations["BuildPrune"]; + }; + "/images/create": { + /** Create an image by either pulling it from a registry or importing it. */ + post: operations["ImageCreate"]; + }; + "/images/{name}/json": { + /** Return low-level information about an image. */ + get: operations["ImageInspect"]; + }; + "/images/{name}/history": { + /** Return parent layers of an image. */ + get: operations["ImageHistory"]; + }; + "/images/{name}/push": { + /** + * Push an image to a registry. + * + * If you wish to push an image on to a private registry, that image must + * already have a tag which references the registry. For example, + * `registry.example.com/myimage:latest`. + * + * The push is cancelled if the HTTP connection is closed. + */ + post: operations["ImagePush"]; + }; + "/images/{name}/tag": { + /** Tag an image so that it becomes part of a repository. */ + post: operations["ImageTag"]; + }; + "/images/{name}": { + /** + * Remove an image, along with any untagged parent images that were + * referenced by that image. + * + * Images can't be removed if they have descendant images, are being + * used by a running container or are being used by a build. + */ + delete: operations["ImageDelete"]; + }; + "/images/search": { + /** Search for an image on Docker Hub. */ + get: operations["ImageSearch"]; + }; + "/images/prune": { + post: operations["ImagePrune"]; + }; + "/auth": { + /** + * Validate credentials for a registry and, if available, get an identity + * token for accessing the registry without password. 
+ */ + post: operations["SystemAuth"]; + }; + "/info": { + get: operations["SystemInfo"]; + }; + "/version": { + /** Returns the version of Docker that is running and various information about the system that Docker is running on. */ + get: operations["SystemVersion"]; + }; + "/_ping": { + /** This is a dummy endpoint you can use to test if the server is accessible. */ + get: operations["SystemPing"]; + /** This is a dummy endpoint you can use to test if the server is accessible. */ + head: operations["SystemPingHead"]; + }; + "/commit": { + post: operations["ImageCommit"]; + }; + "/events": { + /** + * Stream real-time events from the server. + * + * Various objects within Docker report events when something happens to them. + * + * Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + * + * Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + * + * Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + * + * Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + * + * The Docker daemon reports these events: `reload` + * + * Services report these events: `create`, `update`, and `remove` + * + * Nodes report these events: `create`, `update`, and `remove` + * + * Secrets report these events: `create`, `update`, and `remove` + * + * Configs report these events: `create`, `update`, and `remove` + * + * The Builder reports `prune` events + */ + get: operations["SystemEvents"]; + }; + "/system/df": { + get: operations["SystemDataUsage"]; + }; + "/images/{name}/get": { + /** + * Get a tarball containing all images and metadata for a repository. + * + * If `name` is a specific name and tag (e.g. 
`ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + * + * ### Image tarball format + * + * An image tarball contains one directory per image layer (named using its long ID), each containing these files: + * + * - `VERSION`: currently `1.0` - the file format version + * - `json`: detailed layer information, similar to `docker inspect layer_id` + * - `layer.tar`: A tarfile containing the filesystem changes in this layer + * + * The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + * + * If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + * + * ```json + * { + * "hello-world": { + * "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + * } + * } + * ``` + */ + get: operations["ImageGet"]; + }; + "/images/get": { + /** + * Get a tarball containing all images and metadata for several image + * repositories. + * + * For each value of the `names` parameter: if it is a specific name and + * tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + * returned; if it is an image ID, similarly only that image (and its parents) + * are returned and there would be no names referenced in the 'repositories' + * file for this image ID. + * + * For details on the format, see the [export image endpoint](#operation/ImageGet). + */ + get: operations["ImageGetAll"]; + }; + "/images/load": { + /** + * Load a set of images and tags into a repository. + * + * For details on the format, see the [export image endpoint](#operation/ImageGet). 
+ */ + post: operations["ImageLoad"]; + }; + "/containers/{id}/exec": { + /** Run a command inside a running container. */ + post: operations["ContainerExec"]; + }; + "/exec/{id}/start": { + /** + * Starts a previously set up exec instance. If detach is true, this endpoint + * returns immediately after starting the command. Otherwise, it sets up an + * interactive session with the command. + */ + post: operations["ExecStart"]; + }; + "/exec/{id}/resize": { + /** + * Resize the TTY session used by an exec instance. This endpoint only works + * if `tty` was specified as part of creating and starting the exec instance. + */ + post: operations["ExecResize"]; + }; + "/exec/{id}/json": { + /** Return low-level information about an exec instance. */ + get: operations["ExecInspect"]; + }; + "/volumes": { + get: operations["VolumeList"]; + }; + "/volumes/create": { + post: operations["VolumeCreate"]; + }; + "/volumes/{name}": { + get: operations["VolumeInspect"]; + put: operations["VolumeUpdate"]; + /** Instruct the driver to remove the volume. */ + delete: operations["VolumeDelete"]; + }; + "/volumes/prune": { + post: operations["VolumePrune"]; + }; + "/networks": { + /** + * Returns a list of networks. For details on the format, see the + * [network inspect endpoint](#operation/NetworkInspect). + * + * Note that it uses a different, smaller representation of a network than + * inspecting a single network. For example, the list of containers attached + * to the network is not propagated in API versions 1.28 and up. 
+ */ + get: operations["NetworkList"]; + }; + "/networks/{id}": { + get: operations["NetworkInspect"]; + delete: operations["NetworkDelete"]; + }; + "/networks/create": { + post: operations["NetworkCreate"]; + }; + "/networks/{id}/connect": { + post: operations["NetworkConnect"]; + }; + "/networks/{id}/disconnect": { + post: operations["NetworkDisconnect"]; + }; + "/networks/prune": { + post: operations["NetworkPrune"]; + }; + "/plugins": { + /** Returns information about installed plugins. */ + get: operations["PluginList"]; + }; + "/plugins/privileges": { + get: operations["GetPluginPrivileges"]; + }; + "/plugins/pull": { + /** + * Pulls and installs a plugin. After the plugin is installed, it can be + * enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + */ + post: operations["PluginPull"]; + }; + "/plugins/{name}/json": { + get: operations["PluginInspect"]; + }; + "/plugins/{name}": { + delete: operations["PluginDelete"]; + }; + "/plugins/{name}/enable": { + post: operations["PluginEnable"]; + }; + "/plugins/{name}/disable": { + post: operations["PluginDisable"]; + }; + "/plugins/{name}/upgrade": { + post: operations["PluginUpgrade"]; + }; + "/plugins/create": { + post: operations["PluginCreate"]; + }; + "/plugins/{name}/push": { + /** Push a plugin to the registry. 
*/ + post: operations["PluginPush"]; + }; + "/plugins/{name}/set": { + post: operations["PluginSet"]; + }; + "/nodes": { + get: operations["NodeList"]; + }; + "/nodes/{id}": { + get: operations["NodeInspect"]; + delete: operations["NodeDelete"]; + }; + "/nodes/{id}/update": { + post: operations["NodeUpdate"]; + }; + "/swarm": { + get: operations["SwarmInspect"]; + }; + "/swarm/init": { + post: operations["SwarmInit"]; + }; + "/swarm/join": { + post: operations["SwarmJoin"]; + }; + "/swarm/leave": { + post: operations["SwarmLeave"]; + }; + "/swarm/update": { + post: operations["SwarmUpdate"]; + }; + "/swarm/unlockkey": { + get: operations["SwarmUnlockkey"]; + }; + "/swarm/unlock": { + post: operations["SwarmUnlock"]; + }; + "/services": { + get: operations["ServiceList"]; + }; + "/services/create": { + post: operations["ServiceCreate"]; + }; + "/services/{id}": { + get: operations["ServiceInspect"]; + delete: operations["ServiceDelete"]; + }; + "/services/{id}/update": { + post: operations["ServiceUpdate"]; + }; + "/services/{id}/logs": { + /** + * Get `stdout` and `stderr` logs from a service. See also + * [`/containers/{id}/logs`](#operation/ContainerLogs). + * + * **Note**: This endpoint works only for services with the `local`, + * `json-file` or `journald` logging drivers. + */ + get: operations["ServiceLogs"]; + }; + "/tasks": { + get: operations["TaskList"]; + }; + "/tasks/{id}": { + get: operations["TaskInspect"]; + }; + "/tasks/{id}/logs": { + /** + * Get `stdout` and `stderr` logs from a task. + * See also [`/containers/{id}/logs`](#operation/ContainerLogs). + * + * **Note**: This endpoint works only for services with the `local`, + * `json-file` or `journald` logging drivers. 
+ */
+ get: operations["TaskLogs"];
+ };
+ "/secrets": {
+ get: operations["SecretList"];
+ };
+ "/secrets/create": {
+ post: operations["SecretCreate"];
+ };
+ "/secrets/{id}": {
+ get: operations["SecretInspect"];
+ delete: operations["SecretDelete"];
+ };
+ "/secrets/{id}/update": {
+ post: operations["SecretUpdate"];
+ };
+ "/configs": {
+ get: operations["ConfigList"];
+ };
+ "/configs/create": {
+ post: operations["ConfigCreate"];
+ };
+ "/configs/{id}": {
+ get: operations["ConfigInspect"];
+ delete: operations["ConfigDelete"];
+ };
+ "/configs/{id}/update": {
+ post: operations["ConfigUpdate"];
+ };
+ "/distribution/{name}/json": {
+ /** Return image digest and platform information by contacting the registry. */
+ get: operations["DistributionInspect"];
+ };
+ "/session": {
+ /**
+ * Start a new interactive session with a server. Session allows server to
+ * call back to the client for advanced capabilities.
+ *
+ * ### Hijacking
+ *
+ * This endpoint hijacks the HTTP connection to HTTP2 transport that allows
+ * the client to expose gRPC services on that connection.
+ * + * For example, the client sends this request to upgrade the connection: + * + * ``` + * POST /session HTTP/1.1 + * Upgrade: h2c + * Connection: Upgrade + * ``` + * + * The Docker daemon responds with a `101 UPGRADED` response follow with + * the raw stream: + * + * ``` + * HTTP/1.1 101 UPGRADED + * Connection: Upgrade + * Upgrade: h2c + * ``` + */ + post: operations["Session"]; + }; +} + +export interface definitions { + /** + * @description An open port on a container + * @example { + * "PrivatePort": 8080, + * "PublicPort": 80, + * "Type": "tcp" + * } + */ + Port: { + /** + * Format: ip-address + * @description Host IP address that the container's port is mapped to + */ + IP?: string; + /** + * Format: uint16 + * @description Port on the container + */ + PrivatePort: number; + /** + * Format: uint16 + * @description Port exposed on the host + */ + PublicPort?: number; + /** @enum {string} */ + Type: "tcp" | "udp" | "sctp"; + }; + /** + * @description MountPoint represents a mount point configuration inside the container. + * This is used for reporting the mountpoints in use by a container. + */ + MountPoint: { + /** + * @description The mount type: + * + * - `bind` a mount of a file or directory from the host into the container. + * - `volume` a docker volume with the given `Name`. + * - `tmpfs` a `tmpfs`. + * - `npipe` a named pipe from the host into the container. + * - `cluster` a Swarm cluster volume + * + * @example volume + * @enum {string} + */ + Type?: "bind" | "volume" | "tmpfs" | "npipe" | "cluster"; + /** + * @description Name is the name reference to the underlying data defined by `Source` + * e.g., the volume name. + * + * @example myvolume + */ + Name?: string; + /** + * @description Source location of the mount. + * + * For volumes, this contains the storage location of the volume (within + * `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + * the source (host) part of the bind-mount. 
For `tmpfs` mount points, this + * field is empty. + * + * @example /var/lib/docker/volumes/myvolume/_data + */ + Source?: string; + /** + * @description Destination is the path relative to the container root (`/`) where + * the `Source` is mounted inside the container. + * + * @example /usr/share/nginx/html/ + */ + Destination?: string; + /** + * @description Driver is the volume driver used to create the volume (if it is a volume). + * + * @example local + */ + Driver?: string; + /** + * @description Mode is a comma separated list of options supplied by the user when + * creating the bind/volume mount. + * + * The default is platform-specific (`"z"` on Linux, empty on Windows). + * + * @example z + */ + Mode?: string; + /** + * @description Whether the mount is mounted writable (read-write). + * + * @example true + */ + RW?: boolean; + /** + * @description Propagation describes how mounts are propagated from the host into the + * mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + * for details. This field is not used on Windows. + * + * @example + */ + Propagation?: string; + }; + /** + * @description A device mapping between the host and container + * @example { + * "PathOnHost": "/dev/deviceName", + * "PathInContainer": "/dev/deviceName", + * "CgroupPermissions": "mrw" + * } + */ + DeviceMapping: { + PathOnHost?: string; + PathInContainer?: string; + CgroupPermissions?: string; + }; + /** @description A request for devices to be sent to device drivers */ + DeviceRequest: { + /** @example nvidia */ + Driver?: string; + /** @example -1 */ + Count?: number; + /** + * @example [ + * "0", + * "1", + * "GPU-fef8089b-4820-abfc-e83e-94318197576e" + * ] + */ + DeviceIDs?: string[]; + /** + * @description A list of capabilities; an OR list of AND lists of capabilities. 
+ * + * @example [ + * [ + * "gpu", + * "nvidia", + * "compute" + * ] + * ] + */ + Capabilities?: string[][]; + /** + * @description Driver-specific options, specified as a key/value pairs. These options + * are passed directly to the driver. + */ + Options?: { [key: string]: string }; + }; + ThrottleDevice: { + /** @description Device path */ + Path?: string; + /** + * Format: int64 + * @description Rate + */ + Rate?: number; + }; + Mount: { + /** @description Container path. */ + Target?: string; + /** @description Mount source (e.g. a volume name, a host path). */ + Source?: string; + /** + * @description The mount type. Available types: + * + * - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + * - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + * - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + * - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + * - `cluster` a Swarm cluster volume + * + * @enum {string} + */ + Type?: "bind" | "volume" | "tmpfs" | "npipe" | "cluster"; + /** @description Whether the mount should be read-only. */ + ReadOnly?: boolean; + /** @description The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`. */ + Consistency?: string; + /** @description Optional configuration for the `bind` type. */ + BindOptions?: { + /** + * @description A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`. + * @enum {string} + */ + Propagation?: + | "private" + | "rprivate" + | "shared" + | "rshared" + | "slave" + | "rslave"; + /** + * @description Disable recursive bind mount. 
+ * @default false + */ + NonRecursive?: boolean; + /** + * @description Create mount point on host if missing + * @default false + */ + CreateMountpoint?: boolean; + }; + /** @description Optional configuration for the `volume` type. */ + VolumeOptions?: { + /** + * @description Populate volume with data from the target. + * @default false + */ + NoCopy?: boolean; + /** @description User-defined key/value metadata. */ + Labels?: { [key: string]: string }; + /** @description Map of driver specific options */ + DriverConfig?: { + /** @description Name of the driver to use to create the volume. */ + Name?: string; + /** @description key/value map of driver specific options. */ + Options?: { [key: string]: string }; + }; + }; + /** @description Optional configuration for the `tmpfs` type. */ + TmpfsOptions?: { + /** + * Format: int64 + * @description The size for the tmpfs mount in bytes. + */ + SizeBytes?: number; + /** @description The permission mode for the tmpfs mount in an integer. */ + Mode?: number; + }; + }; + /** + * @description The behavior to apply when the container exits. The default is not to + * restart. + * + * An ever increasing delay (double the previous delay, starting at 100ms) is + * added before each restart to prevent flooding the server. + */ + RestartPolicy: { + /** + * @description - Empty string means not to restart + * - `no` Do not automatically restart + * - `always` Always restart + * - `unless-stopped` Restart always except when the user has manually stopped the container + * - `on-failure` Restart only when the container exit code is non-zero + * + * @enum {string} + */ + Name?: "" | "no" | "always" | "unless-stopped" | "on-failure"; + /** @description If `on-failure` is used, the number of times to retry before giving up. 
*/ + MaximumRetryCount?: number; + }; + /** @description A container's resources (cgroups config, ulimits, etc) */ + Resources: { + /** + * @description An integer value representing this container's relative CPU weight + * versus other containers. + */ + CpuShares?: number; + /** + * Format: int64 + * @description Memory limit in bytes. + * @default 0 + */ + Memory?: number; + /** + * @description Path to `cgroups` under which the container's `cgroup` is created. If + * the path is not absolute, the path is considered to be relative to the + * `cgroups` path of the init process. Cgroups are created if they do not + * already exist. + */ + CgroupParent?: string; + /** @description Block IO weight (relative weight). */ + BlkioWeight?: number; + /** + * @description Block IO weight (relative device weight) in the form: + * + * ``` + * [{"Path": "device_path", "Weight": weight}] + * ``` + */ + BlkioWeightDevice?: { + Path?: string; + Weight?: number; + }[]; + /** + * @description Limit read rate (bytes per second) from a device, in the form: + * + * ``` + * [{"Path": "device_path", "Rate": rate}] + * ``` + */ + BlkioDeviceReadBps?: definitions["ThrottleDevice"][]; + /** + * @description Limit write rate (bytes per second) to a device, in the form: + * + * ``` + * [{"Path": "device_path", "Rate": rate}] + * ``` + */ + BlkioDeviceWriteBps?: definitions["ThrottleDevice"][]; + /** + * @description Limit read rate (IO per second) from a device, in the form: + * + * ``` + * [{"Path": "device_path", "Rate": rate}] + * ``` + */ + BlkioDeviceReadIOps?: definitions["ThrottleDevice"][]; + /** + * @description Limit write rate (IO per second) to a device, in the form: + * + * ``` + * [{"Path": "device_path", "Rate": rate}] + * ``` + */ + BlkioDeviceWriteIOps?: definitions["ThrottleDevice"][]; + /** + * Format: int64 + * @description The length of a CPU period in microseconds. 
+ */ + CpuPeriod?: number; + /** + * Format: int64 + * @description Microseconds of CPU time that the container can get in a CPU period. + */ + CpuQuota?: number; + /** + * Format: int64 + * @description The length of a CPU real-time period in microseconds. Set to 0 to + * allocate no time allocated to real-time tasks. + */ + CpuRealtimePeriod?: number; + /** + * Format: int64 + * @description The length of a CPU real-time runtime in microseconds. Set to 0 to + * allocate no time allocated to real-time tasks. + */ + CpuRealtimeRuntime?: number; + /** + * @description CPUs in which to allow execution (e.g., `0-3`, `0,1`). + * + * @example 0-3 + */ + CpusetCpus?: string; + /** + * @description Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + * effective on NUMA systems. + */ + CpusetMems?: string; + /** @description A list of devices to add to the container. */ + Devices?: definitions["DeviceMapping"][]; + /** @description a list of cgroup rules to apply to the container */ + DeviceCgroupRules?: string[]; + /** @description A list of requests for devices to be sent to device drivers. */ + DeviceRequests?: definitions["DeviceRequest"][]; + /** + * Format: int64 + * @description Hard limit for kernel TCP buffer memory (in bytes). Depending on the + * OCI runtime in use, this option may be ignored. It is no longer supported + * by the default (runc) runtime. + * + * This field is omitted when empty. + */ + KernelMemoryTCP?: number; + /** + * Format: int64 + * @description Memory soft limit in bytes. + */ + MemoryReservation?: number; + /** + * Format: int64 + * @description Total memory limit (memory + swap). Set as `-1` to enable unlimited + * swap. + */ + MemorySwap?: number; + /** + * Format: int64 + * @description Tune a container's memory swappiness behavior. Accepts an integer + * between 0 and 100. + */ + MemorySwappiness?: number; + /** + * Format: int64 + * @description CPU quota in units of 10-9 CPUs. 
+ */ + NanoCpus?: number; + /** @description Disable OOM Killer for the container. */ + OomKillDisable?: boolean; + /** + * @description Run an init inside the container that forwards signals and reaps + * processes. This field is omitted if empty, and the default (as + * configured on the daemon) is used. + */ + Init?: boolean; + /** + * Format: int64 + * @description Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + * to not change. + */ + PidsLimit?: number; + /** + * @description A list of resource limits to set in the container. For example: + * + * ``` + * {"Name": "nofile", "Soft": 1024, "Hard": 2048} + * ``` + */ + Ulimits?: { + /** @description Name of ulimit */ + Name?: string; + /** @description Soft limit */ + Soft?: number; + /** @description Hard limit */ + Hard?: number; + }[]; + /** + * Format: int64 + * @description The number of usable CPUs (Windows only). + * + * On Windows Server containers, the processor resource controls are + * mutually exclusive. The order of precedence is `CPUCount` first, then + * `CPUShares`, and `CPUPercent` last. + */ + CpuCount?: number; + /** + * Format: int64 + * @description The usable percentage of the available CPUs (Windows only). + * + * On Windows Server containers, the processor resource controls are + * mutually exclusive. The order of precedence is `CPUCount` first, then + * `CPUShares`, and `CPUPercent` last. + */ + CpuPercent?: number; + /** + * Format: int64 + * @description Maximum IOps for the container system drive (Windows only) + */ + IOMaximumIOps?: number; + /** + * Format: int64 + * @description Maximum IO in bytes per second for the container system drive + * (Windows only). + */ + IOMaximumBandwidth?: number; + }; + /** @description An object describing a limit on resources which can be requested by a task. 
*/ + Limit: { + /** + * Format: int64 + * @example 4000000000 + */ + NanoCPUs?: number; + /** + * Format: int64 + * @example 8272408576 + */ + MemoryBytes?: number; + /** + * Format: int64 + * @description Limits the maximum number of PIDs in the container. Set `0` for unlimited. + * + * @default 0 + * @example 100 + */ + Pids?: number; + }; + /** + * @description An object describing the resources which can be advertised by a node and + * requested by a task. + */ + ResourceObject: { + /** + * Format: int64 + * @example 4000000000 + */ + NanoCPUs?: number; + /** + * Format: int64 + * @example 8272408576 + */ + MemoryBytes?: number; + GenericResources?: definitions["GenericResources"]; + }; + /** + * @description User-defined resources can be either Integer resources (e.g, `SSD=3`) or + * String resources (e.g, `GPU=UUID1`). + * + * @example [ + * { + * "DiscreteResourceSpec": { + * "Kind": "SSD", + * "Value": 3 + * } + * }, + * { + * "NamedResourceSpec": { + * "Kind": "GPU", + * "Value": "UUID1" + * } + * }, + * { + * "NamedResourceSpec": { + * "Kind": "GPU", + * "Value": "UUID2" + * } + * } + * ] + */ + GenericResources: { + NamedResourceSpec?: { + Kind?: string; + Value?: string; + }; + DiscreteResourceSpec?: { + Kind?: string; + /** Format: int64 */ + Value?: number; + }; + }[]; + /** @description A test to perform to check that the container is healthy. */ + HealthConfig: { + /** + * @description The test to perform. Possible values are: + * + * - `[]` inherit healthcheck from image or parent image + * - `["NONE"]` disable healthcheck + * - `["CMD", args...]` exec arguments directly + * - `["CMD-SHELL", command]` run command with system's default shell + */ + Test?: string[]; + /** + * Format: int64 + * @description The time to wait between checks in nanoseconds. It should be 0 or at + * least 1000000 (1 ms). 0 means inherit. + */ + Interval?: number; + /** + * Format: int64 + * @description The time to wait before considering the check to have hung. 
It should + * be 0 or at least 1000000 (1 ms). 0 means inherit. + */ + Timeout?: number; + /** + * @description The number of consecutive failures needed to consider a container as + * unhealthy. 0 means inherit. + */ + Retries?: number; + /** + * Format: int64 + * @description Start period for the container to initialize before starting + * health-retries countdown in nanoseconds. It should be 0 or at least + * 1000000 (1 ms). 0 means inherit. + */ + StartPeriod?: number; + }; + /** @description Health stores information about the container's healthcheck results. */ + Health: { + /** + * @description Status is one of `none`, `starting`, `healthy` or `unhealthy` + * + * - "none" Indicates there is no healthcheck + * - "starting" Starting indicates that the container is not yet ready + * - "healthy" Healthy indicates that the container is running correctly + * - "unhealthy" Unhealthy indicates that the container has a problem + * + * @example healthy + * @enum {string} + */ + Status?: "none" | "starting" | "healthy" | "unhealthy"; + /** + * @description FailingStreak is the number of consecutive failures + * @example 0 + */ + FailingStreak?: number; + /** @description Log contains the last few results (oldest first) */ + Log?: definitions["HealthcheckResult"][]; + }; + /** @description HealthcheckResult stores information about a single run of a healthcheck probe */ + HealthcheckResult: { + /** + * Format: date-time + * @description Date and time at which this check started in + * [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + * + * @example 2020-01-04T10:44:24.496525531Z + */ + Start?: string; + /** + * Format: dateTime + * @description Date and time at which this check ended in + * [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
+ * + * @example 2020-01-04T10:45:21.364524523Z + */ + End?: string; + /** + * @description ExitCode meanings: + * + * - `0` healthy + * - `1` unhealthy + * - `2` reserved (considered unhealthy) + * - other values: error running probe + * + * @example 0 + */ + ExitCode?: number; + /** @description Output from last check */ + Output?: string; + }; + /** @description Container configuration that depends on the host we are running on */ + HostConfig: definitions["Resources"] & { + /** + * @description A list of volume bindings for this container. Each volume binding + * is a string in one of these forms: + * + * - `host-src:container-dest[:options]` to bind-mount a host path + * into the container. Both `host-src`, and `container-dest` must + * be an _absolute_ path. + * - `volume-name:container-dest[:options]` to bind-mount a volume + * managed by a volume driver into the container. `container-dest` + * must be an _absolute_ path. + * + * `options` is an optional, comma-delimited list of: + * + * - `nocopy` disables automatic copying of data from the container + * path to the volume. The `nocopy` flag only applies to named volumes. + * - `[ro|rw]` mounts a volume read-only or read-write, respectively. + * If omitted or set to `rw`, volumes are mounted read-write. + * - `[z|Z]` applies SELinux labels to allow or deny multiple containers + * to read and write to the same volume. + * - `z`: a _shared_ content label is applied to the content. This + * label indicates that multiple containers can share the volume + * content, for both reading and writing. + * - `Z`: a _private unshared_ label is applied to the content. + * This label indicates that only the current container can use + * a private volume. Labeling systems such as SELinux require + * proper labels to be placed on volume content that is mounted + * into a container. Without a label, the security system can + * prevent a container's processes from using the content. 
By + * default, the labels set by the host operating system are not + * modified. + * - `[[r]shared|[r]slave|[r]private]` specifies mount + * [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + * This only applies to bind-mounted volumes, not internal volumes + * or named volumes. Mount propagation requires the source mount + * point (the location where the source directory is mounted in the + * host operating system) to have the correct propagation properties. + * For shared volumes, the source mount point must be set to `shared`. + * For slave volumes, the mount must be set to either `shared` or + * `slave`. + */ + Binds?: string[]; + /** @description Path to a file where the container ID is written */ + ContainerIDFile?: string; + /** @description The logging configuration for this container */ + LogConfig?: { + /** @enum {string} */ + Type?: + | "json-file" + | "syslog" + | "journald" + | "gelf" + | "fluentd" + | "awslogs" + | "splunk" + | "etwlogs" + | "none"; + Config?: { [key: string]: string }; + }; + /** + * @description Network mode to use for this container. Supported standard values + * are: `bridge`, `host`, `none`, and `container:`. Any + * other value is taken as a custom network's name to which this + * container should connect to. + */ + NetworkMode?: string; + PortBindings?: definitions["PortMap"]; + RestartPolicy?: definitions["RestartPolicy"]; + /** + * @description Automatically remove the container when the container's process + * exits. This has no effect if `RestartPolicy` is set. + */ + AutoRemove?: boolean; + /** @description Driver that this container uses to mount volumes. */ + VolumeDriver?: string; + /** + * @description A list of volumes to inherit from another container, specified in + * the form `[:]`. + */ + VolumesFrom?: string[]; + /** @description Specification for mounts to be added to the container. 
*/ + Mounts?: definitions["Mount"][]; + /** @description Initial console size, as an `[height, width]` array. */ + ConsoleSize?: number[]; + /** + * @description Arbitrary non-identifying metadata attached to container and + * provided to the runtime when the container is started. + */ + Annotations?: { [key: string]: string }; + /** + * @description A list of kernel capabilities to add to the container. Conflicts + * with option 'Capabilities'. + */ + CapAdd?: string[]; + /** + * @description A list of kernel capabilities to drop from the container. Conflicts + * with option 'Capabilities'. + */ + CapDrop?: string[]; + /** + * @description cgroup namespace mode for the container. Possible values are: + * + * - `"private"`: the container runs in its own private cgroup namespace + * - `"host"`: use the host system's cgroup namespace + * + * If not specified, the daemon default is used, which can either be `"private"` + * or `"host"`, depending on daemon version, kernel support and configuration. + * + * @enum {string} + */ + CgroupnsMode?: "private" | "host"; + /** @description A list of DNS servers for the container to use. */ + Dns?: string[]; + /** @description A list of DNS options. */ + DnsOptions?: string[]; + /** @description A list of DNS search domains. */ + DnsSearch?: string[]; + /** + * @description A list of hostnames/IP mappings to add to the container's `/etc/hosts` + * file. Specified in the form `["hostname:IP"]`. + */ + ExtraHosts?: string[]; + /** @description A list of additional groups that the container process will run as. */ + GroupAdd?: string[]; + /** + * @description IPC sharing mode for the container. 
Possible values are: + * + * - `"none"`: own private IPC namespace, with /dev/shm not mounted + * - `"private"`: own private IPC namespace + * - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + * - `"container:"`: join another (shareable) container's IPC namespace + * - `"host"`: use the host system's IPC namespace + * + * If not specified, daemon default is used, which can either be `"private"` + * or `"shareable"`, depending on daemon version and configuration. + */ + IpcMode?: string; + /** @description Cgroup to use for the container. */ + Cgroup?: string; + /** @description A list of links for the container in the form `container_name:alias`. */ + Links?: string[]; + /** + * @description An integer value containing the score given to the container in + * order to tune OOM killer preferences. + * + * @example 500 + */ + OomScoreAdj?: number; + /** + * @description Set the PID (Process) Namespace mode for the container. It can be + * either: + * + * - `"container:"`: joins another container's PID namespace + * - `"host"`: use the host's PID namespace inside the container + */ + PidMode?: string; + /** @description Gives the container full access to the host. */ + Privileged?: boolean; + /** + * @description Allocates an ephemeral host port for all of a container's + * exposed ports. + * + * Ports are de-allocated when the container stops and allocated when + * the container starts. The allocated port might be changed when + * restarting the container. + * + * The port is selected from the ephemeral port range that depends on + * the kernel. For example, on Linux the range is defined by + * `/proc/sys/net/ipv4/ip_local_port_range`. + */ + PublishAllPorts?: boolean; + /** @description Mount the container's root filesystem as read only. */ + ReadonlyRootfs?: boolean; + /** + * @description A list of string values to customize labels for MLS systems, such + * as SELinux. 
+ */ + SecurityOpt?: string[]; + /** @description Storage driver options for this container, in the form `{"size": "120G"}`. */ + StorageOpt?: { [key: string]: string }; + /** + * @description A map of container directories which should be replaced by tmpfs + * mounts, and their corresponding mount options. For example: + * + * ``` + * { "/run": "rw,noexec,nosuid,size=65536k" } + * ``` + */ + Tmpfs?: { [key: string]: string }; + /** @description UTS namespace to use for the container. */ + UTSMode?: string; + /** + * @description Sets the usernamespace mode for the container when usernamespace + * remapping option is enabled. + */ + UsernsMode?: string; + /** + * Format: int64 + * @description Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + */ + ShmSize?: number; + /** + * @description A list of kernel parameters (sysctls) to set in the container. + * For example: + * + * ``` + * {"net.ipv4.ip_forward": "1"} + * ``` + */ + Sysctls?: { [key: string]: string }; + /** @description Runtime to use with this container. */ + Runtime?: string; + /** + * @description Isolation technology of the container. (Windows only) + * + * @enum {string} + */ + Isolation?: "default" | "process" | "hyperv"; + /** + * @description The list of paths to be masked inside the container (this overrides + * the default set of paths). + */ + MaskedPaths?: string[]; + /** + * @description The list of paths to be set as read-only inside the container + * (this overrides the default set of paths). + */ + ReadonlyPaths?: string[]; + }; + /** + * @description Configuration for a container that is portable between hosts. + * + * When used as `ContainerConfig` field in an image, `ContainerConfig` is an + * optional field containing the configuration of the container that was last + * committed when creating the image. + * + * Previous versions of Docker builder used this field to store build cache, + * and it is not in active use anymore. 
+ */ + ContainerConfig: { + /** + * @description The hostname to use for the container, as a valid RFC 1123 hostname. + * + * @example 439f4e91bd1d + */ + Hostname?: string; + /** @description The domain name to use for the container. */ + Domainname?: string; + /** @description The user that commands are run as inside the container. */ + User?: string; + /** + * @description Whether to attach to `stdin`. + * @default false + */ + AttachStdin?: boolean; + /** + * @description Whether to attach to `stdout`. + * @default true + */ + AttachStdout?: boolean; + /** + * @description Whether to attach to `stderr`. + * @default true + */ + AttachStderr?: boolean; + /** + * @description An object mapping ports to an empty object in the form: + * + * `{"/": {}}` + * + * @example { + * "80/tcp": {}, + * "443/tcp": {} + * } + */ + ExposedPorts?: { [key: string]: {} }; + /** + * @description Attach standard streams to a TTY, including `stdin` if it is not closed. + * + * @default false + */ + Tty?: boolean; + /** + * @description Open `stdin` + * @default false + */ + OpenStdin?: boolean; + /** + * @description Close `stdin` after one attached client disconnects + * @default false + */ + StdinOnce?: boolean; + /** + * @description A list of environment variables to set inside the container in the + * form `["VAR=value", ...]`. A variable without `=` is removed from the + * environment, rather than to have an empty value. + * + * @example [ + * "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + * ] + */ + Env?: string[]; + /** + * @description Command to run specified as a string or an array of strings. 
+ * + * @example [ + * "/bin/sh" + * ] + */ + Cmd?: string[]; + Healthcheck?: definitions["HealthConfig"]; + /** + * @description Command is already escaped (Windows only) + * @default false + * @example false + */ + ArgsEscaped?: boolean; + /** + * @description The name (or reference) of the image to use when creating the container, + * or which was used when the container was created. + * + * @example example-image:1.0 + */ + Image?: string; + /** + * @description An object mapping mount point paths inside the container to empty + * objects. + */ + Volumes?: { [key: string]: {} }; + /** + * @description The working directory for commands to run in. + * @example /public/ + */ + WorkingDir?: string; + /** + * @description The entry point for the container as a string or an array of strings. + * + * If the array consists of exactly one empty string (`[""]`) then the + * entry point is reset to system default (i.e., the entry point used by + * docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + * + * @example [] + */ + Entrypoint?: string[]; + /** @description Disable networking for the container. */ + NetworkDisabled?: boolean; + /** @description MAC address of the container. */ + MacAddress?: string; + /** + * @description `ONBUILD` metadata that were defined in the image's `Dockerfile`. + * + * @example [] + */ + OnBuild?: string[]; + /** + * @description User-defined key/value metadata. + * @example { + * "com.example.some-label": "some-value", + * "com.example.some-other-label": "some-other-value" + * } + */ + Labels?: { [key: string]: string }; + /** + * @description Signal to stop a container as a string or unsigned integer. + * + * @example SIGTERM + */ + StopSignal?: string; + /** + * @description Timeout to stop a container in seconds. + * @default 10 + */ + StopTimeout?: number; + /** + * @description Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. 
+ * + * @example [ + * "/bin/sh", + * "-c" + * ] + */ + Shell?: string[]; + }; + /** + * @description NetworkingConfig represents the container's networking configuration for + * each of its interfaces. + * It is used for the networking configs specified in the `docker create` + * and `docker network connect` commands. + * + * @example { + * "EndpointsConfig": { + * "isolated_nw": { + * "IPAMConfig": { + * "IPv4Address": "172.20.30.33", + * "IPv6Address": "2001:db8:abcd::3033", + * "LinkLocalIPs": [ + * "169.254.34.68", + * "fe80::3468" + * ] + * }, + * "Links": [ + * "container_1", + * "container_2" + * ], + * "Aliases": [ + * "server_x", + * "server_y" + * ] + * } + * } + * } + */ + NetworkingConfig: { + /** @description A mapping of network name to endpoint configuration for that network. */ + EndpointsConfig?: { [key: string]: definitions["EndpointSettings"] }; + }; + /** @description NetworkSettings exposes the network settings in the API */ + NetworkSettings: { + /** + * @description Name of the network's bridge (for example, `docker0`). + * @example docker0 + */ + Bridge?: string; + /** + * @description SandboxID uniquely represents a container's network stack. + * @example 9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3 + */ + SandboxID?: string; + /** + * @description Indicates if hairpin NAT should be enabled on the virtual interface. + * + * @example false + */ + HairpinMode?: boolean; + /** + * @description IPv6 unicast address using the link-local prefix. + * @example fe80::42:acff:fe11:1 + */ + LinkLocalIPv6Address?: string; + /** + * @description Prefix length of the IPv6 unicast address. 
+ * @example 64 + */ + LinkLocalIPv6PrefixLen?: number; + Ports?: definitions["PortMap"]; + /** + * @description SandboxKey identifies the sandbox + * @example /var/run/docker/netns/8ab54b426c38 + */ + SandboxKey?: string; + SecondaryIPAddresses?: definitions["Address"][]; + SecondaryIPv6Addresses?: definitions["Address"][]; + /** + * @description EndpointID uniquely represents a service endpoint in a Sandbox. + * + *


+ * + * > **Deprecated**: This field is only propagated when attached to the + * > default "bridge" network. Use the information from the "bridge" + * > network inside the `Networks` map instead, which contains the same + * > information. This field was deprecated in Docker 1.9 and is scheduled + * > to be removed in Docker 17.12.0 + * + * @example b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b + */ + EndpointID?: string; + /** + * @description Gateway address for the default "bridge" network. + * + *


+ * + * > **Deprecated**: This field is only propagated when attached to the + * > default "bridge" network. Use the information from the "bridge" + * > network inside the `Networks` map instead, which contains the same + * > information. This field was deprecated in Docker 1.9 and is scheduled + * > to be removed in Docker 17.12.0 + * + * @example 172.17.0.1 + */ + Gateway?: string; + /** + * @description Global IPv6 address for the default "bridge" network. + * + *


+ * + * > **Deprecated**: This field is only propagated when attached to the + * > default "bridge" network. Use the information from the "bridge" + * > network inside the `Networks` map instead, which contains the same + * > information. This field was deprecated in Docker 1.9 and is scheduled + * > to be removed in Docker 17.12.0 + * + * @example 2001:db8::5689 + */ + GlobalIPv6Address?: string; + /** + * @description Mask length of the global IPv6 address. + * + *


+ * + * > **Deprecated**: This field is only propagated when attached to the + * > default "bridge" network. Use the information from the "bridge" + * > network inside the `Networks` map instead, which contains the same + * > information. This field was deprecated in Docker 1.9 and is scheduled + * > to be removed in Docker 17.12.0 + * + * @example 64 + */ + GlobalIPv6PrefixLen?: number; + /** + * @description IPv4 address for the default "bridge" network. + * + *


+ * + * > **Deprecated**: This field is only propagated when attached to the + * > default "bridge" network. Use the information from the "bridge" + * > network inside the `Networks` map instead, which contains the same + * > information. This field was deprecated in Docker 1.9 and is scheduled + * > to be removed in Docker 17.12.0 + * + * @example 172.17.0.4 + */ + IPAddress?: string; + /** + * @description Mask length of the IPv4 address. + * + *


+ * + * > **Deprecated**: This field is only propagated when attached to the + * > default "bridge" network. Use the information from the "bridge" + * > network inside the `Networks` map instead, which contains the same + * > information. This field was deprecated in Docker 1.9 and is scheduled + * > to be removed in Docker 17.12.0 + * + * @example 16 + */ + IPPrefixLen?: number; + /** + * @description IPv6 gateway address for this network. + * + *


+ * + * > **Deprecated**: This field is only propagated when attached to the + * > default "bridge" network. Use the information from the "bridge" + * > network inside the `Networks` map instead, which contains the same + * > information. This field was deprecated in Docker 1.9 and is scheduled + * > to be removed in Docker 17.12.0 + * + * @example 2001:db8:2::100 + */ + IPv6Gateway?: string; + /** + * @description MAC address for the container on the default "bridge" network. + * + *


+ * + * > **Deprecated**: This field is only propagated when attached to the + * > default "bridge" network. Use the information from the "bridge" + * > network inside the `Networks` map instead, which contains the same + * > information. This field was deprecated in Docker 1.9 and is scheduled + * > to be removed in Docker 17.12.0 + * + * @example 02:42:ac:11:00:04 + */ + MacAddress?: string; + /** @description Information about all networks that the container is connected to. */ + Networks?: { [key: string]: definitions["EndpointSettings"] }; + }; + /** @description Address represents an IPv4 or IPv6 IP address. */ + Address: { + /** @description IP address. */ + Addr?: string; + /** @description Mask length of the IP address. */ + PrefixLen?: number; + }; + /** + * @description PortMap describes the mapping of container ports to host ports, using the + * container's port-number and protocol as key in the format `/`, + * for example, `80/udp`. + * + * If a container's port is mapped for multiple protocols, separate entries + * are added to the mapping table. + * + * @example { + * "443/tcp": [ + * { + * "HostIp": "127.0.0.1", + * "HostPort": "4443" + * } + * ], + * "80/tcp": [ + * { + * "HostIp": "0.0.0.0", + * "HostPort": "80" + * }, + * { + * "HostIp": "0.0.0.0", + * "HostPort": "8080" + * } + * ], + * "80/udp": [ + * { + * "HostIp": "0.0.0.0", + * "HostPort": "80" + * } + * ], + * "53/udp": [ + * { + * "HostIp": "0.0.0.0", + * "HostPort": "53" + * } + * ], + * "2377/tcp": null + * } + */ + PortMap: { [key: string]: definitions["PortBinding"][] }; + /** + * @description PortBinding represents a binding between a host IP address and a host + * port. + */ + PortBinding: { + /** + * @description Host IP address that the container's port is mapped to. + * @example 127.0.0.1 + */ + HostIp?: string; + /** + * @description Host port number that the container's port is mapped to. 
+ * @example 4443 + */ + HostPort?: string; + }; + /** + * @description Information about the storage driver used to store the container's and + * image's filesystem. + */ + GraphDriverData: { + /** + * @description Name of the storage driver. + * @example overlay2 + */ + Name: string; + /** + * @description Low-level storage metadata, provided as key/value pairs. + * + * This information is driver-specific, and depends on the storage-driver + * in use, and should be used for informational purposes only. + * + * @example { + * "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + * "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + * "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + * } + */ + Data: { [key: string]: string }; + }; + /** @description Change in the container's filesystem. */ + FilesystemChange: { + /** @description Path to file or directory that has changed. */ + Path: string; + Kind: definitions["ChangeType"]; + }; + /** + * Format: uint8 + * @description Kind of change + * + * Can be one of: + * + * - `0`: Modified ("C") + * - `1`: Added ("A") + * - `2`: Deleted ("D") + * + * @enum {integer} + */ + ChangeType: 0 | 1 | 2; + /** @description Information about an image in the local image cache. */ + ImageInspect: { + /** + * @description ID is the content-addressable ID of an image. + * + * This identifier is a content-addressable digest calculated from the + * image's configuration (which includes the digests of layers used by + * the image). + * + * Note that this digest differs from the `RepoDigests` below, which + * holds digests of image manifests that reference the image. 
+ * + * @example sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710 + */ + Id?: string; + /** + * @description List of image names/tags in the local image cache that reference this + * image. + * + * Multiple image tags can refer to the same image, and this list may be + * empty if no tags reference the image, in which case the image is + * "untagged", in which case it can still be referenced by its ID. + * + * @example [ + * "example:1.0", + * "example:latest", + * "example:stable", + * "internal.registry.example.com:5000/example:1.0" + * ] + */ + RepoTags?: string[]; + /** + * @description List of content-addressable digests of locally available image manifests + * that the image is referenced from. Multiple manifests can refer to the + * same image. + * + * These digests are usually only available if the image was either pulled + * from a registry, or if the image was pushed to a registry, which is when + * the manifest is generated and its digest calculated. + * + * @example [ + * "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", + * "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + * ] + */ + RepoDigests?: string[]; + /** + * @description ID of the parent image. + * + * Depending on how the image was created, this field may be empty and + * is only set for images that were built/created locally. This field + * is empty if the image was pulled from an image registry. + * + * @example + */ + Parent?: string; + /** + * @description Optional message that was set when committing or importing the image. + * + * @example + */ + Comment?: string; + /** + * @description Date and time at which the image was created, formatted in + * [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
+ * + * @example 2022-02-04T21:20:12.497794809Z + */ + Created?: string; + /** + * @description The ID of the container that was used to create the image. + * + * Depending on how the image was created, this field may be empty. + * + * @example 65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735 + */ + Container?: string; + ContainerConfig?: definitions["ContainerConfig"]; + /** + * @description The version of Docker that was used to build the image. + * + * Depending on how the image was created, this field may be empty. + * + * @example 20.10.7 + */ + DockerVersion?: string; + /** + * @description Name of the author that was specified when committing the image, or as + * specified through MAINTAINER (deprecated) in the Dockerfile. + * + * @example + */ + Author?: string; + Config?: definitions["ContainerConfig"]; + /** + * @description Hardware CPU architecture that the image runs on. + * + * @example arm + */ + Architecture?: string; + /** + * @description CPU architecture variant (presently ARM-only). + * + * @example v7 + */ + Variant?: string; + /** + * @description Operating System the image is built to run on. + * + * @example linux + */ + Os?: string; + /** + * @description Operating System version the image is built to run on (especially + * for Windows). + * + * @example + */ + OsVersion?: string; + /** + * Format: int64 + * @description Total size of the image including all layers it is composed of. + * + * @example 1239828 + */ + Size?: number; + /** + * Format: int64 + * @description Total size of the image including all layers it is composed of. + * + * In versions of Docker before v1.10, this field was calculated from + * the image itself and all of its parent images. Images are now stored + * self-contained, and no longer use a parent-chain, making this field + * an equivalent of the Size field. + * + * > **Deprecated**: this field is kept for backward compatibility, but + * > will be removed in API v1.44. 
+ * + * @example 1239828 + */ + VirtualSize?: number; + GraphDriver?: definitions["GraphDriverData"]; + /** @description Information about the image's RootFS, including the layer IDs. */ + RootFS?: { + /** @example layers */ + Type: string; + /** + * @example [ + * "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + * "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + * ] + */ + Layers?: string[]; + }; + /** + * @description Additional metadata of the image in the local cache. This information + * is local to the daemon, and not part of the image itself. + */ + Metadata?: { + /** + * Format: dateTime + * @description Date and time at which the image was last tagged in + * [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + * + * This information is only available if the image was tagged locally, + * and omitted otherwise. + * + * @example 2022-02-28T14:40:02.623929178Z + */ + LastTagTime?: string; + }; + }; + ImageSummary: { + /** + * @description ID is the content-addressable ID of an image. + * + * This identifier is a content-addressable digest calculated from the + * image's configuration (which includes the digests of layers used by + * the image). + * + * Note that this digest differs from the `RepoDigests` below, which + * holds digests of image manifests that reference the image. + * + * @example sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710 + */ + Id: string; + /** + * @description ID of the parent image. + * + * Depending on how the image was created, this field may be empty and + * is only set for images that were built/created locally. This field + * is empty if the image was pulled from an image registry. + * + * @example + */ + ParentId: string; + /** + * @description List of image names/tags in the local image cache that reference this + * image. 
+ * + * Multiple image tags can refer to the same image, and this list may be + * empty if no tags reference the image, in which case the image is + * "untagged", in which case it can still be referenced by its ID. + * + * @example [ + * "example:1.0", + * "example:latest", + * "example:stable", + * "internal.registry.example.com:5000/example:1.0" + * ] + */ + RepoTags: string[]; + /** + * @description List of content-addressable digests of locally available image manifests + * that the image is referenced from. Multiple manifests can refer to the + * same image. + * + * These digests are usually only available if the image was either pulled + * from a registry, or if the image was pushed to a registry, which is when + * the manifest is generated and its digest calculated. + * + * @example [ + * "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", + * "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + * ] + */ + RepoDigests: string[]; + /** + * @description Date and time at which the image was created as a Unix timestamp + * (number of seconds sinds EPOCH). + * + * @example 1644009612 + */ + Created: number; + /** + * Format: int64 + * @description Total size of the image including all layers it is composed of. + * + * @example 172064416 + */ + Size: number; + /** + * Format: int64 + * @description Total size of image layers that are shared between this image and other + * images. + * + * This size is not calculated by default. `-1` indicates that the value + * has not been set / calculated. + * + * @example 1239828 + */ + SharedSize: number; + /** + * Format: int64 + * @description Total size of the image including all layers it is composed of. + * + * In versions of Docker before v1.10, this field was calculated from + * the image itself and all of its parent images. 
Images are now stored + * self-contained, and no longer use a parent-chain, making this field + * an equivalent of the Size field. + * + * Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44. + * @example 172064416 + */ + VirtualSize?: number; + /** + * @description User-defined key/value metadata. + * @example { + * "com.example.some-label": "some-value", + * "com.example.some-other-label": "some-other-value" + * } + */ + Labels: { [key: string]: string }; + /** + * @description Number of containers using this image. Includes both stopped and running + * containers. + * + * This size is not calculated by default, and depends on which API endpoint + * is used. `-1` indicates that the value has not been set / calculated. + * + * @example 2 + */ + Containers: number; + }; + /** + * @example { + * "username": "hannibal", + * "password": "xxxx", + * "serveraddress": "https://index.docker.io/v1/" + * } + */ + AuthConfig: { + username?: string; + password?: string; + email?: string; + serveraddress?: string; + }; + ProcessConfig: { + privileged?: boolean; + user?: string; + tty?: boolean; + entrypoint?: string; + arguments?: string[]; + }; + Volume: { + /** + * @description Name of the volume. + * @example tardis + */ + Name: string; + /** + * @description Name of the volume driver used by the volume. + * @example custom + */ + Driver: string; + /** + * @description Mount path of the volume on the host. + * @example /var/lib/docker/volumes/tardis + */ + Mountpoint: string; + /** + * Format: dateTime + * @description Date/Time the volume was created. + * @example 2016-06-07T20:31:11.853781916Z + */ + CreatedAt?: string; + /** + * @description Low-level details about the volume, provided by the volume driver. + * Details are returned as a map with key/value pairs: + * `{"key":"value","key2":"value2"}`. + * + * The `Status` field is optional, and is omitted if the volume driver + * does not support this feature. 
+ * + * @example { + * "hello": "world" + * } + */ + Status?: { [key: string]: { [key: string]: unknown } }; + /** + * @description User-defined key/value metadata. + * @example { + * "com.example.some-label": "some-value", + * "com.example.some-other-label": "some-other-value" + * } + */ + Labels: { [key: string]: string }; + /** + * @description The level at which the volume exists. Either `global` for cluster-wide, + * or `local` for machine level. + * + * @default local + * @example local + * @enum {string} + */ + Scope: "local" | "global"; + ClusterVolume?: definitions["ClusterVolume"]; + /** + * @description The driver specific options used when creating the volume. + * + * @example { + * "device": "tmpfs", + * "o": "size=100m,uid=1000", + * "type": "tmpfs" + * } + */ + Options: { [key: string]: string }; + /** + * @description Usage details about the volume. This information is used by the + * `GET /system/df` endpoint, and omitted in other endpoints. + */ + UsageData?: { + /** + * Format: int64 + * @description Amount of disk space used by the volume (in bytes). This information + * is only available for volumes created with the `"local"` volume + * driver. For volumes created with other volume drivers, this field + * is set to `-1` ("not available") + * + * @default -1 + */ + Size: number; + /** + * Format: int64 + * @description The number of containers referencing this volume. This field + * is set to `-1` if the reference-count is not available. + * + * @default -1 + */ + RefCount: number; + }; + }; + /** + * VolumeConfig + * @description Volume configuration + */ + VolumeCreateOptions: { + /** + * @description The new volume's name. If not specified, Docker generates a name. + * + * @example tardis + */ + Name?: string; + /** + * @description Name of the volume driver to use. + * @default local + * @example custom + */ + Driver?: string; + /** + * @description A mapping of driver options and values. 
These options are + * passed directly to the driver and are driver specific. + * + * @example { + * "device": "tmpfs", + * "o": "size=100m,uid=1000", + * "type": "tmpfs" + * } + */ + DriverOpts?: { [key: string]: string }; + /** + * @description User-defined key/value metadata. + * @example { + * "com.example.some-label": "some-value", + * "com.example.some-other-label": "some-other-value" + * } + */ + Labels?: { [key: string]: string }; + ClusterVolumeSpec?: definitions["ClusterVolumeSpec"]; + }; + /** + * VolumeListResponse + * @description Volume list response + */ + VolumeListResponse: { + /** @description List of volumes */ + Volumes?: definitions["Volume"][]; + /** + * @description Warnings that occurred when fetching the list of volumes. + * + * @example [] + */ + Warnings?: string[]; + }; + /** + * @example { + * "Name": "net01", + * "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + * "Created": "2016-10-19T04:33:30.360899459Z", + * "Scope": "local", + * "Driver": "bridge", + * "EnableIPv6": false, + * "IPAM": { + * "Driver": "default", + * "Config": [ + * { + * "Subnet": "172.19.0.0/16", + * "Gateway": "172.19.0.1" + * } + * ], + * "Options": { + * "foo": "bar" + * } + * }, + * "Internal": false, + * "Attachable": false, + * "Ingress": false, + * "Containers": { + * "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + * "Name": "test", + * "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + * "MacAddress": "02:42:ac:13:00:02", + * "IPv4Address": "172.19.0.2/16", + * "IPv6Address": "" + * } + * }, + * "Options": { + * "com.docker.network.bridge.default_bridge": "true", + * "com.docker.network.bridge.enable_icc": "true", + * "com.docker.network.bridge.enable_ip_masquerade": "true", + * "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + * "com.docker.network.bridge.name": "docker0", + * "com.docker.network.driver.mtu": "1500" + * }, + * "Labels": { + * 
"com.example.some-label": "some-value", + * "com.example.some-other-label": "some-other-value" + * } + * } + */ + Network: { + Name?: string; + Id?: string; + /** Format: dateTime */ + Created?: string; + Scope?: string; + Driver?: string; + EnableIPv6?: boolean; + IPAM?: definitions["IPAM"]; + Internal?: boolean; + Attachable?: boolean; + Ingress?: boolean; + Containers?: { [key: string]: definitions["NetworkContainer"] }; + Options?: { [key: string]: string }; + Labels?: { [key: string]: string }; + }; + IPAM: { + /** + * @description Name of the IPAM driver to use. + * @default default + */ + Driver?: string; + /** + * @description List of IPAM configuration options, specified as a map: + * + * ``` + * {"Subnet": , "IPRange": , "Gateway": , "AuxAddress": } + * ``` + */ + Config?: definitions["IPAMConfig"][]; + /** @description Driver-specific options, specified as a map. */ + Options?: { [key: string]: string }; + }; + IPAMConfig: { + Subnet?: string; + IPRange?: string; + Gateway?: string; + AuxiliaryAddresses?: { [key: string]: string }; + }; + NetworkContainer: { + Name?: string; + EndpointID?: string; + MacAddress?: string; + IPv4Address?: string; + IPv6Address?: string; + }; + BuildInfo: { + id?: string; + stream?: string; + error?: string; + errorDetail?: definitions["ErrorDetail"]; + status?: string; + progress?: string; + progressDetail?: definitions["ProgressDetail"]; + aux?: definitions["ImageID"]; + }; + /** @description BuildCache contains information about a build cache record. */ + BuildCache: { + /** + * @description Unique ID of the build cache record. + * + * @example ndlpt0hhvkqcdfkputsk4cq9c + */ + ID?: string; + /** + * @description ID of the parent build cache record. + * + * > **Deprecated**: This field is deprecated, and omitted if empty. + * + * @example + */ + Parent?: string; + /** + * @description List of parent build cache record IDs. 
+ * + * @example [ + * "hw53o5aio51xtltp5xjp8v7fx" + * ] + */ + Parents?: string[]; + /** + * @description Cache record type. + * + * @example regular + * @enum {string} + */ + Type?: + | "internal" + | "frontend" + | "source.local" + | "source.git.checkout" + | "exec.cachemount" + | "regular"; + /** + * @description Description of the build-step that produced the build cache. + * + * @example mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache + */ + Description?: string; + /** + * @description Indicates if the build cache is in use. + * + * @example false + */ + InUse?: boolean; + /** + * @description Indicates if the build cache is shared. + * + * @example true + */ + Shared?: boolean; + /** + * @description Amount of disk space used by the build cache (in bytes). + * + * @example 51 + */ + Size?: number; + /** + * Format: dateTime + * @description Date and time at which the build cache was created in + * [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + * + * @example 2016-08-18T10:44:24.496525531Z + */ + CreatedAt?: string; + /** + * Format: dateTime + * @description Date and time at which the build cache was last used in + * [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
+ * + * @example 2017-08-09T07:09:37.632105588Z + */ + LastUsedAt?: string; + /** @example 26 */ + UsageCount?: number; + }; + /** + * @description Image ID or Digest + * @example { + * "ID": "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + * } + */ + ImageID: { + ID?: string; + }; + CreateImageInfo: { + id?: string; + error?: string; + errorDetail?: definitions["ErrorDetail"]; + status?: string; + progress?: string; + progressDetail?: definitions["ProgressDetail"]; + }; + PushImageInfo: { + error?: string; + status?: string; + progress?: string; + progressDetail?: definitions["ProgressDetail"]; + }; + ErrorDetail: { + code?: number; + message?: string; + }; + ProgressDetail: { + current?: number; + total?: number; + }; + /** + * @description Represents an error. + * @example { + * "message": "Something went wrong." + * } + */ + ErrorResponse: { + /** @description The error message. */ + message: string; + }; + /** @description Response to an API call that returns just an Id */ + IdResponse: { + /** @description The id of the newly created object. */ + Id: string; + }; + /** @description Configuration for a network endpoint. */ + EndpointSettings: { + IPAMConfig?: definitions["EndpointIPAMConfig"]; + /** + * @example [ + * "container_1", + * "container_2" + * ] + */ + Links?: string[]; + /** + * @example [ + * "server_x", + * "server_y" + * ] + */ + Aliases?: string[]; + /** + * @description Unique ID of the network. + * + * @example 08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a + */ + NetworkID?: string; + /** + * @description Unique ID for the service endpoint in a Sandbox. + * + * @example b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b + */ + EndpointID?: string; + /** + * @description Gateway address for this network. + * + * @example 172.17.0.1 + */ + Gateway?: string; + /** + * @description IPv4 address. 
+ * + * @example 172.17.0.4 + */ + IPAddress?: string; + /** + * @description Mask length of the IPv4 address. + * + * @example 16 + */ + IPPrefixLen?: number; + /** + * @description IPv6 gateway address. + * + * @example 2001:db8:2::100 + */ + IPv6Gateway?: string; + /** + * @description Global IPv6 address. + * + * @example 2001:db8::5689 + */ + GlobalIPv6Address?: string; + /** + * Format: int64 + * @description Mask length of the global IPv6 address. + * + * @example 64 + */ + GlobalIPv6PrefixLen?: number; + /** + * @description MAC address for the endpoint on this network. + * + * @example 02:42:ac:11:00:04 + */ + MacAddress?: string; + /** + * @description DriverOpts is a mapping of driver options and values. These options + * are passed directly to the driver and are driver specific. + * + * @example { + * "com.example.some-label": "some-value", + * "com.example.some-other-label": "some-other-value" + * } + */ + DriverOpts?: { [key: string]: string }; + }; + /** @description EndpointIPAMConfig represents an endpoint's IPAM configuration. */ + EndpointIPAMConfig: { + /** @example 172.20.30.33 */ + IPv4Address?: string; + /** @example 2001:db8:abcd::3033 */ + IPv6Address?: string; + /** + * @example [ + * "169.254.34.68", + * "fe80::3468" + * ] + */ + LinkLocalIPs?: string[]; + }; + PluginMount: { + /** @example some-mount */ + Name: string; + /** @example This is a mount that's used by the plugin. 
*/ + Description: string; + Settable: string[]; + /** @example /var/lib/docker/plugins/ */ + Source: string; + /** @example /mnt/state */ + Destination: string; + /** @example bind */ + Type: string; + /** + * @example [ + * "rbind", + * "rw" + * ] + */ + Options: string[]; + }; + PluginDevice: { + Name: string; + Description: string; + Settable: string[]; + /** @example /dev/fuse */ + Path: string; + }; + PluginEnv: { + Name: string; + Description: string; + Settable: string[]; + Value: string; + }; + PluginInterfaceType: { + Prefix: string; + Capability: string; + Version: string; + }; + /** + * @description Describes a permission the user has to accept upon installing + * the plugin. + */ + PluginPrivilege: { + /** @example network */ + Name?: string; + Description?: string; + /** + * @example [ + * "host" + * ] + */ + Value?: string[]; + }; + /** @description A plugin for the Engine API */ + Plugin: { + /** @example 5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078 */ + Id?: string; + /** @example tiborvass/sample-volume-plugin */ + Name: string; + /** + * @description True if the plugin is running. False if the plugin is not running, only installed. + * @example true + */ + Enabled: boolean; + /** @description Settings that can be modified by users. */ + Settings: { + Mounts: definitions["PluginMount"][]; + /** + * @example [ + * "DEBUG=0" + * ] + */ + Env: string[]; + Args: string[]; + Devices: definitions["PluginDevice"][]; + }; + /** + * @description plugin remote reference used to push/pull the plugin + * @example localhost:5000/tiborvass/sample-volume-plugin:latest + */ + PluginReference?: string; + /** @description The config of a plugin. 
*/ + Config: { + /** + * @description Docker Version used to create the plugin + * @example 17.06.0-ce + */ + DockerVersion?: string; + /** @example A sample volume plugin for Docker */ + Description: string; + /** @example https://docs.docker.com/engine/extend/plugins/ */ + Documentation: string; + /** @description The interface between Docker and the plugin */ + Interface: { + /** + * @example [ + * "docker.volumedriver/1.0" + * ] + */ + Types: definitions["PluginInterfaceType"][]; + /** @example plugins.sock */ + Socket: string; + /** + * @description Protocol to use for clients connecting to the plugin. + * @example some.protocol/v1.0 + * @enum {string} + */ + ProtocolScheme?: "" | "moby.plugins.http/v1"; + }; + /** + * @example [ + * "/usr/bin/sample-volume-plugin", + * "/data" + * ] + */ + Entrypoint: string[]; + /** @example /bin/ */ + WorkDir: string; + User?: { + /** + * Format: uint32 + * @example 1000 + */ + UID?: number; + /** + * Format: uint32 + * @example 1000 + */ + GID?: number; + }; + Network: { + /** @example host */ + Type: string; + }; + Linux: { + /** + * @example [ + * "CAP_SYS_ADMIN", + * "CAP_SYSLOG" + * ] + */ + Capabilities: string[]; + /** @example false */ + AllowAllDevices: boolean; + Devices: definitions["PluginDevice"][]; + }; + /** @example /mnt/volumes */ + PropagatedMount: string; + /** @example false */ + IpcHost: boolean; + /** @example false */ + PidHost: boolean; + Mounts: definitions["PluginMount"][]; + /** + * @example [ + * { + * "Name": "DEBUG", + * "Description": "If set, prints debug messages", + * "Settable": null, + * "Value": "0" + * } + * ] + */ + Env: definitions["PluginEnv"][]; + Args: { + /** @example args */ + Name: string; + /** @example command line arguments */ + Description: string; + Settable: string[]; + Value: string[]; + }; + rootfs?: { + /** @example layers */ + type?: string; + /** + * @example [ + * "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887", + * 
"sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + * ] + */ + diff_ids?: string[]; + }; + }; + }; + /** + * @description The version number of the object such as node, service, etc. This is needed + * to avoid conflicting writes. The client must send the version number along + * with the modified specification when updating these objects. + * + * This approach ensures safe concurrency and determinism in that the change + * on the object may not be applied if the version number has changed from the + * last read. In other words, if two update requests specify the same base + * version, only one of the requests can succeed. As a result, two separate + * update requests that happen at the same time will not unintentionally + * overwrite each other. + */ + ObjectVersion: { + /** + * Format: uint64 + * @example 373531 + */ + Index?: number; + }; + /** + * @example { + * "Availability": "active", + * "Name": "node-name", + * "Role": "manager", + * "Labels": { + * "foo": "bar" + * } + * } + */ + NodeSpec: { + /** + * @description Name for the node. + * @example my-node + */ + Name?: string; + /** @description User-defined key/value metadata. */ + Labels?: { [key: string]: string }; + /** + * @description Role of the node. + * @example manager + * @enum {string} + */ + Role?: "worker" | "manager"; + /** + * @description Availability of the node. + * @example active + * @enum {string} + */ + Availability?: "active" | "pause" | "drain"; + }; + Node: { + /** @example 24ifsmvkjbyhk */ + ID?: string; + Version?: definitions["ObjectVersion"]; + /** + * Format: dateTime + * @description Date and time at which the node was added to the swarm in + * [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
+ * + * @example 2016-08-18T10:44:24.496525531Z + */ + CreatedAt?: string; + /** + * Format: dateTime + * @description Date and time at which the node was last updated in + * [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + * + * @example 2017-08-09T07:09:37.632105588Z + */ + UpdatedAt?: string; + Spec?: definitions["NodeSpec"]; + Description?: definitions["NodeDescription"]; + Status?: definitions["NodeStatus"]; + ManagerStatus?: definitions["ManagerStatus"]; + }; + /** + * @description NodeDescription encapsulates the properties of the Node as reported by the + * agent. + */ + NodeDescription: { + /** @example bf3067039e47 */ + Hostname?: string; + Platform?: definitions["Platform"]; + Resources?: definitions["ResourceObject"]; + Engine?: definitions["EngineDescription"]; + TLSInfo?: definitions["TLSInfo"]; + }; + /** @description Platform represents the platform (Arch/OS). */ + Platform: { + /** + * @description Architecture represents the hardware architecture (for example, + * `x86_64`). + * + * @example x86_64 + */ + Architecture?: string; + /** + * @description OS represents the Operating System (for example, `linux` or `windows`). + * + * @example linux + */ + OS?: string; + }; + /** @description EngineDescription provides information about an engine. 
*/ + EngineDescription: { + /** @example 17.06.0 */ + EngineVersion?: string; + /** + * @example { + * "foo": "bar" + * } + */ + Labels?: { [key: string]: string }; + /** + * @example [ + * { + * "Type": "Log", + * "Name": "awslogs" + * }, + * { + * "Type": "Log", + * "Name": "fluentd" + * }, + * { + * "Type": "Log", + * "Name": "gcplogs" + * }, + * { + * "Type": "Log", + * "Name": "gelf" + * }, + * { + * "Type": "Log", + * "Name": "journald" + * }, + * { + * "Type": "Log", + * "Name": "json-file" + * }, + * { + * "Type": "Log", + * "Name": "logentries" + * }, + * { + * "Type": "Log", + * "Name": "splunk" + * }, + * { + * "Type": "Log", + * "Name": "syslog" + * }, + * { + * "Type": "Network", + * "Name": "bridge" + * }, + * { + * "Type": "Network", + * "Name": "host" + * }, + * { + * "Type": "Network", + * "Name": "ipvlan" + * }, + * { + * "Type": "Network", + * "Name": "macvlan" + * }, + * { + * "Type": "Network", + * "Name": "null" + * }, + * { + * "Type": "Network", + * "Name": "overlay" + * }, + * { + * "Type": "Volume", + * "Name": "local" + * }, + * { + * "Type": "Volume", + * "Name": "localhost:5000/vieux/sshfs:latest" + * }, + * { + * "Type": "Volume", + * "Name": "vieux/sshfs:latest" + * } + * ] + */ + Plugins?: { + Type?: string; + Name?: string; + }[]; + }; + /** + * @description Information about the issuer of leaf TLS certificates and the trusted root + * CA certificate. 
+ * + * @example { + * "TrustRoot": "-----BEGIN CERTIFICATE-----\nMIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0\nMzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf\n3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO\nPQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz\npxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H\n-----END CERTIFICATE-----\n", + * "CertIssuerSubject": "MBMxETAPBgNVBAMTCHN3YXJtLWNh", + * "CertIssuerPublicKey": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + * } + */ + TLSInfo: { + /** + * @description The root CA certificate(s) that are used to validate leaf TLS + * certificates. + */ + TrustRoot?: string; + /** @description The base64-url-safe-encoded raw subject bytes of the issuer. */ + CertIssuerSubject?: string; + /** @description The base64-url-safe-encoded raw public key bytes of the issuer. */ + CertIssuerPublicKey?: string; + }; + /** + * @description NodeStatus represents the status of a node. + * + * It provides the current status of the node, as seen by the manager. + */ + NodeStatus: { + State?: definitions["NodeState"]; + /** @example */ + Message?: string; + /** + * @description IP address of the node. + * @example 172.17.0.2 + */ + Addr?: string; + }; + /** + * @description NodeState represents the state of a node. + * @example ready + * @enum {string} + */ + NodeState: "unknown" | "down" | "ready" | "disconnected"; + /** + * @description ManagerStatus represents the status of a manager. + * + * It provides the current status of a node's manager component, if the node + * is a manager. 
+ */ + ManagerStatus: { + /** + * @default false + * @example true + */ + Leader?: boolean; + Reachability?: definitions["Reachability"]; + /** + * @description The IP address and port at which the manager is reachable. + * + * @example 10.0.0.46:2377 + */ + Addr?: string; + }; + /** + * @description Reachability represents the reachability of a node. + * @example reachable + * @enum {string} + */ + Reachability: "unknown" | "unreachable" | "reachable"; + /** @description User modifiable swarm configuration. */ + SwarmSpec: { + /** + * @description Name of the swarm. + * @example default + */ + Name?: string; + /** + * @description User-defined key/value metadata. + * @example { + * "com.example.corp.type": "production", + * "com.example.corp.department": "engineering" + * } + */ + Labels?: { [key: string]: string }; + /** @description Orchestration configuration. */ + Orchestration?: { + /** + * Format: int64 + * @description The number of historic tasks to keep per instance or node. If + * negative, never remove completed or failed tasks. + * + * @example 10 + */ + TaskHistoryRetentionLimit?: number; + }; + /** @description Raft configuration. */ + Raft?: { + /** + * Format: uint64 + * @description The number of log entries between snapshots. + * @example 10000 + */ + SnapshotInterval?: number; + /** + * Format: uint64 + * @description The number of snapshots to keep beyond the current snapshot. + */ + KeepOldSnapshots?: number; + /** + * Format: uint64 + * @description The number of log entries to keep around to sync up slow followers + * after a snapshot is created. + * + * @example 500 + */ + LogEntriesForSlowFollowers?: number; + /** + * @description The number of ticks that a follower will wait for a message from + * the leader before becoming a candidate and starting an election. + * `ElectionTick` must be greater than `HeartbeatTick`. 
+ * + * A tick currently defaults to one second, so these translate + * directly to seconds currently, but this is NOT guaranteed. + * + * @example 3 + */ + ElectionTick?: number; + /** + * @description The number of ticks between heartbeats. Every HeartbeatTick ticks, + * the leader will send a heartbeat to the followers. + * + * A tick currently defaults to one second, so these translate + * directly to seconds currently, but this is NOT guaranteed. + * + * @example 1 + */ + HeartbeatTick?: number; + }; + /** @description Dispatcher configuration. */ + Dispatcher?: { + /** + * Format: int64 + * @description The delay for an agent to send a heartbeat to the dispatcher. + * + * @example 5000000000 + */ + HeartbeatPeriod?: number; + }; + /** @description CA configuration. */ + CAConfig?: { + /** + * Format: int64 + * @description The duration node certificates are issued for. + * @example 7776000000000000 + */ + NodeCertExpiry?: number; + /** + * @description Configuration for forwarding signing requests to an external + * certificate authority. + */ + ExternalCAs?: { + /** + * @description Protocol for communication with the external CA (currently + * only `cfssl` is supported). + * + * @default cfssl + * @enum {string} + */ + Protocol?: "cfssl"; + /** @description URL where certificate signing requests should be sent. */ + URL?: string; + /** + * @description An object with key/value pairs that are interpreted as + * protocol-specific options for the external CA driver. + */ + Options?: { [key: string]: string }; + /** + * @description The root CA certificate (in PEM format) this external CA uses + * to issue TLS certificates (assumed to be to the current swarm + * root CA certificate if not provided). + */ + CACert?: string; + }[]; + /** + * @description The desired signing CA certificate for all swarm node TLS leaf + * certificates, in PEM format. 
+ */ + SigningCACert?: string; + /** + * @description The desired signing CA key for all swarm node TLS leaf certificates, + * in PEM format. + */ + SigningCAKey?: string; + /** + * Format: uint64 + * @description An integer whose purpose is to force swarm to generate a new + * signing CA certificate and key, if none have been specified in + * `SigningCACert` and `SigningCAKey` + */ + ForceRotate?: number; + }; + /** @description Parameters related to encryption-at-rest. */ + EncryptionConfig?: { + /** + * @description If set, generate a key and use it to lock data stored on the + * managers. + * + * @example false + */ + AutoLockManagers?: boolean; + }; + /** @description Defaults for creating tasks in this cluster. */ + TaskDefaults?: { + /** + * @description The log driver to use for tasks created in the orchestrator if + * unspecified by a service. + * + * Updating this value only affects new tasks. Existing tasks continue + * to use their previously configured log driver until recreated. + */ + LogDriver?: { + /** + * @description The log driver to use as a default for new tasks. + * + * @example json-file + */ + Name?: string; + /** + * @description Driver-specific options for the selectd log driver, specified + * as key/value pairs. + * + * @example { + * "max-file": "10", + * "max-size": "100m" + * } + */ + Options?: { [key: string]: string }; + }; + }; + }; + /** + * @description ClusterInfo represents information about the swarm as is returned by the + * "/info" endpoint. Join-tokens are not included. + */ + ClusterInfo: { + /** + * @description The ID of the swarm. + * @example abajmipo7b4xz5ip2nrla6b11 + */ + ID?: string; + Version?: definitions["ObjectVersion"]; + /** + * Format: dateTime + * @description Date and time at which the swarm was initialised in + * [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
+ * + * @example 2016-08-18T10:44:24.496525531Z + */ + CreatedAt?: string; + /** + * Format: dateTime + * @description Date and time at which the swarm was last updated in + * [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + * + * @example 2017-08-09T07:09:37.632105588Z + */ + UpdatedAt?: string; + Spec?: definitions["SwarmSpec"]; + TLSInfo?: definitions["TLSInfo"]; + /** + * @description Whether there is currently a root CA rotation in progress for the swarm + * + * @example false + */ + RootRotationInProgress?: boolean; + /** + * Format: uint32 + * @description DataPathPort specifies the data path port number for data traffic. + * Acceptable port range is 1024 to 49151. + * If no port is set or is set to 0, the default port (4789) is used. + * + * @default 4789 + * @example 4789 + */ + DataPathPort?: number; + /** + * @description Default Address Pool specifies default subnet pools for global scope + * networks. + */ + DefaultAddrPool?: string[]; + /** + * Format: uint32 + * @description SubnetSize specifies the subnet size of the networks created from the + * default subnet pool. + * + * @default 24 + * @example 24 + */ + SubnetSize?: number; + }; + /** @description JoinTokens contains the tokens workers and managers need to join the swarm. */ + JoinTokens: { + /** + * @description The token workers can use to join the swarm. + * + * @example SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx + */ + Worker?: string; + /** + * @description The token managers can use to join the swarm. + * + * @example SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 + */ + Manager?: string; + }; + Swarm: definitions["ClusterInfo"] & { + JoinTokens?: definitions["JoinTokens"]; + }; + /** @description User modifiable task configuration. */ + TaskSpec: { + /** + * @description Plugin spec for the service. *(Experimental release only.)* + * + *


+ * + * > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + * > mutually exclusive. PluginSpec is only used when the Runtime field + * > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + * > field is set to `attachment`. + */ + PluginSpec?: { + /** @description The name or 'alias' to use for the plugin. */ + Name?: string; + /** @description The plugin image reference to use. */ + Remote?: string; + /** @description Disable the plugin once scheduled. */ + Disabled?: boolean; + PluginPrivilege?: definitions["PluginPrivilege"][]; + }; + /** + * @description Container spec for the service. + * + *


+ * + * > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + * > mutually exclusive. PluginSpec is only used when the Runtime field + * > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + * > field is set to `attachment`. + */ + ContainerSpec?: { + /** @description The image name to use for the container */ + Image?: string; + /** @description User-defined key/value data. */ + Labels?: { [key: string]: string }; + /** @description The command to be run in the image. */ + Command?: string[]; + /** @description Arguments to the command. */ + Args?: string[]; + /** + * @description The hostname to use for the container, as a valid + * [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + */ + Hostname?: string; + /** @description A list of environment variables in the form `VAR=value`. */ + Env?: string[]; + /** @description The working directory for commands to run in. */ + Dir?: string; + /** @description The user inside the container. */ + User?: string; + /** @description A list of additional groups that the container process will run as. */ + Groups?: string[]; + /** @description Security options for the container */ + Privileges?: { + /** @description CredentialSpec for managed service account (Windows only) */ + CredentialSpec?: { + /** + * @description Load credential spec from a Swarm Config with the given ID. + * The specified config must also be present in the Configs + * field with the Runtime property set. + * + *


+ * + * + * > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + * > and `CredentialSpec.Config` are mutually exclusive. + * + * @example 0bt9dmxjvjiqermk6xrop3ekq + */ + Config?: string; + /** + * @description Load credential spec from this file. The file is read by + * the daemon, and must be present in the `CredentialSpecs` + * subdirectory in the docker data directory, which defaults + * to `C:\ProgramData\Docker\` on Windows. + * + * For example, specifying `spec.json` loads + * `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + * + *


+ * + * > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + * > and `CredentialSpec.Config` are mutually exclusive. + * + * @example spec.json + */ + File?: string; + /** + * @description Load credential spec from this value in the Windows + * registry. The specified registry value must be located in: + * + * `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + * + *


+ * + * + * > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + * > and `CredentialSpec.Config` are mutually exclusive. + */ + Registry?: string; + }; + /** @description SELinux labels of the container */ + SELinuxContext?: { + /** @description Disable SELinux */ + Disable?: boolean; + /** @description SELinux user label */ + User?: string; + /** @description SELinux role label */ + Role?: string; + /** @description SELinux type label */ + Type?: string; + /** @description SELinux level label */ + Level?: string; + }; + }; + /** @description Whether a pseudo-TTY should be allocated. */ + TTY?: boolean; + /** @description Open `stdin` */ + OpenStdin?: boolean; + /** @description Mount the container's root filesystem as read only. */ + ReadOnly?: boolean; + /** + * @description Specification for mounts to be added to containers created as part + * of the service. + */ + Mounts?: definitions["Mount"][]; + /** @description Signal to stop the container. */ + StopSignal?: string; + /** + * Format: int64 + * @description Amount of time to wait for the container to terminate before + * forcefully killing it. + */ + StopGracePeriod?: number; + HealthCheck?: definitions["HealthConfig"]; + /** + * @description A list of hostname/IP mappings to add to the container's `hosts` + * file. The format of extra hosts is specified in the + * [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + * man page: + * + * IP_address canonical_hostname [aliases...] + */ + Hosts?: string[]; + /** + * @description Specification for DNS related configurations in resolver configuration + * file (`resolv.conf`). + */ + DNSConfig?: { + /** @description The IP addresses of the name servers. */ + Nameservers?: string[]; + /** @description A search list for host-name lookup. */ + Search?: string[]; + /** + * @description A list of internal resolver variables to be modified (e.g., + * `debug`, `ndots:3`, etc.). 
+ */ + Options?: string[]; + }; + /** + * @description Secrets contains references to zero or more secrets that will be + * exposed to the service. + */ + Secrets?: { + /** @description File represents a specific target that is backed by a file. */ + File?: { + /** @description Name represents the final filename in the filesystem. */ + Name?: string; + /** @description UID represents the file UID. */ + UID?: string; + /** @description GID represents the file GID. */ + GID?: string; + /** + * Format: uint32 + * @description Mode represents the FileMode of the file. + */ + Mode?: number; + }; + /** + * @description SecretID represents the ID of the specific secret that we're + * referencing. + */ + SecretID?: string; + /** + * @description SecretName is the name of the secret that this references, + * but this is just provided for lookup/display purposes. The + * secret in the reference will be identified by its ID. + */ + SecretName?: string; + }[]; + /** + * @description Configs contains references to zero or more configs that will be + * exposed to the service. + */ + Configs?: { + /** + * @description File represents a specific target that is backed by a file. + * + *


+ * + * > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + */ + File?: { + /** @description Name represents the final filename in the filesystem. */ + Name?: string; + /** @description UID represents the file UID. */ + UID?: string; + /** @description GID represents the file GID. */ + GID?: string; + /** + * Format: uint32 + * @description Mode represents the FileMode of the file. + */ + Mode?: number; + }; + /** + * @description Runtime represents a target that is not mounted into the + * container but is used by the task + * + *


+ * + * > **Note**: `Configs.File` and `Configs.Runtime` are mutually + * > exclusive + */ + Runtime?: { [key: string]: unknown }; + /** + * @description ConfigID represents the ID of the specific config that we're + * referencing. + */ + ConfigID?: string; + /** + * @description ConfigName is the name of the config that this references, + * but this is just provided for lookup/display purposes. The + * config in the reference will be identified by its ID. + */ + ConfigName?: string; + }[]; + /** + * @description Isolation technology of the containers running the service. + * (Windows only) + * + * @enum {string} + */ + Isolation?: "default" | "process" | "hyperv"; + /** + * @description Run an init inside the container that forwards signals and reaps + * processes. This field is omitted if empty, and the default (as + * configured on the daemon) is used. + */ + Init?: boolean; + /** + * @description Set kernel namedspaced parameters (sysctls) in the container. + * The Sysctls option on services accepts the same sysctls as the + * are supported on containers. Note that while the same sysctls are + * supported, no guarantees or checks are made about their + * suitability for a clustered environment, and it's up to the user + * to determine whether a given sysctl will work properly in a + * Service. + */ + Sysctls?: { [key: string]: string }; + /** + * @description A list of kernel capabilities to add to the default set + * for the container. + * + * @example [ + * "CAP_NET_RAW", + * "CAP_SYS_ADMIN", + * "CAP_SYS_CHROOT", + * "CAP_SYSLOG" + * ] + */ + CapabilityAdd?: string[]; + /** + * @description A list of kernel capabilities to drop from the default set + * for the container. + * + * @example [ + * "CAP_NET_RAW" + * ] + */ + CapabilityDrop?: string[]; + /** @description A list of resource limits to set in the container. 
For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" */ + Ulimits?: { + /** @description Name of ulimit */ + Name?: string; + /** @description Soft limit */ + Soft?: number; + /** @description Hard limit */ + Hard?: number; + }[]; + }; + /** + * @description Read-only spec type for non-swarm containers attached to swarm overlay + * networks. + * + *


+ * + * > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + * > mutually exclusive. PluginSpec is only used when the Runtime field + * > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + * > field is set to `attachment`. + */ + NetworkAttachmentSpec?: { + /** @description ID of the container represented by this task */ + ContainerID?: string; + }; + /** + * @description Resource requirements which apply to each individual container created + * as part of the service. + */ + Resources?: { + /** @description Define resources limits. */ + Limits?: definitions["Limit"]; + /** @description Define resources reservation. */ + Reservations?: definitions["ResourceObject"]; + }; + /** + * @description Specification for the restart policy which applies to containers + * created as part of this service. + */ + RestartPolicy?: { + /** + * @description Condition for restart. + * @enum {string} + */ + Condition?: "none" | "on-failure" | "any"; + /** + * Format: int64 + * @description Delay between restart attempts. + */ + Delay?: number; + /** + * Format: int64 + * @description Maximum attempts to restart a given container before giving up + * (default value is 0, which is ignored). + * + * @default 0 + */ + MaxAttempts?: number; + /** + * Format: int64 + * @description Windows is the time window used to evaluate the restart policy + * (default value is 0, which is unbounded). + * + * @default 0 + */ + Window?: number; + }; + Placement?: { + /** + * @description An array of constraint expressions to limit the set of nodes where + * a task can be scheduled. Constraint expressions can either use a + * _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find + * nodes that satisfy every expression (AND match). 
Constraints can + * match node or Docker Engine labels as follows: + * + * node attribute | matches | example + * ---------------------|--------------------------------|----------------------------------------------- + * `node.id` | Node ID | `node.id==2ivku8v2gvtg4` + * `node.hostname` | Node hostname | `node.hostname!=node-2` + * `node.role` | Node role (`manager`/`worker`) | `node.role==manager` + * `node.platform.os` | Node operating system | `node.platform.os==windows` + * `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` + * `node.labels` | User-defined node labels | `node.labels.security==high` + * `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04` + * + * `engine.labels` apply to Docker Engine labels like operating system, + * drivers, etc. Swarm administrators add `node.labels` for operational + * purposes by using the [`node update endpoint`](#operation/NodeUpdate). + * + * @example [ + * "node.hostname!=node3.corp.example.com", + * "node.role!=manager", + * "node.labels.type==production", + * "node.platform.os==linux", + * "node.platform.arch==x86_64" + * ] + */ + Constraints?: string[]; + /** + * @description Preferences provide a way to make the scheduler aware of factors + * such as topology. They are provided in order from highest to + * lowest precedence. + * + * @example [ + * { + * "Spread": { + * "SpreadDescriptor": "node.labels.datacenter" + * } + * }, + * { + * "Spread": { + * "SpreadDescriptor": "node.labels.rack" + * } + * } + * ] + */ + Preferences?: { + Spread?: { + /** @description label descriptor, such as `engine.labels.az`. */ + SpreadDescriptor?: string; + }; + }[]; + /** + * Format: int64 + * @description Maximum number of replicas for per node (default value is 0, which + * is unlimited) + * + * @default 0 + */ + MaxReplicas?: number; + /** + * @description Platforms stores all the platforms that the service's image can + * run on. 
This field is used in the platform filter for scheduling. + * If empty, then the platform filter is off, meaning there are no + * scheduling restrictions. + */ + Platforms?: definitions["Platform"][]; + }; + /** + * @description A counter that triggers an update even if no relevant parameters have + * been changed. + */ + ForceUpdate?: number; + /** @description Runtime is the type of runtime specified for the task executor. */ + Runtime?: string; + /** @description Specifies which networks the service should attach to. */ + Networks?: definitions["NetworkAttachmentConfig"][]; + /** + * @description Specifies the log driver to use for tasks created from this spec. If + * not present, the default one for the swarm will be used, finally + * falling back to the engine default if not specified. + */ + LogDriver?: { + Name?: string; + Options?: { [key: string]: string }; + }; + }; + /** @enum {string} */ + TaskState: + | "new" + | "allocated" + | "pending" + | "assigned" + | "accepted" + | "preparing" + | "ready" + | "starting" + | "running" + | "complete" + | "shutdown" + | "failed" + | "rejected" + | "remove" + | "orphaned"; + /** + * @example { + * "ID": "0kzzo1i0y4jz6027t0k7aezc7", + * "Version": { + * "Index": 71 + * }, + * "CreatedAt": "2016-06-07T21:07:31.171892745Z", + * "UpdatedAt": "2016-06-07T21:07:31.376370513Z", + * "Spec": { + * "ContainerSpec": { + * "Image": "redis" + * }, + * "Resources": { + * "Limits": {}, + * "Reservations": {} + * }, + * "RestartPolicy": { + * "Condition": "any", + * "MaxAttempts": 0 + * }, + * "Placement": {} + * }, + * "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz", + * "Slot": 1, + * "NodeID": "60gvrl6tm78dmak4yl7srz94v", + * "Status": { + * "Timestamp": "2016-06-07T21:07:31.290032978Z", + * "State": "running", + * "Message": "started", + * "ContainerStatus": { + * "ContainerID": "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035", + * "PID": 677 + * } + * }, + * "DesiredState": "running", + * "NetworksAttachments": [ + 
* { + * "Network": { + * "ID": "4qvuz4ko70xaltuqbt8956gd1", + * "Version": { + * "Index": 18 + * }, + * "CreatedAt": "2016-06-07T20:31:11.912919752Z", + * "UpdatedAt": "2016-06-07T21:07:29.955277358Z", + * "Spec": { + * "Name": "ingress", + * "Labels": { + * "com.docker.swarm.internal": "true" + * }, + * "DriverConfiguration": {}, + * "IPAMOptions": { + * "Driver": {}, + * "Configs": [ + * { + * "Subnet": "10.255.0.0/16", + * "Gateway": "10.255.0.1" + * } + * ] + * } + * }, + * "DriverState": { + * "Name": "overlay", + * "Options": { + * "com.docker.network.driver.overlay.vxlanid_list": "256" + * } + * }, + * "IPAMOptions": { + * "Driver": { + * "Name": "default" + * }, + * "Configs": [ + * { + * "Subnet": "10.255.0.0/16", + * "Gateway": "10.255.0.1" + * } + * ] + * } + * }, + * "Addresses": [ + * "10.255.0.10/16" + * ] + * } + * ], + * "AssignedGenericResources": [ + * { + * "DiscreteResourceSpec": { + * "Kind": "SSD", + * "Value": 3 + * } + * }, + * { + * "NamedResourceSpec": { + * "Kind": "GPU", + * "Value": "UUID1" + * } + * }, + * { + * "NamedResourceSpec": { + * "Kind": "GPU", + * "Value": "UUID2" + * } + * } + * ] + * } + */ + Task: { + /** @description The ID of the task. */ + ID?: string; + Version?: definitions["ObjectVersion"]; + /** Format: dateTime */ + CreatedAt?: string; + /** Format: dateTime */ + UpdatedAt?: string; + /** @description Name of the task. */ + Name?: string; + /** @description User-defined key/value metadata. */ + Labels?: { [key: string]: string }; + Spec?: definitions["TaskSpec"]; + /** @description The ID of the service this task is part of. */ + ServiceID?: string; + Slot?: number; + /** @description The ID of the node that this task is on. 
*/ + NodeID?: string; + AssignedGenericResources?: definitions["GenericResources"]; + Status?: { + /** Format: dateTime */ + Timestamp?: string; + State?: definitions["TaskState"]; + Message?: string; + Err?: string; + ContainerStatus?: { + ContainerID?: string; + PID?: number; + ExitCode?: number; + }; + }; + DesiredState?: definitions["TaskState"]; + /** + * @description If the Service this Task belongs to is a job-mode service, contains + * the JobIteration of the Service this Task was created for. Absent if + * the Task was created for a Replicated or Global Service. + */ + JobIteration?: definitions["ObjectVersion"]; + }; + /** @description User modifiable configuration for a service. */ + ServiceSpec: { + /** @description Name of the service. */ + Name?: string; + /** @description User-defined key/value metadata. */ + Labels?: { [key: string]: string }; + TaskTemplate?: definitions["TaskSpec"]; + /** @description Scheduling mode for the service. */ + Mode?: { + Replicated?: { + /** Format: int64 */ + Replicas?: number; + }; + Global?: { [key: string]: unknown }; + /** + * @description The mode used for services with a finite number of tasks that run + * to a completed state. + */ + ReplicatedJob?: { + /** + * Format: int64 + * @description The maximum number of replicas to run simultaneously. + * + * @default 1 + */ + MaxConcurrent?: number; + /** + * Format: int64 + * @description The total number of replicas desired to reach the Completed + * state. If unset, will default to the value of `MaxConcurrent` + */ + TotalCompletions?: number; + }; + /** + * @description The mode used for services which run a task to the completed state + * on each valid node. + */ + GlobalJob?: { [key: string]: unknown }; + }; + /** @description Specification for the update strategy of the service. */ + UpdateConfig?: { + /** + * Format: int64 + * @description Maximum number of tasks to be updated in one iteration (0 means + * unlimited parallelism). 
+ */ + Parallelism?: number; + /** + * Format: int64 + * @description Amount of time between updates, in nanoseconds. + */ + Delay?: number; + /** + * @description Action to take if an updated task fails to run, or stops running + * during the update. + * + * @enum {string} + */ + FailureAction?: "continue" | "pause" | "rollback"; + /** + * Format: int64 + * @description Amount of time to monitor each updated task for failures, in + * nanoseconds. + */ + Monitor?: number; + /** + * @description The fraction of tasks that may fail during an update before the + * failure action is invoked, specified as a floating point number + * between 0 and 1. + * + * @default 0 + */ + MaxFailureRatio?: number; + /** + * @description The order of operations when rolling out an updated task. Either + * the old task is shut down before the new task is started, or the + * new task is started before the old task is shut down. + * + * @enum {string} + */ + Order?: "stop-first" | "start-first"; + }; + /** @description Specification for the rollback strategy of the service. */ + RollbackConfig?: { + /** + * Format: int64 + * @description Maximum number of tasks to be rolled back in one iteration (0 means + * unlimited parallelism). + */ + Parallelism?: number; + /** + * Format: int64 + * @description Amount of time between rollback iterations, in nanoseconds. + */ + Delay?: number; + /** + * @description Action to take if an rolled back task fails to run, or stops + * running during the rollback. + * + * @enum {string} + */ + FailureAction?: "continue" | "pause"; + /** + * Format: int64 + * @description Amount of time to monitor each rolled back task for failures, in + * nanoseconds. + */ + Monitor?: number; + /** + * @description The fraction of tasks that may fail during a rollback before the + * failure action is invoked, specified as a floating point number + * between 0 and 1. 
+ * + * @default 0 + */ + MaxFailureRatio?: number; + /** + * @description The order of operations when rolling back a task. Either the old + * task is shut down before the new task is started, or the new task + * is started before the old task is shut down. + * + * @enum {string} + */ + Order?: "stop-first" | "start-first"; + }; + /** @description Specifies which networks the service should attach to. */ + Networks?: definitions["NetworkAttachmentConfig"][]; + EndpointSpec?: definitions["EndpointSpec"]; + }; + EndpointPortConfig: { + Name?: string; + /** @enum {string} */ + Protocol?: "tcp" | "udp" | "sctp"; + /** @description The port inside the container. */ + TargetPort?: number; + /** @description The port on the swarm hosts. */ + PublishedPort?: number; + /** + * @description The mode in which port is published. + * + *


+ * + * - "ingress" makes the target port accessible on every node, + * regardless of whether there is a task for the service running on + * that node or not. + * - "host" bypasses the routing mesh and publish the port directly on + * the swarm node where that service is running. + * + * @default ingress + * @example ingress + * @enum {string} + */ + PublishMode?: "ingress" | "host"; + }; + /** @description Properties that can be configured to access and load balance a service. */ + EndpointSpec: { + /** + * @description The mode of resolution to use for internal load balancing between tasks. + * + * @default vip + * @enum {string} + */ + Mode?: "vip" | "dnsrr"; + /** + * @description List of exposed ports that this service is accessible on from the + * outside. Ports can only be provided if `vip` resolution mode is used. + */ + Ports?: definitions["EndpointPortConfig"][]; + }; + /** + * @example { + * "ID": "9mnpnzenvg8p8tdbtq4wvbkcz", + * "Version": { + * "Index": 19 + * }, + * "CreatedAt": "2016-06-07T21:05:51.880065305Z", + * "UpdatedAt": "2016-06-07T21:07:29.962229872Z", + * "Spec": { + * "Name": "hopeful_cori", + * "TaskTemplate": { + * "ContainerSpec": { + * "Image": "redis" + * }, + * "Resources": { + * "Limits": {}, + * "Reservations": {} + * }, + * "RestartPolicy": { + * "Condition": "any", + * "MaxAttempts": 0 + * }, + * "Placement": {}, + * "ForceUpdate": 0 + * }, + * "Mode": { + * "Replicated": { + * "Replicas": 1 + * } + * }, + * "UpdateConfig": { + * "Parallelism": 1, + * "Delay": 1000000000, + * "FailureAction": "pause", + * "Monitor": 15000000000, + * "MaxFailureRatio": 0.15 + * }, + * "RollbackConfig": { + * "Parallelism": 1, + * "Delay": 1000000000, + * "FailureAction": "pause", + * "Monitor": 15000000000, + * "MaxFailureRatio": 0.15 + * }, + * "EndpointSpec": { + * "Mode": "vip", + * "Ports": [ + * { + * "Protocol": "tcp", + * "TargetPort": 6379, + * "PublishedPort": 30001 + * } + * ] + * } + * }, + * "Endpoint": { + * "Spec": { + * "Mode": 
"vip", + * "Ports": [ + * { + * "Protocol": "tcp", + * "TargetPort": 6379, + * "PublishedPort": 30001 + * } + * ] + * }, + * "Ports": [ + * { + * "Protocol": "tcp", + * "TargetPort": 6379, + * "PublishedPort": 30001 + * } + * ], + * "VirtualIPs": [ + * { + * "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + * "Addr": "10.255.0.2/16" + * }, + * { + * "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + * "Addr": "10.255.0.3/16" + * } + * ] + * } + * } + */ + Service: { + ID?: string; + Version?: definitions["ObjectVersion"]; + /** Format: dateTime */ + CreatedAt?: string; + /** Format: dateTime */ + UpdatedAt?: string; + Spec?: definitions["ServiceSpec"]; + Endpoint?: { + Spec?: definitions["EndpointSpec"]; + Ports?: definitions["EndpointPortConfig"][]; + VirtualIPs?: { + NetworkID?: string; + Addr?: string; + }[]; + }; + /** @description The status of a service update. */ + UpdateStatus?: { + /** @enum {string} */ + State?: "updating" | "paused" | "completed"; + /** Format: dateTime */ + StartedAt?: string; + /** Format: dateTime */ + CompletedAt?: string; + Message?: string; + }; + /** + * @description The status of the service's tasks. Provided only when requested as + * part of a ServiceList operation. + */ + ServiceStatus?: { + /** + * Format: uint64 + * @description The number of tasks for the service currently in the Running state. + * + * @example 7 + */ + RunningTasks?: number; + /** + * Format: uint64 + * @description The number of tasks for the service desired to be running. + * For replicated services, this is the replica count from the + * service spec. For global services, this is computed by taking + * count of all tasks for the service with a Desired State other + * than Shutdown. + * + * @example 10 + */ + DesiredTasks?: number; + /** + * Format: uint64 + * @description The number of tasks for a job that are in the Completed state. 
+ * This field must be cross-referenced with the service type, as the + * value of 0 may mean the service is not in a job mode, or it may + * mean the job-mode service has no tasks yet Completed. + */ + CompletedTasks?: number; + }; + /** + * @description The status of the service when it is in one of ReplicatedJob or + * GlobalJob modes. Absent on Replicated and Global mode services. The + * JobIteration is an ObjectVersion, but unlike the Service's version, + * does not need to be sent with an update request. + */ + JobStatus?: { + /** + * @description JobIteration is a value increased each time a Job is executed, + * successfully or otherwise. "Executed", in this case, means the + * job as a whole has been started, not that an individual Task has + * been launched. A job is "Executed" when its ServiceSpec is + * updated. JobIteration can be used to disambiguate Tasks belonging + * to different executions of a job. Though JobIteration will + * increase with each subsequent execution, it may not necessarily + * increase by 1, and so JobIteration should not be used to + */ + JobIteration?: definitions["ObjectVersion"]; + /** + * Format: dateTime + * @description The last time, as observed by the server, that this job was + * started. 
+ */ + LastExecution?: string; + }; + }; + ImageDeleteResponseItem: { + /** @description The image ID of an image that was untagged */ + Untagged?: string; + /** @description The image ID of an image that was deleted */ + Deleted?: string; + }; + /** + * @example { + * "Warning": "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + * } + */ + ServiceUpdateResponse: { + /** @description Optional warning messages */ + Warnings?: string[]; + }; + ContainerSummary: { + /** @description The ID of this container */ + Id?: string; + /** @description The names that this container has been given */ + Names?: string[]; + /** @description The name of the image used when creating this container */ + Image?: string; + /** @description The ID of the image that this container was created from */ + ImageID?: string; + /** @description Command to run when starting the container */ + Command?: string; + /** + * Format: int64 + * @description When the container was created + */ + Created?: number; + /** @description The ports exposed by this container */ + Ports?: definitions["Port"][]; + /** + * Format: int64 + * @description The size of files that have been created or changed by this container + */ + SizeRw?: number; + /** + * Format: int64 + * @description The total size of all the files in this container + */ + SizeRootFs?: number; + /** @description User-defined key/value metadata. */ + Labels?: { [key: string]: string }; + /** @description The state of this container (e.g. `Exited`) */ + State?: string; + /** @description Additional human-readable status of this container (e.g. `Exit 0`) */ + Status?: string; + HostConfig?: { + NetworkMode?: string; + }; + /** @description A summary of the container's network settings */ + NetworkSettings?: { + Networks?: { [key: string]: definitions["EndpointSettings"] }; + }; + Mounts?: definitions["MountPoint"][]; + }; + /** @description Driver represents a driver (network, logging, secrets). 
*/ + Driver: { + /** + * @description Name of the driver. + * @example some-driver + */ + Name: string; + /** + * @description Key/value map of driver-specific options. + * @example { + * "OptionA": "value for driver-specific option A", + * "OptionB": "value for driver-specific option B" + * } + */ + Options?: { [key: string]: string }; + }; + SecretSpec: { + /** @description User-defined name of the secret. */ + Name?: string; + /** + * @description User-defined key/value metadata. + * @example { + * "com.example.some-label": "some-value", + * "com.example.some-other-label": "some-other-value" + * } + */ + Labels?: { [key: string]: string }; + /** + * @description Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) + * data to store as secret. + * + * This field is only used to _create_ a secret, and is not returned by + * other endpoints. + * + * @example + */ + Data?: string; + /** + * @description Name of the secrets driver used to fetch the secret's value from an + * external secret store. + */ + Driver?: definitions["Driver"]; + /** + * @description Templating driver, if applicable + * + * Templating controls whether and how to evaluate the config payload as + * a template. If no driver is set, no templating is used. + */ + Templating?: definitions["Driver"]; + }; + Secret: { + /** @example blt1owaxmitz71s9v5zh81zun */ + ID?: string; + Version?: definitions["ObjectVersion"]; + /** + * Format: dateTime + * @example 2017-07-20T13:55:28.678958722Z + */ + CreatedAt?: string; + /** + * Format: dateTime + * @example 2017-07-20T13:55:28.678958722Z + */ + UpdatedAt?: string; + Spec?: definitions["SecretSpec"]; + }; + ConfigSpec: { + /** @description User-defined name of the config. */ + Name?: string; + /** @description User-defined key/value metadata. */ + Labels?: { [key: string]: string }; + /** + * @description Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) + * config data. 
+ */ + Data?: string; + /** + * @description Templating driver, if applicable + * + * Templating controls whether and how to evaluate the config payload as + * a template. If no driver is set, no templating is used. + */ + Templating?: definitions["Driver"]; + }; + Config: { + ID?: string; + Version?: definitions["ObjectVersion"]; + /** Format: dateTime */ + CreatedAt?: string; + /** Format: dateTime */ + UpdatedAt?: string; + Spec?: definitions["ConfigSpec"]; + }; + /** + * @description ContainerState stores container's running state. It's part of ContainerJSONBase + * and will be returned by the "inspect" command. + */ + ContainerState: { + /** + * @description String representation of the container state. Can be one of "created", + * "running", "paused", "restarting", "removing", "exited", or "dead". + * + * @example running + * @enum {string} + */ + Status?: + | "created" + | "running" + | "paused" + | "restarting" + | "removing" + | "exited" + | "dead"; + /** + * @description Whether this container is running. + * + * Note that a running container can be _paused_. The `Running` and `Paused` + * booleans are not mutually exclusive: + * + * When pausing a container (on Linux), the freezer cgroup is used to suspend + * all processes in the container. Freezing the process requires the process to + * be running. As a result, paused containers are both `Running` _and_ `Paused`. + * + * Use the `Status` field instead to determine if a container's state is "running". + * + * @example true + */ + Running?: boolean; + /** + * @description Whether this container is paused. + * @example false + */ + Paused?: boolean; + /** + * @description Whether this container is restarting. + * @example false + */ + Restarting?: boolean; + /** + * @description Whether a process within this container has been killed because it ran + * out of memory since the container was last started. 
+ * + * @example false + */ + OOMKilled?: boolean; + /** @example false */ + Dead?: boolean; + /** + * @description The process ID of this container + * @example 1234 + */ + Pid?: number; + /** + * @description The last exit code of this container + * @example 0 + */ + ExitCode?: number; + Error?: string; + /** + * @description The time when this container was last started. + * @example 2020-01-06T09:06:59.461876391Z + */ + StartedAt?: string; + /** + * @description The time when this container last exited. + * @example 2020-01-06T09:07:59.461876391Z + */ + FinishedAt?: string; + Health?: definitions["Health"]; + }; + /** + * ContainerCreateResponse + * @description OK response to ContainerCreate operation + */ + ContainerCreateResponse: { + /** + * @description The ID of the created container + * @example ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743 + */ + Id: string; + /** + * @description Warnings encountered when creating the container + * @example [] + */ + Warnings: string[]; + }; + /** + * ContainerWaitResponse + * @description OK response to ContainerWait operation + */ + ContainerWaitResponse: { + /** + * Format: int64 + * @description Exit code of the container + */ + StatusCode: number; + Error?: definitions["ContainerWaitExitError"]; + }; + /** @description container waiting error, if any */ + ContainerWaitExitError: { + /** @description Details of an error */ + Message?: string; + }; + /** @description Response of Engine API: GET "/version" */ + SystemVersion: { + Platform?: { + Name: string; + }; + /** @description Information about system components */ + Components?: { + /** + * @description Name of the component + * + * @example Engine + */ + Name: string; + /** + * @description Version of the component + * + * @example 19.03.12 + */ + Version: string; + /** + * @description Key/value pairs of strings with additional information about the + * component. 
These values are intended for informational purposes + * only, and their content is not defined, and not part of the API + * specification. + * + * These messages can be printed by the client as information to the user. + */ + Details?: { [key: string]: unknown }; + }[]; + /** + * @description The version of the daemon + * @example 19.03.12 + */ + Version?: string; + /** + * @description The default (and highest) API version that is supported by the daemon + * + * @example 1.40 + */ + ApiVersion?: string; + /** + * @description The minimum API version that is supported by the daemon + * + * @example 1.12 + */ + MinAPIVersion?: string; + /** + * @description The Git commit of the source code that was used to build the daemon + * + * @example 48a66213fe + */ + GitCommit?: string; + /** + * @description The version Go used to compile the daemon, and the version of the Go + * runtime in use. + * + * @example go1.13.14 + */ + GoVersion?: string; + /** + * @description The operating system that the daemon is running on ("linux" or "windows") + * + * @example linux + */ + Os?: string; + /** + * @description The architecture that the daemon is running on + * + * @example amd64 + */ + Arch?: string; + /** + * @description The kernel version (`uname -r`) that the daemon is running on. + * + * This field is omitted when empty. + * + * @example 4.19.76-linuxkit + */ + KernelVersion?: string; + /** + * @description Indicates if the daemon is started with experimental features enabled. + * + * This field is omitted when empty / false. + * + * @example true + */ + Experimental?: boolean; + /** + * @description The date and time that the daemon was compiled. + * + * @example 2020-06-22T15:49:27.000000000+00:00 + */ + BuildTime?: string; + }; + SystemInfo: { + /** + * @description Unique identifier of the daemon. + * + *


+ * + * > **Note**: The format of the ID itself is not part of the API, and + * > should not be considered stable. + * + * @example 7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS + */ + ID?: string; + /** + * @description Total number of containers on the host. + * @example 14 + */ + Containers?: number; + /** + * @description Number of containers with status `"running"`. + * + * @example 3 + */ + ContainersRunning?: number; + /** + * @description Number of containers with status `"paused"`. + * + * @example 1 + */ + ContainersPaused?: number; + /** + * @description Number of containers with status `"stopped"`. + * + * @example 10 + */ + ContainersStopped?: number; + /** + * @description Total number of images on the host. + * + * Both _tagged_ and _untagged_ (dangling) images are counted. + * + * @example 508 + */ + Images?: number; + /** + * @description Name of the storage driver in use. + * @example overlay2 + */ + Driver?: string; + /** + * @description Information specific to the storage driver, provided as + * "label" / "value" pairs. + * + * This information is provided by the storage driver, and formatted + * in a way consistent with the output of `docker info` on the command + * line. + * + *


+ * + * > **Note**: The information returned in this field, including the + * > formatting of values and labels, should not be considered stable, + * > and may change without notice. + * + * @example [ + * [ + * "Backing Filesystem", + * "extfs" + * ], + * [ + * "Supports d_type", + * "true" + * ], + * [ + * "Native Overlay Diff", + * "true" + * ] + * ] + */ + DriverStatus?: string[][]; + /** + * @description Root directory of persistent Docker state. + * + * Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + * on Windows. + * + * @example /var/lib/docker + */ + DockerRootDir?: string; + Plugins?: definitions["PluginsInfo"]; + /** + * @description Indicates if the host has memory limit support enabled. + * @example true + */ + MemoryLimit?: boolean; + /** + * @description Indicates if the host has memory swap limit support enabled. + * @example true + */ + SwapLimit?: boolean; + /** + * @description Indicates if the host has kernel memory TCP limit support enabled. This + * field is omitted if not supported. + * + * Kernel memory TCP limits are not supported when using cgroups v2, which + * does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. + * + * @example true + */ + KernelMemoryTCP?: boolean; + /** + * @description Indicates if CPU CFS(Completely Fair Scheduler) period is supported by + * the host. + * + * @example true + */ + CpuCfsPeriod?: boolean; + /** + * @description Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by + * the host. + * + * @example true + */ + CpuCfsQuota?: boolean; + /** + * @description Indicates if CPU Shares limiting is supported by the host. + * + * @example true + */ + CPUShares?: boolean; + /** + * @description Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. 
+ * + * See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + * + * @example true + */ + CPUSet?: boolean; + /** + * @description Indicates if the host kernel has PID limit support enabled. + * @example true + */ + PidsLimit?: boolean; + /** @description Indicates if OOM killer disable is supported on the host. */ + OomKillDisable?: boolean; + /** + * @description Indicates IPv4 forwarding is enabled. + * @example true + */ + IPv4Forwarding?: boolean; + /** + * @description Indicates if `bridge-nf-call-iptables` is available on the host. + * @example true + */ + BridgeNfIptables?: boolean; + /** + * @description Indicates if `bridge-nf-call-ip6tables` is available on the host. + * @example true + */ + BridgeNfIp6tables?: boolean; + /** + * @description Indicates if the daemon is running in debug-mode / with debug-level + * logging enabled. + * + * @example true + */ + Debug?: boolean; + /** + * @description The total number of file Descriptors in use by the daemon process. + * + * This information is only returned if debug-mode is enabled. + * + * @example 64 + */ + NFd?: number; + /** + * @description The number of goroutines that currently exist. + * + * This information is only returned if debug-mode is enabled. + * + * @example 174 + */ + NGoroutines?: number; + /** + * @description Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + * format with nano-seconds. + * + * @example 2017-08-08T20:28:29.06202363Z + */ + SystemTime?: string; + /** @description The logging driver to use as a default for new containers. */ + LoggingDriver?: string; + /** + * @description The driver to use for managing cgroups. + * + * @default cgroupfs + * @example cgroupfs + * @enum {string} + */ + CgroupDriver?: "cgroupfs" | "systemd" | "none"; + /** + * @description The version of the cgroup. 
+ * + * @default 1 + * @example 1 + * @enum {string} + */ + CgroupVersion?: "1" | "2"; + /** + * @description Number of event listeners subscribed. + * @example 30 + */ + NEventsListener?: number; + /** + * @description Kernel version of the host. + * + * On Linux, this information obtained from `uname`. On Windows this + * information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ + * registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + * + * @example 4.9.38-moby + */ + KernelVersion?: string; + /** + * @description Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" + * or "Windows Server 2016 Datacenter" + * + * @example Alpine Linux v3.5 + */ + OperatingSystem?: string; + /** + * @description Version of the host's operating system + * + *


+ * + * > **Note**: The information returned in this field, including its + * > very existence, and the formatting of values, should not be considered + * > stable, and may change without notice. + * + * @example 16.04 + */ + OSVersion?: string; + /** + * @description Generic type of the operating system of the host, as returned by the + * Go runtime (`GOOS`). + * + * Currently returned values are "linux" and "windows". A full list of + * possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + * + * @example linux + */ + OSType?: string; + /** + * @description Hardware architecture of the host, as returned by the Go runtime + * (`GOARCH`). + * + * A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + * + * @example x86_64 + */ + Architecture?: string; + /** + * @description The number of logical CPUs usable by the daemon. + * + * The number of available CPUs is checked by querying the operating + * system when the daemon starts. Changes to operating system CPU + * allocation after the daemon is started are not reflected. + * + * @example 4 + */ + NCPU?: number; + /** + * Format: int64 + * @description Total amount of physical memory available on the host, in bytes. + * + * @example 2095882240 + */ + MemTotal?: number; + /** + * @description Address / URL of the index server that is used for image search, + * and as a default for user authentication for Docker Hub and Docker Cloud. + * + * @default https://index.docker.io/v1/ + * @example https://index.docker.io/v1/ + */ + IndexServerAddress?: string; + RegistryConfig?: definitions["RegistryServiceConfig"]; + GenericResources?: definitions["GenericResources"]; + /** + * @description HTTP-proxy configured for the daemon. This value is obtained from the + * [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. 
+ * Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + * are masked in the API response. + * + * Containers do not automatically inherit this configuration. + * + * @example http://xxxxx:xxxxx@proxy.corp.example.com:8080 + */ + HttpProxy?: string; + /** + * @description HTTPS-proxy configured for the daemon. This value is obtained from the + * [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + * Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + * are masked in the API response. + * + * Containers do not automatically inherit this configuration. + * + * @example https://xxxxx:xxxxx@proxy.corp.example.com:4443 + */ + HttpsProxy?: string; + /** + * @description Comma-separated list of domain extensions for which no proxy should be + * used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + * environment variable. + * + * Containers do not automatically inherit this configuration. + * + * @example *.local, 169.254/16 + */ + NoProxy?: string; + /** + * @description Hostname of the host. + * @example node5.corp.example.com + */ + Name?: string; + /** + * @description User-defined labels (key/value metadata) as set on the daemon. + * + *


+ * + * > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + * > set through the daemon configuration, and _node_ labels, set from a + * > manager node in the Swarm. Node labels are not included in this + * > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + * > on a manager node in the Swarm. + * + * @example [ + * "storage=ssd", + * "production" + * ] + */ + Labels?: string[]; + /** + * @description Indicates if experimental features are enabled on the daemon. + * + * @example true + */ + ExperimentalBuild?: boolean; + /** + * @description Version string of the daemon. + * + * @example 24.0.2 + */ + ServerVersion?: string; + /** + * @description List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + * runtimes configured on the daemon. Keys hold the "name" used to + * reference the runtime. + * + * The Docker daemon relies on an OCI compliant runtime (invoked via the + * `containerd` daemon) as its interface to the Linux kernel namespaces, + * cgroups, and SELinux. + * + * The default runtime is `runc`, and automatically configured. Additional + * runtimes can be configured by the user and will be listed here. + * + * @default { + * "runc": { + * "path": "runc" + * } + * } + * @example { + * "runc": { + * "path": "runc" + * }, + * "runc-master": { + * "path": "/go/bin/runc" + * }, + * "custom": { + * "path": "/usr/local/bin/my-oci-runtime", + * "runtimeArgs": [ + * "--debug", + * "--systemd-cgroup=false" + * ] + * } + * } + */ + Runtimes?: { [key: string]: definitions["Runtime"] }; + /** + * @description Name of the default OCI runtime that is used when starting containers. + * + * The default can be overridden per-container at create time. + * + * @default runc + * @example runc + */ + DefaultRuntime?: string; + Swarm?: definitions["SwarmInfo"]; + /** + * @description Indicates if live restore is enabled. 
+ * + * If enabled, containers are kept running when the daemon is shutdown + * or upon daemon start if running containers are detected. + * + * @default false + * @example false + */ + LiveRestoreEnabled?: boolean; + /** + * @description Represents the isolation technology to use as a default for containers. + * The supported values are platform-specific. + * + * If no isolation value is specified on daemon start, on Windows client, + * the default is `hyperv`, and on Windows server, the default is `process`. + * + * This option is currently not used on other platforms. + * + * @default default + * @enum {string} + */ + Isolation?: "default" | "hyperv" | "process"; + /** + * @description Name and, optional, path of the `docker-init` binary. + * + * If the path is omitted, the daemon searches the host's `$PATH` for the + * binary and uses the first result. + * + * @example docker-init + */ + InitBinary?: string; + ContainerdCommit?: definitions["Commit"]; + RuncCommit?: definitions["Commit"]; + InitCommit?: definitions["Commit"]; + /** + * @description List of security features that are enabled on the daemon, such as + * apparmor, seccomp, SELinux, user-namespaces (userns), rootless and + * no-new-privileges. + * + * Additional configuration options for each security feature may + * be present, and are included as a comma-separated list of key/value + * pairs. + * + * @example [ + * "name=apparmor", + * "name=seccomp,profile=default", + * "name=selinux", + * "name=userns", + * "name=rootless" + * ] + */ + SecurityOptions?: string[]; + /** + * @description Reports a summary of the product license on the daemon. + * + * If a commercial license has been applied to the daemon, information + * such as number of nodes, and expiration are included. + * + * @example Community Engine + */ + ProductLicense?: string; + /** + * @description List of custom default address pools for local networks, which can be + * specified in the daemon.json file or dockerd option. 
+ * + * Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 + * 10.10.[0-255].0/24 address pools. + */ + DefaultAddressPools?: { + /** + * @description The network address in CIDR format + * @example 10.10.0.0/16 + */ + Base?: string; + /** + * @description The network pool size + * @example 24 + */ + Size?: number; + }[]; + /** + * @description List of warnings / informational messages about missing features, or + * issues related to the daemon configuration. + * + * These messages can be printed by the client as information to the user. + * + * @example [ + * "WARNING: No memory limit support", + * "WARNING: bridge-nf-call-iptables is disabled", + * "WARNING: bridge-nf-call-ip6tables is disabled" + * ] + */ + Warnings?: string[]; + }; + /** + * @description Available plugins per type. + * + *


+ * + * > **Note**: Only unmanaged (V1) plugins are included in this list. + * > V1 plugins are "lazily" loaded, and are not returned in this list + * > if there is no resource using the plugin. + */ + PluginsInfo: { + /** + * @description Names of available volume-drivers, and network-driver plugins. + * @example [ + * "local" + * ] + */ + Volume?: string[]; + /** + * @description Names of available network-drivers, and network-driver plugins. + * @example [ + * "bridge", + * "host", + * "ipvlan", + * "macvlan", + * "null", + * "overlay" + * ] + */ + Network?: string[]; + /** + * @description Names of available authorization plugins. + * @example [ + * "img-authz-plugin", + * "hbm" + * ] + */ + Authorization?: string[]; + /** + * @description Names of available logging-drivers, and logging-driver plugins. + * @example [ + * "awslogs", + * "fluentd", + * "gcplogs", + * "gelf", + * "journald", + * "json-file", + * "logentries", + * "splunk", + * "syslog" + * ] + */ + Log?: string[]; + }; + /** @description RegistryServiceConfig stores daemon registry services configuration. */ + RegistryServiceConfig: { + /** + * @description List of IP ranges to which nondistributable artifacts can be pushed, + * using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). + * + * Some images (for example, Windows base images) contain artifacts + * whose distribution is restricted by license. When these images are + * pushed to a registry, restricted artifacts are not included. + * + * This configuration override this behavior, and enables the daemon to + * push nondistributable artifacts to all registries whose resolved IP + * address is within the subnet described by the CIDR syntax. + * + * This option is useful when pushing images containing + * nondistributable artifacts to a registry on an air-gapped network so + * hosts on that network can pull the images without connecting to + * another server. 
+ * + * > **Warning**: Nondistributable artifacts typically have restrictions + * > on how and where they can be distributed and shared. Only use this + * > feature to push artifacts to private registries and ensure that you + * > are in compliance with any terms that cover redistributing + * > nondistributable artifacts. + * + * @example [ + * "::1/128", + * "127.0.0.0/8" + * ] + */ + AllowNondistributableArtifactsCIDRs?: string[]; + /** + * @description List of registry hostnames to which nondistributable artifacts can be + * pushed, using the format `[:]` or `[:]`. + * + * Some images (for example, Windows base images) contain artifacts + * whose distribution is restricted by license. When these images are + * pushed to a registry, restricted artifacts are not included. + * + * This configuration override this behavior for the specified + * registries. + * + * This option is useful when pushing images containing + * nondistributable artifacts to a registry on an air-gapped network so + * hosts on that network can pull the images without connecting to + * another server. + * + * > **Warning**: Nondistributable artifacts typically have restrictions + * > on how and where they can be distributed and shared. Only use this + * > feature to push artifacts to private registries and ensure that you + * > are in compliance with any terms that cover redistributing + * > nondistributable artifacts. + * + * @example [ + * "registry.internal.corp.example.com:3000", + * "[2001:db8:a0b:12f0::1]:443" + * ] + */ + AllowNondistributableArtifactsHostnames?: string[]; + /** + * @description List of IP ranges of insecure registries, using the CIDR syntax + * ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries + * accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + * from unknown CAs) communication. + * + * By default, local registries (`127.0.0.0/8`) are configured as + * insecure. All other registries are secure. 
Communicating with an + * insecure registry is not possible if the daemon assumes that registry + * is secure. + * + * This configuration override this behavior, insecure communication with + * registries whose resolved IP address is within the subnet described by + * the CIDR syntax. + * + * Registries can also be marked insecure by hostname. Those registries + * are listed under `IndexConfigs` and have their `Secure` field set to + * `false`. + * + * > **Warning**: Using this option can be useful when running a local + * > registry, but introduces security vulnerabilities. This option + * > should therefore ONLY be used for testing purposes. For increased + * > security, users should add their CA to their system's list of trusted + * > CAs instead of enabling this option. + * + * @example [ + * "::1/128", + * "127.0.0.0/8" + * ] + */ + InsecureRegistryCIDRs?: string[]; + /** + * @example { + * "127.0.0.1:5000": { + * "Name": "127.0.0.1:5000", + * "Mirrors": [], + * "Secure": false, + * "Official": false + * }, + * "[2001:db8:a0b:12f0::1]:80": { + * "Name": "[2001:db8:a0b:12f0::1]:80", + * "Mirrors": [], + * "Secure": false, + * "Official": false + * }, + * "docker.io": { + * "Name": "docker.io", + * "Mirrors": [ + * "https://hub-mirror.corp.example.com:5000/" + * ], + * "Secure": true, + * "Official": true + * }, + * "registry.internal.corp.example.com:3000": { + * "Name": "registry.internal.corp.example.com:3000", + * "Mirrors": [], + * "Secure": false, + * "Official": false + * } + * } + */ + IndexConfigs?: { [key: string]: definitions["IndexInfo"] }; + /** + * @description List of registry URLs that act as a mirror for the official + * (`docker.io`) registry. + * + * @example [ + * "https://hub-mirror.corp.example.com:5000/", + * "https://[2001:db8:a0b:12f0::1]/" + * ] + */ + Mirrors?: string[]; + }; + /** @description IndexInfo contains information about a registry. */ + IndexInfo: { + /** + * @description Name of the registry, such as "docker.io". 
+ * + * @example docker.io + */ + Name?: string; + /** + * @description List of mirrors, expressed as URIs. + * + * @example [ + * "https://hub-mirror.corp.example.com:5000/", + * "https://registry-2.docker.io/", + * "https://registry-3.docker.io/" + * ] + */ + Mirrors?: string[]; + /** + * @description Indicates if the registry is part of the list of insecure + * registries. + * + * If `false`, the registry is insecure. Insecure registries accept + * un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + * unknown CAs) communication. + * + * > **Warning**: Insecure registries can be useful when running a local + * > registry. However, because its use creates security vulnerabilities + * > it should ONLY be enabled for testing purposes. For increased + * > security, users should add their CA to their system's list of + * > trusted CAs instead of enabling this option. + * + * @example true + */ + Secure?: boolean; + /** + * @description Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + * + * @example true + */ + Official?: boolean; + }; + /** + * @description Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + * runtime. + * + * The runtime is invoked by the daemon via the `containerd` daemon. OCI + * runtimes act as an interface to the Linux kernel namespaces, cgroups, + * and SELinux. + */ + Runtime: { + /** + * @description Name and, optional, path, of the OCI executable binary. + * + * If the path is omitted, the daemon searches the host's `$PATH` for the + * binary and uses the first result. + * + * @example /usr/local/bin/my-oci-runtime + */ + path?: string; + /** + * @description List of command-line arguments to pass to the runtime when invoked. 
+ * + * @example [ + * "--debug", + * "--systemd-cgroup=false" + * ] + */ + runtimeArgs?: string[]; + }; + /** + * @description Commit holds the Git-commit (SHA1) that a binary was built from, as + * reported in the version-string of external tools, such as `containerd`, + * or `runC`. + */ + Commit: { + /** + * @description Actual commit ID of external tool. + * @example cfb82a876ecc11b5ca0977d1733adbe58599088a + */ + ID?: string; + /** + * @description Commit ID of external tool expected by dockerd as set at build time. + * + * @example 2d41c047c83e09a6d61d464906feb2a2f3c52aa4 + */ + Expected?: string; + }; + /** @description Represents generic information about swarm. */ + SwarmInfo: { + /** + * @description Unique identifier of for this node in the swarm. + * @default + * @example k67qz4598weg5unwwffg6z1m1 + */ + NodeID?: string; + /** + * @description IP address at which this node can be reached by other nodes in the + * swarm. + * + * @default + * @example 10.0.0.46 + */ + NodeAddr?: string; + LocalNodeState?: definitions["LocalNodeState"]; + /** + * @default false + * @example true + */ + ControlAvailable?: boolean; + /** @default */ + Error?: string; + /** + * @description List of ID's and addresses of other managers in the swarm. + * + * @default null + * @example [ + * { + * "NodeID": "71izy0goik036k48jg985xnds", + * "Addr": "10.0.0.158:2377" + * }, + * { + * "NodeID": "79y6h1o4gv8n120drcprv5nmc", + * "Addr": "10.0.0.159:2377" + * }, + * { + * "NodeID": "k67qz4598weg5unwwffg6z1m1", + * "Addr": "10.0.0.46:2377" + * } + * ] + */ + RemoteManagers?: definitions["PeerNode"][]; + /** + * @description Total number of nodes in the swarm. + * @example 4 + */ + Nodes?: number; + /** + * @description Total number of managers in the swarm. + * @example 3 + */ + Managers?: number; + Cluster?: definitions["ClusterInfo"]; + }; + /** + * @description Current local status of this node. 
+ * @default + * @example active + * @enum {string} + */ + LocalNodeState: "" | "inactive" | "pending" | "active" | "error" | "locked"; + /** @description Represents a peer-node in the swarm */ + PeerNode: { + /** @description Unique identifier of for this node in the swarm. */ + NodeID?: string; + /** @description IP address and ports at which this node can be reached. */ + Addr?: string; + }; + /** @description Specifies how a service should be attached to a particular network. */ + NetworkAttachmentConfig: { + /** @description The target network for attachment. Must be a network name or ID. */ + Target?: string; + /** @description Discoverable alternate names for the service on this network. */ + Aliases?: string[]; + /** @description Driver attachment options for the network target. */ + DriverOpts?: { [key: string]: string }; + }; + /** + * @description Actor describes something that generates events, like a container, network, + * or a volume. + */ + EventActor: { + /** + * @description The ID of the object emitting the event + * @example ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743 + */ + ID?: string; + /** + * @description Various key/value attributes of the object, depending on its type. + * + * @example { + * "com.example.some-label": "some-label-value", + * "image": "alpine:latest", + * "name": "my-container" + * } + */ + Attributes?: { [key: string]: string }; + }; + /** + * SystemEventsResponse + * @description EventMessage represents the information an event contains. + */ + EventMessage: { + /** + * @description The type of object emitting the event + * @example container + * @enum {string} + */ + Type?: + | "builder" + | "config" + | "container" + | "daemon" + | "image" + | "network" + | "node" + | "plugin" + | "secret" + | "service" + | "volume"; + /** + * @description The type of event + * @example create + */ + Action?: string; + Actor?: definitions["EventActor"]; + /** + * @description Scope of the event. 
Engine events are `local` scope. Cluster (Swarm) + * events are `swarm` scope. + * + * @enum {string} + */ + scope?: "local" | "swarm"; + /** + * Format: int64 + * @description Timestamp of event + * @example 1629574695 + */ + time?: number; + /** + * Format: int64 + * @description Timestamp of event, with nanosecond accuracy + * @example 1629574695515050000 + */ + timeNano?: number; + }; + /** + * @description A descriptor struct containing digest, media type, and size, as defined in + * the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + */ + OCIDescriptor: { + /** + * @description The media type of the object this schema refers to. + * + * @example application/vnd.docker.distribution.manifest.v2+json + */ + mediaType?: string; + /** + * @description The digest of the targeted content. + * + * @example sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96 + */ + digest?: string; + /** + * Format: int64 + * @description The size in bytes of the blob. + * + * @example 3987495 + */ + size?: number; + }; + /** + * @description Describes the platform which the image in the manifest runs on, as defined + * in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + */ + OCIPlatform: { + /** + * @description The CPU architecture, for example `amd64` or `ppc64`. + * + * @example arm + */ + architecture?: string; + /** + * @description The operating system, for example `linux` or `windows`. + * + * @example windows + */ + os?: string; + /** + * @description Optional field specifying the operating system version, for example on + * Windows `10.0.19041.1165`. + * + * @example 10.0.19041.1165 + */ + "os.version"?: string; + /** + * @description Optional field specifying an array of strings, each listing a required + * OS feature (for example on Windows `win32k`). 
+ * + * @example [ + * "win32k" + * ] + */ + "os.features"?: string[]; + /** + * @description Optional field specifying a variant of the CPU, for example `v7` to + * specify ARMv7 when architecture is `arm`. + * + * @example v7 + */ + variant?: string; + }; + /** + * DistributionInspectResponse + * @description Describes the result obtained from contacting the registry to retrieve + * image metadata. + */ + DistributionInspect: { + Descriptor: definitions["OCIDescriptor"]; + /** @description An array containing all platforms supported by the image. */ + Platforms: definitions["OCIPlatform"][]; + }; + /** + * @description Options and information specific to, and only present on, Swarm CSI + * cluster volumes. + */ + ClusterVolume: { + /** + * @description The Swarm ID of this volume. Because cluster volumes are Swarm + * objects, they have an ID, unlike non-cluster volumes. This ID can + * be used to refer to the Volume instead of the name. + */ + ID?: string; + Version?: definitions["ObjectVersion"]; + /** Format: dateTime */ + CreatedAt?: string; + /** Format: dateTime */ + UpdatedAt?: string; + Spec?: definitions["ClusterVolumeSpec"]; + /** @description Information about the global status of the volume. */ + Info?: { + /** + * Format: int64 + * @description The capacity of the volume in bytes. A value of 0 indicates that + * the capacity is unknown. + */ + CapacityBytes?: number; + /** + * @description A map of strings to strings returned from the storage plugin when + * the volume is created. + */ + VolumeContext?: { [key: string]: string }; + /** + * @description The ID of the volume as returned by the CSI storage plugin. This + * is distinct from the volume's ID as provided by Docker. This ID + * is never used by the user when communicating with Docker to refer + * to this volume. If the ID is blank, then the Volume has not been + * successfully created in the plugin yet. 
+ */ + VolumeID?: string; + /** @description The topology this volume is actually accessible from. */ + AccessibleTopology?: definitions["Topology"][]; + }; + /** + * @description The status of the volume as it pertains to its publishing and use on + * specific nodes + */ + PublishStatus?: { + /** @description The ID of the Swarm node the volume is published on. */ + NodeID?: string; + /** + * @description The published state of the volume. + * * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * * `published` The volume is published successfully to the node. + * * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + * + * @enum {string} + */ + State?: + | "pending-publish" + | "published" + | "pending-node-unpublish" + | "pending-controller-unpublish"; + /** + * @description A map of strings to strings returned by the CSI controller + * plugin when a volume is published. + */ + PublishContext?: { [key: string]: string }; + }[]; + }; + /** @description Cluster-specific options used to create the volume. */ + ClusterVolumeSpec: { + /** + * @description Group defines the volume group of this volume. Volumes belonging to + * the same group can be referred to by group name when creating + * Services. Referring to a volume by group instructs Swarm to treat + * volumes in that group interchangeably for the purpose of scheduling. + * Volumes with an empty string for a group technically all belong to + * the same, emptystring group. + */ + Group?: string; + /** @description Defines how the volume is used by tasks. */ + AccessMode?: { + /** + * @description The set of nodes this volume can be used on at one time. 
+ * - `single` The volume may only be scheduled to one node at a time. + * - `multi` the volume may be scheduled to any supported number of nodes at a time. + * + * @default single + * @enum {string} + */ + Scope?: "single" | "multi"; + /** + * @description The number and way that different tasks can use this volume + * at one time. + * - `none` The volume may only be used by one task at a time. + * - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + * - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + * - `all` The volume may have any number of readers and writers. + * + * @default none + * @enum {string} + */ + Sharing?: "none" | "readonly" | "onewriter" | "all"; + /** + * @description Options for using this volume as a Mount-type volume. + * + * Either MountVolume or BlockVolume, but not both, must be + * present. + * properties: + * FsType: + * type: "string" + * description: | + * Specifies the filesystem type for the mount volume. + * Optional. + * MountFlags: + * type: "array" + * description: | + * Flags to pass when mounting the volume. Optional. + * items: + * type: "string" + * BlockVolume: + * type: "object" + * description: | + * Options for using this volume as a Block-type volume. + * Intentionally empty. + */ + MountVolume?: { [key: string]: unknown }; + /** + * @description Swarm Secrets that are passed to the CSI storage plugin when + * operating on this volume. + */ + Secrets?: { + /** + * @description Key is the name of the key of the key-value pair passed to + * the plugin. + */ + Key?: string; + /** + * @description Secret is the swarm Secret object from which to read data. + * This can be a Secret name or ID. The Secret data is + * retrieved by swarm and used as the value of the key-value + * pair passed to the plugin. + */ + Secret?: string; + }[]; + /** + * @description Requirements for the accessible topology of the volume. 
These + * fields are optional. For an in-depth description of what these + * fields mean, see the CSI specification. + */ + AccessibilityRequirements?: { + /** + * @description A list of required topologies, at least one of which the + * volume must be accessible from. + */ + Requisite?: definitions["Topology"][]; + /** + * @description A list of topologies that the volume should attempt to be + * provisioned in. + */ + Preferred?: definitions["Topology"][]; + }; + /** + * @description The desired capacity that the volume should be created with. If + * empty, the plugin will decide the capacity. + */ + CapacityRange?: { + /** + * Format: int64 + * @description The volume must be at least this big. The value of 0 + * indicates an unspecified minimum + */ + RequiredBytes?: number; + /** + * Format: int64 + * @description The volume must not be bigger than this. The value of 0 + * indicates an unspecified maximum. + */ + LimitBytes?: number; + }; + /** + * @description The availability of the volume for use in tasks. + * - `active` The volume is fully available for scheduling on the cluster + * - `pause` No new workloads should use the volume, but existing workloads are not stopped. + * - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + * + * @default active + * @enum {string} + */ + Availability?: "active" | "pause" | "drain"; + }; + }; + /** + * @description A map of topological domains to topological segments. For in depth + * details, see documentation for the Topology object in the CSI + * specification. + */ + Topology: { [key: string]: string }; +} + +export interface operations { + /** + * Returns a list of containers. For details on the format, see the + * [inspect endpoint](#operation/ContainerInspect). + * + * Note that it uses a different, smaller representation of a container + * than inspecting a single container. For example, the list of linked + * containers is not propagated . 
+ */ + ContainerList: { + parameters: { + query: { + /** Return all containers. By default, only running containers are shown. */ + all?: boolean; + /** + * Return this number of most recently created containers, including + * non-running ones. + */ + limit?: number; + /** Return the size of container as fields `SizeRw` and `SizeRootFs`. */ + size?: boolean; + /** + * Filters to process on the container list, encoded as JSON (a + * `map[string][]string`). For example, `{"status": ["paused"]}` will + * only return paused containers. + * + * Available filters: + * + * - `ancestor`=(`[:]`, ``, or ``) + * - `before`=(`` or ``) + * - `expose`=(`[/]`|`/[]`) + * - `exited=` containers with exit code of `` + * - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + * - `id=` a container's ID + * - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + * - `is-task=`(`true`|`false`) + * - `label=key` or `label="key=value"` of a container label + * - `name=` a container's name + * - `network`=(`` or ``) + * - `publish`=(`[/]`|`/[]`) + * - `since`=(`` or ``) + * - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + * - `volume`=(`` or ``) + */ + filters?: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["ContainerSummary"][]; + }; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ContainerCreate: { + parameters: { + query: { + /** + * Assign the specified name to the container. Must match + * `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + */ + name?: string; + /** + * Platform in the format `os[/arch[/variant]]` used for image lookup. + * + * When specified, the daemon checks if the requested image is present + * in the local image cache with the given OS and Architecture, and + * otherwise returns a `404` status. 
+ * + * If the option is not set, the host's native OS and Architecture are + * used to look up the image in the image cache. However, if no platform + * is passed and the given image does exist in the local image cache, + * but its OS or architecture does not match, the container is created + * with the available image, and a warning is added to the `Warnings` + * field in the response, for example; + * + * WARNING: The requested image's platform (linux/arm64/v8) does not + * match the detected host platform (linux/amd64) and no + * specific platform was requested + */ + platform?: string; + }; + body: { + /** Container to create */ + body: definitions["ContainerConfig"] & { + HostConfig?: definitions["HostConfig"]; + NetworkingConfig?: definitions["NetworkingConfig"]; + }; + }; + }; + responses: { + /** Container created successfully */ + 201: { + schema: definitions["ContainerCreateResponse"]; + }; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** no such image */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** conflict */ + 409: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Return low-level information about a container. 
*/ + ContainerInspect: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** Return the size of container as fields `SizeRw` and `SizeRootFs` */ + size?: boolean; + }; + }; + responses: { + /** no error */ + 200: { + schema: { + /** @description The ID of the container */ + Id?: string; + /** @description The time the container was created */ + Created?: string; + /** @description The path to the command being run */ + Path?: string; + /** @description The arguments to the command being run */ + Args?: string[]; + State?: definitions["ContainerState"]; + /** @description The container's image ID */ + Image?: string; + ResolvConfPath?: string; + HostnamePath?: string; + HostsPath?: string; + LogPath?: string; + Name?: string; + RestartCount?: number; + Driver?: string; + Platform?: string; + MountLabel?: string; + ProcessLabel?: string; + AppArmorProfile?: string; + /** @description IDs of exec instances that are running in the container. */ + ExecIDs?: string[]; + HostConfig?: definitions["HostConfig"]; + GraphDriver?: definitions["GraphDriverData"]; + /** + * Format: int64 + * @description The size of files that have been created or changed by this + * container. + */ + SizeRw?: number; + /** + * Format: int64 + * @description The total size of all the files in this container. + */ + SizeRootFs?: number; + Mounts?: definitions["MountPoint"][]; + Config?: definitions["ContainerConfig"]; + NetworkSettings?: definitions["NetworkSettings"]; + }; + }; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * On Unix systems, this is done by running the `ps` command. This endpoint + * is not supported on Windows. + */ + ContainerTop: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** The arguments to pass to `ps`. 
For example, `aux` */ + ps_args?: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: { + /** @description The ps column titles */ + Titles?: string[]; + /** + * @description Each process running in the container, where each is process + * is an array of values corresponding to the titles. + */ + Processes?: string[][]; + }; + }; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Get `stdout` and `stderr` logs from a container. + * + * Note: This endpoint works only for containers with the `json-file` or + * `journald` logging driver. + */ + ContainerLogs: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** Keep connection after returning logs. */ + follow?: boolean; + /** Return logs from `stdout` */ + stdout?: boolean; + /** Return logs from `stderr` */ + stderr?: boolean; + /** Only return logs since this time, as a UNIX timestamp */ + since?: number; + /** Only return logs before this time, as a UNIX timestamp */ + until?: number; + /** Add timestamps to every log line */ + timestamps?: boolean; + /** + * Only return this number of log lines from the end of the logs. + * Specify as an integer or `all` to output all log lines. + */ + tail?: string; + }; + }; + responses: { + /** + * logs returned as a stream in response body. + * For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + * Note that unlike the attach endpoint, the logs endpoint does not + * upgrade the connection and does not set Content-Type. + */ + 200: { + schema: string; + }; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Returns which files in a container's filesystem have been added, deleted, + * or modified. 
The `Kind` of modification can be one of: + * + * - `0`: Modified ("C") + * - `1`: Added ("A") + * - `2`: Deleted ("D") + */ + ContainerChanges: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + }; + responses: { + /** The list of changes */ + 200: { + schema: definitions["FilesystemChange"][]; + }; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Export the contents of a container as a tarball. */ + ContainerExport: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * This endpoint returns a live stream of a container’s resource usage + * statistics. + * + * The `precpu_stats` is the CPU statistic of the *previous* read, and is + * used to calculate the CPU usage percentage. It is not an exact copy + * of the `cpu_stats` field. + * + * If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + * nil then for compatibility with older daemons the length of the + * corresponding `cpu_usage.percpu_usage` array should be used. + * + * On a cgroup v2 host, the following fields are not set + * * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * * `cpu_stats`: `cpu_usage.percpu_usage` + * * `memory_stats`: `max_usage` and `failcnt` + * Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
+ * + * To calculate the values shown by the `stats` command of the docker cli tool + * the following formulas can be used: + * * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * * available_memory = `memory_stats.limit` + * * Memory usage % = `(used_memory / available_memory) * 100.0` + * * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + */ + ContainerStats: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** + * Stream the output. If false, the stats will be output once and then + * it will disconnect. + */ + stream?: boolean; + /** + * Only get a single stat instead of waiting for 2 cycles. Must be used + * with `stream=false`. + */ + "one-shot"?: boolean; + }; + }; + responses: { + /** no error */ + 200: { + schema: { [key: string]: unknown }; + }; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Resize the TTY for a container. */ + ContainerResize: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** Height of the TTY session in characters */ + h?: number; + /** Width of the TTY session in characters */ + w?: number; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** cannot resize container */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ContainerStart: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** + * Override the key sequence for detaching a container. 
Format is a + * single character `[a-Z]` or `ctrl-` where `` is one + * of: `a-z`, `@`, `^`, `[`, `,` or `_`. + */ + detachKeys?: string; + }; + }; + responses: { + /** no error */ + 204: never; + /** container already started */ + 304: never; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ContainerStop: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** Signal to send to the container as an integer or string (e.g. `SIGINT`). */ + signal?: string; + /** Number of seconds to wait before killing the container */ + t?: number; + }; + }; + responses: { + /** no error */ + 204: never; + /** container already stopped */ + 304: never; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ContainerRestart: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** Signal to send to the container as an integer or string (e.g. `SIGINT`). */ + signal?: string; + /** Number of seconds to wait before killing the container */ + t?: number; + }; + }; + responses: { + /** no error */ + 204: never; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Send a POSIX signal to a container, defaulting to killing to the + * container. + */ + ContainerKill: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** Signal to send to the container as an integer or string (e.g. `SIGINT`). 
*/ + signal?: string; + }; + }; + responses: { + /** no error */ + 204: never; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** container is not running */ + 409: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Change various configuration options of a container without having to + * recreate it. + */ + ContainerUpdate: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + body: { + update: definitions["Resources"] & { + RestartPolicy?: definitions["RestartPolicy"]; + }; + }; + }; + responses: { + /** The container has been updated. */ + 200: { + schema: { + Warnings?: string[]; + }; + }; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ContainerRename: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** New name for the container */ + name: string; + }; + }; + responses: { + /** no error */ + 204: never; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** name already in use */ + 409: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Use the freezer cgroup to suspend all processes in a container. + * + * Traditionally, when suspending a process the `SIGSTOP` signal is used, + * which is observable by the process being suspended. With the freezer + * cgroup the process is unaware, and unable to capture, that it is being + * suspended, and subsequently resumed. 
+ */ + ContainerPause: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + }; + responses: { + /** no error */ + 204: never; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Resume a container which has been paused. */ + ContainerUnpause: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + }; + responses: { + /** no error */ + 204: never; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Attach to a container to read its output or send it input. You can attach + * to the same container multiple times and you can reattach to containers + * that have been detached. + * + * Either the `stream` or `logs` parameter must be `true` for this endpoint + * to do anything. + * + * See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + * for more details. + * + * ### Hijacking + * + * This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + * and `stderr` on the same socket. + * + * This is the response from the daemon for an attach request: + * + * ``` + * HTTP/1.1 200 OK + * Content-Type: application/vnd.docker.raw-stream + * + * [STREAM] + * ``` + * + * After the headers and two new lines, the TCP connection can now be used + * for raw, bidirectional communication between the client and server. + * + * To hint potential proxies about connection hijacking, the Docker client + * can also optionally send connection upgrade headers. 
+ * + * For example, the client sends this request to upgrade the connection: + * + * ``` + * POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + * Upgrade: tcp + * Connection: Upgrade + * ``` + * + * The Docker daemon will respond with a `101 UPGRADED` response, and will + * similarly follow with the raw stream: + * + * ``` + * HTTP/1.1 101 UPGRADED + * Content-Type: application/vnd.docker.raw-stream + * Connection: Upgrade + * Upgrade: tcp + * + * [STREAM] + * ``` + * + * ### Stream format + * + * When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + * the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + * and the stream over the hijacked connected is multiplexed to separate out + * `stdout` and `stderr`. The stream consists of a series of frames, each + * containing a header and a payload. + * + * The header contains the information which the stream writes (`stdout` or + * `stderr`). It also contains the size of the associated frame encoded in + * the last four bytes (`uint32`). + * + * It is encoded on the first eight bytes like this: + * + * ```go + * header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + * ``` + * + * `STREAM_TYPE` can be: + * + * - 0: `stdin` (is written on `stdout`) + * - 1: `stdout` + * - 2: `stderr` + * + * `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + * encoded as big endian. + * + * Following the header is the payload, which is the specified number of + * bytes of `STREAM_TYPE`. + * + * The simplest way to implement this protocol is the following: + * + * 1. Read 8 bytes. + * 2. Choose `stdout` or `stderr` depending on the first byte. + * 3. Extract the frame size from the last four bytes. + * 4. Read the extracted size and output it on the correct output. + * 5. Goto 1. 
+ * + * ### Stream format when using a TTY + * + * When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), + * the stream is not multiplexed. The data exchanged over the hijacked + * connection is simply the raw data from the process PTY and client's + * `stdin`. + */ + ContainerAttach: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** + * Override the key sequence for detaching a container.Format is a single + * character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, + * `@`, `^`, `[`, `,` or `_`. + */ + detachKeys?: string; + /** + * Replay previous logs from the container. + * + * This is useful for attaching to a container that has started and you + * want to output everything since the container started. + * + * If `stream` is also enabled, once all the previous output has been + * returned, it will seamlessly transition into streaming current + * output. + */ + logs?: boolean; + /** Stream attached streams from the time the request was made onwards. */ + stream?: boolean; + /** Attach to `stdin` */ + stdin?: boolean; + /** Attach to `stdout` */ + stdout?: boolean; + /** Attach to `stderr` */ + stderr?: boolean; + }; + }; + responses: { + /** no error, hints proxy about hijacking */ + 101: unknown; + /** no error, no upgrade header found */ + 200: unknown; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ContainerAttachWebsocket: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** + * Override the key sequence for detaching a container.Format is a single + * character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, + * `@`, `^`, `[`, `,`, or `_`. 
+ */ + detachKeys?: string; + /** Return logs */ + logs?: boolean; + /** Return stream */ + stream?: boolean; + /** Attach to `stdin` */ + stdin?: boolean; + /** Attach to `stdout` */ + stdout?: boolean; + /** Attach to `stderr` */ + stderr?: boolean; + }; + }; + responses: { + /** no error, hints proxy about hijacking */ + 101: unknown; + /** no error, no upgrade header found */ + 200: unknown; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Block until a container stops, then returns the exit code. */ + ContainerWait: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** + * Wait until a container state reaches the given condition. + * + * Defaults to `not-running` if omitted or empty. + */ + condition?: "not-running" | "next-exit" | "removed"; + }; + }; + responses: { + /** The container has exit. */ + 200: { + schema: definitions["ContainerWaitResponse"]; + }; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ContainerDelete: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** Remove anonymous volumes associated with the container. */ + v?: boolean; + /** If the container is running, kill it before removing it. */ + force?: boolean; + /** Remove the specified link associated with the container. 
*/ + link?: boolean; + }; + }; + responses: { + /** no error */ + 204: never; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** conflict */ + 409: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Get a tar archive of a resource in the filesystem of container id. */ + ContainerArchive: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** Resource in the container’s filesystem to archive. */ + path: string; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** Bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** Container or path does not exist */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Upload a tar archive to be extracted to a path in the filesystem of container id. + * `path` parameter is asserted to be a directory. If it exists as a file, 400 error + * will be returned with message "not a directory". + */ + PutContainerArchive: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** Path to a directory in the container to extract the archive’s contents into. */ + path: string; + /** + * If `1`, `true`, or `True` then it will be an error if unpacking the + * given content would cause an existing directory to be replaced with + * a non-directory and vice versa. + */ + noOverwriteDirNonDir?: string; + /** + * If `1`, `true`, then it will copy UID/GID maps to the dest file or + * dir + */ + copyUIDGID?: string; + }; + body: { + /** + * The input stream must be a tar archive compressed with one of the + * following algorithms: `identity` (no compression), `gzip`, `bzip2`, + * or `xz`. 
+ */ + inputStream: string; + }; + }; + responses: { + /** The content was extracted successfully */ + 200: unknown; + /** Bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** Permission denied, the volume or container rootfs is marked as read-only. */ + 403: { + schema: definitions["ErrorResponse"]; + }; + /** No such container or path does not exist inside the container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * A response header `X-Docker-Container-Path-Stat` is returned, containing + * a base64 - encoded JSON object with some filesystem header information + * about the path. + */ + ContainerArchiveInfo: { + parameters: { + path: { + /** ID or name of the container */ + id: string; + }; + query: { + /** Resource in the container’s filesystem to archive. */ + path: string; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** Bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** Container or path does not exist */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ContainerPrune: { + parameters: { + query: { + /** + * Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + * + * Available filters: + * - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + * - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
+ */ + filters?: string; + }; + }; + responses: { + /** No error */ + 200: { + schema: { + /** @description Container IDs that were deleted */ + ContainersDeleted?: string[]; + /** + * Format: int64 + * @description Disk space reclaimed in bytes + */ + SpaceReclaimed?: number; + }; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image. */ + ImageList: { + parameters: { + query: { + /** Show all images. Only images from a final layer (no children) are shown by default. */ + all?: boolean; + /** + * A JSON encoded value of the filters (a `map[string][]string`) to + * process on the images list. + * + * Available filters: + * + * - `before`=(`[:]`, `` or ``) + * - `dangling=true` + * - `label=key` or `label="key=value"` of an image label + * - `reference`=(`[:]`) + * - `since`=(`[:]`, `` or ``) + */ + filters?: string; + /** Compute and show shared size as a `SharedSize` field on each image. */ + "shared-size"?: boolean; + /** Show digest information as a `RepoDigests` field on each image. */ + digests?: boolean; + }; + }; + responses: { + /** Summary image data for the images matching the query */ + 200: { + schema: definitions["ImageSummary"][]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Build an image from a tar archive with a `Dockerfile` in it. + * + * The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + * + * The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. 
After that, each instruction is run one-by-one until the ID of the new image is output. + * + * The build is canceled if the client drops the connection by quitting or being killed. + */ + ImageBuild: { + parameters: { + body: { + /** A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz. */ + inputStream?: string; + }; + query: { + /** Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`. */ + dockerfile?: string; + /** A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters. */ + t?: string; + /** Extra hosts to add to /etc/hosts */ + extrahosts?: string; + /** A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball. */ + remote?: string; + /** Suppress verbose build output. */ + q?: boolean; + /** Do not use the cache when building the image. */ + nocache?: boolean; + /** JSON array of images used for build cache resolution. */ + cachefrom?: string; + /** Attempt to pull the image even if an older image exists locally. */ + pull?: string; + /** Remove intermediate containers after a successful build. */ + rm?: boolean; + /** Always remove intermediate containers, even upon failure. */ + forcerm?: boolean; + /** Set memory limit for build. */ + memory?: number; + /** Total memory (memory + swap). Set as `-1` to disable swap. */ + memswap?: number; + /** CPU shares (relative weight). 
*/ + cpushares?: number; + /** CPUs in which to allow execution (e.g., `0-3`, `0,1`). */ + cpusetcpus?: string; + /** The length of a CPU period in microseconds. */ + cpuperiod?: number; + /** Microseconds of CPU time that the container can get in a CPU period. */ + cpuquota?: number; + /** + * JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. + * + * For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + * + * [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + */ + buildargs?: string; + /** Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. */ + shmsize?: number; + /** Squash the resulting images layers into a single layer. *(Experimental release only.)* */ + squash?: boolean; + /** Arbitrary key/value labels to set on the image, as a JSON map of string pairs. */ + labels?: string; + /** + * Sets the networking mode for the run commands during build. Supported + * standard values are: `bridge`, `host`, `none`, and `container:`. + * Any other value is taken as a custom network's name or ID to which this + * container should connect to. + */ + networkmode?: string; + /** Platform in the format os[/arch[/variant]] */ + platform?: string; + /** Target build stage */ + target?: string; + /** BuildKit output configuration */ + outputs?: string; + }; + header: { + "Content-type"?: "application/x-tar"; + /** + * This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. 
+ * + * The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + * + * ``` + * { + * "docker.example.com": { + * "username": "janedoe", + * "password": "hunter2" + * }, + * "https://index.docker.io/v1/": { + * "username": "mobydock", + * "password": "conta1n3rize14" + * } + * } + * ``` + * + * Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + */ + "X-Registry-Config"?: string; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** Bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + BuildPrune: { + parameters: { + query: { + /** Amount of disk space in bytes to keep for cache */ + "keep-storage"?: number; + /** Remove all types of build cache */ + all?: boolean; + /** + * A JSON encoded value of the filters (a `map[string][]string`) to + * process on the list of build cache objects. + * + * Available filters: + * + * - `until=` remove cache older than ``. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. + * - `id=` + * - `parent=` + * - `type=` + * - `description=` + * - `inuse` + * - `shared` + * - `private` + */ + filters?: string; + }; + }; + responses: { + /** No error */ + 200: { + schema: { + CachesDeleted?: string[]; + /** + * Format: int64 + * @description Disk space reclaimed in bytes + */ + SpaceReclaimed?: number; + }; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Create an image by either pulling it from a registry or importing it. 
*/ + ImageCreate: { + parameters: { + query: { + /** Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed. */ + fromImage?: string; + /** Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image. */ + fromSrc?: string; + /** Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image. */ + repo?: string; + /** Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled. */ + tag?: string; + /** Set commit message for imported image. */ + message?: string; + /** + * Apply `Dockerfile` instructions to the image that is created, + * for example: `changes=ENV DEBUG=true`. + * Note that `ENV DEBUG=true` should be URI component encoded. + * + * Supported `Dockerfile` instructions: + * `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + */ + changes?: string[]; + /** + * Platform in the format os[/arch[/variant]]. + * + * When used in combination with the `fromImage` option, the daemon checks + * if the given image is present in the local image cache with the given + * OS and Architecture, and otherwise attempts to pull the image. If the + * option is not set, the host's native OS and Architecture are used. + * If the given image does not exist in the local image cache, the daemon + * attempts to pull the image with the host's native OS and Architecture. + * If the given image does exists in the local image cache, but its OS or + * architecture does not match, a warning is produced. + * + * When used with the `fromSrc` option to import an image from an archive, + * this option sets the platform information for the imported image. 
If + * the option is not set, the host's native OS and Architecture are used + * for the imported image. + */ + platform?: string; + }; + body: { + /** Image content if the value `-` has been specified in fromSrc query parameter */ + inputImage?: string; + }; + header: { + /** + * A base64url-encoded auth configuration. + * + * Refer to the [authentication section](#section/Authentication) for + * details. + */ + "X-Registry-Auth"?: string; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** repository does not exist or no read access */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Return low-level information about an image. */ + ImageInspect: { + parameters: { + path: { + /** Image name or id */ + name: string; + }; + }; + responses: { + /** No error */ + 200: { + schema: definitions["ImageInspect"]; + }; + /** No such image */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Return parent layers of an image. */ + ImageHistory: { + parameters: { + path: { + /** Image name or ID */ + name: string; + }; + }; + responses: { + /** List of image layers */ + 200: { + schema: { + Id: string; + /** Format: int64 */ + Created: number; + CreatedBy: string; + Tags: string[]; + /** Format: int64 */ + Size: number; + Comment: string; + }[]; + }; + /** No such image */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Push an image to a registry. + * + * If you wish to push an image on to a private registry, that image must + * already have a tag which references the registry. For example, + * `registry.example.com/myimage:latest`. + * + * The push is cancelled if the HTTP connection is closed. + */ + ImagePush: { + parameters: { + path: { + /** Image name or ID. 
*/ + name: string; + }; + query: { + /** The tag to associate with the image on the registry. */ + tag?: string; + }; + header: { + /** + * A base64url-encoded auth configuration. + * + * Refer to the [authentication section](#section/Authentication) for + * details. + */ + "X-Registry-Auth": string; + }; + }; + responses: { + /** No error */ + 200: unknown; + /** No such image */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Tag an image so that it becomes part of a repository. */ + ImageTag: { + parameters: { + path: { + /** Image name or ID to tag. */ + name: string; + }; + query: { + /** The repository to tag in. For example, `someuser/someimage`. */ + repo?: string; + /** The name of the new tag. */ + tag?: string; + }; + }; + responses: { + /** No error */ + 201: unknown; + /** Bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** No such image */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Conflict */ + 409: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Remove an image, along with any untagged parent images that were + * referenced by that image. + * + * Images can't be removed if they have descendant images, are being + * used by a running container or are being used by a build. 
+ */ + ImageDelete: { + parameters: { + path: { + /** Image name or ID */ + name: string; + }; + query: { + /** Remove the image even if it is being used by stopped containers or has other tags */ + force?: boolean; + /** Do not delete untagged parent images */ + noprune?: boolean; + }; + }; + responses: { + /** The image was deleted successfully */ + 200: { + schema: definitions["ImageDeleteResponseItem"][]; + }; + /** No such image */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Conflict */ + 409: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Search for an image on Docker Hub. */ + ImageSearch: { + parameters: { + query: { + /** Term to search */ + term: string; + /** Maximum number of results to return */ + limit?: number; + /** + * A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + * + * - `is-automated=(true|false)` + * - `is-official=(true|false)` + * - `stars=` Matches images that has at least 'number' stars. + */ + filters?: string; + }; + }; + responses: { + /** No error */ + 200: { + schema: { + description?: string; + is_official?: boolean; + is_automated?: boolean; + name?: string; + star_count?: number; + }[]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ImagePrune: { + parameters: { + query: { + /** + * Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + * + * - `dangling=` When set to `true` (or `1`), prune only + * unused *and* untagged images. When set to `false` + * (or `0`), all unused images are pruned. + * - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. 
+ * - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + */ + filters?: string; + }; + }; + responses: { + /** No error */ + 200: { + schema: { + /** @description Images that were deleted */ + ImagesDeleted?: definitions["ImageDeleteResponseItem"][]; + /** + * Format: int64 + * @description Disk space reclaimed in bytes + */ + SpaceReclaimed?: number; + }; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Validate credentials for a registry and, if available, get an identity + * token for accessing the registry without password. + */ + SystemAuth: { + parameters: { + body: { + /** Authentication to check */ + authConfig?: definitions["AuthConfig"]; + }; + }; + responses: { + /** An identity token was generated successfully. */ + 200: { + schema: { + /** @description The status of the authentication */ + Status: string; + /** @description An opaque token used to authenticate a user after a successful login */ + IdentityToken?: string; + }; + }; + /** No error */ + 204: never; + /** Auth error */ + 401: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SystemInfo: { + responses: { + /** No error */ + 200: { + schema: definitions["SystemInfo"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Returns the version of Docker that is running and various information about the system that Docker is running on. */ + SystemVersion: { + responses: { + /** no error */ + 200: { + schema: definitions["SystemVersion"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** This is a dummy endpoint you can use to test if the server is accessible. 
*/ + SystemPing: { + responses: { + /** no error */ + 200: { + headers: {}; + schema: string; + }; + /** server error */ + 500: { + headers: {}; + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** This is a dummy endpoint you can use to test if the server is accessible. */ + SystemPingHead: { + responses: { + /** no error */ + 200: { + headers: {}; + schema: string; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ImageCommit: { + parameters: { + body: { + /** The container configuration */ + containerConfig?: definitions["ContainerConfig"]; + }; + query: { + /** The ID or name of the container to commit */ + container?: string; + /** Repository name for the created image */ + repo?: string; + /** Tag name for the create image */ + tag?: string; + /** Commit message */ + comment?: string; + /** Author of the image (e.g., `John Hannibal Smith `) */ + author?: string; + /** Whether to pause the container before committing */ + pause?: boolean; + /** `Dockerfile` instructions to apply while committing */ + changes?: string; + }; + }; + responses: { + /** no error */ + 201: { + schema: definitions["IdResponse"]; + }; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Stream real-time events from the server. + * + * Various objects within Docker report events when something happens to them. 
+ * + * Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + * + * Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + * + * Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + * + * Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + * + * The Docker daemon reports these events: `reload` + * + * Services report these events: `create`, `update`, and `remove` + * + * Nodes report these events: `create`, `update`, and `remove` + * + * Secrets report these events: `create`, `update`, and `remove` + * + * Configs report these events: `create`, `update`, and `remove` + * + * The Builder reports `prune` events + */ + SystemEvents: { + parameters: { + query: { + /** Show events created since this timestamp then stream new events. */ + since?: string; + /** Show events created until this timestamp then stop streaming. */ + until?: string; + /** + * A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: + * + * - `config=` config name or ID + * - `container=` container name or ID + * - `daemon=` daemon name or ID + * - `event=` event type + * - `image=` image name or ID + * - `label=` image or container label + * - `network=` network name or ID + * - `node=` node ID + * - `plugin`= plugin name or ID + * - `scope`= local or swarm + * - `secret=` secret name or ID + * - `service=` service name or ID + * - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + * - `volume=` volume name + */ + filters?: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["EventMessage"]; + }; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SystemDataUsage: { + parameters: { + query: { + /** Object types, for which to compute and return data. */ + type?: ("container" | "image" | "volume" | "build-cache")[]; + }; + }; + responses: { + /** no error */ + 200: { + schema: { + /** Format: int64 */ + LayersSize?: number; + Images?: definitions["ImageSummary"][]; + Containers?: definitions["ContainerSummary"][]; + Volumes?: definitions["Volume"][]; + BuildCache?: definitions["BuildCache"][]; + }; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Get a tarball containing all images and metadata for a repository. + * + * If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. 
+ * + * ### Image tarball format + * + * An image tarball contains one directory per image layer (named using its long ID), each containing these files: + * + * - `VERSION`: currently `1.0` - the file format version + * - `json`: detailed layer information, similar to `docker inspect layer_id` + * - `layer.tar`: A tarfile containing the filesystem changes in this layer + * + * The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + * + * If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + * + * ```json + * { + * "hello-world": { + * "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + * } + * } + * ``` + */ + ImageGet: { + parameters: { + path: { + /** Image name or ID */ + name: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: string; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Get a tarball containing all images and metadata for several image + * repositories. + * + * For each value of the `names` parameter: if it is a specific name and + * tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + * returned; if it is an image ID, similarly only that image (and its parents) + * are returned and there would be no names referenced in the 'repositories' + * file for this image ID. + * + * For details on the format, see the [export image endpoint](#operation/ImageGet). + */ + ImageGetAll: { + parameters: { + query: { + /** Image names to filter by */ + names?: string[]; + }; + }; + responses: { + /** no error */ + 200: { + schema: string; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Load a set of images and tags into a repository. 
+ * + * For details on the format, see the [export image endpoint](#operation/ImageGet). + */ + ImageLoad: { + parameters: { + body: { + /** Tar archive containing images */ + imagesTarball?: string; + }; + query: { + /** Suppress progress details during load. */ + quiet?: boolean; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Run a command inside a running container. */ + ContainerExec: { + parameters: { + body: { + /** Exec configuration */ + execConfig: { + /** @description Attach to `stdin` of the exec command. */ + AttachStdin?: boolean; + /** @description Attach to `stdout` of the exec command. */ + AttachStdout?: boolean; + /** @description Attach to `stderr` of the exec command. */ + AttachStderr?: boolean; + /** @description Initial console size, as an `[height, width]` array. */ + ConsoleSize?: number[]; + /** + * @description Override the key sequence for detaching a container. Format is + * a single character `[a-Z]` or `ctrl-` where `` + * is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + */ + DetachKeys?: string; + /** @description Allocate a pseudo-TTY. */ + Tty?: boolean; + /** @description A list of environment variables in the form `["VAR=value", ...]`. */ + Env?: string[]; + /** @description Command to run, as a string or array of strings. */ + Cmd?: string[]; + /** + * @description Runs the exec process with extended privileges. + * @default false + */ + Privileged?: boolean; + /** + * @description The user, and optionally, group to run the exec process inside + * the container. Format is one of: `user`, `user:group`, `uid`, + * or `uid:gid`. + */ + User?: string; + /** @description The working directory for the exec process inside the container. 
*/ + WorkingDir?: string; + }; + }; + path: { + /** ID or name of container */ + id: string; + }; + }; + responses: { + /** no error */ + 201: { + schema: definitions["IdResponse"]; + }; + /** no such container */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** container is paused */ + 409: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Starts a previously set up exec instance. If detach is true, this endpoint + * returns immediately after starting the command. Otherwise, it sets up an + * interactive session with the command. + */ + ExecStart: { + parameters: { + body: { + execStartConfig?: { + /** @description Detach from the command. */ + Detach?: boolean; + /** @description Allocate a pseudo-TTY. */ + Tty?: boolean; + /** @description Initial console size, as an `[height, width]` array. */ + ConsoleSize?: number[]; + }; + }; + path: { + /** Exec instance ID */ + id: string; + }; + }; + responses: { + /** No error */ + 200: unknown; + /** No such exec instance */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Container is stopped or paused */ + 409: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Resize the TTY session used by an exec instance. This endpoint only works + * if `tty` was specified as part of creating and starting the exec instance. 
+ */ + ExecResize: { + parameters: { + path: { + /** Exec instance ID */ + id: string; + }; + query: { + /** Height of the TTY session in characters */ + h?: number; + /** Width of the TTY session in characters */ + w?: number; + }; + }; + responses: { + /** No error */ + 200: unknown; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** No such exec instance */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Return low-level information about an exec instance. */ + ExecInspect: { + parameters: { + path: { + /** Exec instance ID */ + id: string; + }; + }; + responses: { + /** No error */ + 200: { + schema: { + CanRemove?: boolean; + DetachKeys?: string; + ID?: string; + Running?: boolean; + ExitCode?: number; + ProcessConfig?: definitions["ProcessConfig"]; + OpenStdin?: boolean; + OpenStderr?: boolean; + OpenStdout?: boolean; + ContainerID?: string; + /** @description The system process ID for the exec process. */ + Pid?: number; + }; + }; + /** No such exec instance */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + VolumeList: { + parameters: { + query: { + /** + * JSON encoded value of the filters (a `map[string][]string`) to + * process on the volumes list. Available filters: + * + * - `dangling=` When set to `true` (or `1`), returns all + * volumes that are not in use by a container. When set to `false` + * (or `0`), only volumes that are in use by one or more + * containers are returned. + * - `driver=` Matches volumes based on their driver. + * - `label=` or `label=:` Matches volumes based on + * the presence of a `label` alone or a `label` and a value. + * - `name=` Matches all or part of a volume name. 
+ */ + filters?: string; + }; + }; + responses: { + /** Summary volume data that matches the query */ + 200: { + schema: definitions["VolumeListResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + VolumeCreate: { + parameters: { + body: { + /** Volume configuration */ + volumeConfig: definitions["VolumeCreateOptions"]; + }; + }; + responses: { + /** The volume was created successfully */ + 201: { + schema: definitions["Volume"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + VolumeInspect: { + parameters: { + path: { + /** Volume name or ID */ + name: string; + }; + }; + responses: { + /** No error */ + 200: { + schema: definitions["Volume"]; + }; + /** No such volume */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + VolumeUpdate: { + parameters: { + path: { + /** The name or ID of the volume */ + name: string; + }; + body: { + /** + * The spec of the volume to update. Currently, only Availability may + * change. All other fields must remain unchanged. + */ + body?: { + Spec?: definitions["ClusterVolumeSpec"]; + }; + }; + query: { + /** + * The version number of the volume being updated. This is required to + * avoid conflicting writes. Found in the volume's `ClusterVolume` + * field. + */ + version: number; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** no such volume */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Instruct the driver to remove the volume. 
*/ + VolumeDelete: { + parameters: { + path: { + /** Volume name or ID */ + name: string; + }; + query: { + /** Force the removal of the volume */ + force?: boolean; + }; + }; + responses: { + /** The volume was removed */ + 204: never; + /** No such volume or volume driver */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Volume is in use and cannot be removed */ + 409: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + VolumePrune: { + parameters: { + query: { + /** + * Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + * + * Available filters: + * - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + * - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + */ + filters?: string; + }; + }; + responses: { + /** No error */ + 200: { + schema: { + /** @description Volumes that were deleted */ + VolumesDeleted?: string[]; + /** + * Format: int64 + * @description Disk space reclaimed in bytes + */ + SpaceReclaimed?: number; + }; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Returns a list of networks. For details on the format, see the + * [network inspect endpoint](#operation/NetworkInspect). + * + * Note that it uses a different, smaller representation of a network than + * inspecting a single network. For example, the list of containers attached + * to the network is not propagated in API versions 1.28 and up. + */ + NetworkList: { + parameters: { + query: { + /** + * JSON encoded value of the filters (a `map[string][]string`) to process + * on the networks list. + * + * Available filters: + * + * - `dangling=` When set to `true` (or `1`), returns all + * networks that are not in use by a container. 
When set to `false` + * (or `0`), only networks that are in use by one or more + * containers are returned. + * - `driver=` Matches a network's driver. + * - `id=` Matches all or part of a network ID. + * - `label=` or `label==` of a network label. + * - `name=` Matches all or part of a network name. + * - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + * - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + */ + filters?: string; + }; + }; + responses: { + /** No error */ + 200: { + schema: definitions["Network"][]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + NetworkInspect: { + parameters: { + path: { + /** Network ID or name */ + id: string; + }; + query: { + /** Detailed inspect output for troubleshooting */ + verbose?: boolean; + /** Filter the network by scope (swarm, global, or local) */ + scope?: string; + }; + }; + responses: { + /** No error */ + 200: { + schema: definitions["Network"]; + }; + /** Network not found */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + NetworkDelete: { + parameters: { + path: { + /** Network ID or name */ + id: string; + }; + }; + responses: { + /** No error */ + 204: never; + /** operation not supported for pre-defined networks */ + 403: { + schema: definitions["ErrorResponse"]; + }; + /** no such network */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + NetworkCreate: { + parameters: { + body: { + /** Network configuration */ + networkConfig: { + /** @description The network's name. */ + Name: string; + /** + * @description Check for networks with duplicate names. 
Since Network is + * primarily keyed based on a random ID and not on the name, and + * network name is strictly a user-friendly alias to the network + * which is uniquely identified using ID, there is no guaranteed + * way to check for duplicates. CheckDuplicate is there to provide + * a best effort checking of any networks which has the same name + * but it is not guaranteed to catch all name collisions. + */ + CheckDuplicate?: boolean; + /** + * @description Name of the network driver plugin to use. + * @default bridge + */ + Driver?: string; + /** @description Restrict external access to the network. */ + Internal?: boolean; + /** + * @description Globally scoped network is manually attachable by regular + * containers from workers in swarm mode. + */ + Attachable?: boolean; + /** + * @description Ingress network is the network which provides the routing-mesh + * in swarm mode. + */ + Ingress?: boolean; + /** @description Optional custom IP scheme for the network. */ + IPAM?: definitions["IPAM"]; + /** @description Enable IPv6 on the network. */ + EnableIPv6?: boolean; + /** @description Network specific options to be used by the drivers. */ + Options?: { [key: string]: string }; + /** @description User-defined key/value metadata. */ + Labels?: { [key: string]: string }; + }; + }; + }; + responses: { + /** No error */ + 201: { + schema: { + /** @description The ID of the created network. */ + Id?: string; + Warning?: string; + }; + }; + /** operation not supported for pre-defined networks */ + 403: { + schema: definitions["ErrorResponse"]; + }; + /** plugin not found */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + NetworkConnect: { + parameters: { + path: { + /** Network ID or name */ + id: string; + }; + body: { + container: { + /** @description The ID or name of the container to connect to the network. 
*/ + Container?: string; + EndpointConfig?: definitions["EndpointSettings"]; + }; + }; + }; + responses: { + /** No error */ + 200: unknown; + /** Operation not supported for swarm scoped networks */ + 403: { + schema: definitions["ErrorResponse"]; + }; + /** Network or container not found */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + NetworkDisconnect: { + parameters: { + path: { + /** Network ID or name */ + id: string; + }; + body: { + container: { + /** @description The ID or name of the container to disconnect from the network. */ + Container?: string; + /** @description Force the container to disconnect from the network. */ + Force?: boolean; + }; + }; + }; + responses: { + /** No error */ + 200: unknown; + /** Operation not supported for swarm scoped networks */ + 403: { + schema: definitions["ErrorResponse"]; + }; + /** Network or container not found */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + NetworkPrune: { + parameters: { + query: { + /** + * Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + * + * Available filters: + * - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + * - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + */ + filters?: string; + }; + }; + responses: { + /** No error */ + 200: { + schema: { + /** @description Networks that were deleted */ + NetworksDeleted?: string[]; + }; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Returns information about installed plugins. 
*/ + PluginList: { + parameters: { + query: { + /** + * A JSON encoded value of the filters (a `map[string][]string`) to + * process on the plugin list. + * + * Available filters: + * + * - `capability=` + * - `enable=|` + */ + filters?: string; + }; + }; + responses: { + /** No error */ + 200: { + schema: definitions["Plugin"][]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + GetPluginPrivileges: { + parameters: { + query: { + /** + * The name of the plugin. The `:latest` tag is optional, and is the + * default if omitted. + */ + remote: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["PluginPrivilege"][]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Pulls and installs a plugin. After the plugin is installed, it can be + * enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + */ + PluginPull: { + parameters: { + query: { + /** + * Remote reference for plugin to install. + * + * The `:latest` tag is optional, and is used as the default if omitted. + */ + remote: string; + /** + * Local name for the pulled plugin. + * + * The `:latest` tag is optional, and is used as the default if omitted. + */ + name?: string; + }; + header: { + /** + * A base64url-encoded auth configuration to use when pulling a plugin + * from a registry. + * + * Refer to the [authentication section](#section/Authentication) for + * details. + */ + "X-Registry-Auth"?: string; + }; + body: { + body?: definitions["PluginPrivilege"][]; + }; + }; + responses: { + /** no error */ + 204: never; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + PluginInspect: { + parameters: { + path: { + /** + * The name of the plugin. The `:latest` tag is optional, and is the + * default if omitted. 
+ */ + name: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["Plugin"]; + }; + /** plugin is not installed */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + PluginDelete: { + parameters: { + path: { + /** + * The name of the plugin. The `:latest` tag is optional, and is the + * default if omitted. + */ + name: string; + }; + query: { + /** + * Disable the plugin before removing. This may result in issues if the + * plugin is in use by a container. + */ + force?: boolean; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["Plugin"]; + }; + /** plugin is not installed */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + PluginEnable: { + parameters: { + path: { + /** + * The name of the plugin. The `:latest` tag is optional, and is the + * default if omitted. + */ + name: string; + }; + query: { + /** Set the HTTP client timeout (in seconds) */ + timeout?: number; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** plugin is not installed */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + PluginDisable: { + parameters: { + path: { + /** + * The name of the plugin. The `:latest` tag is optional, and is the + * default if omitted. + */ + name: string; + }; + query: { + /** Force disable a plugin even if still in use. */ + force?: boolean; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** plugin is not installed */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + PluginUpgrade: { + parameters: { + path: { + /** + * The name of the plugin. The `:latest` tag is optional, and is the + * default if omitted. 
+ */ + name: string; + }; + query: { + /** + * Remote reference to upgrade to. + * + * The `:latest` tag is optional, and is used as the default if omitted. + */ + remote: string; + }; + header: { + /** + * A base64url-encoded auth configuration to use when pulling a plugin + * from a registry. + * + * Refer to the [authentication section](#section/Authentication) for + * details. + */ + "X-Registry-Auth"?: string; + }; + body: { + body?: definitions["PluginPrivilege"][]; + }; + }; + responses: { + /** no error */ + 204: never; + /** plugin not installed */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + PluginCreate: { + parameters: { + query: { + /** + * The name of the plugin. The `:latest` tag is optional, and is the + * default if omitted. + */ + name: string; + }; + body: { + /** Path to tar containing plugin rootfs and manifest */ + tarContext?: string; + }; + }; + responses: { + /** no error */ + 204: never; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Push a plugin to the registry. */ + PluginPush: { + parameters: { + path: { + /** + * The name of the plugin. The `:latest` tag is optional, and is the + * default if omitted. + */ + name: string; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** plugin not installed */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + PluginSet: { + parameters: { + path: { + /** + * The name of the plugin. The `:latest` tag is optional, and is the + * default if omitted. 
+ */ + name: string; + }; + body: { + body?: string[]; + }; + }; + responses: { + /** No error */ + 204: never; + /** Plugin not installed */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + NodeList: { + parameters: { + query: { + /** + * Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). + * + * Available filters: + * - `id=` + * - `label=` + * - `membership=`(`accepted`|`pending`)` + * - `name=` + * - `node.label=` + * - `role=`(`manager`|`worker`)` + */ + filters?: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["Node"][]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + NodeInspect: { + parameters: { + path: { + /** The ID or name of the node */ + id: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["Node"]; + }; + /** no such node */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + NodeDelete: { + parameters: { + path: { + /** The ID or name of the node */ + id: string; + }; + query: { + /** Force remove a node from the swarm */ + force?: boolean; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** no such node */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + NodeUpdate: { + parameters: { + path: { + /** The ID of the node */ + id: string; + }; + body: { + body?: definitions["NodeSpec"]; + }; + query: { + /** + * The version number of the node object being 
updated. This is required + * to avoid conflicting writes. + */ + version: number; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** no such node */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SwarmInspect: { + responses: { + /** no error */ + 200: { + schema: definitions["Swarm"]; + }; + /** no such swarm */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SwarmInit: { + parameters: { + body: { + body: { + /** + * @description Listen address used for inter-manager communication, as well + * as determining the networking interface used for the VXLAN + * Tunnel Endpoint (VTEP). This can either be an address/port + * combination in the form `192.168.1.1:4567`, or an interface + * followed by a port number, like `eth0:4567`. If the port number + * is omitted, the default swarm listening port is used. + */ + ListenAddr?: string; + /** + * @description Externally reachable address advertised to other nodes. This + * can either be an address/port combination in the form + * `192.168.1.1:4567`, or an interface followed by a port number, + * like `eth0:4567`. If the port number is omitted, the port + * number from the listen address is used. If `AdvertiseAddr` is + * not specified, it will be automatically detected when possible. + */ + AdvertiseAddr?: string; + /** + * @description Address or interface to use for data path traffic (format: + * ``), for example, `192.168.1.1`, or an interface, + * like `eth0`. If `DataPathAddr` is unspecified, the same address + * as `AdvertiseAddr` is used. 
+ * + * The `DataPathAddr` specifies the address that global scope + * network drivers will publish towards other nodes in order to + * reach the containers running on this node. Using this parameter + * it is possible to separate the container data traffic from the + * management traffic of the cluster. + */ + DataPathAddr?: string; + /** + * Format: uint32 + * @description DataPathPort specifies the data path port number for data traffic. + * Acceptable port range is 1024 to 49151. + * if no port is set or is set to 0, default port 4789 will be used. + */ + DataPathPort?: number; + /** + * @description Default Address Pool specifies default subnet pools for global + * scope networks. + */ + DefaultAddrPool?: string[]; + /** @description Force creation of a new swarm. */ + ForceNewCluster?: boolean; + /** + * Format: uint32 + * @description SubnetSize specifies the subnet size of the networks created + * from the default subnet pool. + */ + SubnetSize?: number; + Spec?: definitions["SwarmSpec"]; + }; + }; + }; + responses: { + /** no error */ + 200: { + schema: string; + }; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is already part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SwarmJoin: { + parameters: { + body: { + body: { + /** + * @description Listen address used for inter-manager communication if the node + * gets promoted to manager, as well as determining the networking + * interface used for the VXLAN Tunnel Endpoint (VTEP). + */ + ListenAddr?: string; + /** + * @description Externally reachable address advertised to other nodes. This + * can either be an address/port combination in the form + * `192.168.1.1:4567`, or an interface followed by a port number, + * like `eth0:4567`. If the port number is omitted, the port + * number from the listen address is used. 
If `AdvertiseAddr` is + * not specified, it will be automatically detected when possible. + */ + AdvertiseAddr?: string; + /** + * @description Address or interface to use for data path traffic (format: + * ``), for example, `192.168.1.1`, or an interface, + * like `eth0`. If `DataPathAddr` is unspecified, the same address + * as `AdvertiseAddr` is used. + * + * The `DataPathAddr` specifies the address that global scope + * network drivers will publish towards other nodes in order to + * reach the containers running on this node. Using this parameter + * it is possible to separate the container data traffic from the + * management traffic of the cluster. + */ + DataPathAddr?: string; + /** @description Addresses of manager nodes already participating in the swarm. */ + RemoteAddrs?: string[]; + /** @description Secret token for joining this swarm. */ + JoinToken?: string; + }; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is already part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SwarmLeave: { + parameters: { + query: { + /** + * Force leave swarm, even if this is the last manager or that it will + * break the cluster. + */ + force?: boolean; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SwarmUpdate: { + parameters: { + body: { + body: definitions["SwarmSpec"]; + }; + query: { + /** + * The version number of the swarm object being updated. This is + * required to avoid conflicting writes. + */ + version: number; + /** Rotate the worker join token. */ + rotateWorkerToken?: boolean; + /** Rotate the manager join token. 
*/ + rotateManagerToken?: boolean; + /** Rotate the manager unlock key. */ + rotateManagerUnlockKey?: boolean; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SwarmUnlockkey: { + responses: { + /** no error */ + 200: { + schema: { + /** @description The swarm's unlock key. */ + UnlockKey?: string; + }; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SwarmUnlock: { + parameters: { + body: { + body: { + /** @description The swarm's unlock key. */ + UnlockKey?: string; + }; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ServiceList: { + parameters: { + query: { + /** + * A JSON encoded value of the filters (a `map[string][]string`) to + * process on the services list. + * + * Available filters: + * + * - `id=` + * - `label=` + * - `mode=["replicated"|"global"]` + * - `name=` + */ + filters?: string; + /** Include service status, with count of running and desired tasks. */ + status?: boolean; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["Service"][]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ServiceCreate: { + parameters: { + body: { + body: definitions["ServiceSpec"] & { [key: string]: unknown }; + }; + header: { + /** + * A base64url-encoded auth configuration for pulling from private + * registries. 
+ * + * Refer to the [authentication section](#section/Authentication) for + * details. + */ + "X-Registry-Auth"?: string; + }; + }; + responses: { + /** no error */ + 201: { + schema: { + /** @description The ID of the created service. */ + ID?: string; + /** @description Optional warning message */ + Warning?: string; + }; + }; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** network is not eligible for services */ + 403: { + schema: definitions["ErrorResponse"]; + }; + /** name conflicts with an existing service */ + 409: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ServiceInspect: { + parameters: { + path: { + /** ID or name of service. */ + id: string; + }; + query: { + /** Fill empty fields with default values. */ + insertDefaults?: boolean; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["Service"]; + }; + /** no such service */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ServiceDelete: { + parameters: { + path: { + /** ID or name of service. */ + id: string; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** no such service */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ServiceUpdate: { + parameters: { + path: { + /** ID or name of service. */ + id: string; + }; + body: { + body: definitions["ServiceSpec"] & { [key: string]: unknown }; + }; + query: { + /** + * The version number of the service object being updated. 
This is + * required to avoid conflicting writes. + * This version number should be the value as currently set on the + * service *before* the update. You can find the current version by + * calling `GET /services/{id}` + */ + version: number; + /** + * If the `X-Registry-Auth` header is not specified, this parameter + * indicates where to find registry authorization credentials. + */ + registryAuthFrom?: "spec" | "previous-spec"; + /** + * Set to this parameter to `previous` to cause a server-side rollback + * to the previous service spec. The supplied spec will be ignored in + * this case. + */ + rollback?: string; + }; + header: { + /** + * A base64url-encoded auth configuration for pulling from private + * registries. + * + * Refer to the [authentication section](#section/Authentication) for + * details. + */ + "X-Registry-Auth"?: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["ServiceUpdateResponse"]; + }; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** no such service */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Get `stdout` and `stderr` logs from a service. See also + * [`/containers/{id}/logs`](#operation/ContainerLogs). + * + * **Note**: This endpoint works only for services with the `local`, + * `json-file` or `journald` logging drivers. + */ + ServiceLogs: { + parameters: { + path: { + /** ID or name of the service */ + id: string; + }; + query: { + /** Show service context and extra details provided to logs. */ + details?: boolean; + /** Keep connection after returning logs. 
*/ + follow?: boolean; + /** Return logs from `stdout` */ + stdout?: boolean; + /** Return logs from `stderr` */ + stderr?: boolean; + /** Only return logs since this time, as a UNIX timestamp */ + since?: number; + /** Add timestamps to every log line */ + timestamps?: boolean; + /** + * Only return this number of log lines from the end of the logs. + * Specify as an integer or `all` to output all log lines. + */ + tail?: string; + }; + }; + responses: { + /** logs returned as a stream in response body */ + 200: { + schema: string; + }; + /** no such service */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + TaskList: { + parameters: { + query: { + /** + * A JSON encoded value of the filters (a `map[string][]string`) to + * process on the tasks list. + * + * Available filters: + * + * - `desired-state=(running | shutdown | accepted)` + * - `id=` + * - `label=key` or `label="key=value"` + * - `name=` + * - `node=` + * - `service=` + */ + filters?: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["Task"][]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + TaskInspect: { + parameters: { + path: { + /** ID of the task */ + id: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["Task"]; + }; + /** no such task */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Get `stdout` and `stderr` logs from a task. + * See also [`/containers/{id}/logs`](#operation/ContainerLogs). 
+ * + * **Note**: This endpoint works only for services with the `local`, + * `json-file` or `journald` logging drivers. + */ + TaskLogs: { + parameters: { + path: { + /** ID of the task */ + id: string; + }; + query: { + /** Show task context and extra details provided to logs. */ + details?: boolean; + /** Keep connection after returning logs. */ + follow?: boolean; + /** Return logs from `stdout` */ + stdout?: boolean; + /** Return logs from `stderr` */ + stderr?: boolean; + /** Only return logs since this time, as a UNIX timestamp */ + since?: number; + /** Add timestamps to every log line */ + timestamps?: boolean; + /** + * Only return this number of log lines from the end of the logs. + * Specify as an integer or `all` to output all log lines. + */ + tail?: string; + }; + }; + responses: { + /** logs returned as a stream in response body */ + 200: { + schema: string; + }; + /** no such task */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SecretList: { + parameters: { + query: { + /** + * A JSON encoded value of the filters (a `map[string][]string`) to + * process on the secrets list. 
+ * + * Available filters: + * + * - `id=` + * - `label= or label==value` + * - `name=` + * - `names=` + */ + filters?: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["Secret"][]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SecretCreate: { + parameters: { + body: { + body?: definitions["SecretSpec"] & { [key: string]: unknown }; + }; + }; + responses: { + /** no error */ + 201: { + schema: definitions["IdResponse"]; + }; + /** name conflicts with an existing object */ + 409: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SecretInspect: { + parameters: { + path: { + /** ID of the secret */ + id: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["Secret"]; + }; + /** secret not found */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SecretDelete: { + parameters: { + path: { + /** ID of the secret */ + id: string; + }; + }; + responses: { + /** no error */ + 204: never; + /** secret not found */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + SecretUpdate: { + parameters: { + path: { + /** The ID or name of the secret */ + id: string; + }; + body: { + /** + * The spec of the secret to update. Currently, only the Labels field + * can be updated. 
All other fields must remain unchanged from the + * [SecretInspect endpoint](#operation/SecretInspect) response values. + */ + body?: definitions["SecretSpec"]; + }; + query: { + /** + * The version number of the secret object being updated. This is + * required to avoid conflicting writes. + */ + version: number; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** no such secret */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ConfigList: { + parameters: { + query: { + /** + * A JSON encoded value of the filters (a `map[string][]string`) to + * process on the configs list. + * + * Available filters: + * + * - `id=` + * - `label= or label==value` + * - `name=` + * - `names=` + */ + filters?: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["Config"][]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ConfigCreate: { + parameters: { + body: { + body?: definitions["ConfigSpec"] & { [key: string]: unknown }; + }; + }; + responses: { + /** no error */ + 201: { + schema: definitions["IdResponse"]; + }; + /** name conflicts with an existing object */ + 409: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ConfigInspect: { + parameters: { + path: { + /** ID of the config */ + id: string; + }; + }; + responses: { + /** no error */ + 200: { + schema: definitions["Config"]; + }; + /** config not found */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** 
server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ConfigDelete: { + parameters: { + path: { + /** ID of the config */ + id: string; + }; + }; + responses: { + /** no error */ + 204: never; + /** config not found */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + ConfigUpdate: { + parameters: { + path: { + /** The ID or name of the config */ + id: string; + }; + body: { + /** + * The spec of the config to update. Currently, only the Labels field + * can be updated. All other fields must remain unchanged from the + * [ConfigInspect endpoint](#operation/ConfigInspect) response values. + */ + body?: definitions["ConfigSpec"]; + }; + query: { + /** + * The version number of the config object being updated. This is + * required to avoid conflicting writes. + */ + version: number; + }; + }; + responses: { + /** no error */ + 200: unknown; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** no such config */ + 404: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + /** node is not part of a swarm */ + 503: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** Return image digest and platform information by contacting the registry. 
*/ + DistributionInspect: { + parameters: { + path: { + /** Image name or id */ + name: string; + }; + }; + responses: { + /** descriptor and platform information */ + 200: { + schema: definitions["DistributionInspect"]; + }; + /** Failed authentication or no image found */ + 401: { + schema: definitions["ErrorResponse"]; + }; + /** Server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; + /** + * Start a new interactive session with a server. Session allows server to + * call back to the client for advanced capabilities. + * + * ### Hijacking + * + * This endpoint hijacks the HTTP connection to HTTP2 transport that allows + * the client to expose gPRC services on that connection. + * + * For example, the client sends this request to upgrade the connection: + * + * ``` + * POST /session HTTP/1.1 + * Upgrade: h2c + * Connection: Upgrade + * ``` + * + * The Docker daemon responds with a `101 UPGRADED` response follow with + * the raw stream: + * + * ``` + * HTTP/1.1 101 UPGRADED + * Connection: Upgrade + * Upgrade: h2c + * ``` + */ + Session: { + responses: { + /** no error, hijacking successful */ + 101: unknown; + /** bad parameter */ + 400: { + schema: definitions["ErrorResponse"]; + }; + /** server error */ + 500: { + schema: definitions["ErrorResponse"]; + }; + }; + }; +} + +export interface external {} diff --git a/src/server/utils/logger.ts b/src/server/utils/logger.ts index e7328ce..4084fc3 100644 --- a/src/server/utils/logger.ts +++ b/src/server/utils/logger.ts @@ -24,7 +24,13 @@ const logger = createLogger({ if (others[SPLAT]) { const splat = others[SPLAT] as unknown[]; if (splat.length > 0) { - return base + " " + splat.map((s) => util.inspect(s)).join("\n"); + const formattedSplat = splat + .map((s) => util.inspect(s, { colors: true, showHidden: true })) + .flatMap((s) => s.split("\n")) + .map((s) => ` ${s}`) + .join("\n"); + + return base + "\n" + formattedSplat; } }