# Show Docker's overall disk usage
docker system df
# Show per-image / per-container / per-volume disk usage details
docker system df -v
# Copy a file or directory from a container to the host (current directory)
docker cp <container-id>:<path> .
# Install prerequisites
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# Add the Docker CE yum repository
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# List the available docker-ce versions
sudo yum list docker-ce --showduplicates | sort -r
# Pick one version and install it
sudo yum install -y docker-ce-20.10.9
# Start Docker and enable it at boot
sudo systemctl enable --now docker
# Uninstall Docker and all related packages
sudo yum remove -y docker docker-ce docker-common docker-selinux docker-engine
# Edit the Docker daemon configuration file
sudo vi /etc/docker/daemon.json
{
  "registry-mirrors": [],
  "insecure-registries": [
    "10.188.132.44:5000",
    "10.188.132.123:5000",
    "10.176.2.207:5000"
  ],
  "data-root": "/data/docker/system",
  "debug": true,
  "experimental": false,
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "1",
    "labels": "production_status",
    "env": "os,customer"
  }
}
# Restart the Docker service to apply daemon.json changes
sudo systemctl restart docker
# Or make dockerd re-read daemon.json without a full restart
# NOTE(review): dockerd reloads only a subset of daemon.json options on SIGHUP — verify the option you changed is reloadable
sudo kill -SIGHUP $(pidof dockerd)
其他方法
# Create a directory for images and containers; prefer a location with plenty of disk space
mkdir -p /data/docker/system
# If the unit file is not at the path below, locate it with:
#   sudo find / -name 'docker.service' -type f
sudo vim /usr/lib/systemd/system/docker.service
# Append the private registry and the storage location to the ExecStart line:
#   --insecure-registry 10.176.66.20:5000 --graph /data/docker/system
# NOTE(review): --graph is deprecated; on newer Docker releases use --data-root instead
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --insecure-registry 10.176.66.20:5000 --graph /data/docker/system
# Reload systemd unit files
sudo systemctl daemon-reload
# Restart the Docker service
sudo systemctl restart docker
# Add the current user to the docker group (quoted to be safe with unusual usernames)
sudo gpasswd -a "$USER" docker
# Re-evaluate group membership in the current shell session
newgrp docker
# Restart the Docker service
sudo systemctl restart docker
1.自带插件
# Install the Docker Compose v2 CLI plugin (invoked as `docker compose`)
sudo yum install -y docker-compose-plugin
2.下载
# Download the docker-compose standalone binary
sudo curl -L "https://github.com/docker/compose/releases/download/v2.17.2/docker-compose-linux-x86_64" -o /usr/bin/docker-compose
# Make it executable — 755, not 777: a world-writable binary run by root is a security risk
sudo chmod 755 /usr/bin/docker-compose
# Run the image interactively with a bash shell
docker run -it 10.176.2.207:5000/lemes-cloud/lemes-gateway:pgsql-master-202306181324 /bin/bash
# Create (but do not start) a container named "dumy"
# Fixed: the original used a Unicode en dash ("–name"), which docker rejects — it must be ASCII "--name"
docker create -it --name dumy 10.188.132.123:5000/lemes-cloud/lemes-gateway:develop-202312111536 bash
# Copy /data from the "dumy" container into the current directory
docker cp dumy:/data .
# Create a container without starting it
docker create -it --name dumy 10.188.132.123:5000/library/mysql2postgresql-jdbc-agent:1.0.0 bash
# Copy the file out of the container
docker cp dumy:/tmp/mysql2postgresql-jdbc-agent-1.0.0.jar .
# Remove the temporary container
docker rm dumy
# Export an image to a tar archive
docker save -o lemes-web.tar 10.176.2.207:5000/lemes-cloud/lemes-web:pgsql-master-202306272212
# Restore an image from the tar archive
docker load -i lemes-web.tar
假设我们有已经启动的容器 my-nginx, 目前的端口映射为 80:80, 现在需要修改为 8080:80。
通过 docker ps | grep nginx 查看容器的 CONTAINER ID。
进入 docker 的磁盘存储地址, 默认为 /var/lib/docker/containers (如果修改了 daemon.json 中的 data-root, 则需要使用对应地址下的 containers 目录)。在 containers 下找到对应 CONTAINER ID 的文件夹, 编辑其下的 hostconfig.json 文件: PortBindings 下 80/tcp 对应的 HostPort 目前应该是 80, 将其修改为 8080。
重启 docker 服务: systemctl restart docker。
注意: 如果需要修改容器内的端口, 比如要修改为 80:8080, 那么除了将 hostconfig.json 中 PortBindings 的 key 从 80/tcp 改为 8080/tcp 外, 还需要将 config.v2.json 中 ExposedPorts 的 80/tcp 改为 8080/tcp。(原文此处写的是 config.v2.json 中的 PortBindings, 与前文 hostconfig.json 的说法不一致, 已按前文修正。)
# Raise the maximum number of user namespaces
# NOTE(review): presumably needed because some CentOS 7 kernels default this to 0,
# which can prevent dockerd from starting — confirm against the failure message
sudo sysctl -w user.max_user_namespaces=150000
Starting the docker daemon failed.
$ sudo systemctl start dockerJob for docker.service failed because the control process exited with error code. See "systemctl status docker.service" and "journalctl -xe" for details.`$ journalctl -xe-- Subject: Unit docker.socket has finished start-up-- Defined-By: systemd-- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel---- Unit docker.socket has finished starting up.---- The start-up result is done.May 19 10:26:34 szxlpidgapp06 systemd[1]: Starting Docker Application Container Engine...-- Subject: Unit docker.service has begun start-up-- Defined-By: systemd-- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel---- Unit docker.service has begun starting up.May 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.580459394+08:00" level=info msg="Starting up"May 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.580654556+08:00" level=debug msg="Listener created for HTTP on fd ()"May 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.580999123+08:00" level=debug msg="Golang's threads limit set to 922770"May 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.582149991+08:00" level=info msg="parsed scheme: \"unix\"" module=grpcMay 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.582178725+08:00" level=info msg="scheme \"unix\" not registered, fallback to default scheme" module=grpcMay 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.582285273+08:00" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock 0 <nil>}] <nil>}" module=grpcMay 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.582321352+08:00" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpcMay 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.583565030+08:00" level=info msg="parsed scheme: \"unix\"" module=grpcMay 19 10:26:34 szxlpidgapp06 dockerd[33200]: 
time="2022-05-19T10:26:34.583585521+08:00" level=info msg="scheme \"unix\" not registered, fallback to default scheme" module=grpcMay 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.583602813+08:00" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock 0 <nil>}] <nil>}" module=grpcMay 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.583610828+08:00" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpcMay 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.584849300+08:00" level=debug msg="Using default logging driver json-file"May 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.584898470+08:00" level=debug msg="processing event stream" module=libcontainerd namespace=plugins.mobyMay 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.584902148+08:00" level=debug msg="[graphdriver] priority list: [btrfs zfs overlay2 aufs overlay devicemapper vfs]"May 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.594035041+08:00" level=debug msg="backingFs=xfs, projectQuotaSupported=false, indexOff=\"index=off,\"" storage-driver=overlay2May 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.594052376+08:00" level=info msg="[graphdriver] using prior storage driver: overlay2"May 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.594062706+08:00" level=debug msg="Initialized graph driver overlay2"May 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.594715135+08:00" level=warning msg="Your kernel does not support cgroup memory limit"May 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.594726229+08:00" level=warning msg="Unable to find cpu cgroup in mounts"May 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.594731880+08:00" level=warning msg="Unable to find blkio cgroup in mounts"May 19 10:26:34 szxlpidgapp06 
dockerd[33200]: time="2022-05-19T10:26:34.594737414+08:00" level=warning msg="Unable to find cpuset cgroup in mounts"May 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.594741722+08:00" level=warning msg="mountpoint for pids not found"May 19 10:26:34 szxlpidgapp06 dockerd[33200]: time="2022-05-19T10:26:34.594868843+08:00" level=debug msg="Cleaning up old mountid : start."May 19 10:26:34 szxlpidgapp06 dockerd[33200]: failed to start daemon: Devices cgroup isn't mountedMay 19 10:26:34 szxlpidgapp06 systemd[1]: docker.service: main process exited, code=exited, status=1/FAILUREMay 19 10:26:34 szxlpidgapp06 systemd[1]: Failed to start Docker Application Container Engine.-- Subject: Unit docker.service has failed-- Defined-By: systemd-- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel---- Unit docker.service has failed.---- The result is failed.May 19 10:26:34 szxlpidgapp06 systemd[1]: Unit docker.service entered failed state.May 19 10:26:34 szxlpidgapp06 systemd[1]: docker.service failed.
The key message here is: failed to start daemon: Devices cgroup isn't mounted.
We check docker config by https://raw.githubusercontent.com/moby/moby/master/contrib/check-config.sh
warning: /proc/config.gz does not exist, searching other paths for kernel config ...info: reading kernel config from /boot/config-3.10.0-1160.62.1.el7.x86_64 ...Generally Necessary:- cgroup hierarchy: nonexistent?? (see https://github.com/tianon/cgroupfs-mount)- CONFIG_NAMESPACES: enabled- CONFIG_NET_NS: enabled- CONFIG_PID_NS: enabled- CONFIG_IPC_NS: enabled- CONFIG_UTS_NS: enabled- CONFIG_CGROUPS: enabled- CONFIG_CGROUP_CPUACCT: enabled- CONFIG_CGROUP_DEVICE: enabled- CONFIG_CGROUP_FREEZER: enabled- CONFIG_CGROUP_SCHED: enabled- CONFIG_CPUSETS: enabled- CONFIG_MEMCG: enabled- CONFIG_KEYS: enabled- CONFIG_VETH: enabled (as module)- CONFIG_BRIDGE: enabled (as module)- CONFIG_BRIDGE_NETFILTER: enabled (as module)- CONFIG_IP_NF_FILTER: enabled (as module)- CONFIG_IP_NF_TARGET_MASQUERADE: enabled (as module)- CONFIG_NETFILTER_XT_MATCH_ADDRTYPE: enabled (as module)- CONFIG_NETFILTER_XT_MATCH_CONNTRACK: enabled (as module)- CONFIG_NETFILTER_XT_MATCH_IPVS: enabled (as module)- CONFIG_NETFILTER_XT_MARK: enabled (as module)- CONFIG_IP_NF_NAT: enabled (as module)- CONFIG_NF_NAT: enabled (as module)- CONFIG_POSIX_MQUEUE: enabled- CONFIG_DEVPTS_MULTIPLE_INSTANCES: enabled- CONFIG_NF_NAT_IPV4: enabled (as module)- CONFIG_NF_NAT_NEEDED: enabled
The check reports cgroup hierarchy: nonexistent??, so we mount the cgroup filesystem with the following script.
reference url: https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
#!/bin/sh
# Mount the cgroup (v1) controller hierarchies under /sys/fs/cgroup,
# as Docker/LXC expect on hosts where systemd has not done so.
# Copyright 2011 Canonical, Inc
#           2014 Tianon Gravi
# Author: Serge Hallyn <serge.hallyn@canonical.com>
#         Tianon Gravi <tianon@debian.org>
set -e

# for simplicity this script provides no flexibility

# if cgroup is mounted by fstab, don't run
# don't get too smart - bail on any uncommented entry with 'cgroup' in it
if grep -v '^#' /etc/fstab | grep -q cgroup; then
	echo 'cgroups mounted from fstab, not mounting /sys/fs/cgroup'
	exit 0
fi

# kernel provides cgroups?
if [ ! -e /proc/cgroups ]; then
	exit 0
fi

# if we don't even have the directory we need, something else must be wrong
if [ ! -d /sys/fs/cgroup ]; then
	exit 0
fi

# mount /sys/fs/cgroup if not already done
if ! mountpoint -q /sys/fs/cgroup; then
	mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
fi

cd /sys/fs/cgroup

# get/mount list of enabled cgroup controllers
# ($4 in /proc/cgroups is the "enabled" column)
for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
	mkdir -p "$sys"
	if ! mountpoint -q "$sys"; then
		if ! mount -n -t cgroup -o "$sys" cgroup "$sys"; then
			rmdir "$sys" || true
		fi
	fi
done

# example /proc/cgroups:
#  #subsys_name	hierarchy	num_cgroups	enabled
#  cpuset	2	3	1
#  cpu	3	3	1
#  cpuacct	4	3	1
#  memory	5	3	0
#  devices	6	3	1
#  freezer	7	3	1
#  blkio	8	3	1

# enable cgroups memory hierarchy, like systemd does (and lxc/docker desires)
# https://github.com/systemd/systemd/blob/v245/src/core/cgroup.c#L2983
# https://bugs.debian.org/940713
if [ -e /sys/fs/cgroup/memory/memory.use_hierarchy ]; then
	echo 1 > /sys/fs/cgroup/memory/memory.use_hierarchy
fi

exit 0
通过 systemctl status docker
查看状态, 发现并没有关闭成功
我们可以通过如下脚本, 进行强制关闭
# Force-kill containerd and dockerd repeatedly until no processes remain.
# The loop exits when kill returns non-zero (pidof no longer finds any PID).
# NOTE(review): kill -9 gives the daemons no chance to clean up — try SIGTERM first.
while true; do
  kill -9 $(pidof containerd) $(pidof dockerd)
  if [[ ! "$?" = "0" ]]; then
    break
  fi
done
然后再次启动 docker
即可