Installing Docker from packages
Official docs: https://docs.docker.com/engine/install/
Aliyun mirror: https://developer.aliyun.com/mirror/docker-ce?spm=a2c6h.13651102.0.0.3e221b11guHCWE
https://docs.docker.com/install/linux/docker-ce/ubuntu/
# Ubuntu
# Step 1: install the required system tools
sudo apt-get update
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
# Step 2: install the GPG key
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
# Step 3: add the repository
sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
# Step 4: update the package index and install Docker CE
sudo apt-get -y update
sudo apt-get -y install docker-ce
# To install a specific version of Docker CE:
# Step 1: list the available Docker CE versions:
# apt-cache madison docker-ce
# docker-ce | 17.03.1~ce-0~ubuntu-xenial | https://mirrors.aliyun.com/docker-ce/linux/ubuntu xenial/stable amd64 Packages
# docker-ce | 17.03.0~ce-0~ubuntu-xenial | https://mirrors.aliyun.com/docker-ce/linux/ubuntu xenial/stable amd64 Packages
# Step 2: install the chosen version (VERSION is e.g. 17.03.1~ce-0~ubuntu-xenial from the listing above)
# sudo apt-get -y install docker-ce=[VERSION]
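# Example, pinning to the first version from the listing above:
# sudo apt-get -y install docker-ce=17.03.1~ce-0~ubuntu-xenial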
# CentOS
# Step 1: install the required system tools
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# Step 2: add the repository
sudo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Step 3: point the repository at the Aliyun mirror
sudo sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# Step 4: refresh the cache and install Docker CE
sudo yum makecache fast
sudo yum -y install docker-ce
# Step 5: start the Docker service
sudo service docker start
# Note:
# The official repo only enables the latest stable packages by default; other channels are available by editing the repo file. For example, the test channel is disabled by default and can be enabled as follows (other channels work the same way):
# vim /etc/yum.repos.d/docker-ce.repo
# under [docker-ce-test], change enabled=0 to enabled=1
#
# To install a specific version of Docker CE:
# Step 1: list the available Docker CE versions:
# yum list docker-ce.x86_64 --showduplicates | sort -r
# Loading mirror speeds from cached hostfile
# Loaded plugins: branch, fastestmirror, langpacks
# docker-ce.x86_64 17.03.1.ce-1.el7.centos docker-ce-stable
# docker-ce.x86_64 17.03.1.ce-1.el7.centos @docker-ce-stable
# docker-ce.x86_64 17.03.0.ce-1.el7.centos docker-ce-stable
# Available Packages
# Step 2: install the chosen version (VERSION is e.g. 17.03.0.ce-1.el7.centos from the listing above)
# sudo yum -y install docker-ce-[VERSION]
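# Example, pinning to a version from the listing above:
# sudo yum -y install docker-ce-17.03.1.ce-1.el7.centos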
Installing Docker from the static binaries
#!/bin/bash
#
#********************************************************************
#Author: shuhong
#QQ: 985347841
#Date: 2022-10-14
#FileName: install_docker.sh
#URL: hhhhhh
#Description: The test script
#Copyright (C): 2022 All rights reserved
#********************************************************************
DOCKER_VERSION=20.10.19
#URL=https://mirrors.aliyun.com
URL=https://download.docker.com
prepare () {
    if [ ! -e docker-${DOCKER_VERSION}.tgz ];then
        #wget ${URL}/docker-ce/linux/static/stable/x86_64/docker-${DOCKER_VERSION}.tgz
        wget ${URL}/linux/static/stable/x86_64/docker-${DOCKER_VERSION}.tgz || { echo "download failed"; exit 1; }
    fi
}
install_docker () {
tar xf docker-${DOCKER_VERSION}.tgz -C /usr/local/
cp /usr/local/docker/* /usr/bin/
cat > /lib/systemd/system/docker.service <<-EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issue still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd -H unix:///var/run/docker.sock
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this option.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
}
start_docker (){
systemctl enable --now docker
docker info
}
config_docker () {
mkdir -p /etc/docker
tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://si7y70hh.mirror.aliyuncs.com"]
}
EOF
systemctl restart docker
}
prepare
install_docker
config_docker
start_docker
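A quick sanity check after the script finishes (a minimal sketch; assumes the unit started cleanly):
systemctl is-active docker    # should print "active"
docker version                # client and server versions should both appear
docker info                   # the configured registry mirror should be listed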
Image management
Official registry: http://hub.docker.com
# Search for images
[root@localhost ~]#docker search nginx
[root@localhost ~]#docker search --filter=stars=100 centos
# Pull an image
[root@localhost ~]#docker pull alpine
# List images
[root@localhost ~]#docker images
# Image storage path
[root@localhost ~]#ls /var/lib/docker/overlay2/
[root@localhost ~]#docker images --no-trunc #show full image IDs
[root@localhost ~]#docker images -q #show image IDs only
# Show detailed image information
[root@localhost ~]#docker inspect alpine
# Export a single image
[root@localhost ~]#docker save ubuntu:22.10 -o ubuntu.tar
[root@localhost ~]#docker save ubuntu:22.10 > ubuntu.tar
# Export images in bulk
[root@localhost ~]#docker images | awk 'NR!=1{print $1,$2}' | while read repo tag ;do docker save $repo:$tag -o /opt/$repo-$tag.tar ;done
[root@localhost ~]#for i in $(docker image ls --format "{{.Repository}}:{{.Tag}}");do docker save $i -o $(echo $i|cut -d: -f1).tar ;done
[root@localhost ~]#docker save $(docker images -qa) -o all.tar
[root@localhost ~]#docker save $(docker images | awk 'NR!=1{print $1":"$2}') -o all.tar
[root@localhost ~]#docker image save $(docker image ls --format "{{.Repository}}:{{.Tag}}") -o all.tar
# Import images
[root@ubuntu2004 ~]#docker load -i alpine.tar
[root@ubuntu2004 ~]#docker load < alpine.tar
[root@ubuntu2004 ~]#docker load -i all.tar
# Delete images
[root@ubuntu2004 ~]#docker rmi ubuntu:22.10 #remove a tag
[root@ubuntu2004 ~]#docker rmi feb5d9fea6a5 #remove by image ID, use with caution
[root@ubuntu2004 ~]#docker rmi -f feb5d9fea6a5 #force removal
[root@ubuntu2004 ~]#docker rmi -f $(docker images -q) #force-remove every image
# Prune unused images
[root@ubuntu2004 ~]#docker image prune #remove dangling images
[root@ubuntu2004 ~]#docker image prune -a #remove all images not used by any container
# Tag an image
[root@ubuntu2004 ~]#docker tag ubuntu:22.10 ubuntu2210:1
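Tagging is also how an image is named for a private registry before pushing; a hedged sketch (registry.example.org is a placeholder):
docker tag ubuntu:22.10 registry.example.org/base/ubuntu:22.10   # placeholder registry name
docker push registry.example.org/base/ubuntu:22.10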
Container management
# Define an alias to remove all containers conveniently
[root@localhost ~]#alias rmc='docker rm -f $(docker ps -qa)'
# Run containers
[root@localhost ~]#docker run nginx:1.22.0 #pull the nginx:1.22.0 image and run a container
[root@localhost ~]#docker run -d --name nginx01 -p 80:80 --restart=always nginx:1.22.0 #run a container named nginx01 in the background from nginx:1.22.0 and start it automatically on boot
[root@localhost ~]#docker run --rm busybox sleep 1000 #run a busybox container executing sleep 1000; the container is removed automatically once the command exits
# Inspect containers
[root@localhost ~]#docker ps #list running containers
[root@localhost ~]#docker ps -a #list all containers
[root@localhost ~]#docker ps -as #list all containers with their sizes
[root@localhost ~]#docker ps -aq #list all container IDs
[root@localhost ~]#docker ps -f 'status=exited' #list stopped containers
[root@localhost ~]#docker ps -f 'status=running' #list running containers
[root@localhost ~]#docker top 39723d5b4c95 #show the processes inside a container
[root@localhost ~]#docker stats 39723d5b4c95 #watch a container's resource usage
[root@localhost ~]#docker stats --no-stream 39723d5b4c95 #show resource usage once
[root@localhost ~]#docker inspect 39723d5b4c95 #show container details
[root@localhost ~]#docker inspect -f "{{.Created}}" 39723d5b4c95 #show a specific field
# Delete containers
[root@localhost ~]#docker rm 39723d5b4c95 #remove a container
[root@localhost ~]#docker rm -f 39723d5b4c95 #force removal
# Start and stop containers
[root@localhost ~]#docker start nginx01 #start a container
[root@localhost ~]#docker stop nginx01 #stop a container
[root@localhost ~]#docker restart nginx01 #restart a container
[root@localhost ~]#docker pause nginx01 #pause a container
[root@localhost ~]#docker unpause nginx01 #unpause a container
# Send a signal to a container
[root@localhost ~]#docker kill nginx01
# Enter a container
[root@localhost ~]#docker exec -it nginx02 bash
# Publish ports
[root@localhost ~]#docker run -d --name nginx01 -p 80:80 --restart=always nginx:1.22.0 #map host port 80 to container port 80
[root@localhost ~]#docker run -d --name nginx01 -P --restart=always nginx:1.22.0 #publish all exposed ports to random host ports
# View logs
[root@localhost ~]#docker logs nginx02
[root@localhost ~]#docker logs --tail 3 nginx02 #show the last three lines
[root@localhost ~]#docker logs --tail 1 -t nginx02 #show timestamps
[root@localhost ~]#docker logs -f nginx02 #follow the log output
# Specify DNS servers
[root@localhost ~]#docker run -it --rm --dns 1.1.1.1 --dns 8.8.8.8 busybox sh
# Specify DNS search domains
[root@localhost ~]#docker run -it --rm --dns 1.1.1.1 --dns 8.8.8.8 --dns-search a.com --dns-search b.com busybox sh
# Copy files
[root@localhost ~]#docker cp nginx01:/etc/hosts . #copy the hosts file from the container into the host's current directory
[root@localhost ~]#docker cp /etc/hosts nginx01:/ #copy the host's hosts file into the container's root directory
# Pass environment variables
[root@localhost ~]#docker run --name mysql-test1 -v /data/mysql:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 -e MYSQL_DATABASE=wordpress -e MYSQL_USER=wpuser -e MYSQL_PASSWORD=123456 -d -p 3306:3306 mysql:5.7.30 #use environment variables to set the root password and create a database, a user, and its password
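To verify the variables took effect, a sketch assuming the container has finished initializing (the mysql client ships inside the mysql:5.7.30 image):
docker exec -it mysql-test1 mysql -uwpuser -p123456 -e 'show databases;'   # the wordpress database should be listed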
# Clean up unused data
[root@localhost ~]#docker system prune #use with extreme caution!
Building images with a Dockerfile
# docker commit: manually turn a modified container into an image
[root@localhost ~]#docker commit -m "test" -a "sh" -c "CMD /bin/bash" fe9b4ba22b50 mysql:1.0 #message "test", author "sh", set CMD to /bin/bash
# docker build: build images from a Dockerfile
[root@localhost data]#mkdir /data/Dockerfile/{system/{ubuntu,rocky},apps/{nginx,tomcat,mysql,jumpserver}} -pv
[root@localhost data]#tree /data/Dockerfile/
/data/Dockerfile/
├── apps
│ ├── jumpserver
│ ├── mysql
│ ├── nginx
│ └── tomcat
└── system
├── rocky
└── ubuntu
[root@localhost ubuntu]#cd /data/Dockerfile/system/ubuntu/
[root@localhost ubuntu]#vim Dockerfile
FROM ubuntu:22.04
LABEL author=shuhong
ENV NAME shu hong
RUN touch $NAME.txt
[root@localhost ubuntu]#docker build -t ubuntu2204-v1 .
[root@localhost ubuntu]#docker run --rm --name c1 ubuntu2204-v1:latest env
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
HOSTNAME=57c787c8d593
NAME=shu hong
HOME=/root
[root@localhost ubuntu]#docker run --rm --name c1 -e NAME="shu zi han" ubuntu2204-v1:latest env
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
HOSTNAME=7f473ddcd323
NAME=shu zi han
HOME=/root
Dockerfile instruction reference (a combined example follows the list)
FROM: specify the base image
LABEL: set image metadata
RUN: execute shell commands
ENV: set environment variables
COPY: copy files into the image
ADD: copy and unpack archives
CMD: default command when the container starts
ENTRYPOINT: entry point
ARG: build-time arguments
VOLUME: declare anonymous volumes
EXPOSE: declare listening ports
WORKDIR: set the working directory
ONBUILD: instructions triggered when a child image builds FROM this one
USER: set the user to run as
HEALTHCHECK: define a health check
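A minimal sketch combining several of these instructions, written in the same heredoc style as the install script above (all names and the start.sh payload are illustrative):
mkdir -p /data/Dockerfile/demo && cd /data/Dockerfile/demo
printf '#!/bin/bash\nexec sleep infinity\n' > start.sh    # illustrative payload
cat > Dockerfile <<'EOF'
FROM ubuntu:22.04
LABEL author=shuhong
ARG PKG=curl
ENV APP_DIR=/app
WORKDIR ${APP_DIR}
COPY start.sh .
RUN apt update && apt -y install ${PKG} && chmod +x start.sh
EXPOSE 80
VOLUME ["/data"]
CMD ["./start.sh"]
EOF
docker build -t demo:v1 .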
Building the jpress image in layers
[root@localhost data]#tree Dockerfile/
Dockerfile/
├── apps
│ ├── JDK
│ │ ├── Dockerfile
│ │ └── jdk-8u341-linux-x64.tar.gz
│ ├── jumpserver
│ ├── nginx
│ └── tomcat
│ ├── apache-tomcat-9.0.65.tar.gz
│ ├── context1.xml
│ ├── context.xml
│ ├── Dockerfile
│ └── tomcat-users.xml
├── system
│ ├── rocky
│ └── ubuntu
│ ├── Dockerfile
│ └── sources.list
└── website
└── Jpress
├── Dockerfile
├── jpress-v5.0.2.war
└── server.xml
ubuntu2204-Dockerfile
[root@localhost data]#cat Dockerfile/system/ubuntu/Dockerfile
FROM ubuntu:22.04
LABEL author=shuhong version="1.0" description="ubuntu2204"
ENV ORG=M50 DATE=2022-10-18
COPY sources.list /etc/apt/sources.list
RUN apt update && apt -y install wget curl net-tools iproute2 tcpdump telnet traceroute nfs-common lrzsz tree iotop unzip zip vim tzdata && \
rm -rf /etc/localtime && ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
groupadd -g 88 tomcat && useradd -u 88 -g tomcat -M -s /sbin/nologin tomcat
JDK-Dockerfile
[root@localhost data]#cat Dockerfile/apps/JDK/Dockerfile
FROM ubuntu2204:v1.0
ENV version=8u341
ENV JDK_DIR=/usr/local
LABEL Author=shuhong VERSION=JDK-${version}
ADD jdk-8u341-linux-x64.tar.gz ${JDK_DIR}/
RUN cd ${JDK_DIR} && ln -s jdk* jdk
RUN echo "export JAVA_HOME=${JDK_DIR}/jdk" >> /etc/profile.d/jdk.sh && echo export "PATH=\$PATH:\$JAVA_HOME/bin" >> /etc/profile.d/jdk.sh
Tomcat-Dockerfile
[root@localhost data]#cat Dockerfile/apps/tomcat/Dockerfile
FROM jdk8u341:v1.0
ENV version=9.0.65
ENV TOMCAT_DIR=/usr/local
ENV JDK_DIR=/usr/local
ENV JAVA_HOME=${JDK_DIR}/jdk
LABEL Author=shuhong VERSION=${version}
ADD apache-tomcat-9.0.65.tar.gz ${TOMCAT_DIR}/
RUN cd ${TOMCAT_DIR} && ln -s apache-tomcat-*/ tomcat
RUN echo "PATH=${TOMCAT_DIR}/tomcat/bin:"'$PATH' > /etc/profile.d/tomcat.sh
#RUN echo "JAVA_HOME=${JDK_DIR}/jdk" >> ${TOMCAT_DIR}/tomcat/conf/tomcat.conf
COPY tomcat-users.xml /usr/local/tomcat/conf/tomcat-users.xml
COPY context.xml /usr/local/tomcat/webapps/manager/META-INF/context.xml
COPY context1.xml /usr/local/tomcat/webapps/host-manager/META-INF/context.xml
RUN chown -R tomcat.tomcat ${TOMCAT_DIR}/tomcat/
CMD ${TOMCAT_DIR}/tomcat/bin/catalina.sh run
EXPOSE 8080
Jpress-Dockerfile
[root@localhost data]#cat Dockerfile/website/Jpress/Dockerfile
FROM tomcat9.0.65:v1.0
ENV version=5.0.2
ENV TOMCAT_DIR=/usr/local
LABEL Author=shuhong VERSION=${version}
COPY server.xml /usr/local/tomcat/conf/server.xml
COPY jpress-v5.0.2.war /data/website/ROOT.war
CMD ${TOMCAT_DIR}/tomcat/bin/catalina.sh run
Run the jpress01 container
[root@localhost data]# docker run -d --name jpress01 -p 80:8080 jpress5.0.2:v1.0
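A quick check that Tomcat deployed the war (a sketch; the app may take a few seconds to start):
curl -sI http://127.0.0.1/ | head -n1   # expect an HTTP status line once jpress is up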
Docker data persistence
Data volumes
Creating an anonymous volume
# -v with only a container path creates an anonymous volume with a randomly generated ID
[root@localhost ~]#docker run -d -v /apps/nginx/html/ nginx:1.22.0
Unable to find image 'nginx:1.22.0' locally
1.22.0: Pulling from library/nginx
bd159e379b3b: Pull complete
265da2307f4a: Pull complete
9f5a323076dc: Pull complete
1cb127bd9321: Pull complete
20d83d630f2b: Pull complete
e0c68760750a: Pull complete
Digest: sha256:f0d28f2047853cbc10732d6eaa1b57f1f4db9b017679b9fd7966b6a2f9ccc2d1
Status: Downloaded newer image for nginx:1.22.0
fe4d277584fe2a4b116cf8257e872a7181f44c982e8869dfdb223ebf7d47f416
[root@localhost ~]#ll /var/lib/docker/volumes/
total 24
drwx-----x 3 root root 19 Oct 19 19:41 6900b88f25e99b8b87c5f82a059ec7e7eb465c0633b697d7c0eef18c9516ba3e
brw------- 1 root root 253, 0 Oct 19 19:37 backingFsBlockDev
-rw------- 1 root root 32768 Oct 19 19:41 metadata.db
Creating a named volume
# -v volume-name:container-path creates a named volume (created automatically by the command below, or explicitly with docker volume create)
[root@localhost ~]#docker run -d -v volume1:/apps/nginx/html/ nginx:1.22.0
eb89cd0cb78954c58041682550378df3dff3ee7cbcd19796f78ffeca3436cd19
[root@localhost ~]#ll /var/lib/docker/volumes/
total 24
drwx-----x 3 root root 19 Oct 19 19:41 6900b88f25e99b8b87c5f82a059ec7e7eb465c0633b697d7c0eef18c9516ba3e
brw------- 1 root root 253, 0 Oct 19 19:37 backingFsBlockDev
-rw------- 1 root root 32768 Oct 19 19:44 metadata.db
drwx-----x 3 root root 19 Oct 19 19:44 volume1
[root@localhost ~]#docker volume ls
DRIVER VOLUME NAME
local 6900b88f25e99b8b87c5f82a059ec7e7eb465c0633b697d7c0eef18c9516ba3e
local volume1
Bind-mounting a host directory
# -v host-dir:container-dir bind-mounts a host directory into the container for persistence
[root@localhost ~]#docker run -d -v /data/html:/apps/nginx/html/ nginx:1.22.0
2d36613db203beec927deb823948ecd2ee37057b6bc1f640c2ffb7baa7674def
Data volume containers
# create a first container with a volume; later containers mount the same volumes by referring to it
[root@localhost ~]#docker run -d -v volume1:/apps/nginx/html/ --name nginx01 nginx:1.22.0
701cec0f2af93673ee18f12f5ff27990a19b3f5fe1bf526c01f373bc3d6a3007
# subsequent containers reuse the first container's mounts, so multiple containers share the same volumes or directories
[root@localhost ~]#docker run -d --volumes-from nginx01 --name nginx02 nginx:1.22.0
ad47db878b6049a18c3bfa41958a92ff722f4502b8010a526dfb737d74ca1d3a
[root@localhost ~]#docker run -d --volumes-from nginx01 --name nginx03 nginx:1.22.0
68aac481b0d5e179e2823b421e03dec76391a5e4df7a7b936bb0eb1833ea1ea6
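To confirm that all three containers see the same data, a minimal sketch:
docker exec nginx01 sh -c 'echo hello > /apps/nginx/html/index.html'
docker exec nginx02 cat /apps/nginx/html/index.html   # prints "hello"
docker exec nginx03 cat /apps/nginx/html/index.html   # prints "hello"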
Docker network management
bridge (default mode)
# the default mode; --network bridge can be omitted
[root@localhost ~]#docker run -d --name nginx01 --network bridge nginx:1.22.0
ccde1d2b9ab83e6521a2c041588f61af8eae0de99a515b39a186f7808e0a69fb
# Docker's default network is 172.17.0.1/16; to change the subnet:
Method 1:
[root@localhost ~]#vim /lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd --bip=172.25.0.1/24 -H unix:///var/run/docker.sock
[root@localhost ~]#systemctl daemon-reload && systemctl restart docker.service
[root@localhost ~]#ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 00:0c:29:b2:6f:d6 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.152/24 brd 10.0.0.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:feb2:6fd6/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:70:c1:0e:a2 brd ff:ff:ff:ff:ff:ff
inet 172.25.0.1/24 brd 172.25.0.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:70ff:fec1:ea2/64 scope link
valid_lft forever preferred_lft forever
Method 2:
[root@localhost ~]#vim /etc/docker/daemon.json
{
"registry-mirrors": ["https://si7y70hh.mirror.aliyuncs.com"],
"bip":"172.28.0.1/24"
}
[root@localhost ~]#systemctl restart docker.service
[root@localhost ~]#ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 00:0c:29:b2:6f:d6 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.152/24 brd 10.0.0.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:feb2:6fd6/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:70:c1:0e:a2 brd ff:ff:ff:ff:ff:ff
inet 172.28.0.1/24 brd 172.28.0.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:70ff:fec1:ea2/64 scope link
valid_lft forever preferred_lft forever
host mode
# the container uses the host's network interfaces and configuration
[root@localhost ~]#ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 00:0c:29:b2:6f:d6 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.152/24 brd 10.0.0.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:feb2:6fd6/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:70:c1:0e:a2 brd ff:ff:ff:ff:ff:ff
inet 172.28.0.1/24 brd 172.28.0.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:70ff:fec1:ea2/64 scope link
valid_lft forever preferred_lft forever
[root@localhost ~]#docker run -d --name tomcat01 --network host tomcat9.0.65:v1.0
32f1ea59a44ec867a265621b8097a818d2a6d73e0b2b28f1fd0718322482bc06
[root@localhost ~]#docker exec tomcat01 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 00:0c:29:b2:6f:d6 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.152/24 brd 10.0.0.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:feb2:6fd6/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:70:c1:0e:a2 brd ff:ff:ff:ff:ff:ff
inet 172.28.0.1/24 brd 172.28.0.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:70ff:fec1:ea2/64 scope link
valid_lft forever preferred_lft forever
none mode
# no network interfaces or configuration apart from loopback
[root@localhost ~]#docker run -d --name tomcat01 --network none tomcat9.0.65:v1.0
8f6bab5e9605b40aac0d9466e53fd021fae9a8860cc8f07d35a45fff3d94b896
[root@localhost ~]#docker exec tomcat01 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
container mode
# share the network namespace of an existing container
[root@localhost ~]#docker run -d --name tomcat01 tomcat9.0.65:v1.0
2b1f0a016ff53527215c938f79a84dbee978a7b6b7b72d1fc95d87ac627dbf12
[root@localhost ~]#docker run -d --name ubuntu2204 --network container:tomcat01 ubuntu2204:v1.0 sleep 10000
2bee921afc00567f7e7d20b992f66ab1ac45c9a07bde57a251ae9055bbdf14bd
[root@localhost ~]#docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
2bee921afc00 ubuntu2204:v1.0 "sleep 10000" 7 seconds ago Up 6 seconds ubuntu2204
2b1f0a016ff5 tomcat9.0.65:v1.0 "/bin/sh -c '${TOMCA…" 15 seconds ago Up 14 seconds 8080/tcp tomcat01
[root@localhost ~]#docker exec tomcat01 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
34: eth0@if35: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:1c:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.28.0.2/24 brd 172.28.0.255 scope global eth0
valid_lft forever preferred_lft forever
[root@localhost ~]#docker exec ubuntu2204 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
34: eth0@if35: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:1c:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.28.0.2/24 brd 172.28.0.255 scope global eth0
valid_lft forever preferred_lft forever
Custom networks (note: containers on a user-defined network can reach each other directly by container name, without needing --link)
# Create a user-defined network
[root@localhost ~]#docker network create --subnet 172.30.0.0/24 --gateway 172.30.0.1 testnet
d02deff04c71988dfbe768b3a366ced2e266ad519ba7d6a67621a48fc8ddc2cc
[root@localhost ~]#docker network ls
NETWORK ID NAME DRIVER SCOPE
9434606195b2 bridge bridge local
64c76a346ecf host host local
0ea2842c27a4 none null local
d02deff04c71 testnet bridge local
# Create containers on it
[root@localhost ~]#docker run -d --name tomcat01 --network testnet tomcat9.0.65:v1.0
2169eeda8f61082d6c07fd95336424c0b4e7afd5bc9bf81e552624a083867eaf
[root@localhost ~]#docker run -d --name tomcat02 --network testnet tomcat9.0.65:v1.0
39ca93a9eaea9825328ba3b871bddf8ea7fbc8e7be00d0647a105c26c73b26e8
[root@localhost ~]#docker exec -it tomcat02 ping tomcat01
PING tomcat01 (172.30.0.2) 56(84) bytes of data.
64 bytes from tomcat01.testnet (172.30.0.2): icmp_seq=1 ttl=64 time=0.047 ms
64 bytes from tomcat01.testnet (172.30.0.2): icmp_seq=2 ttl=64 time=0.065 ms
64 bytes from tomcat01.testnet (172.30.0.2): icmp_seq=3 ttl=64 time=0.081 ms
64 bytes from tomcat01.testnet (172.30.0.2): icmp_seq=4 ttl=64 time=0.067 ms
64 bytes from tomcat01.testnet (172.30.0.2): icmp_seq=5 ttl=64 time=0.081 ms
--- tomcat01 ping statistics ---
5 packets transmitted, 5 received, 0% packet loss, time 4075ms
rtt min/avg/max/mdev = 0.047/0.068/0.081/0.012 ms
[root@localhost ~]#docker exec -it tomcat01 ping tomcat02
PING tomcat02 (172.30.0.3) 56(84) bytes of data.
64 bytes from tomcat02.testnet (172.30.0.3): icmp_seq=1 ttl=64 time=0.048 ms
64 bytes from tomcat02.testnet (172.30.0.3): icmp_seq=2 ttl=64 time=0.063 ms
64 bytes from tomcat02.testnet (172.30.0.3): icmp_seq=3 ttl=64 time=0.064 ms
--- tomcat02 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2069ms
rtt min/avg/max/mdev = 0.048/0.058/0.064/0.007 ms
Communication between containers on different subnets on the same host
Method 1: modify the iptables rules
# confirm ip_forward is enabled
[root@ubuntu1804 ~]#cat /proc/sys/net/ipv4/ip_forward
1
# the default network and the custom network are two separate bridges
[root@ubuntu1804 ~]#brctl show
bridge name bridge id STP enabled interfaces
br-c90dee3b7937 8000.0242587cf093 no veth984a5b4
docker0 8000.02429b31732b no veth1a20128
[root@ubuntu1804 ~]#iptables-save
# Generated by iptables-save v1.6.1 on Sun Feb 2 14:33:19 2020
*filter
:INPUT ACCEPT [1283:90246]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [1489:217126]
:DOCKER - [0:0]
:DOCKER-ISOLATION-STAGE-1 - [0:0]
:DOCKER-ISOLATION-STAGE-2 - [0:0]
:DOCKER-USER - [0:0]
-A FORWARD -j DOCKER-USER
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
-A FORWARD -o br-c90dee3b7937 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o br-c90dee3b7937 -j DOCKER
-A FORWARD -i br-c90dee3b7937 ! -o br-c90dee3b7937 -j ACCEPT
-A FORWARD -i br-c90dee3b7937 -o br-c90dee3b7937 -j ACCEPT
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A DOCKER-ISOLATION-STAGE-1 -i br-c90dee3b7937 ! -o br-c90dee3b7937 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -j RETURN
-A DOCKER-ISOLATION-STAGE-2 -o br-c90dee3b7937 -j DROP #note this rule
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP #note this rule
-A DOCKER-ISOLATION-STAGE-2 -j RETURN
-A DOCKER-USER -j RETURN
COMMIT
# Completed on Sun Feb 2 14:33:19 2020
# Generated by iptables-save v1.6.1 on Sun Feb 2 14:33:19 2020
*nat
:PREROUTING ACCEPT [887:75032]
:INPUT ACCEPT [6:1028]
:OUTPUT ACCEPT [19:1444]
:POSTROUTING ACCEPT [19:1444]
:DOCKER - [0:0]
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A POSTROUTING -s 172.27.0.0/16 ! -o br-c90dee3b7937 -j MASQUERADE
-A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE
-A DOCKER -i br-c90dee3b7937 -j RETURN
-A DOCKER -i docker0 -j RETURN
COMMIT
# Completed on Sun Feb 2 14:33:19 2020
[root@ubuntu1804 ~]#iptables-save > iptables.rule
[root@ubuntu1804 ~]#vim iptables.rule
# change the following two rules
-A DOCKER-ISOLATION-STAGE-2 -o br-c90dee3b7937 -j ACCEPT
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j ACCEPT
# or run the following command instead
[root@ubuntu1804 ~]#iptables -I DOCKER-ISOLATION-STAGE-2 -j ACCEPT
[root@ubuntu1804 ~]#iptables-restore < iptables.rule
# now the two containers can reach each other again
/ # ping 172.27.0.2
PING 172.27.0.2 (172.27.0.2): 56 data bytes
64 bytes from 172.27.0.2: seq=896 ttl=63 time=0.502 ms
64 bytes from 172.27.0.2: seq=897 ttl=63 time=0.467 ms
64 bytes from 172.27.0.2: seq=898 ttl=63 time=0.227 ms
/ # ping 172.17.0.2
PING 172.17.0.2 (172.17.0.2): 56 data bytes
64 bytes from 172.17.0.2: seq=0 ttl=63 time=0.163 ms
64 bytes from 172.17.0.2: seq=1 ttl=63 time=0.232 ms
Method 2: use docker network connect
[root@ubuntu1804 ~]#docker network connect test-net test1
[root@ubuntu1804 ~]#docker network connect bridge test2
# to disconnect
[root@ubuntu1804 ~]#docker network disconnect test-net test1
[root@ubuntu1804 ~]#docker network disconnect bridge test2
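After connecting, the containers can resolve each other by name; a sketch assuming test1 was started on the default bridge and test2 on test-net:
docker exec -it test2 ping -c 2 test1   # works over the now-shared user-defined network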
Communication between containers across hosts
Method 1: bridge the host NIC into docker0 to interconnect containers across hosts
# run the following on both hosts
[root@ubuntu1804 ~]#apt -y install bridge-utils
[root@ubuntu1804 ~]#brctl addif docker0 eth0
# start one container on each host, make sure their IPs differ, then test access in both directions
# container on the first host
[root@ubuntu1804 ~]#docker run -it --name b1 busybox
/ # hostname -i
172.17.0.2
/ # httpd -h /data/html/ -f -v
[::ffff:172.17.0.3]:42488:response:200
# container on the second host
[root@ubuntu1804 ~]#docker run -it --name b2 busybox
/ # hostname -i
172.17.0.3
/ # wget -qO - http://172.17.0.2
httpd website in busybox
Method 2: use NAT and static routes to interconnect containers across hosts
# add a static route and iptables rules on the first host
[root@ubuntu1804 ~]#route add -net 192.168.200.0/24 gw 10.0.0.102
# adjust the iptables rules
[root@ubuntu1804 ~]#iptables -A FORWARD -s 10.0.0.0/24 -j ACCEPT
# or change the default FORWARD policy
[root@ubuntu1804 ~]#iptables -P FORWARD ACCEPT
# add a static route and iptables rules on the second host
[root@ubuntu1804 ~]#route add -net 192.168.100.0/24 gw 10.0.0.101
# adjust the iptables rules
[root@ubuntu1804 ~]#iptables -A FORWARD -s 10.0.0.0/24 -j ACCEPT
# or change the default FORWARD policy
[root@ubuntu1804 ~]#iptables -P FORWARD ACCEPT
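The routes above assume each host's docker0 sits on a distinct subnet; a hedged sketch of pinning those subnets via daemon.json, with values taken from the routes above:
# on the first host (10.0.0.101):
cat > /etc/docker/daemon.json <<'EOF'
{
  "bip": "192.168.100.1/24"
}
EOF
systemctl restart docker
# on the second host (10.0.0.102), use "bip": "192.168.200.1/24" instead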
Docker Compose
GitHub: https://github.com/docker/compose
Official docs: https://docs.docker.com/compose/
Installing Docker Compose
Method 1: install online via pip
Ubuntu:
# apt update
# apt install -y python-pip
CentOS:
# yum install epel-release
# yum install -y python-pip
# pip install --upgrade pip
# Example: install docker-compose via python3 pip
[root@ubuntu2004 ~]#mkdir ~/.pip
[root@ubuntu2004 ~]#cat > ~/.pip/pip.conf <<-EOF
> [global]
> index-url = https://pypi.tuna.tsinghua.edu.cn/simple
> EOF
[root@ubuntu2004 ~]#apt -y install python3-pip
[root@ubuntu2004 ~]#pip3 install --upgrade pip
[root@ubuntu2004 ~]#pip3 install docker-compose
[root@ubuntu2004 ~]#docker-compose --version
/usr/lib/python3/dist-packages/requests/__init__.py:89: RequestsDependencyWarning: urllib3 (1.26.12) or chardet (3.0.4) doesn't match a supported version!
warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported "
docker-compose version 1.29.2, build unknown
Method 2: install directly from the distribution's package repository (the packaged version is old; not recommended)
Method 3: download from the official site and install offline
[root@ubuntu2004 ~]#cp docker-compose-linux-x86_64-v2.12.0 /usr/local/bin/docker-compose
[root@ubuntu2004 ~]#chmod +x /usr/local/bin/docker-compose
[root@ubuntu2004 ~]#docker-compose version
Docker Compose version v2.12.0
Command reference
Official documentation: https://docs.docker.com/compose/reference/
docker-compose --help
Define and run multi-container applications with Docker.
Usage:
docker-compose [-f <arg>...] [options] [COMMAND] [ARGS...]
docker-compose -h|--help
# Options:
-f, --file FILE #specify the Compose file (default: docker-compose.yml)
-p, --project-name NAME #specify the project name (defaults to the name of the current directory)
--verbose #show more output
--log-level LEVEL #set the log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
--no-ansi #do not print ANSI control characters
-v, --version #show version
# The following commands must be run in the directory containing the docker-compose.yml|yaml file
config -q #validate the current configuration; prints nothing when there are no errors
up #create and start containers
build #build images
bundle #generate a JSON-format Docker Bundle backup file, named after the current directory, from the compose file
create #create services
down #stop and remove all containers, networks, images, and volumes
events #receive real-time events from containers, optionally in JSON log format
exec #run a command inside a given container
help #show help
images #show image information
kill #force-kill running containers
logs #view container logs
pause #pause services
port #show port mappings
ps #list containers
pull #pull images again after they have changed
push #push images
restart #restart services
rm #remove stopped service containers
run #run a one-off container
scale #set the number of containers per service (deprecated in newer versions)
start #start services
stop #stop services
top #show running processes
unpause #unpause services
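A typical workflow chaining these commands, run in the directory holding docker-compose.yml (a sketch):
docker-compose config -q   #validate the file; silent when it is correct
docker-compose up -d       #create and start everything in the background
docker-compose ps          #list the project's containers
docker-compose logs -f     #follow the logs (Ctrl-C to stop)
docker-compose down        #tear down containers and networks (add -v to also remove volumes)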
docker compose file format
Official documentation: https://docs.docker.com/compose/compose-file/
Auto-generate docker-compose files: https://www.composerize.com/
Deploying WordPress
[root@localhost wordpress]#vim docker-compose.yml
services:
  db:
    image: mysql:8.0
    container_name: db
    restart: always
    environment:
      - MYSQL_DATABASE=wordpress
      - MYSQL_ROOT_PASSWORD=123456
      - MYSQL_USER=wordpress
      - MYSQL_PASSWORD=123456
    volumes:
      - dbdata:/var/lib/mysql
    networks:
      - wordpress-network
  wordpress:
    depends_on:
      - db
    image: wordpress:5.8.3-apache
    container_name: wordpress
    restart: unless-stopped
    ports:
      - "80:80"
    environment:
      - WORDPRESS_DB_HOST=db:3306
      - WORDPRESS_DB_USER=wordpress
      - WORDPRESS_DB_PASSWORD=123456
      - WORDPRESS_DB_NAME=wordpress
    volumes:
      - wordpress:/var/www/html
    networks:
      - wordpress-network
volumes:
  wordpress:
  dbdata:
networks:
  wordpress-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.17.0.0/16
[root@localhost wordpress]#docker-compose up -d
[+] Running 5/5
⠿ Network wordpress_wordpress-network Created 0.1s
⠿ Volume "wordpress_dbdata" Created 0.0s
⠿ Volume "wordpress_wordpress" Created 0.0s
⠿ Container db Started 0.6s
⠿ Container wordpress Started 1.2s
[root@localhost wordpress]#docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
6544bb7de86f wordpress:5.8.3-apache "docker-entrypoint.s…" 4 minutes ago Up 4 minutes 0.0.0.0:80->80/tcp, :::80->80/tcp wordpress
49c24c5a2ee4 mysql:8.0 "docker-entrypoint.s…" 4 minutes ago Up 4 minutes 3306/tcp, 33060/tcp db
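To verify the stack, a sketch (the database may need a moment to initialize on first start):
docker-compose logs --tail=5 db        # MySQL should report it is ready for connections
curl -sI http://127.0.0.1/ | head -n1  # WordPress should answer on port 80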
Deploying the Spug ops platform
[root@localhost Spug]#vim docker-compose.yml
services:
  db:
    image: mariadb:10.8
    container_name: spug-db
    restart: always
    command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
    volumes:
      - /data/spug/mysql:/var/lib/mysql
    environment:
      - MYSQL_DATABASE=spug
      - MYSQL_USER=spug
      - MYSQL_PASSWORD=spug.cc
      - MYSQL_ROOT_PASSWORD=spug.cc
  spug:
    image: openspug/spug-service
    container_name: spug
    privileged: true
    restart: always
    volumes:
      - /data/spug/service:/data/spug
      - /data/spug/repos:/data/repos
    ports:
      - "80:80"
    environment:
      - SPUG_DOCKER_VERSION=v3.2.1
      - MYSQL_DATABASE=spug
      - MYSQL_USER=spug
      - MYSQL_PASSWORD=spug.cc
      - MYSQL_HOST=db
      - MYSQL_PORT=3306
    depends_on:
      - db
[root@localhost Spug]#docker-compose up -d
[+] Running 27/27
⠿ spug Pulled 53.4s
⠿ 2d473b07cdd5 Pull complete 9.9s
⠿ 30fb14a94460 Pull complete 12.6s
⠿ 33b193505f30 Pull complete 12.8s
⠿ 0dc88a039e58 Pull complete 14.1s
⠿ 1e4810fe59b5 Pull complete 14.2s
⠿ b982e1d26912 Pull complete 14.2s
⠿ 88ff1bc3c2c8 Pull complete 14.2s
⠿ 41f60540c45b Pull complete 14.3s
⠿ 6e51dcaa57e6 Pull complete 14.3s
⠿ aa2124333bc8 Pull complete 14.3s
⠿ 390c3ff87a79 Pull complete 14.4s
⠿ 4644b9bbf983 Pull complete 14.4s
⠿ 8e3abf52ddc8 Pull complete 15.4s
⠿ 2a80002b5fd9 Pull complete 17.6s
⠿ db Pulled 21.7s
⠿ cf92e523b49e Pull complete 5.6s
⠿ 11a7b642a1b0 Pull complete 5.6s
⠿ d05db1f7ddc9 Pull complete 5.7s
⠿ 043662c3afa1 Pull complete 6.3s
⠿ de48eea20795 Pull complete 6.3s
⠿ 1a40b9e7476d Pull complete 7.8s
⠿ d053ff7fa7cc Pull complete 7.9s
⠿ 2fda51abf889 Pull complete 8.1s
⠿ 8c9d611afa09 Pull complete 15.8s
⠿ 6ea3c205ff42 Pull complete 15.8s
⠿ af34ee7f2a8a Pull complete 15.8s
[+] Running 3/3
⠿ Network spug_default Created 0.1s
⠿ Container spug-db Started 0.6s
⠿ Container spug Started 1.2s
[root@localhost Spug]#docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
2de41b268347 openspug/spug-service "/entrypoint.sh" 10 seconds ago Up 8 seconds 0.0.0.0:80->80/tcp, :::80->80/tcp spug
58fbeafa0219 mariadb:10.8 "docker-entrypoint.s…" 10 seconds ago Up 9 seconds 3306/tcp spug-db