TL;DR: an overview-style guide comparing environments for running applications in containers. The capabilities of Docker and other similar systems are considered.
It all started with chroot: a process whose root directory has been changed sees only the files below that directory, and this is still sometimes used to give a service its own «jail». The isolation is minimal, though: a process running as root can escape such a jail, and nothing limits its resources, so chroot by itself is neither a security boundary nor a full container.
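A toy illustration of the mechanism on a CentOS-like layout, where bash and its libraries live under /bin and /lib64 (the exact list of libraries can be taken from ldd):
# mkdir -p /srv/jail/bin /srv/jail/lib64
# cp /bin/bash /srv/jail/bin/
# cp $(ldd /bin/bash | grep -o '/lib64/[^ )]*') /srv/jail/lib64/
# chroot /srv/jail /bin/bash
Inside, only what was copied in is visible; everything else on the host (processes, network, resources) is still shared.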
The next step was operating-system-level virtualization, where the kernel isolates groups of processes from each other: each group gets its own view of the file system, process tree, network and so on. Examples are FreeBSD Jails, Solaris Zones, and OpenVZ and LXC on Linux. Unlike a plain chroot, such containers look much more like separate machines: they have their own users, network stack and resource limits, and on some systems (Linux, FreeBSD) it is even possible to run a userland «foreign» to the host on the same kernel.
Later came the idea of packaging an application together with everything it needs (libraries, configuration files and so on) into an image and shipping that image around, so it can be run anywhere without worrying about dependencies. This is where Docker enters the picture, and this is what we usually mean by containers today.
Docker
Docker is the best-known containerization platform. It is written in Go and uses the standard facilities of the Linux kernel — cgroups, namespaces, capabilities and so on — as well as layered file systems such as Aufs to save disk space.
(Image source: Wikimedia)
Up to version 1.11 Docker was a single monolithic application that did everything itself: downloading images, running containers, serving the API. Starting with 1.11 it was split into cooperating components: containerd, which handles the whole lifecycle of containers (image storage, networking, execution and so on), and runC, which actually runs containers on top of cgroups and other Linux kernel features. The docker service itself accepts commands over the API and translates them into calls to containerd.
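To see this split in practice, containerd can be driven directly with its ctr client (shipped in the containerd.io package), bypassing the docker CLI entirely; a small sketch:
# ctr images pull docker.io/library/alpine:latest
# ctr run --rm docker.io/library/alpine:latest demo echo 'hello from containerd'
Note that ctr works in its own containerd namespace, so containers started this way will not show up in docker ps.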
The Docker toolset also includes docker-machine, a utility for provisioning and managing (remote) hosts running the Docker engine.
Everything below assumes Docker on Linux; as of 2018 that is where the engine is most mature and most widely used.
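As an illustration, a host reachable over SSH can be attached with docker-machine roughly like this (a sketch; the address is a placeholder, and the generic driver assumes SSH access as root):
$ docker-machine create --driver generic --generic-ip-address=xx.xx.xx.xx --generic-ssh-user=root node1
$ eval $(docker-machine env node1)
$ docker ps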
The installation could be automated with Ansible or a similar system, but here I will do it by hand.
We will use CentOS 7; the installation looks like this:
# yum install -y yum-utils
# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# yum install docker-ce docker-ce-cli containerd.io
After the installation, enable and start the service, and open the port that Swarm will need later:
# systemctl enable docker
# systemctl start docker
# firewall-cmd --zone=public --add-port=2377/tcp --permanent
By creating a docker group and adding your user to it you can work with docker without sudo, but keep in mind that this is effectively equivalent to giving that user root on the host. Likewise, do not expose the Docker API to the outside world without TLS, and open port 2377 (used by Swarm, see below) only towards the other cluster nodes rather than to everyone.
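A sketch of the sudo-less setup (the user name is a placeholder; again, membership in the docker group is effectively root on the host):
# groupadd -f docker
# usermod -aG docker myuser
The change takes effect after the user logs in again.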
Besides docker-machine, the ecosystem includes docker registry, a service for storing and distributing images, and docker compose, a tool for describing multi-container applications in YAML files (containers, networks, volumes and so on).
Those YAML files are convenient to reuse in CI/CD pipelines. Clustering is covered by swarm mode (before version 1.12 it was a separate product, docker swarm), which joins several hosts into a cluster and provides services, replication, rolling updates and failover on top of them.
The YAML files written for docker compose work, with small changes, as swarm stack files too. Docker also has points of contact with Kubernetes: in some Docker distributions swarm mode can be swapped for Kubernetes, and runC can be replaced with another OCI runtime, for example Kata Containers.
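For example, an alternative OCI runtime is registered in /etc/docker/daemon.json and then chosen per container; a sketch, assuming kata-runtime is already installed at the path below:
# cat /etc/docker/daemon.json
{
  "runtimes": {
    "kata": { "path": "/usr/bin/kata-runtime" }
  }
}
# systemctl restart docker
# docker run --rm --runtime=kata alpine uname -r
The last command prints the guest kernel version rather than the host's, since Kata runs the container inside a lightweight VM.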
Using Docker
As an example, we will deploy GitLab together with a Docker Registry on the cluster. For shared storage we will use GlusterFS, mounted on every node and used both for docker volumes and for the registry data. The following components will run in Swarm: Docker Registry, PostgreSQL, Redis, GitLab and GitLab Runner. PostgreSQL will be clustered with Stolon, so GlusterFS is not needed for the PostgreSQL data; everything else that has to survive a node failure goes to GlusterFS.
Install GlusterFS on all three nodes (node1, node2, node3), enable the service, open the firewall and create the directories; the commands are executed on every node:
# yum -y install centos-release-gluster7
# yum -y install glusterfs-server
# systemctl enable glusterd
# systemctl start glusterd
# firewall-cmd --add-service=glusterfs --permanent
# firewall-cmd --reload
# mkdir -p /srv/gluster
# mkdir -p /srv/docker
# echo "$(hostname):/docker /srv/docker glusterfs defaults,_netdev 0 0" >> /etc/fstab
Then, from node1, join the nodes into a trusted GlusterFS pool and create a replicated volume:
# gluster peer probe node2
# gluster peer probe node3
# gluster volume create docker replica 3 node1:/srv/gluster node2:/srv/gluster node3:/srv/gluster force
# gluster volume start docker
Mount the created volume (the command must be run on every node):
# mount /srv/docker
Next comes swarm mode. One of the nodes becomes the Leader (a manager); the rest join the cluster as managers or workers.
Initialize the cluster on node1:
# docker swarm init
Swarm initialized: current node (a5jpfrh5uvo7svzz1ajduokyq) is now a manager.
To add a worker to this swarm, run the following command:
docker swarm join --token SWMTKN-1-0c5mf7mvzc7o7vjk0wngno2dy70xs95tovfxbv4tqt9280toku-863hyosdlzvd76trfptd4xnzd xx.xx.xx.xx:2377
To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
# docker swarm join-token manager
Copy the token and run the join command on node2 and node3:
# docker swarm join --token SWMTKN-x-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxx xx.xx.xx.xx:2377
This node joined a swarm as a manager.
From here on, all commands are executed on a manager node, node1 in this example.
Create the overlay networks for the services:
# docker network create --driver=overlay etcd
# docker network create --driver=overlay pgsql
# docker network create --driver=overlay redis
# docker network create --driver=overlay traefik
# docker network create --driver=overlay gitlab
Then label the nodes so that stateful services can be pinned to particular hosts:
# docker node update --label-add nodename=node1 node1
# docker node update --label-add nodename=node2 node2
# docker node update --label-add nodename=node3 node3
The first service to deploy is etcd, the KV store used by Traefik and Stolon. As with the PostgreSQL data later on, its data lives in a local directory on each node, so create it everywhere:
# mkdir -p /srv/etcd
Describe the etcd cluster (00etcd.yml):
version: '3.7'
services:
etcd1:
image: quay.io/coreos/etcd:latest
hostname: etcd1
command:
- etcd
- --name=etcd1
- --data-dir=/data.etcd
- --advertise-client-urls=http://etcd1:2379
- --listen-client-urls=http://0.0.0.0:2379
- --initial-advertise-peer-urls=http://etcd1:2380
- --listen-peer-urls=http://0.0.0.0:2380
- --initial-cluster=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380
- --initial-cluster-state=new
- --initial-cluster-token=etcd-cluster
networks:
- etcd
volumes:
- etcd1vol:/data.etcd
deploy:
replicas: 1
placement:
constraints: [node.labels.nodename == node1]
etcd2:
image: quay.io/coreos/etcd:latest
hostname: etcd2
command:
- etcd
- --name=etcd2
- --data-dir=/data.etcd
- --advertise-client-urls=http://etcd2:2379
- --listen-client-urls=http://0.0.0.0:2379
- --initial-advertise-peer-urls=http://etcd2:2380
- --listen-peer-urls=http://0.0.0.0:2380
- --initial-cluster=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380
- --initial-cluster-state=new
- --initial-cluster-token=etcd-cluster
networks:
- etcd
volumes:
- etcd2vol:/data.etcd
deploy:
replicas: 1
placement:
constraints: [node.labels.nodename == node2]
etcd3:
image: quay.io/coreos/etcd:latest
hostname: etcd3
command:
- etcd
- --name=etcd3
- --data-dir=/data.etcd
- --advertise-client-urls=http://etcd3:2379
- --listen-client-urls=http://0.0.0.0:2379
- --initial-advertise-peer-urls=http://etcd3:2380
- --listen-peer-urls=http://0.0.0.0:2380
- --initial-cluster=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380
- --initial-cluster-state=new
- --initial-cluster-token=etcd-cluster
networks:
- etcd
volumes:
- etcd3vol:/data.etcd
deploy:
replicas: 1
placement:
constraints: [node.labels.nodename == node3]
volumes:
etcd1vol:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/etcd"
etcd2vol:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/etcd"
etcd3vol:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/etcd"
networks:
etcd:
external: true
# docker stack deploy --compose-file 00etcd.yml etcd
After a while, check that the etcd cluster came up:
# docker exec $(docker ps | awk '/etcd/ {print $1}') etcdctl member list
ade526d28b1f92f7: name=etcd1 peerURLs=http://etcd1:2380 clientURLs=http://etcd1:2379 isLeader=false
bd388e7810915853: name=etcd3 peerURLs=http://etcd3:2380 clientURLs=http://etcd3:2379 isLeader=false
d282ac2ce600c1ce: name=etcd2 peerURLs=http://etcd2:2380 clientURLs=http://etcd2:2379 isLeader=true
# docker exec $(docker ps | awk '/etcd/ {print $1}') etcdctl cluster-health
member ade526d28b1f92f7 is healthy: got healthy result from http://etcd1:2379
member bd388e7810915853 is healthy: got healthy result from http://etcd3:2379
member d282ac2ce600c1ce is healthy: got healthy result from http://etcd2:2379
cluster is healthy
Create the directories for the PostgreSQL data, running the command on every node:
# mkdir -p /srv/pgsql
Describe the PostgreSQL cluster managed by Stolon (01pgsql.yml):
version: '3.7'
services:
pgsentinel:
image: sorintlab/stolon:master-pg10
command:
- gosu
- stolon
- stolon-sentinel
- --cluster-name=stolon-cluster
- --store-backend=etcdv3
- --store-endpoints=http://etcd1:2379,http://etcd2:2379,http://etcd3:2379
- --log-level=debug
networks:
- etcd
- pgsql
deploy:
replicas: 3
update_config:
parallelism: 1
delay: 30s
order: stop-first
failure_action: pause
pgkeeper1:
image: sorintlab/stolon:master-pg10
hostname: pgkeeper1
command:
- gosu
- stolon
- stolon-keeper
- --pg-listen-address=pgkeeper1
- --pg-repl-username=replica
- --uid=pgkeeper1
- --pg-su-username=postgres
- --pg-su-passwordfile=/run/secrets/pgsql
- --pg-repl-passwordfile=/run/secrets/pgsql_repl
- --data-dir=/var/lib/postgresql/data
- --cluster-name=stolon-cluster
- --store-backend=etcdv3
- --store-endpoints=http://etcd1:2379,http://etcd2:2379,http://etcd3:2379
networks:
- etcd
- pgsql
environment:
- PGDATA=/var/lib/postgresql/data
volumes:
- pgkeeper1:/var/lib/postgresql/data
secrets:
- pgsql
- pgsql_repl
deploy:
replicas: 1
placement:
constraints: [node.labels.nodename == node1]
pgkeeper2:
image: sorintlab/stolon:master-pg10
hostname: pgkeeper2
command:
- gosu
- stolon
- stolon-keeper
- --pg-listen-address=pgkeeper2
- --pg-repl-username=replica
- --uid=pgkeeper2
- --pg-su-username=postgres
- --pg-su-passwordfile=/run/secrets/pgsql
- --pg-repl-passwordfile=/run/secrets/pgsql_repl
- --data-dir=/var/lib/postgresql/data
- --cluster-name=stolon-cluster
- --store-backend=etcdv3
- --store-endpoints=http://etcd1:2379,http://etcd2:2379,http://etcd3:2379
networks:
- etcd
- pgsql
environment:
- PGDATA=/var/lib/postgresql/data
volumes:
- pgkeeper2:/var/lib/postgresql/data
secrets:
- pgsql
- pgsql_repl
deploy:
replicas: 1
placement:
constraints: [node.labels.nodename == node2]
pgkeeper3:
image: sorintlab/stolon:master-pg10
hostname: pgkeeper3
command:
- gosu
- stolon
- stolon-keeper
- --pg-listen-address=pgkeeper3
- --pg-repl-username=replica
- --uid=pgkeeper3
- --pg-su-username=postgres
- --pg-su-passwordfile=/run/secrets/pgsql
- --pg-repl-passwordfile=/run/secrets/pgsql_repl
- --data-dir=/var/lib/postgresql/data
- --cluster-name=stolon-cluster
- --store-backend=etcdv3
- --store-endpoints=http://etcd1:2379,http://etcd2:2379,http://etcd3:2379
networks:
- etcd
- pgsql
environment:
- PGDATA=/var/lib/postgresql/data
volumes:
- pgkeeper3:/var/lib/postgresql/data
secrets:
- pgsql
- pgsql_repl
deploy:
replicas: 1
placement:
constraints: [node.labels.nodename == node3]
postgresql:
image: sorintlab/stolon:master-pg10
command: gosu stolon stolon-proxy --listen-address 0.0.0.0 --cluster-name stolon-cluster --store-backend=etcdv3 --store-endpoints http://etcd1:2379,http://etcd2:2379,http://etcd3:2379
networks:
- etcd
- pgsql
deploy:
replicas: 3
update_config:
parallelism: 1
delay: 30s
order: stop-first
failure_action: rollback
volumes:
pgkeeper1:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/pgsql"
pgkeeper2:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/pgsql"
pgkeeper3:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/pgsql"
secrets:
pgsql:
file: "/srv/docker/postgres"
pgsql_repl:
file: "/srv/docker/replica"
networks:
etcd:
external: true
pgsql:
external: true
Generate the passwords for the secrets:
# </dev/urandom tr -dc 234567890qwertyuopasdfghjkzxcvbnmQWERTYUPASDFGHKLZXCVBNM | head -c $(((RANDOM%3)+15)) > /srv/docker/replica
# </dev/urandom tr -dc 234567890qwertyuopasdfghjkzxcvbnmQWERTYUPASDFGHKLZXCVBNM | head -c $(((RANDOM%3)+15)) > /srv/docker/postgres
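As an aside, the same secrets could live in Swarm's own encrypted store instead of files on GlusterFS; a sketch (the compose file would then have to declare them with external: true):
# </dev/urandom tr -dc A-Za-z0-9 | head -c 16 | docker secret create pgsql -
# </dev/urandom tr -dc A-Za-z0-9 | head -c 16 | docker secret create pgsql_repl -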
# docker stack deploy --compose-file 01pgsql.yml pgsql
Some time later (check with docker service ls that everything is up), initialize the PostgreSQL cluster:
# docker exec $(docker ps | awk '/pgkeeper/ {print $1}') stolonctl --cluster-name=stolon-cluster --store-backend=etcdv3 --store-endpoints=http://etcd1:2379,http://etcd2:2379,http://etcd3:2379 init
Check the state of the PostgreSQL cluster:
# docker exec $(docker ps | awk '/pgkeeper/ {print $1}') stolonctl --cluster-name=stolon-cluster --store-backend=etcdv3 --store-endpoints=http://etcd1:2379,http://etcd2:2379,http://etcd3:2379 status
=== Active sentinels ===
ID LEADER
26baa11d false
74e98768 false
a8cb002b true
=== Active proxies ===
ID
4d233826
9f562f3b
b0c79ff1
=== Keepers ===
UID HEALTHY PG LISTENADDRESS PG HEALTHY PG WANTEDGENERATION PG CURRENTGENERATION
pgkeeper1 true pgkeeper1:5432 true 2 2
pgkeeper2 true pgkeeper2:5432 true 2 2
pgkeeper3 true pgkeeper3:5432 true 3 3
=== Cluster Info ===
Master Keeper: pgkeeper3
===== Keepers/DB tree =====
pgkeeper3 (master)
├─pgkeeper2
└─pgkeeper1
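One thing the stack files do not do is create the database and role that GitLab (deployed below) expects, so a possible way is to do it by hand through the stolon-proxy; the password must match the one later put into GITLAB_OMNIBUS_CONFIG, and pg_trgm is a GitLab requirement:
# docker exec -it $(docker ps | awk '/postgresql/ {print $1}') psql -h 127.0.0.1 -U postgres
and at the psql prompt:
CREATE USER gitlab WITH PASSWORD 'XXXXXXXXXXX';
CREATE DATABASE gitlab OWNER gitlab;
\c gitlab
CREATE EXTENSION IF NOT EXISTS pg_trgm;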
Next comes Traefik, which will accept incoming HTTP connections (03traefik.yml):
version: '3.7'
services:
traefik:
image: traefik:latest
command: >
--log.level=INFO
--providers.docker=true
--entryPoints.web.address=:80
--providers.providersThrottleDuration=2
--providers.docker.watch=true
--providers.docker.swarmMode=true
--providers.docker.swarmModeRefreshSeconds=15s
--providers.docker.exposedbydefault=false
--accessLog.bufferingSize=0
--api=true
--api.dashboard=true
--api.insecure=true
networks:
- traefik
ports:
- 80:80
volumes:
- /var/run/docker.sock:/var/run/docker.sock
deploy:
replicas: 3
placement:
constraints:
- node.role == manager
preferences:
- spread: node.id
labels:
- traefik.enable=true
- traefik.http.routers.traefik.rule=Host(`traefik.example.com`)
- traefik.http.services.traefik.loadbalancer.server.port=8080
- traefik.docker.network=traefik
networks:
traefik:
external: true
# docker stack deploy --compose-file 03traefik.yml traefik
Then we start a Redis cluster (a master, replicas and Sentinel), for which we create a data directory on every node (05redis.yml):
# mkdir -p /srv/redis
version: '3.7'
services:
redis-master:
image: 'bitnami/redis:latest'
networks:
- redis
ports:
- '6379:6379'
environment:
- REDIS_REPLICATION_MODE=master
- REDIS_PASSWORD=xxxxxxxxxxx
deploy:
mode: global
restart_policy:
condition: any
volumes:
- 'redis:/opt/bitnami/redis/etc/'
redis-replica:
image: 'bitnami/redis:latest'
networks:
- redis
ports:
- '6379'
depends_on:
- redis-master
environment:
- REDIS_REPLICATION_MODE=slave
- REDIS_MASTER_HOST=redis-master
- REDIS_MASTER_PORT_NUMBER=6379
- REDIS_MASTER_PASSWORD=xxxxxxxxxxx
- REDIS_PASSWORD=xxxxxxxxxxx
deploy:
mode: replicated
replicas: 3
update_config:
parallelism: 1
delay: 10s
restart_policy:
condition: any
redis-sentinel:
image: 'bitnami/redis:latest'
networks:
- redis
ports:
- '16379'
depends_on:
- redis-master
- redis-replica
entrypoint: |
bash -c 'bash -s <<EOF
"/bin/bash" -c "cat <<EOF > /opt/bitnami/redis/etc/sentinel.conf
port 16379
dir /tmp
sentinel monitor master-node redis-master 6379 2
sentinel down-after-milliseconds master-node 5000
sentinel parallel-syncs master-node 1
sentinel failover-timeout master-node 5000
sentinel auth-pass master-node xxxxxxxxxxx
sentinel announce-ip redis-sentinel
sentinel announce-port 16379
EOF"
"/bin/bash" -c "redis-sentinel /opt/bitnami/redis/etc/sentinel.conf"
EOF'
deploy:
mode: global
restart_policy:
condition: any
volumes:
redis:
driver: local
driver_opts:
type: 'none'
o: 'bind'
device: "/srv/redis"
networks:
redis:
external: true
# docker stack deploy --compose-file 05redis.yml redis
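A quick way to check that Sentinel sees the master (master-node is the name from the sentinel.conf above):
# docker exec $(docker ps | awk '/redis-sentinel/ {print $1}') redis-cli -p 16379 sentinel get-master-addr-by-name master-node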
Now add the Docker Registry (06registry.yml):
version: '3.7'
services:
registry:
image: registry:2.6
networks:
- traefik
volumes:
- registry_data:/var/lib/registry
deploy:
replicas: 1
placement:
constraints: [node.role == manager]
restart_policy:
condition: on-failure
labels:
- traefik.enable=true
- traefik.http.routers.registry.rule=Host(`registry.example.com`)
- traefik.http.services.registry.loadbalancer.server.port=5000
- traefik.docker.network=traefik
volumes:
registry_data:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/docker/registry"
networks:
traefik:
external: true
# mkdir /srv/docker/registry
# docker stack deploy --compose-file 06registry.yml registry
And finally, GitLab itself (08gitlab-runner.yml):
version: '3.7'
services:
gitlab:
image: gitlab/gitlab-ce:latest
networks:
- pgsql
- redis
- traefik
- gitlab
ports:
- 22222:22
environment:
GITLAB_OMNIBUS_CONFIG: |
postgresql['enable'] = false
redis['enable'] = false
gitlab_rails['registry_enabled'] = false
gitlab_rails['db_username'] = "gitlab"
gitlab_rails['db_password'] = "XXXXXXXXXXX"
gitlab_rails['db_host'] = "postgresql"
gitlab_rails['db_port'] = "5432"
gitlab_rails['db_database'] = "gitlab"
gitlab_rails['db_adapter'] = 'postgresql'
gitlab_rails['db_encoding'] = 'utf8'
gitlab_rails['redis_host'] = 'redis-master'
gitlab_rails['redis_port'] = '6379'
gitlab_rails['redis_password'] = 'xxxxxxxxxxx'
gitlab_rails['smtp_enable'] = true
gitlab_rails['smtp_address'] = "smtp.yandex.ru"
gitlab_rails['smtp_port'] = 465
gitlab_rails['smtp_user_name'] = "noreply@example.com"
gitlab_rails['smtp_password'] = "xxxxxxxxx"
gitlab_rails['smtp_domain'] = "example.com"
gitlab_rails['gitlab_email_from'] = 'noreply@example.com'
gitlab_rails['smtp_authentication'] = "login"
gitlab_rails['smtp_tls'] = true
gitlab_rails['smtp_enable_starttls_auto'] = true
gitlab_rails['smtp_openssl_verify_mode'] = 'peer'
external_url 'http://gitlab.example.com/'
gitlab_rails['gitlab_shell_ssh_port'] = 22222
volumes:
- gitlab_conf:/etc/gitlab
- gitlab_logs:/var/log/gitlab
- gitlab_data:/var/opt/gitlab
deploy:
mode: replicated
replicas: 1
placement:
constraints:
- node.role == manager
labels:
- traefik.enable=true
- traefik.http.routers.gitlab.rule=Host(`gitlab.example.com`)
- traefik.http.services.gitlab.loadbalancer.server.port=80
- traefik.docker.network=traefik
gitlab-runner:
image: gitlab/gitlab-runner:latest
networks:
- gitlab
volumes:
- gitlab_runner_conf:/etc/gitlab-runner
- /var/run/docker.sock:/var/run/docker.sock
deploy:
mode: replicated
replicas: 1
placement:
constraints:
- node.role == manager
volumes:
gitlab_conf:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/docker/gitlab/conf"
gitlab_logs:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/docker/gitlab/logs"
gitlab_data:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/docker/gitlab/data"
gitlab_runner_conf:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/docker/gitlab/runner"
networks:
pgsql:
external: true
redis:
external: true
traefik:
external: true
gitlab:
external: true
# mkdir -p /srv/docker/gitlab/conf
# mkdir -p /srv/docker/gitlab/logs
# mkdir -p /srv/docker/gitlab/data
# mkdir -p /srv/docker/gitlab/runner
# docker stack deploy --compose-file 08gitlab-runner.yml gitlab
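Before the runner can pick up jobs it has to be registered with GitLab; a sketch, where the URL matches external_url above and the registration token (placeholder here) is taken from the GitLab admin area:
# docker exec -it $(docker ps | awk '/gitlab-runner/ {print $1}') gitlab-runner register \
    --non-interactive \
    --url http://gitlab.example.com/ \
    --registration-token XXXXXXXXX \
    --executor docker \
    --docker-image alpine:latest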
The resulting state of the cluster:
# docker service ls
ID NAME MODE REPLICAS IMAGE PORTS
lef9n3m92buq etcd_etcd1 replicated 1/1 quay.io/coreos/etcd:latest
ij6uyyo792x5 etcd_etcd2 replicated 1/1 quay.io/coreos/etcd:latest
fqttqpjgp6pp etcd_etcd3 replicated 1/1 quay.io/coreos/etcd:latest
hq5iyga28w33 gitlab_gitlab replicated 1/1 gitlab/gitlab-ce:latest *:22222->22/tcp
dt7s6vs0q4qc gitlab_gitlab-runner replicated 1/1 gitlab/gitlab-runner:latest
k7uoezno0h9n pgsql_pgkeeper1 replicated 1/1 sorintlab/stolon:master-pg10
cnrwul4r4nse pgsql_pgkeeper2 replicated 1/1 sorintlab/stolon:master-pg10
frflfnpty7tr pgsql_pgkeeper3 replicated 1/1 sorintlab/stolon:master-pg10
x7pqqchi52kq pgsql_pgsentinel replicated 3/3 sorintlab/stolon:master-pg10
mwu2wl8fti4r pgsql_postgresql replicated 3/3 sorintlab/stolon:master-pg10
9hkbe2vksbzb redis_redis-master global 3/3 bitnami/redis:latest *:6379->6379/tcp
l88zn8cla7dc redis_redis-replica replicated 3/3 bitnami/redis:latest *:30003->6379/tcp
1utp309xfmsy redis_redis-sentinel global 3/3 bitnami/redis:latest *:30002->16379/tcp
oteb824ylhyp registry_registry replicated 1/1 registry:2.6
qovrah8nzzu8 traefik_traefik replicated 3/3 traefik:latest *:80->80/tcp, *:443->443/tcp
What else could be done? For example, Traefik could terminate HTTPS, and TLS could be added for PostgreSQL and Redis, as sketched below. But as a proof of concept this is already enough.
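For reference, a rough sketch of the HTTPS part with Let's Encrypt (Traefik v2 options; the resolver name le, the e-mail and the storage path are placeholders, port 443 has to be published and acme.json kept on persistent storage). Extra flags for the traefik command:
--entryPoints.websecure.address=:443
--certificatesResolvers.le.acme.email=admin@example.com
--certificatesResolvers.le.acme.storage=/letsencrypt/acme.json
--certificatesResolvers.le.acme.httpChallenge.entryPoint=web
And extra labels on each published service, GitLab for example:
- traefik.http.routers.gitlab.entrypoints=websecure
- traefik.http.routers.gitlab.tls.certresolver=le
With that, let's look at the alternatives to Docker.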
Podman
Yet another container engine, this one organized around pods (groups of containers deployed and managed together). Unlike Docker it does not need a daemon: it is built on the libpod library, written in Go, and delegates the actual launching of containers to an OCI-compatible runtime such as runC.
Podman's command-line interface copies Docker's to the point that you can simply write the following (and the documentation even recommends trying it):
$ alias docker=podman
Podman's story is tightly bound to Kubernetes and Docker: after the Open Container Initiative (OCI) was founded in 2015 and the container format and runtime were standardized around Docker's containerd and runC, Kubernetes stopped depending on Docker itself and got a lighter runtime interface, CRI-O. Podman positions itself as a Docker replacement built on Kubernetes ideas, first of all pods; there is no swarm mode, and if you need a cluster you are expected to use Kubernetes.
On CentOS 7 Podman lives in the Extras repository and is installed with a single command:
# yum -y install podman
Podman is friendly to systemd: it can generate unit files for its containers, which is handy given that systemd runs as pid 1 on practically every modern distribution. Images are built with a separate tool, buildah; there is an analogue of docker-compose (podman-compose), and Podman can generate Kubernetes-compatible YAML, so the step from Podman to Kubernetes is a short one.
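A tiny buildah session to show the idea (the image and package are arbitrary):
# c=$(buildah from docker.io/library/centos:7)
# buildah run $c -- yum -y install epel-release
# buildah commit $c my-image
# podman run --rm localhost/my-image cat /etc/redhat-release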
Using Podman
There is no swarm mode (Kubernetes is the assumed answer when a cluster is needed), so we will assemble the same GitLab example with compose files on a single host.
Install podman-compose:
# yum -y install python3-pip
# pip3 install podman-compose
The compose file for podman differs slightly; for example, the data directories are bind-mounted straight from the host instead of going through named volumes.
version: '3.7'
services:
gitlab:
image: gitlab/gitlab-ce:latest
hostname: gitlab.example.com
restart: unless-stopped
environment:
GITLAB_OMNIBUS_CONFIG: |
gitlab_rails['gitlab_shell_ssh_port'] = 22222
ports:
- "80:80"
- "22222:22"
volumes:
- /srv/podman/gitlab/conf:/etc/gitlab
- /srv/podman/gitlab/data:/var/opt/gitlab
- /srv/podman/gitlab/logs:/var/log/gitlab
networks:
- gitlab
gitlab-runner:
image: gitlab/gitlab-runner:alpine
restart: unless-stopped
depends_on:
- gitlab
volumes:
- /srv/podman/gitlab/runner:/etc/gitlab-runner
- /var/run/docker.sock:/var/run/docker.sock
networks:
- gitlab
networks:
gitlab:
# podman-compose -f gitlab-runner.yml up -d
Check what is running:
# podman ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
da53da946c01 docker.io/gitlab/gitlab-runner:alpine run --user=gitlab... About a minute ago Up About a minute ago 0.0.0.0:22222->22/tcp, 0.0.0.0:80->80/tcp root_gitlab-runner_1
781c0103c94a docker.io/gitlab/gitlab-ce:latest /assets/wrapper About a minute ago Up About a minute ago 0.0.0.0:22222->22/tcp, 0.0.0.0:80->80/tcp root_gitlab_1
There is no systemd or Kubernetes integration yet, the containers simply run grouped into a pod, so let's see what Podman can generate for systemd and Kubernetes. First find out the pod id:
# podman pod ls
POD ID NAME STATUS CREATED # OF CONTAINERS INFRA ID
71fc2b2a5c63 root Running 11 minutes ago 3 db40ab8bf84b
# podman generate kube 71fc2b2a5c63
# Generation of Kubernetes YAML is still under development!
#
# Save the output of this file and use kubectl create -f to import
# it into Kubernetes.
#
# Created with podman-1.6.4
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: "2020-07-29T19:22:40Z"
labels:
app: root
name: root
spec:
containers:
- command:
- /assets/wrapper
env:
- name: PATH
value: /opt/gitlab/embedded/bin:/opt/gitlab/bin:/assets:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
- name: TERM
value: xterm
- name: HOSTNAME
value: gitlab.example.com
- name: container
value: podman
- name: GITLAB_OMNIBUS_CONFIG
value: |
gitlab_rails['gitlab_shell_ssh_port'] = 22222
- name: LANG
value: C.UTF-8
image: docker.io/gitlab/gitlab-ce:latest
name: rootgitlab1
ports:
- containerPort: 22
hostPort: 22222
protocol: TCP
- containerPort: 80
hostPort: 80
protocol: TCP
resources: {}
securityContext:
allowPrivilegeEscalation: true
capabilities: {}
privileged: false
readOnlyRootFilesystem: false
volumeMounts:
- mountPath: /var/opt/gitlab
name: srv-podman-gitlab-data
- mountPath: /var/log/gitlab
name: srv-podman-gitlab-logs
- mountPath: /etc/gitlab
name: srv-podman-gitlab-conf
workingDir: /
- command:
- run
- --user=gitlab-runner
- --working-directory=/home/gitlab-runner
env:
- name: PATH
value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
- name: TERM
value: xterm
- name: HOSTNAME
- name: container
value: podman
image: docker.io/gitlab/gitlab-runner:alpine
name: rootgitlab-runner1
resources: {}
securityContext:
allowPrivilegeEscalation: true
capabilities: {}
privileged: false
readOnlyRootFilesystem: false
volumeMounts:
- mountPath: /etc/gitlab-runner
name: srv-podman-gitlab-runner
- mountPath: /var/run/docker.sock
name: var-run-docker.sock
workingDir: /
volumes:
- hostPath:
path: /srv/podman/gitlab/runner
type: Directory
name: srv-podman-gitlab-runner
- hostPath:
path: /var/run/docker.sock
type: File
name: var-run-docker.sock
- hostPath:
path: /srv/podman/gitlab/data
type: Directory
name: srv-podman-gitlab-data
- hostPath:
path: /srv/podman/gitlab/logs
type: Directory
name: srv-podman-gitlab-logs
- hostPath:
path: /srv/podman/gitlab/conf
type: Directory
name: srv-podman-gitlab-conf
status: {}
# podman generate systemd 71fc2b2a5c63
# pod-71fc2b2a5c6346f0c1c86a2dc45dbe78fa192ea02aac001eb8347ccb8c043c26.service
# autogenerated by Podman 1.6.4
# Thu Jul 29 15:23:28 EDT 2020
[Unit]
Description=Podman pod-71fc2b2a5c6346f0c1c86a2dc45dbe78fa192ea02aac001eb8347ccb8c043c26.service
Documentation=man:podman-generate-systemd(1)
Requires=container-781c0103c94aaa113c17c58d05ddabf8df4bf39707b664abcf17ed2ceff467d3.service container-da53da946c01449f500aa5296d9ea6376f751948b17ca164df438b7df6607864.service
Before=container-781c0103c94aaa113c17c58d05ddabf8df4bf39707b664abcf17ed2ceff467d3.service container-da53da946c01449f500aa5296d9ea6376f751948b17ca164df438b7df6607864.service
[Service]
Restart=on-failure
ExecStart=/usr/bin/podman start db40ab8bf84bf35141159c26cb6e256b889c7a98c0418eee3c4aa683c14fccaa
ExecStop=/usr/bin/podman stop -t 10 db40ab8bf84bf35141159c26cb6e256b889c7a98c0418eee3c4aa683c14fccaa
KillMode=none
Type=forking
PIDFile=/var/run/containers/storage/overlay-containers/db40ab8bf84bf35141159c26cb6e256b889c7a98c0418eee3c4aa683c14fccaa/userdata/conmon.pid
[Install]
WantedBy=multi-user.target
# container-da53da946c01449f500aa5296d9ea6376f751948b17ca164df438b7df6607864.service
# autogenerated by Podman 1.6.4
# Thu Jul 29 15:23:28 EDT 2020
[Unit]
Description=Podman container-da53da946c01449f500aa5296d9ea6376f751948b17ca164df438b7df6607864.service
Documentation=man:podman-generate-systemd(1)
RefuseManualStart=yes
RefuseManualStop=yes
BindsTo=pod-71fc2b2a5c6346f0c1c86a2dc45dbe78fa192ea02aac001eb8347ccb8c043c26.service
After=pod-71fc2b2a5c6346f0c1c86a2dc45dbe78fa192ea02aac001eb8347ccb8c043c26.service
[Service]
Restart=on-failure
ExecStart=/usr/bin/podman start da53da946c01449f500aa5296d9ea6376f751948b17ca164df438b7df6607864
ExecStop=/usr/bin/podman stop -t 10 da53da946c01449f500aa5296d9ea6376f751948b17ca164df438b7df6607864
KillMode=none
Type=forking
PIDFile=/var/run/containers/storage/overlay-containers/da53da946c01449f500aa5296d9ea6376f751948b17ca164df438b7df6607864/userdata/conmon.pid
[Install]
WantedBy=multi-user.target
# container-781c0103c94aaa113c17c58d05ddabf8df4bf39707b664abcf17ed2ceff467d3.service
# autogenerated by Podman 1.6.4
# Thu Jul 29 15:23:28 EDT 2020
[Unit]
Description=Podman container-781c0103c94aaa113c17c58d05ddabf8df4bf39707b664abcf17ed2ceff467d3.service
Documentation=man:podman-generate-systemd(1)
RefuseManualStart=yes
RefuseManualStop=yes
BindsTo=pod-71fc2b2a5c6346f0c1c86a2dc45dbe78fa192ea02aac001eb8347ccb8c043c26.service
After=pod-71fc2b2a5c6346f0c1c86a2dc45dbe78fa192ea02aac001eb8347ccb8c043c26.service
[Service]
Restart=on-failure
ExecStart=/usr/bin/podman start 781c0103c94aaa113c17c58d05ddabf8df4bf39707b664abcf17ed2ceff467d3
ExecStop=/usr/bin/podman stop -t 10 781c0103c94aaa113c17c58d05ddabf8df4bf39707b664abcf17ed2ceff467d3
KillMode=none
Type=forking
PIDFile=/var/run/containers/storage/overlay-containers/781c0103c94aaa113c17c58d05ddabf8df4bf39707b664abcf17ed2ceff467d3/userdata/conmon.pid
[Install]
WantedBy=multi-user.target
Unfortunately, apart from starting the containers, the generated units do nothing else (they do not, for example, remove old containers when the service is restarted), so you will have to add such things yourself.
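A minimal sketch of wiring the units in by hand, assuming a Podman version that supports --files and --name for podman generate systemd (the 1.6.4 used above only prints to stdout, so there the output has to be split into files manually); the unit name is derived from the pod name, root in this case:
# cd /etc/systemd/system
# podman generate systemd --files --name 71fc2b2a5c63
# systemctl daemon-reload
# systemctl enable --now pod-root.service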
Overall Podman leaves a good impression: it is close enough to Docker to reuse existing habits and compose files, it does not need a daemon, and its orientation towards Kubernetes makes an eventual migration easier than with Docker itself.
rkt
One more engine deserves a mention: rkt, created by CoreOS back when Docker was taking off. After Red Hat acquired CoreOS, development effectively stopped and the project was archived; it offers nothing that Docker and Podman do not already cover, so it is listed here only for completeness.
Plash
Plash is another niche tool for building and running containers; like rkt, it does not change the overall picture, so it is only mentioned here.
So what should one choose? Kubernetes deserves an article of its own. If all you have is a small cluster of 3-5 nodes and you do not want to dig into Kubernetes, Docker with swarm mode gives you deployment, scaling and failover with very little extra machinery, and for a setup like the one above that is enough; otherwise Kubernetes is the reasonable target.
Podman does not try to do clustering by itself — its target is Kubernetes (together with buildah and related tooling). So my rule of thumb is: for a small self-managed cluster — Docker (with swarm mode), for containers on localhost — Podman, and beyond that — Kubernetes.
Personally, even though Docker is not going anywhere in the near future, in the longer term Podman looks like the more interesting option to me (it runs only on Linux, but that is where containers are run in practice anyway), together with Kubernetes for clusters.
P.S. The «Docker» course starts on the 3rd; among other things it covers working with Docker in practice and the associated best practices.