System: CentOS 7.9
1. Install Docker
yum -y install docker
systemctl start docker
systemctl restart docker.service
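Optionally (not part of the original steps), enable Docker at boot and confirm the daemon is up before pulling such a large image:
# optional: start Docker automatically at boot and verify the daemon is running
systemctl enable docker
systemctl status docker
docker info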
2. Pull the image
nohup docker pull registry.cn-hangzhou.aliyuncs.com/lhrbest/oracle19clhr_asm_db_12.2.0.3:2.0 &
docker images
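The image is roughly 47 GB, so the pull is backgrounded with nohup. To watch its progress, tail the default nohup.out in the current directory (an assumption; redirect nohup's output explicitly if you prefer):
# follow the background pull; Ctrl-C stops tailing, not the pull itself
tail -f nohup.out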
3. Create and run the container
docker run -itd -h lhr2019ocpasm --name 19c01 \
-p 1555:1521 -p 5555:5500 -p 55550:5501 -p 555:22 -p 3400:3389 \
--privileged=true \
lhrbest/oracle19clhr_asm_db_12.2.0.3:2.0 init
--Adding -itd keeps the container running after it starts; without it, the container exits as soon as its startup command finishes
--Adding -h sets the container's hostname
--Adding --name assigns a name to the container
--Adding --privileged=true gives root inside the container real root privileges
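A quick way to confirm these options took effect on the running container (using the container name 19c01 from the run command above):
# hostname and privileged flag as recorded by Docker
docker inspect -f '{{.Config.Hostname}} {{.HostConfig.Privileged}}' 19c01
# published port mappings in a readable form
docker port 19c01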
Check the container:
[root@oracle19c /]# docker ps -a
CONTAINER ID   IMAGE                                      COMMAND   CREATED              STATUS              PORTS                                                                                                                    NAMES
89d967300b0f   lhrbest/oracle19clhr_asm_db_12.2.0.3:2.0   "init"    About a minute ago   Up About a minute   0.0.0.0:555->22/tcp, 0.0.0.0:1555->1521/tcp, 0.0.0.0:3400->3389/tcp, 0.0.0.0:5555->5500/tcp, 0.0.0.0:55550->5501/tcp   19c01
[root@oracle19c /]#
4. Start the database
docker exec -it 19c01 bash
[root@lhr2019ocpasm /]# su - oracle
Last login: Mon Aug 24 09:45:25 CST 2020 on pts/1
[oracle@lhr2019ocpasm ~]$ sas
SQL*Plus: Release 19.0.0.0.0 - Production on Mon Mar 8 10:03:25 2021
Version 19.3.0.0.0
Copyright (c) 1982, 2019, Oracle. All rights reserved.
Connected to an idle instance.
SYS@lhrcdb2> startup
ORA-01078: failure in processing system parameters
LRM-00109: could not open parameter file '/u01/app/oracle/product/19.3.0/dbhome_1/dbs/initlhrcdb2.ora'
Solution: copy the parameter file of the existing lhr19sdb database to create one for the lhrcdb2 SID.
1)cd /u01/app/oracle/product/19.3.0/dbhome_1/dbs/
2)[oracle@lhr2019ocpasm dbs]$ cat initlhr19sdb.ora
*.audit_file_dest='/u01/app/oracle/admin/lhr19sdb/adump'
*.audit_trail='db'
*.compatible='19.0.0'
*.control_files='/u01/app/oracle/oradata/LHR19SDB/control01.ctl','/u01/app/oracle/flash_recovery_area/LHR19SDB/control02.ctl'
*.db_block_size=8192
*.db_name='lhr19sdb'
*.db_recovery_file_dest='/u01/app/oracle/flash_recovery_area'
*.db_recovery_file_dest_size=8256m
*.diagnostic_dest='/u01/app/oracle'
*.dispatchers='(PROTOCOL=TCP) (SERVICE=lhr19sdbXDB)'
*.nls_language='AMERICAN'
*.nls_territory='AMERICA'
*.open_cursors=300
*.pga_aggregate_target=150m
*.processes=640
*.remote_login_passwordfile='EXCLUSIVE'
*.sga_target=450m
*.undo_tablespace='UNDOTBS1'
[oracle@lhr2019ocpasm dbs]$ cp initlhr19sdb.ora initlhrcdb2.ora
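With the pfile in place, startup can read its parameters. As an optional follow-up (not shown in the original), the pfile can be converted into an spfile so that later parameter changes persist across restarts; a minimal sketch, run as the oracle user inside the container with the usual environment set:
sqlplus / as sysdba <<'EOF'
-- build an spfile from the copied pfile, then start the instance with it
create spfile from pfile='/u01/app/oracle/product/19.3.0/dbhome_1/dbs/initlhrcdb2.ora';
startup
EOF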
SYS@lhrcdb2> show con_name
CON_NAME
------------------------------
lhr19sdb
CON_NAME is not CDB$ROOT, so the multitenant (CDB/PDB) option is apparently not in use; this is a non-CDB database.
SYS@lhrcdb2> select pdb_name from cdb_pdbs;
no rows selected
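To confirm that this really is a non-CDB, v$database can be checked as well (its CDB column is NO for a non-CDB); a quick one-off check from the shell:
sqlplus -s / as sysdba <<'EOF'
-- CDB = NO means a non-container (non-CDB) database
select name, cdb, open_mode from v$database;
EOF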
5. Start the listener and register the database
lsnrctl start
vi /u01/app/oracle/product/19.3.0/dbhome_1/network/admin/tnsnames.ora
test =
  (DESCRIPTION =
    (ADDRESS_LIST =
      (ADDRESS = (PROTOCOL = TCP)(HOST = lhr2019ocpasm)(PORT = 1521))
    )
    (CONNECT_DATA =
      (SERVICE_NAME = lhr19sdb)
    )
  )
The SQL*Plus command show parameter service_names shows the service name(s) the database registers.
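To verify that the listener knows about the lhr19sdb service and that the new tnsnames alias resolves, the standard checks are (dynamic registration can take a minute; alter system register forces it):
# is the service registered with the listener?
lsnrctl status | grep -i lhr19sdb
# does the tnsnames alias resolve and reach the listener?
tnsping test
# force service registration if it has not appeared yet
sqlplus -s / as sysdba <<'EOF'
alter system register;
EOF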
6. Client login: a small problem encountered
ORA-28040: No matching authentication protocol
vi /u01/app/oracle/product/19.3.0/dbhome_1/network/admin/sqlnet.ora
Append SQLNET.ALLOWED_LOGON_VERSION_SERVER=8 at the end (SQLNET.ALLOWED_LOGON_VERSION is the pre-12c name of the same setting), then reconnect from the client.
Test login: test/TEST
Connect string: 8.136.199.98:1555/lhr19sdb (test environment, destroyed at any time; no need to try it)
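From a client machine the connection can then be tested with an EZConnect string; this assumes the test user has already been created and granted CREATE SESSION, which the notes above do not show:
# EZConnect: user/password@//host:port/service_name
sqlplus test/TEST@//8.136.199.98:1555/lhr19sdb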
7. Remove the container and image
[root@oracle19c ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
docker.io/lhrbest/oracle19clhr_asm_db_12.2.0.3 2.0 433d38c3c0b1 6 months ago 47.5 GB
registry.cn-hangzhou.aliyuncs.com/lhrbest/oracle19clhr_asm_db_12.2.0.3 2.0 433d38c3c0b1 6 months ago 47.5 GB
[root@oracle19c ~]# docker rmi 433d38c3c0b1
Error response from daemon: conflict: unable to delete 433d38c3c0b1 (must be forced) - image is referenced in multiple repositories
[root@oracle19c ~]# docker rmi -f 433d38c3c0b1
Untagged: docker.io/lhrbest/oracle19clhr_asm_db_12.2.0.3:2.0
Untagged: docker.io/lhrbest/oracle19clhr_asm_db_12.2.0.3@sha256:a3779200fe0e8d1fd663770f3fa139696fd36f7653a9d42dc1a82c189a14e8f8
Untagged: registry.cn-hangzhou.aliyuncs.com/lhrbest/oracle19clhr_asm_db_12.2.0.3:2.0
Untagged: registry.cn-hangzhou.aliyuncs.com/lhrbest/oracle19clhr_asm_db_12.2.0.3@sha256:a3779200fe0e8d1fd663770f3fa139696fd36f7653a9d42dc1a82c189a14e8f8
Deleted: sha256:433d38c3c0b12dc2c150fcd51c599d87d29e8276626a1a0dcfdc1e9f5176604d
Deleted: sha256:37e758819e9b41688d16d7b68f8e546cbc6291c91d90a718e44f6d118a2437c7
[root@oracle19c ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
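The -f flag was needed because the same image ID carries two repository tags. A fuller cleanup normally removes the container first and then each tag; a sketch, with the container name 19c01 assumed from the earlier run command:
# remove the container, then the image tags (or force-remove by image ID as above)
docker rm -f 19c01
docker rmi lhrbest/oracle19clhr_asm_db_12.2.0.3:2.0
docker rmi registry.cn-hangzhou.aliyuncs.com/lhrbest/oracle19clhr_asm_db_12.2.0.3:2.0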
[root@oracle19c ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 3.8G 0 3.8G 0% /dev
tmpfs 3.8G 0 3.8G 0% /dev/shm
tmpfs 3.8G 520K 3.8G 1% /run
tmpfs 3.8G 0 3.8G 0% /sys/fs/cgroup
/dev/vda1 99G 2.6G 92G 3% /
tmpfs 768M 0 768M 0% /run/user/0
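If dangling images, stopped containers, or unused volumes are still left behind, they can be reclaimed with Docker's prune commands (optional; not part of the original steps):
# remove stopped containers, dangling images, and unused networks (asks for confirmation)
docker system prune
# remove unused local volumes
docker volume prune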