Migrate data by combining clickhouse-client with a Linux pipe. This method is not especially fast; it moved roughly 4 billion rows in one afternoon.
clickhouse-client --host 127.0.0.1 --password default -q "SELECT * FROM sgdatabase.historysignal2 FORMAT CSVWithNames" | clickhouse-client --host 127.0.0.1 --port 9000 --password default -q "INSERT INTO sgdatabase.historysignal FORMAT CSVWithNames"
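As an alternative (a sketch only: it assumes the target table already exists and that the source instance is reachable over the native port; the host, credentials, and table names below are the same placeholders as above), the copy can also be done entirely server-side with the remote() table function:
INSERT INTO sgdatabase.historysignal
SELECT * FROM remote('127.0.0.1:9000', 'sgdatabase', 'historysignal2', 'default', 'default');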
View cluster topology (distributed configuration)
SELECT * FROM system.clusters;
View build and version information
SELECT * FROM system.build_options
View currently executing SQL
SELECT * FROM system.processes
Rename TABLE1 to TABLE2
RENAME TABLE TABLE1 TO TABLE2;
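RENAME can also move a table into another database on the same server (a sketch; db1 and db2 are placeholder database names):
RENAME TABLE db1.TABLE1 TO db2.TABLE1;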
Delete data
ALTER TABLE db.test DELETE WHERE condition!=3;
Update data
ALTER TABLE db.test UPDATE name='ccc' WHERE createTime='2020/2/13';
Insert data from MySQL into ClickHouse
clickhouse-client --host 192.168.1.155 --password default --database=DB -q "INSERT INTO historysignal (ROOMID, ROOMNAME, SAMPLERID, SAMPLERNAME, EQUIPMENTID, EQUIPMENTNAME, SIGNALID, SIGNALNAME, RECORDTIME, FLOATVALUE, STRINGVALUE, MEANINGS, STATIONNAME, STATIONID, UNIT, UNIQUID, RECORDTIMETOMIN, RECORDTIMEINORDER, ESNAME) SELECT * FROM mysql('192.168.1.61:3306', 'DB', 'TABLE', 'mysql', 'mysql')"
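Before running the insert, it may be worth confirming that ClickHouse can reach MySQL at all (a sketch using the same placeholder address, database, table, and credentials as above):
SELECT count() FROM mysql('192.168.1.61:3306', 'DB', 'TABLE', 'mysql', 'mysql');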
Current connection counts (split into TCP and HTTP)
SELECT *
FROM system.metrics
WHERE metric LIKE '%Connection'
Currently running queries
SELECT
query_id,
user,
address,
query
FROM system.processes
ORDER BY query_id ASC
Query mutation operations (ALTER DELETE and ALTER UPDATE)
SELECT
database,
table,
mutation_id,
command,
create_time,
is_done
FROM system.mutations
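To focus on mutations that are still pending or have failed, a variant like the following can help (a sketch; latest_fail_reason stays empty while a mutation is simply still running):
SELECT
    database,
    table,
    mutation_id,
    command,
    create_time,
    latest_fail_reason
FROM system.mutations
WHERE is_done = 0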
Terminate a query or a mutation
KILL QUERY WHERE query_id = 'query_id';
KILL MUTATION WHERE mutation_id = 'mutation_id';
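KILL also accepts broader predicates and a SYNC modifier that waits until the statements have actually stopped (a sketch; 'default', 'db', and 'test' are placeholders):
KILL QUERY WHERE user = 'default' SYNC;
KILL MUTATION WHERE database = 'db' AND table = 'test';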
Storage space statistics: free and total space for each ClickHouse storage path
SELECT
name,
path,
formatReadableSize(free_space) AS free,
formatReadableSize(total_space) AS total,
formatReadableSize(keep_free_space) AS reserved
FROM system.disks
Disk usage per database
SELECT
database,
formatReadableSize(sum(bytes_on_disk)) AS on_disk
FROM system.parts
GROUP BY database
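A per-table variant of the same statistic, restricted to active parts so that parts already merged away are not counted (a sketch):
SELECT
    database,
    table,
    formatReadableSize(sum(bytes_on_disk)) AS on_disk
FROM system.parts
WHERE active
GROUP BY
    database,
    table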
Disk usage per column
Compressed size, uncompressed size, compression ratio, and per-row data size for each column
SELECT
database,
table,
column,
any(type),
sum(column_data_compressed_bytes) AS compressed,
sum(column_data_uncompressed_bytes) AS uncompressed,
round(uncompressed / compressed, 2) AS ratio,
compressed / sum(rows) AS bpr,
sum(rows)
FROM system.parts_columns
WHERE active AND database != 'system'
GROUP BY
database,
table,
column
ORDER BY
database ASC,
table ASC,
column ASC
Slow queries
SELECT
user,
client_hostname AS host,
client_name AS client,
formatDateTime(query_start_time, '%T') AS started,
query_duration_ms / 1000 AS sec,
round(memory_usage / 1048576) AS MEM_MB,
result_rows AS RES_CNT,
result_bytes / 1048576 AS RES_MB,
read_rows AS R_CNT,
round(read_bytes / 1048576) AS R_MB,
written_rows AS W_CNT,
round(written_bytes / 1048576) AS W_MB,
query
FROM system.query_log
WHERE type = 2
ORDER BY query_duration_ms DESC
LIMIT 10
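For queries that are still running rather than already logged, system.processes can be filtered by elapsed time instead (a sketch; the 60-second threshold is an arbitrary example):
SELECT
    query_id,
    user,
    elapsed,
    formatReadableSize(memory_usage) AS mem,
    query
FROM system.processes
WHERE elapsed > 60
ORDER BY elapsed DESC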
Replica alerting
The SQL below monitors replicas for alert conditions; each threshold can be adjusted to suit your environment.
SELECT database, table, is_leader, total_replicas, active_replicas
FROM system.replicas
WHERE is_readonly
OR is_session_expired
OR future_parts > 30
OR parts_to_check > 20
OR queue_size > 30
OR inserts_in_queue > 20
OR log_max_index - log_pointer > 20
OR total_replicas < 2
OR active_replicas < total_replicas
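Replication delay can be watched separately through the absolute_delay column of the same table (a sketch; the 300-second threshold is an arbitrary example):
SELECT
    database,
    table,
    absolute_delay
FROM system.replicas
WHERE absolute_delay > 300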
Install ClickHouse (server and client) on CentOS
sudo yum install yum-utils
sudo rpm --import https://repo.clickhouse.tech/CLICKHOUSE-KEY.GPG
sudo yum-config-manager --add-repo https://repo.clickhouse.tech/rpm/stable/x86_64
sudo yum install clickhouse-server clickhouse-client
Install the ClickHouse client on Ubuntu
sudo apt-get install apt-transport-https ca-certificates dirmngr
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4
echo "deb https://repo.clickhouse.tech/deb/stable/ main/" | sudo tee /etc/apt/sources.list.d/clickhouse.list
sudo apt-get update
sudo apt-get install -y clickhouse-client
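After either installation, connect with clickhouse-client and run a quick query to confirm the server is reachable and check its version (this assumes a locally running server with default settings):
SELECT version();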