Kafka 日志自动删除

#!/usr/bin/env bash
# Automatic cleanup of rotated Kafka log files, by age and by size.

# Max number of rotated files to keep.
# NOTE(review): currently unused — retain_files_num below is an empty stub.
RETAIN_FILES_NUM=72
# Rotated files older than this many hours are deleted by retain_files_date.
RETAIN_FILES_TIME=72
# Size threshold in MB; 1024 MB = 1 GiB, matching the "contains G in ls -lh"
# check used by retain_files_size.
RETAIN_FILE_MB=1024
function retain_files_num() {
    # Placeholder: retention-by-file-count is not implemented yet.
    # Emits a single empty line, exactly like the original stub.
    printf '\n'
}
function retain_files_date(){
    # Delete rotated log files older than RETAIN_FILES_TIME hours.
    #
    # Arguments: one or more log-name prefixes (e.g. "server"); rotated files
    # are expected to be named <prefix>.log.YYYY-MM-DD-HH.
    # The timestamp is zero-padded, so lexicographic order equals
    # chronological order and a plain string compare against the cutoff works.
    local file_name log_file delete_date log_file_date
    # Cutoff stamp: anything strictly older than this gets removed.
    # Computed once — it was loop-invariant in the original, too.
    delete_date=$(date +%Y-%m-%d-%H -d "${RETAIN_FILES_TIME} hours ago")
    for file_name in "$@"; do
        # Iterate the glob directly instead of parsing `ls` output; this is
        # safe for filenames with spaces and for the no-match case.
        for log_file in "${file_name}".log.20*; do
            [[ -e "${log_file}" ]] || continue   # glob matched nothing
            # Timestamp is everything after the last dot (same field the
            # original extracted with awk -F . '{print $(NF)}').
            log_file_date=${log_file##*.}
            if [[ "${log_file_date}" < "${delete_date}" ]]; then
                echo "当前日志文件:${log_file}, 保存时间已超过${RETAIN_FILES_TIME}个小时,删除中……"
                rm -f -- "${log_file}"
            fi
        done
    done
}

function retain_files_size(){
    # For each log prefix, keep at most ONE over-sized rotated file — the
    # oldest, preserved as the incident evidence — and delete every newer
    # over-sized one. Files at/above RETAIN_FILE_MB megabytes count as big
    # (default 1024 MB = 1 GiB, same threshold the original's `grep G`
    # on `ls -lh` approximated).
    #
    # Fixes vs. original: numeric comparison instead of the string compare
    # `[[ $n > 1 ]]`; real byte sizes from stat(1) instead of grepping
    # `ls -lh` output (which false-positives on any line containing "G");
    # glob iteration instead of parsing ls (safe for spaces / no match).
    # Rotated names are <prefix>.log.YYYY-MM-DD-HH, so glob order is
    # chronological, oldest first.
    local file_name log_file size_mb big_file_num seen
    for file_name in "$@"; do
        # Pass 1: count over-sized rotated files for this prefix.
        big_file_num=0
        for log_file in "${file_name}".log.20*; do
            [[ -e "${log_file}" ]] || continue   # glob matched nothing
            size_mb=$(( $(stat -c %s -- "${log_file}") / 1024 / 1024 ))
            if (( size_mb >= RETAIN_FILE_MB )); then
                big_file_num=$((big_file_num + 1))
            fi
        done
        if (( big_file_num > 1 )); then
            # Pass 2: skip the first (oldest) big file, delete the rest.
            seen=0
            for log_file in "${file_name}".log.20*; do
                [[ -e "${log_file}" ]] || continue
                size_mb=$(( $(stat -c %s -- "${log_file}") / 1024 / 1024 ))
                (( size_mb >= RETAIN_FILE_MB )) || continue
                if (( seen >= 1 )); then
                    echo "当前日志文件:${log_file}, 大小已超过${RETAIN_FILE_MB}M,删除中……"
                    rm -f -- "${log_file}"
                fi
                seen=$((seen + 1))
            done
        fi
        if (( big_file_num == 1 )); then
            echo "剩余1个超过${RETAIN_FILE_MB}M的文件,请检查文件过大内容,如有问题解决问题后清除。"
        fi
        echo "${file_name}.log的保留文件大小正常"
    done
}
# Prefixes of the Kafka logs to clean (server.log.*, controller.log.*, …).
KAFKA_LOG_PREFIXES=(server controller kafka-authorizer kafka-request log-cleaner state-change)
retain_files_date "${KAFKA_LOG_PREFIXES[@]}"
retain_files_size "${KAFKA_LOG_PREFIXES[@]}"

  

上一篇:C# EF 加密连接数据库连接字符串


下一篇:iView之清空选择框