Please credit the source when reposting: http://blog.csdn.net/linuxfu/article/details/61915473
The motivation for reading the xlog source code was to use xlog in an existing project. The project already had its own log-handling logic, and xlog needed to fit into that logic so the replacement could happen smoothly. That meant making a few modifications to xlog, for example:
– changing xlog's log-cleanup behaviour;
– changing the compression scheme;
– changing xlog's default log format;
– adding an encryption scheme.
After reading through the code analysis below, it should be clear how to make each of these changes.
A few naming conventions used throughout the code:
– a sg_ prefix marks a static global variable;
– a trailing underscore, as in static PtrBuffer buff_, marks a member variable;
– a leading double underscore, as in bool __Reset();, marks a member function (the same prefix is also used for file-local helper functions);
– a leading single underscore, as in AutoBuffer& _buff, marks a local variable or function parameter.
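A minimal illustrative snippet (with made-up names, not taken from the xlog source) that puts all four conventions side by side:

#include <string>

static std::string sg_logdir;                 // sg_: static global variable

class DemoBuffer {                            // hypothetical class, for illustration only
  public:
    bool __Reset();                           // __: member (or file-local) function
    void Write(const std::string& _buff);     // leading _: local variable / parameter
  private:
    std::string buff_;                        // trailing _: member variable
};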
Let's start the analysis from the entry points. There are essentially just these two native functions; the concrete JNI bindings are defined in Java2C_Xlog.cc.
public static native void appenderOpen(int level, int mode, String cacheDir, String logDir, String nameprefix);
public static native void logWrite2(int level, String tag, String filename, String funcname, int line, int pid, long tid, long maintid, String log);

Taking logWrite2 as the example, it essentially just calls xlogger_Write. appenderOpen in turn calls appender_open_with_cache.
DEFINE_FIND_STATIC_METHOD(KXlog_logWrite2, KXlog, "logWrite2", "(ILjava/lang/String;Ljava/lang/String;Ljava/lang/String;IIJJLjava/lang/String;)V")
JNIEXPORT void JNICALL Java_com_tencent_mars_xlog_Xlog_logWrite2
    (JNIEnv *env, jclass, int _level, jstring _tag, jstring _filename, jstring _funcname,
     jint _line, jint _pid, jlong _tid, jlong _maintid, jstring _log) {
    if (!xlogger_IsEnabledFor((TLogLevel)_level)) {
        return;
    }

    XLoggerInfo xlog_info;
    gettimeofday(&xlog_info.timeval, NULL);
    xlog_info.level = (TLogLevel)_level;
    xlog_info.line = (int)_line;
    xlog_info.pid = (int)_pid;
    xlog_info.tid = LONGTHREADID2INT(_tid);
    xlog_info.maintid = LONGTHREADID2INT(_maintid);

    const char* tag_cstr = NULL;
    const char* filename_cstr = NULL;
    const char* funcname_cstr = NULL;
    const char* log_cstr = NULL;

    if (NULL != _tag) {
        tag_cstr = env->GetStringUTFChars(_tag, NULL);
    }
    if (NULL != _filename) {
        filename_cstr = env->GetStringUTFChars(_filename, NULL);
    }
    if (NULL != _funcname) {
        funcname_cstr = env->GetStringUTFChars(_funcname, NULL);
    }
    if (NULL != _log) {
        log_cstr = env->GetStringUTFChars(_log, NULL);
    }

    xlog_info.tag = NULL == tag_cstr ? "" : tag_cstr;
    xlog_info.filename = NULL == filename_cstr ? "" : filename_cstr;
    xlog_info.func_name = NULL == funcname_cstr ? "" : funcname_cstr;

    xlogger_Write(&xlog_info, NULL == log_cstr ? "NULL == log" : log_cstr);

    if (NULL != _tag) {
        env->ReleaseStringUTFChars(_tag, tag_cstr);
    }
    if (NULL != _filename) {
        env->ReleaseStringUTFChars(_filename, filename_cstr);
    }
    if (NULL != _funcname) {
        env->ReleaseStringUTFChars(_funcname, funcname_cstr);
    }
    if (NULL != _log) {
        env->ReleaseStringUTFChars(_log, log_cstr);
    }
}

Let's first look at appender_open_with_cache. It starts by deleting expired logs from _cachedir; the deletion is based on each file's last-modified time, and anything older than 10 days is removed. Note that this can also wipe every file inside sub-directories, which is not ideal; deleting only files with the log extension would be more reasonable (a hedged sketch of that idea follows the next code block). __move_old_files then moves log files from _cachedir to _logdir; it matches on a filename prefix and suffix, and only matching files are moved over.
void appender_open_with_cache(TAppenderMode _mode, const std::string& _cachedir, const std::string& _logdir, const char* _nameprefix) {
    assert(!_cachedir.empty());
    assert(!_logdir.empty());
    assert(_nameprefix);

    sg_logdir = _logdir;

    if (!_cachedir.empty()) {
        sg_cache_logdir = _cachedir;
        boost::filesystem::create_directories(_cachedir);
        __del_timeout_file(_cachedir);   // 1. delete expired logs in _cachedir
        // "_nameprefix" must explicitly convert to "std::string", or when the thread is ready to run, "_nameprefix" has been released.
        Thread(boost::bind(&__move_old_files, _cachedir, _logdir, std::string(_nameprefix))).start_after(3 * 60 * 1000);   // 2. start a thread to move the old files
    }

    appender_open(_mode, _logdir.c_str(), _nameprefix);
}

static void __del_timeout_file(const std::string& _log_path) {
    time_t now_time = time(NULL);

    boost::filesystem::path path(_log_path);

    if (boost::filesystem::exists(path) && boost::filesystem::is_directory(path)) {
        boost::filesystem::directory_iterator end_iter;
        for (boost::filesystem::directory_iterator iter(path); iter != end_iter; ++iter) {
            time_t fileModifyTime = boost::filesystem::last_write_time(iter->path());

            if (now_time > fileModifyTime && now_time - fileModifyTime > kMaxLogAliveTime) {   // 1. delete anything older than kMaxLogAliveTime
                if (boost::filesystem::is_regular_file(iter->status())) {
                    boost::filesystem::remove(iter->path());
                } else if (boost::filesystem::is_directory(iter->status())) {   // 2. note: this may delete every file inside the sub-directory
                    __del_files(iter->path().string());
                }
            }
        }
    }
}

static void __del_files(const std::string& _forder_path) {
    boost::filesystem::path path(_forder_path);

    if (!boost::filesystem::is_directory(path)) {
        return;
    }

    boost::filesystem::directory_iterator end_iter;
    for (boost::filesystem::directory_iterator iter(path); iter != end_iter; ++iter) {
        if (boost::filesystem::is_regular_file(iter->status())) {
            boost::filesystem::remove(iter->path());
        }
    }
}
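As noted above, when __del_timeout_file hits a sub-directory it removes every regular file inside it. A minimal, purely hypothetical sketch of the suggested improvement — deleting only files whose name ends with the log extension — could look like this (kLogExt and __del_files_with_ext are made-up names, not from the xlog source):

#include <string>
#include <boost/filesystem.hpp>

static const char* const kLogExt = ".xlog";   // assumed extension, adjust to the real one

static void __del_files_with_ext(const std::string& _folder_path) {
    boost::filesystem::path path(_folder_path);
    if (!boost::filesystem::is_directory(path)) {
        return;
    }
    boost::filesystem::directory_iterator end_iter;
    for (boost::filesystem::directory_iterator iter(path); iter != end_iter; ++iter) {
        if (boost::filesystem::is_regular_file(iter->status())
            && iter->path().extension().string() == kLogExt) {   // leave non-log files alone
            boost::filesystem::remove(iter->path());
        }
    }
}

Hooking this in would mean calling it from __del_timeout_file in place of __del_files (and optionally applying the same extension check to the regular-file branch), so unrelated files in the directory are not touched.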
Next, let's look at appender_open. Every time the appender is opened it also writes a block of startup information (build info, timing, mode) into the log, as the code below shows.

void appender_open(TAppenderMode _mode, const char* _dir, const char* _nameprefix) {
    assert(_dir);
    assert(_nameprefix);

    if (!sg_log_close) {
        __writetips2file("appender has already been opened. _dir:%s _nameprefix:%s", _dir, _nameprefix);
        return;
    }

    xlogger_SetAppender(&xlogger_appender);

    //mkdir(_dir, S_IRWXU|S_IRWXG|S_IRWXO);
    boost::filesystem::create_directories(_dir);
    tickcount_t tick;
    tick.gettickcount();
    __del_timeout_file(_dir);   // 1. delete expired files

    tickcountdiff_t del_timeout_file_time = tickcount_t().gettickcount() - tick;

    tick.gettickcount();
    char mmap_file_path[512] = {0};
    snprintf(mmap_file_path, sizeof(mmap_file_path), "%s/%s.mmap2", sg_cache_logdir.empty() ? _dir : sg_cache_logdir.c_str(), _nameprefix);

    bool use_mmap = false;
    if (OpenMmapFile(mmap_file_path, kBufferBlockLength, sg_mmmap_file)) {   // 2. create the mmap file
        sg_log_buff = new LogBuffer(sg_mmmap_file.data(), kBufferBlockLength, false);   // 3. and map it into memory
        use_mmap = true;
    } else {
        char* buffer = new char[kBufferBlockLength];   // 4. if the mmap file cannot be created, fall back to a plain heap buffer
        sg_log_buff = new LogBuffer(buffer, kBufferBlockLength, false);
        use_mmap = false;
    }

    if (NULL == sg_log_buff->GetData().Ptr()) {
        if (use_mmap && sg_mmmap_file.is_open())
            CloseMmapFile(sg_mmmap_file);
        return;
    }

    AutoBuffer buffer;
    sg_log_buff->Flush(buffer);   // 5. if the mmap already holds content, flush it into buffer; after a crash the buffered logs survive in the mmap and are recovered here on the next start

    ScopedLock lock(sg_mutex_log_file);
    sg_logdir = _dir;
    sg_logfileprefix = _nameprefix;
    sg_log_close = false;
    appender_setmode(_mode);
    lock.unlock();

    char mark_info[512] = {0};
    get_mark_info(mark_info, sizeof(mark_info));

    if (buffer.Ptr()) {
        __writetips2file("~~~~~ begin of mmap ~~~~~\n");
        __log2file(buffer.Ptr(), buffer.Length());   // 6. write the recovered buffer into the log file
        __writetips2file("~~~~~ end of mmap ~~~~~%s\n", mark_info);
    }

    tickcountdiff_t get_mmap_time = tickcount_t().gettickcount() - tick;

    // 7. write the startup header content
    char appender_info[728] = {0};
    snprintf(appender_info, sizeof(appender_info), "^^^^^^^^^^" __DATE__ "^^^" __TIME__ "^^^^^^^^^^%s", mark_info);

    xlogger_appender(NULL, appender_info);
    char logmsg[64] = {0};
    snprintf(logmsg, sizeof(logmsg), "del time out files time: %" PRIu64, (int64_t)del_timeout_file_time);
    xlogger_appender(NULL, logmsg);

    snprintf(logmsg, sizeof(logmsg), "get mmap time: %" PRIu64, (int64_t)get_mmap_time);
    xlogger_appender(NULL, logmsg);

    xlogger_appender(NULL, "MARS_URL: " MARS_URL);
    xlogger_appender(NULL, "MARS_PATH: " MARS_PATH);
    xlogger_appender(NULL, "MARS_REVISION: " MARS_REVISION);
    xlogger_appender(NULL, "MARS_BUILD_TIME: " MARS_BUILD_TIME);
    xlogger_appender(NULL, "MARS_BUILD_JOB: " MARS_TAG);

    snprintf(logmsg, sizeof(logmsg), "log appender mode:%d, use mmap:%d", (int)_mode, use_mmap);
    xlogger_appender(NULL, logmsg);

    BOOT_RUN_EXIT(appender_close);
}

Next comes xlogger_appender, which dispatches to __appender_sync or __appender_async depending on the mode. The function also keeps a thread-local s_recursion_str; judging from the code, it buffers a fatal "recursive calls" message when xlogger_appender re-enters itself, and that message is flushed to the file on the next normal call. It does not affect the main flow of the analysis.
void xlogger_appender(const XLoggerInfo* _info, const char* _log) {
    if (sg_log_close) return;

    SCOPE_ERRNO();

    DEFINE_SCOPERECURSIONLIMIT(recursion);
    static Tss s_recursion_str(free);

    if (sg_consolelog_open) ConsoleLog(_info, _log);   // 1. if console logging is enabled, also output the log via logcat on Android

    if (2 <= (int)recursion.Get() && NULL == s_recursion_str.get()) {
        if ((int)recursion.Get() > 10) return;
        char* strrecursion = (char*)calloc(16 * 1024, 1);
        s_recursion_str.set((void*)(strrecursion));

        XLoggerInfo info = *_info;
        info.level = kLevelFatal;

        char recursive_log[256] = {0};
        snprintf(recursive_log, sizeof(recursive_log), "ERROR!!! xlogger_appender Recursive calls!!!, count:%d", (int)recursion.Get());

        PtrBuffer tmp(strrecursion, 0, 16 * 1024);
        log_formater(&info, recursive_log, tmp);

        strncat(strrecursion, _log, 4096);
        strrecursion[4095] = '\0';
        ConsoleLog(&info, strrecursion);
    } else {
        if (NULL != s_recursion_str.get()) {
            char* strrecursion = (char*)s_recursion_str.get();
            s_recursion_str.set(NULL);

            __writetips2file(strrecursion);
            free(strrecursion);
        }

        if (kAppednerSync == sg_mode)
            __appender_sync(_info, _log);    // 2. synchronous write
        else
            __appender_async(_info, _log);   // 3. asynchronous write
    }
}

In synchronous mode, __appender_sync first formats the log, then encrypts it, and then writes it straight to the file:
static void __appender_sync(const XLoggerInfo* _info, const char* _log) {
    char temp[16 * 1024] = {0};     // tell perry,ray if you want modify size.
    PtrBuffer log(temp, 0, sizeof(temp));
    log_formater(_info, _log, log);   // 1. format the log

    char buffer_crypt[16 * 1024] = {0};
    size_t len = 16 * 1024;
    if (!LogBuffer::Write(log.Ptr(), log.Length(), buffer_crypt, len)) return;   // 2. encrypt the log

    __log2file(buffer_crypt, len);   // 3. write it to the log file
}

bool LogBuffer::Write(const void* _data, size_t _inputlen, void* _output, size_t& _len) {
    if (NULL == _data || NULL == _output || 0 == _inputlen || _len <= (size_t)s_log_crypt->GetHeaderLen()) {
        return false;
    }

    s_log_crypt->CryptSyncLog((char*)_data, _inputlen, (char*)_output, _len);   // currently there is no real encryption, the buffer is simply copied; you have to add the crypto logic yourself

    return true;
}
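The post's stated goal of adding encryption would plug in exactly here. As a purely illustrative sketch (not from the xlog source, and not a real cipher), the plain copy inside CryptSyncLog could be replaced by something like a symmetric XOR transform; a production implementation should use a proper algorithm and make the matching change on the decode side:

#include <cstddef>

// Hypothetical helper: XOR the payload with a single-byte key instead of a
// plain copy. Applying the same transform again restores the original data.
// This is only a placeholder for a real cipher such as AES or TEA.
static void xor_transform(const char* _in, size_t _len, char* _out, char _key) {
    for (size_t i = 0; i < _len; ++i) {
        _out[i] = _in[i] ^ _key;
    }
}

Inside CryptSyncLog, the memcpy(_output + GetHeaderLen(), _log_data, len) line would then become a call such as xor_transform(_log_data, len, _output + GetHeaderLen(), key), with the inverse applied by whatever tool decodes the log files.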
Asynchronous mode is similar, except the log is only written into the buffer; the write thread is only notified once the buffer exceeds one third of its total length:

static void __appender_async(const XLoggerInfo* _info, const char* _log) {
    ScopedLock lock(sg_mutex_buffer_async);
    if (NULL == sg_log_buff) return;

    char temp[16 * 1024] = {0};   //tell perry,ray if you want modify size.
    PtrBuffer log_buff(temp, 0, sizeof(temp));
    log_formater(_info, _log, log_buff);   // 1. format the log

    if (sg_log_buff->GetData().Length() >= kBufferBlockLength * 4 / 5) {   // 2. this is treated as an abnormal situation, so it is recorded
        int ret = snprintf(temp, sizeof(temp), "[F][ sg_buffer_async.Length() >= BUFFER_BLOCK_LENTH*4/5, len: %d\n", (int)sg_log_buff->GetData().Length());
        log_buff.Length(ret, ret);
    }

    if (!sg_log_buff->Write(log_buff.Ptr(), (unsigned int)log_buff.Length())) return;   // 3. write log_buff into sg_log_buff

    if (sg_log_buff->GetData().Length() >= kBufferBlockLength * 1 / 3 || (NULL != _info && kLevelFatal == _info->level)) {   // 4. once the buffer is >= 1/3 of its total length (or the log is fatal), notify the write thread
        sg_cond_buffer_async.notifyAll();
    }
}

bool LogBuffer::Write(const void* _data, size_t _length) {
    if (NULL == _data || 0 == _length) {
        return false;
    }

    if (buff_.Length() == 0) {
        if (!__Reset()) return false;
    }

    size_t before_len = buff_.Length();
    size_t write_len = _length;

    if (is_compress_) {   // 1. compress, one log entry at a time
        cstream_.avail_in = (uInt)_length;
        cstream_.next_in = (Bytef*)_data;

        uInt avail_out = (uInt)(buff_.MaxLength() - buff_.Length());
        cstream_.next_out = (Bytef*)buff_.PosPtr();
        cstream_.avail_out = avail_out;

        if (Z_OK != deflate(&cstream_, Z_SYNC_FLUSH)) {
            return false;
        }

        write_len = avail_out - cstream_.avail_out;
    } else {
        buff_.Write(_data, _length);   // 2. no compression
    }

    char crypt_buffer[4096] = {0};
    size_t crypt_buffer_len = sizeof(crypt_buffer);

    s_log_crypt->CryptAsyncLog((char*)buff_.Ptr() + before_len, write_len, crypt_buffer, crypt_buffer_len);   // 3. encrypt

    uint16_t single_log_len = crypt_buffer_len;
    buff_.Write(&single_log_len, sizeof(single_log_len), before_len);   // 4. the length changed after encryption, so record the per-entry length first
    before_len += sizeof(single_log_len);

    buff_.Write(crypt_buffer, crypt_buffer_len, before_len);   // 5. then append the encrypted log content after it
    before_len += crypt_buffer_len;

    buff_.Length(before_len, before_len);   // 6. update the length of buff_

    s_log_crypt->UpdateLogLen((char*)buff_.Ptr(), (uint32_t)crypt_buffer_len + sizeof(single_log_len));   // 7. update the total length stored in the header of sg_log_buff

    return true;
}

Once sg_cond_buffer_async is notified, a dedicated thread in xlog wakes up to write the log; as the code shows, even without a notification it flushes to the file automatically every 15 minutes:
static void __async_log_thread() {
    while (true) {
        ScopedLock lock_buffer(sg_mutex_buffer_async);
        if (NULL == sg_log_buff) break;
        AutoBuffer tmp;
        sg_log_buff->Flush(tmp);   // 1. flush the contents of sg_log_buff into tmp
        lock_buffer.unlock();

        if (NULL != tmp.Ptr()) __log2file(tmp.Ptr(), tmp.Length());   // 2. write the log to file

        if (sg_log_close) break;

        sg_cond_buffer_async.wait(15 * 60 * 1000);
    }
}

The sg_log_buff->Flush(tmp) path is worth a closer look:
void LogBuffer::Flush(AutoBuffer& _buff) {
    if (Z_NULL != cstream_.state) {
        deflateEnd(&cstream_);
    }

    if (s_log_crypt->GetLogLen((char*)buff_.Ptr(), buff_.Length()) == 0) {
        __Clear();
        return;
    }

    __Flush();   // do the actual flush
    _buff.Write(buff_.Ptr(), buff_.Length());

    __Clear();
}

void LogBuffer::__Flush() {   // the crypt layer defines a header/tailer format; finalize it here
    assert(buff_.Length() >= s_log_crypt->GetHeaderLen());

    s_log_crypt->UpdateLogHour((char*)buff_.Ptr());
    s_log_crypt->SetTailerInfo((char*)buff_.Ptr() + buff_.Length());
    buff_.Length(buff_.Length() + s_log_crypt->GetTailerLen(), buff_.Length() + s_log_crypt->GetTailerLen());
}

__log2file is where both the sync and async paths finally write to disk. Note the logic inside: there are two kinds of files, the formal log file (logfilepath) and the cache log file (logcachefilepath). When writing, it 1. writes into the cache log file first, 2. then appends that into the formal log file, and 3. then deletes the cache log file. If step 1 fails, it skips straight to writing the formal log file; if that also fails, it falls back to writing the cache file, and the cache contents are merged into the formal file later. All of this exists to avoid losing logs as far as possible:
static void __log2file(const void* _data, size_t _len) {
    if (NULL == _data || 0 == _len || sg_logdir.empty()) {
        return;
    }

    ScopedLock lock_file(sg_mutex_log_file);

    if (sg_cache_logdir.empty()) {   // 1. if sg_cache_logdir is empty, write straight into sg_logdir
        if (__openlogfile(sg_logdir)) {
            __writefile(_data, _len, sg_logfile);
            if (kAppednerAsync == sg_mode) {
                __closelogfile();
            }
        }
        return;
    }

    struct timeval tv;
    gettimeofday(&tv, NULL);
    char logcachefilepath[1024] = {0};

    __make_logfilename(tv, sg_cache_logdir, sg_logfileprefix.c_str(), LOG_EXT, logcachefilepath, 1024);   // 2. build logcachefilepath

    if (boost::filesystem::exists(logcachefilepath) && __openlogfile(sg_cache_logdir)) {   // 3. if logcachefilepath exists, open it and write into it
        __writefile(_data, _len, sg_logfile);   // 4. write into the cache file
        if (kAppednerAsync == sg_mode) {
            __closelogfile();
        }

        char logfilepath[1024] = {0};
        __make_logfilename(tv, sg_logdir, sg_logfileprefix.c_str(), LOG_EXT, logfilepath, 1024);
        if (__append_file(logcachefilepath, logfilepath)) {   // 5. append the cache file into the formal log file
            if (kAppednerSync == sg_mode) {
                __closelogfile();
            }
            remove(logcachefilepath);   // 6. then delete the cache log file
        }
    } else {   // 7. if opening the cache file fails, write straight into the formal log file
        bool write_sucess = false;
        bool open_success = __openlogfile(sg_logdir);
        if (open_success) {
            write_sucess = __writefile(_data, _len, sg_logfile);
            if (kAppednerAsync == sg_mode) {
                __closelogfile();
            }
        }

        if (!write_sucess) {   // 8. if the formal log file could not be written, close it and fall back to the cache log file
            if (open_success && kAppednerSync == sg_mode) {
                __closelogfile();
            }

            if (__openlogfile(sg_cache_logdir)) {
                __writefile(_data, _len, sg_logfile);
                if (kAppednerAsync == sg_mode) {
                    __closelogfile();
                }
            }
        }
    }
}

Flushing the buffer on demand also comes in synchronous and asynchronous flavours, very similar to what we saw above:
// flush the buffer synchronously
void appender_flush_sync() {
    if (kAppednerSync == sg_mode) {
        return;
    }

    ScopedLock lock_buffer(sg_mutex_buffer_async);   // take the lock

    if (NULL == sg_log_buff) return;

    AutoBuffer tmp;
    sg_log_buff->Flush(tmp);   // flush the contents of sg_log_buff into tmp

    lock_buffer.unlock();

    if (tmp.Ptr())  __log2file(tmp.Ptr(), tmp.Length());   // the real write happens in __log2file
}

// flush the buffer asynchronously
void appender_flush() {
    sg_cond_buffer_async.notifyAll();
}

log_formater formats a log entry: it writes _logbody into _log with a header containing the log level, timestamp, pid, tid (a * marks the main thread), TAG, filename, funcname and line number, followed by the actual log content. For example:

[D][2017-04-01 +8.0 16:23:11.888][21639, 1][test][filename, funcname, 121][hello wpr
void log_formater(const XLoggerInfo* _info, const char* _logbody, PtrBuffer& _log) {
    static const char* levelStrings[] = {
        "V",
        "D",  // debug
        "I",  // info
        "W",  // warn
        "E",  // error
        "F"   // fatal
    };

    assert((unsigned int)_log.Pos() == _log.Length());

    static int error_count = 0;
    static int error_size = 0;

    if (_log.MaxLength() <= _log.Length() + 5 * 1024) {  // allowd len(_log) <= 11K(16K - 5K)
        ++error_count;
        error_size = (int)strnlen(_logbody, 1024 * 1024);

        if (_log.MaxLength() >= _log.Length() + 128) {
            int ret = snprintf((char*)_log.PosPtr(), 1024, "[F]log_size <= 5*1024, err(%d, %d)\n", error_count, error_size);   // **CPPLINT SKIP**
            _log.Length(_log.Pos() + ret, _log.Length() + ret);
            _log.Write("");

            error_count = 0;
            error_size = 0;
        }

        assert(false);
        return;
    }

    if (NULL != _info) {   // 1. the log header
        const char* filename = ExtractFileName(_info->filename);
        char strFuncName[128] = {0};
        ExtractFunctionName(_info->func_name, strFuncName, sizeof(strFuncName));

        char temp_time[64] = {0};

        if (0 != _info->timeval.tv_sec) {
            time_t sec = _info->timeval.tv_sec;
            tm tm = *localtime((const time_t*)&sec);

#ifdef ANDROID
            snprintf(temp_time, sizeof(temp_time), "%d-%02d-%02d %+.1f %02d:%02d:%02d.%.3ld", 1900 + tm.tm_year, 1 + tm.tm_mon, tm.tm_mday,
                     tm.tm_gmtoff / 3600.0, tm.tm_hour, tm.tm_min, tm.tm_sec, _info->timeval.tv_usec / 1000);
#elif _WIN32
            snprintf(temp_time, sizeof(temp_time), "%d-%02d-%02d %+.1f %02d:%02d:%02d.%.3d", 1900 + tm.tm_year, 1 + tm.tm_mon, tm.tm_mday,
                     (-_timezone) / 3600.0, tm.tm_hour, tm.tm_min, tm.tm_sec, _info->timeval.tv_usec / 1000);
#else
            snprintf(temp_time, sizeof(temp_time), "%d-%02d-%02d %+.1f %02d:%02d:%02d.%.3d", 1900 + tm.tm_year, 1 + tm.tm_mon, tm.tm_mday,
                     tm.tm_gmtoff / 3600.0, tm.tm_hour, tm.tm_min, tm.tm_sec, _info->timeval.tv_usec / 1000);
#endif
        }

        // _log.AllocWrite(30*1024, false);
        int ret = snprintf((char*)_log.PosPtr(), 1024, "[%s][%s][%" PRIdMAX ", %" PRIdMAX "%s][%s][%s, %s, %d][",   // **CPPLINT SKIP**
                           _logbody ? levelStrings[_info->level] : levelStrings[kLevelFatal], temp_time,
                           _info->pid, _info->tid, _info->tid == _info->maintid ? "*" : "", _info->tag ? _info->tag : "",
                           filename, strFuncName, _info->line);

        assert(0 <= ret);
        _log.Length(_log.Pos() + ret, _log.Length() + ret);
        // memcpy((char*)_log.PosPtr() + 1, "\0", 1);

        assert((unsigned int)_log.Pos() == _log.Length());
    }

    if (NULL != _logbody) {   // 2. the log body
        // in android 64bit, in strnlen memchr, const unsigned char* end = p + n; > 4G!!!!! in stack array
        size_t bodylen = _log.MaxLength() - _log.Length() > 130 ? _log.MaxLength() - _log.Length() - 130 : 0;
        bodylen = bodylen > 0xFFFFU ? 0xFFFFU : bodylen;
        bodylen = strnlen(_logbody, bodylen);
        bodylen = bodylen > 0xFFFFU ? 0xFFFFU : bodylen;
        _log.Write(_logbody, bodylen);
    } else {
        _log.Write("error!! NULL==_logbody");
    }

    char nextline = '\n';

    if (*((char*)_log.PosPtr() - 1) != nextline) _log.Write(&nextline, 1);   // finally append a newline
}

As mentioned earlier, there is currently no real encryption step; the buffer is simply copied. The data does, however, follow a fixed layout, and xlog assembles it according to this format:

|magic start(char)|seq(uint16_t)|begin hour(char)|end hour(char)|length(uint32_t)|crypt key(uint32_t)|
void LogCrypt::CryptSyncLog(const char* const _log_data, size_t _input_len, char* _output, size_t& _output_len) {
    uint16_t seq = __GetSeq(false);
    uint32_t len = std::min(_input_len, _output_len - GetHeaderLen() - GetTailerLen());

    memcpy(_output + GetHeaderLen(), _log_data, len);   // copy the log content
    _output[GetHeaderLen() + len] = kMagicEnd;          // fill the tailer
    _output[0] = kMagicSyncStart;                       // fill the header magic
    memcpy(_output + 1, &seq, sizeof(seq));             // seq

    struct timeval tv;
    gettimeofday(&tv, 0);
    time_t sec = tv.tv_sec;
    tm tm_tmp = *localtime((const time_t*)&sec);
    char hour = (char)tm_tmp.tm_hour;
    memcpy(_output + 3, &hour, sizeof(hour));   // begin hour
    memcpy(_output + 4, &hour, sizeof(hour));   // end hour

    memcpy(_output + 5, &len, sizeof(len));     // length

    _output_len = GetHeaderLen() + GetTailerLen() + len;
}

The header and tailer of this crypt format:
/*
 * |magic start(char)|seq(uint16_t)|begin hour(char)|end hour(char)|length(uint32_t)|crypt key(uint32_t)|
 */
uint32_t LogCrypt::GetHeaderLen() {
    return sizeof(char) * 3 + sizeof(uint16_t) + sizeof(uint32_t) * 2;
}

uint32_t LogCrypt::GetTailerLen() {
    return sizeof(kMagicEnd);
}

static const char kMagicEnd = '\0';
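To make the layout concrete, here is a small, purely illustrative sketch (not part of xlog) that reads the 13-byte sync header back from a buffer. It assumes the field order and sizes shown in the comment above and ignores endianness concerns:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical decoder for the sync-mode header described above.
// Layout: |magic(1)|seq(2)|begin hour(1)|end hour(1)|length(4)|crypt key(4)| = 13 bytes.
struct SyncLogHeader {
    char     magic;
    uint16_t seq;
    char     begin_hour;
    char     end_hour;
    uint32_t length;      // payload length that follows the header
    uint32_t crypt_key;   // unused while "encryption" is a plain copy
};

static bool parse_sync_header(const char* _buf, size_t _buf_len, SyncLogHeader& _out) {
    const size_t kHeaderLen = 13;   // matches LogCrypt::GetHeaderLen()
    if (NULL == _buf || _buf_len < kHeaderLen) return false;
    _out.magic = _buf[0];
    memcpy(&_out.seq,       _buf + 1, sizeof(_out.seq));
    _out.begin_hour = _buf[3];
    _out.end_hour   = _buf[4];
    memcpy(&_out.length,    _buf + 5, sizeof(_out.length));
    memcpy(&_out.crypt_key, _buf + 9, sizeof(_out.crypt_key));
    return true;
}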
The buffer is (re)initialized in __Reset:

bool LogBuffer::__Reset() {
    __Clear();

    if (is_compress_) {
        cstream_.zalloc = Z_NULL;
        cstream_.zfree = Z_NULL;
        cstream_.opaque = Z_NULL;

        if (Z_OK != deflateInit2(&cstream_, Z_BEST_COMPRESSION, Z_DEFLATED, -MAX_WBITS, MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY)) {
            return false;
        }
    }

    s_log_crypt->SetHeaderInfo((char*)buff_.Ptr(), is_compress_);   // initialize the header on every reset
    buff_.Length(s_log_crypt->GetHeaderLen(), s_log_crypt->GetHeaderLen());

    return true;
}

A few impressions after reading the code:
1. There is a lot of special-case logic, and users who have not read the source carefully can easily stumble into a pit, for example the expired-log handling in appender_open_with_cache discussed above. A sensible default behaviour plus an interface for the user to override it would be better.
2. There are quite a few static global variables; this style hurts the reusability of the functions and makes the logic prone to duplication.
Overall it is still very good: once you roughly understand it, a few modifications turn it into a very powerful logging component and a real workhorse.

References
http://www.tuicool.com/articles/NJbUbiU