前面写了个cassandra-appender,一个基于cassandra的logback插件。正是cassandra的分布式数据库属性才合适作为akka-cluster-sharding分布式应用的logger。所以,cassandra-appender核心功能就是对logback消息的存写部分了。同样,基于ES的logback-appender核心部分就是对ES的存写过程了。在ES里这个过程还附带了索引indexing过程。将来对历史消息的搜索、分析会更加方便。直接看看消息存写这部分elastic4s代码:
/**
 * Converts a logback `ILoggingEvent` into a flat field list and submits it to
 * Elasticsearch as an insert-only index request.
 *
 * @param event    the logback event being appended
 * @param client   elastic4s client used to execute the index request
 * @param idx      target Elasticsearch index name
 * @param appName  application name stamped on every record
 * @param ip       host IP stamped on every record
 * @param hostName host name stamped on every record
 * @param default  JSON map of fallback fields, merged in when the log message
 *                 itself is not parseable JSON
 * @return the (un-awaited) Future produced by `client.execute` — the caller
 *         decides whether to block on it
 */
def writeLog(event: ILoggingEvent)(client: ElasticClient, idx: String)(appName: String, ip: String, hostName: String, default: String) = {
  import scala.util.control.NonFatal

  // Fixed fields present on every log record.
  var content: List[(String, Any)] = List(
    APP_NAME    -> appName,
    HOST_IP     -> ip,
    HOST_NAME   -> hostName,
    LOGGER_NAME -> event.getLoggerName(),
    LEVEL       -> event.getLevel().toString,
    THREAD_NAME -> event.getThreadName(),
    LOG_DATE    -> logDate,
    LOG_TIME    -> logTime
  )

  // Caller location (class/file/line/method) is optional and its extraction can
  // throw inside logback — best-effort only, never abort the append.
  try {
    val callerData = event.getCallerData()
    if (callerData.nonEmpty) {
      val caller = callerData.head
      content = content ++ List(
        CLASS_NAME  -> caller.getClassName(),
        FILE_NAME   -> caller.getFileName(),
        LINE_NUMBER -> caller.getLineNumber().toString,
        METHOD_NAME -> caller.getMethodName()
      )
    }
  } catch { case NonFatal(e) => println(s"logging event error: ${e.getMessage}") }

  // Attached throwable, if any.
  // BUG FIX: IThrowableProxy.getSuppressed() returns a Java array
  // (Array[IThrowableProxy]); the original `asInstanceOf[List[IThrowableProxy]]`
  // always threw ClassCastException, which the blanket catch swallowed, so the
  // throwable text was silently lost. Iterate the array instead, and include the
  // proxy's own message (the primary error text) ahead of any suppressed ones.
  try {
    val proxy = event.getThrowableProxy()
    if (proxy != null) {
      val messages = proxy.getMessage() +: proxy.getSuppressed().map(_.getMessage()).toSeq
      content = content :+ (THROWABLE_STR -> messages.mkString(","))
    }
  } catch { case NonFatal(e) => println(s"logging event error: ${e.getMessage}") }

  // The log message may itself be a JSON map of structured fields; if so, merge
  // each key/value into the record. Otherwise store the raw text under MESSAGE
  // and merge the `default` JSON map instead (also best-effort).
  val logmsgs = event.getMessage()
  try {
    val logMap = fromJson[Map[String, String]](logmsgs)
    logMap.foreach { case (k, v) => content = content :+ (k -> v) }
  } catch {
    case NonFatal(_) =>
      content = content :+ (MESSAGE -> logmsgs)
      try {
        val dftMap = fromJson[Map[String, String]](default)
        dftMap.foreach { case (k, v) => content = content :+ (k -> v) }
      } catch { case NonFatal(_) => } // `default` is optional; ignore malformed JSON
  }

  // createOnly(true): insert-only — never overwrites an existing document id.
  val newRecord = indexInto(idx)
    .fields(content)
    .createOnly(true)
  client.execute(newRecord) //.await
}