wip: implement log rotation, checked on each append. On slow systems this may cause stalls because the mutex is held until the file has been rotated; however, the write path honors its context and gives up if the plugin broker decides it has timed out, retrying the next time the plugin pushes.
This commit is contained in:
parent
434e18c42c
commit
69a6ba7603
|
@ -17,7 +17,7 @@ format: |
|
|||
log_path: "/tmp/crowdsec_alerts.json"
|
||||
rotate:
|
||||
enabled: true # Set to false if log rotation is handled at the system level (e.g. logrotate)
|
||||
max_size: 500
|
||||
max_size: 500 # in MB
|
||||
max_files: 5
|
||||
max_age: 5
|
||||
compress: true
|
||||
|
|
|
@ -1,10 +1,15 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/crowdsecurity/crowdsec/pkg/protobufs"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
@ -15,6 +20,7 @@ import (
|
|||
var (
|
||||
FileWriter *os.File
|
||||
FileWriteMutex *sync.Mutex
|
||||
FileSize int64
|
||||
)
|
||||
|
||||
type PluginConfig struct {
|
||||
|
@ -43,6 +49,110 @@ var logger hclog.Logger = hclog.New(&hclog.LoggerOptions{
|
|||
JSONFormat: true,
|
||||
})
|
||||
|
||||
func (r *LogRotate) rotateLogs(cfg PluginConfig) {
|
||||
// Rotate the log file
|
||||
err := r.rotateLogFile(cfg.LogPath, r.MaxFiles)
|
||||
if err != nil {
|
||||
logger.Error("Failed to rotate log file", "error", err)
|
||||
}
|
||||
// Reopen the FileWriter
|
||||
FileWriter.Close()
|
||||
FileWriter, err = os.OpenFile(cfg.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
logger.Error("Failed to reopen log file", "error", err)
|
||||
}
|
||||
// Reset the file size
|
||||
FileInfo, err := FileWriter.Stat()
|
||||
if err != nil {
|
||||
logger.Error("Failed to get file info", "error", err)
|
||||
}
|
||||
FileSize = FileInfo.Size()
|
||||
}
|
||||
|
||||
func (r *LogRotate) rotateLogFile(logPath string, maxBackups int) error {
|
||||
// Rename the current log file
|
||||
backupPath := logPath + "." + time.Now().Format("20060102-150405")
|
||||
err := os.Rename(logPath, backupPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
glob := logPath + ".*"
|
||||
if r.Compress {
|
||||
glob = logPath + ".*.gz"
|
||||
err = compressFile(backupPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Remove old backups
|
||||
files, err := filepath.Glob(glob)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sort.Sort(sort.Reverse(sort.StringSlice(files)))
|
||||
|
||||
for i, file := range files {
|
||||
logger.Trace("Checking file", "file", file, "index", i, "maxBackups", maxBackups)
|
||||
if i >= maxBackups {
|
||||
logger.Trace("Removing file as over max backup count", "file", file)
|
||||
os.Remove(file)
|
||||
} else {
|
||||
// Check the age of the file
|
||||
fileInfo, err := os.Stat(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
age := time.Since(fileInfo.ModTime()).Hours()
|
||||
if age > float64(r.MaxAge*24) {
|
||||
logger.Trace("Removing file as age was over configured amount", "file", file, "age", age)
|
||||
os.Remove(file)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func compressFile(src string) error {
|
||||
// Open the source file for reading
|
||||
srcFile, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer srcFile.Close()
|
||||
|
||||
// Create the destination file
|
||||
dstFile, err := os.Create(src + ".gz")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dstFile.Close()
|
||||
|
||||
// Create a gzip writer
|
||||
gw := gzip.NewWriter(dstFile)
|
||||
defer gw.Close()
|
||||
|
||||
// Read the source file and write its contents to the gzip writer
|
||||
b, err := io.ReadAll(srcFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = gw.Write(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete the original (uncompressed) backup file
|
||||
err = os.Remove(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func WriteToFileWithCtx(ctx context.Context, cfg PluginConfig, log string) error {
|
||||
for {
|
||||
select {
|
||||
|
@ -70,6 +180,14 @@ func WriteToFileWithCtx(ctx context.Context, cfg PluginConfig, log string) error
|
|||
logger.Info("Log file has been reopened successfully")
|
||||
}
|
||||
_, err = FileWriter.WriteString(log)
|
||||
if err == nil {
|
||||
FileSize += int64(len(log))
|
||||
if FileSize > int64(cfg.LogRotate.MaxSize)*1024*1024 {
|
||||
logger.Debug("Rotating log file", cfg.LogPath)
|
||||
// Rotate the log file
|
||||
cfg.LogRotate.rotateLogs(cfg)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -87,12 +205,24 @@ func (s *FilePlugin) Notify(ctx context.Context, notification *protobufs.Notific
|
|||
func (s *FilePlugin) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) {
|
||||
d := PluginConfig{}
|
||||
err := yaml.Unmarshal(config.Config, &d)
|
||||
if err == nil {
|
||||
FileWriteMutex = &sync.Mutex{}
|
||||
FileWriter, err = os.OpenFile(d.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
s.PluginConfigByName[d.Name] = d
|
||||
logger.SetLevel(hclog.LevelFromString(d.LogLevel))
|
||||
if err != nil {
|
||||
logger.Error("Failed to unmarshal config", "error", err)
|
||||
return &protobufs.Empty{}, err
|
||||
}
|
||||
FileWriteMutex = &sync.Mutex{}
|
||||
FileWriter, err = os.OpenFile(d.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
logger.Error("Failed to open log file", "error", err)
|
||||
return &protobufs.Empty{}, err
|
||||
}
|
||||
FileInfo, err := FileWriter.Stat()
|
||||
if err != nil {
|
||||
logger.Error("Failed to get file info", "error", err)
|
||||
return &protobufs.Empty{}, err
|
||||
}
|
||||
FileSize = FileInfo.Size()
|
||||
s.PluginConfigByName[d.Name] = d
|
||||
logger.SetLevel(hclog.LevelFromString(d.LogLevel))
|
||||
return &protobufs.Empty{}, err
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue