optimize: Limit file load size and clean up server logging

This commit is contained in:
dragon
2026-01-29 13:50:11 +08:00
parent 4fdb77c833
commit a60f123bee
2 changed files with 22 additions and 4 deletions

View File

@@ -15,7 +15,9 @@ type PageVFS struct {
commitID string
}
// TODO: limit the maximum file load size
// MaxFileLoadSize limits the maximum size of file loaded into memory (10MB)
const MaxFileLoadSize = 10 * 1024 * 1024
func NewPageVFS(
backend Backend,
org string,
@@ -69,7 +71,19 @@ func (p *PageVFS) Read(ctx context.Context, path string) ([]byte, error) {
return nil, err
}
defer open.Close()
return io.ReadAll(open)
// Use LimitReader to prevent reading too much data
limitReader := io.LimitReader(open, MaxFileLoadSize+1)
data, err := io.ReadAll(limitReader)
if err != nil {
return nil, err
}
if len(data) > MaxFileLoadSize {
return nil, &os.PathError{Op: "read", Path: path, Err: os.ErrInvalid} // TODO: consider a dedicated "file too large" sentinel error
}
return data, nil
}
func (p *PageVFS) ReadString(ctx context.Context, path string) (string, error) {

View File

@@ -218,6 +218,7 @@ func (s *Server) Serve(writer *utils.WrittenResponseWriter, request *http.Reques
if !ok {
value, err = glob.Compile(filter.Path)
if err != nil {
zap.L().Warn("invalid glob pattern", zap.String("pattern", filter.Path), zap.Error(err))
continue
}
s.globCache.Add(filter.Path, value)
@@ -239,9 +240,12 @@ func (s *Server) Serve(writer *utils.WrittenResponseWriter, request *http.Reques
slices.Reverse(activeFiltersCall)
slices.Reverse(activeFilters)
// Build the visual call stack for logging (e.g., A -> B -> C -> B -> A)
l := len(filtersRoute)
for i := l - 2; i >= 0; i-- {
filtersRoute = append(filtersRoute, filtersRoute[i])
if l > 1 {
for i := l - 2; i >= 0; i-- {
filtersRoute = append(filtersRoute, filtersRoute[i])
}
}
zap.L().Debug("active filters", zap.String("filters", strings.Join(filtersRoute, " -> ")))