重构项目

This commit is contained in:
dragon
2025-11-11 17:28:09 +08:00
parent c7c08311ea
commit 47992401f7
10 changed files with 348 additions and 194 deletions

6
go.mod
View File

@@ -11,7 +11,7 @@ require (
github.com/pkg/errors v0.9.1
github.com/stretchr/testify v1.11.1
go.uber.org/zap v1.27.0
gopkg.d7z.net/middleware v0.0.0-20251110085441-55e78e556d53
gopkg.d7z.net/middleware v0.0.0-20251111072327-ca8cc16305f4
gopkg.in/yaml.v3 v3.0.1
)
@@ -51,8 +51,8 @@ require (
golang.org/x/net v0.46.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/text v0.30.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251103181224-f26f9409b101 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251110190251-83f479183930 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251110190251-83f479183930 // indirect
google.golang.org/grpc v1.76.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
)

8
go.sum
View File

@@ -165,8 +165,12 @@ gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/genproto/googleapis/api v0.0.0-20251103181224-f26f9409b101 h1:vk5TfqZHNn0obhPIYeS+cxIFKFQgser/M2jnI+9c6MM=
google.golang.org/genproto/googleapis/api v0.0.0-20251103181224-f26f9409b101/go.mod h1:E17fc4PDhkr22dE3RgnH2hEubUaky6ZwW4VhANxyspg=
google.golang.org/genproto/googleapis/api v0.0.0-20251110190251-83f479183930 h1:8BWFtrvJRbplrKV5VHlIm4YM726eeBPPAL2QDNWhRrU=
google.golang.org/genproto/googleapis/api v0.0.0-20251110190251-83f479183930/go.mod h1:G5IanEx8/PgI9w6CFcYQf7jMtHQhZruvfM1i3qOqk5U=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251110190251-83f479183930 h1:tK4fkUnnRhig9TsTp4otV1FxwBFYgbKUq1RY0V6KZ4U=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251110190251-83f479183930/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
@@ -179,6 +183,10 @@ gopkg.d7z.net/middleware v0.0.0-20251110035951-40e0de46e3c4 h1:wIHzqRwujNatx9ueC
gopkg.d7z.net/middleware v0.0.0-20251110035951-40e0de46e3c4/go.mod h1:BJ8ySXqmlBpM9B2zFJfmvYQ61XPA+G0O1VDmYomxyrM=
gopkg.d7z.net/middleware v0.0.0-20251110085441-55e78e556d53 h1:Mw9UU8AAv0tk86rco7CkdhDe2HNBbBF/VFPHNhBnJpk=
gopkg.d7z.net/middleware v0.0.0-20251110085441-55e78e556d53/go.mod h1:BJ8ySXqmlBpM9B2zFJfmvYQ61XPA+G0O1VDmYomxyrM=
gopkg.d7z.net/middleware v0.0.0-20251111034620-9ddf39894699 h1:5IRYlPahwQZ54nLnxn56hdLksJNA4ufJDNTxdOWHhrY=
gopkg.d7z.net/middleware v0.0.0-20251111034620-9ddf39894699/go.mod h1:BJ8ySXqmlBpM9B2zFJfmvYQ61XPA+G0O1VDmYomxyrM=
gopkg.d7z.net/middleware v0.0.0-20251111072327-ca8cc16305f4 h1:29Thhz0nYiK+BYG0yjU6Cxu/3p/k2umeokfXGTp0NTg=
gopkg.d7z.net/middleware v0.0.0-20251111072327-ca8cc16305f4/go.mod h1:BJ8ySXqmlBpM9B2zFJfmvYQ61XPA+G0O1VDmYomxyrM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@@ -22,19 +22,32 @@ type CacheBackend struct {
backend Backend
cacheRepo *tools.Cache[map[string]string]
cacheBranch *tools.Cache[map[string]*BranchInfo]
cacheBlob cache.Cache
cacheBlobLimit uint64
}
func (c *CacheBackend) Close() error {
return c.backend.Close()
}
func NewCacheBackend(backend Backend, cache kv.KV, ttl time.Duration) *CacheBackend {
repoCache := tools.NewCache[map[string]string](cache, "repos", ttl)
branchCache := tools.NewCache[map[string]*BranchInfo](cache, "branches", ttl)
func NewCacheBackend(
backend Backend,
cacheMeta kv.KV,
cacheMetaTtl time.Duration,
cacheBlob cache.Cache,
cacheBlobLimit uint64,
) *CacheBackend {
repoCache := tools.NewCache[map[string]string](cacheMeta, "repos", cacheMetaTtl)
branchCache := tools.NewCache[map[string]*BranchInfo](cacheMeta, "branches", cacheMetaTtl)
return &CacheBackend{
backend: backend,
cacheRepo: repoCache,
cacheBranch: branchCache,
cacheBlob: cacheBlob,
cacheBlobLimit: cacheBlobLimit,
}
}
@@ -76,37 +89,43 @@ func (c *CacheBackend) Branches(ctx context.Context, owner, repo string) (map[st
}
func (c *CacheBackend) Open(ctx context.Context, client *http.Client, owner, repo, commit, path string, headers http.Header) (*http.Response, error) {
if headers != nil && headers.Get("Range") != "" {
// ignore custom header
return c.backend.Open(ctx, client, owner, repo, commit, path, headers)
}
type CacheBackendBlobReader struct {
client *http.Client
cache cache.Cache
base Backend
limit uint64
}
func NewCacheBackendBlobReader(
client *http.Client,
base Backend,
cache cache.Cache,
limit uint64,
) *CacheBackendBlobReader {
return &CacheBackendBlobReader{client: client, base: base, cache: cache, limit: limit}
}
func (c *CacheBackendBlobReader) Open(ctx context.Context, owner, repo, commit, path string) (io.ReadCloser, error) {
key := fmt.Sprintf("%s/%s/%s/%s", owner, repo, commit, path)
lastCache, err := c.cache.Get(ctx, key)
lastCache, err := c.cacheBlob.Get(ctx, key)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, err
} else if lastCache == nil && err == nil {
// 边界缓存
return nil, os.ErrNotExist
} else if lastCache != nil {
return lastCache, nil
h := lastCache.Metadata
if h["Not-Found"] == "true" {
return nil, os.ErrNotExist
}
open, err := c.base.Open(ctx, c.client, owner, repo, commit, path, http.Header{})
respHeader := make(http.Header)
respHeader.Set("Last-Modified", h["Last-Modified"])
respHeader.Set("Content-Type", h["Content-Type"])
respHeader.Set("Content-Length", h["Content-Length"])
atoi, err := strconv.Atoi(h["Content-Length"])
if err != nil {
return nil, err
}
return &http.Response{
Status: "200 OK",
StatusCode: 200,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Body: lastCache,
ContentLength: int64(atoi),
Request: nil,
Header: respHeader,
}, nil
}
open, err := c.backend.Open(ctx, client, owner, repo, commit, path, http.Header{})
if err != nil || open == nil {
if open != nil {
_ = open.Body.Close()
@@ -119,38 +138,33 @@ func (c *CacheBackendBlobReader) Open(ctx context.Context, owner, repo, commit,
_ = open.Body.Close()
return nil, os.ErrNotExist
}
lastMod, err := time.Parse(http.TimeFormat, open.Header.Get("Last-Modified"))
if err != nil {
// 无时间,跳过
return open.Body, nil
}
length, err := strconv.ParseUint(open.Header.Get("Content-Length"), 10, 64)
// 无法计算大小,跳过
if err != nil {
return open.Body, nil
return open, nil
}
if length > c.limit {
if length > c.cacheBlobLimit {
// 超过最大大小,跳过
return &utils.SizeReadCloser{
open.Body = &utils.SizeReadCloser{
ReadCloser: open.Body,
Size: length,
}, nil
}
return open, nil
}
defer open.Body.Close()
allBytes, err := io.ReadAll(open.Body)
if err != nil {
return nil, err
}
if err = c.cache.Put(ctx, key, bytes.NewBuffer(allBytes), time.Hour); err != nil {
zap.L().Warn("缓存归档失败", zap.Error(err), zap.Int("Size", len(allBytes)), zap.Uint64("MaxSize", c.limit))
if err = c.cacheBlob.Put(ctx, key, map[string]string{
"Content-Length": open.Header.Get("Content-Length"),
"Last-Modified": open.Header.Get("Last-Modified"),
"Content-Type": open.Header.Get("Content-Type"),
}, bytes.NewBuffer(allBytes), time.Hour); err != nil {
zap.L().Warn("缓存归档失败", zap.Error(err), zap.Int("Size", len(allBytes)), zap.Uint64("MaxSize", c.cacheBlobLimit))
}
return &cache.Content{
ReadSeekCloser: utils.NopCloser{
open.Body = utils.NopCloser{
ReadSeeker: bytes.NewReader(allBytes),
},
LastModified: lastMod,
Length: length,
}, nil
}
return open, nil
}

View File

@@ -30,7 +30,7 @@ func NewPageDomain(meta *ServerMeta, alias kv.KV, baseDomain, defaultBranch stri
type PageDomainContent struct {
*PageMetaContent
*PageVFS
Owner string
Repo string
Path string
@@ -48,7 +48,7 @@ func (p *PageDomain) ParseDomainMeta(ctx context.Context, domain, path, branch s
return nil, os.ErrNotExist
}
zap.L().Debug("命中别名", zap.String("domain", domain), zap.Any("alias", alias))
return p.ReturnMeta(ctx, alias.Owner, alias.Repo, alias.Branch, pathArr)
return p.returnMeta(ctx, alias.Owner, alias.Repo, alias.Branch, pathArr)
}
owner := strings.TrimSuffix(domain, "."+p.baseDomain)
repo := pathArr[0]
@@ -57,9 +57,9 @@ func (p *PageDomain) ParseDomainMeta(ctx context.Context, domain, path, branch s
if repo == "" {
// 回退到默认仓库 (路径未包含仓库)
zap.L().Debug("fail back to default repo", zap.String("repo", domain))
returnMeta, err = p.ReturnMeta(ctx, owner, domain, branch, pathArr)
returnMeta, err = p.returnMeta(ctx, owner, domain, branch, pathArr)
} else {
returnMeta, err = p.ReturnMeta(ctx, owner, repo, branch, pathArr[1:])
returnMeta, err = p.returnMeta(ctx, owner, repo, branch, pathArr[1:])
}
if err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, err
@@ -67,11 +67,11 @@ func (p *PageDomain) ParseDomainMeta(ctx context.Context, domain, path, branch s
return returnMeta, nil
}
// 发现 repo 的情况下回退到默认页面
return p.ReturnMeta(ctx, owner, domain, branch, pathArr)
return p.returnMeta(ctx, owner, domain, branch, pathArr)
}
func (p *PageDomain) ReturnMeta(ctx context.Context, owner, repo, branch string, path []string) (*PageDomainContent, error) {
rel := &PageDomainContent{}
func (p *PageDomain) returnMeta(ctx context.Context, owner, repo, branch string, path []string) (*PageDomainContent, error) {
result := &PageDomainContent{}
meta, err := p.GetMeta(ctx, owner, repo, branch)
if err != nil {
zap.L().Debug("查询错误", zap.Error(err))
@@ -81,13 +81,15 @@ func (p *PageDomain) ReturnMeta(ctx context.Context, owner, repo, branch string,
}
return nil, errors.Wrap(os.ErrNotExist, strings.Join(path, "/"))
}
rel.PageMetaContent = meta
rel.Owner = owner
rel.Repo = repo
rel.Path = strings.Join(path, "/")
if err = p.alias.Bind(ctx, meta.Alias, rel.Owner, rel.Repo, branch); err != nil {
result.PageMetaContent = meta
result.Owner = owner
result.Repo = repo
result.PageVFS = NewPageVFS(p.client, p, result.Owner, result.Repo, result.CommitID)
result.Path = strings.Join(path, "/")
if err = p.alias.Bind(ctx, meta.Alias, result.Owner, result.Repo, branch); err != nil {
zap.L().Warn("别名绑定失败", zap.Error(err))
return nil, err
}
return rel, nil
return result, nil
}

View File

@@ -2,6 +2,7 @@ package core
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
@@ -13,6 +14,7 @@ import (
"go.uber.org/zap"
"gopkg.d7z.net/middleware/kv"
"gopkg.d7z.net/middleware/tools"
"gopkg.in/yaml.v3"
"github.com/gobwas/glob"
@@ -26,19 +28,20 @@ var regexpHostname = regexp.MustCompile(`^(?:([a-z0-9-]+|\*)\.)?([a-z0-9-]{1,61}
type ServerMeta struct {
Backend
Domain string
client *http.Client
cache kv.KV
ttl time.Duration
cache *tools.Cache[PageMetaContent]
locker *utils.Locker
}
func NewServerMeta(client *http.Client, backend Backend, kv kv.KV, domain string, ttl time.Duration) *ServerMeta {
return &ServerMeta{backend, domain, client, kv, ttl, utils.NewLocker()}
return &ServerMeta{
backend, domain, client,
tools.NewCache[PageMetaContent](kv, "pages/meta", ttl),
utils.NewLocker(),
}
}
func (s *ServerMeta) GetMeta(ctx context.Context, owner, repo, branch string) (*PageMetaContent, error) {
@@ -65,43 +68,36 @@ func (s *ServerMeta) GetMeta(ctx context.Context, owner, repo, branch string) (*
rel.CommitID = info.ID
rel.LastModified = info.LastModified
key := s.cache.WithKey("meta", owner, repo, branch)
cache, err := s.cache.Get(ctx, key)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, err
}
if err == nil {
if err = rel.From(cache); err == nil {
if !rel.IsPage {
return nil, os.ErrNotExist
}
key := fmt.Sprintf("%s/%s/%s", owner, repo, branch)
if cache, find := s.cache.Load(ctx, key); find {
if cache.IsPage {
return rel, nil
} else {
return nil, os.ErrNotExist
}
}
mux := s.locker.Open(key)
mux.Lock()
defer mux.Unlock()
cache, err = s.cache.Get(ctx, key)
if err == nil {
if err = rel.From(cache); err == nil {
if !rel.IsPage {
return nil, os.ErrNotExist
}
if cache, find := s.cache.Load(ctx, key); find {
if cache.IsPage {
return rel, nil
} else {
return nil, os.ErrNotExist
}
}
// 确定存在 index.html , 否则跳过
if find, _ := s.FileExists(ctx, owner, repo, rel.CommitID, "index.html"); !find {
rel.IsPage = false
_ = s.cache.Put(ctx, key, rel.String(), s.ttl)
_ = s.cache.Store(ctx, key, *rel)
return nil, os.ErrNotExist
}
rel.IsPage = true
errCall := func(err error) error {
rel.IsPage = false
rel.ErrorMsg = err.Error()
_ = s.cache.Put(ctx, key, rel.String(), s.ttl)
_ = s.cache.Store(ctx, key, *rel)
return err
}
// 添加默认跳过的内容
@@ -181,7 +177,7 @@ func (s *ServerMeta) GetMeta(ctx context.Context, owner, repo, branch string) (*
}
rel.Alias = utils.ClearDuplicates(rel.Alias)
rel.Ignore = utils.ClearDuplicates(rel.Ignore)
_ = s.cache.Put(ctx, key, rel.String(), s.ttl)
_ = s.cache.Store(ctx, key, *rel)
return rel, nil
}

View File

@@ -5,6 +5,7 @@ import (
"time"
"github.com/gobwas/glob"
"gopkg.in/yaml.v3"
)
type renderCompiler struct {
@@ -41,8 +42,27 @@ func NewPageMetaContent() *PageMetaContent {
}
}
func (m *PageMetaContent) From(data string) error {
err := json.Unmarshal([]byte(data), m)
func (m *PageMetaContent) UnmarshalJSON(bytes []byte) error {
type alias PageMetaContent
var c alias
if err := json.Unmarshal(bytes, &c); err != nil {
return err
}
*m = PageMetaContent(c)
return m.init()
}
func (m *PageMetaContent) UnmarshalYAML(value *yaml.Node) error {
type alias PageMetaContent
var c alias
if err := value.Decode(&c); err != nil {
return err
}
*m = PageMetaContent(c)
return m.init()
}
func (m *PageMetaContent) init() error {
clear(m.rendersL)
for key, gs := range m.Renders {
for _, g := range gs {
@@ -56,7 +76,7 @@ func (m *PageMetaContent) From(data string) error {
for _, g := range m.Ignore {
m.ignoreL = append(m.ignoreL, glob.MustCompile(g))
}
return err
return nil
}
func (m *PageMetaContent) IgnorePath(path string) bool {

75
pkg/core/page.go Normal file
View File

@@ -0,0 +1,75 @@
package core
import (
"context"
"io"
"net/http"
"os"
)
// PageVFS exposes the files of a single repository snapshot (org/repo pinned
// to one commit) through a small virtual-filesystem style API backed by a
// Backend implementation.
type PageVFS struct {
	backend  Backend      // storage backend used to resolve file requests
	client   *http.Client // HTTP client handed to the backend on every call
	org      string       // repository owner / organisation
	repo     string       // repository name
	commitID string       // commit this VFS is pinned to
}
// NewPageVFS builds a PageVFS bound to the given owner/repo/commit triple.
// All file operations on the returned value resolve against that snapshot.
func NewPageVFS(
	client *http.Client,
	backend Backend,
	org string,
	repo string,
	commitID string,
) *PageVFS {
	vfs := &PageVFS{client: client, backend: backend, org: org, repo: repo, commitID: commitID}
	return vfs
}
// NativeOpen asks the backend for path inside the pinned commit, forwarding
// the supplied request headers and returning the backend's raw HTTP response.
func (p *PageVFS) NativeOpen(ctx context.Context, path string, headers http.Header) (*http.Response, error) {
	resp, err := p.backend.Open(ctx, p.client, p.org, p.repo, p.commitID, path, headers)
	return resp, err
}
// Exists reports whether path resolves to a file in this snapshot. A backend
// error is passed through alongside false; a non-200 status simply means the
// file is absent.
func (p *PageVFS) Exists(ctx context.Context, path string) (bool, error) {
	resp, err := p.NativeOpen(ctx, path, nil)
	if resp != nil {
		defer resp.Body.Close()
	}
	switch {
	case err != nil, resp == nil:
		return false, err
	case resp.StatusCode != http.StatusOK:
		return false, nil
	default:
		return true, nil
	}
}
// Open returns a reader over path's content, or os.ErrNotExist when the
// backend answers with a non-200 status. The caller owns closing the reader.
func (p *PageVFS) Open(ctx context.Context, path string) (io.ReadCloser, error) {
	resp, err := p.NativeOpen(ctx, path, nil)
	switch {
	case err != nil:
		// Drain/close the body (if any) before surfacing the error.
		if resp != nil {
			_ = resp.Body.Close()
		}
		return nil, err
	case resp.StatusCode != http.StatusOK:
		_ = resp.Body.Close()
		return nil, os.ErrNotExist
	}
	return resp.Body, nil
}
// Read loads the entire file at path into memory. Intended for small blobs
// such as configuration files; large content should go through Open instead.
func (p *PageVFS) Read(ctx context.Context, path string) ([]byte, error) {
	rc, err := p.Open(ctx, path)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return io.ReadAll(rc)
}

61
pkg/core/parser.go Normal file
View File

@@ -0,0 +1,61 @@
package core
import (
"net/http"
"regexp"
"strings"
)
// Domain is one candidate resolution of an incoming request: which
// organisation/repository/branch to serve and the path inside that snapshot.
type Domain struct {
	Org    string `json:"org"`
	Repo   string `json:"repo"`
	Branch string `json:"branch"` // commit id or branch
	Path   string `json:"path"`
}

// portExp matches an optional ":port" suffix on a Host header value so it
// can be stripped before domain matching.
var portExp = regexp.MustCompile(`:\d+$`)

// DomainParser maps request hosts and paths to candidate Domain targets,
// either via the wildcard base domain (org.base.com/...) or via a stored
// domain alias.
type DomainParser struct {
	baseDomain    string       // wildcard base domain, e.g. "pages.example.com"
	defaultBranch string       // used when no ?branch= query parameter is given
	alias         *DomainAlias // lookup for custom-domain aliases
}
// ParseDomains resolves the request host and path into an ordered list of
// candidate Domain targets, most specific first:
//
//  1. org.base.com/repo/...  -> repo taken from the first path segment
//  2. org.base.com/...       -> fallback where the host itself names the repo
//
// For hosts outside the base domain the alias store is consulted instead.
// The branch comes from the ?branch= query parameter, falling back to the
// parser's default branch. Alias lookup errors are deliberately ignored
// (best-effort), mirroring the original behavior.
func (d *DomainParser) ParseDomains(request *http.Request) ([]Domain, error) {
	host := portExp.ReplaceAllString(strings.ToLower(request.Host), "")
	path := strings.Split(strings.Trim(request.URL.Path, "/"), "/")
	branch := request.URL.Query().Get("branch")
	if branch == "" {
		branch = d.defaultBranch
	}
	result := make([]Domain, 0)
	if strings.HasSuffix(host, d.baseDomain) {
		// Trim the base domain and the separating dot so org is the bare
		// subdomain label (consistent with PageDomain, which strips
		// "."+baseDomain). Without the second trim, "org.base.com" yielded
		// "org." as the organisation.
		org := strings.TrimSuffix(strings.TrimSuffix(host, d.baseDomain), ".")
		if len(path) > 1 {
			// org.base.com/repo/path...
			result = append(result, Domain{
				Org:    org,
				Repo:   path[0],
				Branch: branch,
				Path:   strings.Join(path[1:], "/"),
			})
		}
		// org.base.com/ — fall back to the host itself as the repo name.
		result = append(result, Domain{
			Org:    org,
			Repo:   host,
			Branch: branch,
			Path:   strings.Join(path, "/"),
		})
	} else {
		if find, _ := d.alias.Query(request.Context(), host); find != nil {
			result = append(result, Domain{
				Org:    find.Owner,
				Repo:   find.Repo,
				Branch: find.Branch,
				Path:   request.URL.Path,
			})
		}
	}
	return result, nil
}

1
pkg/core/proxy.go Normal file
View File

@@ -0,0 +1 @@
package core

View File

@@ -3,7 +3,6 @@ package pkg
import (
"fmt"
"io"
"mime"
"net"
"net/http"
"net/http/httputil"
@@ -12,7 +11,6 @@ import (
"path/filepath"
"regexp"
"slices"
"strconv"
"strings"
"time"
@@ -89,7 +87,6 @@ func DefaultOptions(domain string) ServerOptions {
type Server struct {
options *ServerOptions
meta *core.PageDomain
reader *core.CacheBackendBlobReader
backend core.Backend
fs http.Handler
}
@@ -97,10 +94,11 @@ type Server struct {
var staticPrefix = "/.well-known/page-server/"
func NewPageServer(backend core.Backend, options ServerOptions) *Server {
backend = core.NewCacheBackend(backend, options.CacheMeta, options.CacheMetaTTL)
backend = core.NewCacheBackend(backend, options.CacheMeta, options.CacheMetaTTL,
options.CacheBlob, options.CacheBlobLimit,
)
svcMeta := core.NewServerMeta(options.HTTPClient, backend, options.CacheMeta, options.Domain, options.CacheMetaTTL)
pageMeta := core.NewPageDomain(svcMeta, options.Alias, options.Domain, options.DefaultBranch)
reader := core.NewCacheBackendBlobReader(options.HTTPClient, backend, options.CacheBlob, options.CacheBlobLimit)
var fs http.Handler
if options.StaticDir != "" {
fs = http.StripPrefix(staticPrefix, http.FileServer(http.Dir(options.StaticDir)))
@@ -109,7 +107,6 @@ func NewPageServer(backend core.Backend, options ServerOptions) *Server {
backend: backend,
options: &options,
meta: pageMeta,
reader: reader,
fs: fs,
}
}
@@ -139,22 +136,90 @@ func (s *Server) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
func (s *Server) Serve(writer http.ResponseWriter, request *http.Request) error {
ctx := request.Context()
domainHost := portExp.ReplaceAllString(strings.ToLower(request.Host), "")
meta, err := s.meta.ParseDomainMeta(
ctx,
domainHost,
request.URL.Path,
request.URL.Query().Get("branch"))
meta, err := s.meta.ParseDomainMeta(ctx, domainHost, request.URL.Path, request.URL.Query().Get("branch"))
if err != nil {
return err
}
zap.L().Debug("new request", zap.Any("request path", meta.Path))
if len(meta.Alias) > 0 && !slices.Contains(meta.Alias, domainHost) {
// 重定向到配置的地址
zap.L().Debug("redirect", zap.Any("src", request.Host), zap.Any("dst", meta.Alias[0]))
http.Redirect(writer, request, fmt.Sprintf("https://%s/%s", meta.Alias[0], meta.Path), http.StatusFound)
return nil
}
// 处理反向代理
if s.options.EnableProxy && s.Proxy(writer, request, meta) {
return nil
}
// 在非反向代理时处理目录访问
if strings.HasSuffix(meta.Path, "/") || meta.Path == "" {
meta.Path += "index.html"
}
// 如果不是反向代理路由则跳过任何配置
if request.Method != http.MethodGet {
return os.ErrNotExist
}
if meta.IgnorePath(meta.Path) {
zap.L().Debug("ignore path", zap.Any("request", request.RequestURI), zap.Any("meta.path", meta.Path))
err = os.ErrNotExist
}
type resp struct {
IsError bool
Path string
}
if s.options.EnableProxy {
callPath := []resp{{false, meta.Path}}
if meta.VRoute {
callPath = append(callPath, resp{false, "index.html"})
} else {
callPath = append(callPath, resp{false, meta.Path + "/index.html"})
}
callPath = append(callPath, resp{true, "404.html"})
var callResp *http.Response
callErr := os.ErrNotExist
var callRespMeta resp
for _, r := range callPath {
callResp, callErr = meta.NativeOpen(request.Context(), r.Path, nil)
if callErr != nil {
if callResp != nil {
_ = callResp.Body.Close()
}
if !errors.Is(callErr, os.ErrNotExist) {
zap.L().Debug("error", zap.Any("error", callErr))
}
callRespMeta = r
continue
}
break
}
if callResp == nil {
return os.ErrNotExist
}
if callErr != nil {
// 回退失败
return callErr
}
render := meta.TryRender(meta.Path)
writer.Header().Set("Content-Type", callResp.Header.Get("Content-Type"))
if callRespMeta.IsError {
render = meta.TryRender(meta.Path)
writer.WriteHeader(http.StatusNotFound)
} else if render == nil {
lastMod, err := time.Parse(http.TimeFormat, callResp.Header.Get("Last-Modified"))
if seekResp, ok := callResp.Body.(io.ReadSeeker); ok && err == nil {
http.ServeContent(writer, request, filepath.Base(callRespMeta.Path), lastMod, seekResp)
}
} else {
defer callResp.Body.Close()
return render.Render(writer, request, callResp.Body)
}
return nil
}
func (s *Server) Proxy(writer http.ResponseWriter, request *http.Request, meta *core.PageDomainContent) bool {
for prefix, backend := range meta.Proxy {
proxyPath := "/" + meta.Path
if strings.HasPrefix(proxyPath, prefix) {
@@ -178,98 +243,10 @@ func (s *Server) Serve(writer http.ResponseWriter, request *http.Request) error
zap.Any("path", proxyPath), zap.Any("target", fmt.Sprintf("%s%s", u, targetPath)))
// todo(security): 处理 websocket
proxy.ServeHTTP(writer, request)
return nil
return true
}
}
}
// 在非反向代理时处理目录访问
if strings.HasSuffix(meta.Path, "/") || meta.Path == "" {
meta.Path += "index.html"
}
// 如果不是反向代理路由则跳过任何配置
if request.Method != http.MethodGet {
return os.ErrNotExist
}
var result io.ReadCloser
if meta.IgnorePath(meta.Path) {
zap.L().Debug("ignore path", zap.Any("request", request.RequestURI), zap.Any("meta.path", meta.Path))
err = os.ErrNotExist
} else {
result, err = s.reader.Open(ctx, meta.Owner, meta.Repo, meta.CommitID, meta.Path)
}
if err != nil {
if errors.Is(err, os.ErrNotExist) {
if meta.VRoute {
// 回退 abc => index.html
result, err = s.reader.Open(ctx, meta.Owner, meta.Repo, meta.CommitID, "index.html")
if err == nil {
meta.Path = "index.html"
}
} else {
// 回退 abc => abc/ => abc/index.html
result, err = s.reader.Open(ctx, meta.Owner, meta.Repo, meta.CommitID, meta.Path+"/index.html")
if err == nil {
meta.Path = strings.Trim(meta.Path+"/index.html", "/")
}
}
} else {
return err
}
}
// 处理请求错误
if err != nil {
if errors.Is(err, os.ErrNotExist) {
result, err = s.reader.Open(ctx, meta.Owner, meta.Repo, meta.CommitID, "404.html")
if err != nil {
return err
}
writer.Header().Set("Content-Type", mime.TypeByExtension(".html"))
writer.WriteHeader(http.StatusNotFound)
if render := meta.TryRender(meta.Path, "/404.html"); render != nil && s.options.EnableRender {
defer result.Close()
return render.Render(writer, request, result)
}
_, _ = io.Copy(writer, result)
_ = result.Close()
return nil
}
return err
}
fileName := filepath.Base(meta.Path)
render := meta.TryRender(meta.Path)
if !s.options.EnableRender {
render = nil
}
defer result.Close()
if reader, ok := result.(*cache.Content); ok {
writer.Header().Add("X-Cache", "HIT")
writer.Header().Set("Content-Type", mime.TypeByExtension(filepath.Ext(fileName)))
writer.Header().Add("Cache-Control", s.options.CacheControl)
if render != nil {
if err = render.Render(writer, request, reader); err != nil {
return err
}
} else {
http.ServeContent(writer, request, fileName, reader.LastModified, reader)
}
} else {
if reader, ok := result.(*utils.SizeReadCloser); ok && render == nil {
writer.Header().Add("Content-Length", strconv.FormatUint(reader.Size, 10))
}
// todo(bug) : 直连模式下告知数据长度
writer.Header().Add("X-Cache", "MISS")
writer.Header().Set("Content-Type", mime.TypeByExtension(filepath.Ext(fileName)))
writer.WriteHeader(http.StatusOK)
if render != nil {
if err = render.Render(writer, request, reader); err != nil {
return err
}
} else {
_, _ = io.Copy(writer, result)
}
}
return nil
return false
}
func (s *Server) Close() error {