feat: docs api design
pkg/logger/kylin.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package logger

import (
    "context"
    "strings"
    "time"

    "octopus/internal/config"

    "github.com/gofiber/fiber/v2"
    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

// Distinct key types: two plain struct{}{} values compare equal, so using them
// directly as context keys would let CtxDebug and CtxTimezone shadow each other.
type ctxDebugKey struct{}
type ctxTimezoneKey struct{}

var (
    CtxDebug    = ctxDebugKey{}
    CtxTimezone = ctxTimezoneKey{}
)

func NewKylinMiddleware() fiber.Handler {
    return func(c *fiber.Ctx) error {
        ctxLogger := log.With().Interface(config.LogTagTraceID, c.Locals("trace_id"))

        // zerolog.Context is a value type: the result of Str must be reassigned,
        // otherwise the field is silently dropped.
        if authorization := c.Get(fiber.HeaderAuthorization); authorization != "" {
            ctxLogger = ctxLogger.Str(config.LogTagAuthorization, authorization)
        }
        if bid := c.Get("Rcrai-Bid"); bid != "" {
            ctxLogger = ctxLogger.Str(config.LogTagBID, bid)
        }
        if staffID := c.Get("Rcrai-StaffId"); staffID != "" {
            ctxLogger = ctxLogger.Str(config.LogTagStaffID, staffID)
        }
        ctx := c.UserContext()
        logger := ctxLogger.Logger()
        logger = logger.Level(zerolog.InfoLevel)
        if lv := c.Get("Debug_Kylin"); strings.ToLower(lv) == "true" {
            ctx = context.WithValue(ctx, CtxDebug, struct{}{})
            logger = logger.Level(zerolog.DebugLevel)
        }
        if tz := c.Get("R-Timezone"); tz != "" {
            if loc, err := time.LoadLocation(tz); err == nil {
                ctx = context.WithValue(ctx, CtxTimezone, loc)
            }
        }
        c.SetUserContext(logger.WithContext(ctx))
        return c.Next()
    }
}
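A minimal wiring sketch for the middleware above (hypothetical main package; import paths are inferred from the file locations and the octopus module name seen in the imports, and trace_id is assumed to be set in Locals by an upstream handler):

package main

import (
    "octopus/pkg/logger"

    "github.com/gofiber/fiber/v2"
    "github.com/rs/zerolog/log"
)

func main() {
    app := fiber.New()
    app.Use(logger.NewKylinMiddleware())
    app.Get("/ping", func(c *fiber.Ctx) error {
        // Fields attached by the middleware (trace_id, bid, staff_id) ride along with every log line.
        log.Ctx(c.UserContext()).Info().Msg("pong")
        return c.SendString("pong")
    })
    _ = app.Listen(":8080")
}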
pkg/logger/sentry_writer.go (new file, 269 lines)
@@ -0,0 +1,269 @@
package logger

import (
    "io"
    "time"

    "octopus/internal/config"

    // "github.com/buger/jsonparser"
    "github.com/getsentry/sentry-go"
    "github.com/rs/zerolog"
    "github.com/tidwall/gjson"
)

var levelsMapping = map[zerolog.Level]sentry.Level{
    zerolog.DebugLevel: sentry.LevelDebug,
    zerolog.InfoLevel:  sentry.LevelInfo,
    zerolog.WarnLevel:  sentry.LevelWarning,
    zerolog.ErrorLevel: sentry.LevelError,
    zerolog.FatalLevel: sentry.LevelFatal,
    zerolog.PanicLevel: sentry.LevelFatal,
}

var _ = io.WriteCloser(new(SentryWriter))

var now = time.Now

// SentryWriter is a sentry events writer with std io.WriteCloser iface.
type SentryWriter struct {
    client *sentry.Client

    levels       map[zerolog.Level]struct{}
    flushTimeout time.Duration
}

// Write handles zerolog's json and sends events to sentry.
func (w *SentryWriter) Write(data []byte) (int, error) {
    event, ok := w.parseLogEvent(data)
    if ok {
        w.client.CaptureEvent(event, nil, nil)
        // should flush before os.Exit
        if event.Level == sentry.LevelFatal {
            w.client.Flush(w.flushTimeout)
        }
    }

    return len(data), nil
}

// Close forces client to flush all pending events.
// Can be useful before application exits.
func (w *SentryWriter) Close() error {
    w.client.Flush(w.flushTimeout)
    return nil
}

func (w *SentryWriter) parseLogEvent(data []byte) (*sentry.Event, bool) {
    const logger = "zerolog"
    lvlStr := gjson.GetBytes(data, zerolog.LevelFieldName)
    lvl, err := zerolog.ParseLevel(lvlStr.String())
    if err != nil {
        return nil, false
    }

    _, enabled := w.levels[lvl]
    if !enabled {
        return nil, false
    }

    sentryLvl, ok := levelsMapping[lvl]
    if !ok {
        return nil, false
    }

    event := sentry.Event{
        Timestamp: now(),
        Level:     sentryLvl,
        Logger:    logger,
        Tags:      make(map[string]string, 6),
        Request:   &sentry.Request{},
    }

    gjson.ParseBytes(data).ForEach(func(key, value gjson.Result) bool {
        switch key.String() {
        // case zerolog.LevelFieldName, zerolog.TimestampFieldName:
        case zerolog.MessageFieldName:
            event.Message = value.String()
        case zerolog.ErrorFieldName:
            event.Exception = append(event.Exception, sentry.Exception{
                Value:      value.String(),
                Stacktrace: newStacktrace(),
            })
        case config.LogTagURL:
            event.Request.URL = value.String()
        case config.LogTagMethod:
            event.Request.Method = value.String()
        case config.LogTagHeaders:
            headers := make(map[string]string)
            value.ForEach(func(key, value gjson.Result) bool {
                headers[key.String()] = value.String()
                return true
            })
            event.Request.Headers = headers
        case config.LogTagData:
            event.Request.Data = value.String()
        case config.LogTagAuthorization:
            event.Tags["Authorization"] = value.String()
        case config.LogTagBID:
            event.Tags["bid"] = value.String()
        case config.LogTagStaffID:
            event.Tags["staff_id"] = value.String()
        case config.LogTagTraceID:
            event.Tags["trace_id"] = value.String()
        default:
            event.Tags[key.String()] = value.String()
        }
        return true
    })

    return &event, true
}

func newStacktrace() *sentry.Stacktrace {
    const (
        module       = "github.com/archdx/zerolog-sentry"
        loggerModule = "github.com/rs/zerolog"
    )

    st := sentry.NewStacktrace()

    threshold := len(st.Frames) - 1
    // drop current module frames
    for ; threshold > 0 && st.Frames[threshold].Module == module; threshold-- {
    }

outer:
    // try to drop zerolog module frames after logger call point
    for i := threshold; i > 0; i-- {
        if st.Frames[i].Module == loggerModule {
            for j := i - 1; j >= 0; j-- {
                if st.Frames[j].Module != loggerModule {
                    threshold = j
                    break outer
                }
            }

            break
        }
    }

    st.Frames = st.Frames[:threshold+1]

    return st
}

// WriterOption configures sentry events writer.
type WriterOption interface {
    apply(*options)
}

type optionFunc func(*options)

func (fn optionFunc) apply(c *options) { fn(c) }

type options struct {
    release      string
    environment  string
    serverName   string
    levels       []zerolog.Level
    ignoreErrors []string
    sampleRate   float64
    flushTimeout time.Duration
    debug        bool
}

// WithLevels configures zerolog levels that have to be sent to Sentry.
// Default levels are: error, fatal, panic.
func WithLevels(levels ...zerolog.Level) WriterOption {
    return optionFunc(func(cfg *options) {
        cfg.levels = levels
    })
}

// WithSampleRate configures the sample rate as a percentage of events to be sent in the range of 0.0 to 1.0.
func WithSampleRate(rate float64) WriterOption {
    return optionFunc(func(cfg *options) {
        cfg.sampleRate = rate
    })
}

// WithRelease configures the release to be sent with events.
func WithRelease(release string) WriterOption {
    return optionFunc(func(cfg *options) {
        cfg.release = release
    })
}

// WithEnvironment configures the environment to be sent with events.
func WithEnvironment(environment string) WriterOption {
    return optionFunc(func(cfg *options) {
        cfg.environment = environment
    })
}

// WithServerName configures the server name field for events. Default value is OS hostname.
func WithServerName(serverName string) WriterOption {
    return optionFunc(func(cfg *options) {
        cfg.serverName = serverName
    })
}

// WithIgnoreErrors configures the list of regexp strings that will be used to match against event's message
// and if applicable, caught errors type and value. If the match is found, then a whole event will be dropped.
func WithIgnoreErrors(reList []string) WriterOption {
    return optionFunc(func(cfg *options) {
        cfg.ignoreErrors = reList
    })
}

// WithDebug enables sentry client debug logs.
func WithDebug(debug bool) WriterOption {
    return optionFunc(func(cfg *options) {
        cfg.debug = debug
    })
}

// NewSentryWriter creates writer with provided DSN and options.
func NewSentryWriter(dsn string, opts ...WriterOption) (*SentryWriter, error) {
    cfg := newDefaultConfig()
    for _, opt := range opts {
        opt.apply(&cfg)
    }

    client, err := sentry.NewClient(sentry.ClientOptions{
        Dsn:          dsn,
        SampleRate:   cfg.sampleRate,
        Release:      cfg.release,
        Environment:  cfg.environment,
        ServerName:   cfg.serverName,
        IgnoreErrors: cfg.ignoreErrors,
        Debug:        cfg.debug,
    })
    if err != nil {
        return nil, err
    }

    levels := make(map[zerolog.Level]struct{}, len(cfg.levels))
    for _, lvl := range cfg.levels {
        levels[lvl] = struct{}{}
    }

    return &SentryWriter{
        client:       client,
        levels:       levels,
        flushTimeout: cfg.flushTimeout,
    }, nil
}

func newDefaultConfig() options {
    return options{
        levels: []zerolog.Level{
            zerolog.ErrorLevel,
            zerolog.FatalLevel,
            zerolog.PanicLevel,
        },
        sampleRate:   1.0,
        flushTimeout: 3 * time.Second,
    }
}
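The functional options above can be combined when the defaults (error/fatal/panic levels, 100% sampling) are not wanted. A hedged sketch, assuming a DSN in the environment and the inferred octopus/pkg/logger import path:

package main

import (
    "os"

    "octopus/pkg/logger"

    "github.com/rs/zerolog"
)

func main() {
    w, err := logger.NewSentryWriter(
        os.Getenv("SENTRY_DSN"),
        logger.WithLevels(zerolog.WarnLevel, zerolog.ErrorLevel, zerolog.FatalLevel),
        logger.WithSampleRate(0.5), // keep roughly half of the matching events
        logger.WithEnvironment("staging"),
    )
    if err != nil {
        panic(err)
    }
    defer w.Close() // flush pending events on shutdown

    log := zerolog.New(w).With().Timestamp().Logger()
    log.Error().Str("bid", "demo").Msg("example event forwarded to Sentry")
}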
pkg/logger/setup.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package logger

import (
    "io"
    "os"

    "octopus"
    "octopus/internal/config"

    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
    "github.com/rs/zerolog/pkgerrors"
)

func Setup() {
    cfg := config.Get()
    zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack

    writers := []io.Writer{}
    if cfg.IsLocal {
        writers = append(writers, zerolog.NewConsoleWriter())
    } else {
        writers = append(writers, os.Stderr)
    }
    if dsn := cfg.Sentry.DSN; dsn != "" {
        sentryWriter, err := NewSentryWriter(
            dsn,
            WithDebug(cfg.Debug),
            WithEnvironment(cfg.Sentry.Environment),
            WithRelease(octopus.Version),
            WithServerName("octopus-service"),
        )
        if err == nil {
            writers = append(writers, sentryWriter)
        }
    }
    log.Logger = zerolog.New(io.MultiWriter(writers...)).With().Stack().Timestamp().Logger()
}
pkg/middlewares/metrics/config.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package metrics

import "github.com/gofiber/fiber/v2"

// Config defines the config for middleware.
type Config struct {
    // Next defines a function to skip this middleware when returned true.
    //
    // Optional. Default: nil
    Next func(c *fiber.Ctx) bool
}

// ConfigDefault is the default config
var ConfigDefault = Config{
    Next: nil,
}

// Helper function to set default values
func configDefault(config ...Config) Config {
    // Return default config if nothing provided
    if len(config) < 1 {
        return ConfigDefault
    }

    // Override default config
    cfg := config[0]

    return cfg
}
pkg/middlewares/metrics/middleware.go (new file, 135 lines)
@@ -0,0 +1,135 @@
package metrics

import (
    "strconv"
    "time"

    "github.com/gofiber/fiber/v2"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

const serviceName = "cp-service"

var (
    labels = prometheus.Labels{"service": serviceName}

    KafkaConsumeSuccessCounter = promauto.NewCounterVec(
        prometheus.CounterOpts{
            Name:        prometheus.BuildFQName("cp", "kafka", "consume_success"),
            Help:        "记录kafka消息消费成功的次数",
            ConstLabels: labels,
        }, []string{"topic", "bid"},
    )
    KafkaConsumeErrorCounter = promauto.NewCounterVec(
        prometheus.CounterOpts{
            Name:        prometheus.BuildFQName("cp", "kafka", "consume_error"),
            Help:        "记录kafka消息消费失败的次数",
            ConstLabels: labels,
        }, []string{"topic", "bid"},
    )
    KafkaConsumeProcessingCounter = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Name:        prometheus.BuildFQName("cp", "kafka", "consume_processing"),
        Help:        "当前正在处理的kafka消息个数",
        ConstLabels: labels,
    }, []string{"topic", "bid"},
    )

    requestInProgressTotal = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Name:        prometheus.BuildFQName("cp", "http", "requests_in_progress_total"),
        Help:        "All the requests in progress",
        ConstLabels: labels,
    }, []string{"method"})

    requestsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
        Name:        prometheus.BuildFQName("cp", "http", "requests_total"),
        Help:        "Count all http requests by status code, method and path.",
        ConstLabels: labels,
    },
        []string{"status_code", "method", "path"},
    )
    requestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
        Name:        prometheus.BuildFQName("cp", "http", "request_duration_seconds"),
        Help:        "Duration of all HTTP requests by status code, method and path.",
        ConstLabels: labels,
        Buckets: []float64{
            0.000000001, // 1ns
            0.000000002,
            0.000000005,
            0.00000001, // 10ns
            0.00000002,
            0.00000005,
            0.0000001, // 100ns
            0.0000002,
            0.0000005,
            0.000001, // 1µs
            0.000002,
            0.000005,
            0.00001, // 10µs
            0.00002,
            0.00005,
            0.0001, // 100µs
            0.0002,
            0.0005,
            0.001, // 1ms
            0.002,
            0.005,
            0.01, // 10ms
            0.02,
            0.05,
            0.1, // 100ms
            0.2,
            0.5,
            1.0, // 1s
            2.0,
            5.0,
            10.0, // 10s
            15.0,
            20.0,
            30.0,
        },
    },
        []string{"status_code", "method", "path"},
    )
)

func NewMiddleware(cfgs ...Config) fiber.Handler {
    cfg := configDefault(cfgs...)
    return func(ctx *fiber.Ctx) error {
        if cfg.Next != nil && cfg.Next(ctx) {
            return ctx.Next()
        }
        start := time.Now()
        method := ctx.Route().Method

        if ctx.Route().Path == "/metrics" {
            return ctx.Next()
        }

        requestInProgressTotal.WithLabelValues(method).Inc()
        defer requestInProgressTotal.WithLabelValues(method).Dec()

        err := ctx.Next()
        // initialize with default error code
        // https://docs.gofiber.io/guide/error-handling
        status := fiber.StatusInternalServerError
        if err != nil {
            if e, ok := err.(*fiber.Error); ok {
                // Get correct error code from fiber.Error type
                status = e.Code
            }
        } else {
            status = ctx.Response().StatusCode()
        }

        path := ctx.Route().Path

        statusCode := strconv.Itoa(status)
        requestsTotal.WithLabelValues(statusCode, method, path).Inc()

        elapsed := float64(time.Since(start).Nanoseconds()) / 1e9
        requestDuration.WithLabelValues(statusCode, method, path).Observe(elapsed)

        return err
    }
}
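A usage sketch for the metrics middleware (hypothetical main package and ports; the /metrics endpoint itself is not part of this diff, so it is exposed here through promhttp on a separate listener):

package main

import (
    "net/http"

    "octopus/pkg/middlewares/metrics"

    "github.com/gofiber/fiber/v2"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    app := fiber.New()
    app.Use(metrics.NewMiddleware()) // records request counts, durations and in-flight gauges

    // promauto registers the collectors above with the default registry,
    // so serving it on a side port is enough for scraping.
    go func() {
        http.Handle("/metrics", promhttp.Handler())
        _ = http.ListenAndServe(":9090", nil)
    }()

    _ = app.Listen(":8080")
}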
pkg/middlewares/uniform/uniform.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package uniform

import (
    "github.com/gofiber/fiber/v2"
    "github.com/rs/zerolog/log"
    "github.com/tidwall/gjson"
    "github.com/tidwall/sjson"
)

// Reshapes JSON responses into the company-standard uniform format.

// New creates a new middleware handler
func New(cfgs ...Config) fiber.Handler {
    // Set default config
    cfg := configDefault(cfgs...)

    // Return new handler
    return func(ctx *fiber.Ctx) error {
        // Don't execute middleware if Next returns true
        if cfg.Next != nil && cfg.Next(ctx) {
            return ctx.Next()
        }

        uniformedResp := []byte{}
        _ = ctx.Next()
        code := ctx.Response().StatusCode()
        if code >= 200 && code < 300 {
            if string(ctx.Response().Header.ContentType()) == "application/json" {
                uniformedResp, _ = sjson.SetBytes(uniformedResp, "code", 0)
                uniformedResp, _ = sjson.SetRawBytes(uniformedResp, "data", ctx.Response().Body())
                ctx.Response().SetBodyRaw(uniformedResp)
            }
            return nil
        }
        if code == 422 {
            uniformedResp, _ = sjson.SetBytes(uniformedResp, "code", 422)
            uniformedResp, _ = sjson.SetBytes(uniformedResp, "message", "参数校验失败")
            uniformedResp, _ = sjson.SetRawBytes(uniformedResp, "details", ctx.Response().Body())
            ctx.Response().SetBodyRaw(uniformedResp)
            return nil
        }
        if code >= 400 {
            resp := ctx.Context().Response.Body()
            log.Ctx(ctx.UserContext()).Error().
                Int("status", code).
                Str("request", ctx.String()).
                // log the original response body; uniformedResp is still empty at this point
                Bytes("body", resp).
                Msg("error occurred")
            uniformedResp, _ = sjson.SetBytes(uniformedResp, "code", code)

            if msg := gjson.GetBytes(resp, "message"); msg.Exists() {
                uniformedResp, _ = sjson.SetBytes(uniformedResp, "message", msg.String())
            } else {
                // pass the raw body as a string; a []byte value would be base64-encoded by sjson
                uniformedResp, _ = sjson.SetBytes(uniformedResp, "message", string(resp))
            }
            ctx.Response().SetBodyRaw(uniformedResp)
        }
        return nil
    }
}
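To illustrate the envelope the middleware produces, a small sketch (hypothetical route and values; the expected bodies are shown in the comments):

package main

import (
    "octopus/pkg/middlewares/uniform"

    "github.com/gofiber/fiber/v2"
)

func main() {
    app := fiber.New()
    app.Use(uniform.New())

    // Handler writes:  {"name":"octopus"}
    // Client receives: {"code":0,"data":{"name":"octopus"}}
    app.Get("/info", func(c *fiber.Ctx) error {
        return c.JSON(fiber.Map{"name": "octopus"})
    })

    // A 4xx response carrying {"message":"not found"} would be rewrapped as
    // {"code":404,"message":"not found"} and logged by the middleware.
    _ = app.Listen(":8080")
}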
pkg/middlewares/uniform/uniform_config.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package uniform

import "github.com/gofiber/fiber/v2"

// Config defines the config for middleware.
type Config struct {
    // Next defines a function to skip this middleware when returned true.
    //
    // Optional. Default: nil
    Next func(c *fiber.Ctx) bool
}

// ConfigDefault is the default config
var ConfigDefault = Config{
    Next: nil,
}

// Helper function to set default values
func configDefault(config ...Config) Config {
    // Return default config if nothing provided
    if len(config) < 1 {
        return ConfigDefault
    }

    // Override default config
    cfg := config[0]

    return cfg
}
pkg/s3/s3.go (new file, 1 line)
@@ -0,0 +1 @@
package s3
pkg/tools/generics.go (new file, 65 lines)
@@ -0,0 +1,65 @@
package tools

// Map returns a new slice where each element is the result of fn for the corresponding element in the original slice
func Map[T any, U any](slice []T, fn func(T) U) []U {
    result := make([]U, len(slice))
    for i, t := range slice {
        result[i] = fn(t)
    }

    return result
}

// Contains returns true if find appears in slice
func Contains[T comparable](slice []T, find T) bool {
    for _, t := range slice {
        if t == find {
            return true
        }
    }

    return false
}

// IndexOf returns the index of find if it appears in slice. If find is not in slice, -1 will be returned.
func IndexOf[T comparable](slice []T, find T) int {
    for i, t := range slice {
        if t == find {
            return i
        }
    }

    return -1
}

// GroupBy returns a map that is keyed by keySelector and contains a slice of elements returned by valSelector
func GroupBy[T any, K comparable, V any](slice []T, keySelector func(T) K, valSelector func(T) V) map[K][]V {
    grouping := make(map[K][]V)
    for _, t := range slice {
        key := keySelector(t)
        grouping[key] = append(grouping[key], valSelector(t))
    }

    return grouping
}

// ToSet returns a Set containing the key produced by keySelector for each element in slice.
func ToSet[T any, K comparable](slice []T, keySelector func(T) K) *Set[K] {
    set := NewSetWithCapacity[K](len(slice))
    for _, t := range slice {
        set.Add(keySelector(t))
    }

    return set
}

// ToMap returns a map that is keyed by keySelector and has the value of valSelector for each element in slice.
// If multiple elements return the same key, the element that appears later in slice will be chosen.
func ToMap[T any, K comparable, V any](slice []T, keySelector func(T) K, valSelector func(T) V) map[K]V {
    m := make(map[K]V)
    for _, t := range slice {
        m[keySelector(t)] = valSelector(t)
    }

    return m
}
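A quick usage sketch for the generic helpers (hypothetical data and main package, import path inferred from the file location):

package main

import (
    "fmt"

    "octopus/pkg/tools"
)

type user struct {
    ID   int
    Team string
}

func main() {
    users := []user{{1, "core"}, {2, "core"}, {3, "infra"}}

    ids := tools.Map(users, func(u user) int { return u.ID }) // [1 2 3]
    byTeam := tools.GroupBy(users,
        func(u user) string { return u.Team }, // key: team name
        func(u user) int { return u.ID },      // value: user ID -> map[core:[1 2] infra:[3]]
    )
    byID := tools.ToMap(users, func(u user) int { return u.ID }, func(u user) user { return u })

    fmt.Println(ids, byTeam, byID[3].Team, tools.Contains(ids, 2))
}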
pkg/tools/set.go (new file, 101 lines)
@@ -0,0 +1,101 @@
package tools

import (
    "sync"
)

type Set[T comparable] struct {
    mu  sync.RWMutex
    set map[T]struct{}
}

func NewSetWithCapacity[T comparable](c int) *Set[T] {
    set := make(map[T]struct{}, c)
    return &Set[T]{set: set}
}

func NewSet[T comparable](elems ...T) *Set[T] {
    set := make(map[T]struct{}, len(elems))
    for _, elem := range elems {
        set[elem] = struct{}{}
    }
    return &Set[T]{set: set}
}

func (s *Set[T]) Add(ele ...T) *Set[T] {
    s.mu.Lock()
    defer s.mu.Unlock()
    for _, e := range ele {
        s.set[e] = struct{}{}
    }
    return s
}

func (s *Set[T]) Del(ele ...T) *Set[T] {
    s.mu.Lock()
    defer s.mu.Unlock()
    for _, e := range ele {
        delete(s.set, e)
    }
    return s
}

func (s *Set[T]) Has(ele T) bool {
    s.mu.RLock()
    defer s.mu.RUnlock()
    _, exists := s.set[ele]
    return exists
}

// Intersect returns the intersection of s and other.
func (s *Set[T]) Intersect(other *Set[T]) *Set[T] {
    s.mu.RLock()
    defer s.mu.RUnlock()
    ret := NewSet[T]()
    for ele := range s.set {
        if other.Has(ele) {
            ret.Add(ele)
        }
    }
    return ret
}

func (s *Set[T]) Len() int {
    return len(s.set)
}

func (s *Set[T]) Set() map[T]struct{} {
    return s.set
}

func (s *Set[T]) Values() []T {
    s.mu.RLock()
    defer s.mu.RUnlock()
    ret := make([]T, 0, len(s.set))
    for ele := range s.set {
        ret = append(ret, ele)
    }
    return ret
}

// Compare reports the difference between the two sets:
// less holds the elements of other that are missing from s (other - s),
// more holds the elements of s that are not in other (s - other).
func (s *Set[T]) Compare(other *Set[T]) (less, more *Set[T]) {
    less = NewSet[T]()
    more = NewSet[T]()
    s.mu.RLock()
    other.mu.RLock()
    defer s.mu.RUnlock()
    defer other.mu.RUnlock()
    // Access the maps directly: calling Has here would re-acquire read locks
    // that are already held and could deadlock with a waiting writer.
    for ele := range other.set {
        if _, ok := s.set[ele]; !ok {
            less.Add(ele)
        }
    }
    for ele := range s.set {
        if _, ok := other.set[ele]; !ok {
            more.Add(ele)
        }
    }
    return less, more
}
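A short sketch of the Set type in use (hypothetical values, main package for brevity):

package main

import (
    "fmt"

    "octopus/pkg/tools"
)

func main() {
    current := tools.NewSet("a", "b", "c")
    desired := tools.NewSet("b", "c", "d")

    common := current.Intersect(desired)       // {b c}
    missing, extra := current.Compare(desired) // missing = desired - current = {d}, extra = current - desired = {a}

    fmt.Println(common.Values(), missing.Values(), extra.Values(), current.Has("a"))
}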
pkg/tools/times.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package tools

import "time"

func FirstDayOfISOWeek(year int, week int, timezone *time.Location) time.Time {
    date := time.Date(year, 0, 0, 0, 0, 0, 0, timezone)
    isoYear, isoWeek := date.ISOWeek()
    for date.Weekday() != time.Monday { // iterate back to Monday
        date = date.AddDate(0, 0, -1)
        isoYear, isoWeek = date.ISOWeek()
    }
    for isoYear < year { // iterate forward to the first day of the first week
        date = date.AddDate(0, 0, 1)
        isoYear, isoWeek = date.ISOWeek()
    }
    for isoWeek < week { // iterate forward to the first day of the given week
        date = date.AddDate(0, 0, 1)
        _, isoWeek = date.ISOWeek()
    }
    return date
}
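For example, a small sketch (ISO week 1 of 2023 starts on Monday, 2023-01-02):

package main

import (
    "fmt"
    "time"

    "octopus/pkg/tools"
)

func main() {
    monday := tools.FirstDayOfISOWeek(2023, 1, time.UTC)
    fmt.Println(monday.Format("2006-01-02")) // 2023-01-02
}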
pkg/utils/cache.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package utils

import (
    "sync"
    "time"
)

// Cache is a simple in-memory cache with a per-key TTL.
type Cache[T any] struct {
    storage map[string]T
    ttl     map[string]time.Time
    mu      sync.RWMutex
}

func NewCache[T any]() *Cache[T] {
    return &Cache[T]{
        storage: make(map[string]T),
        ttl:     make(map[string]time.Time),
        mu:      sync.RWMutex{},
    }
}

// Get returns the cached value for key if it is still fresh; otherwise it calls
// fn, stores the result for ttl, and returns it (double-checked locking).
func (c *Cache[T]) Get(key string, ttl time.Duration, fn func() (T, error)) (T, error) {
    c.mu.RLock()
    if v, ok := c.storage[key]; ok {
        if time.Now().Before(c.ttl[key]) {
            c.mu.RUnlock()
            return v, nil
        }
    }
    c.mu.RUnlock()

    c.mu.Lock()
    defer c.mu.Unlock()

    if v, ok := c.storage[key]; ok {
        if time.Now().Before(c.ttl[key]) {
            return v, nil
        }
        delete(c.storage, key)
        delete(c.ttl, key)
    }

    v, err := fn()
    if err != nil {
        return v, err
    }
    c.storage[key] = v
    c.ttl[key] = time.Now().Add(ttl)
    return v, nil
}
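A usage sketch for the TTL cache (hypothetical key and loader; the second call within the TTL window is served from memory):

package main

import (
    "fmt"
    "time"

    "octopus/pkg/utils"
)

func main() {
    cache := utils.NewCache[string]()

    load := func() (string, error) {
        fmt.Println("loading from upstream...")
        return "value-from-db", nil
    }

    v1, _ := cache.Get("config:tenant-1", time.Minute, load) // invokes load
    v2, _ := cache.Get("config:tenant-1", time.Minute, load) // served from cache
    fmt.Println(v1, v2)
}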
pkg/utils/collections.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package utils

func Any[T any](items []T, predicate func(T) bool) bool {
    for _, item := range items {
        if predicate(item) {
            return true
        }
    }
    return false
}

func All[T any](items []T, predicate func(T) bool) bool {
    for _, item := range items {
        if !predicate(item) {
            return false
        }
    }
    return true
}
pkg/utils/date.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package utils

import (
    "fmt"
    "time"

    "github.com/spf13/cast"
)

var parseLayouts = []string{
    "2006-01-02 15:04:05",
    "2006-1-2 15:04:05",
    "2006-1-2 15:4:5",
    "2006/01/02 15:04:05",
    "2006/1/2 15:04:05",
    "2006/1/2 15:4:5",
    "2006年01月02日 15时04分05秒",
    "2006年1月2日 15时04分05秒",
    "2006年1月2日 15时4分5秒",
    "2006年01月02日 15时04分",
    "2006年1月2日 15时04分",
    "2006年1月2日 15时4分",
    "2006年01月02日 15:04:05",
    "2006年1月2日 15:04:05",
    "2006年1月2日 15:4:5",
}

func ToDateE(v interface{}) (time.Time, error) {
    beijing, _ := time.LoadLocation("Asia/Shanghai")
    for _, layout := range parseLayouts {
        if t, err := time.ParseInLocation(layout, cast.ToString(v), beijing); err == nil {
            return t, nil
        }
    }
    if t, err := cast.ToTimeE(v); err == nil {
        return t, nil
    }
    return time.Time{}, fmt.Errorf("不能被识别的时间%s", cast.ToString(v))
}
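For example, a sketch of the parser on a Chinese-formatted timestamp (times are interpreted in Asia/Shanghai):

package main

import (
    "fmt"

    "octopus/pkg/utils"
)

func main() {
    t, err := utils.ToDateE("2023年1月2日 15时04分")
    if err != nil {
        panic(err)
    }
    fmt.Println(t) // 2023-01-02 15:04:00 +0800 CST
}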
pkg/utils/dbg.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package utils

import (
    "encoding/json"
    "fmt"
)

func DBG(v interface{}, msg ...string) {
    bv, err := json.MarshalIndent(v, "", " ")
    if len(msg) == 0 {
        fmt.Println("===========DBG===========")
    } else {
        fmt.Printf("===========%s===========\n", msg[0])
    }
    if err != nil {
        fmt.Println(err.Error())
    } else {
        fmt.Println(string(bv))
    }
    fmt.Println("=========================")
}

func DbgE(v interface{}, err error) {
    // surface the error instead of silently dropping the parameter
    if err != nil {
        fmt.Println("ERR:", err.Error())
    }
    bv, _ := json.Marshal(v)
    fmt.Println(string(bv))
}
pkg/utils/hash.go (new file, 11 lines)
@@ -0,0 +1,11 @@
package utils

import (
    "crypto/md5"
    "encoding/hex"
)

func MD5(d []byte) string {
    hash := md5.Sum(d)
    return hex.EncodeToString(hash[:])
}
pkg/utils/ptr.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package utils

// Ptr returns a pointer to v, which is handy for taking the address of literals.
func Ptr[T any](v T) *T {
    return &v
}

// Unptr dereferences v, returning the zero value of T when v is nil.
func Unptr[T any](v *T) T {
    if v == nil {
        var zero T
        return zero
    }
    return *v
}