flowchart TD
A[开始性能分析] --> B[建立性能基线]
B --> C[选择分析工具]
C --> D[收集性能数据]
D --> E[分析数据结果]
E --> F{发现瓶颈?}
F -->|是| G[制定优化方案]
F -->|否| H[监控维护]
G --> I[实施优化]
I --> J[验证效果]
J --> K{达到目标?}
K -->|是| H
K -->|否| C
H --> L[持续监控]
L --> M{性能下降?}
M -->|是| C
M -->|否| L
graph TB
A[性能分析工具] --> B[内置工具]
A --> C[第三方工具]
A --> D[系统工具]
B --> B1[pprof]
B --> B2[trace]
B --> B3[benchmark]
C --> C1[go-torch]
C --> C2[graphviz]
C --> C3[Jaeger]
D --> D1[top/htop]
D --> D2[iostat]
D --> D3[netstat]
package optimization
import (
"bytes"
"encoding/json"
"fmt"
"strings"
"testing"
)
// BenchmarkStringConcat compares four ways to concatenate a small set of
// strings: naive +=, strings.Builder, bytes.Buffer, and strings.Join.
// Run with: go test -bench=BenchmarkStringConcat -benchmem
func BenchmarkStringConcat(b *testing.B) {
	parts := []string{"hello", "world", "go", "programming"}
	b.Run("Plus", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			// += reallocates on every append; expected to be the slowest.
			out := ""
			for _, p := range parts {
				out += p
			}
			_ = out
		}
	})
	b.Run("Builder", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			var sb strings.Builder
			for _, p := range parts {
				sb.WriteString(p)
			}
			_ = sb.String()
		}
	})
	b.Run("Buffer", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			var bb bytes.Buffer
			for _, p := range parts {
				bb.WriteString(p)
			}
			_ = bb.String()
		}
	})
	b.Run("Join", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			// Join pre-computes the total length in a single pass.
			_ = strings.Join(parts, "")
		}
	})
}
// User is the sample payload used by the JSON serialization benchmarks
// below; the json tags define the wire field names.
type User struct {
	ID       int    `json:"id"`
	Username string `json:"username"`
	Email    string `json:"email"`
	Age      int    `json:"age"`
}
// BenchmarkJSONSerialization compares json.Marshal (returns a fresh byte
// slice) against a stream-oriented json.Encoder writing into a
// bytes.Buffer, for one small fixed struct.
func BenchmarkJSONSerialization(b *testing.B) {
	sample := User{
		ID:       1,
		Username: "testuser",
		Email:    "test@example.com",
		Age:      25,
	}
	b.Run("Marshal", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			if _, err := json.Marshal(sample); err != nil {
				b.Fatal(err)
			}
		}
	})
	b.Run("Encoder", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			var out bytes.Buffer
			if err := json.NewEncoder(&out).Encode(sample); err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkSliceOperations compares three ways to fill a 1000-element int
// slice: growing append, append into a pre-allocated capacity, and direct
// index assignment into a pre-sized slice.
func BenchmarkSliceOperations(b *testing.B) {
	const count = 1000
	b.Run("Append", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			// nil slice: append reallocates as the backing array grows.
			var values []int
			for v := 0; v < count; v++ {
				values = append(values, v)
			}
		}
	})
	b.Run("PreAllocated", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			// Capacity reserved up front: append never reallocates.
			values := make([]int, 0, count)
			for v := 0; v < count; v++ {
				values = append(values, v)
			}
		}
	})
	b.Run("IndexAssignment", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			// Length set up front: plain stores, no append bookkeeping.
			values := make([]int, count)
			for v := 0; v < count; v++ {
				values[v] = v
			}
		}
	})
}
// BenchmarkMapOperations measures the effect of pre-sizing a map: filling
// 1000 string keys into make(map) with and without a capacity hint.
func BenchmarkMapOperations(b *testing.B) {
	const count = 1000
	keys := make([]string, count)
	for k := 0; k < count; k++ {
		keys[k] = fmt.Sprintf("key%d", k)
	}
	b.Run("WithoutPreAllocation", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			// No size hint: buckets grow (and rehash) as entries arrive.
			table := make(map[string]int)
			for idx, key := range keys {
				table[key] = idx
			}
		}
	})
	b.Run("WithPreAllocation", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			// Size hint avoids incremental bucket growth.
			table := make(map[string]int, count)
			for idx, key := range keys {
				table[key] = idx
			}
		}
	})
}
// RunBenchmarks is a convenience hook callable from main; it only prints
// the go test invocation that actually runs the benchmarks in this package.
func RunBenchmarks() {
	fmt.Println("Run benchmarks with: go test -bench=. -benchmem")
}
flowchart TD
A[内存优化开始] --> B[分析内存使用情况]
B --> C[识别内存问题类型]
C --> D{问题类型}
D -->|GC频繁| E[调整GC参数]
D -->|内存泄漏| F[检查对象引用]
D -->|分配过多| G[优化内存分配]
E --> H[设置GOGC]
E --> I[调整内存限制]
F --> J[修复循环引用]
F --> K[释放长期引用]
G --> L[使用对象池]
G --> M[预分配内存]
H --> N[验证GC效果]
I --> N
J --> O[验证泄漏修复]
K --> O
L --> P[验证分配优化]
M --> P
N --> Q{效果满意?}
O --> Q
P --> Q
Q -->|是| R[持续监控]
Q -->|否| B
flowchart TD
A[任务提交] --> B[任务队列]
B --> C{队列是否满?}
C -->|否| D[任务入队]
C -->|是| E[阻塞等待/拒绝]
D --> F[Worker池]
F --> G[Worker1]
F --> H[Worker2]
F --> I[WorkerN]
G --> J[执行任务1]
H --> K[执行任务2]
I --> L[执行任务N]
J --> M[任务完成]
K --> M
L --> M
M --> N[返回结果]
N --> O[Worker空闲]
O --> P{有新任务?}
P -->|是| B
P -->|否| Q[等待新任务]
Q --> P
style F fill:#e1f5fe
style B fill:#fff3e0
style M fill:#e8f5e8
package concurrency
import (
"context"
"fmt"
"runtime"
"sync"
"sync/atomic"
"time"
)
// Task is the unit of work consumed by the worker pool. Execute receives
// the pool's context and should return any error from running the work.
type Task interface {
	Execute(ctx context.Context) error
}
// SimpleTask is a minimal Task implementation: an identifier plus a Work
// closure that Execute invokes.
type SimpleTask struct {
	ID   int
	Work func() error
}
// Execute runs the wrapped Work function, unless ctx has already been
// canceled, in which case the context's error is returned without running
// the work.
func (t *SimpleTask) Execute(ctx context.Context) error {
	// Non-blocking cancellation check before starting the work; ctx.Err()
	// is non-nil exactly when ctx.Done() is closed.
	if err := ctx.Err(); err != nil {
		return err
	}
	return t.Work()
}
// WorkerPool is a fixed-size goroutine pool: Start launches workerCount
// workers that consume Tasks from a buffered queue filled by Submit, until
// the pool's context is canceled by Stop.
type WorkerPool struct {
	workerCount int
	taskQueue   chan Task
	wg          sync.WaitGroup
	ctx         context.Context    // canceled by Stop to shut workers down
	cancel      context.CancelFunc
	// Statistics, updated only via sync/atomic. Kept grouped so the int64
	// fields stay 8-byte aligned, which sync/atomic requires on 32-bit
	// platforms.
	tasksProcessed int64
	tasksQueued    int64
	activeWorkers  int64
}
// NewWorkerPool builds a pool that will run workerCount workers and buffer
// up to queueSize pending tasks. Call Start to launch the workers.
func NewWorkerPool(workerCount, queueSize int) *WorkerPool {
	ctx, cancel := context.WithCancel(context.Background())
	pool := &WorkerPool{
		workerCount: workerCount,
		taskQueue:   make(chan Task, queueSize),
		ctx:         ctx,
		cancel:      cancel,
	}
	return pool
}
// Start launches the configured number of worker goroutines; each is
// registered with the pool's WaitGroup so Stop can wait for them.
func (wp *WorkerPool) Start() {
	for id := 0; id < wp.workerCount; id++ {
		wp.wg.Add(1)
		go wp.worker(id)
	}
}
// worker is the main loop of one pool goroutine: it pulls tasks off the
// queue until the pool context is canceled or the queue is closed.
func (wp *WorkerPool) worker(id int) {
	defer wp.wg.Done()
	for {
		select {
		case <-wp.ctx.Done():
			return
		case task, ok := <-wp.taskQueue:
			if !ok {
				// Queue closed: no further work will arrive.
				return
			}
			wp.runTask(id, task)
		}
	}
}

// runTask executes a single task, logging (not propagating) its error, and
// maintains the pool's atomic counters around the execution.
func (wp *WorkerPool) runTask(id int, task Task) {
	atomic.AddInt64(&wp.activeWorkers, 1)
	if err := task.Execute(wp.ctx); err != nil {
		fmt.Printf("Worker %d: task execution failed: %v\n", id, err)
	}
	atomic.AddInt64(&wp.tasksProcessed, 1)
	atomic.AddInt64(&wp.activeWorkers, -1)
}
// Submit enqueues a task without blocking. It returns the context error if
// the pool has been stopped, nil on success, and a "queue is full" error
// when the buffer has no room.
//
// NOTE(review): if anything ever closes taskQueue while a Submit is in
// flight, the send case here panics ("send on closed channel"); callers
// must not Submit concurrently with or after a close of the queue.
func (wp *WorkerPool) Submit(task Task) error {
	select {
	case <-wp.ctx.Done():
		return wp.ctx.Err()
	case wp.taskQueue <- task:
		atomic.AddInt64(&wp.tasksQueued, 1)
		return nil
	default:
		// Queue full: fail fast instead of blocking the caller.
		return fmt.Errorf("task queue is full")
	}
}
// Stop shuts the pool down: it cancels the pool context and waits for all
// workers to exit. Tasks still sitting in the queue are not executed.
//
// The task queue is deliberately NOT closed here. The previous
// implementation called close(wp.taskQueue) after cancel, which could
// panic with "send on closed channel" if a concurrent Submit had already
// selected the send case. Workers exit via context cancellation instead,
// and Submit keeps failing fast with ctx.Err() after cancellation.
func (wp *WorkerPool) Stop() {
	wp.cancel()
	wp.wg.Wait()
}
// Stats returns a snapshot of the pool counters: tasks accepted by Submit,
// tasks finished by workers, and workers currently executing a task.
func (wp *WorkerPool) Stats() (queued, processed, active int64) {
	queued = atomic.LoadInt64(&wp.tasksQueued)
	processed = atomic.LoadInt64(&wp.tasksProcessed)
	active = atomic.LoadInt64(&wp.activeWorkers)
	return queued, processed, active
}
// AdaptiveWorkerPool wraps WorkerPool and grows the worker count (between
// minWorkers and maxWorkers) based on queue backlog, re-evaluated every
// scaleInterval by a background goroutine started from Start.
type AdaptiveWorkerPool struct {
	*WorkerPool
	minWorkers    int
	maxWorkers    int
	scaleInterval time.Duration // how often the auto-scaler re-evaluates
	scaleMutex    sync.Mutex    // serializes scale() invocations
}
// NewAdaptiveWorkerPool builds a pool that starts with minWorkers workers,
// may grow up to maxWorkers, buffers queueSize tasks, and re-evaluates its
// size every scaleInterval. Call Start to launch workers and the scaler.
func NewAdaptiveWorkerPool(minWorkers, maxWorkers, queueSize int, scaleInterval time.Duration) *AdaptiveWorkerPool {
	return &AdaptiveWorkerPool{
		WorkerPool:    NewWorkerPool(minWorkers, queueSize),
		minWorkers:    minWorkers,
		maxWorkers:    maxWorkers,
		scaleInterval: scaleInterval,
	}
}
// Start launches the base pool's workers and then the background
// auto-scaling loop, which runs until the pool context is canceled.
func (awp *AdaptiveWorkerPool) Start() {
	awp.WorkerPool.Start()
	// Launch automatic scaling in the background.
	go awp.autoScale()
}
// autoScale periodically calls scale until the embedded pool's context is
// canceled. It is meant to run on its own goroutine.
func (awp *AdaptiveWorkerPool) autoScale() {
	ticker := time.NewTicker(awp.scaleInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			awp.scale()
		case <-awp.ctx.Done():
			return
		}
	}
}
// scale inspects the queue backlog and adds at most one worker per call,
// up to maxWorkers. Scale-down is intentionally only logged, not
// performed (see the note below).
//
// NOTE(review): awp.workerCount is written here under scaleMutex but read
// elsewhere (e.g. WorkerPool.Start) without it, and wp.wg.Add here can
// race with a concurrent Stop's wg.Wait — safe only if Stop is never
// called while the scaler is running; confirm callers guarantee that.
func (awp *AdaptiveWorkerPool) scale() {
	awp.scaleMutex.Lock()
	defer awp.scaleMutex.Unlock()
	queueLen := len(awp.taskQueue)
	currentWorkers := awp.workerCount
	// Decide whether to scale based on queue length vs. current workers.
	if queueLen > currentWorkers*2 && currentWorkers < awp.maxWorkers {
		// Scale up: significant backlog and below the worker cap.
		newWorkers := currentWorkers + 1
		if newWorkers > awp.maxWorkers {
			newWorkers = awp.maxWorkers
		}
		for i := currentWorkers; i < newWorkers; i++ {
			awp.wg.Add(1)
			go awp.worker(i)
		}
		awp.workerCount = newWorkers
		fmt.Printf("Scaled up to %d workers\n", newWorkers)
	} else if queueLen == 0 && currentWorkers > awp.minWorkers {
		// Scale down: queue empty and above the minimum worker count.
		// Actually stopping specific workers safely needs more machinery,
		// so this branch only reports the opportunity.
		fmt.Printf("Could scale down from %d workers\n", currentWorkers)
	}
}
flowchart TD
A[应用请求] --> B{连接池}
B -->|有空闲连接| C[获取连接]
B -->|无空闲连接| D{达到最大连接数?}
D -->|否| E[创建新连接]
D -->|是| F[等待连接释放]
E --> C
F --> G[连接释放]
G --> C
C --> H[执行SQL查询]
H --> I[处理结果]
I --> J[归还连接到池]
J --> K{连接空闲时间超时?}
K -->|是| L[关闭连接]
K -->|否| M[连接回到空闲池]
L --> N[连接池大小减1]
M --> O[等待下次使用]
style B fill:#e3f2fd
style H fill:#fff3e0
style J fill:#e8f5e8
flowchart TD
A[客户端请求] --> B[应用层]
B --> C{"L1缓存<br>(内存缓存)"}
C -->|命中| D[返回数据]
C -->|未命中| E{"L2缓存<br>(Redis)"}
E -->|命中| F[写入L1缓存]
E -->|未命中| G{"L3缓存<br>(CDN)"}
G -->|命中| H[写入L2缓存]
G -->|未命中| I[数据库查询]
I --> J[写入L3缓存]
J --> H
H --> F
F --> D
K[缓存更新策略] --> L[Write-Through]
K --> M[Write-Behind]
K --> N[Write-Around]
O[缓存淘汰策略] --> P[LRU]
O --> Q[LFU]
O --> R[TTL]
style C fill:#e3f2fd
style E fill:#fff3e0
style G fill:#e8f5e8
style I fill:#ffebee
flowchart TD
A[HTTP请求] --> B{连接池}
B -->|复用连接| C[Keep-Alive连接]
B -->|新建连接| D[建立TCP连接]
D --> E[TLS握手]
E --> C
C --> F[发送请求]
F --> G{启用压缩?}
G -->|是| H[Gzip压缩]
G -->|否| I[原始数据]
H --> J[传输数据]
I --> J
J --> K[接收响应]
K --> L{响应压缩?}
L -->|是| M[解压缩]
L -->|否| N[处理响应]
M --> N
N --> O{缓存策略}
O -->|缓存| P[存储到缓存]
O -->|不缓存| Q[直接返回]
P --> Q
Q --> R{Keep-Alive?}
R -->|是| S[连接回池]
R -->|否| T[关闭连接]
S --> U[等待下次请求]
T --> V[连接结束]
style B fill:#e3f2fd
style G fill:#fff3e0
style O fill:#e8f5e8
flowchart TD
A[系统级优化] --> B[操作系统层]
A --> C[容器层]
A --> D[应用层]
B --> E[内核参数调优]
B --> F[文件系统优化]
B --> G[网络参数调优]
E --> E1[vm.swappiness]
E --> E2[net.core.somaxconn]
E --> E3[fs.file-max]
F --> F1[文件描述符限制]
F --> F2[磁盘I/O调度]
F --> F3[文件系统选择]
G --> G1[TCP参数优化]
G --> G2[连接队列大小]
G --> G3[超时时间设置]
C --> H[资源限制]
C --> I[容器配置]
C --> J[编排优化]
H --> H1[CPU限制]
H --> H2[内存限制]
H --> H3[I/O限制]
I --> I1[镜像优化]
I --> I2[启动参数]
I --> I3[健康检查]
J --> J1[副本数量]
J --> J2[负载均衡]
J --> J3[滚动更新]
D --> K[运行时优化]
D --> L[监控告警]
K --> K1[GOMAXPROCS]
K --> K2[GC参数]
K --> K3[内存分配]
L --> L1[性能指标]
L --> L2[资源使用]
L --> L3[异常检测]
style A fill:#e3f2fd
style B fill:#fff3e0
style C fill:#e8f5e8
style D fill:#ffebee
flowchart TD
A[应用系统] --> B[指标收集]
B --> C[Prometheus]
B --> D[Grafana]
B --> E[Jaeger]
C --> F[时序数据库]
D --> G[可视化面板]
E --> H[链路追踪]
F --> I[告警系统]
G --> J[监控大屏]
H --> K[性能分析]
I --> L[AlertManager]
J --> M[实时监控]
K --> N[瓶颈识别]
L --> O[通知渠道]
M --> P[运维团队]
N --> Q[优化建议]
O --> O1[邮件]
O --> O2[短信]
O --> O3[钉钉]
O --> O4[企业微信]
style A fill:#e3f2fd
style C fill:#fff3e0
style D fill:#e8f5e8
style E fill:#ffebee
sequenceDiagram
participant App as 应用程序
participant Collector as 指标收集器
participant TSDB as 时序数据库
participant Alert as 告警系统
participant Ops as 运维人员
App->>Collector: 上报性能指标
Collector->>TSDB: 存储时序数据
loop 定期检查
Alert->>TSDB: 查询指标数据
TSDB-->>Alert: 返回指标值
alt 指标异常
Alert->>Alert: 触发告警规则
Alert->>Ops: 发送告警通知
Ops->>App: 执行优化操作
App->>Collector: 上报新指标
else 指标正常
Alert->>Alert: 继续监控
end
end
Note over App,Ops: 持续监控与优化循环
package monitoring
import (
	"context"
	"fmt"
	"runtime"
	"sync"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)
// Prometheus metrics, registered with the default registry at package load
// time via promauto.
var (
	// requestDuration is a histogram of HTTP request latencies in seconds,
	// labeled by method, route pattern, and response status.
	requestDuration = promauto.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds",
			Help:    "HTTP request duration in seconds",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method", "endpoint", "status"},
	)
	// requestCount counts all HTTP requests, with the same label set.
	requestCount = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total number of HTTP requests",
		},
		[]string{"method", "endpoint", "status"},
	)
	// activeConnections gauges the number of currently open connections.
	activeConnections = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "active_connections",
			Help: "Number of active connections",
		},
	)
	// memoryUsage exposes runtime memory figures keyed by the "type" label
	// (heap_alloc, heap_sys — see collectSystemMetrics).
	memoryUsage = promauto.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "memory_usage_bytes",
			Help: "Memory usage in bytes",
		},
		[]string{"type"},
	)
)
// PrometheusMiddleware returns a gin middleware that, for every request,
// observes its latency in requestDuration and increments requestCount,
// labeled by HTTP method, route pattern, and response status code.
func PrometheusMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		begin := time.Now()
		// Run the rest of the handler chain, then record what happened.
		c.Next()
		elapsed := time.Since(begin).Seconds()
		labels := []string{
			c.Request.Method,
			c.FullPath(),
			fmt.Sprintf("%d", c.Writer.Status()),
		}
		requestDuration.WithLabelValues(labels...).Observe(elapsed)
		requestCount.WithLabelValues(labels...).Inc()
	}
}
// PerformanceCollector keeps the most recent value of each named metric in
// memory, guarded by an RWMutex for concurrent readers and writers.
type PerformanceCollector struct {
	metrics map[string]*Metric
	mu      sync.RWMutex
}

// Metric is one recorded sample: the latest value of a named measurement,
// the time it was recorded, and optional string labels.
type Metric struct {
	Name      string
	Value     float64
	Timestamp time.Time
	Labels    map[string]string
}
// NewPerformanceCollector creates a collector and starts a background
// goroutine that samples Go runtime metrics every 10 seconds.
//
// NOTE(review): the sampling goroutine has no stop mechanism, so each
// collector leaks one goroutine for the life of the process — fine for a
// process-wide singleton; confirm callers use it that way.
func NewPerformanceCollector() *PerformanceCollector {
	collector := &PerformanceCollector{
		metrics: make(map[string]*Metric),
	}
	// Start background collection of runtime/system metrics.
	go collector.collectSystemMetrics()
	return collector
}
// RecordMetric stores (or overwrites) the named metric with the given
// value and labels, stamped with the current time. Safe for concurrent use.
func (pc *PerformanceCollector) RecordMetric(name string, value float64, labels map[string]string) {
	// Build the entry outside the critical section; only the map write
	// needs the lock.
	entry := &Metric{
		Name:      name,
		Value:     value,
		Timestamp: time.Now(),
		Labels:    labels,
	}
	pc.mu.Lock()
	pc.metrics[name] = entry
	pc.mu.Unlock()
}
// GetMetrics returns a shallow copy of the metric map: the map itself is
// fresh, but the *Metric values are shared with the collector.
func (pc *PerformanceCollector) GetMetrics() map[string]*Metric {
	pc.mu.RLock()
	defer pc.mu.RUnlock()
	snapshot := make(map[string]*Metric, len(pc.metrics))
	for name, metric := range pc.metrics {
		snapshot[name] = metric
	}
	return snapshot
}
// collectSystemMetrics samples Go runtime statistics every 10 seconds and
// publishes them both to the internal metric map and to the Prometheus
// memoryUsage gauges. It never returns; run it on its own goroutine.
func (pc *PerformanceCollector) collectSystemMetrics() {
	const interval = 10 * time.Second
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for range ticker.C {
		var stats runtime.MemStats
		runtime.ReadMemStats(&stats)
		pc.RecordMetric("heap_alloc", float64(stats.HeapAlloc), nil)
		pc.RecordMetric("heap_sys", float64(stats.HeapSys), nil)
		pc.RecordMetric("goroutines", float64(runtime.NumGoroutine()), nil)
		pc.RecordMetric("gc_runs", float64(stats.NumGC), nil)
		// Mirror the heap figures into the Prometheus gauges.
		memoryUsage.WithLabelValues("heap_alloc").Set(float64(stats.HeapAlloc))
		memoryUsage.WithLabelValues("heap_sys").Set(float64(stats.HeapSys))
	}
}