diff --git a/sweet/benchmarks/go-build/main.go b/sweet/benchmarks/go-build/main.go
index c49ad0b..3d805ad 100644
--- a/sweet/benchmarks/go-build/main.go
+++ b/sweet/benchmarks/go-build/main.go
@@ -17,6 +17,7 @@ import (
 	"golang.org/x/benchmarks/sweet/benchmarks/internal/cgroups"
 	"golang.org/x/benchmarks/sweet/benchmarks/internal/driver"
 	"golang.org/x/benchmarks/sweet/common"
+	"golang.org/x/benchmarks/sweet/common/diagnostics"
 	sprofile "golang.org/x/benchmarks/sweet/common/profile"
 )
 
@@ -67,7 +68,7 @@ func run(pkgPath string) error {
 		"-bench-name", name,
 	}
 	flag.CommandLine.Visit(func(f *flag.Flag) {
-		if f.Name == "go" || f.Name == "bench-name" {
+		if f.Name == "go" || f.Name == "bench-name" || strings.HasPrefix(f.Name, "perf") {
 			// No need to pass this along.
 			return
 		}
@@ -76,8 +77,12 @@ func run(pkgPath string) error {
 	cmdArgs = append(cmdArgs, "-toolexec", strings.Join(selfCmd, " "))
 
 	var baseCmd *exec.Cmd
-	if driver.ProfilingEnabled(driver.ProfilePerf) {
-		baseCmd = exec.Command("perf", append([]string{"record", "-o", filepath.Join(tmpDir, "perf.data"), goTool}, cmdArgs...)...)
+	if driver.DiagnosticEnabled(diagnostics.Perf) {
+		perfArgs := []string{"record", "-o", filepath.Join(tmpDir, "perf.data")}
+		perfArgs = append(perfArgs, driver.PerfFlags()...)
+		perfArgs = append(perfArgs, goTool)
+		perfArgs = append(perfArgs, cmdArgs...)
+		baseCmd = exec.Command("perf", perfArgs...)
 	} else {
 		baseCmd = exec.Command(goTool, cmdArgs...)
 	}
 
@@ -98,41 +103,55 @@ func run(pkgPath string) error {
 
 	// Handle any CPU profiles produced, and merge them.
 	// Then, write them out to the canonical profiles above.
-	if driver.ProfilingEnabled(driver.ProfileCPU) {
-		compileProfile, err := mergeProfiles(tmpDir, profilePrefix("compile", driver.ProfileCPU))
+	if driver.DiagnosticEnabled(diagnostics.CPUProfile) {
+		compileProfile, err := mergePprofProfiles(tmpDir, profilePrefix("compile", diagnostics.CPUProfile))
 		if err != nil {
 			return err
 		}
-		if err := driver.WriteProfile(compileProfile, driver.ProfileCPU, name+"Compile"); err != nil {
+		if err := driver.WritePprofProfile(compileProfile, diagnostics.CPUProfile, name+"Compile"); err != nil {
 			return err
 		}
-		linkProfile, err := mergeProfiles(tmpDir, profilePrefix("link", driver.ProfileCPU))
+		linkProfile, err := mergePprofProfiles(tmpDir, profilePrefix("link", diagnostics.CPUProfile))
 		if err != nil {
 			return err
 		}
-		if err := driver.WriteProfile(linkProfile, driver.ProfileCPU, name+"Link"); err != nil {
+		if err := driver.WritePprofProfile(linkProfile, diagnostics.CPUProfile, name+"Link"); err != nil {
 			return err
 		}
 	}
-	if driver.ProfilingEnabled(driver.ProfileMem) {
-		if err := copyProfiles(tmpDir, "compile", driver.ProfileMem, name+"Compile"); err != nil {
+	if driver.DiagnosticEnabled(diagnostics.MemProfile) {
+		if err := copyPprofProfiles(tmpDir, "compile", diagnostics.MemProfile, name+"Compile"); err != nil {
 			return err
 		}
-		if err := copyProfiles(tmpDir, "link", driver.ProfileMem, name+"Link"); err != nil {
+		if err := copyPprofProfiles(tmpDir, "link", diagnostics.MemProfile, name+"Link"); err != nil {
 			return err
 		}
 	}
-	if driver.ProfilingEnabled(driver.ProfilePerf) {
-		if err := driver.CopyProfile(filepath.Join(tmpDir, "perf.data"), driver.ProfilePerf, name); err != nil {
+	if driver.DiagnosticEnabled(diagnostics.Perf) {
+		if err := driver.CopyDiagnosticData(filepath.Join(tmpDir, "perf.data"), diagnostics.Perf, name); err != nil {
 			return err
 		}
 	}
+	if driver.DiagnosticEnabled(diagnostics.Trace) {
+		entries, err := os.ReadDir(tmpDir)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			if !strings.HasPrefix(entry.Name(), profilePrefix("compile", diagnostics.Trace)) {
+				continue
+			}
+			if err := driver.CopyDiagnosticData(filepath.Join(tmpDir, entry.Name()), diagnostics.Trace, name+"Compile"); err != nil {
+				return err
+			}
+		}
+	}
 	return printOtherResults(tmpResultsDir())
 }
 
-func mergeProfiles(dir, prefix string) (*profile.Profile, error) {
-	profiles, err := sprofile.ReadDir(dir, func(name string) bool {
+func mergePprofProfiles(dir, prefix string) (*profile.Profile, error) {
+	profiles, err := sprofile.ReadDirPprof(dir, func(name string) bool {
 		return strings.HasPrefix(name, prefix)
 	})
 	if err != nil {
@@ -141,23 +160,23 @@ func mergeProfiles(dir, prefix string) (*profile.Profile, error) {
 	return profile.Merge(profiles)
 }
 
-func copyProfiles(dir, bin string, typ driver.ProfileType, finalPrefix string) error {
+func copyPprofProfiles(dir, bin string, typ diagnostics.Type, finalPrefix string) error {
 	prefix := profilePrefix(bin, typ)
-	profiles, err := sprofile.ReadDir(dir, func(name string) bool {
+	profiles, err := sprofile.ReadDirPprof(dir, func(name string) bool {
 		return strings.HasPrefix(name, prefix)
 	})
 	if err != nil {
 		return err
 	}
 	for _, profile := range profiles {
-		if err := driver.WriteProfile(profile, typ, finalPrefix); err != nil {
+		if err := driver.WritePprofProfile(profile, typ, finalPrefix); err != nil {
 			return err
 		}
 	}
 	return nil
 }
 
-func profilePrefix(bin string, typ driver.ProfileType) string {
+func profilePrefix(bin string, typ diagnostics.Type) string {
 	return bin + "-prof." + string(typ)
 }
 
@@ -200,15 +219,23 @@ func runToolexec() error {
 		return cmd.Run()
 	}
 	var extraFlags []string
-	for _, typ := range []driver.ProfileType{driver.ProfileCPU, driver.ProfileMem} {
-		if driver.ProfilingEnabled(typ) {
+	for _, typ := range []diagnostics.Type{diagnostics.CPUProfile, diagnostics.MemProfile, diagnostics.Trace} {
+		if driver.DiagnosticEnabled(typ) {
+			if bin == "link" && typ == diagnostics.Trace {
+				// TODO(mknyszek): Traces are not supported for the linker.
+				continue
+			}
 			// Stake a claim for a filename.
 			f, err := os.CreateTemp(tmpDir, profilePrefix(bin, typ))
 			if err != nil {
 				return err
 			}
 			f.Close()
-			extraFlags = append(extraFlags, "-"+string(typ)+"profile", f.Name())
+			flag := "-" + string(typ)
+			if typ == diagnostics.Trace {
+				flag += "profile" // The compiler flag is -traceprofile.
+			}
+			extraFlags = append(extraFlags, flag, f.Name())
 		}
 	}
 	cmd := exec.Command(flag.Args()[0], append(extraFlags, flag.Args()[1:]...)...)
diff --git a/sweet/benchmarks/gvisor/common.go b/sweet/benchmarks/gvisor/common.go
index aeb2879..7f18e16 100644
--- a/sweet/benchmarks/gvisor/common.go
+++ b/sweet/benchmarks/gvisor/common.go
@@ -15,6 +15,7 @@ import (
 
 	"golang.org/x/benchmarks/sweet/benchmarks/internal/driver"
 	"golang.org/x/benchmarks/sweet/common"
+	"golang.org/x/benchmarks/sweet/common/diagnostics"
 )
 
 func workloadsPath(assetsDir, subBenchmark string) string {
@@ -23,15 +24,15 @@ func workloadsPath(assetsDir, subBenchmark string) string {
 	return filepath.Join(assetsDir, subBenchmark, "bin", platformDir, "workload")
 }
 
-func (c *config) profilePath(typ driver.ProfileType) string {
+func (c *config) profilePath(typ diagnostics.Type) string {
 	return filepath.Join(c.tmpDir, string(typ)+".prof")
 }
 
 func (cfg *config) runscCmd(arg ...string) *exec.Cmd {
 	var cmd *exec.Cmd
 	goProfiling := false
-	for _, typ := range []driver.ProfileType{driver.ProfileCPU, driver.ProfileMem} {
-		if driver.ProfilingEnabled(typ) {
+	for _, typ := range []diagnostics.Type{diagnostics.CPUProfile, diagnostics.MemProfile, diagnostics.Trace} {
+		if driver.DiagnosticEnabled(typ) {
 			goProfiling = true
 			break
 		}
@@ -39,14 +40,21 @@ func (cfg *config) runscCmd(arg ...string) *exec.Cmd {
 	if goProfiling {
 		arg = append([]string{"-profile"}, arg...)
 	}
-	if driver.ProfilingEnabled(driver.ProfileCPU) {
-		arg = append([]string{"-profile-cpu", cfg.profilePath(driver.ProfileCPU)}, arg...)
+	if driver.DiagnosticEnabled(diagnostics.CPUProfile) {
+		arg = append([]string{"-profile-cpu", cfg.profilePath(diagnostics.CPUProfile)}, arg...)
 	}
-	if driver.ProfilingEnabled(driver.ProfileMem) {
-		arg = append([]string{"-profile-heap", cfg.profilePath(driver.ProfileMem)}, arg...)
+	if driver.DiagnosticEnabled(diagnostics.MemProfile) {
+		arg = append([]string{"-profile-heap", cfg.profilePath(diagnostics.MemProfile)}, arg...)
 	}
-	if driver.ProfilingEnabled(driver.ProfilePerf) {
-		cmd = exec.Command("perf", append([]string{"record", "-o", cfg.profilePath(driver.ProfilePerf), cfg.runscPath}, arg...)...)
+	if driver.DiagnosticEnabled(diagnostics.Trace) {
+		arg = append([]string{"-trace", cfg.profilePath(diagnostics.Trace)}, arg...)
+	}
+	if driver.DiagnosticEnabled(diagnostics.Perf) {
+		perfArgs := []string{"record", "-o", cfg.profilePath(diagnostics.Perf)}
+		perfArgs = append(perfArgs, driver.PerfFlags()...)
+		perfArgs = append(perfArgs, cfg.runscPath)
+		perfArgs = append(perfArgs, arg...)
+		cmd = exec.Command("perf", perfArgs...)
 	} else {
 		cmd = exec.Command(cfg.runscPath, arg...)
 	}
diff --git a/sweet/benchmarks/gvisor/main.go b/sweet/benchmarks/gvisor/main.go
index 704fec9..b288e28 100644
--- a/sweet/benchmarks/gvisor/main.go
+++ b/sweet/benchmarks/gvisor/main.go
@@ -17,6 +17,7 @@ import (
 	"time"
 
 	"golang.org/x/benchmarks/sweet/benchmarks/internal/driver"
+	"golang.org/x/benchmarks/sweet/common/diagnostics"
 )
 
 type config struct {
@@ -66,12 +67,12 @@ func main1() error {
 		}
 		return err
 	}
-	for _, typ := range driver.ProfileTypes {
-		if !driver.ProfilingEnabled(typ) {
+	for _, typ := range diagnostics.Types() {
+		if !driver.DiagnosticEnabled(typ) {
 			continue
 		}
 		// runscCmd ensures these are created if necessary.
-		if err := driver.CopyProfile(cliCfg.profilePath(typ), typ, bench.name()); err != nil {
+		if err := driver.CopyDiagnosticData(cliCfg.profilePath(typ), typ, bench.name()); err != nil {
 			return err
 		}
 	}
diff --git a/sweet/benchmarks/internal/driver/driver.go b/sweet/benchmarks/internal/driver/driver.go
index bb6e3eb..36d3ed7 100644
--- a/sweet/benchmarks/internal/driver/driver.go
+++ b/sweet/benchmarks/internal/driver/driver.go
@@ -13,6 +13,7 @@ import (
 	"os/exec"
 	"path/filepath"
 	"runtime/pprof"
+	"runtime/trace"
 	"sort"
 	"strconv"
 	"strings"
@@ -20,23 +21,17 @@ import (
 	"time"
 
 	"github.com/google/pprof/profile"
+	"golang.org/x/benchmarks/sweet/common/diagnostics"
 )
 
 var (
-	coreDumpDir   string
-	cpuProfileDir string
-	memProfileDir string
-	perfDir       string
-	perfFlags     string
-	short         bool
+	coreDumpDir string
+	diag        map[diagnostics.Type]*diagnostics.DriverConfig
 )
 
 func SetFlags(f *flag.FlagSet) {
 	f.StringVar(&coreDumpDir, "dump-cores", "", "dump a core file to the given directory after every benchmark run")
-	f.StringVar(&cpuProfileDir, "cpuprofile", "", "write a CPU profile to the given directory after every benchmark run")
-	f.StringVar(&memProfileDir, "memprofile", "", "write a memory profile to the given directory after every benchmark run")
-	f.StringVar(&perfDir, "perf", "", "write a Linux perf data file to the given directory after every benchmark run")
-	f.StringVar(&perfFlags, "perf-flags", "", "pass the following additional flags to Linux perf")
+	diag = diagnostics.SetFlagsForDriver(f)
 }
 
 const (
@@ -88,19 +83,25 @@ func DoCoreDump(v bool) RunOption {
 
 func DoCPUProfile(v bool) RunOption {
 	return func(b *B) {
-		b.doProfile[ProfileCPU] = v
+		b.collectDiag[diagnostics.CPUProfile] = v
 	}
 }
 
 func DoMemProfile(v bool) RunOption {
 	return func(b *B) {
-		b.doProfile[ProfileMem] = v
+		b.collectDiag[diagnostics.MemProfile] = v
 	}
 }
 
 func DoPerf(v bool) RunOption {
 	return func(b *B) {
-		b.doProfile[ProfilePerf] = v
+		b.collectDiag[diagnostics.Perf] = v
+	}
+}
+
+func DoTrace(v bool) RunOption {
+	return func(b *B) {
+		b.collectDiag[diagnostics.Trace] = v
 	}
 }
 
@@ -108,9 +109,10 @@ func BenchmarkPID(pid int) RunOption {
 	return func(b *B) {
 		b.pid = pid
 		if pid != os.Getpid() {
-			b.doProfile[ProfileCPU] = false
-			b.doProfile[ProfileMem] = false
-			b.doProfile[ProfilePerf] = false
+			b.collectDiag[diagnostics.CPUProfile] = false
+			b.collectDiag[diagnostics.MemProfile] = false
+			b.collectDiag[diagnostics.Perf] = false
+			b.collectDiag[diagnostics.Trace] = false
 		}
 	}
 }
@@ -136,6 +138,7 @@ var InProcessMeasurementOptions = []RunOption{
 	DoCPUProfile(true),
 	DoMemProfile(true),
 	DoPerf(true),
+	DoTrace(true),
 }
 
 type B struct {
@@ -148,13 +151,13 @@ type B struct {
 	doPeakRSS     bool
 	doPeakVM      bool
 	doCoreDump    bool
-	doProfile     map[ProfileType]bool
+	collectDiag   map[diagnostics.Type]bool
 	rssFunc       func() (uint64, error)
 	statsMu       sync.Mutex
 	stats         map[string]uint64
 	ops           int
 	wg            sync.WaitGroup
-	profiles      map[ProfileType]*os.File
+	diagnostics   map[diagnostics.Type]*os.File
 	resultsWriter io.Writer
 	perfProcess   *os.Process
 }
@@ -163,13 +166,13 @@ func newB(name string) *B {
 	b := &B{
 		pid:  os.Getpid(),
 		name: name,
-		doProfile: map[ProfileType]bool{
-			ProfileCPU: false,
-			ProfileMem: false,
+		collectDiag: map[diagnostics.Type]bool{
+			diagnostics.CPUProfile: false,
+			diagnostics.MemProfile: false,
 		},
-		stats:    make(map[string]uint64),
-		ops:      1,
-		profiles: make(map[ProfileType]*os.File),
+		stats:       make(map[string]uint64),
+		ops:         1,
+		diagnostics: make(map[diagnostics.Type]*os.File),
 	}
 	return b
 }
@@ -180,15 +183,19 @@ func (b *B) setStat(name string, value uint64) {
 	b.stats[name] = value
 }
 
-func (b *B) shouldProfile(typ ProfileType) bool {
-	return b.doProfile[typ] && ProfilingEnabled(typ)
+func (b *B) shouldCollectDiag(typ diagnostics.Type) bool {
+	return b.collectDiag[typ] && DiagnosticEnabled(typ)
+}
+
+func (b *B) Name() string {
+	return b.name
 }
 
 func (b *B) StartTimer() {
-	if b.shouldProfile(ProfileCPU) {
-		pprof.StartCPUProfile(b.profiles[ProfileCPU])
+	if b.shouldCollectDiag(diagnostics.CPUProfile) {
+		pprof.StartCPUProfile(b.diagnostics[diagnostics.CPUProfile])
 	}
-	if b.shouldProfile(ProfilePerf) {
+	if b.shouldCollectDiag(diagnostics.Perf) {
 		if err := b.startPerf(); err != nil {
 			warningf("failed to start perf: %v", err)
 		}
@@ -197,18 +204,18 @@
 }
 
 func (b *B) ResetTimer() {
-	if b.shouldProfile(ProfileCPU) {
+	if b.shouldCollectDiag(diagnostics.CPUProfile) {
 		pprof.StopCPUProfile()
-		if err := b.truncateProfile(ProfileCPU); err != nil {
+		if err := b.truncateDiagnosticData(diagnostics.CPUProfile); err != nil {
 			warningf("failed to truncate CPU profile: %v", err)
 		}
-		pprof.StartCPUProfile(b.profiles[ProfileCPU])
+		pprof.StartCPUProfile(b.diagnostics[diagnostics.CPUProfile])
 	}
-	if b.shouldProfile(ProfilePerf) {
+	if b.shouldCollectDiag(diagnostics.Perf) {
 		if err := b.stopPerf(); err != nil {
 			warningf("failed to stop perf: %v", err)
 		}
-		if err := b.truncateProfile(ProfilePerf); err != nil {
+		if err := b.truncateDiagnosticData(diagnostics.Perf); err != nil {
 			warningf("failed to truncate perf data file: %v", err)
 		}
 		if err := b.startPerf(); err != nil {
@@ -221,8 +228,8 @@
 	b.dur = 0
 }
 
-func (b *B) truncateProfile(typ ProfileType) error {
-	f := b.profiles[typ]
+func (b *B) truncateDiagnosticData(typ diagnostics.Type) error {
+	f := b.diagnostics[typ]
 	_, err := f.Seek(0, 0)
 	if err != nil {
 		return err
@@ -238,10 +245,10 @@ func (b *B) StopTimer() {
 	b.dur += end.Sub(b.start)
 	b.start = time.Time{}
 
-	if b.shouldProfile(ProfileCPU) {
+	if b.shouldCollectDiag(diagnostics.CPUProfile) {
 		pprof.StopCPUProfile()
 	}
-	if b.shouldProfile(ProfilePerf) {
+	if b.shouldCollectDiag(diagnostics.Perf) {
 		if err := b.stopPerf(); err != nil {
 			warningf("failed to stop perf: %v", err)
 		}
@@ -402,8 +409,8 @@ func (b *B) startPerf() error {
 	if b.perfProcess != nil {
 		panic("perf process already started")
 	}
-	args := []string{"record", "-o", b.profiles[ProfilePerf].Name(), "-p", strconv.Itoa(b.pid)}
-	if perfFlags != "" {
+	args := []string{"record", "-o", b.diagnostics[diagnostics.Perf].Name(), "-p", strconv.Itoa(b.pid)}
+	if perfFlags := diag[diagnostics.Perf].Flags; perfFlags != "" {
 		args = append(args, strings.Split(perfFlags, " ")...)
 	}
 	cmd := exec.Command("perf", args...)
@@ -439,13 +446,19 @@ func RunBenchmark(name string, f func(*B) error, opts ...RunOption) error {
 	stop := b.startRSSSampler()
 
 	// Make sure profile file(s) are created if necessary.
-	for _, typ := range ProfileTypes {
-		if b.shouldProfile(typ) {
-			f, err := newProfileFile(typ, b.name)
+	for _, typ := range diagnostics.Types() {
+		if b.shouldCollectDiag(typ) {
+			f, err := newDiagnosticDataFile(typ, b.name)
 			if err != nil {
 				return err
 			}
-			b.profiles[typ] = f
+			b.diagnostics[typ] = f
+		}
+	}
+
+	if b.shouldCollectDiag(diagnostics.Trace) {
+		if err := trace.Start(b.diagnostics[diagnostics.Trace]); err != nil {
+			return err
 		}
 	}
 
@@ -507,12 +520,15 @@ func RunBenchmark(name string, f func(*B) error, opts ...RunOption) error {
 	b.wg.Wait()
 
 	// Finalize all the profile files we're handling ourselves.
-	for typ, f := range b.profiles {
-		if typ == ProfileMem {
+	for typ, f := range b.diagnostics {
+		if typ == diagnostics.MemProfile {
 			if err := pprof.Lookup("heap").WriteTo(f, 0); err != nil {
 				return err
 			}
 		}
+		if typ == diagnostics.Trace {
+			trace.Stop()
+		}
 		f.Close()
 	}
 
@@ -521,37 +537,22 @@ func RunBenchmark(name string, f func(*B) error, opts ...RunOption) error {
 	return nil
 }
 
-type ProfileType string
-
-const (
-	ProfileCPU  ProfileType = "cpu"
-	ProfileMem  ProfileType = "mem"
-	ProfilePerf ProfileType = "perf"
-)
-
-var ProfileTypes = []ProfileType{
-	ProfileCPU,
-	ProfileMem,
-	ProfilePerf,
-}
-
-func ProfilingEnabled(typ ProfileType) bool {
-	switch typ {
-	case ProfileCPU:
-		return cpuProfileDir != ""
-	case ProfileMem:
-		return memProfileDir != ""
-	case ProfilePerf:
-		return perfDir != ""
+func DiagnosticEnabled(typ diagnostics.Type) bool {
+	cfg, ok := diag[typ]
+	if !ok {
+		panic("bad profile type")
 	}
-	panic("bad profile type")
+	return cfg.Dir != ""
 }
 
-func WriteProfile(prof *profile.Profile, typ ProfileType, pattern string) error {
-	if !ProfilingEnabled(typ) {
-		return fmt.Errorf("this type of profile is not currently enabled")
+func WritePprofProfile(prof *profile.Profile, typ diagnostics.Type, pattern string) error {
+	if !typ.IsPprof() {
+		return fmt.Errorf("this type of diagnostic doesn't use the pprof format")
+	}
+	if !DiagnosticEnabled(typ) {
+		return fmt.Errorf("this type of diagnostic is not currently enabled")
 	}
-	f, err := newProfileFile(typ, pattern)
+	f, err := newDiagnosticDataFile(typ, pattern)
 	if err != nil {
 		return err
 	}
@@ -559,13 +560,13 @@ func WriteProfile(prof *profile.Profile, typ ProfileType, pattern string) error
 	return prof.Write(f)
 }
 
-func CopyProfile(profilePath string, typ ProfileType, pattern string) error {
-	inF, err := os.Open(profilePath)
+func CopyDiagnosticData(diagPath string, typ diagnostics.Type, pattern string) error {
+	inF, err := os.Open(diagPath)
 	if err != nil {
 		return err
 	}
 	defer inF.Close()
-	outF, err := newProfileFile(typ, pattern)
+	outF, err := newDiagnosticDataFile(typ, pattern)
 	if err != nil {
 		return err
 	}
@@ -574,21 +575,17 @@ func CopyProfile(profilePath string, typ ProfileType, pattern string) error {
 		return err
 	}
 
-func newProfileFile(typ ProfileType, pattern string) (*os.File, error) {
-	if !ProfilingEnabled(typ) {
+func PerfFlags() []string {
+	if !DiagnosticEnabled(diagnostics.Perf) {
+		panic("perf not enabled")
+	}
+	return strings.Split(diag[diagnostics.Perf].Flags, " ")
+}
+
+func newDiagnosticDataFile(typ diagnostics.Type, pattern string) (*os.File, error) {
+	cfg, ok := diag[typ]
+	if !ok || cfg.Dir == "" {
 		return nil, fmt.Errorf("this type of profile is not currently enabled")
 	}
-	var outDir, patternSuffix string
-	switch typ {
-	case ProfileCPU:
-		outDir = cpuProfileDir
-		patternSuffix = ".cpu"
-	case ProfileMem:
-		outDir = memProfileDir
-		patternSuffix = ".mem"
-	case ProfilePerf:
-		outDir = perfDir
-		patternSuffix = ".perf"
-	}
-	return os.CreateTemp(outDir, pattern+patternSuffix)
+	return os.CreateTemp(cfg.Dir, pattern+"."+string(typ))
 }
diff --git a/sweet/benchmarks/tile38/main.go b/sweet/benchmarks/tile38/main.go
index ea3027a..8de4af2 100644
--- a/sweet/benchmarks/tile38/main.go
+++ b/sweet/benchmarks/tile38/main.go
@@ -11,17 +11,20 @@ import (
 	"fmt"
 	"io"
 	"math/rand"
+	"net/http"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"runtime"
 	"sort"
 	"strconv"
+	"sync"
 	"sync/atomic"
 	"time"
 
 	"golang.org/x/benchmarks/sweet/benchmarks/internal/driver"
 	"golang.org/x/benchmarks/sweet/benchmarks/internal/pool"
"golang.org/x/benchmarks/sweet/common/diagnostics" "golang.org/x/benchmarks/sweet/common/profile" "github.com/gomodule/redigo/redis" @@ -39,15 +42,17 @@ type config struct { short bool } -func (c *config) profilePath(typ driver.ProfileType) string { +func (c *config) diagnosticDataPath(typ diagnostics.Type) string { var fname string switch typ { - case driver.ProfileCPU: + case diagnostics.CPUProfile: fname = "cpu.prof" - case driver.ProfileMem: + case diagnostics.MemProfile: fname = "mem.prof" - case driver.ProfilePerf: + case diagnostics.Perf: fname = "perf.data" + case diagnostics.Trace: + fname = "runtime.trace" default: panic("unsupported profile type " + string(typ)) } @@ -58,7 +63,7 @@ var cliCfg config func init() { driver.SetFlags(flag.CommandLine) - flag.StringVar(&cliCfg.host, "host", "", "hostname of tile38 server") + flag.StringVar(&cliCfg.host, "host", "127.0.0.1", "hostname of tile38 server") flag.IntVar(&cliCfg.port, "port", 9851, "port for tile38 server") flag.Int64Var(&cliCfg.seed, "seed", 0, "seed for PRNG") flag.StringVar(&cliCfg.serverBin, "server", "", "path to tile38 server binary") @@ -219,13 +224,14 @@ func launchServer(cfg *config, out io.Writer) (*exec.Cmd, error) { // Set up arguments. srvArgs := []string{ "-d", cfg.dataPath, - "-h", "127.0.0.1", - "-p", "9851", + "-h", cfg.host, + "-p", strconv.Itoa(cfg.port), "-threads", strconv.Itoa(cfg.serverProcs), + "-pprofport", strconv.Itoa(pprofPort), } - for _, typ := range []driver.ProfileType{driver.ProfileCPU, driver.ProfileMem} { - if driver.ProfilingEnabled(typ) { - srvArgs = append(srvArgs, "-"+string(typ)+"profile", cfg.profilePath(typ)) + for _, typ := range []diagnostics.Type{diagnostics.CPUProfile, diagnostics.MemProfile} { + if driver.DiagnosticEnabled(typ) { + srvArgs = append(srvArgs, "-"+string(typ), cfg.diagnosticDataPath(typ)) } } @@ -271,6 +277,26 @@ func launchServer(cfg *config, out io.Writer) (*exec.Cmd, error) { return nil, fmt.Errorf("timeout trying to connect to server: %v", err) } +const pprofPort = 12345 + +func (cfg *config) readTrace(benchName string) (int64, error) { + f, err := os.Create(cfg.diagnosticDataPath(diagnostics.Trace)) + if err != nil { + return 0, err + } + defer f.Close() + resp, err := http.Get(fmt.Sprintf("http://%s:%d/debug/pprof/trace", cfg.host, pprofPort)) + if err != nil { + return 0, err + } + defer resp.Body.Close() + n, err := io.Copy(f, resp.Body) + if err != nil { + return 0, err + } + return n, driver.CopyDiagnosticData(cfg.diagnosticDataPath(diagnostics.Trace), diagnostics.Trace, benchName) +} + func runOne(bench benchmark, cfg *config) (err error) { var buf bytes.Buffer @@ -306,14 +332,14 @@ func runOne(bench benchmark, cfg *config) (err error) { // Now that the server is done, the profile should be complete and flushed. // Copy it over. 
-	for _, typ := range []driver.ProfileType{driver.ProfileCPU, driver.ProfileMem} {
-		if driver.ProfilingEnabled(typ) {
-			p, r := profile.Read(cfg.profilePath(typ))
+	for _, typ := range []diagnostics.Type{diagnostics.CPUProfile, diagnostics.MemProfile} {
+		if driver.DiagnosticEnabled(typ) {
+			p, r := profile.ReadPprof(cfg.diagnosticDataPath(typ))
 			if r != nil {
 				err = r
 				return
 			}
-			if r := driver.WriteProfile(p, typ, bench.name()); r != nil {
+			if r := driver.WritePprofProfile(p, typ, bench.name()); r != nil {
 				err = r
 				return
 			}
@@ -329,12 +355,46 @@ func runOne(bench benchmark, cfg *config) (err error) {
 		driver.DoCoreDump(true),
 		driver.BenchmarkPID(srvCmd.Process.Pid),
 		driver.DoPerf(true),
+		driver.DoTrace(true),
 	}
 	iters := 20 * 50000
 	if cfg.short {
 		iters = 1000
 	}
 	return driver.RunBenchmark(bench.name(), func(d *driver.B) error {
+		if driver.DiagnosticEnabled(diagnostics.Trace) {
+			// Handle execution tracing.
+			//
+			// TODO(mknyszek): This is kind of a hack. We really should find a way to just
+			// enable tracing at a lower level for the entire server run.
+			var traceStop chan struct{}
+			var traceWg sync.WaitGroup
+			var traceBytes uint64
+			traceWg.Add(1)
+			traceStop = make(chan struct{})
+			go func() {
+				defer traceWg.Done()
+				for {
+					select {
+					case <-traceStop:
+						return
+					default:
+					}
+					n, err := cfg.readTrace(bench.name())
+					if err != nil {
+						fmt.Fprintf(os.Stderr, "failed to read trace: %v", err)
+						return
+					}
+					traceBytes += uint64(n)
+				}
+			}()
+			defer func() {
+				// Stop the trace loop.
+				close(traceStop)
+				traceWg.Wait()
+				d.Report("trace-bytes", traceBytes)
+			}()
+		}
 		return bench.run(d, cfg.host, cfg.port, cfg.serverProcs, iters)
 	}, opts...)
 }
@@ -345,8 +405,8 @@ func main() {
 		fmt.Fprintf(os.Stderr, "error: unexpected args\n")
 		os.Exit(1)
 	}
-	for _, typ := range driver.ProfileTypes {
-		cliCfg.isProfiling = cliCfg.isProfiling || driver.ProfilingEnabled(typ)
+	for _, typ := range diagnostics.Types() {
+		cliCfg.isProfiling = cliCfg.isProfiling || driver.DiagnosticEnabled(typ)
 	}
 	benchmarks := benchmarks
 	if cliCfg.short {
diff --git a/sweet/cmd/sweet/benchmark.go b/sweet/cmd/sweet/benchmark.go
index 8ad98a0..b8402fd 100644
--- a/sweet/cmd/sweet/benchmark.go
+++ b/sweet/cmd/sweet/benchmark.go
@@ -279,24 +279,15 @@ func (b *benchmark) execute(cfgs []*common.Config, r *runCfg) error {
 		mkdirAll(resultsBinDir)
 		copyDirContents(resultsBinDir, binDir)
 	}
-	if r.cpuProfile || r.memProfile || r.perf {
+	if !cfg.Diagnostics.Empty() {
 		// Create a directory for any profile files to live in.
 		resultsProfilesDir := r.runProfilesDir(b, cfg)
 		mkdirAll(resultsProfilesDir)
 
 		// We need to pass arguments to the benchmark binary to generate
 		// profiles. See benchmarks/internal/driver for details.
-		if r.cpuProfile {
-			args = append(args, "-cpuprofile", resultsProfilesDir)
-		}
-		if r.memProfile {
-			args = append(args, "-memprofile", resultsProfilesDir)
-		}
-		if r.perf {
-			args = append(args, "-perf", resultsProfilesDir)
-			if r.perfFlags != "" {
-				args = append(args, "-perf-flags", r.perfFlags)
-			}
+		for _, d := range cfg.Diagnostics.ToSlice() {
+			args = append(args, d.DriverArgs(resultsProfilesDir)...)
 		}
 	}
 
diff --git a/sweet/cmd/sweet/run.go b/sweet/cmd/sweet/run.go
index 0e69f4a..d9752da 100644
--- a/sweet/cmd/sweet/run.go
+++ b/sweet/cmd/sweet/run.go
@@ -20,6 +20,7 @@ import (
 
 	"golang.org/x/benchmarks/sweet/cli/bootstrap"
 	"golang.org/x/benchmarks/sweet/common"
+	"golang.org/x/benchmarks/sweet/common/diagnostics"
 	"golang.org/x/benchmarks/sweet/common/log"
 	sprofile "golang.org/x/benchmarks/sweet/common/profile"
 
@@ -58,10 +59,6 @@ type runCfg struct {
 	workDir     string
 	assetsCache string
 	dumpCore    bool
-	cpuProfile  bool
-	memProfile  bool
-	perf        bool
-	perfFlags   string
 	pgo         bool
 	pgoCount    int
 	short       bool
@@ -150,10 +147,6 @@ func (c *runCmd) SetFlags(f *flag.FlagSet) {
 	f.StringVar(&c.runCfg.workDir, "work-dir", "", "work directory for benchmarks (default: temporary directory)")
 	f.StringVar(&c.runCfg.assetsCache, "cache", bootstrap.CacheDefault(), "cache location for assets")
 	f.BoolVar(&c.runCfg.dumpCore, "dump-core", false, "whether to dump core files for each benchmark process when it completes a benchmark")
-	f.BoolVar(&c.runCfg.cpuProfile, "cpuprofile", false, "whether to dump a CPU profile for each benchmark (ensures all benchmarks do the same amount of work)")
-	f.BoolVar(&c.runCfg.memProfile, "memprofile", false, "whether to dump a memory profile for each benchmark (ensures all executions do the same amount of work")
-	f.BoolVar(&c.runCfg.perf, "perf", false, "whether to run each benchmark under Linux perf and dump the results")
-	f.StringVar(&c.runCfg.perfFlags, "perf-flags", "", "the flags to pass to Linux perf if -perf is set")
 	f.BoolVar(&c.pgo, "pgo", false, "perform PGO testing; for each config, collect profiles from a baseline run which are used to feed into a generated PGO config")
 	f.IntVar(&c.runCfg.pgoCount, "pgo-count", 0, "the number of times to run profiling runs for -pgo; defaults to the value of -count if <=5, or 5 if higher")
 	f.IntVar(&c.runCfg.count, "count", 0, fmt.Sprintf("the number of times to run each benchmark (default %d)", countDefault))
@@ -362,11 +355,12 @@ func (c *runCmd) Run(args []string) error {
 	if len(unknown) != 0 {
 		return fmt.Errorf("unknown benchmarks: %s", strings.Join(unknown, ", "))
 	}
-	countString := fmt.Sprintf("%d runs", c.runCfg.count)
+
+	// Print an indication of how many runs will be done.
+	countString := fmt.Sprintf("%d runs", c.runCfg.count*len(configs))
 	if c.pgo {
-		countString += fmt.Sprintf(", %d pgo runs", c.runCfg.pgoCount)
+		countString += fmt.Sprintf(", %d pgo runs", c.runCfg.pgoCount*len(configs))
 	}
-	countString += fmt.Sprintf(" per config (%d)", len(configs))
 	log.Printf("Benchmarks: %s (%s)", strings.Join(benchmarkNames(benchmarks), " "), countString)
 
 	// Check prerequisites for each benchmark.
@@ -406,11 +400,11 @@ func (c *runCmd) preparePGO(configs []*common.Config, benchmarks []*benchmark) (
 	for _, c := range configs {
 		cc := c.Copy()
 		cc.Name += ".profile"
+		cc.Diagnostics.Set(diagnostics.Config{Type: diagnostics.CPUProfile})
 		profileConfigs = append(profileConfigs, cc)
 	}
 
 	profileRunCfg := c.runCfg
-	profileRunCfg.cpuProfile = true
 	profileRunCfg.count = profileRunCfg.pgoCount
 
 	log.Printf("Running profile collection runs")
@@ -453,10 +447,10 @@ func (c *runCmd) preparePGO(configs []*common.Config, benchmarks []*benchmark) (
 	return newConfigs, nil
 }
 
-var cpuProfileRe = regexp.MustCompile(`^.*\.cpu[0-9]+$`)
+var cpuProfileRe = regexp.MustCompile(`^.*\.cpuprofile[0-9]+$`)
 
 func mergeCPUProfiles(dir string) (string, error) {
-	profiles, err := sprofile.ReadDir(dir, func(name string) bool {
+	profiles, err := sprofile.ReadDirPprof(dir, func(name string) bool {
 		return cpuProfileRe.FindString(name) != ""
 	})
 	if err != nil {
diff --git a/sweet/common/config.go b/sweet/common/config.go
index a1c3231..5754d24 100644
--- a/sweet/common/config.go
+++ b/sweet/common/config.go
@@ -10,32 +10,44 @@ import (
 	"path/filepath"
 
 	"github.com/BurntSushi/toml"
+	"golang.org/x/benchmarks/sweet/common/diagnostics"
 )
 
 const ConfigHelp = `
The input configuration format is TOML consisting of a single array field called
'config'. Each element of the array consists of the following fields:
-        name: a unique name for the configuration (required)
-      goroot: path to a GOROOT representing the toolchain to run (required)
-    envbuild: additional environment variables that should be used for compilation
-              each variable should take the form "X=Y" (optional)
-     envexec: additional environment variables that should be used for execution
-              each variable should take the form "X=Y" (optional)
+         name: a unique name for the configuration (required)
+       goroot: path to a GOROOT representing the toolchain to run (required)
+     envbuild: additional environment variables that should be used for
+               compilation each variable should take the form "X=Y" (optional)
+      envexec: additional environment variables that should be used for execution
+               each variable should take the form "X=Y" (optional)
+     pgofiles: a map of benchmark names (see 'sweet help run') to profile files
+               to be passed to the Go compiler for optimization (optional)
+  diagnostics: profile types to collect for each benchmark run of this
+               configuration, which may be one of: cpuprofile, memprofile,
+               perf[=flags], trace (optional)
 
A simple example configuration might look like:
 
[[config]]
  name = "original"
  goroot = "/path/to/go"
-  envexec = ["GODEBUG=gctrace=1"]
 
[[config]]
  name = "improved"
  goroot = "/path/to/go-but-better"
-  envexec = ["GODEBUG=gctrace=1"]
 
Note that because 'config' is an array field, one may have multiple configurations
present in a single file.
+
+An example of using some of the other fields to diagnose performance differences:
+
+[[config]]
+  name = "improved-but-why"
+  goroot = "/path/to/go-but-better"
+  envexec = ["GODEBUG=gctrace=1"]
+  diagnostics = ["cpuprofile", "perf=-e page-faults"]
 `
 
 type ConfigFile struct {
@@ -43,11 +55,12 @@
 }
 
 type Config struct {
-	Name     string            `toml:"name"`
-	GoRoot   string            `toml:"goroot"`
-	BuildEnv ConfigEnv         `toml:"envbuild"`
-	ExecEnv  ConfigEnv         `toml:"envexec"`
-	PGOFiles map[string]string `toml:"pgofiles"`
+	Name        string                `toml:"name"`
+	GoRoot      string                `toml:"goroot"`
+	BuildEnv    ConfigEnv             `toml:"envbuild"`
+	ExecEnv     ConfigEnv             `toml:"envexec"`
+	PGOFiles    map[string]string     `toml:"pgofiles"`
+	Diagnostics diagnostics.ConfigSet `toml:"diagnostics"`
 }
 
 func (c *Config) GoTool() *Go {
@@ -61,9 +74,12 @@ func (c *Config) GoTool() *Go {
 
 // Copy returns a deep copy of Config.
 func (c *Config) Copy() *Config {
-	// Currently, all fields in Config are immutable, so a simply copy is
-	// sufficient.
 	cc := *c
+	cc.PGOFiles = make(map[string]string)
+	for k, v := range c.PGOFiles {
+		cc.PGOFiles[k] = v
+	}
+	cc.Diagnostics = c.Diagnostics.Copy()
 	return &cc
 }
 
@@ -76,11 +92,12 @@ func ConfigFileMarshalTOML(c *ConfigFile) ([]byte, error) {
 	// on Config and use dummy types that have a straightforward
 	// mapping that *does* work.
 	type config struct {
-		Name     string            `toml:"name"`
-		GoRoot   string            `toml:"goroot"`
-		BuildEnv []string          `toml:"envbuild"`
-		ExecEnv  []string          `toml:"envexec"`
-		PGOFiles map[string]string `toml:"pgofiles"`
+		Name        string            `toml:"name"`
+		GoRoot      string            `toml:"goroot"`
+		BuildEnv    []string          `toml:"envbuild"`
+		ExecEnv     []string          `toml:"envexec"`
+		PGOFiles    map[string]string `toml:"pgofiles"`
+		Diagnostics []string          `toml:"diagnostics"`
 	}
 	type configFile struct {
 		Configs []*config `toml:"config"`
 	}
@@ -93,6 +110,7 @@ func ConfigFileMarshalTOML(c *ConfigFile) ([]byte, error) {
 		cfg.BuildEnv = c.BuildEnv.Collapse()
 		cfg.ExecEnv = c.ExecEnv.Collapse()
 		cfg.PGOFiles = c.PGOFiles
+		cfg.Diagnostics = c.Diagnostics.Strings()
 		cfgs.Configs = append(cfgs.Configs, &cfg)
 	}
 
diff --git a/sweet/common/diagnostics/config.go b/sweet/common/diagnostics/config.go
new file mode 100644
index 0000000..10f3dea
--- /dev/null
+++ b/sweet/common/diagnostics/config.go
@@ -0,0 +1,169 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diagnostics
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ConfigSet is an immutable set of Config, containing at most
+// one Config of each supported type.
+type ConfigSet struct {
+	cfgs map[Type]Config
+}
+
+// Strings returns the set of ConfigSet as strings by calling the String
+// method on each Config.
+func (c ConfigSet) Strings() []string {
+	var diags []string
+	for _, diag := range c.cfgs {
+		diags = append(diags, diag.String())
+	}
+	return diags
+}
+
+// UnmarshalTOML implements TOML unmarshaling for ConfigSet.
+func (c *ConfigSet) UnmarshalTOML(data interface{}) error {
+	ldata, ok := data.([]interface{})
+	if !ok {
+		return fmt.Errorf("expected data for diagnostics to be a list")
+	}
+	cfgs := make(map[Type]Config, len(ldata))
+	for _, li := range ldata {
+		s, ok := li.(string)
+		if !ok {
+			return fmt.Errorf("expected data for env to contain strings")
+		}
+		d, err := ParseConfig(s)
+		if err != nil {
+			return err
+		}
+		cfgs[d.Type] = d
+	}
+	c.cfgs = cfgs
+	return nil
+}
+
+// Copy creates a deep clone of a ConfigSet.
+func (c ConfigSet) Copy() ConfigSet {
+	cfgs := make(map[Type]Config, len(c.cfgs))
+	for k, v := range c.cfgs {
+		cfgs[k] = v
+	}
+	return ConfigSet{cfgs}
+}
+
+// Set adds a Config to ConfigSet, overwriting any Config of the same Type.
+func (c *ConfigSet) Set(d Config) {
+	c.cfgs[d.Type] = d
+}
+
+// Clear removes the Config with the provided Type from the ConfigSet, if applicable.
+func (c *ConfigSet) Clear(typ Type) {
+	delete(c.cfgs, typ)
+}
+
+// Get looks up the Config with the provided Type and returns it if it exists, with the
+// second result indicating presence.
+func (c ConfigSet) Get(typ Type) (Config, bool) {
+	cfg, ok := c.cfgs[typ]
+	return cfg, ok
+}
+
+// Empty returns true if the ConfigSet is empty.
+func (c ConfigSet) Empty() bool {
+	return len(c.cfgs) == 0
+}
+
+// ToSlice returns each Config contained in the ConfigSet in a slice.
+func (c ConfigSet) ToSlice() []Config {
+	cfgs := make([]Config, 0, len(c.cfgs))
+	for _, cfg := range c.cfgs {
+		cfgs = append(cfgs, cfg)
+	}
+	return cfgs
+}
+
+// Type is a diagnostic type supported by Sweet.
+type Type string
+
+const (
+	CPUProfile Type = "cpuprofile"
+	MemProfile Type = "memprofile"
+	Perf       Type = "perf"
+	Trace      Type = "trace"
+)
+
+// IsPprof returns whether the diagnostic's data is stored in the pprof format.
+func (t Type) IsPprof() bool {
+	return t == CPUProfile || t == MemProfile
+}
+
+// AsFlag returns the Type suitable for use as a CLI flag.
+func (t Type) AsFlag() string {
+	return "-" + string(t)
+}
+
+// Types returns a slice of all supported types.
+func Types() []Type {
+	return []Type{
+		CPUProfile,
+		MemProfile,
+		Perf,
+		Trace,
+	}
+}
+
+// Config is an intent to collect data for some diagnostic with some room
+// for additional configuration as to how that data is collected.
+type Config struct {
+	// Type is the diagnostic to collect data for.
+	Type
+
+	// Flags is additional opaque configuration for data collection.
+	//
+	// Currently only used if Type == Perf.
+	Flags string
+}
+
+// String returns the string representation of a Config, as it would appear
+// in a Sweet common.Config.
+func (d Config) String() string {
+	result := string(d.Type)
+	if d.Flags != "" {
+		result += "=" + d.Flags
+	}
+	return result
+}
+
+// ParseConfig derives a Config from a string. The string must take the form
+//
+//	<type>[=<flags>]
+//
+// where [=<flags>] is only accepted if <type> is perf.
+func ParseConfig(d string) (Config, error) {
+	comp := strings.SplitN(d, "=", 2)
+	var result Config
+	switch comp[0] {
+	case string(CPUProfile):
+		fallthrough
+	case string(MemProfile):
+		fallthrough
+	case string(Trace):
+		if len(comp) != 1 {
+			return result, fmt.Errorf("diagnostic %q does not take flags", comp[0])
+		}
+		result.Type = Type(comp[0])
+	case string(Perf):
+		if len(comp) == 2 {
+			result.Flags = comp[1]
+		}
+		result.Type = Type(comp[0])
+	default:
+		return result, fmt.Errorf("invalid diagnostic %q", comp[0])
+	}
+	return result, nil
+}
diff --git a/sweet/common/diagnostics/driver.go b/sweet/common/diagnostics/driver.go
new file mode 100644
index 0000000..ff692f0
--- /dev/null
+++ b/sweet/common/diagnostics/driver.go
@@ -0,0 +1,40 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diagnostics
+
+import (
+	"flag"
+	"fmt"
+)
+
+// DriverArgs returns the arguments that should be passed to a Sweet benchmark
+// binary to collect data for the Config.
+func (d Config) DriverArgs(resultsDir string) []string {
+	flag := d.Type.AsFlag()
+	args := []string{flag, resultsDir}
+	if d.Flags != "" {
+		args = append(args, flag+"-flags", d.Flags)
+	}
+	return args
+}
+
+type DriverConfig struct {
+	Config
+	Dir string
+}
+
+func SetFlagsForDriver(f *flag.FlagSet) map[Type]*DriverConfig {
+	storage := make(map[Type]*DriverConfig)
+	for _, t := range Types() {
+		dc := new(DriverConfig)
+		dc.Type = t
+		storage[t] = dc
+		f.StringVar(&dc.Dir, string(t), "", fmt.Sprintf("directory to write %s data", t))
+		if t == Perf {
+			f.StringVar(&dc.Flags, string(t)+"-flags", "", "flags for Linux perf")
+		}
+	}
+	return storage
+}
diff --git a/sweet/common/profile/profile.go b/sweet/common/profile/profile.go
index d448997..d23bb98 100644
--- a/sweet/common/profile/profile.go
+++ b/sweet/common/profile/profile.go
@@ -12,7 +12,7 @@ import (
 	"github.com/google/pprof/profile"
 )
 
-func Read(filename string) (*profile.Profile, error) {
+func ReadPprof(filename string) (*profile.Profile, error) {
 	f, err := os.Open(filename)
 	if err != nil {
 		return nil, err
@@ -21,8 +21,8 @@ func Read(filename string) (*profile.Profile, error) {
 	return profile.Parse(f)
 }
 
-// ReadDir reads all profiles in dir whose name matches match(name).
-func ReadDir(dir string, match func(string) bool) ([]*profile.Profile, error) {
+// ReadDirPprof reads all pprof profiles in dir whose name matches match(name).
+func ReadDirPprof(dir string, match func(string) bool) ([]*profile.Profile, error) {
 	entries, err := os.ReadDir(dir)
 	if err != nil {
 		return nil, err
@@ -39,7 +39,7 @@ func ReadDir(dir string, match func(string) bool) ([]*profile.Profile, error) {
 			continue
 		}
 		if match(name) {
-			p, err := Read(path)
+			p, err := ReadPprof(path)
 			if err != nil {
 				return nil, err
 			}