Add support for tracing via OpenCensus
This adds a few flags for configuring the tracer, including support for Jaeger tracing (the Jaeger exporter is built into OpenCensus).
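For context, a rough sketch of the kind of wiring such flags enable. The flag names and the service name below are illustrative assumptions, not taken from this commit; the exporter and sampler calls use go.opencensus.io/exporter/jaeger and go.opencensus.io/trace, and option field names may differ slightly in the version vendored here.

package main

import (
	"flag"
	"log"

	"go.opencensus.io/exporter/jaeger"
	"go.opencensus.io/trace"
)

func main() {
	// Assumed flag names, for illustration only.
	jaegerEndpoint := flag.String("trace.jaeger-endpoint", "", "Jaeger collector endpoint, e.g. http://localhost:14268")
	sampleRate := flag.Float64("trace.sample-rate", 0.01, "fraction of requests to sample")
	flag.Parse()

	if *jaegerEndpoint != "" {
		exporter, err := jaeger.NewExporter(jaeger.Options{
			Endpoint:    *jaegerEndpoint, // collector endpoint; field name per the exporter of that era
			ServiceName: "myservice",     // illustrative service name
		})
		if err != nil {
			log.Fatalf("creating Jaeger exporter: %v", err)
		}
		trace.RegisterExporter(exporter)
	}
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(*sampleRate)})
}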
19
vendor/go.opencensus.io/zpages/internal/gen.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package internal // import "go.opencensus.io/zpages/internal"

// go get https://github.com/mjibson/esc.git
//go:generate esc -pkg internal -o resources.go public/ templates/
284
vendor/go.opencensus.io/zpages/internal/resources.go
generated
vendored
Normal file
@@ -0,0 +1,284 @@
|
||||
// Code generated by "esc -pkg resources -o resources.go public/ templates/"; DO NOT EDIT.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/base64"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type _escLocalFS struct{}
|
||||
|
||||
var _escLocal _escLocalFS
|
||||
|
||||
type _escStaticFS struct{}
|
||||
|
||||
var _escStatic _escStaticFS
|
||||
|
||||
type _escDirectory struct {
|
||||
fs http.FileSystem
|
||||
name string
|
||||
}
|
||||
|
||||
type _escFile struct {
|
||||
compressed string
|
||||
size int64
|
||||
modtime int64
|
||||
local string
|
||||
isDir bool
|
||||
|
||||
once sync.Once
|
||||
data []byte
|
||||
name string
|
||||
}
|
||||
|
||||
func (_escLocalFS) Open(name string) (http.File, error) {
|
||||
f, present := _escData[path.Clean(name)]
|
||||
if !present {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
return os.Open(f.local)
|
||||
}
|
||||
|
||||
func (_escStaticFS) prepare(name string) (*_escFile, error) {
|
||||
f, present := _escData[path.Clean(name)]
|
||||
if !present {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
var err error
|
||||
f.once.Do(func() {
|
||||
f.name = path.Base(name)
|
||||
if f.size == 0 {
|
||||
return
|
||||
}
|
||||
var gr *gzip.Reader
|
||||
b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed))
|
||||
gr, err = gzip.NewReader(b64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
f.data, err = ioutil.ReadAll(gr)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (fs _escStaticFS) Open(name string) (http.File, error) {
|
||||
f, err := fs.prepare(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.File()
|
||||
}
|
||||
|
||||
func (dir _escDirectory) Open(name string) (http.File, error) {
|
||||
return dir.fs.Open(dir.name + name)
|
||||
}
|
||||
|
||||
func (f *_escFile) File() (http.File, error) {
|
||||
type httpFile struct {
|
||||
*bytes.Reader
|
||||
*_escFile
|
||||
}
|
||||
return &httpFile{
|
||||
Reader: bytes.NewReader(f.data),
|
||||
_escFile: f,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Stat() (os.FileInfo, error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f *_escFile) Size() int64 {
|
||||
return f.size
|
||||
}
|
||||
|
||||
func (f *_escFile) Mode() os.FileMode {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (f *_escFile) ModTime() time.Time {
|
||||
return time.Unix(f.modtime, 0)
|
||||
}
|
||||
|
||||
func (f *_escFile) IsDir() bool {
|
||||
return f.isDir
|
||||
}
|
||||
|
||||
func (f *_escFile) Sys() interface{} {
|
||||
return f
|
||||
}
|
||||
|
||||
// FS returns a http.Filesystem for the embedded assets. If useLocal is true,
|
||||
// the filesystem's contents are instead used.
|
||||
func FS(useLocal bool) http.FileSystem {
|
||||
if useLocal {
|
||||
return _escLocal
|
||||
}
|
||||
return _escStatic
|
||||
}
|
||||
|
||||
// Dir returns a http.Filesystem for the embedded assets on a given prefix dir.
|
||||
// If useLocal is true, the filesystem's contents are instead used.
|
||||
func Dir(useLocal bool, name string) http.FileSystem {
|
||||
if useLocal {
|
||||
return _escDirectory{fs: _escLocal, name: name}
|
||||
}
|
||||
return _escDirectory{fs: _escStatic, name: name}
|
||||
}
|
||||
|
||||
// FSByte returns the named file from the embedded assets. If useLocal is
|
||||
// true, the filesystem's contents are instead used.
|
||||
func FSByte(useLocal bool, name string) ([]byte, error) {
|
||||
if useLocal {
|
||||
f, err := _escLocal.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, err := ioutil.ReadAll(f)
|
||||
_ = f.Close()
|
||||
return b, err
|
||||
}
|
||||
f, err := _escStatic.prepare(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.data, nil
|
||||
}
|
||||
|
||||
// FSMustByte is the same as FSByte, but panics if name is not present.
|
||||
func FSMustByte(useLocal bool, name string) []byte {
|
||||
b, err := FSByte(useLocal, name)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// FSString is the string version of FSByte.
|
||||
func FSString(useLocal bool, name string) (string, error) {
|
||||
b, err := FSByte(useLocal, name)
|
||||
return string(b), err
|
||||
}
|
||||
|
||||
// FSMustString is the string version of FSMustByte.
|
||||
func FSMustString(useLocal bool, name string) string {
|
||||
return string(FSMustByte(useLocal, name))
|
||||
}
|
||||
|
||||
var _escData = map[string]*_escFile{
|
||||
|
||||
"/public/opencensus.css": {
|
||||
local: "public/opencensus.css",
|
||||
size: 0,
|
||||
modtime: 1519153040,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/wEAAP//AAAAAAAAAAA=
|
||||
`,
|
||||
},
|
||||
|
||||
"/templates/footer.html": {
|
||||
local: "templates/footer.html",
|
||||
size: 16,
|
||||
modtime: 1519153248,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/7LRT8pPqbTjstHPKMnNseMCBAAA//8ATCBFEAAAAA==
|
||||
`,
|
||||
},
|
||||
|
||||
"/templates/header.html": {
|
||||
local: "templates/header.html",
|
||||
size: 523,
|
||||
modtime: 1519164535,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/5TRv07rMBQG8D1P4ev1qvat7oKQEwZgYEAwdGF0nZP4UP+JfE6oqqrvjkyKBGIpky0f
|
||||
+6fP+syfu6fbzcvzvfAcQ9eYuohg09hKSLIzHmzfNUIIYSKwFc7bQsCtnHlYXcnziJEDdMej2tTN6WT0
|
||||
crJMA6adKBBaST4XdjMLdDlJ4QsMrdR6v9+rPEFykGgmhVkP9q1eUeiy1D8ZPgQgD8CfxjRvAzr9BXFE
|
||||
F730zBNdaz3kxKTGnMcAdkJSLkddM9wMNmI4tI+WoaANfx9cTiR/QbvcgxqBYx/q39bqv/qn45lTmHoc
|
||||
82rCtFMR00fwM06u4MSihwGKoOIuJSvzSrIzehG6xuilSLPN/aHWvP7Wll93zXsAAAD//6iqQ1ULAgAA
|
||||
`,
|
||||
},
|
||||
|
||||
"/templates/rpcz.html": {
|
||||
local: "templates/rpcz.html",
|
||||
size: 2626,
|
||||
modtime: 1519164559,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/+yW3WrbMBTH7/0UwmUjYyxJU3o1W1C6sQ4WNrq+gCwdfzBFMtJx9+Hl3cex3DhNCrOz
|
||||
XfbGxFZ+5/8D+Ry5bZ0wBbD5VxT4wdmm9tttlNQ8QZFpYFkhrbYuPQMAyHP2vVJYpufL5QueoGNCV4VJ
|
||||
JRgExxNUPMmtQearX5C+XvG2nb+rHEisrNlukwUt8mRB/1ugowuF8GRR8+ggMD7L8/wSIGa5ExtIM/uD
|
||||
SdDa10JWpkiX3V0tlKK7FY8ixhgjp6ECAFwqiHm3FJZLCi2DKnnsLzGphfdprM9jJi0lmfSCX9vG4FTo
|
||||
6r5gWiAY+ZPNNv7VVP5WILCZq+ViOvvR1A2y2bfsBPZzg6fD752zzndU2Aza47H70r9KGnLka8DSql38
|
||||
S5P5+u3x9Vgr1HBVUSJfV2bel3i8cOOefn5ncf6c+Zz5XzKfaADyGLrlYn9UvlnxB52DERlFw4Q2oval
|
||||
RRrQDyX3zBVPMhq4oXlo2mZHjXvcyqrXjzv/mAp0A29dmQbht6TfVGscdWMbN5W5syj0I2ik59V98SmM
|
||||
2F5240elDlynO5kKwjtspO3tl2sa6r2qEwijYnusM50KBdE9aqRqd4DsySqBYnT2Du6UT0OD+AE7Uj6c
|
||||
YKfaD/R0/YH9F/9wiE5uv4BN7L8A/a0BwxxqWzCKPg37b7bdgz8BAAD//6NjPmJCCgAA
|
||||
`,
|
||||
},
|
||||
|
||||
"/templates/summary.html": {
|
||||
local: "templates/summary.html",
|
||||
size: 1619,
|
||||
modtime: 1519164559,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/6yVPW/bMBCG9/yKg2p4qu2kW12JRQtkCzok3YoOlHSWBdMngaSc2iz/e8EP+Stqi8Re
|
||||
DIo63t3zvjwr1TwXCEpvBWZJ3sgS5US1vKipmsNtwm4AAFItwyI8lFA0QrWcsjvgoq4oE7jQLM3ZU8sJ
|
||||
vvE1prOcpTNdnhxjY8pV+yn8/j5+8KFDiZMCSaNMXPLHjqim6i2pB5v/OFDjgWukYgtPfN0KVFerNcRz
|
||||
L2Ujhyuls17xv0t/pcbelsYYyalCmEbBvnbFCrVzXlmb6uU/wX8YM7X2Z0ReMmOQSmuviRIENGbEYZ7B
|
||||
9LvkBap7KtumJm2teyNqWin/9sGt/GaAGsnmuaYSf733Sx/z2DyHkAmMiK/RbzreuFkvADdIh7NOBrkf
|
||||
LF6sKtl0VM7hHSImjlko9EGBHyZRAUdvTMzdD8b/9IgtRKijVC/k57CUuMgSp421n3dOOgeUGePBrB3v
|
||||
9LbF7NY1Of1S6HrjG+HsUMr1ft7wIXIfdUb1aoa9Ib0bGy66IH28d07ACxjvxjvV5X5pzCj65rhDpSPs
|
||||
/o6e0J9Pge+G+dv98tClYlxs6IcDbPDW/wGpE8cGfB2Iiij9kHnIdOY/JezmTwAAAP//Dz6TJ1MGAAA=
|
||||
`,
|
||||
},
|
||||
|
||||
"/templates/traces.html": {
|
||||
local: "templates/traces.html",
|
||||
size: 420,
|
||||
modtime: 1519164578,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/4yQsU70MBCEez/FKtIv3RW/w6WgOIw7kGgoDiRqO14gwnGM1xEgs++OnKMA5Qq2ssYz
|
||||
I82nolZW30UT4NaMuIdSZH0wg2qtVm3UQkVd1XlkhgO+zkiZvj8SavHwjAFO35U3kdDBhrDfiv9/PFFK
|
||||
MuEJQR6mN2IuJaYh5Edo/nXn1MBmCA7fQV4P6B3B2ZYZfnh23dqzO3p+i12tlp85mR4HxyxKweCYVbvs
|
||||
UjYt25UFyh8eL5t+8lPaWz/jRaPva+zGVUowogkEZMbo0UE6MpKiIlinTf9yMh6mvKpYMH8FAAD//yQs
|
||||
JUakAQAA
|
||||
`,
|
||||
},
|
||||
|
||||
"/": {
|
||||
isDir: true,
|
||||
local: "",
|
||||
},
|
||||
|
||||
"/public": {
|
||||
isDir: true,
|
||||
local: "public",
|
||||
},
|
||||
|
||||
"/templates": {
|
||||
isDir: true,
|
||||
local: "templates",
|
||||
},
|
||||
}
|
||||
333
vendor/go.opencensus.io/zpages/rpcz.go
generated
vendored
Normal file
@@ -0,0 +1,333 @@
|
||||
// Copyright 2017, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
package zpages
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"net/http"
|
||||
"sort"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/plugin/ocgrpc"
|
||||
"go.opencensus.io/stats/view"
|
||||
)
|
||||
|
||||
const bytesPerKb = 1024
|
||||
|
||||
var (
|
||||
programStartTime = time.Now()
|
||||
mu sync.Mutex // protects snaps
|
||||
snaps = make(map[methodKey]*statSnapshot)
|
||||
|
||||
// viewType lists the views we are interested in for RPC stats.
|
||||
// A view's map value indicates whether that view contains data for received
|
||||
// RPCs.
|
||||
viewType = map[*view.View]bool{
|
||||
ocgrpc.ClientCompletedRPCsView: false,
|
||||
ocgrpc.ClientSentBytesPerRPCView: false,
|
||||
ocgrpc.ClientSentMessagesPerRPCView: false,
|
||||
ocgrpc.ClientReceivedBytesPerRPCView: false,
|
||||
ocgrpc.ClientReceivedMessagesPerRPCView: false,
|
||||
ocgrpc.ClientRoundtripLatencyView: false,
|
||||
ocgrpc.ServerCompletedRPCsView: true,
|
||||
ocgrpc.ServerReceivedBytesPerRPCView: true,
|
||||
ocgrpc.ServerReceivedMessagesPerRPCView: true,
|
||||
ocgrpc.ServerSentBytesPerRPCView: true,
|
||||
ocgrpc.ServerSentMessagesPerRPCView: true,
|
||||
ocgrpc.ServerLatencyView: true,
|
||||
}
|
||||
)
|
||||
|
||||
func registerRPCViews() {
|
||||
views := make([]*view.View, 0, len(viewType))
|
||||
for v := range viewType {
|
||||
views = append(views, v)
|
||||
}
|
||||
if err := view.Register(views...); err != nil {
|
||||
log.Printf("error subscribing to views: %v", err)
|
||||
}
|
||||
view.RegisterExporter(snapExporter{})
|
||||
}
|
||||
|
||||
func rpczHandler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
WriteHTMLRpczPage(w)
|
||||
}
|
||||
|
||||
// WriteHTMLRpczPage writes an HTML document to w containing per-method RPC stats.
|
||||
func WriteHTMLRpczPage(w io.Writer) {
|
||||
if err := headerTemplate.Execute(w, headerData{Title: "RPC Stats"}); err != nil {
|
||||
log.Printf("zpages: executing template: %v", err)
|
||||
}
|
||||
WriteHTMLRpczSummary(w)
|
||||
if err := footerTemplate.Execute(w, nil); err != nil {
|
||||
log.Printf("zpages: executing template: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteHTMLRpczSummary writes HTML to w containing per-method RPC stats.
|
||||
//
|
||||
// It includes neither a header nor footer, so you can embed this data in other pages.
|
||||
func WriteHTMLRpczSummary(w io.Writer) {
|
||||
mu.Lock()
|
||||
if err := statsTemplate.Execute(w, getStatsPage()); err != nil {
|
||||
log.Printf("zpages: executing template: %v", err)
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
// WriteTextRpczPage writes formatted text to w containing per-method RPC stats.
|
||||
func WriteTextRpczPage(w io.Writer) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
page := getStatsPage()
|
||||
|
||||
for i, sg := range page.StatGroups {
|
||||
switch i {
|
||||
case 0:
|
||||
fmt.Fprint(w, "Sent:\n")
|
||||
case 1:
|
||||
fmt.Fprint(w, "\nReceived:\n")
|
||||
}
|
||||
tw := tabwriter.NewWriter(w, 6, 8, 1, ' ', 0)
|
||||
fmt.Fprint(tw, "Method\tCount\t\t\tAvgLat\t\t\tMaxLat\t\t\tRate\t\t\tIn (MiB/s)\t\t\tOut (MiB/s)\t\t\tErrors\t\t\n")
|
||||
fmt.Fprint(tw, "\tMin\tHr\tTot\tMin\tHr\tTot\tMin\tHr\tTot\tMin\tHr\tTot\tMin\tHr\tTot\tMin\tHr\tTot\tMin\tHr\tTot\n")
|
||||
for _, s := range sg.Snapshots {
|
||||
fmt.Fprintf(tw, "%s\t%d\t%d\t%d\t%v\t%v\t%v\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%d\t%d\t%d\n",
|
||||
s.Method,
|
||||
s.CountMinute,
|
||||
s.CountHour,
|
||||
s.CountTotal,
|
||||
s.AvgLatencyMinute,
|
||||
s.AvgLatencyHour,
|
||||
s.AvgLatencyTotal,
|
||||
s.RPCRateMinute,
|
||||
s.RPCRateHour,
|
||||
s.RPCRateTotal,
|
||||
s.InputRateMinute/bytesPerKb,
|
||||
s.InputRateHour/bytesPerKb,
|
||||
s.InputRateTotal/bytesPerKb,
|
||||
s.OutputRateMinute/bytesPerKb,
|
||||
s.OutputRateHour/bytesPerKb,
|
||||
s.OutputRateTotal/bytesPerKb,
|
||||
s.ErrorsMinute,
|
||||
s.ErrorsHour,
|
||||
s.ErrorsTotal)
|
||||
}
|
||||
tw.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
// headerData contains data for the header template.
|
||||
type headerData struct {
|
||||
Title string
|
||||
}
|
||||
|
||||
// statsPage aggregates stats on the page for 'sent' and 'received' categories
|
||||
type statsPage struct {
|
||||
StatGroups []*statGroup
|
||||
}
|
||||
|
||||
// statGroup aggregates snapshots for a directional category
|
||||
type statGroup struct {
|
||||
Direction string
|
||||
Snapshots []*statSnapshot
|
||||
}
|
||||
|
||||
func (s *statGroup) Len() int {
|
||||
return len(s.Snapshots)
|
||||
}
|
||||
|
||||
func (s *statGroup) Swap(i, j int) {
|
||||
s.Snapshots[i], s.Snapshots[j] = s.Snapshots[j], s.Snapshots[i]
|
||||
}
|
||||
|
||||
func (s *statGroup) Less(i, j int) bool {
|
||||
return s.Snapshots[i].Method < s.Snapshots[j].Method
|
||||
}
|
||||
|
||||
// statSnapshot holds the data items that are presented in a single row of RPC
|
||||
// stat information.
|
||||
type statSnapshot struct {
|
||||
// TODO: compute hour/minute values from cumulative
|
||||
Method string
|
||||
Received bool
|
||||
CountMinute int
|
||||
CountHour int
|
||||
CountTotal int
|
||||
AvgLatencyMinute time.Duration
|
||||
AvgLatencyHour time.Duration
|
||||
AvgLatencyTotal time.Duration
|
||||
RPCRateMinute float64
|
||||
RPCRateHour float64
|
||||
RPCRateTotal float64
|
||||
InputRateMinute float64
|
||||
InputRateHour float64
|
||||
InputRateTotal float64
|
||||
OutputRateMinute float64
|
||||
OutputRateHour float64
|
||||
OutputRateTotal float64
|
||||
ErrorsMinute int
|
||||
ErrorsHour int
|
||||
ErrorsTotal int
|
||||
}
|
||||
|
||||
type methodKey struct {
|
||||
method string
|
||||
received bool
|
||||
}
|
||||
|
||||
type snapExporter struct{}
|
||||
|
||||
func (s snapExporter) ExportView(vd *view.Data) {
|
||||
received, ok := viewType[vd.View]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if len(vd.Rows) == 0 {
|
||||
return
|
||||
}
|
||||
ageSec := float64(time.Now().Sub(programStartTime)) / float64(time.Second)
|
||||
|
||||
computeRate := func(maxSec, x float64) float64 {
|
||||
dur := ageSec
|
||||
if maxSec > 0 && dur > maxSec {
|
||||
dur = maxSec
|
||||
}
|
||||
return x / dur
|
||||
}
|
||||
|
||||
convertTime := func(ms float64) time.Duration {
|
||||
if math.IsInf(ms, 0) || math.IsNaN(ms) {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(float64(time.Millisecond) * ms)
|
||||
}
|
||||
|
||||
haveResetErrors := make(map[string]struct{})
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
for _, row := range vd.Rows {
|
||||
var method string
|
||||
for _, tag := range row.Tags {
|
||||
if tag.Key == ocgrpc.KeyClientMethod || tag.Key == ocgrpc.KeyServerMethod {
|
||||
method = tag.Value
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
key := methodKey{method: method, received: received}
|
||||
s := snaps[key]
|
||||
if s == nil {
|
||||
s = &statSnapshot{Method: method, Received: received}
|
||||
snaps[key] = s
|
||||
}
|
||||
|
||||
var (
|
||||
sum float64
|
||||
count float64
|
||||
)
|
||||
switch v := row.Data.(type) {
|
||||
case *view.CountData:
|
||||
sum = float64(v.Value)
|
||||
count = float64(v.Value)
|
||||
case *view.DistributionData:
|
||||
sum = v.Sum()
|
||||
count = float64(v.Count)
|
||||
case *view.SumData:
|
||||
sum = v.Value
|
||||
count = v.Value
|
||||
}
|
||||
|
||||
// Update field of s corresponding to the view.
|
||||
switch vd.View {
|
||||
case ocgrpc.ClientCompletedRPCsView:
|
||||
if _, ok := haveResetErrors[method]; !ok {
|
||||
haveResetErrors[method] = struct{}{}
|
||||
s.ErrorsTotal = 0
|
||||
}
|
||||
for _, tag := range row.Tags {
|
||||
if tag.Key == ocgrpc.KeyClientStatus && tag.Value != "OK" {
|
||||
s.ErrorsTotal += int(count)
|
||||
}
|
||||
}
|
||||
|
||||
case ocgrpc.ClientRoundtripLatencyView:
|
||||
s.AvgLatencyTotal = convertTime(sum / count)
|
||||
|
||||
case ocgrpc.ClientSentBytesPerRPCView:
|
||||
s.OutputRateTotal = computeRate(0, sum)
|
||||
|
||||
case ocgrpc.ClientReceivedBytesPerRPCView:
|
||||
s.InputRateTotal = computeRate(0, sum)
|
||||
|
||||
case ocgrpc.ClientSentMessagesPerRPCView:
|
||||
s.CountTotal = int(count)
|
||||
s.RPCRateTotal = computeRate(0, count)
|
||||
|
||||
case ocgrpc.ClientReceivedMessagesPerRPCView:
|
||||
// currently unused
|
||||
|
||||
case ocgrpc.ServerCompletedRPCsView:
|
||||
if _, ok := haveResetErrors[method]; !ok {
|
||||
haveResetErrors[method] = struct{}{}
|
||||
s.ErrorsTotal = 0
|
||||
}
|
||||
for _, tag := range row.Tags {
|
||||
if tag.Key == ocgrpc.KeyServerStatus && tag.Value != "OK" {
|
||||
s.ErrorsTotal += int(count)
|
||||
}
|
||||
}
|
||||
|
||||
case ocgrpc.ServerLatencyView:
|
||||
s.AvgLatencyTotal = convertTime(sum / count)
|
||||
|
||||
case ocgrpc.ServerSentBytesPerRPCView:
|
||||
s.OutputRateTotal = computeRate(0, sum)
|
||||
|
||||
case ocgrpc.ServerReceivedMessagesPerRPCView:
|
||||
s.CountTotal = int(count)
|
||||
s.RPCRateTotal = computeRate(0, count)
|
||||
|
||||
case ocgrpc.ServerSentMessagesPerRPCView:
|
||||
// currently unused
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getStatsPage() *statsPage {
|
||||
sentStats := statGroup{Direction: "Sent"}
|
||||
receivedStats := statGroup{Direction: "Received"}
|
||||
for key, sg := range snaps {
|
||||
if key.received {
|
||||
receivedStats.Snapshots = append(receivedStats.Snapshots, sg)
|
||||
} else {
|
||||
sentStats.Snapshots = append(sentStats.Snapshots, sg)
|
||||
}
|
||||
}
|
||||
sort.Sort(&sentStats)
|
||||
sort.Sort(&receivedStats)
|
||||
|
||||
return &statsPage{
|
||||
StatGroups: []*statGroup{&sentStats, &receivedStats},
|
||||
}
|
||||
}
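As a usage note for the exported functions above: a consuming program can render the same per-method RPC stats as plain text instead of serving the HTML page. A minimal sketch (not part of the vendored file); the table is only populated once the RPC views are registered, e.g. after calling zpages.Handle.

package main

import (
	"os"

	"go.opencensus.io/zpages"
)

func main() {
	// Writes the same per-method table served at /rpcz, formatted with tabwriter.
	zpages.WriteTextRpczPage(os.Stdout)
}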
|
||||
121
vendor/go.opencensus.io/zpages/templates.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
// Copyright 2017, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
package zpages
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/trace"
|
||||
"go.opencensus.io/zpages/internal"
|
||||
)
|
||||
|
||||
var (
|
||||
fs = internal.FS(false)
|
||||
templateFunctions = template.FuncMap{
|
||||
"count": countFormatter,
|
||||
"ms": msFormatter,
|
||||
"rate": rateFormatter,
|
||||
"datarate": dataRateFormatter,
|
||||
"even": even,
|
||||
"traceid": traceIDFormatter,
|
||||
}
|
||||
headerTemplate = parseTemplate("header")
|
||||
summaryTableTemplate = parseTemplate("summary")
|
||||
statsTemplate = parseTemplate("rpcz")
|
||||
tracesTableTemplate = parseTemplate("traces")
|
||||
footerTemplate = parseTemplate("footer")
|
||||
)
|
||||
|
||||
func parseTemplate(name string) *template.Template {
|
||||
f, err := fs.Open("/templates/" + name + ".html")
|
||||
if err != nil {
|
||||
log.Panicf("%v: %v", name, err)
|
||||
}
|
||||
defer f.Close()
|
||||
text, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
log.Panicf("%v: %v", name, err)
|
||||
}
|
||||
return template.Must(template.New(name).Funcs(templateFunctions).Parse(string(text)))
|
||||
}
|
||||
|
||||
func countFormatter(num int) string {
|
||||
if num <= 0 {
|
||||
return " "
|
||||
}
|
||||
var floatVal float64
|
||||
var suffix string
|
||||
|
||||
num64 := int64(num)
|
||||
|
||||
if num64 >= 1e12 {
|
||||
floatVal = float64(num64) / 1e12
|
||||
suffix = " T "
|
||||
} else if num64 >= 1e9 {
|
||||
floatVal = float64(num64) / 1e9
|
||||
suffix = " G "
|
||||
} else if num64 >= 1e6 {
|
||||
floatVal = float64(num64) / 1e6
|
||||
suffix = " M "
|
||||
}
|
||||
|
||||
if floatVal != 0 {
|
||||
return fmt.Sprintf("%1.3f%s", floatVal, suffix)
|
||||
}
|
||||
return fmt.Sprint(num)
|
||||
}
|
||||
|
||||
func msFormatter(d time.Duration) string {
|
||||
if d == 0 {
|
||||
return "0"
|
||||
}
|
||||
if d < 10*time.Millisecond {
|
||||
return fmt.Sprintf("%.3f", float64(d)*1e-6)
|
||||
}
|
||||
return strconv.Itoa(int(d / time.Millisecond))
|
||||
}
|
||||
|
||||
func rateFormatter(r float64) string {
|
||||
return fmt.Sprintf("%.3f", r)
|
||||
}
|
||||
|
||||
func dataRateFormatter(b float64) string {
|
||||
return fmt.Sprintf("%.3f", b/1e6)
|
||||
}
|
||||
|
||||
func traceIDFormatter(r traceRow) template.HTML {
|
||||
sc := r.SpanContext
|
||||
if sc == (trace.SpanContext{}) {
|
||||
return ""
|
||||
}
|
||||
col := "black"
|
||||
if sc.TraceOptions.IsSampled() {
|
||||
col = "blue"
|
||||
}
|
||||
if r.ParentSpanID != (trace.SpanID{}) {
|
||||
return template.HTML(fmt.Sprintf(`trace_id: <b style="color:%s">%s</b> span_id: %s parent_span_id: %s`, col, sc.TraceID, sc.SpanID, r.ParentSpanID))
|
||||
}
|
||||
return template.HTML(fmt.Sprintf(`trace_id: <b style="color:%s">%s</b> span_id: %s`, col, sc.TraceID, sc.SpanID))
|
||||
}
|
||||
|
||||
func even(x int) bool {
|
||||
return x%2 == 0
|
||||
}
|
||||
442
vendor/go.opencensus.io/zpages/tracez.go
generated
vendored
Normal file
@@ -0,0 +1,442 @@
|
||||
// Copyright 2017, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
package zpages
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/internal"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
const (
|
||||
// spanNameQueryField is the header for span name.
|
||||
spanNameQueryField = "zspanname"
|
||||
// spanTypeQueryField is the header for type (running = 0, latency = 1, error = 2) to display.
|
||||
spanTypeQueryField = "ztype"
|
||||
// spanSubtypeQueryField is the header for sub-type:
|
||||
// * for latency based samples [0, 8] representing the latency buckets, where 0 is the first one;
|
||||
// * for error based samples, 0 means all, otherwise the error code;
|
||||
spanSubtypeQueryField = "zsubtype"
|
||||
// maxTraceMessageLength is the maximum length of a message in tracez output.
|
||||
maxTraceMessageLength = 1024
|
||||
)
|
||||
|
||||
var (
|
||||
defaultLatencies = [...]time.Duration{
|
||||
10 * time.Microsecond,
|
||||
100 * time.Microsecond,
|
||||
time.Millisecond,
|
||||
10 * time.Millisecond,
|
||||
100 * time.Millisecond,
|
||||
time.Second,
|
||||
10 * time.Second,
|
||||
100 * time.Second,
|
||||
}
|
||||
canonicalCodes = [...]string{
|
||||
"OK",
|
||||
"CANCELLED",
|
||||
"UNKNOWN",
|
||||
"INVALID_ARGUMENT",
|
||||
"DEADLINE_EXCEEDED",
|
||||
"NOT_FOUND",
|
||||
"ALREADY_EXISTS",
|
||||
"PERMISSION_DENIED",
|
||||
"RESOURCE_EXHAUSTED",
|
||||
"FAILED_PRECONDITION",
|
||||
"ABORTED",
|
||||
"OUT_OF_RANGE",
|
||||
"UNIMPLEMENTED",
|
||||
"INTERNAL",
|
||||
"UNAVAILABLE",
|
||||
"DATA_LOSS",
|
||||
"UNAUTHENTICATED",
|
||||
}
|
||||
)
|
||||
|
||||
func canonicalCodeString(code int32) string {
|
||||
if code < 0 || int(code) >= len(canonicalCodes) {
|
||||
return "error code " + strconv.FormatInt(int64(code), 10)
|
||||
}
|
||||
return canonicalCodes[code]
|
||||
}
|
||||
|
||||
func tracezHandler(w http.ResponseWriter, r *http.Request) {
|
||||
r.ParseForm()
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
name := r.Form.Get(spanNameQueryField)
|
||||
t, _ := strconv.Atoi(r.Form.Get(spanTypeQueryField))
|
||||
st, _ := strconv.Atoi(r.Form.Get(spanSubtypeQueryField))
|
||||
WriteHTMLTracezPage(w, name, t, st)
|
||||
}
|
||||
|
||||
// WriteHTMLTracezPage writes an HTML document to w containing locally-sampled trace spans.
|
||||
func WriteHTMLTracezPage(w io.Writer, spanName string, spanType, spanSubtype int) {
|
||||
if err := headerTemplate.Execute(w, headerData{Title: "Trace Spans"}); err != nil {
|
||||
log.Printf("zpages: executing template: %v", err)
|
||||
}
|
||||
WriteHTMLTracezSummary(w)
|
||||
WriteHTMLTracezSpans(w, spanName, spanType, spanSubtype)
|
||||
if err := footerTemplate.Execute(w, nil); err != nil {
|
||||
log.Printf("zpages: executing template: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteHTMLTracezSummary writes HTML to w containing a summary of locally-sampled trace spans.
|
||||
//
|
||||
// It includes neither a header nor footer, so you can embed this data in other pages.
|
||||
func WriteHTMLTracezSummary(w io.Writer) {
|
||||
if err := summaryTableTemplate.Execute(w, getSummaryPageData()); err != nil {
|
||||
log.Printf("zpages: executing template: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteHTMLTracezSpans writes HTML to w containing locally-sampled trace spans.
|
||||
//
|
||||
// It includes neither a header nor footer, so you can embed this data in other pages.
|
||||
func WriteHTMLTracezSpans(w io.Writer, spanName string, spanType, spanSubtype int) {
|
||||
if spanName == "" {
|
||||
return
|
||||
}
|
||||
if err := tracesTableTemplate.Execute(w, traceDataFromSpans(spanName, traceSpans(spanName, spanType, spanSubtype))); err != nil {
|
||||
log.Printf("zpages: executing template: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteTextTracezSpans writes formatted text to w containing locally-sampled trace spans.
|
||||
func WriteTextTracezSpans(w io.Writer, spanName string, spanType, spanSubtype int) {
|
||||
spans := traceSpans(spanName, spanType, spanSubtype)
|
||||
data := traceDataFromSpans(spanName, spans)
|
||||
writeTextTraces(w, data)
|
||||
}
|
||||
|
||||
// WriteTextTracezSummary writes formatted text to w containing a summary of locally-sampled trace spans.
|
||||
func WriteTextTracezSummary(w io.Writer) {
|
||||
w.Write([]byte("Locally sampled spans summary\n\n"))
|
||||
|
||||
data := getSummaryPageData()
|
||||
if len(data.Rows) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
tw := tabwriter.NewWriter(w, 8, 8, 1, ' ', 0)
|
||||
|
||||
for i, s := range data.Header {
|
||||
if i != 0 {
|
||||
tw.Write([]byte("\t"))
|
||||
}
|
||||
tw.Write([]byte(s))
|
||||
}
|
||||
tw.Write([]byte("\n"))
|
||||
|
||||
put := func(x int) {
|
||||
if x == 0 {
|
||||
tw.Write([]byte(".\t"))
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(tw, "%d\t", x)
|
||||
}
|
||||
for _, r := range data.Rows {
|
||||
tw.Write([]byte(r.Name))
|
||||
tw.Write([]byte("\t"))
|
||||
put(r.Active)
|
||||
for _, l := range r.Latency {
|
||||
put(l)
|
||||
}
|
||||
put(r.Errors)
|
||||
tw.Write([]byte("\n"))
|
||||
}
|
||||
tw.Flush()
|
||||
}
|
||||
|
||||
// traceData contains data for the trace data template.
|
||||
type traceData struct {
|
||||
Name string
|
||||
Num int
|
||||
Rows []traceRow
|
||||
}
|
||||
|
||||
type traceRow struct {
|
||||
Fields [3]string
|
||||
trace.SpanContext
|
||||
ParentSpanID trace.SpanID
|
||||
}
|
||||
|
||||
type events []interface{}
|
||||
|
||||
func (e events) Len() int { return len(e) }
|
||||
func (e events) Less(i, j int) bool {
|
||||
var ti time.Time
|
||||
switch x := e[i].(type) {
|
||||
case *trace.Annotation:
|
||||
ti = x.Time
|
||||
case *trace.MessageEvent:
|
||||
ti = x.Time
|
||||
}
|
||||
switch x := e[j].(type) {
|
||||
case *trace.Annotation:
|
||||
return ti.Before(x.Time)
|
||||
case *trace.MessageEvent:
|
||||
return ti.Before(x.Time)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (e events) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
|
||||
|
||||
func traceRows(s *trace.SpanData) []traceRow {
|
||||
start := s.StartTime
|
||||
|
||||
lasty, lastm, lastd := start.Date()
|
||||
wholeTime := func(t time.Time) string {
|
||||
return t.Format("2006/01/02-15:04:05") + fmt.Sprintf(".%06d", t.Nanosecond()/1000)
|
||||
}
|
||||
formatTime := func(t time.Time) string {
|
||||
y, m, d := t.Date()
|
||||
if y == lasty && m == lastm && d == lastd {
|
||||
return t.Format(" 15:04:05") + fmt.Sprintf(".%06d", t.Nanosecond()/1000)
|
||||
}
|
||||
lasty, lastm, lastd = y, m, d
|
||||
return wholeTime(t)
|
||||
}
|
||||
|
||||
lastTime := start
|
||||
formatElapsed := func(t time.Time) string {
|
||||
d := t.Sub(lastTime)
|
||||
lastTime = t
|
||||
u := int64(d / 1000)
|
||||
// There are five cases for duration printing:
|
||||
// -1234567890s
|
||||
// -1234.123456
|
||||
// .123456
|
||||
// 12345.123456
|
||||
// 12345678901s
|
||||
switch {
|
||||
case u < -9999999999:
|
||||
return fmt.Sprintf("%11ds", u/1e6)
|
||||
case u < 0:
|
||||
sec := u / 1e6
|
||||
u -= sec * 1e6
|
||||
return fmt.Sprintf("%5d.%06d", sec, -u)
|
||||
case u < 1e6:
|
||||
return fmt.Sprintf(" .%6d", u)
|
||||
case u <= 99999999999:
|
||||
sec := u / 1e6
|
||||
u -= sec * 1e6
|
||||
return fmt.Sprintf("%5d.%06d", sec, u)
|
||||
default:
|
||||
return fmt.Sprintf("%11ds", u/1e6)
|
||||
}
|
||||
}
|
||||
|
||||
firstRow := traceRow{Fields: [3]string{wholeTime(start), "", ""}, SpanContext: s.SpanContext, ParentSpanID: s.ParentSpanID}
|
||||
if s.EndTime.IsZero() {
|
||||
firstRow.Fields[1] = " "
|
||||
} else {
|
||||
firstRow.Fields[1] = formatElapsed(s.EndTime)
|
||||
lastTime = start
|
||||
}
|
||||
out := []traceRow{firstRow}
|
||||
|
||||
formatAttributes := func(a map[string]interface{}) string {
|
||||
if len(a) == 0 {
|
||||
return ""
|
||||
}
|
||||
var keys []string
|
||||
for key := range a {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
var s []string
|
||||
for _, key := range keys {
|
||||
val := a[key]
|
||||
switch val.(type) {
|
||||
case string:
|
||||
s = append(s, fmt.Sprintf("%s=%q", key, val))
|
||||
default:
|
||||
s = append(s, fmt.Sprintf("%s=%v", key, val))
|
||||
}
|
||||
}
|
||||
return "Attributes:{" + strings.Join(s, ", ") + "}"
|
||||
}
|
||||
|
||||
if s.Status != (trace.Status{}) {
|
||||
msg := fmt.Sprintf("Status{canonicalCode=%s, description=%q}",
|
||||
canonicalCodeString(s.Status.Code), s.Status.Message)
|
||||
out = append(out, traceRow{Fields: [3]string{"", "", msg}})
|
||||
}
|
||||
|
||||
if len(s.Attributes) != 0 {
|
||||
out = append(out, traceRow{Fields: [3]string{"", "", formatAttributes(s.Attributes)}})
|
||||
}
|
||||
|
||||
var es events
|
||||
for i := range s.Annotations {
|
||||
es = append(es, &s.Annotations[i])
|
||||
}
|
||||
for i := range s.MessageEvents {
|
||||
es = append(es, &s.MessageEvents[i])
|
||||
}
|
||||
sort.Sort(es)
|
||||
for _, e := range es {
|
||||
switch e := e.(type) {
|
||||
case *trace.Annotation:
|
||||
msg := e.Message
|
||||
if len(e.Attributes) != 0 {
|
||||
msg = msg + " " + formatAttributes(e.Attributes)
|
||||
}
|
||||
row := traceRow{Fields: [3]string{
|
||||
formatTime(e.Time),
|
||||
formatElapsed(e.Time),
|
||||
msg,
|
||||
}}
|
||||
out = append(out, row)
|
||||
case *trace.MessageEvent:
|
||||
row := traceRow{Fields: [3]string{formatTime(e.Time), formatElapsed(e.Time)}}
|
||||
switch e.EventType {
|
||||
case trace.MessageEventTypeSent:
|
||||
row.Fields[2] = fmt.Sprintf("sent message [%d bytes, %d compressed bytes]", e.UncompressedByteSize, e.CompressedByteSize)
|
||||
case trace.MessageEventTypeRecv:
|
||||
row.Fields[2] = fmt.Sprintf("received message [%d bytes, %d compressed bytes]", e.UncompressedByteSize, e.CompressedByteSize)
|
||||
}
|
||||
out = append(out, row)
|
||||
}
|
||||
}
|
||||
for i := range out {
|
||||
if len(out[i].Fields[2]) > maxTraceMessageLength {
|
||||
out[i].Fields[2] = out[i].Fields[2][:maxTraceMessageLength]
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func traceSpans(spanName string, spanType, spanSubtype int) []*trace.SpanData {
|
||||
internalTrace := internal.Trace.(interface {
|
||||
ReportActiveSpans(name string) []*trace.SpanData
|
||||
ReportSpansByError(name string, code int32) []*trace.SpanData
|
||||
ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*trace.SpanData
|
||||
})
|
||||
var spans []*trace.SpanData
|
||||
switch spanType {
|
||||
case 0: // active
|
||||
spans = internalTrace.ReportActiveSpans(spanName)
|
||||
case 1: // latency
|
||||
var min, max time.Duration
|
||||
n := len(defaultLatencies)
|
||||
if spanSubtype == 0 {
|
||||
max = defaultLatencies[0]
|
||||
} else if spanSubtype == n {
|
||||
min, max = defaultLatencies[spanSubtype-1], (1<<63)-1
|
||||
} else if 0 < spanSubtype && spanSubtype < n {
|
||||
min, max = defaultLatencies[spanSubtype-1], defaultLatencies[spanSubtype]
|
||||
}
|
||||
spans = internalTrace.ReportSpansByLatency(spanName, min, max)
|
||||
case 2: // error
|
||||
spans = internalTrace.ReportSpansByError(spanName, 0)
|
||||
}
|
||||
return spans
|
||||
}
|
||||
|
||||
func traceDataFromSpans(name string, spans []*trace.SpanData) traceData {
|
||||
data := traceData{
|
||||
Name: name,
|
||||
Num: len(spans),
|
||||
}
|
||||
for _, s := range spans {
|
||||
data.Rows = append(data.Rows, traceRows(s)...)
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
func writeTextTraces(w io.Writer, data traceData) {
|
||||
tw := tabwriter.NewWriter(w, 1, 8, 1, ' ', 0)
|
||||
fmt.Fprint(tw, "When\tElapsed(s)\tType\n")
|
||||
for _, r := range data.Rows {
|
||||
tw.Write([]byte(r.Fields[0]))
|
||||
tw.Write([]byte("\t"))
|
||||
tw.Write([]byte(r.Fields[1]))
|
||||
tw.Write([]byte("\t"))
|
||||
tw.Write([]byte(r.Fields[2]))
|
||||
if sc := r.SpanContext; sc != (trace.SpanContext{}) {
|
||||
fmt.Fprintf(tw, "trace_id: %s span_id: %s", sc.TraceID, sc.SpanID)
|
||||
if r.ParentSpanID != (trace.SpanID{}) {
|
||||
fmt.Fprintf(tw, " parent_span_id: %s", r.ParentSpanID)
|
||||
}
|
||||
}
|
||||
tw.Write([]byte("\n"))
|
||||
}
|
||||
tw.Flush()
|
||||
}
|
||||
|
||||
type summaryPageData struct {
|
||||
Header []string
|
||||
LatencyBucketNames []string
|
||||
Links bool
|
||||
TracesEndpoint string
|
||||
Rows []summaryPageRow
|
||||
}
|
||||
|
||||
type summaryPageRow struct {
|
||||
Name string
|
||||
Active int
|
||||
Latency []int
|
||||
Errors int
|
||||
}
|
||||
|
||||
func getSummaryPageData() summaryPageData {
|
||||
data := summaryPageData{
|
||||
Links: true,
|
||||
TracesEndpoint: "tracez",
|
||||
}
|
||||
internalTrace := internal.Trace.(interface {
|
||||
ReportSpansPerMethod() map[string]internal.PerMethodSummary
|
||||
})
|
||||
for name, s := range internalTrace.ReportSpansPerMethod() {
|
||||
if len(data.Header) == 0 {
|
||||
data.Header = []string{"Name", "Active"}
|
||||
for _, b := range s.LatencyBuckets {
|
||||
l := b.MinLatency
|
||||
s := fmt.Sprintf(">%v", l)
|
||||
if l == 100*time.Second {
|
||||
s = ">100s"
|
||||
}
|
||||
data.Header = append(data.Header, s)
|
||||
data.LatencyBucketNames = append(data.LatencyBucketNames, s)
|
||||
}
|
||||
data.Header = append(data.Header, "Errors")
|
||||
}
|
||||
row := summaryPageRow{Name: name, Active: s.Active}
|
||||
for _, l := range s.LatencyBuckets {
|
||||
row.Latency = append(row.Latency, l.Size)
|
||||
}
|
||||
for _, e := range s.ErrorBuckets {
|
||||
row.Errors += e.Size
|
||||
}
|
||||
data.Rows = append(data.Rows, row)
|
||||
}
|
||||
sort.Slice(data.Rows, func(i, j int) bool {
|
||||
return data.Rows[i].Name < data.Rows[j].Name
|
||||
})
|
||||
return data
|
||||
}
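Similarly, the text variants above can be called directly. A minimal sketch (not part of the vendored file); the span name is a placeholder, and the spanType/spanSubtype values follow the query-field semantics documented near the top of this file (0 = running, 1 = latency buckets, 2 = errors).

package main

import (
	"os"

	"go.opencensus.io/zpages"
)

func main() {
	zpages.WriteTextTracezSummary(os.Stdout)
	// Latency-based samples (type 1); sub-type 3 selects one of the latency buckets.
	zpages.WriteTextTracezSpans(os.Stdout, "myspan", 1, 3)
}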
|
||||
70
vendor/go.opencensus.io/zpages/zpages.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// Package zpages implements a collection of HTML pages that display RPC stats
// and trace data, and also functions to write that same data in plain text to
// an io.Writer.
//
// Users can also embed the HTML for stats and traces in custom status pages.
//
// zpages are currrently work-in-process and cannot display minutely and
// hourly stats correctly.
//
// Performance
//
// Installing the zpages has a performance overhead because additional traces
// and stats will be collected in-process. In most cases, we expect this
// overhead will not be significant but it depends on many factors, including
// how many spans your process creates and how richly annotated they are.
package zpages // import "go.opencensus.io/zpages"

import (
	"net/http"
	"path"
	"sync"

	"go.opencensus.io/internal"
)

// TODO(ramonza): Remove Handler to make initialization lazy.

// Handler is deprecated: Use Handle.
var Handler http.Handler

func init() {
	mux := http.NewServeMux()
	Handle(mux, "/")
	Handler = mux
}

// Handle adds the z-pages to the given ServeMux rooted at pathPrefix.
func Handle(mux *http.ServeMux, pathPrefix string) {
	enable()
	if mux == nil {
		mux = http.DefaultServeMux
	}
	mux.HandleFunc(path.Join(pathPrefix, "rpcz"), rpczHandler)
	mux.HandleFunc(path.Join(pathPrefix, "tracez"), tracezHandler)
	mux.Handle(path.Join(pathPrefix, "public/"), http.FileServer(fs))
}

var enableOnce sync.Once

func enable() {
	enableOnce.Do(func() {
		internal.LocalSpanStoreEnabled = true
		registerRPCViews()
	})
}
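For reference, mounting the z-pages from an application looks roughly like this (a sketch, not part of the vendored file; the mux, path prefix, and listen address are arbitrary choices).

package main

import (
	"log"
	"net/http"

	"go.opencensus.io/zpages"
)

func main() {
	mux := http.NewServeMux()
	// Registers the rpcz and tracez handlers plus the static assets under the /debug prefix.
	zpages.Handle(mux, "/debug")
	log.Fatal(http.ListenAndServe("127.0.0.1:8081", mux))
}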