From 135332e65ceb36a4c4c491107e09173c7f9695ab Mon Sep 17 00:00:00 2001 From: Alexey Palazhchenko Date: Fri, 5 Jan 2018 09:37:08 +0300 Subject: [PATCH 1/6] Bump Go version. --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 4f177c522..cd27a3120 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ sudo: required language: go go: -- 1.8.x +- 1.9.x - tip env: From eae1968ef8cd946eb82e6821d05b1dec067fe3a0 Mon Sep 17 00:00:00 2001 From: Alexey Palazhchenko Date: Fri, 5 Jan 2018 09:41:36 +0300 Subject: [PATCH 2/6] PMM-1485 Remove MyRocks/RocksDB collectors. --- collector/engine_rocksdb.go | 174 ------------------ collector/engine_rocksdb_test.go | 73 -------- collector/info_schema_rocksdb_cfstats.go | 40 ---- collector/info_schema_rocksdb_cfstats_test.go | 48 ----- collector/info_schema_rocksdb_dbstats.go | 36 ---- collector/info_schema_rocksdb_dbstats_test.go | 47 ----- mysqld_exporter.go | 55 ------ 7 files changed, 473 deletions(-) delete mode 100644 collector/engine_rocksdb.go delete mode 100644 collector/engine_rocksdb_test.go delete mode 100644 collector/info_schema_rocksdb_cfstats.go delete mode 100644 collector/info_schema_rocksdb_cfstats_test.go delete mode 100644 collector/info_schema_rocksdb_dbstats.go delete mode 100644 collector/info_schema_rocksdb_dbstats_test.go diff --git a/collector/engine_rocksdb.go b/collector/engine_rocksdb.go deleted file mode 100644 index e9c9a3060..000000000 --- a/collector/engine_rocksdb.go +++ /dev/null @@ -1,174 +0,0 @@ -// Scrape `SHOW ENGINE ROCKSDB STATUS`. - -package collector - -import ( - "bufio" - "database/sql" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" -) - -const ( - // Subsystem. - rocksDB = "engine_rocksdb" - // Query. - engineRocksDBStatusQuery = `SHOW ENGINE ROCKSDB STATUS` -) - -var ( - rocksDBCounter = regexp.MustCompile(`^([a-z\.]+) COUNT : (\d+)$`) - - // Map known SHOW ENGINE ROCKSDB STATUS counters to the metric descriptions. - // Unknown counters will be skipped. 
- engineRocksDBStatusTypes = map[string]*prometheus.Desc{ - "rocksdb.block.cache.miss": newDesc(rocksDB, "block_cache_miss", "rocksdb.block.cache.miss"), - "rocksdb.block.cache.hit": newDesc(rocksDB, "block_cache_hit", "rocksdb.block.cache.hit"), - "rocksdb.block.cache.add": newDesc(rocksDB, "block_cache_add", "rocksdb.block.cache.add"), - "rocksdb.block.cache.add.failures": newDesc(rocksDB, "block_cache_add_failures", "rocksdb.block.cache.add.failures"), // not present in SHOW GLOBAL STATUS - - "rocksdb.block.cache.data.miss": newDesc(rocksDB, "block_cache_data_miss", "rocksdb.block.cache.data.miss"), - "rocksdb.block.cache.data.hit": newDesc(rocksDB, "block_cache_data_hit", "rocksdb.block.cache.data.hit"), - "rocksdb.block.cache.data.add": newDesc(rocksDB, "block_cache_data_add", "rocksdb.block.cache.data.add"), // not present in SHOW GLOBAL STATUS - - "rocksdb.block.cache.index.miss": newDesc(rocksDB, "block_cache_index_miss", "rocksdb.block.cache.index.miss"), - "rocksdb.block.cache.index.hit": newDesc(rocksDB, "block_cache_index_hit", "rocksdb.block.cache.index.hit"), - "rocksdb.block.cache.index.add": newDesc(rocksDB, "block_cache_index_add", "rocksdb.block.cache.index.add"), // not present in SHOW GLOBAL STATUS - - "rocksdb.block.cache.index.bytes.insert": newDesc(rocksDB, "block_cache_index_bytes_insert", "rocksdb.block.cache.index.bytes.insert"), // not present in SHOW GLOBAL STATUS - "rocksdb.block.cache.index.bytes.evict": newDesc(rocksDB, "block_cache_index_bytes_evict", "rocksdb.block.cache.index.bytes.evict"), // not present in SHOW GLOBAL STATUS - - "rocksdb.block.cache.filter.miss": newDesc(rocksDB, "block_cache_filter_miss", "rocksdb.block.cache.filter.miss"), - "rocksdb.block.cache.filter.hit": newDesc(rocksDB, "block_cache_filter_hit", "rocksdb.block.cache.filter.hit"), - "rocksdb.block.cache.filter.add": newDesc(rocksDB, "block_cache_filter_add", "rocksdb.block.cache.filter.add"), // not present in SHOW GLOBAL STATUS - - "rocksdb.block.cache.filter.bytes.insert": newDesc(rocksDB, "block_cache_filter_bytes_insert", "rocksdb.block.cache.filter.bytes.insert"), // not present in SHOW GLOBAL STATUS - "rocksdb.block.cache.filter.bytes.evict": newDesc(rocksDB, "block_cache_filter_bytes_evict", "rocksdb.block.cache.filter.bytes.evict"), // not present in SHOW GLOBAL STATUS - - "rocksdb.block.cache.bytes.read": newDesc(rocksDB, "block_cache_bytes_read", "rocksdb.block.cache.bytes.read"), // not present in SHOW GLOBAL STATUS - "rocksdb.block.cache.bytes.write": newDesc(rocksDB, "block_cache_bytes_write", "rocksdb.block.cache.bytes.write"), // not present in SHOW GLOBAL STATUS - - "rocksdb.block.cache.data.bytes.insert": newDesc(rocksDB, "block_cache_data_bytes_insert", "rocksdb.block.cache.data.bytes.insert"), // not present in SHOW GLOBAL STATUS - - "rocksdb.bloom.filter.useful": newDesc(rocksDB, "bloom_filter_useful", "rocksdb.bloom.filter.useful"), - - "rocksdb.memtable.miss": newDesc(rocksDB, "memtable_miss", "rocksdb.memtable.miss"), - "rocksdb.memtable.hit": newDesc(rocksDB, "memtable_hit", "rocksdb.memtable.hit"), - - "rocksdb.l0.hit": newDesc(rocksDB, "l0_hit", "rocksdb.l0.hit"), // not present in SHOW GLOBAL STATUS - "rocksdb.l1.hit": newDesc(rocksDB, "l1_hit", "rocksdb.l1.hit"), // not present in SHOW GLOBAL STATUS - - "rocksdb.number.keys.read": newDesc(rocksDB, "number_keys_read", "rocksdb.number.keys.read"), - "rocksdb.number.keys.written": newDesc(rocksDB, "number_keys_written", "rocksdb.number.keys.written"), - "rocksdb.number.keys.updated": newDesc(rocksDB, 
"number_keys_updated", "rocksdb.number.keys.updated"), - - "rocksdb.bytes.read": newDesc(rocksDB, "bytes_read", "rocksdb.bytes.read"), - "rocksdb.bytes.written": newDesc(rocksDB, "bytes_written", "rocksdb.bytes.written"), - - "rocksdb.number.db.seek": newDesc(rocksDB, "number_db_seek", "rocksdb.number.db.seek"), // not present in SHOW GLOBAL STATUS - "rocksdb.number.db.seek.found": newDesc(rocksDB, "number_db_seek_found", "rocksdb.number.db.seek.found"), // not present in SHOW GLOBAL STATUS - "rocksdb.number.db.next": newDesc(rocksDB, "number_db_next", "rocksdb.number.db.next"), // not present in SHOW GLOBAL STATUS - "rocksdb.number.db.next.found": newDesc(rocksDB, "number_db_next_found", "rocksdb.number.db.next.found"), // not present in SHOW GLOBAL STATUS - "rocksdb.number.db.prev": newDesc(rocksDB, "number_db_prev", "rocksdb.number.db.prev"), // not present in SHOW GLOBAL STATUS - "rocksdb.number.db.prev.found": newDesc(rocksDB, "number_db_prev_found", "rocksdb.number.db.prev.found"), // not present in SHOW GLOBAL STATUS - - "rocksdb.db.iter.bytes.read": newDesc(rocksDB, "db_iter_bytes_read", "rocksdb.db.iter.bytes.read"), // not present in SHOW GLOBAL STATUS - "rocksdb.number.reseeks.iteration": newDesc(rocksDB, "number_reseeks_iteration", "rocksdb.number.reseeks.iteration"), - - "rocksdb.wal.synced": newDesc(rocksDB, "wal_synced", "rocksdb.wal.synced"), - "rocksdb.wal.bytes": newDesc(rocksDB, "wal_bytes", "rocksdb.wal.bytes"), - - "rocksdb.no.file.opens": newDesc(rocksDB, "no_file_opens", "rocksdb.no.file.opens"), - "rocksdb.no.file.closes": newDesc(rocksDB, "no_file_closes", "rocksdb.no.file.closes"), - "rocksdb.no.file.errors": newDesc(rocksDB, "no_file_errors", "rocksdb.no.file.errors"), - - "rocksdb.write.self": newDesc(rocksDB, "write_self", "rocksdb.write.self"), - "rocksdb.write.other": newDesc(rocksDB, "write_other", "rocksdb.write.other"), - "rocksdb.write.timeout": newDesc(rocksDB, "write_timeout", "rocksdb.write.timeout"), // called `rocksdb_write_timedout` in SHOW GLOBAL STATUS - "rocksdb.write.wal": newDesc(rocksDB, "write_wal", "rocksdb.write.wal"), - } -) - -func parseRocksDBStatistics(data string) ([]prometheus.Metric, error) { - var metrics []prometheus.Metric - scanner := bufio.NewScanner(strings.NewReader(data)) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if len(line) == 0 { - continue - } - m := rocksDBCounter.FindStringSubmatch(line) - if len(m) == 0 { - continue - } - - value, err := strconv.Atoi(m[2]) - if err != nil { - log.Warnf("failed to parse: %s", scanner.Text()) - continue - } - description := engineRocksDBStatusTypes[m[1]] - if description == nil { - continue - } - metrics = append(metrics, prometheus.MustNewConstMetric(description, prometheus.CounterValue, float64(value))) - } - return metrics, scanner.Err() -} - -// ScrapeEngineRocksDBStatus scrapes from `SHOW ENGINE ROCKSDB STATUS`. -func ScrapeEngineRocksDBStatus(db *sql.DB, ch chan<- prometheus.Metric) error { - rows, err := db.Query(engineRocksDBStatusQuery) - if err != nil { - return err - } - defer rows.Close() - - var typeCol, nameCol, statusCol string - for rows.Next() { - if err := rows.Scan(&typeCol, &nameCol, &statusCol); err != nil { - return err - } - - if typeCol == "STATISTICS" && nameCol == "rocksdb" { - metrics, err := parseRocksDBStatistics(statusCol) - for _, m := range metrics { - ch <- m - } - if err != nil { - return err - } - } - } - return rows.Err() -} - -// RocksDBEnabled returns true if RocksDB is enabled, false otherwise. 
-func RocksDBEnabled(db *sql.DB) (bool, error) { - rows, err := db.Query("SHOW ENGINES") - if err != nil { - return false, err - } - defer rows.Close() - - var engine, support string - var dummy interface{} - for rows.Next() { - if err = rows.Scan(&engine, &support, &dummy, &dummy, &dummy, &dummy); err != nil { - return false, err - } - - if engine != "ROCKSDB" { - continue - } - return support == "YES" || support == "DEFAULT", nil - } - if err = rows.Err(); err != nil { - return false, err - } - return false, nil -} diff --git a/collector/engine_rocksdb_test.go b/collector/engine_rocksdb_test.go deleted file mode 100644 index c2fe546f8..000000000 --- a/collector/engine_rocksdb_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package collector - -import ( - "database/sql" - "testing" - "time" - - _ "github.com/go-sql-driver/mysql" // register driver - "github.com/percona/exporter_shared/helpers" - "github.com/prometheus/client_golang/prometheus" - "github.com/smartystreets/goconvey/convey" -) - -// getDB waits until MySQL is up and returns opened connection. -func getDB(t testing.TB) *sql.DB { - var db *sql.DB - var err error - for i := 0; i < 20; i++ { - db, err = sql.Open("mysql", "root@/mysql") - if err == nil { - err = db.Ping() - } - if err == nil { - return db - } - t.Log(err) - time.Sleep(time.Second) - } - t.Fatalf("Failed to get database connection: %s", err) - panic("not reached") -} - -func TestScrapeEngineRocksDBStatus(t *testing.T) { - if testing.Short() { - t.Skip("-short is passed, skipping test") - } - - db := getDB(t) - enabled, err := RocksDBEnabled(db) - if err != nil { - t.Fatal(err) - } - if !enabled { - t.Skip("RocksDB is not enabled, skipping test") - } - - convey.Convey("Metrics collection", t, convey.FailureContinues, func() { - ch := make(chan prometheus.Metric) - go func() { - err := ScrapeEngineRocksDBStatus(db, ch) - if err != nil { - t.Error(err) - } - close(ch) - }() - - // check that we found all metrics we expect - expected := make(map[string]struct{}) - for k := range engineRocksDBStatusTypes { - expected[k] = struct{}{} - } - for m := range ch { - got := helpers.ReadMetric(m) - convey.So(expected, convey.ShouldContainKey, got.Help) - delete(expected, got.Help) - } - // two exceptions - convey.So(expected, convey.ShouldResemble, map[string]struct{}{ - "rocksdb.l0.hit": struct{}{}, - "rocksdb.l1.hit": struct{}{}, - }) - }) -} diff --git a/collector/info_schema_rocksdb_cfstats.go b/collector/info_schema_rocksdb_cfstats.go deleted file mode 100644 index c8c9cf4c1..000000000 --- a/collector/info_schema_rocksdb_cfstats.go +++ /dev/null @@ -1,40 +0,0 @@ -// Scrape `information_schema.rocksdb_cfstats`. - -package collector - -import ( - "database/sql" - "strings" - - "github.com/prometheus/client_golang/prometheus" -) - -const infoSchemaRocksDBCFStatsQuery = `SELECT cf_name, stat_type, value FROM information_schema.rocksdb_cfstats` - -// ScrapeRocksDBCFStats collects from `information_schema.rocksdb_cfstats`. 
-func ScrapeRocksDBCFStats(db *sql.DB, ch chan<- prometheus.Metric) error { - rows, err := db.Query(infoSchemaRocksDBCFStatsQuery) - if err != nil { - return err - } - defer rows.Close() - - var nameCol, typeCol string - var valueCol int64 - for rows.Next() { - if err = rows.Scan(&nameCol, &typeCol, &valueCol); err != nil { - return err - } - - ch <- prometheus.MustNewConstMetric( - prometheus.NewDesc( - prometheus.BuildFQName(namespace, "rocksdb_cfstats", strings.ToLower(typeCol)), - typeCol, []string{"name"}, nil, - ), - prometheus.UntypedValue, - float64(valueCol), - nameCol, - ) - } - return rows.Err() -} diff --git a/collector/info_schema_rocksdb_cfstats_test.go b/collector/info_schema_rocksdb_cfstats_test.go deleted file mode 100644 index 10243179b..000000000 --- a/collector/info_schema_rocksdb_cfstats_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package collector - -import ( - "testing" - - "github.com/percona/exporter_shared/helpers" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestScrapeRocksDBCFStats(t *testing.T) { - if testing.Short() { - t.Skip("-short is passed, skipping test") - } - - db := getDB(t) - enabled, err := RocksDBEnabled(db) - if err != nil { - t.Fatal(err) - } - if !enabled { - t.Skip("RocksDB is not enabled, skipping test") - } - - convey.Convey("Metrics collection", t, func() { - ch := make(chan prometheus.Metric) - go func() { - err := ScrapeRocksDBCFStats(db, ch) - if err != nil { - t.Error(err) - } - close(ch) - }() - - var found int - for m := range ch { - got := helpers.ReadMetric(m) - if got.Name == "mysql_rocksdb_cfstats_cur_size_all_mem_tables" { - convey.So(got.Type, convey.ShouldEqual, dto.MetricType_UNTYPED) - convey.So(got.Value, convey.ShouldBeGreaterThan, 0) - convey.So(got.Labels, convey.ShouldContainKey, "name") - found += 1 - } - } - convey.So(found, convey.ShouldEqual, 2) - }) -} diff --git a/collector/info_schema_rocksdb_dbstats.go b/collector/info_schema_rocksdb_dbstats.go deleted file mode 100644 index 36f69d6ca..000000000 --- a/collector/info_schema_rocksdb_dbstats.go +++ /dev/null @@ -1,36 +0,0 @@ -// Scrape `information_schema.rocksdb_dbstats`. - -package collector - -import ( - "database/sql" - "strings" - - "github.com/prometheus/client_golang/prometheus" -) - -const infoSchemaRocksDBDBStatsQuery = `SELECT stat_type, value FROM information_schema.rocksdb_dbstats` - -// ScrapeRocksDBDBStats collects from `information_schema.rocksdb_dbstats`. 
-func ScrapeRocksDBDBStats(db *sql.DB, ch chan<- prometheus.Metric) error { - rows, err := db.Query(infoSchemaRocksDBDBStatsQuery) - if err != nil { - return err - } - defer rows.Close() - - var typeCol string - var valueCol int64 - for rows.Next() { - if err = rows.Scan(&typeCol, &valueCol); err != nil { - return err - } - - ch <- prometheus.MustNewConstMetric( - newDesc("rocksdb_dbstats", strings.ToLower(typeCol), typeCol), - prometheus.UntypedValue, - float64(valueCol), - ) - } - return rows.Err() -} diff --git a/collector/info_schema_rocksdb_dbstats_test.go b/collector/info_schema_rocksdb_dbstats_test.go deleted file mode 100644 index f80d46887..000000000 --- a/collector/info_schema_rocksdb_dbstats_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package collector - -import ( - "testing" - - "github.com/percona/exporter_shared/helpers" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestScrapeRocksDBDBStats(t *testing.T) { - if testing.Short() { - t.Skip("-short is passed, skipping test") - } - - db := getDB(t) - enabled, err := RocksDBEnabled(db) - if err != nil { - t.Fatal(err) - } - if !enabled { - t.Skip("RocksDB is not enabled, skipping test") - } - - convey.Convey("Metrics collection", t, func() { - ch := make(chan prometheus.Metric) - go func() { - err := ScrapeRocksDBDBStats(db, ch) - if err != nil { - t.Error(err) - } - close(ch) - }() - - var found int - for m := range ch { - got := helpers.ReadMetric(m) - if got.Name == "mysql_rocksdb_dbstats_db_block_cache_usage" { - convey.So(got.Type, convey.ShouldEqual, dto.MetricType_UNTYPED) - convey.So(got.Value, convey.ShouldBeGreaterThan, 0) - found++ - } - } - convey.So(found, convey.ShouldEqual, 1) - }) -} diff --git a/mysqld_exporter.go b/mysqld_exporter.go index 657f124d2..e4e3aa0e1 100644 --- a/mysqld_exporter.go +++ b/mysqld_exporter.go @@ -135,30 +135,12 @@ var ( collectQueryResponseTime = flag.Bool("collect.info_schema.query_response_time", false, "Collect query response time distribution if query_response_time_stats is ON.", ) - - // Those MyRocks collectors work but are not currently used by our dashboard, so disable them. 
- // TODO enable when we need them - removeMeFalse = false - collectRocksDBCFStats = &removeMeFalse - collectRocksDBDBStats = &removeMeFalse - /* - collectRocksDBCFStats = flag.Bool("collect.info_schema.rocksdb_cfstats", false, - "Collect RocksDB column family statistics", - ) - collectRocksDBDBStats = flag.Bool("collect.info_schema.rocksdb_dbstats", false, - "Collect RocksDB database statistics", - ) - */ - collectEngineTokudbStatus = flag.Bool("collect.engine_tokudb_status", false, "Collect from SHOW ENGINE TOKUDB STATUS", ) collectEngineInnodbStatus = flag.Bool("collect.engine_innodb_status", false, "Collect from SHOW ENGINE INNODB STATUS", ) - collectEngineRocksDBStatus = flag.String("collect.engine_rocksdb_status", "auto", - "Collect from SHOW ENGINE ROCKSDB STATUS", - ) collectHeartbeat = flag.Bool( "collect.heartbeat", false, "Collect from heartbeat", @@ -494,19 +476,6 @@ func (e *ExporterMr) scrape(ch chan<- prometheus.Metric) { } versionNum := getMySQLVersion(db) - var rocksDBEnabled bool - if *collectEngineRocksDBStatus == "auto" { - rocksDBEnabled, err = collector.RocksDBEnabled(db) - if err != nil { - log.Warnln("Error to detect MyRocks support:", err) - } - } else { - rocksDBEnabled, err = strconv.ParseBool(*collectEngineRocksDBStatus) - if err != nil { - log.Warnln("Failed to parse -collect.engine_rocksdb_status, assuming false:", err) - } - } - if *slowLogFilter { sessionSettingsRows, err := db.Query(sessionSettingsQuery) if err != nil { @@ -566,22 +535,6 @@ func (e *ExporterMr) scrape(ch chan<- prometheus.Metric) { } ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.query_response_time") } - if *collectRocksDBCFStats { - scrapeTime = time.Now() - if err = collector.ScrapeRocksDBCFStats(db, ch); err != nil { - log.Errorln("Error scraping for collect.info_schema.rocksdb_cfstats:", err) - e.scrapeErrors.WithLabelValues("collect.info_schema.rocksdb_cfstats").Inc() - } - ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.rocksdb_cfstats") - } - if *collectRocksDBDBStats { - scrapeTime = time.Now() - if err = collector.ScrapeRocksDBDBStats(db, ch); err != nil { - log.Errorln("Error scraping for collect.info_schema.rocksdb_dbstats:", err) - e.scrapeErrors.WithLabelValues("collect.info_schema.rocksdb_dbstats").Inc() - } - ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.info_schema.rocksdb_dbstats") - } if *collectEngineInnodbStatus { scrapeTime = time.Now() if err = collector.ScrapeEngineInnodbStatus(db, ch); err != nil { @@ -590,14 +543,6 @@ func (e *ExporterMr) scrape(ch chan<- prometheus.Metric) { } ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.engine_innodb_status") } - if rocksDBEnabled { - scrapeTime = time.Now() - if err = collector.ScrapeEngineRocksDBStatus(db, ch); err != nil { - log.Errorln("Error scraping for collect.engine_rocksdb_status:", err) - e.scrapeErrors.WithLabelValues("collect.engine_rocksdb_status").Inc() - } - ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "collect.engine_rocksdb_status") - } } func (e *ExporterLr) scrape(ch chan<- prometheus.Metric) { From 08eb9b7baef9b33ce8449fd1d5de045786006402 Mon Sep 17 00:00:00 2001 From: Alexey Palazhchenko Date: Fri, 5 Jan 2018 09:45:12 
+0300 Subject: [PATCH 3/6] PMM-1485 Apply gofmt. --- mysqld_exporter_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mysqld_exporter_test.go b/mysqld_exporter_test.go index 8911ec33c..5c9ccd7bd 100644 --- a/mysqld_exporter_test.go +++ b/mysqld_exporter_test.go @@ -6,18 +6,18 @@ import ( "context" "fmt" "io/ioutil" + "net" "net/http" + "net/url" "os" "os/exec" "reflect" "regexp" "runtime" "strings" + "syscall" "testing" "time" - "net/url" - "net" - "syscall" "github.com/smartystreets/goconvey/convey" "gopkg.in/DATA-DOG/go-sqlmock.v1" @@ -283,7 +283,7 @@ func testLandingPage(t *testing.T, data binData) { // Get the main page, but we need to wait a bit for http server var resp *http.Response var err error - for i:=0; i<= 10; i++ { + for i := 0; i <= 10; i++ { // Try to get main page resp, err = http.Get("http://127.0.0.1:9104") if err == nil { From 3629c34fedb91abb5c76a658aa80472c45509f06 Mon Sep 17 00:00:00 2001 From: Alexey Palazhchenko Date: Fri, 5 Jan 2018 13:47:05 +0300 Subject: [PATCH 4/6] PMM-1485 Add help text for known variables. --- collector/global_variables.go | 99 +++++++++++++++++++++++++++++++++-- 1 file changed, 96 insertions(+), 3 deletions(-) diff --git a/collector/global_variables.go b/collector/global_variables.go index c331c3678..8f710d19e 100644 --- a/collector/global_variables.go +++ b/collector/global_variables.go @@ -18,6 +18,92 @@ const ( globalVariablesQuery = `SHOW GLOBAL VARIABLES` ) +var ( + // Map known global variables to help strings. Unknown will be mapped to generic gauges. + globalVariablesHelp = map[string]string{ + // https://github.com/facebook/mysql-5.6/wiki/New-MySQL-RocksDB-Server-Variables + "rocksdb_access_hint_on_compaction_start": "File access pattern once a compaction is started, applied to all input files of a compaction.", + "rocksdb_advise_random_on_open": "Hint of random access to the filesystem when a data file is opened.", + "rocksdb_allow_concurrent_memtable_write": "Allow multi-writers to update memtables in parallel.", + "rocksdb_allow_mmap_reads": "Allow the OS to mmap a data file for reads.", + "rocksdb_allow_mmap_writes": "Allow the OS to mmap a data file for writes.", + "rocksdb_block_cache_size": "Size of the LRU block cache in RocksDB. This memory is reserved for the block cache, which is in addition to any filesystem caching that may occur.", + "rocksdb_block_restart_interval": "Number of keys for each set of delta encoded data.", + "rocksdb_block_size_deviation": "If the percentage of free space in the current data block (size specified in rocksdb-block-size) is less than this amount, close the block (and write record to new block).", + "rocksdb_block_size": "Size of the data block for reading sst files.", + "rocksdb_bulk_load_size": "Sets the number of keys to accumulate before committing them to the storage engine during bulk loading.", + "rocksdb_bulk_load": "When set, MyRocks will ignore checking keys for uniqueness or acquiring locks during transactions. This option should only be used when the application is certain there are no row conflicts, such as when setting up a new MyRocks instance from an existing MySQL dump.", + "rocksdb_bytes_per_sync": "Enables the OS to sync out file writes as data files are created.", + "rocksdb_cache_index_and_filter_blocks": "Requests RocksDB to use the block cache for caching the index and bloomfilter data blocks from each data file. 
If this is not set, RocksDB will allocate additional memory to maintain these data blocks.", + "rocksdb_checksums_pct": "Sets the percentage of rows to calculate and set MyRocks checksums.", + "rocksdb_collect_sst_properties": "Enables collecting statistics of each data file for improving optimizer behavior.", + "rocksdb_commit_in_the_middle": "Commit rows implicitly every rocksdb-bulk-load-size, during bulk load/insert/update/deletes.", + "rocksdb_compaction_readahead_size": "When non-zero, bigger reads are performed during compaction. Useful if running RocksDB on spinning disks, compaction will do sequential instead of random reads.", + "rocksdb_compaction_sequential_deletes_count_sd": "If enabled, factor in single deletes as part of rocksdb-compaction-sequential-deletes.", + "rocksdb_compaction_sequential_deletes_file_size": "Threshold to trigger compaction if the number of sequential keys that are all delete markers exceed this value. While this compaction helps reduce request latency by removing delete markers, it can increase write rates of RocksDB.", + "rocksdb_compaction_sequential_deletes_window": "Threshold to trigger compaction if, within a sliding window of keys, there exists this parameter's number of delete marker.", + "rocksdb_compaction_sequential_deletes": "Enables triggering of compaction when the number of delete markers in a data file exceeds a certain threshold. Depending on workload patterns, RocksDB can potentially maintain large numbers of delete markers and increase latency of all queries.", + "rocksdb_create_if_missing": "Allows creating the RocksDB database if it does not exist.", + "rocksdb_create_missing_column_families": "Allows creating new column families if they did not exist.", + "rocksdb_db_write_buffer_size": "Size of the memtable used to store writes within RocksDB. This is the size per column family. Once this size is reached, a flush of the memtable to persistent media occurs.", + "rocksdb_deadlock_detect": "Enables deadlock detection in RocksDB.", + "rocksdb_debug_optimizer_no_zero_cardinality": "Test only to prevent MyRocks from calculating cardinality.", + "rocksdb_delayed_write_rate": "When RocksDB hits the soft limits/thresholds for writes, such as soft_pending_compaction_bytes_limit being hit, or level0_slowdown_writes_trigger being hit, RocksDB will slow the write rate down to the value of this parameter as bytes/second.", + "rocksdb_delete_obsolete_files_period_micros": "The periodicity of when obsolete files get deleted, but does not affect files removed through compaction.", + "rocksdb_enable_bulk_load_api": "Enables using the SSTFileWriter feature in RocksDB, which bypasses the memtable, but this requires keys to be inserted into the table in either ascending or descending order. 
If disabled, bulk loading uses the normal write path via the memtable and does not keys to be inserted in any order.", + "rocksdb_enable_thread_tracking": "Set to allow RocksDB to track the status of threads accessing the database.", + "rocksdb_enable_write_thread_adaptive_yield": "Set to allow RocksDB write batch group leader to wait up to the max time allowed before blocking on a mutex, allowing an increase in throughput for concurrent workloads.", + "rocksdb_error_if_exists": "If set, reports an error if an existing database already exists.", + "rocksdb_flush_log_at_trx_commit": "Sync'ing on transaction commit similar to innodb-flush-log-at-trx-commit: 0 - never sync, 1 - always sync, 2 - sync based on a timer controlled via rocksdb-background-sync", + "rocksdb_flush_memtable_on_analyze": "When analyze table is run, determines of the memtable should be flushed so that data in the memtable is also used for calculating stats.", + "rocksdb_force_compute_memtable_stats": "When enabled, also include data in the memtables for index statistics calculations used by the query optimizer. Greater accuracy, but requires more cpu.", + "rocksdb_force_flush_memtable_now": "Triggers MyRocks to flush the memtables out to the data files.", + "rocksdb_force_index_records_in_range": "When force index is used, a non-zero value here will be used as the number of rows to be returned to the query optimizer when trying to determine the estimated number of rows.", + "rocksdb_hash_index_allow_collision": "Enables RocksDB to allow hashes to collide (uses less memory). Otherwise, the full prefix is stored to prevent hash collisions.", + "rocksdb_keep_log_file_num": "Sets the maximum number of info LOG files to keep around.", + "rocksdb_lock_scanned_rows": "If enabled, rows that are scanned during UPDATE remain locked even if they have not been updated.", + "rocksdb_lock_wait_timeout": "Sets the number of seconds MyRocks will wait to acquire a row lock before aborting the request.", + "rocksdb_log_file_time_to_roll": "Sets the number of seconds a info LOG file captures before rolling to a new LOG file.", + "rocksdb_manifest_preallocation_size": "Sets the number of bytes to preallocate for the MANIFEST file in RocksDB and reduce possible random I/O on XFS. MANIFEST files are used to store information about column families, levels, active files, etc.", + "rocksdb_max_open_files": "Sets a limit on the maximum number of file handles opened by RocksDB.", + "rocksdb_max_row_locks": "Sets a limit on the maximum number of row locks held by a transaction before failing it.", + "rocksdb_max_subcompactions": "For each compaction job, the maximum threads that will work on it simultaneously (i.e. subcompactions). A value of 1 means no subcompactions.", + "rocksdb_max_total_wal_size": "Sets a limit on the maximum size of WAL files kept around. Once this limit is hit, RocksDB will force the flushing of memtables to reduce the size of WAL files.", + "rocksdb_merge_buf_size": "Size (in bytes) of the merge buffers used to accumulate data during secondary key creation. During secondary key creation the data, we avoid updating the new indexes through the memtable and L0 by writing new entries directly to the lowest level in the database. This requires the values to be sorted so we use a merge/sort algorithm. This setting controls how large the merge buffers are. 
The default is 64Mb.", + "rocksdb_merge_combine_read_size": "Size (in bytes) of the merge combine buffer used in the merge/sort algorithm as described in rocksdb-merge-buf-size.", + "rocksdb_new_table_reader_for_compaction_inputs": "Indicates whether RocksDB should create a new file descriptor and table reader for each compaction input. Doing so may use more memory but may allow pre-fetch options to be specified for compaction input files without impacting table readers used for user queries.", + "rocksdb_no_block_cache": "Disables using the block cache for a column family.", + "rocksdb_paranoid_checks": "Forces RocksDB to re-read a data file that was just created to verify correctness.", + "rocksdb_pause_background_work": "Test only to start and stop all background compactions within RocksDB.", + "rocksdb_perf_context_level": "Sets the level of information to capture via the perf context plugins.", + "rocksdb_persistent_cache_size_mb": "The size (in Mb) to allocate to the RocksDB persistent cache if desired.", + "rocksdb_pin_l0_filter_and_index_blocks_in_cache": "If rocksdb-cache-index-and-filter-blocks is true then this controls whether RocksDB 'pins' the filter and index blocks in the cache.", + "rocksdb_print_snapshot_conflict_queries": "If this is true, MyRocks will log queries that generate snapshot conflicts into the .err log.", + "rocksdb_rate_limiter_bytes_per_sec": "Controls the rate at which RocksDB is allowed to write to media via memtable flushes and compaction.", + "rocksdb_records_in_range": "Test only to override the value returned by records-in-range.", + "rocksdb_seconds_between_stat_computes": "Sets the number of seconds between recomputation of table statistics for the optimizer.", + "rocksdb_signal_drop_index_thread": "Test only to signal the MyRocks drop index thread.", + "rocksdb_skip_bloom_filter_on_read": "Indicates whether the bloom filters should be skipped on reads.", + "rocksdb_skip_fill_cache": "Requests MyRocks to skip caching data on read requests.", + "rocksdb_stats_dump_period_sec": "Sets the number of seconds to perform a RocksDB stats dump to the info LOG files.", + "rocksdb_store_row_debug_checksums": "Include checksums when writing index/table records.", + "rocksdb_strict_collation_check": "Enables MyRocks to check and verify table indexes have the proper collation settings.", + "rocksdb_table_cache_numshardbits": "Sets the number of table caches within RocksDB.", + "rocksdb_use_adaptive_mutex": "Enables adaptive mutexes in RocksDB which spins in user space before resorting to the kernel.", + "rocksdb_use_direct_reads": "Enable direct IO when opening a file for read/write. This means that data will not be cached or buffered.", + "rocksdb_use_fsync": "Requires RocksDB to use fsync instead of fdatasync when requesting a sync of a data file.", + "rocksdb_validate_tables": "Requires MyRocks to verify all of MySQL's .frm files match tables stored in RocksDB.", + "rocksdb_verify_row_debug_checksums": "Verify checksums when reading index/table records.", + "rocksdb_wal_bytes_per_sync": "Controls the rate at which RocksDB writes out WAL file data.", + "rocksdb_wal_recovery_mode": "Sets RocksDB's level of tolerance when recovering the WAL files after a system crash.", + "rocksdb_wal_size_limit_mb": "Maximum size the RocksDB WAL is allow to grow to. 
When this size is exceeded rocksdb attempts to flush sufficient memtables to allow for the deletion of the oldest log.", + "rocksdb_wal_ttl_seconds": "No WAL file older than this value should exist.", + "rocksdb_whole_key_filtering": "Enables the bloomfilter to use the whole key for filtering instead of just the prefix. In order for this to be efficient, lookups should use the whole key for matching.", + "rocksdb_write_disable_wal": "Disables logging data to the WAL files. Useful for bulk loading.", + "rocksdb_write_ignore_missing_column_families": "If 1, then writes to column families that do not exist is ignored by RocksDB.", + } +) + // ScrapeGlobalVariables collects from `SHOW GLOBAL VARIABLES`. func ScrapeGlobalVariables(db *sql.DB, ch chan<- prometheus.Metric) error { globalVariablesRows, err := db.Query(globalVariablesQuery) @@ -37,18 +123,25 @@ func ScrapeGlobalVariables(db *sql.DB, ch chan<- prometheus.Metric) error { } for globalVariablesRows.Next() { - if err := globalVariablesRows.Scan(&key, &val); err != nil { + if err = globalVariablesRows.Scan(&key, &val); err != nil { return err } + key = strings.ToLower(key) if floatVal, ok := parseStatus(val); ok { + help := globalVariablesHelp[key] + if help == "" { + help = "Generic gauge metric from SHOW GLOBAL VARIABLES." + } ch <- prometheus.MustNewConstMetric( - newDesc(globalVariables, key, "Generic gauge metric from SHOW GLOBAL VARIABLES."), + newDesc(globalVariables, key, help), prometheus.GaugeValue, floatVal, ) continue - } else if _, ok := textItems[key]; ok { + } + + if _, ok := textItems[key]; ok { textItems[key] = string(val) } } From 592653d62b0b78a656cc12426fe6f1099db367fc Mon Sep 17 00:00:00 2001 From: Kamil Dziedzic Date: Wed, 17 Jan 2018 08:03:32 +0100 Subject: [PATCH 5/6] PMM-1920: Standard Metrics (#15) * PMM-1920: Standard Metrics * try to fix port allocation * not sure why tests sometimes fail, let's try increase timeouts * fix path * forgot about ssl:/ * fix passing port * PMM-1920 Expose standard metrics as HR only. 
--- mysqld_exporter.go | 36 ++++++------- mysqld_exporter_test.go | 116 +++++++++++++++++++++++++++++++--------- 2 files changed, 109 insertions(+), 43 deletions(-) diff --git a/mysqld_exporter.go b/mysqld_exporter.go index e4e3aa0e1..c493e7266 100644 --- a/mysqld_exporter.go +++ b/mysqld_exporter.go @@ -793,25 +793,25 @@ func main() { // https mux := http.NewServeMux() - reg := prometheus.NewRegistry() - reg.MustRegister(NewExporter(dsn)) - handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{}) + registryHr := prometheus.NewRegistry() + registryHr.MustRegister(NewExporter(dsn)) + handler := promhttp.HandlerFor(prometheus.Gatherers{prometheus.DefaultGatherer, registryHr}, promhttp.HandlerOpts{}) if cfg.User != "" && cfg.Password != "" { handler = &basicAuthHandler{handler: handler.ServeHTTP, user: cfg.User, password: cfg.Password} } mux.Handle(*metricPath+"-hr", handler) - reg = prometheus.NewRegistry() - reg.MustRegister(NewExporterMr(dsn)) - handler = promhttp.HandlerFor(reg, promhttp.HandlerOpts{}) + registryMr := prometheus.NewRegistry() + registryMr.MustRegister(NewExporterMr(dsn)) + handler = promhttp.HandlerFor(registryMr, promhttp.HandlerOpts{}) if cfg.User != "" && cfg.Password != "" { handler = &basicAuthHandler{handler: handler.ServeHTTP, user: cfg.User, password: cfg.Password} } mux.Handle(*metricPath+"-mr", handler) - reg = prometheus.NewRegistry() - reg.MustRegister(NewExporterLr(dsn)) - handler = promhttp.HandlerFor(reg, promhttp.HandlerOpts{}) + registryLr := prometheus.NewRegistry() + registryLr.MustRegister(NewExporterLr(dsn)) + handler = promhttp.HandlerFor(registryLr, promhttp.HandlerOpts{}) if cfg.User != "" && cfg.Password != "" { handler = &basicAuthHandler{handler: handler.ServeHTTP, user: cfg.User, password: cfg.Password} } @@ -841,25 +841,25 @@ func main() { log.Fatal(srv.ListenAndServeTLS(*sslCertFile, *sslKeyFile)) } else { // http - reg := prometheus.NewRegistry() - reg.MustRegister(NewExporter(dsn)) - handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{}) + registryHr := prometheus.NewRegistry() + registryHr.MustRegister(NewExporter(dsn)) + handler := promhttp.HandlerFor(prometheus.Gatherers{prometheus.DefaultGatherer, registryHr}, promhttp.HandlerOpts{}) if cfg.User != "" && cfg.Password != "" { handler = &basicAuthHandler{handler: handler.ServeHTTP, user: cfg.User, password: cfg.Password} } http.Handle(*metricPath+"-hr", handler) - reg = prometheus.NewRegistry() - reg.MustRegister(NewExporterMr(dsn)) - handler = promhttp.HandlerFor(reg, promhttp.HandlerOpts{}) + registryMr := prometheus.NewRegistry() + registryMr.MustRegister(NewExporterMr(dsn)) + handler = promhttp.HandlerFor(registryMr, promhttp.HandlerOpts{}) if cfg.User != "" && cfg.Password != "" { handler = &basicAuthHandler{handler: handler.ServeHTTP, user: cfg.User, password: cfg.Password} } http.Handle(*metricPath+"-mr", handler) - reg = prometheus.NewRegistry() - reg.MustRegister(NewExporterLr(dsn)) - handler = promhttp.HandlerFor(reg, promhttp.HandlerOpts{}) + registryLr := prometheus.NewRegistry() + registryLr.MustRegister(NewExporterLr(dsn)) + handler = promhttp.HandlerFor(registryLr, promhttp.HandlerOpts{}) if cfg.User != "" && cfg.Password != "" { handler = &basicAuthHandler{handler: handler.ServeHTTP, user: cfg.User, password: cfg.Password} } diff --git a/mysqld_exporter_test.go b/mysqld_exporter_test.go index 5c9ccd7bd..812a18162 100644 --- a/mysqld_exporter_test.go +++ b/mysqld_exporter_test.go @@ -145,7 +145,8 @@ func TestGetMySQLVersion(t *testing.T) { } type binData struct { - 
bin string + bin string + port int } func TestBin(t *testing.T) { @@ -189,17 +190,22 @@ func TestBin(t *testing.T) { t.Fatalf("Failed to build: %s", err) } - data := binData{ - bin: bin, - } tests := []func(*testing.T, binData){ testLandingPage, testVersion, + testDefaultGatherer, } + + portStart := 56000 t.Run(binName, func(t *testing.T) { for _, f := range tests { f := f // capture range variable fName := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name() + portStart++ + data := binData{ + bin: bin, + port: portStart, + } t.Run(fName, func(t *testing.T) { t.Parallel() f(t, data) @@ -216,6 +222,7 @@ func testVersion(t *testing.T, data binData) { ctx, data.bin, "--version", + "--web.listen-address", fmt.Sprintf(":%d", data.port), ) b := &bytes.Buffer{} @@ -271,6 +278,7 @@ func testLandingPage(t *testing.T, data binData) { cmd := exec.CommandContext( ctx, data.bin, + "--web.listen-address", fmt.Sprintf(":%d", data.port), ) cmd.Env = append(os.Environ(), "DATA_SOURCE_NAME=127.0.0.1:3306") @@ -281,13 +289,75 @@ func testLandingPage(t *testing.T, data binData) { defer cmd.Process.Kill() // Get the main page, but we need to wait a bit for http server - var resp *http.Response - var err error - for i := 0; i <= 10; i++ { + body, err := get(fmt.Sprintf("http://127.0.0.1:%d", data.port)) + if err != nil { + t.Fatal(err) + } + got := string(body) + + expected := ` +MySQLd 3-in-1 exporter + +
+<h1>MySQL 3-in-1 exporter</h1>
+<li><a href="/metrics-hr">high-res metrics</a></li>
+<li><a href="/metrics-mr">medium-res metrics</a></li>
+<li><a href="/metrics-lr">low-res metrics</a></li>
  • + + +` + if got != expected { + t.Fatalf("got '%s' but expected '%s'", got, expected) + } +} + +func testDefaultGatherer(t *testing.T, data binData) { + metricPath := "/metrics" + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + cmd := exec.CommandContext( + ctx, + data.bin, + "--web.telemetry-path", metricPath, + "--web.listen-address", fmt.Sprintf(":%d", data.port), + ) + cmd.Env = append(os.Environ(), "DATA_SOURCE_NAME=127.0.0.1:3306") + + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + defer cmd.Wait() + defer cmd.Process.Kill() + + const resolution = "hr" + body, err := get(fmt.Sprintf("http://127.0.0.1:%d%s-%s", data.port, metricPath, resolution)) + if err != nil { + t.Fatalf("unable to get metrics for '%s' resolution: %s", resolution, err) + } + got := string(body) + + metricsPrefixes := []string{ + "go_gc_duration_seconds", + "go_goroutines", + "go_memstats", + } + + for _, prefix := range metricsPrefixes { + if !strings.Contains(got, prefix) { + t.Fatalf("no metric starting with %s in resolution %s", prefix, resolution) + } + } +} + +func get(urlToGet string) (body []byte, err error) { + tries := 60 + + // Get data, but we need to wait a bit for http server + for i := 0; i <= tries; i++ { // Try to get main page - resp, err = http.Get("http://127.0.0.1:9104") + body, err = getBody(urlToGet) if err == nil { - break + return body, err } // If there is a syscall.ECONNREFUSED error (web server not available) then retry @@ -302,27 +372,23 @@ func testLandingPage(t *testing.T, data binData) { } } - t.Fatalf("%#v", err) + return nil, err + } + + return nil, fmt.Errorf("failed to GET %s: %s", urlToGet, err) +} + +func getBody(urlToGet string) ([]byte, error) { + resp, err := http.Get(urlToGet) + if err != nil { + return nil, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { - t.Fatal(err) + return nil, err } - got := string(body) - expected := ` -MySQLd 3-in-1 exporter - -
-<h1>MySQL 3-in-1 exporter</h1>
-<li><a href="/metrics-hr">high-res metrics</a></li>
-<li><a href="/metrics-mr">medium-res metrics</a></li>
-<li><a href="/metrics-lr">low-res metrics</a></li>
-</body>
-</html>
-` - if got != expected { - t.Fatalf("got '%s' but expected '%s'", got, expected) - } + return body, nil } From 49b7ccea578c674cfb43fa4c04e178da639fd522 Mon Sep 17 00:00:00 2001 From: Alexey Palazhchenko Date: Wed, 17 Jan 2018 10:05:31 +0300 Subject: [PATCH 6/6] Bump version. --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index d40238a62..7bb589584 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.10.0+percona.3 +0.10.0+percona.4
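A minimal standalone sketch of the registry layout that PATCH 5/6 introduces: the high-resolution endpoint gathers prometheus.DefaultGatherer together with its own registry, so the standard go_* and process_* metrics are exposed only on the -hr path, while the -mr and -lr endpoints serve just their own registries. The sketch assumes the default -web.telemetry-path of "/metrics" and the default listen address ":9104", and omits the DSN, flag, TLS and basic-auth wiring of the real exporter; the commented-out constructor calls stand in for NewExporter, NewExporterMr and NewExporterLr from the patch.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// High-resolution endpoint: gather the default registry as well, so the
	// standard Go runtime and process metrics are served here and only here.
	registryHr := prometheus.NewRegistry()
	// registryHr.MustRegister(NewExporter(dsn)) // high-resolution collectors, as in the patch
	http.Handle("/metrics-hr", promhttp.HandlerFor(
		prometheus.Gatherers{prometheus.DefaultGatherer, registryHr},
		promhttp.HandlerOpts{},
	))

	// Medium- and low-resolution endpoints expose only their own registries,
	// so the standard metrics are not duplicated across scrape targets.
	registryMr := prometheus.NewRegistry()
	// registryMr.MustRegister(NewExporterMr(dsn))
	http.Handle("/metrics-mr", promhttp.HandlerFor(registryMr, promhttp.HandlerOpts{}))

	registryLr := prometheus.NewRegistry()
	// registryLr.MustRegister(NewExporterLr(dsn))
	http.Handle("/metrics-lr", promhttp.HandlerFor(registryLr, promhttp.HandlerOpts{}))

	log.Fatal(http.ListenAndServe(":9104", nil))
}

Keeping the default gatherer on a single resolution avoids scraping the same go_* series three times while still making them available to Prometheus, which is what the testDefaultGatherer test added in the same patch verifies for the -hr endpoint.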