Go in Production at Mackerel.io

Go Conference 2014 spring
http://connpass.com/event/6370/

Shinji Tanaka

May 31, 2014

Transcript

  1. Go in Production
    At Mackerel.io
    @stanaka
    Hatena Co., Ltd.

  2. @stanaka / id:stanaka
    Shinji Tanaka (田中慎司)
    Hatena Co., Ltd.
    We’re hiring in both
    KYOTO and TOKYO!

  3. Thank you, Brad!
    Hatena is a heavy user of:

    memcached

    MogileFS

    stores a few billion objects

    metadata: over 2TB in ONE MySQL table.
    Anyway, it works!

  4. https://mackerel.io/

  5. Role-based merged graph
    [Graph screenshots] Loadavg comparison; stacked CPU usage

  6. Overview of Mackerel.io
    [Architecture diagram] Agents on servers in user environments
    (Cloud A, Cloud B) send resource usage data to mackerel.io;
    users view control panels and receive alert notifications.

  9. Written in Go
    Open-source software (Apache License 2.0)

    Reads OS/hardware/system information and resource usage

    Posts the data to the Mackerel.io REST API
    https://github.com/mackerelio/mackerel-agent
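    A rough, hypothetical sketch of what posting collected values to an HTTP
    API can look like in Go. The endpoint path, header name, and payload
    layout below are assumptions for illustration, not the actual
    mackerel-agent client code.

    package main

    import (
      "bytes"
      "encoding/json"
      "fmt"
      "net/http"
    )

    type metricValue struct {
      Name  string  `json:"name"`
      Time  int64   `json:"time"`
      Value float64 `json:"value"`
    }

    func postMetrics(apiBase, apiKey string, values []metricValue) error {
      body, err := json.Marshal(values)
      if err != nil {
        return err
      }
      req, err := http.NewRequest("POST", apiBase+"/api/v0/tsdb", bytes.NewReader(body))
      if err != nil {
        return err
      }
      req.Header.Set("Content-Type", "application/json")
      req.Header.Set("X-Api-Key", apiKey) // assumed authentication header
      resp, err := http.DefaultClient.Do(req)
      if err != nil {
        return err
      }
      defer resp.Body.Close()
      if resp.StatusCode >= 400 {
        return fmt.Errorf("API returned %s", resp.Status)
      }
      return nil
    }

    func main() {
      err := postMetrics("https://mackerel.io", "DUMMY-APIKEY",
        []metricValue{{Name: "loadavg5", Time: 1401508800, Value: 0.1}})
      fmt.Println(err)
    }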

  10. Why Go
    Need to support various environments → cross-compiler
    Easy setup → few dependencies, one binary
    Works as quietly as possible → small footprint
      VmHWM:  6468 kB
      VmRSS:  5768 kB

  11. Code Highlights in Detail

  12. Periodic Invocation
    Invoked every minute:
    go func() {
      c := time.Tick(1 * time.Second)
      last := time.Now()
      ticker <- last // sends one tick at start

      for t := range c {
        if t.Second() == 0 || t.After(last.Add(1*time.Minute)) {
          last = t
          ticker <- t
        }
      }
    }()
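    For context, a minimal self-contained version of this pattern might wire
    the ticker channel up as below; the channel setup and the consumer loop
    are assumptions for illustration, not the agent's exact code.

    package main

    import (
      "fmt"
      "time"
    )

    func main() {
      ticker := make(chan time.Time)

      go func() {
        c := time.Tick(1 * time.Second)
        last := time.Now()
        ticker <- last // send one tick immediately on start

        for t := range c {
          // Fire on the minute boundary, or once a full minute has passed.
          if t.Second() == 0 || t.After(last.Add(1*time.Minute)) {
            last = t
            ticker <- t
          }
        }
      }()

      for t := range ticker {
        fmt.Println("collect metrics at", t)
      }
    }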

  13. Collect metrics
    Invoke the collector:
    go func() {
      // Start collectMetrics concurrently
      // so that it does not block the next collectMetrics.
      sem := make(chan uint, COLLECT_METRICS_WORKER_MAX)
      for tickedTime := range ticker {
        sem <- 1
        go func(t time.Time) {
          metricsResult <- agent.collectMetrics(t)
          <-sem
        }(tickedTime)
      }
    }()

  14. Collect metrics in detail
    Agent.collectMetrics > Agent.generateValues > Generate (cpuusage.go)
    func (g *CpuusageGenerator) collectProcStatValues() ([]float64, float64, uint, error) {
      file, err := os.Open("/proc/stat")

    func (g *CpuusageGenerator) Generate() (metrics.Values, error) {
      prevValues, prevTotal, _, err := g.collectProcStatValues()

    $ cat /proc/stat
    cpu  7792253 5479 4851396 18056319678 127239 0 146818 2383839
    cpu0 5385397 1412 1970781 4509432750 103260 0 136689 876389
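    To make the /proc/stat handling concrete, here is a simplified standalone
    sketch (not the generator's actual code); it only reads the aggregate
    "cpu" line, while the real generator also keeps the field count and
    computes deltas between two samples.

    package main

    import (
      "bufio"
      "fmt"
      "os"
      "strconv"
      "strings"
    )

    // collectProcStatValues returns the per-state counters of the aggregate
    // "cpu" line and their sum.
    func collectProcStatValues() ([]float64, float64, error) {
      file, err := os.Open("/proc/stat")
      if err != nil {
        return nil, 0, err
      }
      defer file.Close()

      scanner := bufio.NewScanner(file)
      for scanner.Scan() {
        fields := strings.Fields(scanner.Text())
        if len(fields) == 0 || fields[0] != "cpu" {
          continue
        }
        values := make([]float64, 0, len(fields)-1)
        total := 0.0
        for _, f := range fields[1:] {
          v, err := strconv.ParseFloat(f, 64)
          if err != nil {
            return nil, 0, err
          }
          values = append(values, v)
          total += v
        }
        return values, total, nil
      }
      return nil, 0, fmt.Errorf("no cpu line in /proc/stat")
    }

    func main() {
      values, total, err := collectProcStatValues()
      fmt.Println(values, total, err)
    }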

  15. Collect metrics (plugin)
    A plugin writes metrics to stdout; the agent runs it as an external command:
    func (g *PluginGenerator) collectValues(command string)
      (metrics.Values, error) {
      ...
      var outBuffer bytes.Buffer
      var errBuffer bytes.Buffer

      cmd := exec.Command("/bin/sh", "-c", command)
      cmd.Stdout = &outBuffer
      cmd.Stderr = &errBuffer

      err := cmd.Run()

    Output format: {metric name}\t{metric value}\t{epoch seconds}
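    A hedged sketch of parsing that tab-separated plugin output; the type and
    function names here are made up for illustration and are not the agent's
    actual identifiers.

    package main

    import (
      "fmt"
      "strconv"
      "strings"
    )

    type pluginMetric struct {
      Name  string
      Value float64
      Time  int64
    }

    func parsePluginOutput(out string) ([]pluginMetric, error) {
      var metrics []pluginMetric
      for _, line := range strings.Split(strings.TrimSpace(out), "\n") {
        if line == "" {
          continue
        }
        fields := strings.Split(line, "\t")
        if len(fields) != 3 {
          return nil, fmt.Errorf("invalid plugin output line: %q", line)
        }
        value, err := strconv.ParseFloat(fields[1], 64)
        if err != nil {
          return nil, err
        }
        epoch, err := strconv.ParseInt(fields[2], 10, 64)
        if err != nil {
          return nil, err
        }
        metrics = append(metrics, pluginMetric{Name: fields[0], Value: value, Time: epoch})
      }
      return metrics, nil
    }

    func main() {
      out := "custom.foo.bar\t12.3\t1401508800\n"
      fmt.Println(parsePluginOutput(out))
    }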

  16. Parallel Collector
    go func() {
      var wg sync.WaitGroup
      for _, g := range generators {
        wg.Add(1)
        go func(g metrics.Generator) {
          defer wg.Done()

          values, err := g.Generate()
          ...
          processed <- values
        }(g)
      }
      wg.Wait()
      finish <- true // processed all jobs
    }()

  17. Wait for all collectors
    go func() {
      allValues := metrics.Values(make(map[string]float64))
      for {
        select {
        case values := <-processed:
          allValues.Merge(values)
        case <-finish:
          result <- allValues
          return
        }
      }
    }()
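    Putting slides 16 and 17 together, this is a fan-out/fan-in pipeline. A
    minimal self-contained sketch of the same pattern is below; the Values
    map, the Generator interface, and constGenerator are simplified stand-ins
    for the agent's metrics package.

    package main

    import (
      "fmt"
      "sync"
    )

    type Values map[string]float64

    type Generator interface {
      Generate() (Values, error)
    }

    type constGenerator struct {
      name  string
      value float64
    }

    func (g constGenerator) Generate() (Values, error) {
      return Values{g.name: g.value}, nil
    }

    func collectAll(generators []Generator) Values {
      processed := make(chan Values)
      finish := make(chan bool)
      result := make(chan Values)

      // Fan-out: run every generator in its own goroutine.
      go func() {
        var wg sync.WaitGroup
        for _, g := range generators {
          wg.Add(1)
          go func(g Generator) {
            defer wg.Done()
            values, err := g.Generate()
            if err != nil {
              return
            }
            processed <- values
          }(g)
        }
        wg.Wait()
        finish <- true // every generator's values have been handed off
      }()

      // Fan-in: merge results until the fan-out goroutine signals completion.
      go func() {
        all := Values{}
        for {
          select {
          case values := <-processed:
            for k, v := range values {
              all[k] = v
            }
          case <-finish:
            result <- all
            return
          }
        }
      }()

      return <-result
    }

    func main() {
      gens := []Generator{
        constGenerator{name: "loadavg5", value: 0.10},
        constGenerator{name: "cpu.user.percentage", value: 12.3},
      }
      fmt.Println(collectAll(gens))
    }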

  18. Post metrics
    func loop(ag *agent.Agent, api *mackerel.API, host …) {
      metricsResult := ag.Watch()
      postQueue := make(chan []*mackerel.CreatingMetricsValue, ..)
      go func() {
        for values := range postQueue {
          err := api.PostMetricsValues(values)
          ... // retry process
          time.Sleep(METRICS_POST_DEQUEUE_DELAY)
        }
      }()
      ...
      for {
        select {
        case result := <-metricsResult:
          ... // assemble results
          postQueue <- creatingValues
        }
      }
    }

  19. Multi architectures
    Build constraints
    command/
      command.go
      command_linux.go      # filename-based
      command_windows.go
    metrics/
      linux/
        cpu.go              # // +build linux (directive-based)
        memory.go
        ...
      windows/
        cpu.go              # // +build windows
        memory.go
        ...
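    For reference, a minimal sketch of what the directive-based variant looks
    like at the top of a file; the blank line after the constraint is
    required, and the package contents here are only placeholders.

    // metrics/linux/cpu.go
    // Compiled only on Linux via a build directive (filename-based selection,
    // as in command_linux.go, needs no directive at all).

    // +build linux

    package linux

    // CpuusageGenerator and the other Linux collectors live here (see slide 20).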

  20. Multi architectures
    command/command_linux.go
    func metricsGenerators(conf config.Config) []metrics.Generator {
      generators := []metrics.Generator{
        &metricsLinux.Loadavg5Generator{},
        &metricsLinux.CpuusageGenerator{Interval: 60},
        ...
      }
      for _, pluginConfig := range conf.Plugin["metrics"] {
        generators = append(generators,
          &metricsLinux.PluginGenerator{pluginConfig})
      }

      return generators
    }

  21. Build (Compile)
    A Makefile embeds GITCOMMIT and VERSION at build time:
    build: deps
      go build -ldflags="\
        -X github.com/mackerelio/mackerel-agent/version.GITCOMMIT `git rev-parse --short HEAD` \
        -X github.com/mackerelio/mackerel-agent/version.VERSION `git describe --tags --abbrev=0 | sed 's/^v//' | sed 's/\+.*$$//'` " \
        -o build/$(BIN) \
        github.com/mackerelio/mackerel-agent

    The embedded values are then used at runtime, e.g.:
    req.Header.Add("X-Revision", version.GITCOMMIT)
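    The -X flags overwrite string variables in a version package. A minimal
    sketch of such a package is below; the default values are assumptions for
    illustration.

    // version/version.go
    package version

    // Overwritten at build time by -ldflags "-X ...", as in the Makefile above.
    var (
      GITCOMMIT = "unknown" // short git commit hash
      VERSION   = "0.0.0"   // release version derived from git tags
    )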

  22. Build (Packaging)
    Use Docker to build packages in a clean environment:
    FROM centos
    RUN yum update -y
    RUN yum install -y rpmdevtools

    RUN mkdir -p /rpmbuild
    ADD ./ /rpmbuild/
    RUN chown root:root -R /rpmbuild
    CMD rpmbuild -ba /rpmbuild/SPECS/mackerel-agent.spec

  23. Daemonize
    Currently: nohup and `&`
    nohup mackerel-agent --apikey=$APIKEY --pidfile=$PIDFILE \
      $OTHER_OPTS >>$LOGFILE 2>&1 &

    Planned: follow the Debian-recommended method (not implemented yet…)
    start-stop-daemon --start --quiet --pidfile $PIDFILE \
      --background --exec mackerel-agent

  24. Detect memory leaks
    import _ "net/http/pprof"

    go func() {
      log.Println(http.ListenAndServe("localhost:6060", nil))
    }()
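    A self-contained sketch of enabling pprof in a long-running process; the
    listen address is the one shown above, everything else is an assumption.
    The profiles then appear under http://localhost:6060/debug/pprof/.

    package main

    import (
      "log"
      "net/http"
      _ "net/http/pprof" // registers /debug/pprof handlers on DefaultServeMux
    )

    func main() {
      go func() {
        log.Println(http.ListenAndServe("localhost:6060", nil))
      }()

      // ... the agent's normal work would run here ...
      select {} // block forever in this sketch
    }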

  25. Pros / Cons
    Pros

    Channels and goroutines are GREAT

    Easy to support various architectures:
    Linux, Windows, 386, x86_64, ARM…

    Cons

    String processing (parsing /proc/…) is annoying…

  26. Q & A
