Upgrade to Pro — share decks privately, control downloads, hide ads and more …

How not to Go wrong with concurrency – Artemiy Ryabinkov

How not to Go wrong with concurrency – Artemiy Ryabinkov

GopherCon Russia

April 13, 2019
Tweet

More Decks by GopherCon Russia

Other Decks in Programming

Transcript

  1. Go is expressive, concise, clean, and efficient. Its concurrency mechanisms

    make it easy to write programs that get the most out of multicore and networked machines. golang.org/doc/
  2. Two Models of Communication Shared Memory Message Passing (CSP and

    Actor Model) Locks Mutexes Implicit communication Messages Channels Explicit communication
  3. Application Shared Memory Message Passing Mutex RWMutex Wait Chan Chan

    + other ops Lib Docker 9 0 3 5 2 2 Kubernetes 6 2 0 3 6 0 etcd 5 0 0 10 5 1 CockroachDB 4 3 0 5 0 0 gRPC 2 0 0 6 2 1 BoltDB 2 0 0 0 1 0 Total 28 5 3 29 16 4 Blocking Bug Causes https://songlh.github.io/paper/go-study.pdf
  4. https://songlh.github.io/paper/go-study.pdf Our study found that message passing does not necessarily

    make multithreaded programs less error-prone than shared memory. In fact, message passing is the main cause of blocking bugs.
  5. Speedup(P processors) = Time(1 processor) / Time(P processors)

     F = inherently sequential fraction of the computation. S_MAX = P / (1 + (P − 1)·F) (Amdahl's law)
  6. F = 1% F = 5% F = 10% Max

    Speedup Processors 40 32 24 16 8 8 16 24 32 40 48 56 64
  7. CPU Bound IO Bound Progress is limited by CPU speed

    Progress is limited by I/O subsystem speed
  8. Cooperative multitasking Preemptive multitasking Process makes switch decision Process can

    monopolize processor Effective context switch Scheduler makes switch decision Prevents monopolizing Overheads involved with interrupts Fair timeslice
  9. runtime.GOMAXPROCS(1) x := 0 go func() { for { x++

    } }() time.Sleep(500 * time.Millisecond) fmt.Println(x)
  10. runtime.GOMAXPROCS(1) x := 0 go func() { for { runtime.Gosched()

    x++ } }() time.Sleep(500 * time.Millisecond) fmt.Println(x)
  11. a += 1 if a == 1 { criticalSection() }

    a += 1 if a == 1 { criticalSection() }
  12. tmp = a+1 a = tmp if a == 1

    { criticalSection() } tmp = a+1 a = tmp if a == 1 { criticalSection() }
  13. var mx sync.Mutex // .. mx.Lock() tmp = a +

    1 a = tmp if a == 1 { criticalSection() } mx.Unlock()
  14. mx.Lock() tmp = a + 1 a = tmp mx.Unlock()

    atomic.AddInt64(&a, 1) ⇔
  15. func setup() { a = "hello, world" done = true

    } var once sync.Once func doprint() { if !done { once.Do(setup) } print(a) }
  16. func setup() { done = true a = "hello, world"

    } var once sync.Once func doprint() { if !done { once.Do(setup) } print(a) }
  17. Requires test coverage Stores history of N memory accesses Reports

    no false positives. May miss data races Limits of Race Detector
  18. func call(ctx context.Context, requests []T) error { for _, req

    := range requests { err := send(ctx, req) if err != nil { return err } } return nil }
  19. func call(ctx context.Context, requests []T) error { errCh := make(chan

    error, 1) var wg sync.WaitGroup for _, req := range requests { go func() { wg.Add(1) if err = send(ctx, req); err != nil { errCh <- err } wg.Done() }() } wg.Wait() close(errCh) return <-errCh }
  20. for _, req := range requests { go func() {

    wg.Add(1) if err = send(ctx, req); err != nil { errCh <- err } wg.Done() }() } wg.Wait() close(errCh) return <-errCh
  21. for _, req := range requests { go func() {

    wg.Add(1) if err = send(ctx, req); err != nil { errCh <- err } wg.Done() }() } wg.Wait() close(errCh) return <-errCh
  22. wg.Add(len(requests)) for _, req := range requests { go func()

    { if err = send(ctx, req); err != nil { errCh <- err } wg.Done() }() } wg.Wait() close(errCh) return <-errCh
  23. wg.Add(len(requests)) for _, req := range requests { go func()

    { if err = send(ctx, req); err != nil { errCh <- err } wg.Done() }() } wg.Wait() close(errCh) return <-errCh
  24. wg.Add(len(requests)) for _, req := range requests { go func(req

    T) { if err = send(ctx, req); err != nil { errCh <- err } wg.Done() }(req) } wg.Wait() close(errCh) return <-errCh
  25. errCh := make(chan error, 1) for _, req := range

    requests { go func() { if err = send(ctx, req); err != nil { errCh <- err } }() } ... return <-errCh
  26. errCh := make(chan error, 1) for _, req := range

    requests { go func() { if err = send(ctx, req); err != nil { errCh <- err } }() } return <-errCh
  27. errCh := make(chan error, len(requests)) for _, req := range

    requests { go func() { if err = send(ctx, req); err != nil { errCh <- err } }() } return <-errCh
  28. func call(ctx context.Context, requests []T) error { errCh := make(chan

    error, len(requests)) var wg sync.WaitGroup wg.Add(len(requests)) for _, req := range requests { go func(req string) { if err = send(ctx, req); err != nil { errCh <- err } wg.Done() }(req) } wg.Wait() close(errCh) return <-errCh } Dave Cheney: Concurrency made easy
  29. func call(ctx context.Context, requests []T) error { g, ctx :=

    errgroup.WithContext(ctx) for _, req := range requests { req := req g.Go(func() error { return send(ctx, req) }) } return g.Wait() }
  30. func call(ctx context.Context, requests []T) error { for _, req

    := range requests { err := send(ctx, req) if err != nil { return err } } return nil }
  31. ctx := context.Background() g, ctx := errgroup.WithContext(ctx) g.Go(func() error {

    return DoA(ctx) }) g.Go(func() error { return DoB(ctx) }) err := g.Wait()
  32. As Simple as Possible, but not Simpler Get to know

    your abstractions Explicit over implicit synchronization