Skip to content
This repository has been archived by the owner on Aug 28, 2021. It is now read-only.

gofmt #3671

Merged
merged 1 commit into from
Sep 6, 2017
Merged

gofmt #3671

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
184 changes: 92 additions & 92 deletions samples/go/ipfs-chat/daemon.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,16 +5,16 @@
package main

import (
"bytes"
"bytes"
"context"
"encoding/base64"
"fmt"
"fmt"
"log"
"os"
"os/signal"
"runtime"
"strings"
"syscall"
"os/signal"
"runtime"
"strings"
"syscall"
"time"

"github.com/attic-labs/noms/go/d"
Expand All @@ -24,40 +24,40 @@ import (
"github.com/attic-labs/noms/go/merge"
"github.com/attic-labs/noms/go/spec"
"github.com/attic-labs/noms/go/types"
"github.com/attic-labs/noms/go/util/writers"
"github.com/attic-labs/noms/samples/go/ipfs-chat/dbg"
"github.com/ipfs/go-ipfs/core"
"github.com/attic-labs/noms/go/util/writers"
)

func runDaemon(topic string, interval time.Duration, ipfsSpec string, nodeIdx int) {
dbg.SetLogger(log.New(os.Stdout, "", 0))
stackDumpOnSIGQUIT()

stackDumpOnSIGQUIT()
sp, err := spec.ForDataset(ipfsSpec)
d.CheckErrorNoUsage(err)

// Create/Open a new network chunkstore
// Create/Open a new network chunkstore
node, nwCS := initChunkStore(sp, nodeIdx)
nwDB := datas.NewDatabase(nwCS)
nwDB := datas.NewDatabase(nwCS)

// Use the same ipfs node to create a second chunkstore that restricts data
// access to it's local blockstore
// Use the same ipfs node to create a second chunkstore that restricts data
// access to it's local blockstore
csLocal := ipfs.ChunkStoreFromIPFSNode(sp.DatabaseName, true, node)
localDB := datas.NewDatabase(csLocal)
localDS := localDB.GetDataset(sp.Path.Dataset)
// Initial the database, if necessary, to an empty commit. Committing to
// the local database will also reset the head for the network database.
localDS, err = InitDatabase(localDS)

// Initial the database, if necessary, to an empty commit. Committing to
// the local database will also reset the head for the network database.
localDS, err = InitDatabase(localDS)
d.PanicIfError(err)

// Get the head of the network dataset.
nwDS := nwDB.GetDataset(sp.Path.Dataset)
// Get the head of the network dataset.
nwDS := nwDB.GetDataset(sp.Path.Dataset)

dbg.Debug("Storing locally to: %s", sp.String())

go replicate(node, topic, nwDS, localDS, func(ds1 datas.Dataset) {
nwDS = ds1
nwDS = ds1
})

for {
Expand All @@ -76,54 +76,54 @@ func replicate(node *core.IpfsNode, topic string, nwDS, localDS datas.Dataset, d
var lastHash hash.Hash
for {
dbg.Debug("looking for msgs")
msg, err := sub.Next(context.Background())
d.PanicIfError(err)
hstring := strings.TrimSpace(string(msg.Data))
h, ok := hash.MaybeParse(hstring)
if !ok {
// if something comes across the pubsub channel that doesn't look
// like a valid hash, just print a message and ignore it.
dbg.Debug("replicate: received unknown msg: %s", hstring)
continue
}
// If we just saw this hash, then don't need to do anything
if lastHash == h {
continue
}
// we're going to process this hash
msg, err := sub.Next(context.Background())
d.PanicIfError(err)
hstring := strings.TrimSpace(string(msg.Data))
h, ok := hash.MaybeParse(hstring)
if !ok {
// if something comes across the pubsub channel that doesn't look
// like a valid hash, just print a message and ignore it.
dbg.Debug("replicate: received unknown msg: %s", hstring)
continue
}
// If we just saw this hash, then don't need to do anything
if lastHash == h {
continue
}

// we're going to process this hash
dbg.Debug("got update: %s, lastHash: %s, sender %s", h, lastHash, base64.StdEncoding.EncodeToString(msg.From))
lastHash = h
// Get the current head of the local dataset
localDB := localDS.Database()
destHeadRef := localDS.HeadRef()

// If the headRef of the local dataset is equal to the hash we just
// received, there's nothing to do
lastHash = h

// Get the current head of the local dataset
localDB := localDS.Database()
destHeadRef := localDS.HeadRef()

// If the headRef of the local dataset is equal to the hash we just
// received, there's nothing to do
if h == localDS.HeadRef().TargetHash() {
dbg.Debug("received hash same as current head, nothing to do")
continue
}

// PullCommits() iterates through all the commits that are parents of this
// commit and reads every chunk. This should ensure that all blocks are
// local.
// PullCommits() iterates through all the commits that are parents of this
// commit and reads every chunk. This should ensure that all blocks are
// local.
dbg.Debug("syncing commits")
pullCommits(h, nwDS.Database(), localDS.Database(), 0)

// Everything should be local at this point, now check to see if a
// merge or fast-forward needs to be performed.
// Everything should be local at this point, now check to see if a
// merge or fast-forward needs to be performed.
sourceCommit := localDB.ReadValue(h)
sourceRef := types.NewRef(sourceCommit)
dbg.Debug("Finding common ancestor for merge")

dbg.Debug("Finding common ancestor for merge")
a, ok := datas.FindCommonAncestor(sourceRef, destHeadRef, localDB)
if !ok {
dbg.Debug("no common ancestor, cannot merge update!")
continue
}
dbg.Debug("Checking if source commit is ancestor")
dbg.Debug("Checking if source commit is ancestor")
if a.Equals(sourceRef) {
dbg.Debug("source commit was ancestor, nothing to do")
continue
Expand All @@ -135,24 +135,24 @@ func replicate(node *core.IpfsNode, topic string, nwDS, localDS datas.Dataset, d
continue
}

dbg.Debug("We have mergeable commit")
dbg.Debug("We have mergeable commit")
left := localDS.HeadValue()
right := sourceCommit.(types.Struct).Get("value")
parent := a.TargetValue(localDB).(types.Struct).Get("value")

dbg.Debug("Starting three-way commit")
dbg.Debug("Starting three-way commit")
merged, err := merge.ThreeWay(left, right, parent, localDB, nil, nil)
if err != nil {
dbg.Debug("could not merge received data: " + err.Error())
continue
}

dbg.Debug("setting new datasetHead on localDB")
localDS, err = localDB.SetHead(localDS, localDB.WriteValue(datas.NewCommit(merged, types.NewSet(localDB, localDS.HeadRef(), sourceRef), types.EmptyStruct)))
dbg.Debug("setting new datasetHead on localDB")
localDS, err = localDB.SetHead(localDS, localDB.WriteValue(datas.NewCommit(merged, types.NewSet(localDB, localDS.HeadRef(), sourceRef), types.EmptyStruct)))
if err != nil {
dbg.Debug("call failed to SetHead on localDB, err: %s", err)
}
dbg.Debug("merged commit, new dataset head is: %ss", localDS.HeadRef().TargetHash())
dbg.Debug("merged commit, new dataset head is: %ss", localDS.HeadRef().TargetHash())
didChange(localDS)
}
}
Expand All @@ -161,35 +161,35 @@ func replicate(node *core.IpfsNode, topic string, nwDS, localDS datas.Dataset, d
// effectively stops are 'replicate' loop. I can't figure out why that is
// happening.
func pullCommits(h hash.Hash, netDB, localDB datas.Database, level int) {
// This is so we can easily tell in the log that we are not blocked in this
// function
defer func() {
if level == 0 {
dbg.Debug("EXITING PULL-COMMITS!!!")
}
}()
// This is an optimization that can lead us to a bad place, if for some
// reason we find a commit in the localDB that doesn't have all of it's
// parents then something is wrong.
// This is so we can easily tell in the log that we are not blocked in this
// function
defer func() {
if level == 0 {
dbg.Debug("EXITING PULL-COMMITS!!!")
}
}()

// This is an optimization that can lead us to a bad place, if for some
// reason we find a commit in the localDB that doesn't have all of it's
// parents then something is wrong.
dbg.Debug("pullCommits, checking in local, h: %s", h)
if localDB.ReadValue(h) != nil {
dbg.Debug("pullCommits, found local h: %s", h)
return
}
dbg.Debug("pullCommits, not found in local, reading from net h: %s", h)
v := netDB.ReadValue(h)
d.Chk.NotNil(v)
dbg.Debug("pullCommits, encoding value from net, h: %s", h)
s1 := types.EncodedValue(v)
buf := bytes.Buffer{}
fmt.Fprintf(&writers.MaxLineWriter{Dest: &bytes.Buffer{}, MaxLines: 10}, s1)
dbg.Debug("pullCommits, read from net, h: %s", h)
dbg.Debug("pullCommits, read from net, h: %s, commit: %s", h, buf.String())
// Call this function recursively on all of this commit's parents.
}

dbg.Debug("pullCommits, not found in local, reading from net h: %s", h)
v := netDB.ReadValue(h)
d.Chk.NotNil(v)

dbg.Debug("pullCommits, encoding value from net, h: %s", h)
s1 := types.EncodedValue(v)
buf := bytes.Buffer{}
fmt.Fprintf(&writers.MaxLineWriter{Dest: &bytes.Buffer{}, MaxLines: 10}, s1)
dbg.Debug("pullCommits, read from net, h: %s", h)
dbg.Debug("pullCommits, read from net, h: %s, commit: %s", h, buf.String())

// Call this function recursively on all of this commit's parents.
commit := v.(types.Struct)
parents := commit.Get("parents").(types.Set)
parents.IterAll(func(v types.Value) {
Expand All @@ -199,13 +199,13 @@ func pullCommits(h hash.Hash, netDB, localDB datas.Database, level int) {
}

// stackDumpOnSIGQUIT installs a SIGQUIT handler that prints the stack
// traces of all goroutines to stdout instead of letting the default
// SIGQUIT behavior kill the process. Useful for diagnosing a wedged
// daemon with `kill -QUIT <pid>`.
func stackDumpOnSIGQUIT() {
	// signal.Notify does not block when sending to this channel, so it
	// must be buffered or delivered signals can be dropped (see the
	// os/signal package documentation; go vet flags the unbuffered form).
	sigChan := make(chan os.Signal, 1)
	go func() {
		// 1 MiB scratch buffer; runtime.Stack truncates the dump if the
		// buffer is too small.
		stacktrace := make([]byte, 1024*1024)
		for range sigChan {
			// true => include the stacks of all goroutines, not just this one.
			length := runtime.Stack(stacktrace, true)
			fmt.Println(string(stacktrace[:length]))
		}
	}()
	signal.Notify(sigChan, syscall.SIGQUIT)
}
18 changes: 9 additions & 9 deletions samples/go/ipfs-chat/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,12 @@ import (
"log"
"os"
"regexp"
"runtime"
"strings"
"runtime"
"strings"
"syscall"
"time"

"github.com/attic-labs/noms/go/chunks"
"github.com/attic-labs/noms/go/d"
"github.com/attic-labs/noms/go/datas"
"github.com/attic-labs/noms/go/ipfs"
Expand All @@ -25,7 +26,6 @@ import (
"github.com/ipfs/go-ipfs/core"
"github.com/jroimartin/gocui"
"gopkg.in/alecthomas/kingpin.v2"
"github.com/attic-labs/noms/go/chunks"
)

const (
Expand Down Expand Up @@ -354,16 +354,16 @@ func handleEnter(body string, author string, clientTime time.Time, ds datas.Data
}

// quit is a gocui keybinding handler that terminates the UI event loop
// by returning gocui.ErrQuit.
func quit(_ *gocui.Gui, _ *gocui.View) error {
	dbg.Debug("QUITTING #####")
	return gocui.ErrQuit
}

// quitWithStack is like quit but also dumps the stack traces of all
// goroutines to stdout before terminating the UI event loop, to help
// diagnose why the UI was exited.
func quitWithStack(_ *gocui.Gui, _ *gocui.View) error {
	dbg.Debug("QUITTING WITH STACK")
	// 1 MiB scratch buffer; runtime.Stack truncates the dump if the
	// buffer is too small.
	stacktrace := make([]byte, 1024*1024)
	// true => include the stacks of all goroutines, not just this one.
	length := runtime.Stack(stacktrace, true)
	fmt.Println(string(stacktrace[:length]))
	return gocui.ErrQuit
}

func arrowUp(_ *gocui.Gui, v *gocui.View) error {
Expand Down