Merge pull request #241 from jbenet/fix/log-debugf
fix(everything) log.Debug -> log.Debugf
jbenet committed Oct 31, 2014
2 parents 3270ab0 + cea398b commit 01a583e
Showing 16 changed files with 30 additions and 30 deletions.
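The change is mechanical but affects what actually gets logged: a Sprint-style `Debug` does not interpret printf directives, so a call like `log.Debug("Got command: %v", command)` prints the literal `%v` with the value appended, while `Debugf` interpolates the format string. Below is a minimal sketch of that distinction using a hypothetical stand-in logger (not the go-ipfs `util` logger), assuming `Debug` behaves like `fmt.Sprint` and `Debugf` like `fmt.Sprintf`, as in most Go logging libraries:

```go
package main

import "fmt"

type logger struct{}

// Debug prints its arguments Sprint-style; format directives are not expanded.
func (logger) Debug(args ...interface{}) {
	fmt.Println("DEBUG:", fmt.Sprint(args...))
}

// Debugf treats the first argument as a printf-style format string.
func (logger) Debugf(format string, args ...interface{}) {
	fmt.Println("DEBUG:", fmt.Sprintf(format, args...))
}

func main() {
	var log logger
	command := "add"

	// Before this commit: the %v directive is never expanded.
	log.Debug("Got command: %v", command) // DEBUG: Got command: %vadd

	// After this commit: the argument is interpolated as intended.
	log.Debugf("Got command: %v", command) // DEBUG: Got command: add
}
```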
4 changes: 2 additions & 2 deletions core/commands/block.go
@@ -28,7 +28,7 @@ func BlockGet(n *core.IpfsNode, args []string, opts map[string]interface{}, out
}

k := u.Key(h)
log.Debug("BlockGet key: '%q'", k)
log.Debugf("BlockGet key: '%q'", k)
ctx, _ := context.WithTimeout(context.TODO(), time.Second*5)
b, err := n.Blocks.GetBlock(ctx, k)
if err != nil {
@@ -48,7 +48,7 @@ func BlockPut(n *core.IpfsNode, args []string, opts map[string]interface{}, out
}

b := blocks.NewBlock(data)
log.Debug("BlockPut key: '%q'", b.Key())
log.Debugf("BlockPut key: '%q'", b.Key())

k, err := n.Blocks.AddBlock(b)
if err != nil {
6 changes: 3 additions & 3 deletions core/commands/object.go
@@ -19,7 +19,7 @@ func ObjectData(n *core.IpfsNode, args []string, opts map[string]interface{}, ou
if err != nil {
return fmt.Errorf("objectData error: %v", err)
}
log.Debug("objectData: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(dagnode.Data), len(dagnode.Links))
log.Debugf("objectData: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(dagnode.Data), len(dagnode.Links))

_, err = io.Copy(out, bytes.NewReader(dagnode.Data))
return err
@@ -31,7 +31,7 @@ func ObjectLinks(n *core.IpfsNode, args []string, opts map[string]interface{}, o
if err != nil {
return fmt.Errorf("objectLinks error: %v", err)
}
log.Debug("ObjectLinks: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(dagnode.Data), len(dagnode.Links))
log.Debugf("ObjectLinks: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(dagnode.Data), len(dagnode.Links))

for _, link := range dagnode.Links {
_, err = fmt.Fprintf(out, "%s %d %q\n", link.Hash.B58String(), link.Size, link.Name)
@@ -70,7 +70,7 @@ func ObjectGet(n *core.IpfsNode, args []string, opts map[string]interface{}, out
if err != nil {
return fmt.Errorf("ObjectGet error: %v", err)
}
log.Debug("objectGet: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(dagnode.Data), len(dagnode.Links))
log.Debugf("objectGet: found dagnode %q (# of bytes: %d - # links: %d)", args[0], len(dagnode.Data), len(dagnode.Links))

// sadly all encodings dont implement a common interface
var data []byte
2 changes: 1 addition & 1 deletion daemon/daemon.go
@@ -117,7 +117,7 @@ func (dl *DaemonListener) handleConnection(conn manet.Conn) {
return
}

log.Debug("Got command: %v", command)
log.Debugf("Got command: %v", command)
switch command.Command {
case "add":
err = commands.Add(dl.node, command.Args, command.Opts, conn)
2 changes: 1 addition & 1 deletion daemon/daemon_client.go
@@ -25,7 +25,7 @@ func getDaemonAddr(confdir string) (string, error) {
}
fi, err := os.Open(confdir + "/rpcaddress")
if err != nil {
log.Debug("getDaemonAddr failed: %s", err)
log.Debugf("getDaemonAddr failed: %s", err)
if err == os.ErrNotExist {
return "", ErrDaemonNotRunning
}
2 changes: 1 addition & 1 deletion fuse/ipns/link_unix.go
@@ -19,6 +19,6 @@ func (l *Link) Attr() fuse.Attr {
}

func (l *Link) Readlink(req *fuse.ReadlinkRequest, intr fs.Intr) (string, fuse.Error) {
log.Debug("ReadLink: %s", l.Target)
log.Debugf("ReadLink: %s", l.Target)
return l.Target, nil
}
2 changes: 1 addition & 1 deletion fuse/readonly/readonly_unix.go
@@ -113,7 +113,7 @@ func (s *Node) Attr() fuse.Attr {

// Lookup performs a lookup under this node.
func (s *Node) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
log.Debug("Lookup '%s'", name)
log.Debugf("Lookup '%s'", name)
nd, err := s.Ipfs.Resolver.ResolveLinks(s.Nd, []string{name})
if err != nil {
// todo: make this error more versatile.
2 changes: 1 addition & 1 deletion merkledag/merkledag.go
@@ -175,7 +175,7 @@ type dagService struct {
// Add adds a node to the dagService, storing the block in the BlockService
func (n *dagService) Add(nd *Node) (u.Key, error) {
k, _ := nd.Key()
log.Debug("DagService Add [%s]", k)
log.Debugf("DagService Add [%s]", k)
if n == nil {
return "", fmt.Errorf("dagService is nil")
}
2 changes: 1 addition & 1 deletion namesys/publisher.go
@@ -27,7 +27,7 @@ func NewRoutingPublisher(route routing.IpfsRouting) Publisher {

// Publish implements Publisher. Accepts a keypair and a value,
func (p *ipnsPublisher) Publish(k ci.PrivKey, value string) error {
log.Debug("namesys: Publish %s", value)
log.Debugf("namesys: Publish %s", value)

// validate `value` is a ref (multihash)
_, err := mh.FromB58String(value)
2 changes: 1 addition & 1 deletion namesys/routing.go
@@ -35,7 +35,7 @@ func (r *routingResolver) CanResolve(name string) bool {
// Resolve implements Resolver. Uses the IPFS routing system to resolve SFS-like
// names.
func (r *routingResolver) Resolve(name string) (string, error) {
log.Debug("RoutingResolve: '%s'", name)
log.Debugf("RoutingResolve: '%s'", name)
ctx := context.TODO()
hash, err := mh.FromB58String(name)
if err != nil {
2 changes: 1 addition & 1 deletion net/conn/conn.go
@@ -89,7 +89,7 @@ func newSingleConn(ctx context.Context, local, remote peer.Peer,

// close is the internal close function, called by ContextCloser.Close
func (c *singleConn) close() error {
log.Debug("%s closing Conn with %s", c.local, c.remote)
log.Debugf("%s closing Conn with %s", c.local, c.remote)

// close underlying connection
err := c.maconn.Close()
6 changes: 3 additions & 3 deletions net/swarm/swarm_test.go
@@ -25,7 +25,7 @@ func pong(ctx context.Context, swarm *Swarm) {
if bytes.Equal(m1.Data(), []byte("ping")) {
m2 := msg.New(m1.Peer(), []byte("pong"))
i++
log.Debug("%s pong %s (%d)", swarm.local, m1.Peer(), i)
log.Debugf("%s pong %s (%d)", swarm.local, m1.Peer(), i)
swarm.Outgoing <- m2
}
}
@@ -130,14 +130,14 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) {

for k := 0; k < MsgNum; k++ {
for _, p := range *peers {
log.Debug("%s ping %s (%d)", s1.local, p, k)
log.Debugf("%s ping %s (%d)", s1.local, p, k)
s1.Outgoing <- msg.New(p, []byte("ping"))
}
}

got := map[u.Key]int{}
for k := 0; k < (MsgNum * len(*peers)); k++ {
log.Debug("%s waiting for pong (%d)", s1.local, k)
log.Debugf("%s waiting for pong (%d)", s1.local, k)
msg := <-s1.Incoming
if string(msg.Data()) != "pong" {
t.Error("unexpected conn output", msg.Data)
2 changes: 1 addition & 1 deletion path/path.go
@@ -22,7 +22,7 @@ type Resolver struct {
// path component as a hash (key) of the first node, then resolves
// all other components walking the links, with ResolveLinks.
func (s *Resolver) ResolvePath(fpath string) (*merkledag.Node, error) {
log.Debug("Resolve: '%s'", fpath)
log.Debugf("Resolve: '%s'", fpath)
fpath = path.Clean(fpath)

parts := strings.Split(fpath, "/")
2 changes: 1 addition & 1 deletion pin/indirect.go
@@ -32,7 +32,7 @@ func loadIndirPin(d ds.Datastore, k ds.Key) (*indirectPin, error) {
keys = append(keys, k)
refcnt[k] = v
}
log.Debug("indirPin keys: %#v", keys)
log.Debugf("indirPin keys: %#v", keys)

return &indirectPin{blockset: set.SimpleSetFromKeys(keys), refCounts: refcnt}, nil
}
2 changes: 1 addition & 1 deletion routing/dht/ext_test.go
@@ -245,7 +245,7 @@ func TestNotFound(t *testing.T) {

ctx, _ = context.WithTimeout(ctx, time.Second*5)
v, err := d.GetValue(ctx, u.Key("hello"))
log.Debug("get value got %v", v)
log.Debugf("get value got %v", v)
if err != nil {
switch err {
case routing.ErrNotFound:
20 changes: 10 additions & 10 deletions routing/dht/query.go
@@ -107,7 +107,7 @@ func newQueryRunner(ctx context.Context, q *dhtQuery) *dhtQueryRunner {
}

func (r *dhtQueryRunner) Run(peers []peer.Peer) (*dhtQueryResult, error) {
log.Debug("Run query with %d peers.", len(peers))
log.Debugf("Run query with %d peers.", len(peers))
if len(peers) == 0 {
log.Warning("Running query with no peers!")
return nil, nil
@@ -176,7 +176,7 @@ func (r *dhtQueryRunner) addPeerToQuery(next peer.Peer, benchmark peer.Peer) {
r.peersSeen[next.Key()] = next
r.Unlock()

log.Debug("adding peer to query: %v\n", next)
log.Debugf("adding peer to query: %v\n", next)

// do this after unlocking to prevent possible deadlocks.
r.peersRemaining.Increment(1)
@@ -200,14 +200,14 @@ func (r *dhtQueryRunner) spawnWorkers() {
if !more {
return // channel closed.
}
log.Debug("spawning worker for: %v\n", p)
log.Debugf("spawning worker for: %v\n", p)
go r.queryPeer(p)
}
}
}

func (r *dhtQueryRunner) queryPeer(p peer.Peer) {
log.Debug("spawned worker for: %v\n", p)
log.Debugf("spawned worker for: %v\n", p)

// make sure we rate limit concurrency.
select {
@@ -218,12 +218,12 @@ func (r *dhtQueryRunner) queryPeer(p peer.Peer) {
}

// ok let's do this!
log.Debug("running worker for: %v", p)
log.Debugf("running worker for: %v", p)

// make sure we do this when we exit
defer func() {
// signal we're done proccessing peer p
log.Debug("completing worker for: %v", p)
log.Debugf("completing worker for: %v", p)
r.peersRemaining.Decrement(1)
r.rateLimit <- struct{}{}
}()
@@ -232,7 +232,7 @@ func (r *dhtQueryRunner) queryPeer(p peer.Peer) {
// (Incidentally, this will add it to the peerstore too)
err := r.query.dialer.DialPeer(p)
if err != nil {
log.Debug("ERROR worker for: %v -- err connecting: %v", p, err)
log.Debugf("ERROR worker for: %v -- err connecting: %v", p, err)
r.Lock()
r.errs = append(r.errs, err)
r.Unlock()
@@ -243,20 +243,20 @@ func (r *dhtQueryRunner) queryPeer(p peer.Peer) {
res, err := r.query.qfunc(r.ctx, p)

if err != nil {
log.Debug("ERROR worker for: %v %v", p, err)
log.Debugf("ERROR worker for: %v %v", p, err)
r.Lock()
r.errs = append(r.errs, err)
r.Unlock()

} else if res.success {
log.Debug("SUCCESS worker for: %v", p, res)
log.Debugf("SUCCESS worker for: %v", p, res)
r.Lock()
r.result = res
r.Unlock()
r.cancel() // signal to everyone that we're done.

} else if res.closerPeers != nil {
log.Debug("PEERS CLOSER -- worker for: %v\n", p)
log.Debugf("PEERS CLOSER -- worker for: %v\n", p)
for _, next := range res.closerPeers {
r.addPeerToQuery(next, p)
}
2 changes: 1 addition & 1 deletion routing/dht/routing.go
@@ -247,7 +247,7 @@ func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (peer.Peer, error)
return nil, err
}

log.Debug("FindPeer %v %v", id, result.success)
log.Debugf("FindPeer %v %v", id, result.success)
if result.peer == nil {
return nil, routing.ErrNotFound
}
