diff --git a/.gitignore b/.gitignore index 254988dc..9b8257cc 100644 --- a/.gitignore +++ b/.gitignore @@ -31,5 +31,5 @@ build # https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git node_modules -lib dist +docs \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 5c9015c2..3d06a606 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,7 +2,7 @@ sudo: false language: node_js node_js: - 4 - - 5 + - 6 - stable # Make sure we have new NPM. diff --git a/README.md b/README.md index 13a43ff2..de87c919 100644 --- a/README.md +++ b/README.md @@ -27,10 +27,10 @@ npm install --save ipfsd-ctl ## Usage -IPFS daemons are already easy to start and stop, but this module is here to do it from javascript itself. +IPFS daemons are already easy to start and stop, but this module is here to do it from JavaScript itself. ```js -// start a disposable node, and get access to the api +// Start a disposable node, and get access to the api // print the node id, and kill the temporary daemon // IPFS_PATH will point to /tmp/ipfs_***** and will be @@ -41,13 +41,15 @@ var ipfsd = require('ipfsd-ctl') ipfsd.disposableApi(function (err, ipfs) { ipfs.id(function (err, id) { console.log(id) - process.kill() + process.exit() }) }) ``` If you need want to use an existing ipfs installation you can set `$IPFS_EXEC=/path/to/ipfs` to ensure it uses that. +For more details see https://ipfs.github.io/js-ipfsd-ctl/. + ## Contribute Feel free to join in. All welcome. Open an [issue](https://github.com/ipfs/js-ipfsd-ctl/issues)! diff --git a/example.js b/example.js new file mode 100644 index 00000000..e0389d25 --- /dev/null +++ b/example.js @@ -0,0 +1,17 @@ +'use strict' + +// Start a disposable node, and get access to the api +// print the node id, and kill the temporary daemon + +// IPFS_PATH will point to /tmp/ipfs_***** and will be +// cleaned up when the process exits. 
+ +const ipfsd = require('ipfsd-ctl') + +ipfsd.disposableApi((err, ipfs) => { + if (err) throw err + ipfs.id((err, id) => { + if (err) throw err + console.log(id) + }) +}) diff --git a/package.json b/package.json index 45b6267a..54bed00e 100644 --- a/package.json +++ b/package.json @@ -2,16 +2,15 @@ "name": "ipfsd-ctl", "version": "0.17.0", "description": "simple controls for an ipfs node", - "main": "lib/index.js", - "jsxnext:main": "src/index.js", + "main": "src/index.js", "scripts": { "lint": "aegir-lint", "coverage": "aegir-coverage", "test": "aegir-test --env node", - "build": "aegir-build --env node", - "release": "aegir-release --env node", - "release-minor": "aegir-release --type minor --env node", - "release-major": "aegir-release --type major --env node", + "docs": "aegir-docs", + "release": "aegir-release --env node --docs", + "release-minor": "aegir-release --type minor --env node --docs", + "release-major": "aegir-release --type major --env node --docs", "coverage-publish": "aegir-coverage publish" }, "engines": { @@ -45,20 +44,22 @@ ], "license": "MIT", "dependencies": { - "bl": "^1.1.2", + "async": "^2.1.4", "go-ipfs-dep": "0.4.4", - "ipfs-api": "^12.0.3", - "multiaddr": "^2.1.0", + "ipfs-api": "^12.1.2", + "multiaddr": "^2.1.1", "once": "^1.4.0", "rimraf": "^2.5.4", - "run-series": "^1.1.4", "shutdown": "^0.2.4", "subcomandante": "^1.0.5" }, "devDependencies": { - "aegir": "^9.2.1", + "aegir": "^9.3.0", + "chai": "^3.5.0", + "is-running": "^2.1.0", "mkdirp": "^0.5.1", - "pre-commit": "^1.2.1" + "multihashes": "^0.3.1", + "pre-commit": "^1.2.2" }, "repository": { "type": "git", @@ -72,4 +73,4 @@ "example": "examples", "test": "test" } -} \ No newline at end of file +} diff --git a/src/exec.js b/src/exec.js new file mode 100644 index 00000000..4db94e5a --- /dev/null +++ b/src/exec.js @@ -0,0 +1,66 @@ +'use strict' + +const run = require('subcomandante') +const once = require('once') + +function exec (cmd, args, opts, handlers) { + opts = opts || {} + let err = '' + let result = '' + let callback + + // Handy method if we just want the result and err returned in a callback + if (typeof handlers === 'function') { + callback = once(handlers) + handlers = { + error: callback, + data (data) { + result += data + }, + done () { + if (err) { + return callback(new Error(err)) + } + callback(null, result.trim()) + } + } + } + + // The listeners that will actually be set on the process + const listeners = { + data: handlers.data, + error (data) { + err += data + }, + done: once((code) => { + if (typeof code === 'number' && code !== 0) { + return handlers.error( + new Error(`non-zero exit code ${code}\n + while running: ${cmd} ${args.join(' ')}\n\n + ${err}`) + ) + } + if (handlers.done) { + handlers.done() + } + }) + } + + const command = run(cmd, args, opts) + + if (listeners.data) { + command.stdout.on('data', listeners.data) + } + + command.stderr.on('data', listeners.error) + + // If command fails to execute return directly to the handler + command.on('error', handlers.error) + + command.on('close', listeners.done) + command.on('exit', listeners.done) + + return command +} + +module.exports = exec diff --git a/src/index.js b/src/index.js index a62ad680..23cf7222 100644 --- a/src/index.js +++ b/src/index.js @@ -5,66 +5,111 @@ const join = require('path').join const Node = require('./node') +const defaultOptions = { + 'Addresses.Swarm': ['/ip4/0.0.0.0/tcp/0'], + 'Addresses.Gateway': '', + 'Addresses.API': '/ip4/127.0.0.1/tcp/0', + disposable: true, + init: true +} + function tempDir 
() { return join(os.tmpdir(), `ipfs_${String(Math.random()).substr(2)}`) } -module.exports = { - version (done) { - (new Node()).version(done) +/** + * Control go-ipfs nodes directly from JavaScript. + * + * @namespace IpfsDaemonController + */ +const IpfsDaemonController = { + /** + * Get the version of the currently used go-ipfs binary. + * + * @memberof IpfsDaemonController + * @param {function(Error, string)} callback + * @returns {undefined} + */ + version (callback) { + (new Node()).version(callback) }, - local (path, opts, done) { + /** + * Create a new local node. + * + * @memberof IpfsDaemonController + * @param {string} [path] - Location of the repo. Defaults to `$IPFS_PATH`, or `$HOME/.ipfs`, or `$USER_PROFILE/.ipfs`. + * @param {Object} [opts={}] + * @param {function(Error, Node)} callback + * @returns {undefined} + */ + local (path, opts, callback) { if (typeof opts === 'function') { - done = opts + callback = opts opts = {} } - if (!done) { - done = path + if (!callback) { + callback = path path = process.env.IPFS_PATH || join(process.env.HOME || process.env.USERPROFILE, '.ipfs') } process.nextTick(() => { - done(null, new Node(path, opts)) + callback(null, new Node(path, opts)) }) }, - disposableApi (opts, done) { + /** + * Create a new disposable node and already start the daemon. + * + * @memberof IpfsDaemonController + * @param {Object} [opts={}] + * @param {function(Error, Node)} callback + * @returns {undefined} + */ + disposableApi (opts, callback) { if (typeof opts === 'function') { - done = opts + callback = opts opts = {} } this.disposable(opts, (err, node) => { - if (err) return done(err) - node.startDaemon(done) + if (err) { + return callback(err) + } + + node.startDaemon(callback) }) }, - disposable (opts, done) { + /** + * Create a new disposable node. + * This means the repo is created in a temporary location and cleaned up on process exit. 
+ * + * @memberof IpfsDaemonController + * @param {Object} [opts={}] + * @param {function(Error, Node)} callback + * @returns {undefined} + */ + disposable (opts, callback) { if (typeof opts === 'function') { - done = opts + callback = opts opts = {} } - opts['Addresses.Swarm'] = ['/ip4/0.0.0.0/tcp/0'] - opts['Addresses.Gateway'] = '' - opts['Addresses.API'] = '/ip4/127.0.0.1/tcp/0' - if (opts.apiAddr) { - opts['Addresses.API'] = opts.apiAddr - } + let options = {} + Object.assign(options, defaultOptions, opts || {}) - if (opts.gatewayAddr) { - opts['Addresses.Gateway'] = opts.gatewayAddr - } + const repoPath = options.repoPath || tempDir() + const disposable = options.disposable + delete options.disposable + delete options.repoPath - const node = new Node(opts.repoPath || tempDir(), opts, true) + const node = new Node(repoPath, options, disposable) - if (typeof opts.init === 'boolean' && opts.init === false) { - process.nextTick(() => { - done(null, node) - }) + if (typeof options.init === 'boolean' && + options.init === false) { + process.nextTick(() => callback(null, node)) } else { - node.init((err) => { - done(err, node) - }) + node.init((err) => callback(err, node)) } } } + +module.exports = IpfsDaemonController diff --git a/src/node.js b/src/node.js index a3bf523f..ddb173b2 100644 --- a/src/node.js +++ b/src/node.js @@ -1,17 +1,17 @@ 'use strict' const fs = require('fs') -const run = require('subcomandante') -const series = require('run-series') +const async = require('async') const ipfs = require('ipfs-api') const multiaddr = require('multiaddr') const rimraf = require('rimraf') const shutdown = require('shutdown') const path = require('path') const join = path.join -const bl = require('bl') const once = require('once') +const exec = require('./exec') + const ipfsDefaultPath = findIpfsExecutable() const GRACE_PERIOD = 7500 // amount of ms to wait before sigkill @@ -32,30 +32,52 @@ function findIpfsExecutable () { } } -function configureNode (node, conf, done) { - const keys = Object.keys(conf) - series(keys.map((key) => (cb) => { - const value = conf[key] - const env = {env: node.env} +function setConfigValue (node, key, value, callback) { + exec( + node.exec, + ['config', key, value, '--json'], + {env: node.env}, + callback + ) +} - run(node.exec, ['config', key, '--json', JSON.stringify(value)], env) - .on('error', cb) - .on('end', cb) - }), done) +function configureNode (node, conf, callback) { + async.eachOfSeries(conf, (value, key, cb) => { + setConfigValue(node, key, JSON.stringify(value), cb) + }, callback) } -// Consistent error handling -function parseConfig (path, done) { +function tryJsonParse (input, callback) { + let res try { - const file = fs.readFileSync(join(path, 'config')) - const parsed = JSON.parse(file) - done(null, parsed) + res = JSON.parse(input) } catch (err) { - done(err) + return callback(err) } + callback(null, res) +} + +// Consistent error handling +function parseConfig (path, callback) { + async.waterfall([ + (cb) => fs.readFile(join(path, 'config'), cb), + (file, cb) => tryJsonParse(file.toString(), cb) + ], callback) } -module.exports = class Node { +/** + * Control a go-ipfs node. + */ +class Node { + /** + * Create a new node. + * + * @param {string} path + * @param {Object} [opts] + * @param {Object} [opts.env={}] - Additional environment settings, passed to executing shell. + * @param {boolean} [disposable=false] - Should this be a temporary node.
+ * @returns {Node} + */ constructor (path, opts, disposable) { this.path = path this.opts = opts || {} @@ -64,25 +86,29 @@ module.exports = class Node { this.initialized = fs.existsSync(path) this.clean = true this.env = Object.assign({}, process.env, {IPFS_PATH: path}) + this.disposable = disposable - if (this.opts.env) Object.assign(this.env, this.opts.env) + if (this.opts.env) { + Object.assign(this.env, this.opts.env) + } } - _run (args, envArg, done) { - run(this.exec, args, envArg) - .on('error', done) - .pipe(bl((err, result) => { - if (err) { - return done(err) - } - - done(null, result.toString().trim()) - })) + _run (args, opts, callback) { + return exec(this.exec, args, opts, callback) } - init (initOpts, done) { - if (!done) { - done = initOpts + /** + * Initialize a repo. + * + * @param {Object} [initOpts={}] + * @param {number} [initOpts.keysize=2048] - The bit size of the identity key. + * @param {string} [initOpts.directory=IPFS_PATH] - The location of the repo. + * @param {function (Error, Node)} callback + * @returns {undefined} + */ + init (initOpts, callback) { + if (!callback) { + callback = initOpts initOpts = {} } @@ -93,180 +119,224 @@ module.exports = class Node { this.env.IPFS_PATH = this.path } - run(this.exec, ['init', '-b', keySize], {env: this.env}) - .on('error', done) - .pipe(bl((err, buf) => { - if (err) return done(err) - - configureNode(this, this.opts, (err) => { - if (err) { - return done(err) - } + this._run(['init', '-b', keySize], {env: this.env}, (err, result) => { + if (err) { + return callback(err) + } - this.clean = false - this.initialized = true + configureNode(this, this.opts, (err) => { + if (err) { + return callback(err) + } - done(null, this) - }) - })) + this.clean = false + this.initialized = true + callback(null, this) + }) + }) if (this.disposable) { shutdown.addHandler('disposable', 1, this.shutdown.bind(this)) } } - // cleanup tmp files - // TODO: this is a bad name for a function. a user may call this expecting - // something similar to "stopDaemon()". consider changing it. - @jbenet - shutdown (done) { - if (!this.clean && this.disposable) { - rimraf(this.path, (err) => { - if (err) throw err - done() - }) + /** + * Delete the repo that was being used. + * If the node was marked as `disposable` this will be called + * automatically when the process is exited. + * + * @param {function(Error)} callback + * @returns {undefined} + */ + shutdown (callback) { + if (this.clean || !this.disposable) { + return callback() } + + rimraf(this.path, callback) } - startDaemon (flags, done) { - if (typeof flags === 'function' && typeof done === 'undefined') { - done = flags + /** + * Start the daemon. + * + * @param {Array} [flags=[]] - Flags to be passed to the `ipfs daemon` command.
+ * @param {function(Error, IpfsApi)} callback + * @returns {undefined} + */ + startDaemon (flags, callback) { + if (typeof flags === 'function') { + callback = flags flags = [] } - const node = this - parseConfig(node.path, (err, conf) => { - if (err) return done(err) - - let stdout = '' - let args = ['daemon'].concat(flags || []) - - // strategy: - // - run subprocess - // - listen for API addr on stdout (success) - // - or an early exit or error (failure) - node.subprocess = run(node.exec, args, {env: node.env}) - node.subprocess.on('error', onErr) - .on('data', onData) - - // done2 is called to call done after removing the event listeners - let done2 = (err, val) => { - node.subprocess.removeListener('data', onData) - node.subprocess.removeListener('error', onErr) - if (err) { - node.killProcess(() => {}) // we failed. kill, just to be sure... - } - done(err, val) - done2 = () => {} // in case it gets called twice - } - - function onErr (err) { - if (String(err).match('daemon is running')) { - // we're good - done2(null, ipfs(conf.Addresses.API)) - - // TODO: I don't think this case is OK at all... - // When does the daemon outout "daemon is running" ?? seems old. - // Someone should check on this... - @jbenet - } else if (String(err).match('non-zero exit code')) { - // exited with an error on startup, before we removed listeners - done2(err) - } else { - done2(err) - } - } - - function onData (data) { - data = String(data) - stdout += data + const args = ['daemon'].concat(flags) - if (!data.trim().match(/Daemon is ready/)) { - return // not ready yet, keep waiting. - } + callback = once(callback) - const apiM = stdout.match(/API server listening on (.*)\n/) - if (apiM) { - // found the API server listening. extract the addr. - node.apiAddr = apiM[1] - } else { - // daemon ready but no API server? seems wrong... - done2(new Error('daemon ready without api')) - } + parseConfig(this.path, (err, conf) => { + if (err) { + return callback(err) + } - const gatewayM = stdout.match(/Gateway \((readonly|writable)\) server listening on (.*)\n/) - if (gatewayM) { - // found the Gateway server listening. extract the addr. - node.gatewayAddr = gatewayM[1] + this.subprocess = this._run(args, {env: this.env}, { + error: (err) => { + // Only look at the last error + const input = String(err) + .split('\n') + .map((l) => l.trim()) + .filter(Boolean) + .slice(-1)[0] || '' + + if (input.match('daemon is running')) { + // we're good + return callback(null, this.api) + } + // ignore when kill -9'd + if (!input.match('non-zero exit code')) { + callback(err) + } + }, + data: (data) => { + const match = String(data).trim().match(/API server listening on (.*)/) + + if (match) { + this.apiAddr = match[1] + const addr = multiaddr(this.apiAddr).nodeAddress() + this.api = ipfs(this.apiAddr) + this.api.apiHost = addr.address + this.api.apiPort = addr.port + + callback(null, this.api) + } } - - const addr = multiaddr(node.apiAddr).nodeAddress() - const api = ipfs(node.apiAddr) - api.apiHost = addr.address - api.apiPort = addr.port - - // We are happyly listening, so let's not hide other errors - node.subprocess.removeListener('error', onErr) - - done2(null, api) - } + }) }) } - stopDaemon (done) { - if (!done) { - done = () => {} + /** + * Stop the daemon. 
+ * + * @param {function(Error)} callback + * @returns {undefined} + */ + stopDaemon (callback) { + if (!callback) { + callback = () => {} } if (!this.subprocess) { - return done() + return callback() } - this.killProcess(done) + this.killProcess(callback) } - killProcess (done) { + /** + * Kill the `ipfs daemon` process. + * + * First `SIGTERM` is sent, then after 7.5 seconds `SIGKILL` is sent + * if the process hasn't exited yet. + * + * @param {function()} callback - Called when the process was killed. + * @returns {undefined} + */ + killProcess (callback) { // need a local var for the closure, as we clear the var. const subprocess = this.subprocess const timeout = setTimeout(() => { subprocess.kill('SIGKILL') - done() + callback() }, GRACE_PERIOD) - subprocess.on('close', () => { + subprocess.once('close', () => { clearTimeout(timeout) this.subprocess = null - done() + callback() }) subprocess.kill('SIGTERM') this.subprocess = null } + /** + * Get the pid of the `ipfs daemon` process. + * + * @returns {number} + */ daemonPid () { return this.subprocess && this.subprocess.pid } - getConfig (key, done) { + /** + * Call `ipfs config`. + * + * If no `key` is passed, the whole config is returned as an object. + * + * @param {string} [key] - A specific config to retrieve. + * @param {function(Error, (Object|string))} callback + * @returns {undefined} + */ + getConfig (key, callback) { if (typeof key === 'function') { - done = key + callback = key key = '' } - this._run(['config', key], {env: this.env}, done) + async.waterfall([ + (cb) => this._run( + ['config', key], + {env: this.env}, + cb + ), + (config, cb) => { + if (!key) { + return tryJsonParse(config, cb) + } + cb(null, config.trim()) + } + ], callback) } - setConfig (key, value, done) { - done = once(done) - run(this.exec, ['config', key, value, '--json'], {env: this.env}) - .on('error', done) - .on('data', () => {}) - .on('end', () => done()) + /** + * Set a config value.
+ * + * @param {string} key + * @param {string} value + * @param {function(Error)} callback + * @returns {undefined} + */ + setConfig (key, value, callback) { + this._run( + ['config', key, value, '--json'], + {env: this.env}, + callback + ) } - replaceConf (file, done) { - this._run(['config', 'replace', file], {env: this.env}, done) + /** + * Replace the configuration with a given file + * + * @param {string} file - path to the new config file + * @param {function(Error)} callback + * @returns {undefined} + */ + replaceConf (file, callback) { + this._run( + ['config', 'replace', file], + {env: this.env}, + callback + ) } - - version (done) { - this._run(['version'], {}, done) + /** + * Get the version of ipfs + * + * @param {function(Error, string)} callback + * @returns {undefined} + */ + version (callback) { + this._run(['version'], {env: this.env}, callback) } } + +module.exports = Node diff --git a/test/exec.spec.js b/test/exec.spec.js new file mode 100644 index 00000000..9fdeb99a --- /dev/null +++ b/test/exec.spec.js @@ -0,0 +1,131 @@ +/* eslint max-nested-callbacks: ["error", 6] */ +/* eslint-env mocha */ +'use strict' + +const expect = require('chai').expect +const isrunning = require('is-running') +const cp = require('child_process') +const path = require('path') +const exec = require('../src/exec') + +const survivor = path.join(__dirname, 'survivor') +const hang = 'tail -f /dev/null'.split(' ') + +function token () { + return Math.random().toString().substr(2) +} + +function psExpect (pid, expect, grace, cb) { + setTimeout(() => { + const actual = isrunning(pid) + + if (actual !== expect && grace > 0) { + psExpect(pid, expect, grace--, cb) + return + } + + cb(null, actual) + }, 200) +} + +function isRunningGrep (pattern, cb) { + const cmd = 'ps aux' + cp.exec(cmd, (err, stdout, stderr) => { + if (err) { + return cb(err) + } + + const running = stdout.match(pattern) !== null + + cb(null, running) + }) +} + +function makeCheck (n, done) { + let i = 0 + + return (err) => { + if (err) { + return done(err) + } + + if (++i === n) { + done() + } + } +} + +describe('exec', () => { + it('SIGTERM kills hang', (done) => { + const tok = token() + + const check = makeCheck(2, done) + const args = hang.concat(tok) + + const p = exec(args[0], args.slice(1), {}, (err) => { + // `tail -f /dev/null somerandom` errors out + expect(err).to.exist + + isRunningGrep(token, (err, running) => { + expect(err).to.not.exist + expect(running).to.not.be.ok + check() + }) + }) + + psExpect(p.pid, true, 10, (err, running) => { + expect(err).to.not.exist + expect(running).to.be.ok + + p.kill('SIGTERM') // should kill it + psExpect(p.pid, false, 10, (err, running) => { + expect(err).to.not.exist + expect(running).to.not.be.ok + check() + }) + }) + }) + + // Travis and CircleCI don't like the usage of SIGHUP + if (process.env.CI) { + return + } + + it('SIGKILL kills survivor', (done) => { + const check = makeCheck(2, done) + + const tok = token() + + const p = exec(survivor, [tok], {}, (err) => { + expect(err).to.not.exist + + isRunningGrep(token, (err, running) => { + expect(err).to.not.exist + expect(running).to.not.be.ok + check() + }) + }) + + p.stdout.pipe(process.stdout) + p.stderr.pipe(process.stderr) + + psExpect(p.pid, true, 10, (err, running) => { + expect(err).to.not.exist + expect(running).to.be.ok + + p.kill('SIGTERM') // should not kill it + + psExpect(p.pid, true, 10, (err, running) => { + expect(err).to.not.exist + expect(running).to.be.ok + + p.kill('SIGKILL') // should kill it + psExpect(p.pid, 
false, 15, (err, running) => { + expect(err).to.not.exist + expect(running).to.not.be.ok + check() + }) + }) + }) + }) +}) diff --git a/test/index.spec.js b/test/index.spec.js index 29579cea..f4e1cd04 100644 --- a/test/index.spec.js +++ b/test/index.spec.js @@ -2,20 +2,22 @@ /* eslint max-nested-callbacks: ["error", 8] */ 'use strict' -const ipfsd = require('../src') -const assert = require('assert') +const async = require('async') +const expect = require('chai').expect const ipfsApi = require('ipfs-api') -const run = require('subcomandante') -const bs58 = require('bs58') +const mh = require('multihashes') const fs = require('fs') const rimraf = require('rimraf') const mkdirp = require('mkdirp') const path = require('path') +const once = require('once') +const os = require('os') -describe('ipfs executable path', function () { - this.timeout(2000) - let Node +const exec = require('../src/exec') +const ipfsd = require('../src') +describe('ipfs executable path', () => { + let Node it('has the correct path when installed with npm3', (done) => { process.env.testpath = '/tmp/ipfsd-ctl-test/node_modules/ipfsd-ctl/lib' // fake __dirname let npm3Path = '/tmp/ipfsd-ctl-test/node_modules/go-ipfs-dep/go-ipfs' @@ -29,7 +31,11 @@ describe('ipfs executable path', function () { delete require.cache[require.resolve('../src/node.js')] Node = require('../src/node.js') var node = new Node() - assert.equal(node.exec, '/tmp/ipfsd-ctl-test/node_modules/go-ipfs-dep/go-ipfs/ipfs') + expect( + node.exec + ).to.be.eql( + '/tmp/ipfsd-ctl-test/node_modules/go-ipfs-dep/go-ipfs/ipfs' + ) rimraf('/tmp/ipfsd-ctl-test', done) }) }) @@ -47,359 +53,392 @@ describe('ipfs executable path', function () { delete require.cache[require.resolve('../src/node.js')] Node = require('../src/node.js') var node = new Node() - assert.equal(node.exec, '/tmp/ipfsd-ctl-test/node_modules/ipfsd-ctl/node_modules/go-ipfs-dep/go-ipfs/ipfs') + expect( + node.exec + ).to.be.eql( + '/tmp/ipfsd-ctl-test/node_modules/ipfsd-ctl/node_modules/go-ipfs-dep/go-ipfs/ipfs' + ) rimraf('/tmp/ipfsd-ctl-test', done) }) }) }) -describe('local daemon', function () { - const repoPath = '/tmp/ipfsd-ctl-test' - const addr = '/ip4/127.0.0.1/tcp/5678' - const config = { - Addresses: { - API: addr +describe('daemons', () => { + describe('local node', () => { + const repoPath = '/tmp/ipfsd-ctl-test' + const addr = '/ip4/127.0.0.1/tcp/5678' + const config = { + Addresses: { + API: addr + } } - } - it('allows passing flags to init', (done) => { - ipfsd.local(repoPath, config, (err, node) => { - assert.equal(err, null) - - node.init((err) => { - assert.equal(err, null) - - node.getConfig('Addresses.API', (err, res) => { - assert.equal(err, null) - assert.equal(res, addr) - rimraf(repoPath, done) - }) - }) + it('allows passing flags to init', (done) => { + async.waterfall([ + (cb) => ipfsd.local(repoPath, config, cb), + (node, cb) => { + async.series([ + (cb) => node.init(cb), + (cb) => node.getConfig('Addresses.API', cb) + ], (err, res) => { + expect(err).to.not.exist + expect(res[1]).to.be.eql(addr) + rimraf(repoPath, cb) + }) + } + ], done) }) }) -}) -describe('disposable node with local api', function () { - this.timeout(20000) - let ipfs - before((done) => { - ipfsd.disposable((err, node) => { - if (err) throw err - node.startDaemon((err, ignore) => { - if (err) throw err - ipfs = ipfsApi(node.apiAddr) - done() - }) + describe('disposable node', () => { + const blorb = new Buffer('blorb') + let ipfs + let store + let retrieve + + beforeEach((done) => { + async.waterfall([ 
+ (cb) => ipfs.block.put(blorb, cb), + (block, cb) => block.key(cb), + (key, cb) => { + store = mh.toB58String(key) + ipfs.block.get(store, cb) + }, + (_block, cb) => { + retrieve = _block.data + cb() + } + ], done) }) - }) - it('should have started the daemon and returned an api', () => { - assert(ipfs) - assert(ipfs.id) - }) + describe('with local api', () => { + before((done) => { + async.waterfall([ + (cb) => ipfsd.disposable(cb), + (node, cb) => { + node.startDaemon((err) => { + if (err) { + return cb(err) + } + ipfs = ipfsApi(node.apiAddr) + cb() + }) + } + ], done) + }) - let store, retrieve + it('should have started the daemon and returned an api', () => { + expect(ipfs).to.exist + expect(ipfs.id).to.exist + }) - before((done) => { - const blorb = Buffer('blorb') - ipfs.block.put(blorb, (err, block) => { - if (err) throw err - store = bs58.encode(block.key).toString() + it('should be able to store objects', () => { + expect( + store + ).to.be.eql( + 'QmPv52ekjS75L4JmHpXVeuJ5uX2ecSfSZo88NSyxwA3rAQ' + ) + }) - ipfs.block.get(store, (err, block) => { - if (err) throw err - retrieve = block.data - done() + it('should be able to retrieve objects', () => { + expect(retrieve.toString()).to.be.eql('blorb') }) }) - }) - it('should be able to store objects', () => { - assert.equal(store, 'QmPv52ekjS75L4JmHpXVeuJ5uX2ecSfSZo88NSyxwA3rAQ') - }) - it('should be able to retrieve objects', () => { - assert.equal(retrieve, 'blorb') - }) -}) -describe('disposableApi node', function () { - this.timeout(20000) - let ipfs - before((done) => { - ipfsd.disposableApi((err, api) => { - if (err) throw err - ipfs = api - done() - }) - }) + describe('disposableApi', () => { + before((done) => { + ipfsd.disposableApi((err, api) => { + if (err) { + done(err) + } - it('should have started the daemon and returned an api with host/port', () => { - assert(ipfs) - assert(ipfs.id) - assert(ipfs.apiHost) - assert(ipfs.apiPort) - }) + ipfs = api + done() + }) + }) - let store, retrieve + it('should have started the daemon and returned an api with host/port', () => { + expect(ipfs).to.have.property('id') + expect(ipfs).to.have.property('apiHost') + expect(ipfs).to.have.property('apiPort') + }) - before((done) => { - const blorb = Buffer('blorb') - ipfs.block.put(blorb, (err, block) => { - if (err) throw err - store = bs58.encode(block.key).toString() + it('should be able to store objects', () => { + expect( + store + ).to.be.eql( + 'QmPv52ekjS75L4JmHpXVeuJ5uX2ecSfSZo88NSyxwA3rAQ' + ) + }) - ipfs.block.get(store, (err, block) => { - if (err) throw err - retrieve = block.data - done() + it('should be able to retrieve objects', () => { + expect(retrieve.toString()).to.be.eql('blorb') }) }) }) - it('should be able to store objects', () => { - assert.equal(store, 'QmPv52ekjS75L4JmHpXVeuJ5uX2ecSfSZo88NSyxwA3rAQ') - }) - it('should be able to retrieve objects', () => { - assert.equal(retrieve, 'blorb') - }) -}) -describe('starting and stopping', function () { - this.timeout(20000) - let node + describe('starting and stopping', () => { + let node - describe('init', () => { - before((done) => { - ipfsd.disposable((err, res) => { - if (err) throw err - node = res - done() + describe('init', () => { + before((done) => { + ipfsd.disposable((err, res) => { + if (err) { + done(err) + } + node = res + done() + }) }) - }) - it('should returned a node', () => { - assert(node) - }) + it('should returned a node', () => { + expect(node).to.exist + }) - it('daemon should not be running', () => { - assert(!node.daemonPid()) + it('daemon 
should not be running', () => { + expect(node.daemonPid()).to.be.falsy + }) }) - }) - let pid + let pid + describe('starting', () => { + let ipfs - describe('starting', () => { - let ipfs - before((done) => { - node.startDaemon((err, res) => { - if (err) throw err + before((done) => { + node.startDaemon((err, res) => { + if (err) throw err - pid = node.daemonPid() - ipfs = res + pid = node.daemonPid() + ipfs = res - // actually running? - run('kill', ['-0', pid]) - .on(err, (err) => { throw err }) - .on('end', () => { done() }) + // actually running? + done = once(done) + exec('kill', ['-0', pid], {cleanup: true}, () => done()) + }) }) - }) - it('should be running', () => { - assert(ipfs.id) - }) - }) - - let stopped = false - describe('stopping', () => { - before((done) => { - node.stopDaemon((err) => { - if (err) throw err - stopped = true + it('should be running', () => { + expect(ipfs.id).to.be.truthy }) - // make sure it's not still running - const poll = setInterval(() => { - run('kill', ['-0', pid]) - .on('error', () => { - clearInterval(poll) - done() - done = () => {} // so it does not get called again - }) - }, 100) - }) - - it('should be stopped', () => { - assert(!node.daemonPid()) - assert(stopped) }) - }) -}) -describe('setting up and initializing a local node', () => { - const testpath1 = '/tmp/ipfstestpath1' + describe('stopping', () => { + let stopped = false - describe('cleanup', () => { - before((done) => { - rimraf(testpath1, done) - }) + before((done) => { + node.stopDaemon((err) => { + if (err) { + return done(err) + } + stopped = true + }) + // make sure it's not still running + const poll = setInterval(() => { + exec('kill', ['-0', pid], {cleanup: true}, { + error () { + clearInterval(poll) + done() + // so it does not get called again + done = () => {} + } + }) + }, 100) + }) - it('should not have a directory', () => { - assert.equal(fs.existsSync('/tmp/ipfstestpath1'), false) + it('should be stopped', () => { + expect(node.daemonPid()).to.be.falsy + expect(stopped).to.be.truthy + }) }) }) - describe('setup', () => { - let node - before((done) => { - ipfsd.local(testpath1, (err, res) => { - if (err) throw err - node = res - done() - }) - }) + describe('setting up and init a local node', () => { + const testpath1 = '/tmp/ipfstestpath1' - it('should have returned a node', () => { - assert(node) - }) + describe('cleanup', () => { + before((done) => { + rimraf(testpath1, done) + }) - it('should not be initialized', () => { - assert.equal(node.initialized, false) + it('should not have a directory', () => { + expect(fs.existsSync('/tmp/ipfstestpath1')).to.be.eql(false) + }) }) - describe('initialize', function () { - this.timeout(10000) - + describe('setup', () => { + let node before((done) => { - node.init((err) => { - if (err) throw err + ipfsd.local(testpath1, (err, res) => { + if (err) { + return done(err) + } + node = res done() }) }) - it('should have made a directory', () => { - assert.equal(fs.existsSync(testpath1), true) + it('should have returned a node', () => { + expect(node).to.exist }) - it('should be initialized', () => { - assert.equal(node.initialized, true) + it('should not be initialized', () => { + expect(node.initialized).to.be.eql(false) }) - it('should be initialized', () => { - assert.equal(node.initialized, true) + describe('initialize', () => { + before((done) => { + node.init(done) + }) + + it('should have made a directory', () => { + expect(fs.existsSync(testpath1)).to.be.eql(true) + }) + + it('should be initialized', () => { + 
expect(node.initialized).to.be.eql(true) + }) + + it('should be initialized', () => { + expect(node.initialized).to.be.eql(true) + }) }) }) }) -}) -describe('change config values of a disposable node', function () { - this.timeout(20000) + describe('change config of a disposable node', () => { + let ipfsNode - let ipfsNode - - before((done) => { - ipfsd.disposable((err, node) => { - if (err) { - throw err - } - ipfsNode = node - done() + before((done) => { + ipfsd.disposable((err, node) => { + if (err) { + return done(err) + } + ipfsNode = node + done() + }) }) - }) - it('Should return a config value', (done) => { - ipfsNode.getConfig('Bootstrap', (err, config) => { - if (err) { - throw err - } - assert(config) - done() + it('Should return a config value', (done) => { + ipfsNode.getConfig('Bootstrap', (err, config) => { + expect(err).to.not.exist + expect(config).to.exist + done() + }) }) - }) - it('Should set a config value', (done) => { - ipfsNode.setConfig('Bootstrap', null, (err) => { - if (err) { - throw err - } + it('Should set a config value', (done) => { + async.series([ + (cb) => ipfsNode.setConfig('Bootstrap', 'null', cb), + (cb) => ipfsNode.getConfig('Bootstrap', cb) + ], (err, res) => { + expect(err).to.not.exist + expect(res[1]).to.be.eql('null') + done() + }) + }) - ipfsNode.getConfig('Bootstrap', (err, config) => { - if (err) { - throw err - } - assert.equal(config, 'null') + it('should give an error if setting an invalid config value', (done) => { + ipfsNode.setConfig('Bootstrap', 'true', (err) => { + expect(err.message).to.match( + /failed to set config value/ + ) done() }) }) }) -}) -describe('external ipfs binaray', () => { it('allows passing via $IPFS_EXEC', (done) => { process.env.IPFS_EXEC = '/some/path' ipfsd.local((err, node) => { - if (err) throw err - - assert.equal(node.exec, '/some/path') + expect(err).to.not.exist + expect(node.exec).to.be.eql('/some/path') process.env.IPFS_EXEC = '' done() }) }) -}) -describe('version', () => { it('prints the version', (done) => { ipfsd.version((err, version) => { - if (err) throw err - - assert(version) + expect(err).to.not.exist + expect(version).to.be.eql('ipfs version 0.4.4') done() }) }) -}) -describe('ipfs-api version', function () { - this.timeout(20000) - - let ipfs + describe('ipfs-api version', () => { + let ipfs - before((done) => { - ipfsd.disposable((err, node) => { - if (err) throw err - node.startDaemon((err, ignore) => { + before((done) => { + ipfsd.disposable((err, node) => { if (err) throw err - ipfs = ipfsApi(node.apiAddr) - done() + node.startDaemon((err, ignore) => { + if (err) throw err + ipfs = ipfsApi(node.apiAddr) + done() + }) }) }) - }) - // NOTE: if you change ./fixtures, the hash will need to be changed - it('uses the correct ipfs-api', (done) => { - ipfs.util.addFromFs(path.join(__dirname, 'fixtures/'), { recursive: true }, (err, res) => { - if (err) throw err + // NOTE: if you change ./fixtures, the hash will need to be changed + it('uses the correct ipfs-api', (done) => { + ipfs.util.addFromFs(path.join(__dirname, 'fixtures/'), { recursive: true }, (err, res) => { + if (err) throw err - const added = res[res.length - 1] - assert(added) - assert.equal(added.hash, 'QmXkiTdnfRJjiQREtF5dWf2X4V9awNHQSn9YGofwVY4qUU') - done() + const added = res[res.length - 1] + expect(added).to.have.property( + 'hash', + 'QmXkiTdnfRJjiQREtF5dWf2X4V9awNHQSn9YGofwVY4qUU' + ) + done() + }) }) }) -}) -describe('node startDaemon', () => { - it('allows passing flags', (done) => { - ipfsd.disposable((err, node) => { - if 
(err) throw err - node.startDaemon(['--should-not-exist'], (err, ignore) => { - if (!err) { - throw new Error('should have errored') + describe('startDaemon', () => { + it('start and stop', (done) => { + const dir = os.tmpdir() + `/${Math.ceil(Math.random() * 100)}` + const check = (cb) => { + if (fs.existsSync(path.join(dir, 'repo.lock'))) { + cb(new Error('repo.lock not removed')) + } + if (fs.existsSync(path.join(dir, 'api'))) { + cb(new Error('api file not removed')) } + cb() + } - let errStr = 'Unrecognized option \'should-not-exist\'' + async.waterfall([ + (cb) => ipfsd.local(dir, cb), + (node, cb) => node.init((err) => cb(err, node)), + (node, cb) => node.startDaemon((err) => cb(err, node)), + (node, cb) => node.stopDaemon(cb), + check, + (cb) => ipfsd.local(dir, cb), + (node, cb) => node.startDaemon((err) => cb(err, node)), + (node, cb) => node.stopDaemon(cb), + check, + (cb) => ipfsd.local(dir, cb), + (node, cb) => node.startDaemon((err) => cb(err, node)), + (node, cb) => node.stopDaemon(cb), + check + ], done) + }) - if (String(err).indexOf(errStr) >= 0) { - done() // correct error - } + it('allows passing flags', (done) => { + ipfsd.disposable((err, node) => { + expect(err).to.not.exist - throw err + node.startDaemon(['--should-not-exist'], (err) => { + expect(err).to.exist + expect( + err.message + ).to.match( + /Unrecognized option 'should-not-exist'/ + ) + + done() + }) }) }) }) diff --git a/test/survivor b/test/survivor new file mode 100755 index 00000000..c75c89b1 --- /dev/null +++ b/test/survivor @@ -0,0 +1,3 @@ +#!/bin/sh +trap "echo 'you cannot kill me!'" SIGHUP SIGINT SIGTERM SIGQUIT SIGPIPE +while true; do sleep 1; done \ No newline at end of file
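
For reference, here is a minimal sketch of how the API documented in `src/index.js` and `src/node.js` above fits together, assuming the package is consumed as `ipfsd-ctl` (as in the README) and with error handling reduced to bare `throw`s; it is an illustration, not part of the change set:

```js
'use strict'

// Sketch: spawn a disposable node, start its daemon, inspect it, stop it.
const ipfsd = require('ipfsd-ctl')

ipfsd.disposable((err, node) => {
  if (err) throw err

  // The repo lives in a temporary directory and is cleaned up on exit
  node.startDaemon((err, api) => {
    if (err) throw err

    api.id((err, id) => {
      if (err) throw err
      console.log(id)

      node.getConfig('Addresses.API', (err, addr) => {
        if (err) throw err
        console.log('API listening on', addr)

        node.stopDaemon((err) => {
          if (err) throw err
        })
      })
    })
  })
})
```

Because the node is disposable, `init` registers a `shutdown` handler, so the temporary repo is removed even if `stopDaemon` is never reached.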