1
0
mirror of https://github.com/mgerb/mywebsite synced 2026-01-12 02:42:48 +00:00

updated bunch of file paths and changed the way posts are loaded

This commit is contained in:
2016-01-05 12:28:04 -06:00
parent 4bb8cae81e
commit 6ab45fe935
13249 changed files with 317868 additions and 2101398 deletions

581
node_modules/mongodb/lib/admin.js generated vendored Normal file
View File

@@ -0,0 +1,581 @@
"use strict";
var toError = require('./utils').toError,
Define = require('./metadata'),
shallowClone = require('./utils').shallowClone;
/**
* @fileOverview The **Admin** class is an internal class that allows convenient access to
* the admin functionality and commands for MongoDB.
*
* **ADMIN Cannot directly be instantiated**
* @example
* var MongoClient = require('mongodb').MongoClient,
* test = require('assert');
* // Connection url
* var url = 'mongodb://localhost:27017/test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, db) {
* // Use the admin database for the operation
* var adminDb = db.admin();
*
* // List all the available databases
* adminDb.listDatabases(function(err, dbs) {
* test.equal(null, err);
* test.ok(dbs.databases.length > 0);
* db.close();
* });
* });
*/
/**
 * Create a new Admin instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @param {object} db The database the admin commands are executed against.
 * @param {object} topology The server topology.
 * @param {object} promiseLibrary The promise constructor used by promise-mode methods.
 * @return {Admin} a collection instance.
 */
var Admin = function(db, topology, promiseLibrary) {
  // Support invocation without `new`. Fix: forward promiseLibrary too — the
  // original dropped it here, so promise-mode methods broke whenever Admin
  // was created as a plain function call. Also removed the unused `self`.
  if(!(this instanceof Admin)) return new Admin(db, topology, promiseLibrary);
  // Internal state
  this.s = {
      db: db
    , topology: topology
    , promiseLibrary: promiseLibrary
  }
}
// Method metadata registry; apm.js reads it to generate instrumentation points.
var define = Admin.define = new Define('Admin', Admin, false);
/**
* The callback format for results
* @callback Admin~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {object} result The result object if the command was executed successfully.
*/
/**
 * Execute a command against the admin database.
 * @method
 * @param {object} command The command hash
 * @param {object} [options=null] Optional settings.
 * @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
 * @param {number} [options.maxTimeMS=null] Number of milliseconds to wait before aborting the query.
 * @param {Admin~resultCallback} [callback] The command result callback
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.command = function(command, options, callback) {
  var admin = this;
  // Normalize the optional (options, callback) tail: the last argument is the
  // callback only when it is actually a function.
  var tail = Array.prototype.slice.call(arguments, 1);
  callback = tail.pop();
  if(typeof callback != 'function') tail.push(callback);
  options = tail.length ? tail.shift() : {};
  // Callback mode
  if(typeof callback == 'function') {
    return this.s.db.executeDbAdminCommand(command, options, function(err, doc) {
      return callback != null ? callback(err, doc) : null;
    });
  }
  // Promise mode
  return new this.s.promiseLibrary(function(resolve, reject) {
    admin.s.db.executeDbAdminCommand(command, options, function(err, doc) {
      if(err) return reject(err);
      resolve(doc);
    });
  });
}
define.classMethod('command', {callback: true, promise:true});
/**
 * Retrieve the server build information (delegates to serverInfo).
 *
 * @param {Admin~resultCallback} [callback] The command result callback
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.buildInfo = function(callback) {
  var admin = this;
  // Callback mode
  if(typeof callback == 'function') return this.serverInfo(callback);
  // Promise mode
  return new this.s.promiseLibrary(function(resolve, reject) {
    admin.serverInfo(function(err, info) {
      if(err) return reject(err);
      resolve(info);
    });
  });
}
define.classMethod('buildInfo', {callback: true, promise:true});
/**
 * Retrieve the server build information via the buildinfo admin command.
 *
 * @param {Admin~resultCallback} [callback] The command result callback
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.serverInfo = function(callback) {
  var admin = this;
  // Callback mode
  if(typeof callback == 'function') {
    return this.s.db.executeDbAdminCommand({buildinfo:1}, function(err, doc) {
      if(err != null) return callback(err, null);
      callback(null, doc);
    });
  }
  // Promise mode
  return new this.s.promiseLibrary(function(resolve, reject) {
    admin.s.db.executeDbAdminCommand({buildinfo:1}, function(err, doc) {
      if(err) return reject(err);
      resolve(doc);
    });
  });
}
define.classMethod('serverInfo', {callback: true, promise:true});
/**
 * Retrieve this db's server status.
 *
 * @param {Admin~resultCallback} [callback] The command result callback
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.serverStatus = function(callback) {
  var admin = this;
  // Callback mode
  if(typeof callback == 'function') return serverStatus(admin, callback)
  // Promise mode
  return new this.s.promiseLibrary(function(resolve, reject) {
    serverStatus(admin, function(err, status) {
      if(err) return reject(err);
      resolve(status);
    });
  });
};
// Shared implementation: run {serverStatus:1}; transport errors and
// command-level failures (doc.ok !== 1) are both reported via the error slot.
var serverStatus = function(self, callback) {
  self.s.db.executeDbAdminCommand({serverStatus: 1}, function(err, doc) {
    if(err) return callback(err, false);
    if(doc.ok === 1) return callback(null, doc);
    return callback(toError(doc), false);
  });
}
define.classMethod('serverStatus', {callback: true, promise:true});
/**
 * Retrieve the current profiling level for MongoDB.
 *
 * @param {Admin~resultCallback} [callback] The command result callback
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.profilingLevel = function(callback) {
  var self = this;
  // Execute using callback
  if(typeof callback == 'function') return profilingLevel(self, callback)
  // Return a Promise
  return new this.s.promiseLibrary(function(resolve, reject) {
    profilingLevel(self, function(err, r) {
      if(err) return reject(err);
      resolve(r);
    });
  });
};
// Shared implementation: run {profile:-1} and map the server's numeric `was`
// field back to the string names accepted by setProfilingLevel.
// Fix: removed the no-op statement `doc = doc;` present in the original.
var profilingLevel = function(self, callback) {
  self.s.db.executeDbAdminCommand({profile:-1}, function(err, doc) {
    if(err == null && doc.ok === 1) {
      var was = doc.was;
      if(was == 0) return callback(null, "off");
      if(was == 1) return callback(null, "slow_only");
      if(was == 2) return callback(null, "all");
      return callback(new Error("Error: illegal profiling level value " + was), null);
    } else {
      err != null ? callback(err, null) : callback(new Error("Error with profile command"), null);
    }
  });
}
define.classMethod('profilingLevel', {callback: true, promise:true});
/**
 * Ping the MongoDB server and retrieve the result.
 *
 * @param {Admin~resultCallback} [callback] The command result callback
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.ping = function(options, callback) {
  var admin = this;
  // The trailing argument, if a function, is the callback; options are
  // accepted for signature compatibility but not forwarded to the command.
  var tail = Array.prototype.slice.call(arguments, 0);
  callback = tail.pop();
  if(typeof callback != 'function') tail.push(callback);
  // Callback mode
  if(typeof callback == 'function') return this.s.db.executeDbAdminCommand({ping: 1}, callback);
  // Promise mode
  return new this.s.promiseLibrary(function(resolve, reject) {
    admin.s.db.executeDbAdminCommand({ping: 1}, function(err, res) {
      if(err) return reject(err);
      resolve(res);
    });
  });
}
define.classMethod('ping', {callback: true, promise:true});
/**
 * Authenticate a user against the server.
 * @method
 * @param {string} username The username.
 * @param {string} [password] The password.
 * @param {Admin~resultCallback} [callback] The command result callback
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.authenticate = function(username, password, options, callback) {
  var admin = this;
  if(typeof options == 'function') {
    callback = options;
    options = {};
  }
  // Always authenticate against the admin database
  options = shallowClone(options);
  options.authdb = 'admin';
  // Callback mode
  if(typeof callback == 'function') return this.s.db.authenticate(username, password, options, callback);
  // Promise mode
  return new this.s.promiseLibrary(function(resolve, reject) {
    admin.s.db.authenticate(username, password, options, function(err, res) {
      if(err) return reject(err);
      resolve(res);
    });
  });
}
define.classMethod('authenticate', {callback: true, promise:true});
/**
 * Logout user from server, fire off on all connections and remove all auth info
 * @method
 * @param {Admin~resultCallback} [callback] The command result callback
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.logout = function(callback) {
  var admin = this;
  // Callback mode
  if(typeof callback == 'function') return this.s.db.logout({authdb: 'admin'}, callback);
  // Promise mode
  return new this.s.promiseLibrary(function(resolve, reject) {
    admin.s.db.logout({authdb: 'admin'}, function(err, res) {
      if(err) return reject(err);
      resolve(res);
    });
  });
}
define.classMethod('logout', {callback: true, promise:true});
// Build the effective write concern for a user-management command.
// Explicit options win; otherwise the db-level write concern is inherited.
var writeConcern = function(options, db) {
  options = shallowClone(options);
  // If options already contain write concerns return it
  if(options.w || options.wtimeout || options.j || options.fsync) {
    return options;
  }
  // Otherwise inherit the db-level write concern, copying only the fields it
  // actually defines. Fix: the original tested `options.*` here, which is
  // always falsy after the early return above, so the db write concern was
  // never applied; it must test `db.writeConcern.*`.
  if(db.writeConcern) {
    if(db.writeConcern.w) options.w = db.writeConcern.w;
    if(db.writeConcern.wtimeout) options.wtimeout = db.writeConcern.wtimeout;
    if(db.writeConcern.j) options.j = db.writeConcern.j;
    if(db.writeConcern.fsync) options.fsync = db.writeConcern.fsync;
  }
  // Return modified options
  return options;
}
/**
 * Add a user to the database.
 * @method
 * @param {string} username The username.
 * @param {string} password The password.
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {object} [options.customData=null] Custom data associated with the user (only Mongodb 2.6 or higher)
 * @param {object[]} [options.roles=null] Roles associated with the created user (only Mongodb 2.6 or higher)
 * @param {Admin~resultCallback} [callback] The command result callback
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.addUser = function(username, password, options, callback) {
  var admin = this;
  // Normalize the optional (options, callback) tail
  var tail = Array.prototype.slice.call(arguments, 2);
  callback = tail.pop();
  if(typeof callback != 'function') tail.push(callback);
  options = (tail.length ? tail.shift() : {}) || {};
  // Merge in the db-level write concern and target the admin database
  options = writeConcern(options, admin.s.db)
  options.dbName = 'admin';
  // Callback mode
  if(typeof callback == 'function')
    return admin.s.db.addUser(username, password, options, callback);
  // Promise mode
  return new this.s.promiseLibrary(function(resolve, reject) {
    admin.s.db.addUser(username, password, options, function(err, res) {
      if(err) return reject(err);
      resolve(res);
    });
  });
}
define.classMethod('addUser', {callback: true, promise:true});
/**
 * Remove a user from the admin database.
 * @method
 * @param {string} username The username.
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {Admin~resultCallback} [callback] The command result callback
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.removeUser = function(username, options, callback) {
  var admin = this;
  // Normalize the optional (options, callback) tail
  var tail = Array.prototype.slice.call(arguments, 1);
  callback = tail.pop();
  if(typeof callback != 'function') tail.push(callback);
  options = (tail.length ? tail.shift() : {}) || {};
  // Merge in the db-level write concern and target the admin database
  options = writeConcern(options, admin.s.db)
  options.dbName = 'admin';
  // Callback mode
  if(typeof callback == 'function')
    return admin.s.db.removeUser(username, options, callback);
  // Promise mode
  return new this.s.promiseLibrary(function(resolve, reject) {
    admin.s.db.removeUser(username, options, function(err, res) {
      if(err) return reject(err);
      resolve(res);
    });
  });
}
define.classMethod('removeUser', {callback: true, promise:true});
/**
 * Set the current profiling level of MongoDB
 *
 * @param {string} level The new profiling level (off, slow_only, all).
 * @param {Admin~resultCallback} [callback] The command result callback.
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.setProfilingLevel = function(level, callback) {
  var self = this;
  // Execute using callback
  if(typeof callback == 'function') return setProfilingLevel(self, level, callback);
  // Return a Promise
  return new this.s.promiseLibrary(function(resolve, reject) {
    setProfilingLevel(self, level, function(err, r) {
      if(err) return reject(err);
      resolve(r);
    });
  });
};
// Shared implementation: translate the string level to the numeric value the
// server's {profile:<n>} command expects, then execute it.
// Fix: removed the no-op statement `doc = doc;` present in the original.
var setProfilingLevel = function(self, level, callback) {
  var command = {};
  var profile = 0;
  if(level == "off") {
    profile = 0;
  } else if(level == "slow_only") {
    profile = 1;
  } else if(level == "all") {
    profile = 2;
  } else {
    // Unknown level names fail synchronously through the callback
    return callback(new Error("Error: illegal profiling level value " + level));
  }
  // Set up the profile number
  command['profile'] = profile;
  self.s.db.executeDbAdminCommand(command, function(err, doc) {
    if(err == null && doc.ok === 1)
      return callback(null, level);
    return err != null ? callback(err, null) : callback(new Error("Error with profile command"), null);
  });
}
define.classMethod('setProfilingLevel', {callback: true, promise:true});
/**
 * Retrieve the current profiling information for MongoDB.
 *
 * @param {Admin~resultCallback} [callback] The command result callback.
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.profilingInfo = function(callback) {
  var admin = this;
  // Callback mode
  if(typeof callback == 'function') return profilingInfo(admin, callback);
  // Promise mode
  return new this.s.promiseLibrary(function(resolve, reject) {
    profilingInfo(admin, function(err, info) {
      if(err) return reject(err);
      resolve(info);
    });
  });
};
// Shared implementation: read the profiling data from admin.system.profile
// via a raw topology cursor; synchronous cursor failures are routed to the
// callback instead of being thrown.
var profilingInfo = function(self, callback) {
  try {
    self.s.topology.cursor("admin.system.profile", { find: 'system.profile', query: {}}, {}).toArray(callback);
  } catch (err) {
    return callback(err, null);
  }
}
// Fix: register under the method's own name — the original re-registered
// 'profilingLevel' here, leaving profilingInfo invisible to instrumentation.
define.classMethod('profilingInfo', {callback: true, promise:true});
/**
 * Validate an existing collection
 *
 * @param {string} collectionName The name of the collection to validate.
 * @param {object} [options=null] Optional settings.
 * @param {Admin~resultCallback} [callback] The command result callback.
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.validateCollection = function(collectionName, options, callback) {
  var admin = this;
  // Normalize the optional (options, callback) tail
  var tail = Array.prototype.slice.call(arguments, 1);
  callback = tail.pop();
  if(typeof callback != 'function') tail.push(callback);
  options = (tail.length ? tail.shift() : {}) || {};
  // Callback mode
  if(typeof callback == 'function')
    return validateCollection(admin, collectionName, options, callback);
  // Promise mode
  return new this.s.promiseLibrary(function(resolve, reject) {
    validateCollection(admin, collectionName, options, function(err, res) {
      if(err) return reject(err);
      resolve(res);
    });
  });
};
// Shared implementation: run the validate command with any caller-supplied
// options merged in, then interpret the response document — corruption,
// invalid collections and malformed results are all surfaced as errors.
var validateCollection = function(self, collectionName, options, callback) {
  var command = {validate: collectionName};
  // Decorate the command with any extra caller-supplied options
  for(var key in options) {
    if(options.hasOwnProperty(key)) command[key] = options[key];
  }
  self.s.db.command(command, function(err, doc) {
    if(err != null) return callback(err, null);
    if(doc.ok === 0)
      return callback(new Error("Error with validate command"), null);
    if(doc.result != null && doc.result.constructor != String)
      return callback(new Error("Error with validation data"), null);
    if(doc.result != null && doc.result.match(/exception|corrupt/) != null)
      return callback(new Error("Error: invalid collection " + collectionName), null);
    if(doc.valid != null && !doc.valid)
      return callback(new Error("Error: invalid collection " + collectionName), null);
    return callback(null, doc);
  });
}
define.classMethod('validateCollection', {callback: true, promise:true});
/**
 * List all the databases available on the server.
 *
 * @param {Admin~resultCallback} [callback] The command result callback.
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.listDatabases = function(callback) {
  var admin = this;
  // Callback mode
  if(typeof callback == 'function') return admin.s.db.executeDbAdminCommand({listDatabases:1}, {}, callback);
  // Promise mode
  return new this.s.promiseLibrary(function(resolve, reject) {
    admin.s.db.executeDbAdminCommand({listDatabases:1}, {}, function(err, res) {
      if(err) return reject(err);
      resolve(res);
    });
  });
}
define.classMethod('listDatabases', {callback: true, promise:true});
/**
 * Get the current replica set status (replSetGetStatus command).
 *
 * @param {Admin~resultCallback} [callback] The command result callback.
 * @return {Promise} returns Promise if no callback passed
 */
Admin.prototype.replSetGetStatus = function(callback) {
  var admin = this;
  // Callback mode
  if(typeof callback == 'function') return replSetGetStatus(admin, callback);
  // Promise mode
  return new this.s.promiseLibrary(function(resolve, reject) {
    replSetGetStatus(admin, function(err, status) {
      if(err) return reject(err);
      resolve(status);
    });
  });
};
// Shared implementation: transport errors first, then command-level failures
// (doc.ok !== 1) converted to errors from the response document.
var replSetGetStatus = function(self, callback) {
  self.s.db.executeDbAdminCommand({replSetGetStatus:1}, function(err, doc) {
    if(err) return callback(err, false);
    if(doc.ok === 1) return callback(null, doc);
    callback(toError(doc), false);
  });
}
define.classMethod('replSetGetStatus', {callback: true, promise:true});
module.exports = Admin;

432
node_modules/mongodb/lib/aggregation_cursor.js generated vendored Normal file
View File

@@ -0,0 +1,432 @@
"use strict";
var inherits = require('util').inherits
, f = require('util').format
, toError = require('./utils').toError
, getSingleProperty = require('./utils').getSingleProperty
, formattedOrderClause = require('./utils').formattedOrderClause
, handleCallback = require('./utils').handleCallback
, Logger = require('mongodb-core').Logger
, EventEmitter = require('events').EventEmitter
, ReadPreference = require('./read_preference')
, MongoError = require('mongodb-core').MongoError
, Readable = require('stream').Readable || require('readable-stream').Readable
, Define = require('./metadata')
, CoreCursor = require('./cursor')
, Query = require('mongodb-core').Query
, CoreReadPreference = require('mongodb-core').ReadPreference;
/**
* @fileOverview The **AggregationCursor** class is an internal class that embodies an aggregation cursor on MongoDB
* allowing for iteration over the results returned from the underlying query. It supports
* one by one document iteration, conversion to an array or can be iterated as a Node 0.10.X
* or higher stream
*
* **AGGREGATIONCURSOR Cannot directly be instantiated**
* @example
* var MongoClient = require('mongodb').MongoClient,
* test = require('assert');
* // Connection url
* var url = 'mongodb://localhost:27017/test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, db) {
* // Create a collection we want to drop later
* var col = db.collection('createIndexExample1');
* // Insert a bunch of documents
* col.insert([{a:1, b:1}
* , {a:2, b:2}, {a:3, b:3}
* , {a:4, b:4}], {w:1}, function(err, result) {
* test.equal(null, err);
* // Show that duplicate records got dropped
* col.aggregation({}, {cursor: {}}).toArray(function(err, items) {
* test.equal(null, err);
* test.equal(4, items.length);
* db.close();
* });
* });
* });
*/
/**
* Readable stream base class provided by Node.js (or the readable-stream polyfill).
* @external Readable
*/
/**
 * Creates a new Aggregation Cursor instance (INTERNAL TYPE, do not instantiate directly)
 * @class AggregationCursor
 * @extends external:Readable
 * @fires AggregationCursor#data
 * @fires AggregationCursor#end
 * @fires AggregationCursor#close
 * @fires AggregationCursor#readable
 * @return {AggregationCursor} an AggregationCursor instance.
 */
var AggregationCursor = function(bson, ns, cmd, options, topology, topologyOptions) {
  CoreCursor.apply(this, Array.prototype.slice.call(arguments, 0));
  var state = AggregationCursor.INIT;
  var streamOptions = {};
  // Resolve the promise implementation: caller-supplied library first, then
  // the global Promise, then the es6-promise polyfill.
  var promiseLibrary = options.promiseLibrary;
  if(!promiseLibrary) {
    promiseLibrary = typeof global.Promise == 'function' ?
      global.Promise : require('es6-promise').Promise;
  }
  // Operate as an object-mode readable stream
  Readable.call(this, {objectMode: true});
  // Internal state
  this.s = {
    // MaxTimeMS
      maxTimeMS: null
    // State
    , state: state
    // Stream options
    , streamOptions: streamOptions
    // BSON
    , bson: bson
    // Namespace
    , ns: ns
    // Command
    , cmd: cmd
    // Options
    , options: options
    // Topology
    , topology: topology
    // Topology Options
    , topologyOptions: topologyOptions
    // Promise library
    , promiseLibrary: promiseLibrary
  }
}
/**
* AggregationCursor stream data event, fired for each document in the cursor.
*
* @event AggregationCursor#data
* @type {object}
*/
/**
* AggregationCursor stream end event
*
* @event AggregationCursor#end
* @type {null}
*/
/**
* AggregationCursor stream close event
*
* @event AggregationCursor#close
* @type {null}
*/
/**
* AggregationCursor stream readable event
*
* @event AggregationCursor#readable
* @type {null}
*/
// Inherit from Readable
inherits(AggregationCursor, Readable);
// Set the methods to inherit from prototype
// NOTE(review): this list is never consulted anywhere in this file — the
// for..in loop below copies every CoreCursor.prototype property regardless.
var methodsToInherit = ['_next', 'next', 'each', 'forEach', 'toArray'
, 'rewind', 'bufferedCount', 'readBufferedDocuments', 'close', 'isClosed', 'kill'
, '_find', '_getmore', '_killcursor', 'isDead', 'explain', 'isNotified'];
// Extend the Cursor
// Mix every CoreCursor prototype method onto AggregationCursor, which
// already inherits from Readable via `inherits` above.
for(var name in CoreCursor.prototype) {
AggregationCursor.prototype[name] = CoreCursor.prototype[name];
}
// Method metadata registry; apm.js reads it to generate instrumentation points.
var define = AggregationCursor.define = new Define('AggregationCursor', AggregationCursor, true);
/**
 * Set the batch size for the cursor.
 * @method
 * @param {number} value The batchSize for the cursor.
 * @throws {MongoError}
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.batchSize = function(value) {
  // Reject mutation of a closed or dead cursor
  if(this.s.state == AggregationCursor.CLOSED || this.isDead()) throw MongoError.create({message: "Cursor is closed", driver:true });
  // Fix: the error metadata key below was misspelled 'drvier' in the original
  if(typeof value != 'number') throw MongoError.create({message: "batchSize requires an integer", driver:true });
  // Keep the command document and the wire-level cursor batch size in sync
  if(this.s.cmd.cursor) this.s.cmd.cursor.batchSize = value;
  this.setCursorBatchSize(value);
  return this;
}
define.classMethod('batchSize', {callback: false, promise:false, returns: [AggregationCursor]});
/**
 * Append a $geoNear stage to the aggregation pipeline.
 * @method
 * @param {object} document The geoNear stage document.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.geoNear = function(document) {
  var stage = {$geoNear: document};
  this.s.cmd.pipeline.push(stage);
  return this;
}
define.classMethod('geoNear', {callback: false, promise:false, returns: [AggregationCursor]});
/**
 * Append a $group stage to the aggregation pipeline.
 * @method
 * @param {object} document The group stage document.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.group = function(document) {
  var stage = {$group: document};
  this.s.cmd.pipeline.push(stage);
  return this;
}
define.classMethod('group', {callback: false, promise:false, returns: [AggregationCursor]});
/**
 * Append a $limit stage to the aggregation pipeline.
 * @method
 * @param {number} value The limit value.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.limit = function(value) {
  var stage = {$limit: value};
  this.s.cmd.pipeline.push(stage);
  return this;
}
define.classMethod('limit', {callback: false, promise:false, returns: [AggregationCursor]});
/**
 * Append a $match stage to the aggregation pipeline.
 * @method
 * @param {object} document The match stage document.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.match = function(document) {
  var stage = {$match: document};
  this.s.cmd.pipeline.push(stage);
  return this;
}
define.classMethod('match', {callback: false, promise:false, returns: [AggregationCursor]});
/**
 * Set the maxTimeMS for the aggregation command.
 * Only applied when the connected server reports minWireVersion > 2.
 * @method
 * @param {number} value The maxTimeMS value in milliseconds.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.maxTimeMS = function(value) {
  if(this.s.topology.lastIsMaster().minWireVersion > 2) {
    this.s.cmd.maxTimeMS = value;
  }
  return this;
}
define.classMethod('maxTimeMS', {callback: false, promise:false, returns: [AggregationCursor]});
/**
 * Append an $out stage writing the results to the named collection.
 * @method
 * @param {string} destination The destination collection name.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.out = function(destination) {
  var stage = {$out: destination};
  this.s.cmd.pipeline.push(stage);
  return this;
}
define.classMethod('out', {callback: false, promise:false, returns: [AggregationCursor]});
/**
 * Append a $project stage to the aggregation pipeline.
 * @method
 * @param {object} document The project stage document.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.project = function(document) {
  var stage = {$project: document};
  this.s.cmd.pipeline.push(stage);
  return this;
}
define.classMethod('project', {callback: false, promise:false, returns: [AggregationCursor]});
/**
 * Append a $redact stage to the aggregation pipeline.
 * @method
 * @param {object} document The redact stage document.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.redact = function(document) {
  var stage = {$redact: document};
  this.s.cmd.pipeline.push(stage);
  return this;
}
define.classMethod('redact', {callback: false, promise:false, returns: [AggregationCursor]});
/**
 * Append a $skip stage to the aggregation pipeline.
 * @method
 * @param {number} value The skip value.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.skip = function(value) {
  var stage = {$skip: value};
  this.s.cmd.pipeline.push(stage);
  return this;
}
define.classMethod('skip', {callback: false, promise:false, returns: [AggregationCursor]});
/**
 * Append a $sort stage to the aggregation pipeline.
 * @method
 * @param {object} document The sort stage document.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.sort = function(document) {
  var stage = {$sort: document};
  this.s.cmd.pipeline.push(stage);
  return this;
}
define.classMethod('sort', {callback: false, promise:false, returns: [AggregationCursor]});
/**
 * Append a $unwind stage to the aggregation pipeline.
 * @method
 * @param {string} field The field path to unwind.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.unwind = function(field) {
  var stage = {$unwind: field};
  this.s.cmd.pipeline.push(stage);
  return this;
}
define.classMethod('unwind', {callback: false, promise:false, returns: [AggregationCursor]});
// Legacy alias: cursor.get(...) behaves exactly like cursor.toArray(...)
AggregationCursor.prototype.get = AggregationCursor.prototype.toArray;
// Inherited methods
// Register the CoreCursor methods (mixed onto the prototype above) with the
// instrumentation metadata registry so apm.js can wrap them too.
define.classMethod('toArray', {callback: true, promise:true});
define.classMethod('each', {callback: true, promise:false});
define.classMethod('forEach', {callback: true, promise:false});
define.classMethod('next', {callback: true, promise:true});
define.classMethod('close', {callback: true, promise:true});
define.classMethod('isClosed', {callback: false, promise:false, returns: [Boolean]});
define.classMethod('rewind', {callback: false, promise:false});
define.classMethod('bufferedCount', {callback: false, promise:false, returns: [Number]});
define.classMethod('readBufferedDocuments', {callback: false, promise:false, returns: [Array]});
/**
* Get the next available document from the cursor, returns null if no more documents are available.
* @function AggregationCursor.prototype.next
* @param {AggregationCursor~resultCallback} [callback] The result callback.
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
*/
/**
* The callback format for results
* @callback AggregationCursor~toArrayResultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {object[]} documents All the documents that satisfy the cursor.
*/
/**
* Returns an array of documents. The caller is responsible for making sure that there
* is enough memory to store the results. Note that the array only contain partial
* results when this cursor had been previously accessed. In that case,
* cursor.rewind() can be used to reset the cursor.
* @method AggregationCursor.prototype.toArray
* @param {AggregationCursor~toArrayResultCallback} [callback] The result callback.
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
*/
/**
* The callback format for results
* @callback AggregationCursor~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {(object|null)} result The result object if the command was executed successfully.
*/
/**
* Iterates over all the documents for this cursor. As with **{cursor.toArray}**,
* not all of the elements will be iterated if this cursor had been previously accessed.
* In that case, **{cursor.rewind}** can be used to reset the cursor. However, unlike
* **{cursor.toArray}**, the cursor will only hold a maximum of batch size elements
* at any given time if batch size is specified. Otherwise, the caller is responsible
* for making sure that the entire result can fit the memory.
* @method AggregationCursor.prototype.each
* @param {AggregationCursor~resultCallback} callback The result callback.
* @throws {MongoError}
* @return {null}
*/
/**
* Close the cursor, sending a AggregationCursor command and emitting close.
* @method AggregationCursor.prototype.close
* @param {AggregationCursor~resultCallback} [callback] The result callback.
* @return {Promise} returns Promise if no callback passed
*/
/**
* Is the cursor closed
* @method AggregationCursor.prototype.isClosed
* @return {boolean}
*/
/**
* Execute the explain for the cursor
* @method AggregationCursor.prototype.explain
* @param {AggregationCursor~resultCallback} [callback] The result callback.
* @return {Promise} returns Promise if no callback passed
*/
/**
* Clone the cursor
* @function AggregationCursor.prototype.clone
* @return {AggregationCursor}
*/
/**
* Resets the cursor
* @function AggregationCursor.prototype.rewind
* @return {AggregationCursor}
*/
/**
* The callback format for the forEach iterator method
* @callback AggregationCursor~iteratorCallback
* @param {Object} doc An emitted document for the iterator
*/
/**
* The callback error format for the forEach iterator method
* @callback AggregationCursor~endCallback
* @param {MongoError} error An error instance representing the error during the execution.
*/
/*
* Iterates over all the documents for this cursor using the iterator, callback pattern.
* @method AggregationCursor.prototype.forEach
* @param {AggregationCursor~iteratorCallback} iterator The iteration callback.
* @param {AggregationCursor~endCallback} callback The end callback.
* @throws {MongoError}
* @return {null}
*/
// Cursor lifecycle states (compared against this.s.state, e.g. in batchSize)
AggregationCursor.INIT = 0;
AggregationCursor.OPEN = 1;
AggregationCursor.CLOSED = 2;
module.exports = AggregationCursor;

608
node_modules/mongodb/lib/apm.js generated vendored Normal file
View File

@@ -0,0 +1,608 @@
var EventEmitter = require('events').EventEmitter,
inherits = require('util').inherits;
// Get prototypes
var AggregationCursor = require('./aggregation_cursor'),
CommandCursor = require('./command_cursor'),
OrderedBulkOperation = require('./bulk/ordered').OrderedBulkOperation,
UnorderedBulkOperation = require('./bulk/unordered').UnorderedBulkOperation,
GridStore = require('./gridfs/grid_store'),
Server = require('./server'),
ReplSet = require('./replset'),
Mongos = require('./mongos'),
Cursor = require('./cursor'),
Collection = require('./collection'),
Db = require('./db'),
Admin = require('./admin');
// Default operation-id source: a monotonically increasing counter starting
// at 1, shared by all instrumented operations.
var basicOperationIdGenerator = {
  operationId: 1,
  next: function() {
    var id = this.operationId;
    this.operationId = id + 1;
    return id;
  }
}
// Default timestamp source: wall-clock milliseconds, with duration computed
// as a plain end - start difference.
var basicTimestampGenerator = {
  current: function() {
    return Date.now();
  },
  duration: function(start, end) {
    return end - start;
  }
}
// Commands whose payloads must be redacted from APM events because they may
// carry credentials. NOTE(review): "senstive" is a typo for "sensitive", but
// the variable is module-internal and the name is kept to match its usages.
var senstiveCommands = ['authenticate', 'saslStart', 'saslContinue', 'getnonce',
'createUser', 'updateUser', 'copydbgetnonce', 'copydbsaslstart', 'copydb'];
/**
 * APM (command monitoring) instrumentation. Wraps the core Server
 * command/write methods, the bulk execute methods and the cursor wire
 * protocol methods so that 'started', 'succeeded' and 'failed' command
 * events are emitted for every operation.
 *
 * @param {object} core the mongodb-core module (provides Server and Query)
 * @param {object} [options] optional settings
 * @param {object} [options.operationIdGenerator] custom operation id generator
 * @param {object} [options.timestampGenerator] custom timestamp generator
 * @param {function} [callback] receives the list of instrumentation points
 */
var Instrumentation = function(core, options, callback) {
  options = options || {};

  // Optional id generator
  var operationIdGenerator = options.operationIdGenerator || basicOperationIdGenerator;
  // Optional timestamp generator
  var timestampGenerator = options.timestampGenerator || basicTimestampGenerator;

  // Extend with event emitter functionality
  EventEmitter.call(this);

  // Contains all the instrumentation overloads so uninstrument() can
  // restore the original functions
  this.overloads = [];

  // ---------------------------------------------------------
  //
  // Instrument prototype
  //
  // ---------------------------------------------------------

  var instrumentPrototype = function(callback) {
    var instrumentations = [];

    // Classes to support
    var classes = [GridStore, OrderedBulkOperation, UnorderedBulkOperation,
      CommandCursor, AggregationCursor, Cursor, Collection, Db];

    // Add instrumentations to the available list
    for(var i = 0; i < classes.length; i++) {
      if(classes[i].define) {
        instrumentations.push(classes[i].define.generate());
      }
    }

    // Return the list of instrumentation points
    callback(null, instrumentations);
  }

  // Did the user want to instrument the prototype
  if(typeof callback == 'function') {
    instrumentPrototype(callback);
  }

  // ---------------------------------------------------------
  //
  // Server
  //
  // ---------------------------------------------------------

  // Reference
  var self = this;
  // Names of methods we need to wrap
  var methods = ['command', 'insert', 'update', 'remove'];
  // Prototype
  var proto = core.Server.prototype;

  // Core server methods we are going to wrap
  methods.forEach(function(x) {
    var func = proto[x];
    // Add to overloaded methods
    self.overloads.push({proto: proto, name:x, func:func});

    // The actual prototype
    proto[x] = function() {
      var requestId = core.Query.nextRequestId();
      // Get the arguments
      var args = Array.prototype.slice.call(arguments, 0);
      var ns = args[0];
      var commandObj = args[1];
      var options = args[2] || {};
      var keys = Object.keys(commandObj);
      var commandName = keys[0];
      var db = ns.split('.')[0];

      // Do we have a legacy insert/update/remove command: rewrite it as the
      // equivalent write command so emitted events are wire-version agnostic
      if(x == 'insert' && !this.lastIsMaster().maxWireVersion) {
        commandName = 'insert';

        // Get the collection
        var col = ns.split('.');
        col.shift();
        col = col.join('.');

        // Re-write the command
        commandObj = {
          insert: col, documents: commandObj
        }

        if(options.writeConcern && Object.keys(options.writeConcern).length > 0) {
          commandObj.writeConcern = options.writeConcern;
        }

        commandObj.ordered = options.ordered != undefined ? options.ordered : true;
      } else if(x == 'update' && !this.lastIsMaster().maxWireVersion) {
        commandName = 'update';

        // Get the collection
        var col = ns.split('.');
        col.shift();
        col = col.join('.');

        // Re-write the command
        commandObj = {
          update: col, updates: commandObj
        }

        if(options.writeConcern && Object.keys(options.writeConcern).length > 0) {
          commandObj.writeConcern = options.writeConcern;
        }

        commandObj.ordered = options.ordered != undefined ? options.ordered : true;
      } else if(x == 'remove' && !this.lastIsMaster().maxWireVersion) {
        commandName = 'delete';

        // Get the collection
        var col = ns.split('.');
        col.shift();
        col = col.join('.');

        // Re-write the command
        commandObj = {
          delete: col, deletes: commandObj
        }

        if(options.writeConcern && Object.keys(options.writeConcern).length > 0) {
          commandObj.writeConcern = options.writeConcern;
        }

        commandObj.ordered = options.ordered != undefined ? options.ordered : true;
      } else if((x == 'insert' || x == 'update' || x == 'remove')
        && this.lastIsMaster().maxWireVersion >= 2) {
        // Skip the insert/update/remove commands as they are executed as
        // actual write commands in 2.6 or higher and instrumented there.
        // FIX: parentheses added around the || chain; the original
        // `a || b || c && cond` bound the wire version check to 'remove' only.
        return func.apply(this, args);
      }

      // Get the callback
      var callback = args.pop();

      // Set current callback operation id from the current context or create
      // a new one
      var ourOpId = callback.operationId || operationIdGenerator.next();

      // Get a connection reference for this server instance
      var connection = this.s.pool.get()

      // Emit the start event for the command
      var command = {
        // Returns the command.
        command: commandObj,
        // Returns the database name.
        databaseName: db,
        // Returns the command name.
        commandName: commandName,
        // Returns the driver generated request id.
        requestId: requestId,
        // Returns the driver generated operation id.
        // This is used to link events together such as bulk write operations. OPTIONAL.
        operationId: ourOpId,
        // Returns the connection id for the command. For languages that do not have this,
        // this MUST return the driver equivalent which MUST include the server address and port.
        // The name of this field is flexible to match the object that is returned from the driver.
        connectionId: connection
      };

      // Redact sensitive commands (per the command monitoring spec).
      // FIX: the original `indexOf(...)` truthiness check redacted everything
      // except the list's first entry, and it wrote to the unused
      // `commandObj` field instead of the emitted `command` field.
      if(senstiveCommands.indexOf(commandName.toLowerCase()) != -1) {
        command.command = {};
        command.command[commandName] = true;
      }

      // Emit the started event
      self.emit('started', command)

      // Start time
      var startTime = timestampGenerator.current();

      // Push our handler callback
      args.push(function(err, r) {
        var endTime = timestampGenerator.current();
        var command = {
          duration: timestampGenerator.duration(startTime, endTime),
          commandName: commandName,
          requestId: requestId,
          operationId: ourOpId,
          connectionId: connection
        };

        // If we have an error
        if(err || (r && r.result && r.result.ok == 0)) {
          command.failure = err || r.result.writeErrors || r.result;

          // Redact sensitive commands (same `!= -1` fix as above)
          if(senstiveCommands.indexOf(commandName.toLowerCase()) != -1) {
            command.failure = {};
          }

          self.emit('failed', command);
        } else if(commandObj && commandObj.writeConcern
          && commandObj.writeConcern.w == 0) {
          // If we have write concern 0
          command.reply = {ok:1};
          self.emit('succeeded', command);
        } else {
          command.reply = r && r.result ? r.result : r;

          // Redact sensitive commands
          if(senstiveCommands.indexOf(commandName.toLowerCase()) != -1) {
            command.reply = {};
          }

          self.emit('succeeded', command);
        }

        // Return to caller
        callback(err, r);
      });

      // Apply the call
      func.apply(this, args);
    }
  });

  // ---------------------------------------------------------
  //
  // Bulk Operations
  //
  // ---------------------------------------------------------

  // Inject ourselves into the Bulk methods
  methods = ['execute'];
  var prototypes = [
    require('./bulk/ordered').Bulk.prototype,
    require('./bulk/unordered').Bulk.prototype
  ]

  prototypes.forEach(function(proto) {
    // Core bulk method we are going to wrap
    methods.forEach(function(x) {
      var func = proto[x];
      // Add to overloaded methods
      self.overloads.push({proto: proto, name:x, func:func});

      // The actual prototype
      proto[x] = function() {
        // Get the arguments
        var args = Array.prototype.slice.call(arguments, 0);

        // Set an operation Id on the bulk object, linking the individual
        // write commands it issues back to this execute call
        this.operationId = operationIdGenerator.next();

        // Get the callback
        var callback = args.pop();

        // If we have a callback use this
        if(typeof callback == 'function') {
          args.push(function(err, r) {
            // Return to caller
            callback(err, r);
          });

          // Apply the call
          func.apply(this, args);
        } else {
          return func.apply(this, args);
        }
      }
    });
  });

  // ---------------------------------------------------------
  //
  // Cursor
  //
  // ---------------------------------------------------------

  // Inject ourselves into the Cursor methods
  methods = ['_find', '_getmore', '_killcursor'];
  prototypes = [
    require('./cursor').prototype,
    require('./command_cursor').prototype,
    require('./aggregation_cursor').prototype
  ]

  // Command name translation
  var commandTranslation = {
    '_find': 'find', '_getmore': 'getMore', '_killcursor': 'killCursors', '_explain': 'explain'
  }

  prototypes.forEach(function(proto) {
    // Core cursor methods we are going to wrap
    methods.forEach(function(x) {
      var func = proto[x];
      // Add to overloaded methods
      self.overloads.push({proto: proto, name:x, func:func});

      // The actual prototype
      proto[x] = function() {
        var cursor = this;
        var requestId = core.Query.nextRequestId();
        var ourOpId = operationIdGenerator.next();
        var parts = this.ns.split('.');
        var db = parts[0];

        // Get the collection
        parts.shift();
        var collection = parts.join('.');

        // Set the command
        var command = this.query;
        var cmd = this.s.cmd;

        // If we have a find method, set the operationId on the cursor
        if(x == '_find') {
          cursor.operationId = ourOpId;
        }

        // Rewrite the wire protocol operation into the equivalent command
        // shape for the emitted event
        if(x == '_getmore') {
          command = {
            getMore: this.cursorState.cursorId,
            collection: collection,
            batchSize: cmd.batchSize
          }

          if(cmd.maxTimeMS) command.maxTimeMS = cmd.maxTimeMS;
        } else if(x == '_killcursor') {
          // FIX: the original compared against '_killcursors' (trailing s),
          // which never matched the wrapped method name '_killcursor', so
          // killCursors events carried the wrong command document
          command = {
            killCursors: collection,
            cursors: [this.cursorState.cursorId]
          }
        } else if(cmd.find) {
          command = {
            find: collection, filter: cmd.query
          }

          if(cmd.sort) command.sort = cmd.sort;
          if(cmd.fields) command.projection = cmd.fields;

          // A negative limit means "single batch" in the legacy protocol
          if(cmd.limit && cmd.limit < 0) {
            command.limit = Math.abs(cmd.limit);
            command.singleBatch = true;
          } else if(cmd.limit) {
            command.limit = Math.abs(cmd.limit);
          }

          // Options
          if(cmd.skip) command.skip = cmd.skip;
          if(cmd.hint) command.hint = cmd.hint;
          if(cmd.batchSize) command.batchSize = cmd.batchSize;
          if(typeof cmd.returnKey == 'boolean') command.returnKey = cmd.returnKey;
          if(cmd.comment) command.comment = cmd.comment;
          if(cmd.min) command.min = cmd.min;
          if(cmd.max) command.max = cmd.max;
          if(cmd.maxScan) command.maxScan = cmd.maxScan;
          if(cmd.maxTimeMS) command.maxTimeMS = cmd.maxTimeMS;

          // Flags
          if(typeof cmd.awaitData == 'boolean') command.awaitData = cmd.awaitData;
          if(typeof cmd.snapshot == 'boolean') command.snapshot = cmd.snapshot;
          if(typeof cmd.tailable == 'boolean') command.tailable = cmd.tailable;
          if(typeof cmd.oplogReplay == 'boolean') command.oplogReplay = cmd.oplogReplay;
          if(typeof cmd.noCursorTimeout == 'boolean') command.noCursorTimeout = cmd.noCursorTimeout;
          if(typeof cmd.partial == 'boolean') command.partial = cmd.partial;
          if(typeof cmd.showDiskLoc == 'boolean') command.showRecordId = cmd.showDiskLoc;

          // Read Concern
          if(cmd.readConcern) command.readConcern = cmd.readConcern;

          // Override method
          if(cmd.explain) command.explain = cmd.explain;
          if(cmd.exhaust) command.exhaust = cmd.exhaust;

          // If we have a explain flag
          if(cmd.explain) {
            // Create fake explain command
            command = {
              explain: command,
              verbosity: 'allPlansExecution'
            }

            // Set readConcern on the command if available
            if(cmd.readConcern) command.readConcern = cmd.readConcern

            // Set up the _explain name for the command
            x = '_explain';
          }
        } else {
          command = cmd;
        }

        // Set up the connection
        var connectionId = null;

        // Set local connection
        if(this.connection) connectionId = this.connection;
        if(!connectionId && this.server && this.server.getConnection) connectionId = this.server.getConnection();

        // Get the command Name
        var commandName = x == '_find' ? Object.keys(command)[0] : commandTranslation[x];

        // Emit the start event for the command
        var command = {
          // Returns the command.
          command: command,
          // Returns the database name.
          databaseName: db,
          // Returns the command name.
          commandName: commandName,
          // Returns the driver generated request id.
          requestId: requestId,
          // Returns the driver generated operation id.
          // This is used to link events together such as bulk write operations. OPTIONAL.
          operationId: this.operationId,
          // Returns the connection id for the command. For languages that do not have this,
          // this MUST return the driver equivalent which MUST include the server address and port.
          // The name of this field is flexible to match the object that is returned from the driver.
          connectionId: connectionId
        };

        // Get the arguments
        var args = Array.prototype.slice.call(arguments, 0);
        // Get the callback
        var callback = args.pop();

        // We do not have a callback but a Promise
        if(typeof callback == 'function' || command.commandName == 'killCursors') {
          var startTime = timestampGenerator.current();
          // Emit the started event
          self.emit('started', command)

          // Emit succeeded event with killcursor if we have a legacy protocol
          if(command.commandName == 'killCursors'
            && this.server.lastIsMaster()
            && this.server.lastIsMaster().maxWireVersion < 4) {
            // Emit the succeeded command
            var command = {
              duration: timestampGenerator.duration(startTime, timestampGenerator.current()),
              commandName: commandName,
              requestId: requestId,
              operationId: cursor.operationId,
              connectionId: cursor.server.getConnection(),
              reply: [{ok:1}]
            };

            // Emit the command
            return self.emit('succeeded', command)
          }

          // Add our callback handler
          args.push(function(err, r) {
            if(err) {
              // Command
              var command = {
                duration: timestampGenerator.duration(startTime, timestampGenerator.current()),
                commandName: commandName,
                requestId: requestId,
                operationId: ourOpId,
                connectionId: cursor.server.getConnection(),
                failure: err };

              // Emit the command
              self.emit('failed', command)
            } else {
              // Synthesize a command reply from the cursor state when the
              // legacy wire protocol returned no command-shaped result
              if(commandName.toLowerCase() == 'getmore' && r == null) {
                r = {
                  cursor: {
                    id: cursor.cursorState.cursorId,
                    ns: cursor.ns,
                    nextBatch: cursor.cursorState.documents
                  }, ok:1
                }
              } else if(commandName.toLowerCase() == 'find' && r == null) {
                r = {
                  cursor: {
                    id: cursor.cursorState.cursorId,
                    ns: cursor.ns,
                    firstBatch: cursor.cursorState.documents
                  }, ok:1
                }
              } else if(commandName.toLowerCase() == 'killcursors' && r == null) {
                r = {
                  cursorsUnknown:[cursor.cursorState.lastCursorId],
                  ok:1
                }
              }

              // cursor id is zero, we can issue success command
              var command = {
                duration: timestampGenerator.duration(startTime, timestampGenerator.current()),
                commandName: commandName,
                requestId: requestId,
                operationId: cursor.operationId,
                connectionId: cursor.server.getConnection(),
                reply: r && r.result ? r.result : r
              };

              // Emit the command
              self.emit('succeeded', command)
            }

            // Return
            if(!callback) return;

            // Return to caller
            callback(err, r);
          });

          // Apply the call
          func.apply(this, args);
        } else {
          // Assume promise, push back the missing value
          args.push(callback);

          // Get the promise
          var promise = func.apply(this, args);

          // Return a new promise
          return new cursor.s.promiseLibrary(function(resolve, reject) {
            var startTime = timestampGenerator.current();
            // Emit the started event
            self.emit('started', command)

            // Execute the function
            promise.then(function(r) {
              // cursor id is zero, we can issue success command
              var command = {
                duration: timestampGenerator.duration(startTime, timestampGenerator.current()),
                commandName: commandName,
                requestId: requestId,
                operationId: cursor.operationId,
                connectionId: cursor.server.getConnection(),
                reply: cursor.cursorState.documents
              };

              // Emit the command
              self.emit('succeeded', command)

              // FIX: the original never resolved, leaving the returned
              // promise pending forever on success
              resolve(r);
            }).catch(function(err) {
              // Command
              var command = {
                duration: timestampGenerator.duration(startTime, timestampGenerator.current()),
                commandName: commandName,
                requestId: requestId,
                operationId: ourOpId,
                connectionId: cursor.server.getConnection(),
                failure: err };

              // Emit the command
              self.emit('failed', command)

              // reject the promise
              reject(err);
            });
          });
        }
      }
    });
  });
}
inherits(Instrumentation, EventEmitter);

/**
 * Restore every method wrapped by this instrumentation instance and drop
 * all APM event listeners registered on it.
 */
Instrumentation.prototype.uninstrument = function() {
  // Put each original function back on its owning prototype
  this.overloads.forEach(function(overload) {
    overload.proto[overload.name] = overload.func;
  });

  // Remove all listeners for the three APM event types
  var events = ['started', 'succeeded', 'failed'];
  for(var i = 0; i < events.length; i++) {
    this.removeAllListeners(events[i]);
  }
}

module.exports = Instrumentation;

393
node_modules/mongodb/lib/bulk/common.js generated vendored Normal file
View File

@@ -0,0 +1,393 @@
"use strict";
var utils = require('../utils');
// Error codes used when synthesizing write/write-concern errors
var UNKNOWN_ERROR = 8;
var INVALID_BSON_ERROR = 22;
var WRITE_CONCERN_ERROR = 64;
var MULTIPLE_ERROR = 65;
// Batch operation types (Batch.batchType values)
var INSERT = 1;
var UPDATE = 2;
var REMOVE = 3
/**
 * Resolve the effective write concern for an operation and attach it to
 * `target.writeConcern`. Per-call options take precedence over the
 * collection level write concern; if neither defines w/j/fsync, the target
 * is left untouched.
 *
 * @ignore
 * @param {object} target object the resolved write concern is written onto
 * @param {object} col collection carrying a `writeConcern` object
 * @param {object} options per-call options (may carry w/j/fsync)
 * @return {object} the target object
 */
var writeConcern = function(target, col, options) {
  var hasConcern = function(source) {
    return source.w != null || source.j != null || source.fsync != null;
  }

  if(hasConcern(options)) {
    target.writeConcern = options;
  } else if(hasConcern(col.writeConcern)) {
    target.writeConcern = col.writeConcern;
  }

  return target;
}
/**
 * Define an enumerable, read-only accessor `name` on `self` that always
 * yields the captured `value`.
 * @ignore
 */
var defineReadOnlyProperty = function(self, name, value) {
  var descriptor = {
    enumerable: true,
    get: function() {
      return value;
    }
  };

  Object.defineProperty(self, name, descriptor);
}
/**
 * Tracks one batch of homogeneous write operations so results can be
 * rewritten back to the caller's original operation indexes after command
 * execution.
 * @ignore
 */
var Batch = function(batchType, originalZeroIndex) {
  // Operation type of every entry in this batch (INSERT/UPDATE/REMOVE)
  this.batchType = batchType;
  // Index of the batch's first operation relative to the whole bulk
  this.originalZeroIndex = originalZeroIndex;
  // Running counters and bookkeeping for the batch contents
  this.currentIndex = 0;
  this.size = 0;
  this.sizeBytes = 0;
  this.originalIndexes = [];
  this.operations = [];
}
/**
 * Wraps a single legacy write operation so any error it produces can be
 * rewritten against the caller's original bulk index.
 * @ignore
 */
var LegacyOp = function(batchType, operation, index) {
  // Operation type (INSERT/UPDATE/REMOVE) for error interpretation
  this.batchType = batchType;
  // Position of the operation in the original bulk
  this.index = index;
  // The raw operation document
  this.operation = operation;
}
/**
 * Create a new BulkWriteResult instance (INTERNAL TYPE, do not instantiate directly)
 *
 * @class
 * @property {boolean} ok Did bulk operation correctly execute
 * @property {number} nInserted number of inserted documents
 * @property {number} nUpserted Number of upserted documents
 * @property {number} nMatched number of documents matched for update
 * @property {number} nModified Number of documents updated physically on disk
 * @property {number} nRemoved Number of removed documents
 * @return {BulkWriteResult} a BulkWriteResult instance
 */
var BulkWriteResult = function(bulkResult) {
  // Read-only views over the merged bulk result
  defineReadOnlyProperty(this, "ok", bulkResult.ok);
  defineReadOnlyProperty(this, "nInserted", bulkResult.nInserted);
  defineReadOnlyProperty(this, "nUpserted", bulkResult.nUpserted);
  defineReadOnlyProperty(this, "nMatched", bulkResult.nMatched);
  defineReadOnlyProperty(this, "nModified", bulkResult.nModified);
  defineReadOnlyProperty(this, "nRemoved", bulkResult.nRemoved);

  /**
   * Return an array of inserted ids
   *
   * @return {object[]}
   */
  this.getInsertedIds = function() {
    return bulkResult.insertedIds;
  }

  /**
   * Return an array of upserted ids
   *
   * @return {object[]}
   */
  this.getUpsertedIds = function() {
    return bulkResult.upserted;
  }

  /**
   * Return the upserted id at position x
   *
   * @param {number} index the number of the upserted id to return, returns undefined if no result for passed in index
   * @return {object}
   */
  this.getUpsertedIdAt = function(index) {
    return bulkResult.upserted[index];
  }

  /**
   * Return raw internal result
   *
   * @return {object}
   */
  this.getRawResponse = function() {
    return bulkResult;
  }

  /**
   * Returns true if the bulk operation contains a write error
   *
   * @return {boolean}
   */
  this.hasWriteErrors = function() {
    return bulkResult.writeErrors.length > 0;
  }

  /**
   * Returns the number of write errors off the bulk operation
   *
   * @return {number}
   */
  this.getWriteErrorCount = function() {
    return bulkResult.writeErrors.length;
  }

  /**
   * Returns a specific write error object
   *
   * @param {number} index index of the write error to return
   * @return {WriteError} the error, or null when index is out of range
   */
  this.getWriteErrorAt = function(index) {
    if(index < bulkResult.writeErrors.length) {
      return bulkResult.writeErrors[index];
    }
    return null;
  }

  /**
   * Retrieve all write errors
   *
   * @return {object[]}
   */
  this.getWriteErrors = function() {
    return bulkResult.writeErrors;
  }

  /**
   * Retrieve lastOp if available
   *
   * @return {object}
   */
  this.getLastOp = function() {
    return bulkResult.lastOp;
  }

  /**
   * Retrieve the write concern error if any
   *
   * @return {WriteConcernError}
   */
  this.getWriteConcernError = function() {
    if(bulkResult.writeConcernErrors.length == 0) {
      return null;
    } else if(bulkResult.writeConcernErrors.length == 1) {
      // Return the error
      return bulkResult.writeConcernErrors[0];
    } else {
      // Combine the errors into one message, joined by " and ".
      // FIX: the original appended the separator only after the first
      // error, so three or more messages ran together.
      var errmsg = "";
      for(var i = 0; i < bulkResult.writeConcernErrors.length; i++) {
        if(i != 0) errmsg = errmsg + " and ";
        errmsg = errmsg + bulkResult.writeConcernErrors[i].errmsg;
      }

      return new WriteConcernError({ errmsg : errmsg, code : WRITE_CONCERN_ERROR });
    }
  }

  // Raw merged result document
  this.toJSON = function() {
    return bulkResult;
  }

  // FIX: the original concatenated the raw object, producing
  // "BulkWriteResult([object Object])"; serialize it instead.
  this.toString = function() {
    return "BulkWriteResult(" + JSON.stringify(this.toJSON()) + ")";
  }

  // True when the overall bulk operation succeeded
  this.isOk = function() {
    return bulkResult.ok == 1;
  }
}
/**
 * Create a new WriteConcernError instance (INTERNAL TYPE, do not instantiate directly)
 *
 * @class
 * @property {number} code Write concern error code.
 * @property {string} errmsg Write concern error message.
 * @return {WriteConcernError} a WriteConcernError instance
 */
var WriteConcernError = function(err) {
// Allow construction without `new`
if(!(this instanceof WriteConcernError)) return new WriteConcernError(err);
// Define read-only properties backed by the raw error document
defineReadOnlyProperty(this, "code", err.code);
defineReadOnlyProperty(this, "errmsg", err.errmsg);
// Plain-object view used when serializing results
this.toJSON = function() {
return {code: err.code, errmsg: err.errmsg};
}
// Human readable representation
this.toString = function() {
return "WriteConcernError(" + err.errmsg + ")";
}
}
/**
 * Create a new WriteError instance (INTERNAL TYPE, do not instantiate directly)
 *
 * @class
 * @property {number} code Write error code.
 * @property {number} index Index of the failing operation in the original bulk.
 * @property {string} errmsg Write error message.
 * @return {WriteError} a WriteError instance
 */
var WriteError = function(err) {
// Allow construction without `new`
if(!(this instanceof WriteError)) return new WriteError(err);
// Define read-only properties backed by the raw error document
defineReadOnlyProperty(this, "code", err.code);
defineReadOnlyProperty(this, "index", err.index);
defineReadOnlyProperty(this, "errmsg", err.errmsg);
//
// Define access methods
// Return the original operation document that failed
this.getOperation = function() {
return err.op;
}
// Plain-object view used when serializing results
this.toJSON = function() {
return {code: err.code, index: err.index, errmsg: err.errmsg, op: err.op};
}
// Human readable representation
this.toString = function() {
return "WriteError(" + JSON.stringify(this.toJSON()) + ")";
}
}
/**
 * Merge one batch's command result (or error) into the shared bulkResult
 * accumulator, rebasing per-batch indexes onto the whole bulk operation.
 * @ignore
 * @param {boolean} ordered whether the bulk is ordered (currently unused here)
 * @param {Batch} batch the batch the result belongs to
 * @param {object} bulkResult shared accumulator being built up
 * @param {MongoError} err driver-level error, takes precedence over result
 * @param {object} result raw command result (possibly wrapped in .result)
 */
var mergeBatchResults = function(ordered, batch, bulkResult, err, result) {
// Normalize: a driver error replaces the result; unwrap the nested
// `result.result` envelope; nothing to merge when both are null.
// If we have an error set the result to be the err object
if(err) {
result = err;
} else if(result && result.result) {
result = result.result;
} else if(result == null) {
return;
}
// Do we have a top level error stop processing and return.
// The failure is surfaced as a write error attributed to the batch's
// first operation (index 0 within the batch).
if(result.ok == 0 && bulkResult.ok == 1) {
bulkResult.ok = 0;
// bulkResult.error = utils.toError(result);
var writeError = {
index: 0
, code: result.code || 0
, errmsg: result.message
, op: batch.operations[0]
};
bulkResult.writeErrors.push(new WriteError(writeError));
return;
} else if(result.ok == 0 && bulkResult.ok == 0) {
// A previous batch already recorded a top-level failure; keep the first.
return;
}
// Add lastop if available
if(result.lastOp) {
bulkResult.lastOp = result.lastOp;
}
// If we have an insert Batch type, result.n counts inserted documents
if(batch.batchType == INSERT && result.n) {
bulkResult.nInserted = bulkResult.nInserted + result.n;
}
// If we have a remove Batch type, result.n counts removed documents
if(batch.batchType == REMOVE && result.n) {
bulkResult.nRemoved = bulkResult.nRemoved + result.n;
}
var nUpserted = 0;
// We have an array of upserted values, we need to rewrite the indexes
// from batch-relative to bulk-relative using originalZeroIndex
if(Array.isArray(result.upserted)) {
nUpserted = result.upserted.length;
for(var i = 0; i < result.upserted.length; i++) {
bulkResult.upserted.push({
index: result.upserted[i].index + batch.originalZeroIndex
, _id: result.upserted[i]._id
});
}
} else if(result.upserted) {
// Single upsert result (legacy shape)
nUpserted = 1;
bulkResult.upserted.push({
index: batch.originalZeroIndex
, _id: result.upserted
});
}
// If we have an update Batch type: result.n includes upserts, so matched
// is n minus the upsert count
if(batch.batchType == UPDATE && result.n) {
var nModified = result.nModified;
bulkResult.nUpserted = bulkResult.nUpserted + nUpserted;
bulkResult.nMatched = bulkResult.nMatched + (result.n - nUpserted);
// nModified is only reported by newer servers; null marks it unknown
if(typeof nModified == 'number') {
bulkResult.nModified = bulkResult.nModified + nModified;
} else {
bulkResult.nModified = null;
}
}
// Rebase each per-batch write error index onto the whole bulk operation
if(Array.isArray(result.writeErrors)) {
for(var i = 0; i < result.writeErrors.length; i++) {
var writeError = {
index: batch.originalZeroIndex + result.writeErrors[i].index
, code: result.writeErrors[i].code
, errmsg: result.writeErrors[i].errmsg
, op: batch.operations[result.writeErrors[i].index]
};
bulkResult.writeErrors.push(new WriteError(writeError));
}
}
// Collect any write concern error reported for this batch
if(result.writeConcernError) {
bulkResult.writeConcernErrors.push(new WriteConcernError(result.writeConcernError));
}
}
/**
 * Create a shallow copy of an options object (own enumerable keys only).
 * @ignore
 * @param {object} options the source options
 * @return {object} a new object with the same keys and values
 */
var cloneOptions = function(options) {
  var clone = {};

  Object.keys(options).forEach(function(key) {
    clone[key] = options[key];
  });

  return clone;
}
// Exports symbols
exports.BulkWriteResult = BulkWriteResult;
exports.WriteError = WriteError;
exports.Batch = Batch;
exports.LegacyOp = LegacyOp;
exports.mergeBatchResults = mergeBatchResults;
exports.cloneOptions = cloneOptions;
exports.writeConcern = writeConcern;
// Error code constants
exports.INVALID_BSON_ERROR = INVALID_BSON_ERROR;
exports.WRITE_CONCERN_ERROR = WRITE_CONCERN_ERROR;
exports.MULTIPLE_ERROR = MULTIPLE_ERROR;
exports.UNKNOWN_ERROR = UNKNOWN_ERROR;
// Batch type constants
exports.INSERT = INSERT;
exports.UPDATE = UPDATE;
exports.REMOVE = REMOVE;
530
node_modules/mongodb/lib/bulk/ordered.js generated vendored Normal file
View File

@@ -0,0 +1,530 @@
"use strict";
var common = require('./common')
, utils = require('../utils')
, toError = require('../utils').toError
, f = require('util').format
, handleCallback = require('../utils').handleCallback
, shallowClone = utils.shallowClone
, WriteError = common.WriteError
, BulkWriteResult = common.BulkWriteResult
, LegacyOp = common.LegacyOp
, ObjectID = require('mongodb-core').BSON.ObjectID
, Define = require('../metadata')
, Batch = common.Batch
, mergeBatchResults = common.mergeBatchResults;
/**
* Create a FindOperatorsOrdered instance (INTERNAL TYPE, do not instantiate directly)
* @class
* @return {FindOperatorsOrdered} a FindOperatorsOrdered instance.
*/
var FindOperatorsOrdered = function(self) {
// Share the parent bulk operation's internal state so documents added via
// this find() wrapper land in the same operation list
this.s = self.s;
}
/**
* Add a single update document to the bulk operation
*
* @method
* @param {object} doc update operations
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.update = function(updateDocument) {
  var op = this.s.currentOp;

  // Honor a preceding .upsert() call; default to a plain update
  var upsert = typeof op.upsert == 'boolean' ? op.upsert : false;

  // Build the update command document (multi:true updates every match)
  var document = {
    q: op.selector,
    u: updateDocument,
    multi: true,
    upsert: upsert
  };

  // The pending find() context is consumed by this operation
  this.s.currentOp = null;

  return addToOperationsList(this, common.UPDATE, document);
}
/**
* Add a single update one document to the bulk operation
*
* @method
* @param {object} doc update operations
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.updateOne = function(updateDocument) {
  var op = this.s.currentOp;

  // Honor a preceding .upsert() call; default to a plain update
  var upsert = typeof op.upsert == 'boolean' ? op.upsert : false;

  // Build the update command document (multi:false updates a single match)
  var document = {
    q: op.selector,
    u: updateDocument,
    multi: false,
    upsert: upsert
  };

  // The pending find() context is consumed by this operation
  this.s.currentOp = null;

  return addToOperationsList(this, common.UPDATE, document);
}
/**
* Add a replace one operation to the bulk operation
*
* @method
* @param {object} doc the new document to replace the existing one with
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.replaceOne = function(updateDocument) {
  // FIX: propagate the result so replaceOne chains like updateOne does
  // (previously the return value was dropped, yielding undefined instead
  // of the OrderedBulkOperation documented by the JSDoc above)
  return this.updateOne(updateDocument);
}
/**
* Upsert modifier for update bulk operation
*
* @method
* @throws {MongoError}
* @return {FindOperatorsOrdered}
*/
FindOperatorsOrdered.prototype.upsert = function() {
// Flag the pending find() operation so the next update/updateOne/replaceOne
// issued from this wrapper becomes an upsert
this.s.currentOp.upsert = true;
return this;
}
/**
* Add a remove one operation to the bulk operation
*
* @method
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.deleteOne = function() {
  // Remove command limited to the first matching document
  var document = {
    q: this.s.currentOp.selector,
    limit: 1
  };

  // The pending find() context is consumed by this operation
  this.s.currentOp = null;

  return addToOperationsList(this, common.REMOVE, document);
}

// Backward compatibility
FindOperatorsOrdered.prototype.removeOne = FindOperatorsOrdered.prototype.deleteOne;
/**
* Add a remove operation to the bulk operation
*
* @method
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.delete = function() {
  // Remove command with no limit: removes every matching document
  var document = {
    q: this.s.currentOp.selector,
    limit: 0
  };

  // The pending find() context is consumed by this operation
  this.s.currentOp = null;

  return addToOperationsList(this, common.REMOVE, document);
}

// Backward compatibility
FindOperatorsOrdered.prototype.remove = FindOperatorsOrdered.prototype.delete;
/**
 * Append an operation document to the bulk's internal batch list, starting
 * a fresh batch when size limits would be exceeded or the operation type
 * changes (ordered bulks may only mix types across batch boundaries).
 * @ignore
 * @param {OrderedBulkOperation|FindOperatorsOrdered} _self holder of the shared state
 * @param {number} docType common.INSERT/UPDATE/REMOVE
 * @param {object} document the operation document to queue
 * @throws {MongoError} when the document exceeds the max BSON size
 * @return {object} _self, for chaining
 */
var addToOperationsList = function(_self, docType, document) {
  // Get the bsonSize
  var bsonSize = _self.s.bson.calculateObjectSize(document, false);

  // Throw error if the doc is bigger than the max BSON size
  if(bsonSize >= _self.s.maxBatchSizeBytes) {
    throw toError("document is larger than the maximum size " + _self.s.maxBatchSizeBytes);
  }

  // Create a new batch object if we don't have a current one
  if(_self.s.currentBatch == null) _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);

  // Check if we need to start a new batch.
  // FIX: the byte check previously compared currentBatchSizeBytes against
  // itself (currentBatchSizeBytes + currentBatchSizeBytes) instead of
  // including the incoming document's size.
  if(((_self.s.currentBatchSize + 1) >= _self.s.maxWriteBatchSize)
    || ((_self.s.currentBatchSizeBytes + bsonSize) >= _self.s.maxBatchSizeBytes)
    || (_self.s.currentBatch.batchType != docType)) {
    // Save the batch to the execution stack
    _self.s.batches.push(_self.s.currentBatch);

    // Create a new batch
    _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);

    // Reset the current size trackers
    _self.s.currentBatchSize = 0;
    _self.s.currentBatchSizeBytes = 0;
  }

  // Account for the document in the current batch.
  // FIX: this previously ran only when no split happened, so the first
  // document of every fresh batch was left out of the size totals.
  _self.s.currentBatchSize = _self.s.currentBatchSize + 1;
  _self.s.currentBatchSizeBytes = _self.s.currentBatchSizeBytes + bsonSize;

  // Record the (possibly driver-generated) _id for inserts
  if(docType == common.INSERT) {
    _self.s.bulkResult.insertedIds.push({index: _self.s.currentIndex, _id: document._id});
  }

  // We have an array of documents
  if(Array.isArray(document)) {
    throw toError("operation passed in cannot be an Array");
  } else {
    _self.s.currentBatch.originalIndexes.push(_self.s.currentIndex);
    _self.s.currentBatch.operations.push(document)
    _self.s.currentIndex = _self.s.currentIndex + 1;
  }

  // Return self
  return _self;
}
/**
* Create a new OrderedBulkOperation instance (INTERNAL TYPE, do not instantiate directly)
* @class
* @property {number} length Get the number of operations in the bulk.
* @return {OrderedBulkOperation} a OrderedBulkOperation instance.
*/
function OrderedBulkOperation(topology, collection, options) {
  options = options == null ? {} : options;
  // TODO Bring from driver information in isMaster
  // Marks the bulk as executed; a bulk cannot be re-executed
  var executed = false;
  // Holds the selector state between .find() and the follow-up operator call
  var currentOp = null;
  // Handle to the bson serializer, used to calculate running sizes
  var bson = topology.bson;
  // Namespace for the operation
  var namespace = collection.collectionName;
  // Set max byte size; fall back to 16MB (the server default maxBsonObjectSize)
  // when the isMaster document does not advertise one
  // (fix: previous fallback was 1024*1025*16, a typo for 1024*1024*16)
  var maxBatchSizeBytes = topology.isMasterDoc && topology.isMasterDoc.maxBsonObjectSize
    ? topology.isMasterDoc.maxBsonObjectSize : (1024 * 1024 * 16);
  var maxWriteBatchSize = topology.isMasterDoc && topology.isMasterDoc.maxWriteBatchSize
    ? topology.isMasterDoc.maxWriteBatchSize : 1000;
  // Get the write concern
  var writeConcern = common.writeConcern(shallowClone(options), collection, options);
  // Get the promiseLibrary
  var promiseLibrary = options.promiseLibrary;
  // No promise library selected fall back
  if(!promiseLibrary) {
    promiseLibrary = typeof global.Promise == 'function' ?
      global.Promise : require('es6-promise').Promise;
  }
  // Final results accumulated across all executed batches
  var bulkResult = {
      ok: 1
    , writeErrors: []
    , writeConcernErrors: []
    , insertedIds: []
    , nInserted: 0
    , nUpserted: 0
    , nMatched: 0
    , nModified: 0
    , nRemoved: 0
    , upserted: []
  };
  // Internal state
  this.s = {
    // Final result
    bulkResult: bulkResult
    // Current batch state
    , currentBatch: null
    , currentIndex: 0
    , currentBatchSize: 0
    , currentBatchSizeBytes: 0
    , batches: []
    // Write concern
    , writeConcern: writeConcern
    // Max batch size options
    , maxBatchSizeBytes: maxBatchSizeBytes
    , maxWriteBatchSize: maxWriteBatchSize
    // Namespace
    , namespace: namespace
    // BSON
    , bson: bson
    // Topology
    , topology: topology
    // Options
    , options: options
    // Current operation
    , currentOp: currentOp
    // Executed
    , executed: executed
    // Collection
    , collection: collection
    // Promise Library
    , promiseLibrary: promiseLibrary
    // Fundamental error
    , err: null
    // Bypass validation
    , bypassDocumentValidation: typeof options.bypassDocumentValidation == 'boolean' ? options.bypassDocumentValidation : false
  }
}
var define = OrderedBulkOperation.define = new Define('OrderedBulkOperation', OrderedBulkOperation, false);

/**
 * Add a raw operation to the bulk, accepting both the legacy wire format
 * (operation documents carrying a `q` selector) and the CRUD-spec format.
 *
 * @param {object} op the raw operation (insertOne/insertMany/updateOne/updateMany/replaceOne/removeOne/removeMany/deleteOne/deleteMany)
 * @throws {MongoError} when the operation type is not supported
 * @return {OrderedBulkOperation}
 */
OrderedBulkOperation.prototype.raw = function(op) {
  var key = Object.keys(op)[0];

  // Set up the force server object id
  var forceServerObjectId = typeof this.s.options.forceServerObjectId == 'boolean'
    ? this.s.options.forceServerObjectId : this.s.collection.s.db.options.forceServerObjectId;

  // Update operations already in wire format (carry their own `q` selector)
  if((op.updateOne && op.updateOne.q)
    || (op.updateMany && op.updateMany.q)
    || (op.replaceOne && op.replaceOne.q)) {
    // Only updateMany targets multiple documents
    op[key].multi = op.updateOne || op.replaceOne ? false : true;
    return addToOperationsList(this, common.UPDATE, op[key]);
  }

  // Crud spec update format
  if(op.updateOne || op.updateMany || op.replaceOne) {
    var multi = op.updateOne || op.replaceOne ? false : true;
    var operation = {q: op[key].filter, u: op[key].update || op[key].replacement, multi: multi}
    operation.upsert = op[key].upsert ? true: false;
    return addToOperationsList(this, common.UPDATE, operation);
  }

  // Remove operations already in wire format; limit 1 removes a single
  // document, limit 0 removes every match
  if(op.removeOne || op.removeMany || (op.deleteOne && op.deleteOne.q) || op.deleteMany && op.deleteMany.q) {
    op[key].limit = op.removeOne ? 1 : 0;
    return addToOperationsList(this, common.REMOVE, op[key]);
  }

  // Crud spec delete operations, less efficient
  if(op.deleteOne || op.deleteMany) {
    var limit = op.deleteOne ? 1 : 0;
    var operation = {q: op[key].filter, limit: limit}
    return addToOperationsList(this, common.REMOVE, operation);
  }

  // Insert operations: assign a client-side _id unless the server does it
  if(op.insertOne && op.insertOne.document == null) {
    // Bare insertOne (the operation itself is the document)
    if(forceServerObjectId !== true && op.insertOne._id == null) op.insertOne._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne);
  } else if(op.insertOne && op.insertOne.document) {
    if(forceServerObjectId !== true && op.insertOne.document._id == null) op.insertOne.document._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne.document);
  }

  if(op.insertMany) {
    for(var i = 0; i < op.insertMany.length; i++) {
      if(forceServerObjectId !== true && op.insertMany[i]._id == null) op.insertMany[i]._id = new ObjectID();
      addToOperationsList(this, common.INSERT, op.insertMany[i]);
    }

    return;
  }

  // No valid type of operation
  throw toError("bulkWrite only supports insertOne, insertMany, updateOne, updateMany, removeOne, removeMany, deleteOne, deleteMany");
}

/**
 * Add a single insert document to the bulk operation
 *
 * @param {object} document the document to insert
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
OrderedBulkOperation.prototype.insert = function(document) {
  // Assign a client-side ObjectID unless the server is responsible for ids
  if(this.s.collection.s.db.options.forceServerObjectId !== true && document._id == null) document._id = new ObjectID();
  return addToOperationsList(this, common.INSERT, document);
}
/**
 * Initiate a find operation for an update/updateOne/remove/removeOne/replaceOne
 *
 * @method
 * @param {object} selector The selector for the bulk operation.
 * @throws {MongoError}
 * @return {FindOperatorsOrdered}
 */
OrderedBulkOperation.prototype.find = function(selector) {
  // A selector is mandatory; refuse missing/empty values
  if(!selector) throw toError("Bulk find operation must specify a selector");
  // Remember the selector so the chained operator call can consume it
  this.s.currentOp = {selector: selector};
  return new FindOperatorsOrdered(this);
}
// Expose the number of queued operations as a read-only `length` property
Object.defineProperty(OrderedBulkOperation.prototype, 'length', {
  enumerable: true,
  get: function() {
    return this.s.currentIndex;
  }
});

//
// Execute next write command in a chain.
// Recursively pops batches off self.s.batches and runs them one at a time,
// stopping at the first write error (ordered semantics).
var executeCommands = function(self, callback) {
  // No batches left: hand back the accumulated result
  if(self.s.batches.length == 0) {
    return handleCallback(callback, null, new BulkWriteResult(self.s.bulkResult));
  }

  // Ordered execution of the command
  var batch = self.s.batches.shift();

  var resultHandler = function(err, result) {
    // Error is a driver related error not a bulk op error, terminate
    if(err && err.driver || err && err.message) {
      return handleCallback(callback, err);
    }

    // If we have an error, force a failed ok status before merging
    if(err) err.ok = 0;

    // Merge the results together
    var mergeResult = mergeBatchResults(true, batch, self.s.bulkResult, err, result);
    if(mergeResult != null) {
      return handleCallback(callback, null, new BulkWriteResult(self.s.bulkResult));
    }

    // If we are ordered and have errors and they are
    // not all replication errors terminate the operation
    if(self.s.bulkResult.writeErrors.length > 0) {
      return handleCallback(callback, toError(self.s.bulkResult.writeErrors[0]), new BulkWriteResult(self.s.bulkResult));
    }

    // Execute the next command in line
    executeCommands(self, callback);
  }

  var finalOptions = {ordered: true}
  if(self.s.writeConcern != null) {
    finalOptions.writeConcern = self.s.writeConcern;
  }

  // Set an operationId if provided
  if(self.operationId) {
    resultHandler.operationId = self.operationId;
  }

  // Serialize functions
  if(self.s.options.serializeFunctions) {
    finalOptions.serializeFunctions = true
  }

  // Drop keys whose values are undefined when serializing
  if(self.s.options.ignoreUndefined) {
    finalOptions.ignoreUndefined = true
  }

  // Is the bypassDocumentValidation options specific
  if(self.s.bypassDocumentValidation == true) {
    finalOptions.bypassDocumentValidation = true;
  }

  try {
    // Dispatch to the topology write method matching this batch's type
    if(batch.batchType == common.INSERT) {
      self.s.topology.insert(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    } else if(batch.batchType == common.UPDATE) {
      self.s.topology.update(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    } else if(batch.batchType == common.REMOVE) {
      self.s.topology.remove(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    }
  } catch(err) {
    // Force top level error
    err.ok = 0;
    // Merge top level error and return
    handleCallback(callback, null, mergeBatchResults(false, batch, self.s.bulkResult, err, null));
  }
}
/**
 * The callback format for results
 * @callback OrderedBulkOperation~resultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {BulkWriteResult} result The bulk write result.
 */

/**
 * Execute the ordered bulk operation
 *
 * @method
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {OrderedBulkOperation~resultCallback} [callback] The result callback
 * @throws {MongoError}
 * @return {Promise} returns Promise if no callback passed
 */
OrderedBulkOperation.prototype.execute = function(_writeConcern, callback) {
  var self = this;
  // Re-execution guard (fix: throw the Error returned by toError directly,
  // matching the unordered implementation, instead of `new toError(...)`)
  if(this.s.executed) throw toError("batch cannot be re-executed");
  // Allow execute(callback) as well as execute(writeConcern, callback)
  if(typeof _writeConcern == 'function') {
    callback = _writeConcern;
  } else {
    this.s.writeConcern = _writeConcern;
  }
  // Push any in-progress batch onto the execution stack
  if(this.s.currentBatch) this.s.batches.push(this.s.currentBatch);
  // If we have no operations in the bulk raise an error
  if(this.s.batches.length == 0) {
    throw toError("Invalid Operation, No operations in bulk");
  }
  // Mark as executed so the re-execution guard above actually triggers
  // (fix: the flag was checked but never set)
  this.s.executed = true;
  // Execute using callback
  if(typeof callback == 'function') {
    return executeCommands(this, callback);
  }
  // Return a Promise
  return new this.s.promiseLibrary(function(resolve, reject) {
    executeCommands(self, function(err, r) {
      if(err) return reject(err);
      resolve(r);
    });
  });
}
define.classMethod('execute', {callback: true, promise:false});

/**
 * Returns an ordered batch object
 * @ignore
 */
var initializeOrderedBulkOp = function(topology, collection, options) {
  return new OrderedBulkOperation(topology, collection, options);
}

initializeOrderedBulkOp.OrderedBulkOperation = OrderedBulkOperation;
// The factory function is the module's main export; the constructor is
// also exposed for internal use
module.exports = initializeOrderedBulkOp;
module.exports.Bulk = OrderedBulkOperation;

539
node_modules/mongodb/lib/bulk/unordered.js generated vendored Normal file
View File

@@ -0,0 +1,539 @@
"use strict";
var common = require('./common')
, utils = require('../utils')
, toError = require('../utils').toError
, f = require('util').format
, handleCallback = require('../utils').handleCallback
, shallowClone = utils.shallowClone
, WriteError = common.WriteError
, BulkWriteResult = common.BulkWriteResult
, LegacyOp = common.LegacyOp
, ObjectID = require('mongodb-core').BSON.ObjectID
, Define = require('../metadata')
, Batch = common.Batch
, mergeBatchResults = common.mergeBatchResults;
/**
 * Create a FindOperatorsUnordered instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @property {number} length Get the number of operations in the bulk.
 * @return {FindOperatorsUnordered} a FindOperatorsUnordered instance.
 */
var FindOperatorsUnordered = function(self) {
  // Share the parent bulk operation's internal state so queued
  // operations land in the parent's batches
  this.s = self.s;
}
/**
 * Add a single update document to the bulk operation
 *
 * @method
 * @param {object} updateDocument update operations
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.update = function(updateDocument) {
  var op = this.s.currentOp;
  // Honor an explicitly set boolean upsert flag; default to false
  var doUpsert = typeof op.upsert == 'boolean' ? op.upsert : false;
  // Build a multi-document update command from the stored selector
  var updateCommand = {
    q: op.selector,
    u: updateDocument,
    multi: true,
    upsert: doUpsert
  };
  // Clear the find state before queueing
  this.s.currentOp = null;
  return addToOperationsList(this, common.UPDATE, updateCommand);
}

/**
 * Add a single update one document to the bulk operation
 *
 * @method
 * @param {object} updateDocument update operations
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.updateOne = function(updateDocument) {
  var op = this.s.currentOp;
  // Honor an explicitly set boolean upsert flag; default to false
  var doUpsert = typeof op.upsert == 'boolean' ? op.upsert : false;
  // Build a single-document update command from the stored selector
  var updateCommand = {
    q: op.selector,
    u: updateDocument,
    multi: false,
    upsert: doUpsert
  };
  // Clear the find state before queueing
  this.s.currentOp = null;
  return addToOperationsList(this, common.UPDATE, updateCommand);
}
/**
 * Add a replace one operation to the bulk operation
 *
 * @method
 * @param {object} updateDocument the new document to replace the existing one with
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.replaceOne = function(updateDocument) {
  // Fix: propagate the return value so replaceOne is chainable like
  // update/updateOne (previously returned undefined despite the
  // documented @return)
  return this.updateOne(updateDocument);
}
/**
 * Upsert modifier for update bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.upsert = function() {
  // Flag the pending find so the following update/updateOne performs an upsert
  this.s.currentOp.upsert = true;
  return this;
}

/**
 * Add a remove one operation to the bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.removeOne = function() {
  // limit: 1 deletes at most a single matching document
  var removeCommand = {q: this.s.currentOp.selector, limit: 1};
  // Clear the find state before queueing
  this.s.currentOp = null;
  return addToOperationsList(this, common.REMOVE, removeCommand);
}

/**
 * Add a remove operation to the bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.remove = function() {
  // limit: 0 deletes every matching document
  var removeCommand = {q: this.s.currentOp.selector, limit: 0};
  // Clear the find state before queueing
  this.s.currentOp = null;
  return addToOperationsList(this, common.REMOVE, removeCommand);
}
//
// Add a document to the appropriate typed batch, rolling over to a new
// batch when the size limits would be exceeded or the batch type changes.
// Throws when the document alone exceeds the maximum BSON size.
//
var addToOperationsList = function(_self, docType, document) {
  // Get the bsonSize
  var bsonSize = _self.s.bson.calculateObjectSize(document, false);
  // Throw error if the doc is bigger than the max BSON size
  if(bsonSize >= _self.s.maxBatchSizeBytes) throw toError("document is larger than the maximum size " + _self.s.maxBatchSizeBytes);
  // Holds the current batch
  _self.s.currentBatch = null;
  // Get the right type of batch
  if(docType == common.INSERT) {
    _self.s.currentBatch = _self.s.currentInsertBatch;
  } else if(docType == common.UPDATE) {
    _self.s.currentBatch = _self.s.currentUpdateBatch;
  } else if(docType == common.REMOVE) {
    _self.s.currentBatch = _self.s.currentRemoveBatch;
  }
  // Create a new batch object if we don't have a current one
  if(_self.s.currentBatch == null) _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);
  // Check if we need to create a new batch
  if(((_self.s.currentBatch.size + 1) >= _self.s.maxWriteBatchSize)
    || ((_self.s.currentBatch.sizeBytes + bsonSize) >= _self.s.maxBatchSizeBytes)
    || (_self.s.currentBatch.batchType != docType)) {
    // Save the batch to the execution stack
    _self.s.batches.push(_self.s.currentBatch);
    // Create a new batch
    _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);
  }
  // Operations must be added one document at a time
  if(Array.isArray(document)) {
    throw toError("operation passed in cannot be an Array");
  }
  // Remember the index this document occupies in the overall bulk
  var documentIndex = _self.s.currentIndex;
  _self.s.currentBatch.operations.push(document);
  _self.s.currentBatch.originalIndexes.push(documentIndex);
  _self.s.currentIndex = documentIndex + 1;
  // Save back the current Batch to the right type
  if(docType == common.INSERT) {
    _self.s.currentInsertBatch = _self.s.currentBatch;
    // Fix: record the document's own bulk index; previously the already
    // incremented currentIndex was used, off by one relative to the
    // originalIndexes bookkeeping above
    _self.s.bulkResult.insertedIds.push({index: documentIndex, _id: document._id});
  } else if(docType == common.UPDATE) {
    _self.s.currentUpdateBatch = _self.s.currentBatch;
  } else if(docType == common.REMOVE) {
    _self.s.currentRemoveBatch = _self.s.currentBatch;
  }
  // Update current batch size
  _self.s.currentBatch.size = _self.s.currentBatch.size + 1;
  _self.s.currentBatch.sizeBytes = _self.s.currentBatch.sizeBytes + bsonSize;
  // Return self
  return _self;
}
/**
 * Create a new UnorderedBulkOperation instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @return {UnorderedBulkOperation} a UnorderedBulkOperation instance.
 */
var UnorderedBulkOperation = function(topology, collection, options) {
  options = options == null ? {} : options;
  // Get the namespace for the write operations
  var namespace = collection.collectionName;
  // Used to mark operation as executed
  var executed = false;
  // Holds the selector state between .find() and the follow-up operator call
  var currentOp = null;
  // Handle to the bson serializer, used to calculate running sizes
  var bson = topology.bson;
  // Set max byte size; fall back to 16MB (the server default maxBsonObjectSize)
  // when the isMaster document does not advertise one
  // (fix: previous fallback was 1024*1025*16, a typo for 1024*1024*16)
  var maxBatchSizeBytes = topology.isMasterDoc && topology.isMasterDoc.maxBsonObjectSize
    ? topology.isMasterDoc.maxBsonObjectSize : (1024 * 1024 * 16);
  var maxWriteBatchSize = topology.isMasterDoc && topology.isMasterDoc.maxWriteBatchSize
    ? topology.isMasterDoc.maxWriteBatchSize : 1000;
  // Get the write concern
  var writeConcern = common.writeConcern(shallowClone(options), collection, options);
  // Get the promiseLibrary
  var promiseLibrary = options.promiseLibrary;
  // No promise library selected fall back
  if(!promiseLibrary) {
    promiseLibrary = typeof global.Promise == 'function' ?
      global.Promise : require('es6-promise').Promise;
  }
  // Final results accumulated across all executed batches
  var bulkResult = {
      ok: 1
    , writeErrors: []
    , writeConcernErrors: []
    , insertedIds: []
    , nInserted: 0
    , nUpserted: 0
    , nMatched: 0
    , nModified: 0
    , nRemoved: 0
    , upserted: []
  };
  // Internal state
  this.s = {
    // Final result
    bulkResult: bulkResult
    // Current batch state; one open batch is kept per operation type
    , currentInsertBatch: null
    , currentUpdateBatch: null
    , currentRemoveBatch: null
    , currentBatch: null
    , currentIndex: 0
    , batches: []
    // Write concern
    , writeConcern: writeConcern
    // Max batch size options
    , maxBatchSizeBytes: maxBatchSizeBytes
    , maxWriteBatchSize: maxWriteBatchSize
    // Namespace
    , namespace: namespace
    // BSON
    , bson: bson
    // Topology
    , topology: topology
    // Options
    , options: options
    // Current operation
    , currentOp: currentOp
    // Executed
    , executed: executed
    // Collection
    , collection: collection
    // Promise Library
    , promiseLibrary: promiseLibrary
    // Bypass validation
    , bypassDocumentValidation: typeof options.bypassDocumentValidation == 'boolean' ? options.bypassDocumentValidation : false
  }
}
var define = UnorderedBulkOperation.define = new Define('UnorderedBulkOperation', UnorderedBulkOperation, false);

/**
 * Add a single insert document to the bulk operation
 *
 * @param {object} document the document to insert
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
UnorderedBulkOperation.prototype.insert = function(document) {
  // Assign a client-side ObjectID unless the server is responsible for ids
  if(this.s.collection.s.db.options.forceServerObjectId !== true && document._id == null) document._id = new ObjectID();
  return addToOperationsList(this, common.INSERT, document);
}
/**
 * Initiate a find operation for an update/updateOne/remove/removeOne/replaceOne
 *
 * @method
 * @param {object} selector The selector for the bulk operation.
 * @throws {MongoError}
 * @return {FindOperatorsUnordered}
 */
UnorderedBulkOperation.prototype.find = function(selector) {
  // A selector is mandatory; refuse missing/empty values
  if(!selector) throw toError("Bulk find operation must specify a selector");
  // Remember the selector so the chained operator call can consume it
  this.s.currentOp = {selector: selector};
  return new FindOperatorsUnordered(this);
}
// Expose the number of queued operations as a read-only `length` property
Object.defineProperty(UnorderedBulkOperation.prototype, 'length', {
  enumerable: true,
  get: function() {
    return this.s.currentIndex;
  }
});

/**
 * Add a raw operation to the bulk, accepting both the legacy wire format
 * (operation documents carrying a `q` selector) and the CRUD-spec format.
 *
 * @param {object} op the raw operation to add
 * @throws {MongoError} when the operation type is not supported
 * @return {UnorderedBulkOperation}
 */
UnorderedBulkOperation.prototype.raw = function(op) {
  var key = Object.keys(op)[0];

  // Set up the force server object id
  var forceServerObjectId = typeof this.s.options.forceServerObjectId == 'boolean'
    ? this.s.options.forceServerObjectId : this.s.collection.s.db.options.forceServerObjectId;

  // Update operations already in wire format (carry their own `q` selector)
  if((op.updateOne && op.updateOne.q)
    || (op.updateMany && op.updateMany.q)
    || (op.replaceOne && op.replaceOne.q)) {
    // Only updateMany targets multiple documents
    op[key].multi = op.updateOne || op.replaceOne ? false : true;
    return addToOperationsList(this, common.UPDATE, op[key]);
  }

  // Crud spec update format
  if(op.updateOne || op.updateMany || op.replaceOne) {
    var multi = op.updateOne || op.replaceOne ? false : true;
    var operation = {q: op[key].filter, u: op[key].update || op[key].replacement, multi: multi}
    // NOTE(review): upsert is only set when truthy here, while the ordered
    // implementation always sets an explicit boolean — confirm intentional
    if(op[key].upsert) operation.upsert = true;
    return addToOperationsList(this, common.UPDATE, operation);
  }

  // Remove operations already in wire format; limit 1 removes a single
  // document, limit 0 removes every match
  if(op.removeOne || op.removeMany || (op.deleteOne && op.deleteOne.q) || op.deleteMany && op.deleteMany.q) {
    op[key].limit = op.removeOne ? 1 : 0;
    return addToOperationsList(this, common.REMOVE, op[key]);
  }

  // Crud spec delete operations, less efficient
  if(op.deleteOne || op.deleteMany) {
    var limit = op.deleteOne ? 1 : 0;
    var operation = {q: op[key].filter, limit: limit}
    return addToOperationsList(this, common.REMOVE, operation);
  }

  // Insert operations: assign a client-side _id unless the server does it
  if(op.insertOne && op.insertOne.document == null) {
    // Bare insertOne (the operation itself is the document)
    if(forceServerObjectId !== true && op.insertOne._id == null) op.insertOne._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne);
  } else if(op.insertOne && op.insertOne.document) {
    if(forceServerObjectId !== true && op.insertOne.document._id == null) op.insertOne.document._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne.document);
  }

  if(op.insertMany) {
    for(var i = 0; i < op.insertMany.length; i++) {
      if(forceServerObjectId !== true && op.insertMany[i]._id == null) op.insertMany[i]._id = new ObjectID();
      addToOperationsList(this, common.INSERT, op.insertMany[i]);
    }

    return;
  }

  // No valid type of operation
  throw toError("bulkWrite only supports insertOne, insertMany, updateOne, updateMany, removeOne, removeMany, deleteOne, deleteMany");
}
//
// Execute a single batch against the topology and merge its result into
// the shared bulkResult.
var executeBatch = function(self, batch, callback) {
  var finalOptions = {ordered: false}
  if(self.s.writeConcern != null) {
    finalOptions.writeConcern = self.s.writeConcern;
  }

  var resultHandler = function(err, result) {
    // Error is a driver related error not a bulk op error, terminate
    if(err && err.driver || err && err.message) {
      return handleCallback(callback, err);
    }

    // If we have an error, force a failed ok status before merging
    if(err) err.ok = 0;
    handleCallback(callback, null, mergeBatchResults(false, batch, self.s.bulkResult, err, result));
  }

  // Set an operationId if provided
  if(self.operationId) {
    resultHandler.operationId = self.operationId;
  }

  // Serialize functions
  if(self.s.options.serializeFunctions) {
    finalOptions.serializeFunctions = true
  }

  // Drop keys whose values are undefined when serializing
  // (fix: previously only the ordered implementation honored this option)
  if(self.s.options.ignoreUndefined) {
    finalOptions.ignoreUndefined = true
  }

  // Is the bypassDocumentValidation option set
  if(self.s.bypassDocumentValidation == true) {
    finalOptions.bypassDocumentValidation = true;
  }

  try {
    // Dispatch to the topology write method matching this batch's type
    if(batch.batchType == common.INSERT) {
      self.s.topology.insert(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    } else if(batch.batchType == common.UPDATE) {
      self.s.topology.update(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    } else if(batch.batchType == common.REMOVE) {
      self.s.topology.remove(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    }
  } catch(err) {
    // Force top level error
    err.ok = 0;
    // Merge top level error and return
    handleCallback(callback, null, mergeBatchResults(false, batch, self.s.bulkResult, err, null));
  }
}
//
// Execute all the batches in parallel, invoking the callback once every
// batch has completed.
var executeBatches = function(self, callback) {
  var numberOfCommandsToExecute = self.s.batches.length;
  // First driver-level error seen across all batches
  var error = null;

  // Execute over all the batches
  for(var i = 0; i < self.s.batches.length; i++) {
    executeBatch(self, self.s.batches[i], function(err) {
      // Driver layer error, capture it.
      // (fix: the previous inner `var error` declaration shadowed the
      // accumulator above, so a driver error reported by one batch could
      // be lost before the final batch's callback fired)
      if(err) error = err;
      // Count down the number of commands left to execute
      numberOfCommandsToExecute = numberOfCommandsToExecute - 1;
      // All batches are done
      if(numberOfCommandsToExecute == 0) {
        // A driver level error wins over write errors
        if(error) return handleCallback(callback, error);
        // Surface the first write error, if any, alongside the result
        var writeError = self.s.bulkResult.writeErrors.length > 0
          ? toError(self.s.bulkResult.writeErrors[0]) : null;
        handleCallback(callback, writeError, new BulkWriteResult(self.s.bulkResult));
      }
    });
  }
}
/**
 * The callback format for results
 * @callback UnorderedBulkOperation~resultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {BulkWriteResult} result The bulk write result.
 */

/**
 * Execute the unordered bulk operation
 *
 * @method
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {UnorderedBulkOperation~resultCallback} [callback] The result callback
 * @throws {MongoError}
 * @return {Promise} returns Promise if no callback passed
 */
UnorderedBulkOperation.prototype.execute = function(_writeConcern, callback) {
  var self = this;
  // A bulk may only be executed once
  if(this.s.executed) throw toError("batch cannot be re-executed");
  // Allow execute(callback) as well as execute(writeConcern, callback)
  if(typeof _writeConcern == 'function') {
    callback = _writeConcern;
  } else {
    this.s.writeConcern = _writeConcern;
  }
  // Flush any open typed batches onto the execution stack
  if(this.s.currentInsertBatch) this.s.batches.push(this.s.currentInsertBatch);
  if(this.s.currentUpdateBatch) this.s.batches.push(this.s.currentUpdateBatch);
  if(this.s.currentRemoveBatch) this.s.batches.push(this.s.currentRemoveBatch);
  // If we have no operations in the bulk raise an error
  if(this.s.batches.length == 0) {
    throw toError("Invalid Operation, No operations in bulk");
  }
  // Mark as executed so the re-execution guard above actually triggers
  // (fix: the flag was checked but never set)
  this.s.executed = true;
  // Execute using callback
  if(typeof callback == 'function') return executeBatches(this, callback);
  // Return a Promise
  return new this.s.promiseLibrary(function(resolve, reject) {
    executeBatches(self, function(err, r) {
      if(err) return reject(err);
      resolve(r);
    });
  });
}
define.classMethod('execute', {callback: true, promise:false});

/**
 * Returns an unordered batch object
 * @ignore
 */
var initializeUnorderedBulkOp = function(topology, collection, options) {
  return new UnorderedBulkOperation(topology, collection, options);
}

initializeUnorderedBulkOp.UnorderedBulkOperation = UnorderedBulkOperation;
// The factory function is the module's main export; the constructor is
// also exposed for internal use
module.exports = initializeUnorderedBulkOp;
module.exports.Bulk = UnorderedBulkOperation;

3162
node_modules/mongodb/lib/collection.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

318
node_modules/mongodb/lib/command_cursor.js generated vendored Normal file
View File

@@ -0,0 +1,318 @@
"use strict";
var inherits = require('util').inherits
, f = require('util').format
, toError = require('./utils').toError
, getSingleProperty = require('./utils').getSingleProperty
, formattedOrderClause = require('./utils').formattedOrderClause
, handleCallback = require('./utils').handleCallback
, Logger = require('mongodb-core').Logger
, EventEmitter = require('events').EventEmitter
, ReadPreference = require('./read_preference')
, MongoError = require('mongodb-core').MongoError
, Readable = require('stream').Readable || require('readable-stream').Readable
, Define = require('./metadata')
, CoreCursor = require('./cursor')
, Query = require('mongodb-core').Query
, CoreReadPreference = require('mongodb-core').ReadPreference;
/**
* @fileOverview The **CommandCursor** class is an internal class that embodies a
* generalized cursor based on a MongoDB command allowing for iteration over the
* results returned. It supports one by one document iteration, conversion to an
* array or can be iterated as a Node 0.10.X or higher stream
*
* **CommandCursor Cannot directly be instantiated**
* @example
* var MongoClient = require('mongodb').MongoClient,
* test = require('assert');
* // Connection url
* var url = 'mongodb://localhost:27017/test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, db) {
* // Create a collection we want to drop later
* var col = db.collection('listCollectionsExample1');
* // Insert a bunch of documents
* col.insert([{a:1, b:1}
* , {a:2, b:2}, {a:3, b:3}
* , {a:4, b:4}], {w:1}, function(err, result) {
* test.equal(null, err);
*
* // List the database collections available
* db.listCollections().toArray(function(err, items) {
* test.equal(null, err);
* db.close();
* });
* });
* });
*/
/**
* Namespace provided by the browser.
* @external Readable
*/
/**
 * Creates a new Command Cursor instance (INTERNAL TYPE, do not instantiate directly)
 * @class CommandCursor
 * @extends external:Readable
 * @fires CommandCursor#data
 * @fires CommandCursor#end
 * @fires CommandCursor#close
 * @fires CommandCursor#readable
 * @return {CommandCursor} an CommandCursor instance.
 */
var CommandCursor = function(bson, ns, cmd, options, topology, topologyOptions) {
  // Initialize the underlying core cursor with the same arguments
  CoreCursor.apply(this, Array.prototype.slice.call(arguments, 0));
  var state = CommandCursor.INIT;
  var streamOptions = {};
  // MaxTimeMS
  var maxTimeMS = null;
  // Get the promiseLibrary
  var promiseLibrary = options.promiseLibrary;
  // No promise library selected fall back
  if(!promiseLibrary) {
    promiseLibrary = typeof global.Promise == 'function' ?
      global.Promise : require('es6-promise').Promise;
  }
  // Set up as an object-mode Readable stream
  Readable.call(this, {objectMode: true});
  // Internal state
  this.s = {
    // MaxTimeMS
    maxTimeMS: maxTimeMS
    // State
    , state: state
    // Stream options
    , streamOptions: streamOptions
    // BSON
    , bson: bson
    // Namespace
    , ns: ns
    // Command
    , cmd: cmd
    // Options
    , options: options
    // Topology
    , topology: topology
    // Topology Options
    , topologyOptions: topologyOptions
    // Promise library
    , promiseLibrary: promiseLibrary
  }
}
/**
 * CommandCursor stream data event, fired for each document in the cursor.
 *
 * @event CommandCursor#data
 * @type {object}
 */

/**
 * CommandCursor stream end event
 *
 * @event CommandCursor#end
 * @type {null}
 */

/**
 * CommandCursor stream close event
 *
 * @event CommandCursor#close
 * @type {null}
 */

/**
 * CommandCursor stream readable event
 *
 * @event CommandCursor#readable
 * @type {null}
 */

// Inherit from Readable
inherits(CommandCursor, Readable);

// Set the methods to inherit from prototype
var methodsToInherit = ['_next', 'next', 'each', 'forEach', 'toArray'
  , 'rewind', 'bufferedCount', 'readBufferedDocuments', 'close', 'isClosed', 'kill'
  , '_find', '_getmore', '_killcursor', 'isDead', 'explain', 'isNotified', 'isKilled'];

// Copy only the cursor methods we need from the core cursor prototype
for(var i = 0; i < methodsToInherit.length; i++) {
  CommandCursor.prototype[methodsToInherit[i]] = CoreCursor.prototype[methodsToInherit[i]];
}
var define = CommandCursor.define = new Define('CommandCursor', CommandCursor, true);

/**
 * Set the ReadPreference for the cursor.
 * @method
 * @param {(string|ReadPreference)} readPreference The new read preference for the cursor.
 * @throws {MongoError}
 * @return {Cursor}
 */
CommandCursor.prototype.setReadPreference = function(r) {
  // Read preference may only change while the cursor is still un-iterated
  if(this.s.state == CommandCursor.CLOSED || this.isDead()) throw MongoError.create({message: "Cursor is closed", driver:true});
  if(this.s.state != CommandCursor.INIT) throw MongoError.create({message: 'cannot change cursor readPreference after cursor has been accessed', driver:true});
  // Normalize driver-level ReadPreference instances into core ones
  this.s.options.readPreference = r instanceof ReadPreference
    ? new CoreReadPreference(r.mode, r.tags)
    : new CoreReadPreference(r);
  return this;
}

define.classMethod('setReadPreference', {callback: false, promise:false, returns: [CommandCursor]});

/**
 * Set the batch size for the cursor.
 * @method
 * @param {number} value The batchSize for the cursor.
 * @throws {MongoError}
 * @return {CommandCursor}
 */
CommandCursor.prototype.batchSize = function(value) {
  // Guard: cursor must still be open and the value must be numeric
  if(this.s.state == CommandCursor.CLOSED || this.isDead()) throw MongoError.create({message: "Cursor is closed", driver:true});
  if(typeof value != 'number') throw MongoError.create({message: "batchSize requires an integer", driver:true});
  // Mirror onto the wire command when it carries a cursor sub-document
  var cursorDoc = this.s.cmd.cursor;
  if(cursorDoc) cursorDoc.batchSize = value;
  this.setCursorBatchSize(value);
  return this;
}

define.classMethod('batchSize', {callback: false, promise:false, returns: [CommandCursor]});
/**
 * Add a maxTimeMS stage to the aggregation pipeline
 * @method
 * @param {number} value The state maxTimeMS value.
 * @return {CommandCursor}
 */
CommandCursor.prototype.maxTimeMS = function(value) {
  // Only set when the connected server supports maxTimeMS
  // NOTE(review): gates on minWireVersion > 2 — confirm this is the intended
  // threshold rather than the negotiated maxWireVersion
  if(this.s.topology.lastIsMaster().minWireVersion > 2) {
    this.s.cmd.maxTimeMS = value;
  }
  return this;
}

define.classMethod('maxTimeMS', {callback: false, promise:false, returns: [CommandCursor]});

// Legacy alias: cursor.get(...) behaves exactly like cursor.toArray(...)
CommandCursor.prototype.get = CommandCursor.prototype.toArray;

define.classMethod('get', {callback: true, promise:false});

// Inherited methods exposed through the Define metadata
define.classMethod('toArray', {callback: true, promise:true});
define.classMethod('each', {callback: true, promise:false});
define.classMethod('forEach', {callback: true, promise:false});
define.classMethod('next', {callback: true, promise:true});
define.classMethod('close', {callback: true, promise:true});
define.classMethod('isClosed', {callback: false, promise:false, returns: [Boolean]});
define.classMethod('rewind', {callback: false, promise:false});
define.classMethod('bufferedCount', {callback: false, promise:false, returns: [Number]});
define.classMethod('readBufferedDocuments', {callback: false, promise:false, returns: [Array]});
/**
* Get the next available document from the cursor, returns null if no more documents are available.
* @function CommandCursor.prototype.next
* @param {CommandCursor~resultCallback} [callback] The result callback.
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
*/
/**
* The callback format for results
* @callback CommandCursor~toArrayResultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {object[]} documents All the documents the satisfy the cursor.
*/
/**
* Returns an array of documents. The caller is responsible for making sure that there
* is enough memory to store the results. Note that the array only contain partial
* results when this cursor had been previouly accessed.
* @method CommandCursor.prototype.toArray
* @param {CommandCursor~toArrayResultCallback} [callback] The result callback.
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
*/
/**
* The callback format for results
* @callback CommandCursor~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {(object|null)} result The result object if the command was executed successfully.
*/
/**
* Iterates over all the documents for this cursor. As with **{cursor.toArray}**,
* not all of the elements will be iterated if this cursor had been previouly accessed.
* In that case, **{cursor.rewind}** can be used to reset the cursor. However, unlike
* **{cursor.toArray}**, the cursor will only hold a maximum of batch size elements
* at any given time if batch size is specified. Otherwise, the caller is responsible
* for making sure that the entire result can fit the memory.
* @method CommandCursor.prototype.each
* @param {CommandCursor~resultCallback} callback The result callback.
* @throws {MongoError}
* @return {null}
*/
/**
* Close the cursor, sending a KillCursor command and emitting close.
* @method CommandCursor.prototype.close
* @param {CommandCursor~resultCallback} [callback] The result callback.
* @return {Promise} returns Promise if no callback passed
*/
/**
* Is the cursor closed
* @method CommandCursor.prototype.isClosed
* @return {boolean}
*/
/**
* Clone the cursor
* @function CommandCursor.prototype.clone
* @return {CommandCursor}
*/
/**
* Resets the cursor
* @function CommandCursor.prototype.rewind
* @return {CommandCursor}
*/
/**
* The callback format for the forEach iterator method
* @callback CommandCursor~iteratorCallback
* @param {Object} doc An emitted document for the iterator
*/
/**
* The callback error format for the forEach iterator method
* @callback CommandCursor~endCallback
* @param {MongoError} error An error instance representing the error during the execution.
*/
/*
* Iterates over all the documents for this cursor using the iterator, callback pattern.
* @method CommandCursor.prototype.forEach
* @param {CommandCursor~iteratorCallback} iterator The iteration callback.
* @param {CommandCursor~endCallback} callback The end callback.
* @throws {MongoError}
* @return {null}
*/
CommandCursor.INIT = 0;
CommandCursor.OPEN = 1;
CommandCursor.CLOSED = 2;
module.exports = CommandCursor;

1200
node_modules/mongodb/lib/cursor.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

1779
node_modules/mongodb/lib/db.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

310
node_modules/mongodb/lib/gridfs-stream/download.js generated vendored Normal file
View File

@@ -0,0 +1,310 @@
var shallowClone = require('../utils').shallowClone;
var stream = require('stream');
var util = require('util');
module.exports = GridFSBucketReadStream;
/**
* A readable stream that enables you to read buffers from GridFS.
*
* Do not instantiate this class directly. Use `openDownloadStream()` instead.
*
* @class
* @param {Collection} chunks Handle for chunks collection
* @param {Collection} files Handle for files collection
* @param {Object} readPreference The read preference to use
* @param {Object} filter The query to use to find the file document
* @param {Object} [options=null] Optional settings.
* @param {Number} [options.sort=null] Optional sort for the file find query
* @param {Number} [options.skip=null] Optional skip for the file find query
* @param {Number} [options.start=null] Optional 0-based offset in bytes to start streaming from
* @param {Number} [options.end=null] Optional 0-based offset in bytes to stop streaming before
* @fires GridFSBucketReadStream#error
* @fires GridFSBucketReadStream#file
* @return {GridFSBucketReadStream} a GridFSBucketReadStream instance.
*/
function GridFSBucketReadStream(chunks, files, readPreference, filter, options) {
  // Internal state bag; `file` and `cursor` are populated lazily by init()
  // on the first read. (An unused `var _this = this;` was removed.)
  this.s = {
    bytesRead: 0,       // total chunk bytes consumed so far
    chunks: chunks,
    cursor: null,       // chunks cursor, created once the file doc is found
    expected: 0,        // next expected chunk sequence number `n`
    files: files,
    filter: filter,
    init: false,        // true once the files collection has been queried
    expectedEnd: 0,     // chunk index at which trailing-byte trimming applies
    file: null,         // the matched files-collection document
    options: options,
    readPreference: readPreference
  };
  stream.Readable.call(this);
}
util.inherits(GridFSBucketReadStream, stream.Readable);
/**
* An error occurred
*
* @event GridFSBucketReadStream#error
* @type {Error}
*/
/**
* Fires when the stream loaded the file document corresponding to the
* provided id.
*
* @event GridFSBucketReadStream#file
* @type {object}
*/
/**
* Reads from the cursor and pushes to the stream.
* @method
*/
// Readable-stream hook: make sure the file document is loaded, then pull
// the next chunk from the cursor and push it downstream.
GridFSBucketReadStream.prototype._read = function() {
  var readStream = this;
  waitForFile(readStream, function onFileReady() {
    doRead(readStream);
  });
};
/**
* Sets the 0-based offset in bytes to start streaming from. Throws
* an error if this stream has entered flowing mode
* (e.g. if you've already called `on('data')`)
* @method
* @param {Number} start Offset in bytes to start reading at
* @return {GridFSBucketReadStream}
*/
// Record the byte offset to begin streaming at; illegal once the stream has
// entered flowing mode.
GridFSBucketReadStream.prototype.start = function(offset) {
  throwIfInitialized(this);
  this.s.options.start = offset;
  return this;
};
/**
 * Sets the 0-based offset in bytes to stop streaming before. Throws
* an error if this stream has entered flowing mode
* (e.g. if you've already called `on('data')`)
* @method
* @param {Number} end Offset in bytes to stop reading at
* @return {GridFSBucketReadStream}
*/
// Record the byte offset to stop streaming before; illegal once the stream
// has entered flowing mode.
GridFSBucketReadStream.prototype.end = function(offset) {
  throwIfInitialized(this);
  this.s.options.end = offset;
  return this;
};
/**
* @ignore
*/
/**
 * @ignore
 * Guard used by the start()/end() option setters: they may only run before
 * the stream has been initialized by the first read.
 */
function throwIfInitialized(self) {
  if (self.s.init) {
    // Bug fix: the two string fragments previously concatenated without a
    // space, producing "enteredflowing mode!".
    throw new Error('You cannot change options after the stream has entered ' +
      'flowing mode!');
  }
}
/**
* @ignore
*/
// Pull the next chunk document from the sorted chunks cursor, validate its
// sequence number and size, and push its bytes downstream. Pushes null to
// signal end-of-stream when the cursor is exhausted.
function doRead(_this) {
  _this.s.cursor.next(function(error, doc) {
    if (error) {
      return __handleError(_this, error);
    }
    if (!doc) {
      // Cursor exhausted: end the stream.
      return _this.push(null);
    }
    // Bytes of the file still unread, and the size this chunk must therefore
    // have (a full chunk, or the shorter final remainder).
    var bytesRemaining = _this.s.file.length - _this.s.bytesRead;
    var expectedN = _this.s.expected++;
    var expectedLength = Math.min(_this.s.file.chunkSize,
      bytesRemaining);
    // Chunks must arrive with consecutive `n` values; a gap or repeat means
    // the stored file is corrupt.
    if (doc.n > expectedN) {
      var errmsg = 'ChunkIsMissing: Got unexpected n: ' + doc.n +
        ', expected: ' + expectedN;
      return __handleError(_this, new Error(errmsg));
    }
    if (doc.n < expectedN) {
      var errmsg = 'ExtraChunk: Got unexpected n: ' + doc.n +
        ', expected: ' + expectedN;
      return __handleError(_this, new Error(errmsg));
    }
    // Every chunk must be exactly the expected size.
    if (doc.data.length() !== expectedLength) {
      if (bytesRemaining <= 0) {
        var errmsg = 'ExtraChunk: Got unexpected n: ' + doc.n;
        return __handleError(_this, new Error(errmsg));
      }
      var errmsg = 'ChunkIsWrongSize: Got unexpected length: ' +
        doc.data.length() + ', expected: ' + expectedLength;
      return __handleError(_this, new Error(errmsg));
    }
    _this.s.bytesRead += doc.data.length();
    if (doc.data.buffer.length === 0) {
      return _this.push(null);
    }
    // Honor the byte-range options: skip leading bytes of the first chunk
    // (bytesToSkip) and trim trailing bytes of the last chunk (bytesToTrim)
    // when start/end were set by handleStartOption/handleEndOption.
    var sliceStart = null;
    var sliceEnd = null;
    var buf = doc.data.buffer;
    if (_this.s.bytesToSkip != null) {
      sliceStart = _this.s.bytesToSkip;
      _this.s.bytesToSkip = 0;
    }
    // NOTE(review): trimming triggers when n equals expectedEnd, while the
    // last chunk index is expectedEnd - 1 — verify this off-by-one against
    // how expectedEnd is set in init()/handleEndOption().
    if (expectedN === _this.s.expectedEnd && _this.s.bytesToTrim != null) {
      sliceEnd = _this.s.bytesToTrim;
    }
    if (sliceStart != null || sliceEnd != null) {
      buf = buf.slice(sliceStart || 0, sliceEnd || buf.length);
    }
    _this.push(buf);
  });
};
/**
* @ignore
*/
// One-time setup: look up the file document matching the stream's filter,
// create the sorted chunks cursor, and translate the byte-range options.
// Emits 'file' on success so waiters in waitForFile() can resume.
function init(self) {
  // Build the findOne options for locating the file document.
  var findOneOptions = {};
  if (self.s.readPreference) {
    findOneOptions.readPreference = self.s.readPreference;
  }
  if (self.s.options && self.s.options.sort) {
    findOneOptions.sort = self.s.options.sort;
  }
  if (self.s.options && self.s.options.skip) {
    findOneOptions.skip = self.s.options.skip;
  }
  self.s.files.findOne(self.s.filter, findOneOptions, function(error, doc) {
    if (error) {
      return __handleError(self, error);
    }
    if (!doc) {
      // Prefer the _id for the error message; fall back to the filename.
      var identifier = self.s.filter._id ?
        self.s.filter._id.toString() : self.s.filter.filename;
      var errmsg = 'FileNotFound: file ' + identifier + ' was not found';
      return __handleError(self, new Error(errmsg));
    }
    // If document is empty, kill the stream immediately and don't
    // execute any reads
    if (doc.length <= 0) {
      self.push(null);
      return;
    }
    // Stream the chunks in sequence order.
    self.s.cursor = self.s.chunks.find({ files_id: doc._id }).sort({ n: 1 });
    if (self.s.readPreference) {
      self.s.cursor.setReadPreference(self.s.readPreference);
    }
    self.s.expectedEnd = Math.ceil(doc.length / doc.chunkSize);
    self.s.file = doc;
    // Translate start/end byte offsets into cursor skip/limit plus per-chunk
    // slice offsets that doRead() consumes.
    self.s.bytesToSkip = handleStartOption(self, doc, self.s.cursor,
      self.s.options);
    self.s.bytesToTrim = handleEndOption(self, doc, self.s.cursor,
      self.s.options);
    self.emit('file', doc);
  });
}
/**
* @ignore
*/
// Run `callback` once the file document is available, kicking off the
// one-time init() lookup on first use.
function waitForFile(_this, callback) {
  if (_this.s.file) {
    return callback();
  }
  if (!_this.s.init) {
    _this.s.init = true;
    init(_this);
  }
  _this.once('file', function() {
    callback();
  });
}
/**
* @ignore
*/
// Apply the `start` byte-offset option: position the cursor at the first
// relevant chunk, seed the stream's byte/sequence counters, and return how
// many bytes of that first chunk must still be skipped. Returns undefined
// when no `start` option is set.
function handleStartOption(stream, doc, cursor, options) {
  if (!options || options.start == null) {
    return;
  }
  if (options.start > doc.length) {
    throw new Error('Stream start (' + options.start + ') must not be ' +
      'more than the length of the file (' + doc.length +')')
  }
  if (options.start < 0) {
    throw new Error('Stream start (' + options.start + ') must not be ' +
      'negative');
  }
  if (options.end != null && options.end < options.start) {
    throw new Error('Stream start (' + options.start + ') must not be ' +
      'greater than stream end (' + options.end + ')');
  }
  var firstChunk = Math.floor(options.start / doc.chunkSize);
  cursor.skip(firstChunk);
  stream.s.bytesRead = firstChunk * doc.chunkSize;
  stream.s.expected = firstChunk;
  return options.start - stream.s.bytesRead;
}
/**
* @ignore
*/
/**
 * @ignore
 * Apply the `end` byte-offset option: cap the cursor with a limit covering
 * only the needed chunks, record where trimming applies, and return how many
 * trailing bytes of the final chunk must be trimmed. Returns undefined when
 * no `end` option is set.
 */
function handleEndOption(stream, doc, cursor, options) {
  if (options && options.end != null) {
    if (options.end > doc.length) {
      throw new Error('Stream end (' + options.end + ') must not be ' +
        'more than the length of the file (' + doc.length +')')
    }
    // Bug fix: this previously validated `options.start`, so a negative
    // `end` slipped through unchecked.
    if (options.end < 0) {
      throw new Error('Stream end (' + options.end + ') must not be ' +
        'negative');
    }
    var start = options.start != null ?
      Math.floor(options.start / doc.chunkSize) :
      0;
    cursor.limit(Math.ceil(options.end / doc.chunkSize) - start);
    stream.s.expectedEnd = Math.ceil(options.end / doc.chunkSize);
    return (Math.ceil(options.end / doc.chunkSize) * doc.chunkSize) -
      options.end;
  }
}
/**
* @ignore
*/
// Surface an error to consumers of the read stream via its 'error' event.
function __handleError(self, error) {
  self.emit('error', error);
}

335
node_modules/mongodb/lib/gridfs-stream/index.js generated vendored Normal file
View File

@@ -0,0 +1,335 @@
var Emitter = require('events').EventEmitter;
var GridFSBucketReadStream = require('./download');
var GridFSBucketWriteStream = require('./upload');
var shallowClone = require('../utils').shallowClone;
var toError = require('../utils').toError;
var util = require('util');
var DEFAULT_GRIDFS_BUCKET_OPTIONS = {
bucketName: 'fs',
chunkSizeBytes: 255 * 1024
};
module.exports = GridFSBucket;
/**
* Constructor for a streaming GridFS interface
* @class
* @param {Db} db A db handle
* @param {object} [options=null] Optional settings.
* @param {string} [options.bucketName="fs"] The 'files' and 'chunks' collections will be prefixed with the bucket name followed by a dot.
* @param {number} [options.chunkSizeBytes=255 * 1024] Number of bytes stored in each chunk. Defaults to 255KB
* @param {object} [options.writeConcern=null] Optional write concern to be passed to write operations, for instance `{ w: 1 }`
* @param {object} [options.readPreference=null] Optional read preference to be passed to read operations
* @fires GridFSBucketWriteStream#index
* @return {GridFSBucket}
*/
function GridFSBucket(db, options) {
  Emitter.apply(this);
  this.setMaxListeners(0);
  // Merge caller options over the defaults without mutating the caller's
  // object; a missing/non-object argument falls back to the defaults as-is.
  if (options && typeof options === 'object') {
    options = shallowClone(options);
    Object.keys(DEFAULT_GRIDFS_BUCKET_OPTIONS).forEach(function(key) {
      if (!options[key]) {
        options[key] = DEFAULT_GRIDFS_BUCKET_OPTIONS[key];
      }
    });
  } else {
    options = DEFAULT_GRIDFS_BUCKET_OPTIONS;
  }
  this.s = {
    db: db,
    options: options,
    _chunksCollection: db.collection(options.bucketName + '.chunks'),
    _filesCollection: db.collection(options.bucketName + '.files'),
    checkedIndexes: false,
    calledOpenUploadStream: false,
    promiseLibrary: db.s.promiseLibrary ||
      (typeof global.Promise == 'function' ? global.Promise : require('es6-promise').Promise)
  };
}
util.inherits(GridFSBucket, Emitter);
/**
* When the first call to openUploadStream is made, the upload stream will
* check to see if it needs to create the proper indexes on the chunks and
* files collections. This event is fired either when 1) it determines that
* no index creation is necessary, 2) when it successfully creates the
* necessary indexes.
*
* @event GridFSBucket#index
* @type {Error}
*/
/**
* Returns a writable stream (GridFSBucketWriteStream) for writing
* buffers to GridFS. The stream's 'id' property contains the resulting
* file's id.
* @method
* @param {string} filename The value of the 'filename' key in the files doc
* @param {object} [options=null] Optional settings.
* @param {number} [options.chunkSizeBytes=null] Optional overwrite this bucket's chunkSizeBytes for this file
* @param {object} [options.metadata=null] Optional object to store in the file document's `metadata` field
* @param {string} [options.contentType=null] Optional string to store in the file document's `contentType` field
* @param {array} [options.aliases=null] Optional array of strings to store in the file document's `aliases` field
* @return {GridFSBucketWriteStream}
*/
GridFSBucket.prototype.openUploadStream = function(filename, options) {
  // Copy caller options so filling in the bucket's chunk-size default never
  // leaks back into the caller's object.
  options = options ? shallowClone(options) : {};
  if (!options.chunkSizeBytes) {
    options.chunkSizeBytes = this.s.options.chunkSizeBytes;
  }
  return new GridFSBucketWriteStream(this, filename, options);
};
/**
* Returns a readable stream (GridFSBucketReadStream) for streaming file
* data from GridFS.
* @method
* @param {ObjectId} id The id of the file doc
* @param {Object} [options=null] Optional settings.
* @param {Number} [options.start=null] Optional 0-based offset in bytes to start streaming from
* @param {Number} [options.end=null] Optional 0-based offset in bytes to stop streaming before
* @return {GridFSBucketReadStream}
*/
GridFSBucket.prototype.openDownloadStream = function(id, options) {
  var filter = { _id: id };
  // Only the byte-range options are forwarded to the read stream; use a
  // distinct name rather than shadowing the `options` parameter.
  var streamOptions = {
    start: options && options.start,
    end: options && options.end
  };
  return new GridFSBucketReadStream(this.s._chunksCollection,
    this.s._filesCollection, this.s.options.readPreference, filter,
    streamOptions);
};
/**
* Deletes a file with the given id
* @method
* @param {ObjectId} id The id of the file doc
* @param {Function} callback
*/
GridFSBucket.prototype.delete = function(id, callback) {
  // Callback style when a callback is supplied...
  if (typeof callback === 'function') {
    return _delete(this, id, callback);
  }
  // ...otherwise adapt to a promise via the configured promise library.
  var self = this;
  return new this.s.promiseLibrary(function(resolve, reject) {
    _delete(self, id, function(error, res) {
      if (error) return reject(error);
      resolve(res);
    });
  });
};
/**
* @ignore
*/
// Remove the file document, then all of its chunks. The FileNotFound error
// is only reported after the chunk delete so that orphaned chunks from a
// previously failed delete still get cleaned up.
function _delete(_this, id, callback) {
  _this.s._filesCollection.deleteOne({ _id: id }, function(error, res) {
    if (error) {
      return callback(error);
    }
    _this.s._chunksCollection.deleteMany({ files_id: id }, function(error) {
      if (error) {
        return callback(error);
      }
      if (!res.result.n) {
        return callback(new Error('FileNotFound: no file with id ' + id + ' found'));
      }
      callback();
    });
  });
}
/**
* Convenience wrapper around find on the files collection
* @method
* @param {Object} filter
* @param {Object} [options=null] Optional settings for cursor
* @param {number} [options.batchSize=null] Optional batch size for cursor
* @param {number} [options.limit=null] Optional limit for cursor
* @param {number} [options.maxTimeMS=null] Optional maxTimeMS for cursor
* @param {boolean} [options.noCursorTimeout=null] Optionally set cursor's `noCursorTimeout` flag
* @param {number} [options.skip=null] Optional skip for cursor
* @param {object} [options.sort=null] Optional sort for cursor
* @return {Cursor}
*/
GridFSBucket.prototype.find = function(filter, options) {
  filter = filter || {};
  options = options || {};
  var cursor = this.s._filesCollection.find(filter);
  // Forward each recognized cursor option only when explicitly provided
  // (null/undefined means "leave the cursor default alone").
  if (options.batchSize != null) cursor.batchSize(options.batchSize);
  if (options.limit != null) cursor.limit(options.limit);
  if (options.maxTimeMS != null) cursor.maxTimeMS(options.maxTimeMS);
  if (options.noCursorTimeout != null) {
    cursor.addCursorFlag('noCursorTimeout', options.noCursorTimeout);
  }
  if (options.skip != null) cursor.skip(options.skip);
  if (options.sort != null) cursor.sort(options.sort);
  return cursor;
};
/**
* Returns a readable stream (GridFSBucketReadStream) for streaming the
* file with the given name from GridFS. If there are multiple files with
* the same name, this will stream the most recent file with the given name
* (as determined by the `uploadedDate` field). You can set the `revision`
* option to change this behavior.
* @method
* @param {String} filename The name of the file to stream
* @param {Object} [options=null] Optional settings
* @param {number} [options.revision=-1] The revision number relative to the oldest file with the given filename. 0 gets you the oldest file, 1 gets you the 2nd oldest, -1 gets you the newest.
* @param {Number} [options.start=null] Optional 0-based offset in bytes to start streaming from
* @param {Number} [options.end=null] Optional 0-based offset in bytes to stop streaming before
* @return {GridFSBucketReadStream}
*/
GridFSBucket.prototype.openDownloadStreamByName = function(filename, options) {
  // Default: newest revision first. Files documents store the upload time in
  // the `uploadDate` field (see createFilesDoc and the files index), so sort
  // on that; this previously sorted on the nonexistent `uploadedDate`, which
  // made the `revision` option select arbitrary documents.
  var sort = { uploadDate: -1 };
  var skip = null;
  if (options && options.revision != null) {
    if (options.revision >= 0) {
      // Non-negative revisions count forward from the oldest file.
      sort = { uploadDate: 1 };
      skip = options.revision;
    } else {
      // Negative revisions count back from the newest file.
      skip = -options.revision - 1;
    }
  }
  var filter = { filename: filename };
  // Use a distinct name rather than shadowing the `options` parameter.
  var streamOptions = {
    sort: sort,
    skip: skip,
    start: options && options.start,
    end: options && options.end
  };
  return new GridFSBucketReadStream(this.s._chunksCollection,
    this.s._filesCollection, this.s.options.readPreference, filter,
    streamOptions);
};
/**
* Renames the file with the given _id to the given string
* @method
* @param {ObjectId} id the id of the file to rename
* @param {String} filename new name for the file
* @param {GridFSBucket~errorCallback} [callback]
*/
GridFSBucket.prototype.rename = function(id, filename, callback) {
  // Callback style when a callback is supplied...
  if (typeof callback === 'function') {
    return _rename(this, id, filename, callback);
  }
  // ...otherwise adapt to a promise via the configured promise library.
  var self = this;
  return new this.s.promiseLibrary(function(resolve, reject) {
    _rename(self, id, filename, function(error, res) {
      if (error) return reject(error);
      resolve(res);
    });
  });
};
/**
* @ignore
*/
// Set a new filename on the file document with the given id; reports an
// error when no document matched.
function _rename(_this, id, filename, callback) {
  var update = { $set: { filename: filename } };
  _this.s._filesCollection.updateOne({ _id: id }, update, function(error, res) {
    if (error) {
      return callback(error);
    }
    if (!res.result.n) {
      return callback(toError('File with id ' + id + ' not found'));
    }
    callback();
  });
}
/**
* Removes this bucket's files collection, followed by its chunks collection.
* @method
* @param {GridFSBucket~errorCallback} [callback]
*/
GridFSBucket.prototype.drop = function(callback) {
  // Callback style when a callback is supplied...
  if (typeof callback === 'function') {
    return _drop(this, callback);
  }
  // ...otherwise adapt to a promise via the configured promise library.
  var self = this;
  return new this.s.promiseLibrary(function(resolve, reject) {
    _drop(self, function(error, res) {
      if (error) return reject(error);
      resolve(res);
    });
  });
};
/**
* @ignore
*/
// Drop the files collection first, then the chunks collection, stopping at
// the first error.
function _drop(_this, callback) {
  var chunksCollection = _this.s._chunksCollection;
  _this.s._filesCollection.drop(function(filesError) {
    if (filesError) {
      return callback(filesError);
    }
    chunksCollection.drop(function(chunksError) {
      return chunksError ? callback(chunksError) : callback();
    });
  });
}
/**
* Callback format for all GridFSBucket methods that can accept a callback.
* @callback GridFSBucket~errorCallback
* @param {MongoError} error An error instance representing any errors that occurred
*/

450
node_modules/mongodb/lib/gridfs-stream/upload.js generated vendored Normal file
View File

@@ -0,0 +1,450 @@
var core = require('mongodb-core');
var crypto = require('crypto');
var shallowClone = require('../utils').shallowClone;
var stream = require('stream');
var util = require('util');
var ERROR_NAMESPACE_NOT_FOUND = 26;
module.exports = GridFSBucketWriteStream;
/**
* A writable stream that enables you to write buffers to GridFS.
*
* Do not instantiate this class directly. Use `openUploadStream()` instead.
*
* @class
* @param {GridFSBucket} bucket Handle for this stream's corresponding bucket
* @param {string} filename The value of the 'filename' key in the files doc
* @param {object} [options=null] Optional settings.
* @param {number} [options.chunkSizeBytes=null] The chunk size to use, in bytes
* @param {number} [options.w=null] The write concern
* @param {number} [options.wtimeout=null] The write concern timeout
* @param {number} [options.j=null] The journal write concern
* @fires GridFSBucketWriteStream#error
* @fires GridFSBucketWriteStream#finish
* @return {GridFSBucketWriteStream} a GridFSBucketWriteStream instance.
*/
function GridFSBucketWriteStream(bucket, filename, options) {
  this.bucket = bucket;
  this.chunks = bucket.s._chunksCollection;
  this.filename = filename;
  this.files = bucket.s._filesCollection;
  this.options = options;
  // A fresh id for the files document, exposed so callers can reference the
  // file before the upload finishes.
  this.id = core.BSON.ObjectId();
  this.chunkSizeBytes = this.options.chunkSizeBytes;
  this.bufToStore = new Buffer(this.chunkSizeBytes);
  this.length = 0;
  this.md5 = crypto.createHash('md5');
  this.n = 0;
  this.pos = 0;
  this.state = {
    streamEnd: false,
    outstandingRequests: 0,
    errored: false
  };
  // Only the first write stream per bucket verifies/creates the GridFS
  // indexes; later streams wait on the bucket's 'index' event instead.
  if (!bucket.s.calledOpenUploadStream) {
    bucket.s.calledOpenUploadStream = true;
    var self = this;
    checkIndexes(self, function() {
      self.bucket.s.checkedIndexes = true;
      self.bucket.emit('index');
    });
  }
}
util.inherits(GridFSBucketWriteStream, stream.Writable);
/**
* An error occurred
*
* @event GridFSBucketWriteStream#error
* @type {Error}
*/
/**
* end() was called and the write stream successfully wrote all chunks to
* MongoDB.
*
* @event GridFSBucketWriteStream#finish
* @type {object}
*/
/**
* Write a buffer to the stream.
*
* @method
* @param {Buffer} chunk Buffer to write
* @param {String} encoding Optional encoding for the buffer
* @param {Function} callback Function to call when the chunk was added to the buffer, or if the entire chunk was persisted to MongoDB if this chunk caused a flush.
* @return {Boolean} False if this write required flushing a chunk to MongoDB. True otherwise.
*/
// Queue a buffer for upload once indexes are known to exist; flushes to
// MongoDB whenever a full chunk accumulates. Returns false when the caller
// should wait for 'drain'.
GridFSBucketWriteStream.prototype.write = function(chunk, encoding, callback) {
  var self = this;
  return waitForIndexes(self, function() {
    return doWrite(self, chunk, encoding, callback);
  });
};
/**
* Tells the stream that no more data will be coming in. The stream will
* persist the remaining data to MongoDB, write the files document, and
* then emit a 'finish' event.
*
* @method
* @param {Buffer} chunk Buffer to write
* @param {String} encoding Optional encoding for the buffer
* @param {Function} callback Function to call when all files and chunks have been persisted to MongoDB
*/
GridFSBucketWriteStream.prototype.end = function(chunk, encoding, callback) {
  var _this = this;
  // Mark the stream finished; checkDone() uses this flag to decide when the
  // files document may be written.
  this.state.streamEnd = true;
  if (callback) {
    this.once('finish', callback);
  }
  // No trailing chunk: just flush whatever is buffered.
  if (!chunk) {
    waitForIndexes(this, function() {
      writeRemnant(_this);
    });
    return;
  }
  // Write the final chunk, then flush the remainder. (A duplicate `_this`
  // declaration and an unused Buffer conversion of `chunk` were removed
  // here; write() performs the conversion itself.)
  this.write(chunk, encoding, function() {
    writeRemnant(_this);
  });
};
/**
* @ignore
*/
// Report an error exactly once: prefer the supplied callback, fall back to
// the stream's 'error' event. Errors after the first are ignored.
function __handleError(self, error, callback) {
  if (self.state.errored) return;
  self.state.errored = true;
  if (callback) return callback(error);
  self.emit('error', error);
}
/**
* @ignore
*/
// Build a chunks-collection document for chunk number `n` of file `filesId`.
function createChunkDoc(filesId, n, data) {
  var doc = {
    _id: core.BSON.ObjectId(),
    files_id: filesId,
    n: n,
    data: data
  };
  return doc;
}
/**
* @ignore
*/
// Ensure the { files_id: 1, n: 1 } index exists on the chunks collection,
// creating it (foreground) when missing.
function checkChunksIndex(_this, callback) {
  _this.chunks.listIndexes().toArray(function(error, indexes) {
    if (error) {
      // Collection doesn't exist so create index
      if (error.code === ERROR_NAMESPACE_NOT_FOUND) {
        var index = { files_id: 1, n: 1 };
        _this.chunks.createIndex(index, { background: false }, function(error) {
          if (error) {
            return callback(error);
          }
          callback();
        });
        return;
      }
      return callback(error);
    }
    // Scan existing indexes for an exact { files_id: 1, n: 1 } match.
    var hasChunksIndex = false;
    indexes.forEach(function(index) {
      if (index.key) {
        var keys = Object.keys(index.key);
        if (keys.length === 2 && index.key.files_id === 1 &&
            index.key.n === 1) {
          hasChunksIndex = true;
        }
      }
    });
    if (hasChunksIndex) {
      callback();
    } else {
      var index = { files_id: 1, n: 1 };
      var indexOptions = getWriteOptions(_this);
      // NOTE(review): unlike the namespace-not-found branch above, this path
      // also marks the index unique — confirm the asymmetry is intentional.
      indexOptions.background = false;
      indexOptions.unique = true;
      _this.chunks.createIndex(index, indexOptions, function(error) {
        if (error) {
          return callback(error);
        }
        callback();
      });
    }
  });
}
/**
* @ignore
*/
// Once the stream has ended, no chunk inserts are outstanding, and no error
// occurred, write the files-collection document and emit 'finish'. Returns
// true when the finalizing insert was initiated, false otherwise.
function checkDone(_this, callback) {
  if (_this.state.streamEnd &&
      _this.state.outstandingRequests === 0 &&
      !_this.state.errored) {
    var filesDoc = createFilesDoc(_this.id, _this.length, _this.chunkSizeBytes,
      _this.md5.digest('hex'), _this.filename, _this.options.contentType,
      _this.options.aliases, _this.options.metadata);
    _this.files.insert(filesDoc, getWriteOptions(_this), function(error) {
      if (error) {
        return __handleError(_this, error, callback);
      }
      // NOTE(review): `callback` is only invoked on the error path; success
      // is signaled solely via the 'finish' event — confirm intended.
      _this.emit('finish', filesDoc);
    });
    return true;
  }
  return false;
}
/**
* @ignore
*/
// One-time index verification for a bucket: if the files collection already
// contains documents, assume the indexes exist; otherwise ensure the
// { filename: 1, uploadDate: 1 } files index, then the chunks index.
function checkIndexes(_this, callback) {
  _this.files.findOne({}, { _id: 1 }, function(error, doc) {
    if (error) {
      return callback(error);
    }
    // Non-empty files collection: indexes are presumed present.
    if (doc) {
      return callback();
    }
    _this.files.listIndexes().toArray(function(error, indexes) {
      if (error) {
        // Collection doesn't exist so create index
        if (error.code === ERROR_NAMESPACE_NOT_FOUND) {
          var index = { filename: 1, uploadDate: 1 };
          _this.files.createIndex(index, { background: false }, function(error) {
            if (error) {
              return callback(error);
            }
            checkChunksIndex(_this, callback);
          });
          return;
        }
        return callback(error);
      }
      // Scan existing indexes for an exact { filename: 1, uploadDate: 1 }
      // match before creating one.
      var hasFileIndex = false;
      indexes.forEach(function(index) {
        var keys = Object.keys(index.key);
        if (keys.length === 2 && index.key.filename === 1 &&
            index.key.uploadDate === 1) {
          hasFileIndex = true;
        }
      });
      if (hasFileIndex) {
        checkChunksIndex(_this, callback);
      } else {
        var index = { filename: 1, uploadDate: 1 };
        var indexOptions = getWriteOptions(_this);
        indexOptions.background = false;
        _this.files.createIndex(index, indexOptions, function(error) {
          if (error) {
            return callback(error);
          }
          checkChunksIndex(_this, callback);
        });
      }
    });
  });
}
/**
* @ignore
*/
// Assemble the files-collection document for a completed upload; the
// optional fields (contentType, aliases, metadata) are included only when
// provided.
function createFilesDoc(_id, length, chunkSize, md5, filename, contentType,
    aliases, metadata) {
  var doc = {
    _id: _id,
    length: length,
    chunkSize: chunkSize,
    uploadDate: new Date(),
    md5: md5,
    filename: filename
  };
  if (contentType) doc.contentType = contentType;
  if (aliases) doc.aliases = aliases;
  if (metadata) doc.metadata = metadata;
  return doc;
}
/**
* @ignore
*/
// Copy `chunk` into the staging buffer, issuing one insert per filled
// chunk-sized buffer. Return value semantics are reversed relative to the
// usual Writable contract (see comments below) for .pipe() compatibility.
function doWrite(_this, chunk, encoding, callback) {
  var inputBuf = (Buffer.isBuffer(chunk)) ?
    chunk : new Buffer(chunk, encoding);
  _this.length += inputBuf.length;
  // Input is small enough to fit in our buffer
  if (_this.pos + inputBuf.length < _this.chunkSizeBytes) {
    inputBuf.copy(_this.bufToStore, _this.pos);
    _this.pos += inputBuf.length;
    callback && callback();
    // Note that we reverse the typical semantics of write's return value
    // to be compatible with node's `.pipe()` function.
    // True means client can keep writing.
    return true;
  }
  // Otherwise, buffer is too big for current chunk, so we need to flush
  // to MongoDB.
  var inputBufRemaining = inputBuf.length;
  var spaceRemaining = _this.chunkSizeBytes - _this.pos;
  var numToCopy = Math.min(spaceRemaining, inputBuf.length);
  var outstandingRequests = 0;
  while (inputBufRemaining > 0) {
    var inputBufPos = inputBuf.length - inputBufRemaining;
    inputBuf.copy(_this.bufToStore, _this.pos,
      inputBufPos, inputBufPos + numToCopy);
    _this.pos += numToCopy;
    spaceRemaining -= numToCopy;
    // Staging buffer full: persist it as the next chunk document.
    if (spaceRemaining === 0) {
      _this.md5.update(_this.bufToStore);
      var doc = createChunkDoc(_this.id, _this.n, _this.bufToStore);
      ++_this.state.outstandingRequests;
      ++outstandingRequests;
      // NOTE(review): bufToStore is reused for the next chunk while this
      // insert may still reference it — presumably the driver serializes
      // synchronously; confirm.
      _this.chunks.insert(doc, getWriteOptions(_this), function(error) {
        if (error) {
          return __handleError(_this, error);
        }
        --_this.state.outstandingRequests;
        --outstandingRequests;
        // All inserts from this write() call have landed: let the producer
        // continue, and finalize if end() already ran.
        if (!outstandingRequests) {
          _this.emit('drain', doc);
          callback && callback();
          checkDone(_this);
        }
      });
      // Reset the staging buffer for the next chunk.
      spaceRemaining = _this.chunkSizeBytes;
      _this.pos = 0;
      ++_this.n;
    }
    inputBufRemaining -= numToCopy;
    numToCopy = Math.min(spaceRemaining, inputBufRemaining);
  }
  // Note that we reverse the typical semantics of write's return value
  // to be compatible with node's `.pipe()` function.
  // False means the client should wait for the 'drain' event.
  return false;
}
/**
* @ignore
*/
/**
 * @ignore
 * Translate the stream's writeConcern option into options for insert and
 * createIndex calls. Returns an empty object when no writeConcern is set.
 */
function getWriteOptions(_this) {
  var obj = {};
  var concern = _this.options.writeConcern;
  if (concern) {
    // Bug fix: `concern` was previously read without ever being defined, so
    // any stream configured with a writeConcern threw a ReferenceError here.
    obj.w = concern.w;
    obj.wtimeout = concern.wtimeout;
    obj.j = concern.j;
  }
  return obj;
}
/**
* @ignore
*/
/**
 * @ignore
 * Run `callback` once the bucket's indexes are known to exist. The callback
 * receives true when it had to wait for the bucket's 'index' event.
 */
function waitForIndexes(_this, callback) {
  if (_this.bucket.s.checkedIndexes) {
    // Bug fix: previously fell through and ALSO registered an 'index'
    // listener, so the callback could fire a second time and listeners
    // accumulated on the bucket emitter.
    callback(false);
    return true;
  }
  _this.bucket.once('index', function() {
    callback(true);
  });
  return true;
}
/**
* @ignore
*/
// Flush the partially-filled staging buffer as the final (short) chunk,
// then attempt to finalize the upload via checkDone().
function writeRemnant(_this, callback) {
  // Buffer is empty, so don't bother to insert
  if (_this.pos === 0) {
    return checkDone(_this, callback);
  }
  ++_this.state.outstandingRequests;
  // Create a new buffer to make sure the buffer isn't bigger than it needs
  // to be.
  var remnant = new Buffer(_this.pos);
  _this.bufToStore.copy(remnant, 0, 0, _this.pos);
  _this.md5.update(remnant);
  var doc = createChunkDoc(_this.id, _this.n, remnant);
  _this.chunks.insert(doc, getWriteOptions(_this), function(error) {
    if (error) {
      return __handleError(_this, error);
    }
    --_this.state.outstandingRequests;
    checkDone(_this);
  });
}

233
node_modules/mongodb/lib/gridfs/chunk.js generated vendored Normal file
View File

@@ -0,0 +1,233 @@
"use strict";
var Binary = require('mongodb-core').BSON.Binary,
ObjectID = require('mongodb-core').BSON.ObjectID;
/**
* Class for representing a single chunk in GridFS.
*
* @class
*
* @param file {GridStore} The {@link GridStore} object holding this chunk.
* @param mongoObject {object} The mongo object representation of this chunk.
*
* @throws Error when the type of data field for {@link mongoObject} is not
* supported. Currently supported types for data field are instances of
 * {@link String}, {@link Array}, {@link Buffer} and {@link Binary}
* from the bson module
*
* @see Chunk#buildMongoObject
*/
var Chunk = function(file, mongoObject, writeConcern) {
  // Guard against being invoked without `new`. NOTE(review): this
  // re-invocation drops the `writeConcern` argument — confirm intended.
  if(!(this instanceof Chunk)) return new Chunk(file, mongoObject);
  this.file = file;
  var self = this;
  var mongoObjectFinal = mongoObject == null ? {} : mongoObject;
  this.writeConcern = writeConcern || {w:1};
  this.objectId = mongoObjectFinal._id == null ? new ObjectID() : mongoObjectFinal._id;
  this.chunkNumber = mongoObjectFinal.n == null ? 0 : mongoObjectFinal.n;
  this.data = new Binary();
  if(mongoObjectFinal.data == null) {
    // No data supplied: keep the empty Binary created above.
  } else if(typeof mongoObjectFinal.data == "string") {
    // String payload: copy into a Buffer, then wrap as Binary.
    var buffer = new Buffer(mongoObjectFinal.data.length);
    buffer.write(mongoObjectFinal.data, 0, mongoObjectFinal.data.length, 'binary');
    this.data = new Binary(buffer);
  } else if(Array.isArray(mongoObjectFinal.data)) {
    // Array payload: join into one binary string, copy, and wrap as Binary.
    var buffer = new Buffer(mongoObjectFinal.data.length);
    var data = mongoObjectFinal.data.join('');
    buffer.write(data, 0, data.length, 'binary');
    this.data = new Binary(buffer);
  } else if(mongoObjectFinal.data._bsontype === 'Binary') {
    // Already a bson Binary: use it directly.
    this.data = mongoObjectFinal.data;
  } else if(Buffer.isBuffer(mongoObjectFinal.data)) {
    // NOTE(review): Buffer input is accepted but ignored, leaving this.data
    // as an empty Binary — looks like a dropped branch body; confirm.
  } else {
    throw Error("Illegal chunk format");
  }
  // Update position
  this.internalPosition = 0;
};
/**
* Writes a data to this object and advance the read/write head.
*
* @param data {string} the data to write
* @param callback {function(*, GridStore)} This will be called after executing
* this method. The first parameter will contain null and the second one
* will contain a reference to this object.
*/
Chunk.prototype.write = function(data, callback) {
  // Append the payload at the current head, then advance the head to the end.
  this.data.write(data, this.internalPosition, data.length, 'binary');
  this.internalPosition = this.data.length();
  // Support both callback and fluent styles.
  return callback == null ? this : callback(null, this);
};
/**
* Reads data and advances the read/write head.
*
* @param length {number} The length of data to read.
*
* @return {string} The data read if the given length will not exceed the end of
* the chunk. Returns an empty String otherwise.
*/
Chunk.prototype.read = function(length) {
  // A null or zero length means "read the whole chunk".
  if(length == null || length == 0) length = this.length();

  // Not enough bytes remaining: return the empty string.
  if(this.length() - this.internalPosition + 1 < length) return '';

  var data = this.data.read(this.internalPosition, length);
  this.internalPosition += length;
  return data;
};
// Reads `length` bytes as a Buffer slice and advances the head; returns null
// when fewer than `length` bytes remain in the chunk.
Chunk.prototype.readSlice = function(length) {
  if((this.length() - this.internalPosition) < length) return null;

  var data;
  if(this.data.buffer != null) {
    // Pure JS BSON: slice the underlying buffer directly.
    data = this.data.buffer.slice(this.internalPosition, this.internalPosition + length);
  } else {
    // Native BSON: copy the bytes into a fresh buffer.
    data = new Buffer(length);
    length = this.data.readInto(data, this.internalPosition);
  }
  this.internalPosition = this.internalPosition + length;
  return data;
};
/**
* Checks if the read/write head is at the end.
*
* @return {boolean} Whether the read/write head has reached the end of this
* chunk.
*/
Chunk.prototype.eof = function() {
  // The head sits past the last byte exactly when it equals the data length.
  return this.internalPosition == this.length();
};
/**
* Reads one character from the data of this chunk and advances the read/write
* head.
*
* @return {string} a single character data read if the the read/write head is
* not at the end of the chunk. Returns an empty String otherwise.
*/
Chunk.prototype.getc = function() {
  // A single-character read is just a length-one read().
  return this.read(1);
};
/**
* Clears the contents of the data in this chunk and resets the read/write head
* to the initial position.
*/
Chunk.prototype.rewind = function() {
  // Reset the head to the start and drop the payload.
  this.internalPosition = 0;
  this.data = new Binary();
};
/**
* Saves this chunk to the database. Also overwrites existing entries having the
* same id as this chunk.
*
* @param callback {function(*, GridStore)} This will be called after executing
* this method. The first parameter will contain null and the second one
* will contain a reference to this object.
*/
/**
 * Saves this chunk to the database, overwriting any existing document with the
 * same _id (upsert). An empty chunk is not persisted.
 *
 * Fixes: removes a dead `options` object that was built inside the
 * buildMongoObject callback but never used, removes a stray commented-out
 * closer, and un-shadows the `collection` parameter in the replaceOne callback.
 *
 * @param {object} [options] Extra write options merged into the upsert.
 * @param {function(*, Chunk)} callback Called with (err, this).
 */
Chunk.prototype.save = function(options, callback) {
  var self = this;
  // Support save(callback).
  if(typeof options == 'function') {
    callback = options;
    options = {};
  }

  self.file.chunkCollection(function(err, collection) {
    if(err) return callback(err);

    // Merge caller options and the chunk's write concern into the write options.
    var writeOptions = { upsert: true };
    for(var name in options) writeOptions[name] = options[name];
    for(var name in self.writeConcern) writeOptions[name] = self.writeConcern[name];

    // Nothing to persist for an empty chunk.
    if(self.data.length() === 0) return callback(null, self);

    self.buildMongoObject(function(mongoObject) {
      // Upsert so an existing chunk with the same _id is overwritten.
      collection.replaceOne({'_id':self.objectId}, mongoObject, writeOptions, function(err) {
        callback(err, self);
      });
    });
  });
};
/**
* Creates a mongoDB object representation of this chunk.
*
* @param callback {function(Object)} This will be called after executing this
* method. The object will be passed to the first parameter and will have
* the structure:
*
* <pre><code>
* {
* '_id' : , // {number} id for this chunk
* 'files_id' : , // {number} foreign key to the file collection
* 'n' : , // {number} chunk number
* 'data' : , // {bson#Binary} the chunk data itself
* }
* </code></pre>
*
* @see <a href="http://www.mongodb.org/display/DOCS/GridFS+Specification#GridFSSpecification-{{chunks}}">MongoDB GridFS Chunk Object Structure</a>
*/
Chunk.prototype.buildMongoObject = function(callback) {
  // Assemble the GridFS chunk document: foreign key, sequence number, payload.
  var doc = {
    'files_id': this.file.fileId,
    'n': this.chunkNumber,
    'data': this.data
  };
  // Preserve an explicit ObjectId so saves overwrite the same document.
  if(this.objectId != null) doc._id = this.objectId;
  callback(doc);
};
/**
* @return {number} the length of the data
*/
Chunk.prototype.length = function() {
  // Byte length of the underlying Binary payload.
  return this.data.length();
};
/**
* The position of the read/write head
* @name position
* @lends Chunk#
* @field
*/
// Expose the internal read/write head as a read-write `position` property.
Object.defineProperty(Chunk.prototype, "position", {
  enumerable: true,
  get: function() {
    return this.internalPosition;
  },
  set: function(value) {
    this.internalPosition = value;
  }
});
/**
* The default chunk size
* @constant
*/
// 255 KB default chunk size, matching the GridFS specification's default.
Chunk.DEFAULT_CHUNK_SIZE = 1024 * 255;
module.exports = Chunk;

1956
node_modules/mongodb/lib/gridfs/grid_store.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

64
node_modules/mongodb/lib/metadata.js generated vendored Normal file
View File

@@ -0,0 +1,64 @@
var f = require('util').format;
// Collects APM instrumentation metadata (which methods exist, and with what
// callback/promise/static traits) for a driver class.
var Define = function(name, object, stream) {
  this.name = name;
  this.object = object;
  // Only an explicit boolean marks the type as a stream.
  this.stream = typeof stream === 'boolean' ? stream : false;
  // Keyed by serialized option signature -> { methods, options }.
  this.instrumentations = {};
}
// Registers an instance method under the bucket matching its option signature.
Define.prototype.classMethod = function(name, options) {
  var key = generateKey(Object.keys(options).sort(), options);

  // First method with this option signature: create the bucket.
  var entry = this.instrumentations[key];
  if(entry == null) {
    entry = this.instrumentations[key] = {
      methods: [], options: options
    };
  }

  entry.methods.push(name);
}
// Serializes an options object into a stable "k1=v1,k2=v2" bucket key, using
// the caller-supplied (pre-sorted) key order.
var generateKey = function(keys, options) {
  return keys.map(function(k) {
    return f('%s=%s', k, options[k]);
  }).join();
}
// Registers a static method; the shared options object is flagged in place so
// the generated metadata carries `static: true`.
Define.prototype.staticMethod = function(name, options) {
  options.static = true;
  var key = generateKey(Object.keys(options).sort(), options);

  var entry = this.instrumentations[key];
  if(entry == null) {
    entry = this.instrumentations[key] = {
      methods: [], options: options
    };
  }

  entry.methods.push(name);
}
// Flattens the collected instrumentation buckets into a single descriptor.
// NOTE: `keys` and `options` are unused; they are kept so the signature stays
// compatible with existing callers.
Define.prototype.generate = function(keys, options) {
  var result = {
    name: this.name,
    obj: this.object,
    stream: this.stream,
    instrumentations: []
  };

  for(var key in this.instrumentations) {
    result.instrumentations.push(this.instrumentations[key]);
  }

  return result;
}

module.exports = Define;

472
node_modules/mongodb/lib/mongo_client.js generated vendored Normal file
View File

@@ -0,0 +1,472 @@
"use strict";
var parse = require('./url_parser')
, Server = require('./server')
, Mongos = require('./mongos')
, ReplSet = require('./replset')
, Define = require('./metadata')
, ReadPreference = require('./read_preference')
, Db = require('./db');
/**
* @fileOverview The **MongoClient** class is a class that allows for making Connections to MongoDB.
*
* @example
* var MongoClient = require('mongodb').MongoClient,
* test = require('assert');
* // Connection url
* var url = 'mongodb://localhost:27017/test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, db) {
* // Get an additional db
* db.close();
* });
*/
/**
* Creates a new MongoClient instance
* @class
* @return {MongoClient} a MongoClient instance.
*/
function MongoClient() {
  /**
   * The callback format for results
   * @callback MongoClient~connectCallback
   * @param {MongoError} error An error instance representing the error during the execution.
   * @param {Db} db The connected database.
   */

  /**
   * Connect to MongoDB using a url as documented at
   *
   *  docs.mongodb.org/manual/reference/connection-string/
   *
   * Note that for replicasets the replicaSet query parameter is required in the 2.0 driver
   *
   * @method
   * @param {string} url The connection URI string
   * @param {object} [options=null] Optional settings.
   * @param {boolean} [options.uri_decode_auth=false] Uri decode the user name and password for authentication
   * @param {object} [options.db=null] A hash of options to set on the db object, see **Db constructor**
   * @param {object} [options.server=null] A hash of options to set on the server objects, see **Server** constructor**
   * @param {object} [options.replSet=null] A hash of options to set on the replSet object, see **ReplSet** constructor**
   * @param {object} [options.mongos=null] A hash of options to set on the mongos object, see **Mongos** constructor**
   * @param {object} [options.promiseLibrary=null] A Promise library class the application wishes to use such as Bluebird, must be ES6 compatible
   * @param {MongoClient~connectCallback} [callback] The command result callback
   * @return {Promise} returns Promise if no callback passed
   */
  // Instance-level alias of the static MongoClient.connect.
  this.connect = MongoClient.connect;
}
// Instrumentation metadata registry for APM (application performance monitoring).
var define = MongoClient.define = new Define('MongoClient', MongoClient, false);
/**
* Connect to MongoDB using a url as documented at
*
* docs.mongodb.org/manual/reference/connection-string/
*
* Note that for replicasets the replicaSet query parameter is required in the 2.0 driver
*
* @method
* @static
* @param {string} url The connection URI string
* @param {object} [options=null] Optional settings.
* @param {boolean} [options.uri_decode_auth=false] Uri decode the user name and password for authentication
* @param {object} [options.db=null] A hash of options to set on the db object, see **Db constructor**
* @param {object} [options.server=null] A hash of options to set on the server objects, see **Server** constructor**
* @param {object} [options.replSet=null] A hash of options to set on the replSet object, see **ReplSet** constructor**
* @param {object} [options.mongos=null] A hash of options to set on the mongos object, see **Mongos** constructor**
* @param {object} [options.promiseLibrary=null] A Promise library class the application wishes to use such as Bluebird, must be ES6 compatible
* @param {MongoClient~connectCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
// Static connect: accepts (url, [options], [callback]) and returns a Promise
// when no callback is supplied.
MongoClient.connect = function(url, options, callback) {
  var args = Array.prototype.slice.call(arguments, 1);
  callback = typeof args[args.length - 1] == 'function' ? args.pop() : null;
  options = (args.length ? args.shift() : null) || {};

  // Resolve the promise implementation: user-supplied, global, or polyfill.
  var promiseLibrary = options.promiseLibrary;
  if(!promiseLibrary) {
    promiseLibrary = typeof global.Promise == 'function'
      ? global.Promise : require('es6-promise').Promise;
  }

  // Callback style when a function was passed.
  if(typeof callback == 'function') {
    connect(url, options, callback);
    return;
  }

  // Promise style otherwise.
  return new promiseLibrary(function(resolve, reject) {
    connect(url, options, function(err, db) {
      if(err) return reject(err);
      resolve(db);
    });
  });
}

define.staticMethod('connect', {callback: true, promise:true});
/**
 * Internal connect implementation: parses the url, probes every seed host with
 * a throwaway single-connection Server to classify the deployment
 * (standalone / replicaset / mongos), builds the matching topology and hands
 * it to _finishConnecting.
 * @ignore
 */
var connect = function(url, options, callback) {
  var serverOptions = options.server || {};
  var mongosOptions = options.mongos || {};
  var replSetServersOptions = options.replSet || options.replSetServers || {};
  var dbOptions = options.db || {};
  // If callback is null throw an exception
  if(callback == null)
    throw new Error("no callback function provided");
  // Parse the string into { dbName, servers, *_options, auth }
  var object = parse(url, options);
  // Merge in any options for db in options object
  if(dbOptions) {
    for(var name in dbOptions) object.db_options[name] = dbOptions[name];
  }
  // Added the url to the options
  object.db_options.url = url;
  // Merge in any options for server in options object
  if(serverOptions) {
    for(var name in serverOptions) object.server_options[name] = serverOptions[name];
  }
  // Merge in any replicaset server options
  if(replSetServersOptions) {
    for(var name in replSetServersOptions) object.rs_options[name] = replSetServersOptions[name];
  }
  // Copy replicaset-level SSL settings down onto the per-server options
  if(replSetServersOptions.ssl
    || replSetServersOptions.sslValidate
    || replSetServersOptions.checkServerIdentity
    || replSetServersOptions.sslCA
    || replSetServersOptions.sslCert
    || replSetServersOptions.sslKey
    || replSetServersOptions.sslPass) {
    object.server_options.ssl = replSetServersOptions.ssl;
    object.server_options.sslValidate = replSetServersOptions.sslValidate;
    object.server_options.checkServerIdentity = replSetServersOptions.checkServerIdentity;
    object.server_options.sslCA = replSetServersOptions.sslCA;
    object.server_options.sslCert = replSetServersOptions.sslCert;
    object.server_options.sslKey = replSetServersOptions.sslKey;
    object.server_options.sslPass = replSetServersOptions.sslPass;
  }
  // Merge in any mongos options
  if(mongosOptions) {
    for(var name in mongosOptions) object.mongos_options[name] = mongosOptions[name];
  }
  // Propagate a single poolSize setting to the topology-specific options
  if(typeof object.server_options.poolSize == 'number') {
    if(!object.mongos_options.poolSize) object.mongos_options.poolSize = object.server_options.poolSize;
    if(!object.rs_options.poolSize) object.rs_options.poolSize = object.server_options.poolSize;
  }
  // Copy mongos-level SSL settings down onto the per-server options
  if(mongosOptions.ssl
    || mongosOptions.sslValidate
    || mongosOptions.checkServerIdentity
    || mongosOptions.sslCA
    || mongosOptions.sslCert
    || mongosOptions.sslKey
    || mongosOptions.sslPass) {
    object.server_options.ssl = mongosOptions.ssl;
    object.server_options.sslValidate = mongosOptions.sslValidate;
    object.server_options.checkServerIdentity = mongosOptions.checkServerIdentity;
    object.server_options.sslCA = mongosOptions.sslCA;
    object.server_options.sslCert = mongosOptions.sslCert;
    object.server_options.sslKey = mongosOptions.sslKey;
    object.server_options.sslPass = mongosOptions.sslPass;
  }
  // Set the promise library
  object.db_options.promiseLibrary = options.promiseLibrary;
  // We need to ensure that the list of servers are only either direct members or mongos
  // they cannot be a mix of mongos and mongod's
  var totalNumberOfServers = object.servers.length;
  var totalNumberOfMongosServers = 0;
  var totalNumberOfMongodServers = 0;
  var serverConfig = null;
  var errorServers = {};
  // Failure modes
  if(object.servers.length == 0) throw new Error("connection string must contain at least one seed host");
  // If we have no db setting for the native parser try to set the c++ one first
  object.db_options.native_parser = _setNativeParser(object.db_options);
  // If no auto_reconnect is set, set it to true as default for single servers
  if(typeof object.server_options.auto_reconnect != 'boolean') {
    object.server_options.auto_reconnect = true;
  }
  // If we have more than a server, it could be replicaset or mongos list
  // need to verify that it's one or the other and fail if it's a mix
  // Connect to all servers and run ismaster
  for(var i = 0; i < object.servers.length; i++) {
    // Set up socket options for the probe connection (single connection, no
    // auto-reconnect, generous 120s timeouts unless overridden)
    var providedSocketOptions = object.server_options.socketOptions || {};
    var _server_options = {
      poolSize:1
      , socketOptions: {
        connectTimeoutMS: providedSocketOptions.connectTimeoutMS || (1000 * 120)
        , socketTimeoutMS: providedSocketOptions.socketTimeoutMS || (1000 * 120)
      }
      , auto_reconnect:false};
    // Ensure we have ssl setup for the servers
    if(object.server_options.ssl) {
      _server_options.ssl = object.server_options.ssl;
      _server_options.sslValidate = object.server_options.sslValidate;
      _server_options.checkServerIdentity = object.server_options.checkServerIdentity;
      _server_options.sslCA = object.server_options.sslCA;
      _server_options.sslCert = object.server_options.sslCert;
      _server_options.sslKey = object.server_options.sslKey;
      _server_options.sslPass = object.server_options.sslPass;
    } else if(object.rs_options.ssl) {
      _server_options.ssl = object.rs_options.ssl;
      _server_options.sslValidate = object.rs_options.sslValidate;
      _server_options.checkServerIdentity = object.rs_options.checkServerIdentity;
      _server_options.sslCA = object.rs_options.sslCA;
      _server_options.sslCert = object.rs_options.sslCert;
      _server_options.sslKey = object.rs_options.sslKey;
      _server_options.sslPass = object.rs_options.sslPass;
    }
    // Error
    var error = null;
    // Set up the Server object
    var _server = object.servers[i].domain_socket
      ? new Server(object.servers[i].domain_socket, _server_options)
      : new Server(object.servers[i].host, object.servers[i].port, _server_options);
    var connectFunction = function(__server) {
      // Attempt connect
      new Db(object.dbName, __server, {w:1, native_parser:false, promiseLibrary:options.promiseLibrary}).open(function(err, db) {
        // Update number of servers
        totalNumberOfServers = totalNumberOfServers - 1;
        // If no error do the correct checks
        if(!err) {
          // Close the connection
          db.close();
          // Get the last ismaster document
          var isMasterDoc = db.serverConfig.isMasterDoc;
          // Check what type of server we have
          // NOTE(review): only replicaset members (setName present) increment
          // the mongod counter; a standalone mongod increments neither.
          if(isMasterDoc.setName) {
            totalNumberOfMongodServers++;
          }
          if(isMasterDoc.msg && isMasterDoc.msg == "isdbgrid") totalNumberOfMongosServers++;
        } else {
          error = err;
          errorServers[__server.host + ":" + __server.port] = __server;
        }
        // Once every probe has reported, decide on the topology
        if(totalNumberOfServers == 0) {
          // Error out
          if(totalNumberOfMongodServers == 0 && totalNumberOfMongosServers == 0 && error) {
            return callback(error, null);
          }
          // If we have a mix of mongod and mongos, throw an error
          if(totalNumberOfMongosServers > 0 && totalNumberOfMongodServers > 0) {
            if(db) db.close();
            return process.nextTick(function() {
              try {
                callback(new Error("cannot combine a list of replicaset seeds and mongos seeds"));
              } catch (err) {
                throw err
              }
            })
          }
          // Single seed, no replicaset requested: direct standalone connection
          if(totalNumberOfMongodServers == 0
            && totalNumberOfMongosServers == 0
            && object.servers.length == 1
            && (!object.rs_options.replicaSet || !object.rs_options.rs_name)) {
            var obj = object.servers[0];
            serverConfig = obj.domain_socket ?
                new Server(obj.domain_socket, object.server_options)
              : new Server(obj.host, obj.port, object.server_options);
          } else if(totalNumberOfMongodServers > 0
            || totalNumberOfMongosServers > 0
            || object.rs_options.replicaSet || object.rs_options.rs_name) {
            // Drop seeds that failed their probe before building the topology
            var finalServers = object.servers
              .filter(function(serverObj) {
                return errorServers[serverObj.host + ":" + serverObj.port] == null;
              })
              .map(function(serverObj) {
                return serverObj.domain_socket ?
                    new Server(serverObj.domain_socket, 27017, object.server_options)
                  : new Server(serverObj.host, serverObj.port, object.server_options);
              });
            // Clean out any error servers
            errorServers = {};
            // Set up the final configuration
            if(totalNumberOfMongodServers > 0) {
              try {
                // If no replicaset name was provided, we wish to perform a
                // direct connection
                if(totalNumberOfMongodServers == 1
                  && (!object.rs_options.replicaSet && !object.rs_options.rs_name)) {
                  serverConfig = finalServers[0];
                } else if(totalNumberOfMongodServers == 1) {
                  object.rs_options.replicaSet = object.rs_options.replicaSet || object.rs_options.rs_name;
                  serverConfig = new ReplSet(finalServers, object.rs_options);
                } else {
                  serverConfig = new ReplSet(finalServers, object.rs_options);
                }
              } catch(err) {
                return callback(err, null);
              }
            } else {
              serverConfig = new Mongos(finalServers, object.mongos_options);
            }
          }
          if(serverConfig == null) {
            return process.nextTick(function() {
              try {
                callback(new Error("Could not locate any valid servers in initial seed list"));
              } catch (err) {
                if(db) db.close();
                throw err
              }
            });
          }
          // Ensure no firing of open event before we are ready
          serverConfig.emitOpen = false;
          // Set up all options etc and connect to the database
          _finishConnecting(serverConfig, object, options, callback)
        }
      });
    }
    // Wrap the context of the call so the closure captures this iteration's server
    connectFunction(_server);
  }
}
// Decides whether to use the native (C++) BSON parser: an explicit user
// choice wins; otherwise probe for the native module and fall back to JS.
var _setNativeParser = function(db_options) {
  if(typeof db_options.native_parser == 'boolean') {
    return db_options.native_parser;
  }

  try {
    require('mongodb-core').BSON.BSONNative.BSON;
    return true;
  } catch(err) {
    return false;
  }
}
/**
 * Final stage of connect: normalizes read preference settings, applies socket
 * timeouts, opens the Db on the chosen topology and, when credentials were
 * supplied in the url, authenticates before yielding the Db to the caller.
 * @ignore
 */
var _finishConnecting = function(serverConfig, object, options, callback) {
  // If we have a readPreference passed in by the db options
  if(typeof object.db_options.readPreference == 'string') {
    object.db_options.readPreference = new ReadPreference(object.db_options.readPreference);
  } else if(typeof object.db_options.read_preference == 'string') {
    object.db_options.readPreference = new ReadPreference(object.db_options.read_preference);
  }
  // Do we have readPreference tags
  if(object.db_options.readPreference && object.db_options.readPreferenceTags) {
    object.db_options.readPreference.tags = object.db_options.readPreferenceTags;
  } else if(object.db_options.readPreference && object.db_options.read_preference_tags) {
    object.db_options.readPreference.tags = object.db_options.read_preference_tags;
  }
  // Get the socketTimeoutMS
  var socketTimeoutMS = object.server_options.socketOptions.socketTimeoutMS || 0;
  // If we have a replset, override with replicaset socket timeout option if available
  if(serverConfig instanceof ReplSet) {
    socketTimeoutMS = object.rs_options.socketOptions.socketTimeoutMS || socketTimeoutMS;
  }
  // Set socketTimeout to the same as the connectTimeoutMS or 30 sec
  // (temporarily, for the duration of the open; restored below)
  serverConfig.connectTimeoutMS = serverConfig.connectTimeoutMS || 30000;
  serverConfig.socketTimeoutMS = serverConfig.connectTimeoutMS;
  // Set up the db options
  var db = new Db(object.dbName, serverConfig, object.db_options);
  // Open the db
  db.open(function(err, db){
    if(err) {
      return process.nextTick(function() {
        try {
          callback(err, null);
        } catch (err) {
          if(db) db.close();
          throw err
        }
      });
    }
    // Reset the socket timeout
    serverConfig.socketTimeoutMS = socketTimeoutMS || 0;
    // Return object
    if(err == null && object.auth){
      // What db to authenticate against
      var authentication_db = db;
      if(object.db_options && object.db_options.authSource) {
        authentication_db = db.db(object.db_options.authSource);
      }
      // Build options object
      var options = {};
      if(object.db_options.authMechanism) options.authMechanism = object.db_options.authMechanism;
      if(object.db_options.gssapiServiceName) options.gssapiServiceName = object.db_options.gssapiServiceName;
      // Authenticate
      authentication_db.authenticate(object.auth.user, object.auth.password, options, function(err, success){
        if(success){
          process.nextTick(function() {
            try {
              callback(null, db);
            } catch (err) {
              if(db) db.close();
              throw err
            }
          });
        } else {
          if(db) db.close();
          process.nextTick(function() {
            try {
              // NOTE(review): `object.auth[0]` looks like a leftover from an
              // array-based auth shape (auth.user is used above) — confirm.
              callback(err ? err : new Error('Could not authenticate user ' + object.auth[0]), null);
            } catch (err) {
              if(db) db.close();
              throw err
            }
          });
        }
      });
    } else {
      process.nextTick(function() {
        try {
          callback(err, db);
        } catch (err) {
          if(db) db.close();
          throw err
        }
      })
    }
  });
}
module.exports = MongoClient

499
node_modules/mongodb/lib/mongos.js generated vendored Normal file
View File

@@ -0,0 +1,499 @@
"use strict";
var EventEmitter = require('events').EventEmitter
, inherits = require('util').inherits
, f = require('util').format
, ServerCapabilities = require('./topology_base').ServerCapabilities
, MongoCR = require('mongodb-core').MongoCR
, MongoError = require('mongodb-core').MongoError
, CMongos = require('mongodb-core').Mongos
, Cursor = require('./cursor')
, AggregationCursor = require('./aggregation_cursor')
, CommandCursor = require('./command_cursor')
, Define = require('./metadata')
, Server = require('./server')
, Store = require('./topology_base').Store
, shallowClone = require('./utils').shallowClone;
/**
* @fileOverview The **Mongos** class is a class that represents a Mongos Proxy topology and is
* used to construct connections.
*
* **Mongos Should not be used, use MongoClient.connect**
* @example
* var Db = require('mongodb').Db,
* Mongos = require('mongodb').Mongos,
* Server = require('mongodb').Server,
* test = require('assert');
* // Connect using Mongos
* var server = new Server('localhost', 27017);
* var db = new Db('test', new Mongos([server]));
* db.open(function(err, db) {
* // Get an additional db
* db.close();
* });
*/
/**
* Creates a new Mongos instance
* @class
* @deprecated
* @param {Server[]} servers A seedlist of servers participating in the replicaset.
* @param {object} [options=null] Optional settings.
 * @param {boolean} [options.ha=true] Turn on high availability monitoring.
* @param {number} [options.haInterval=5000] Time between each replicaset status check.
* @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons.
* @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support)
* @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function.
* @param {object} [options.sslValidate=true] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {array} [options.sslCA=null] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslCert=null] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslKey=null] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslPass=null] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {object} [options.socketOptions=null] Socket options
* @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option.
* @param {number} [options.socketOptions.keepAlive=0] TCP KeepAlive on the socket with a X ms delay before start.
* @param {number} [options.socketOptions.connectTimeoutMS=0] TCP Connection timeout setting
* @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting
* @fires Mongos#connect
* @fires Mongos#ha
* @fires Mongos#joined
* @fires Mongos#left
* @fires Mongos#fullsetup
* @fires Mongos#open
* @fires Mongos#close
* @fires Mongos#error
* @fires Mongos#timeout
* @fires Mongos#parseError
* @return {Mongos} a Mongos instance.
*/
var Mongos = function(servers, options) {
  if(!(this instanceof Mongos)) return new Mongos(servers, options);
  options = options || {};
  var self = this;
  // Ensure all the instances are Server
  for(var i = 0; i < servers.length; i++) {
    if(!(servers[i] instanceof Server)) {
      throw MongoError.create({message: "all seed list instances must be of the Server type", driver:true});
    }
  }
  // Store option defaults
  var storeOptions = {
    force: false
    , bufferMaxEntries: -1
  }
  // Shared global store
  var store = options.store || new Store(self, storeOptions);
  // Set up event emitter
  EventEmitter.call(this);
  // Debug tag
  // NOTE(review): `tag` is captured but never used in this constructor.
  var tag = options.tag;
  // Build seed list
  var seedlist = servers.map(function(x) {
    return {host: x.host, port: x.port}
  });
  // Final options
  var finalOptions = shallowClone(options);
  // Default values
  finalOptions.size = typeof options.poolSize == 'number' ? options.poolSize : 5;
  finalOptions.reconnect = typeof options.auto_reconnect == 'boolean' ? options.auto_reconnect : true;
  finalOptions.emitError = typeof options.emitError == 'boolean' ? options.emitError : true;
  finalOptions.cursorFactory = Cursor;
  // Add the store
  finalOptions.disconnectHandler = store;
  // Translate the sslX option names into the names mongodb-core expects
  if(options.sslCA) finalOptions.ca = options.sslCA;
  if(typeof options.sslValidate == 'boolean') finalOptions.rejectUnauthorized = options.sslValidate;
  if(options.sslKey) finalOptions.key = options.sslKey;
  if(options.sslCert) finalOptions.cert = options.sslCert;
  if(options.sslPass) finalOptions.passphrase = options.sslPass;
  if(options.checkServerIdentity) finalOptions.checkServerIdentity = options.checkServerIdentity;
  // Socket options passed down
  if(options.socketOptions) {
    if(options.socketOptions.connectTimeoutMS) {
      this.connectTimeoutMS = options.socketOptions.connectTimeoutMS;
      finalOptions.connectionTimeout = options.socketOptions.connectTimeoutMS;
    }
    if(options.socketOptions.socketTimeoutMS)
      finalOptions.socketTimeout = options.socketOptions.socketTimeoutMS;
  }
  // Are we running in debug mode
  var debug = typeof options.debug == 'boolean' ? options.debug : false;
  if(debug) {
    finalOptions.debug = debug;
  }
  // Map keep alive setting (a numeric keepAlive doubles as the initial delay)
  if(options.socketOptions && typeof options.socketOptions.keepAlive == 'number') {
    finalOptions.keepAlive = true;
    if(typeof options.socketOptions.keepAlive == 'number') {
      finalOptions.keepAliveInitialDelay = options.socketOptions.keepAlive;
    }
  }
  // Connection timeout
  if(options.socketOptions && typeof options.socketOptions.connectionTimeout == 'number') {
    finalOptions.connectionTimeout = options.socketOptions.connectionTimeout;
  }
  // Socket timeout
  if(options.socketOptions && typeof options.socketOptions.socketTimeout == 'number') {
    finalOptions.socketTimeout = options.socketOptions.socketTimeout;
  }
  // noDelay
  if(options.socketOptions && typeof options.socketOptions.noDelay == 'boolean') {
    finalOptions.noDelay = options.socketOptions.noDelay;
  }
  if(typeof options.secondaryAcceptableLatencyMS == 'number') {
    finalOptions.acceptableLatency = options.secondaryAcceptableLatencyMS;
  }
  // Add the non connection store
  // NOTE(review): disconnectHandler was already assigned above; this repeat
  // assignment is redundant but harmless.
  finalOptions.disconnectHandler = store;
  // Create the Mongos
  var mongos = new CMongos(seedlist, finalOptions)
  // Server capabilities
  var sCapabilities = null;
  // Add auth providers
  mongos.addAuthProvider('mongocr', new MongoCR());
  // Internal state
  this.s = {
    // Create the Mongos
    mongos: mongos
    // Server capabilities
    , sCapabilities: sCapabilities
    // Debug turned on
    , debug: debug
    // Store option defaults
    , storeOptions: storeOptions
    // Cloned options
    , clonedOptions: finalOptions
    // Actual store of callbacks
    , store: store
    // Options
    , options: options
  }
  // Last ismaster
  Object.defineProperty(this, 'isMasterDoc', {
    enumerable:true, get: function() { return self.s.mongos.lastIsMaster(); }
  });
  // Number of currently connected mongos proxies
  Object.defineProperty(this, 'numberOfConnectedServers', {
    enumerable:true, get: function() {
      return self.s.mongos.s.mongosState.connectedServers().length;
    }
  });
  // BSON property
  Object.defineProperty(this, 'bson', {
    enumerable: true, get: function() {
      return self.s.mongos.bson;
    }
  });
  Object.defineProperty(this, 'haInterval', {
    enumerable:true, get: function() { return self.s.mongos.haInterval; }
  });
}
/**
* @ignore
*/
// Mongos emits topology lifecycle events (connect, joined, left, open, ...).
inherits(Mongos, EventEmitter);
var define = Mongos.define = new Define('Mongos', Mongos, false);
// Connect
// Connects the underlying core Mongos topology, wiring temporary error
// handlers for the initial attempt and permanent event relays once connected.
Mongos.prototype.connect = function(db, _options, callback) {
  var self = this;
  if('function' === typeof _options) callback = _options, _options = {};
  if(_options == null) _options = {};
  if(!('function' === typeof callback)) callback = null;
  self.s.options = _options;
  // Update bufferMaxEntries
  self.s.storeOptions.bufferMaxEntries = db.bufferMaxEntries;
  // Error handler used only while the initial connect attempt is in flight
  var connectErrorHandler = function(event) {
    return function(err) {
      // Remove all event handlers
      var events = ['timeout', 'error', 'close'];
      events.forEach(function(e) {
        // NOTE(review): this removes `connectErrorHandler` (the factory), not
        // the wrapper functions actually registered below — presumably the
        // intent was to deregister those wrappers; confirm before changing.
        self.removeListener(e, connectErrorHandler);
      });
      self.s.mongos.removeListener('connect', connectErrorHandler);
      // Try to callback
      try {
        callback(err);
      } catch(err) {
        process.nextTick(function() { throw err; })
      }
    }
  }
  // Post-connect handler: re-emits topology events (except 'error') upward
  var errorHandler = function(event) {
    return function(err) {
      if(event != 'error') {
        self.emit(event, err);
      }
    }
  }
  // On reconnect, replay any operations buffered while disconnected
  var reconnectHandler = function(err) {
    self.emit('reconnect');
    self.s.store.execute();
  }
  // Connect handler
  var connectHandler = function() {
    // Clear out all the current handlers left over
    ["timeout", "error", "close"].forEach(function(e) {
      self.s.mongos.removeAllListeners(e);
    });
    // Set up listeners
    self.s.mongos.once('timeout', errorHandler('timeout'));
    self.s.mongos.once('error', errorHandler('error'));
    self.s.mongos.once('close', errorHandler('close'));
    // relay the event
    var relay = function(event) {
      return function(t, server) {
        self.emit(event, t, server);
      }
    }
    // Set up serverConfig listeners
    self.s.mongos.on('joined', relay('joined'));
    self.s.mongos.on('left', relay('left'));
    self.s.mongos.on('fullsetup', relay('fullsetup'));
    // Emit open event
    self.emit('open', null, self);
    // Return correctly
    try {
      callback(null, self);
    } catch(err) {
      process.nextTick(function() { throw err; })
    }
  }
  // Set up listeners for the initial connect attempt
  self.s.mongos.once('timeout', connectErrorHandler('timeout'));
  self.s.mongos.once('error', connectErrorHandler('error'));
  self.s.mongos.once('close', connectErrorHandler('close'));
  self.s.mongos.once('connect', connectHandler);
  // Reconnect server
  self.s.mongos.on('reconnect', reconnectHandler);
  // Start connection
  self.s.mongos.connect(_options);
}
// Report which BSON parser implementation the underlying topology uses
Mongos.prototype.parserType = function() {
  var mongos = this.s.mongos;
  return mongos.parserType();
}

define.classMethod('parserType', {callback: false, promise:false, returns: [String]});
// Server capabilities
/**
 * Return the server capabilities, computed lazily from the last ismaster
 * result. Returns null until an ismaster document has been seen.
 */
Mongos.prototype.capabilities = function() {
  // Build the capabilities object on first use, once an ismaster is available
  if(this.s.sCapabilities == null) {
    var ismaster = this.s.mongos.lastIsMaster();
    if(ismaster == null) return null;
    this.s.sCapabilities = new ServerCapabilities(ismaster);
  }

  return this.s.sCapabilities;
}

define.classMethod('capabilities', {callback: false, promise:false, returns: [ServerCapabilities]});
// Command
// Execute a command against the mongos proxies; delegates to mongodb-core
Mongos.prototype.command = function(ns, cmd, options, callback) {
  var topology = this.s.mongos;
  topology.command(ns, cmd, options, callback);
}

define.classMethod('command', {callback: true, promise:false});
// Insert
// Insert one or more documents; delegates to the core mongos topology
Mongos.prototype.insert = function(ns, ops, options, callback) {
  // Forward the (err, result) pair from the core layer to the caller
  var handler = function(e, m) {
    callback(e, m)
  };
  this.s.mongos.insert(ns, ops, options, handler);
}

define.classMethod('insert', {callback: true, promise:false});
// Update
// Update one or more documents; delegates to the core mongos topology
Mongos.prototype.update = function(ns, ops, options, callback) {
  var topology = this.s.mongos;
  topology.update(ns, ops, options, callback);
}

define.classMethod('update', {callback: true, promise:false});
// Remove
// Remove one or more documents; delegates to the core mongos topology
Mongos.prototype.remove = function(ns, ops, options, callback) {
  var topology = this.s.mongos;
  topology.remove(ns, ops, options, callback);
}

define.classMethod('remove', {callback: true, promise:false});
// Destroyed
// True once the underlying topology has been destroyed
Mongos.prototype.isDestroyed = function() {
  var mongos = this.s.mongos;
  return mongos.isDestroyed();
}
// IsConnected
// True while the underlying topology reports a live connection
Mongos.prototype.isConnected = function() {
  var mongos = this.s.mongos;
  return mongos.isConnected();
}

define.classMethod('isConnected', {callback: false, promise:false, returns: [Boolean]});
/**
 * Create a cursor for the given namespace and command.
 *
 * @param {string} ns The fully qualified namespace (database.collection).
 * @param {object} cmd The command or query to execute.
 * @param {object} [options] Cursor options; the disconnect-handler store is attached here.
 * @return {Cursor} The cursor instance produced by the core topology.
 */
Mongos.prototype.cursor = function(ns, cmd, options) {
  // Guard against a missing options object instead of throwing a TypeError
  options = options || {};
  // Route operations attempted while disconnected through the shared store
  options.disconnectHandler = this.s.store;
  return this.s.mongos.cursor(ns, cmd, options);
}

define.classMethod('cursor', {callback: false, promise:false, returns: [Cursor, AggregationCursor, CommandCursor]});
// Switch the BSON parser implementation used by the topology
Mongos.prototype.setBSONParserType = function(type) {
  var mongos = this.s.mongos;
  return mongos.setBSONParserType(type);
}
// Return the last ismaster document seen by the topology
Mongos.prototype.lastIsMaster = function() {
  var mongos = this.s.mongos;
  return mongos.lastIsMaster();
}
/**
 * Close the topology and optionally flush buffered operations.
 *
 * @param {boolean} [forceClosed] When true, force-flush the operation store so
 * buffered operations error out instead of waiting for a reconnect.
 */
Mongos.prototype.close = function(forceClosed) {
  var self = this;
  this.s.mongos.destroy();
  // We need to wash out all stored processes
  if(forceClosed == true) {
    this.s.storeOptions.force = forceClosed;
    this.s.store.flush();
  }

  // Consistency with ReplSet.prototype.close: drop wrapper-level listeners so
  // no further topology events fire on a closed instance
  var events = ['timeout', 'error', 'close', 'joined', 'left'];
  events.forEach(function(e) {
    self.removeAllListeners(e);
  });
}

define.classMethod('close', {callback: false, promise:false});
// Authenticate against the topology; all arguments are forwarded to mongodb-core
Mongos.prototype.auth = function() {
  var mongos = this.s.mongos;
  mongos.auth.apply(mongos, Array.prototype.slice.call(arguments));
}

define.classMethod('auth', {callback: true, promise:false});
/**
 * Return all raw connections held by the underlying topology.
 * @method
 * @return {array} The list of active connections.
 */
Mongos.prototype.connections = function() {
  var mongos = this.s.mongos;
  return mongos.connections();
}

define.classMethod('connections', {callback: false, promise:false, returns:[Array]});
/**
* A mongos connect event, used to verify that the connection is up and running
*
* @event Mongos#connect
* @type {Mongos}
*/
/**
* The mongos high availability event
*
* @event Mongos#ha
* @type {function}
* @param {string} type The stage in the high availability event (start|end)
* @param {boolean} data.norepeat This is a repeating high availability process or a single execution only
* @param {number} data.id The id for this high availability request
* @param {object} data.state An object containing the information about the current replicaset
*/
/**
* A server member left the mongos set
*
* @event Mongos#left
* @type {function}
* @param {string} type The type of member that left (primary|secondary|arbiter)
* @param {Server} server The server object that left
*/
/**
* A server member joined the mongos set
*
* @event Mongos#joined
* @type {function}
* @param {string} type The type of member that joined (primary|secondary|arbiter)
* @param {Server} server The server object that joined
*/
/**
* Mongos fullsetup event, emitted when all proxies in the topology have been connected to.
*
* @event Mongos#fullsetup
* @type {Mongos}
*/
/**
* Mongos open event, emitted when mongos can start processing commands.
*
* @event Mongos#open
* @type {Mongos}
*/
/**
* Mongos close event
*
* @event Mongos#close
* @type {object}
*/
/**
* Mongos error event, emitted if there is an error listener.
*
* @event Mongos#error
* @type {MongoError}
*/
/**
* Mongos timeout event
*
* @event Mongos#timeout
* @type {object}
*/
/**
* Mongos parseError event
*
* @event Mongos#parseError
* @type {object}
*/
module.exports = Mongos;

104
node_modules/mongodb/lib/read_preference.js generated vendored Normal file
View File

@@ -0,0 +1,104 @@
"use strict";
/**
* @fileOverview The **ReadPreference** class is a class that represents a MongoDB ReadPreference and is
* used to construct connections.
*
* @example
* var Db = require('mongodb').Db,
* ReplSet = require('mongodb').ReplSet,
* Server = require('mongodb').Server,
* ReadPreference = require('mongodb').ReadPreference,
* test = require('assert');
* // Connect using ReplSet
* var server = new Server('localhost', 27017);
* var db = new Db('test', new ReplSet([server]));
* db.open(function(err, db) {
* test.equal(null, err);
* // Perform a read
* var cursor = db.collection('t').find({});
* cursor.setReadPreference(ReadPreference.PRIMARY);
* cursor.toArray(function(err, docs) {
* test.equal(null, err);
* db.close();
* });
* });
*/
/**
 * Creates a new ReadPreference instance
 *
 * Supported modes:
 * - **ReadPreference.PRIMARY**, read from the primary only; operations error if it is unavailable. Cannot be combined with tags (default).
 * - **ReadPreference.PRIMARY_PREFERRED**, read from the primary when available, otherwise a secondary.
 * - **ReadPreference.SECONDARY**, read from a secondary only; operations error if none is available.
 * - **ReadPreference.SECONDARY_PREFERRED**, read from a secondary when available, otherwise the primary.
 * - **ReadPreference.NEAREST**, read from the nearest candidate, primary or secondary.
 *
 * @class
 * @param {string} mode The ReadPreference mode as listed above.
 * @param {object} tags An object representing read preference tags.
 * @property {string} mode The ReadPreference mode.
 * @property {object} tags The ReadPreference tags.
 * @return {ReadPreference} a ReadPreference instance.
 */
var ReadPreference = function(mode, tags) {
  // Permit calling without `new`
  if(!(this instanceof ReadPreference)) {
    return new ReadPreference(mode, tags);
  }

  this._type = 'ReadPreference';
  this.mode = mode;
  this.tags = tags;
}

/**
 * Validate if a mode is legal
 *
 * @method
 * @param {string} _mode The string representing the read preference mode.
 * @return {boolean}
 */
ReadPreference.isValid = function(_mode) {
  // Legacy boolean/null read preference values are accepted as-is
  if(_mode == true || _mode == false || _mode == null) return true;
  return _mode == ReadPreference.PRIMARY
    || _mode == ReadPreference.PRIMARY_PREFERRED
    || _mode == ReadPreference.SECONDARY
    || _mode == ReadPreference.SECONDARY_PREFERRED
    || _mode == ReadPreference.NEAREST;
}

/**
 * Validate if a mode is legal, defaulting to this instance's own mode when
 * no string argument is supplied.
 *
 * @method
 * @param {string} [mode] The string representing the read preference mode.
 * @return {boolean}
 */
ReadPreference.prototype.isValid = function(mode) {
  var candidate = typeof mode == 'string' ? mode : this.mode;
  return ReadPreference.isValid(candidate);
}

/**
 * @ignore
 */
ReadPreference.prototype.toObject = function() {
  var object = {mode: this.mode};
  // Tags are optional and omitted from the wire form when not set
  if(this.tags != null) object['tags'] = this.tags;
  return object;
}

/**
 * @ignore
 */
ReadPreference.PRIMARY = 'primary';
ReadPreference.PRIMARY_PREFERRED = 'primaryPreferred';
ReadPreference.SECONDARY = 'secondary';
ReadPreference.SECONDARY_PREFERRED = 'secondaryPreferred';
ReadPreference.NEAREST = 'nearest';
/**
* @ignore
*/
module.exports = ReadPreference;

562
node_modules/mongodb/lib/replset.js generated vendored Normal file
View File

@@ -0,0 +1,562 @@
"use strict";
var EventEmitter = require('events').EventEmitter
, inherits = require('util').inherits
, f = require('util').format
, Server = require('./server')
, Mongos = require('./mongos')
, Cursor = require('./cursor')
, AggregationCursor = require('./aggregation_cursor')
, CommandCursor = require('./command_cursor')
, ReadPreference = require('./read_preference')
, MongoCR = require('mongodb-core').MongoCR
, MongoError = require('mongodb-core').MongoError
, ServerCapabilities = require('./topology_base').ServerCapabilities
, Store = require('./topology_base').Store
, Define = require('./metadata')
, CServer = require('mongodb-core').Server
, CReplSet = require('mongodb-core').ReplSet
, CoreReadPreference = require('mongodb-core').ReadPreference
, shallowClone = require('./utils').shallowClone;
/**
* @fileOverview The **ReplSet** class is a class that represents a Replicaset topology and is
* used to construct connections.
*
* **ReplSet Should not be used, use MongoClient.connect**
* @example
* var Db = require('mongodb').Db,
* ReplSet = require('mongodb').ReplSet,
* Server = require('mongodb').Server,
* test = require('assert');
* // Connect using ReplSet
* var server = new Server('localhost', 27017);
* var db = new Db('test', new ReplSet([server]));
* db.open(function(err, db) {
* // Get an additional db
* db.close();
* });
*/
/**
* Creates a new ReplSet instance
* @class
* @deprecated
* @param {Server[]} servers A seedlist of servers participating in the replicaset.
* @param {object} [options=null] Optional settings.
* @param {booelan} [options.ha=true] Turn on high availability monitoring.
* @param {number} [options.haInterval=5000] Time between each replicaset status check.
* @param {string} options.replicaSet The name of the replicaset to connect to.
* @param {number} [options.secondaryAcceptableLatencyMS=15] Sets the range of servers to pick when using NEAREST (lowest ping ms + the latency fence, ex: range of 1 to (1 + 15) ms)
* @param {boolean} [options.connectWithNoPrimary=false] Sets if the driver should connect even if no primary is available
* @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons.
* @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support)
* @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function.
* @param {object} [options.sslValidate=true] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {array} [options.sslCA=null] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslCert=null] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslKey=null] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslPass=null] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {object} [options.socketOptions=null] Socket options
* @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option.
* @param {number} [options.socketOptions.keepAlive=0] TCP KeepAlive on the socket with a X ms delay before start.
* @param {number} [options.socketOptions.connectTimeoutMS=10000] TCP Connection timeout setting
* @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting
* @fires ReplSet#connect
* @fires ReplSet#ha
* @fires ReplSet#joined
* @fires ReplSet#left
* @fires ReplSet#fullsetup
* @fires ReplSet#open
* @fires ReplSet#close
* @fires ReplSet#error
* @fires ReplSet#timeout
* @fires ReplSet#parseError
* @return {ReplSet} a ReplSet instance.
*/
var ReplSet = function(servers, options) {
  // Permit calling without `new`
  if(!(this instanceof ReplSet)) return new ReplSet(servers, options);
  options = options || {};
  var self = this;

  // Ensure all the instances are Server
  for(var i = 0; i < servers.length; i++) {
    if(!(servers[i] instanceof Server)) {
      throw MongoError.create({message: "all seed list instances must be of the Server type", driver:true});
    }
  }

  // Store option defaults (-1 bufferMaxEntries means unbounded buffering)
  var storeOptions = {
    force: false
    , bufferMaxEntries: -1
  }

  // Shared global store for operations attempted while disconnected
  var store = options.store || new Store(self, storeOptions);

  // Set up event emitter
  EventEmitter.call(this);

  // Debug tag
  // NOTE(review): unused local; this.s.tag below reads options.tag directly
  var tag = options.tag;

  // Build seed list of {host, port} pairs for mongodb-core
  var seedlist = servers.map(function(x) {
    return {host: x.host, port: x.port}
  });

  // Final options passed to the core ReplSet
  var finalOptions = shallowClone(options);

  // Default values
  finalOptions.size = typeof options.poolSize == 'number' ? options.poolSize : 5;
  finalOptions.reconnect = typeof options.auto_reconnect == 'boolean' ? options.auto_reconnect : true;
  finalOptions.emitError = typeof options.emitError == 'boolean' ? options.emitError : true;
  finalOptions.cursorFactory = Cursor;

  // Add the store
  finalOptions.disconnectHandler = store;

  // Socket options passed down (MS-suffixed aliases)
  if(options.socketOptions) {
    if(options.socketOptions.connectTimeoutMS) {
      this.connectTimeoutMS = options.socketOptions.connectTimeoutMS;
      finalOptions.connectionTimeout = options.socketOptions.connectTimeoutMS;
    }

    if(options.socketOptions.socketTimeoutMS) {
      finalOptions.socketTimeout = options.socketOptions.socketTimeoutMS;
    }
  }

  // Get the replicaset name (rs_name is the legacy alias)
  var replicaSet = options.replicaSet || options.rs_name;

  // Set up options
  finalOptions.setName = replicaSet;

  // Are we running in debug mode
  var debug = typeof options.debug == 'boolean' ? options.debug : false;
  if(debug) {
    finalOptions.debug = debug;
  }

  // Map keep alive setting
  if(options.socketOptions && typeof options.socketOptions.keepAlive == 'number') {
    finalOptions.keepAlive = true;
    // NOTE(review): the inner typeof check is redundant - the outer condition
    // already guarantees keepAlive is a number
    if(typeof options.socketOptions.keepAlive == 'number') {
      finalOptions.keepAliveInitialDelay = options.socketOptions.keepAlive;
    }
  }

  // Connection timeout (non-MS aliases)
  if(options.socketOptions && typeof options.socketOptions.connectionTimeout == 'number') {
    finalOptions.connectionTimeout = options.socketOptions.connectionTimeout;
  }

  // Socket timeout
  if(options.socketOptions && typeof options.socketOptions.socketTimeout == 'number') {
    finalOptions.socketTimeout = options.socketOptions.socketTimeout;
  }

  // noDelay
  if(options.socketOptions && typeof options.socketOptions.noDelay == 'boolean') {
    finalOptions.noDelay = options.socketOptions.noDelay;
  }

  if(typeof options.secondaryAcceptableLatencyMS == 'number') {
    finalOptions.acceptableLatency = options.secondaryAcceptableLatencyMS;
  }

  if(options.connectWithNoPrimary == true) {
    finalOptions.secondaryOnlyConnectionAllowed = true;
  }

  // Add the non connection store
  // NOTE(review): duplicate of the disconnectHandler assignment above
  finalOptions.disconnectHandler = store;

  // Translate the ssl options into mongodb-core/tls names
  if(options.sslCA) finalOptions.ca = options.sslCA;
  if(typeof options.sslValidate == 'boolean') finalOptions.rejectUnauthorized = options.sslValidate;
  if(options.sslKey) finalOptions.key = options.sslKey;
  if(options.sslCert) finalOptions.cert = options.sslCert;
  if(options.sslPass) finalOptions.passphrase = options.sslPass;
  if(options.checkServerIdentity) finalOptions.checkServerIdentity = options.checkServerIdentity;

  // Create the ReplSet
  var replset = new CReplSet(seedlist, finalOptions)

  // Server capabilities
  // NOTE(review): unused local; this.s.sCapabilities is initialized to null directly
  var sCapabilities = null;

  // Add auth providers
  replset.addAuthProvider('mongocr', new MongoCR());

  // Listen to reconnect event and replay buffered operations
  replset.on('reconnect', function() {
    self.emit('reconnect');
    store.execute();
  });

  // Internal state
  this.s = {
    // Replicaset
    replset: replset
    // Server capabilities
    , sCapabilities: null
    // Debug tag
    , tag: options.tag
    // Store options
    , storeOptions: storeOptions
    // Cloned options
    , clonedOptions: finalOptions
    // Store
    , store: store
    // Options
    , options: options
  }

  // Debug-only escape hatch exposing the core replset instance
  if(debug) {
    // Last ismaster
    Object.defineProperty(this, 'replset', {
      enumerable:true, get: function() { return replset; }
    });
  }

  // Last ismaster
  Object.defineProperty(this, 'isMasterDoc', {
    enumerable:true, get: function() { return replset.lastIsMaster(); }
  });

  // BSON property
  Object.defineProperty(this, 'bson', {
    enumerable: true, get: function() {
      return replset.bson;
    }
  });

  // High availability check interval, delegated to the core replset
  Object.defineProperty(this, 'haInterval', {
    enumerable:true, get: function() { return replset.haInterval; }
  });
}
/**
* @ignore
*/
// Inherit the EventEmitter API so topology events can be observed on ReplSet
inherits(ReplSet, EventEmitter);
// Metadata definition used to describe the public API surface of ReplSet
var define = ReplSet.define = new Define('ReplSet', ReplSet, false);
// Ensure the right read Preference object
/**
 * Normalize the readPreference option to a mongodb-core ReadPreference.
 * Strings and driver-level ReadPreference instances are converted in place on
 * the supplied options object; anything else is left untouched.
 */
var translateReadPreference = function(options) {
  var rp = options.readPreference;
  if(typeof rp == 'string') {
    options.readPreference = new CoreReadPreference(rp);
  } else if(rp instanceof ReadPreference) {
    options.readPreference = new CoreReadPreference(rp.mode, rp.tags);
  }

  return options;
}
// Report which BSON parser implementation the underlying topology uses
ReplSet.prototype.parserType = function() {
  var replset = this.s.replset;
  return replset.parserType();
}

define.classMethod('parserType', {callback: false, promise:false, returns: [String]});
// Connect method
/**
 * Connect the replicaset topology.
 *
 * One-shot error handlers cover the initial connection attempt; once connected
 * they are swapped for long-lived handlers and the core replicaset events
 * (joined/left/ping/ha/fullsetup/all) are relayed onto this wrapper.
 * NOTE(review): handler registration order matters here - do not reorder.
 *
 * @method
 * @param {Db} db The database instance (only bufferMaxEntries is read from it).
 * @param {object} [_options] Optional connect options, stored on this.s.options.
 * @param {function} [callback] Invoked as callback(err) on failure or callback(null, self) on success.
 */
ReplSet.prototype.connect = function(db, _options, callback) {
  var self = this;
  // Support connect(db, callback) by shifting arguments
  if('function' === typeof _options) callback = _options, _options = {};
  if(_options == null) _options = {};
  if(!('function' === typeof callback)) callback = null;
  self.s.options = _options;

  // Update bufferMaxEntries
  self.s.storeOptions.bufferMaxEntries = db.bufferMaxEntries;

  // Long-lived handler; re-emits everything except 'error' (an unhandled
  // 'error' event would crash the process)
  var errorHandler = function(event) {
    return function(err) {
      if(event != 'error') {
        self.emit(event, err);
      }
    }
  }

  // Connect handler
  var connectHandler = function() {
    // Clear out all the current handlers left over
    ["timeout", "error", "close"].forEach(function(e) {
      self.s.replset.removeAllListeners(e);
    });

    // Set up listeners
    self.s.replset.once('timeout', errorHandler('timeout'));
    self.s.replset.once('error', errorHandler('error'));
    self.s.replset.once('close', errorHandler('close'));

    // relay the event
    var relay = function(event) {
      return function(t, server) {
        self.emit(event, t, server);
      }
    }

    // Replset events relay (adds the member's last ismaster document)
    var replsetRelay = function(event) {
      return function(t, server) {
        self.emit(event, t, server.lastIsMaster(), server);
      }
    }

    // Relay ha: also emit stage-specific ha_connect / ha_ismaster events
    var relayHa = function(t, state) {
      self.emit('ha', t, state);

      if(t == 'start') {
        self.emit('ha_connect', t, state);
      } else if(t == 'end') {
        self.emit('ha_ismaster', t, state);
      }
    }

    // Set up serverConfig listeners
    self.s.replset.on('joined', replsetRelay('joined'));
    self.s.replset.on('left', relay('left'));
    self.s.replset.on('ping', relay('ping'));
    self.s.replset.on('ha', relayHa);

    self.s.replset.on('fullsetup', function(topology) {
      self.emit('fullsetup', null, self);
    });

    self.s.replset.on('all', function(topology) {
      self.emit('all', null, self);
    });

    // Emit open event
    self.emit('open', null, self);

    // Return correctly
    try {
      callback(null, self);
    } catch(err) {
      // Surface callback exceptions asynchronously instead of swallowing them
      process.nextTick(function() { throw err; })
    }
  }

  // Error handler used only while the initial connect is in flight
  var connectErrorHandler = function(event) {
    return function(err) {
      ['timeout', 'error', 'close'].forEach(function(e) {
        self.s.replset.removeListener(e, connectErrorHandler);
      });

      self.s.replset.removeListener('connect', connectErrorHandler);
      // Destroy the replset
      self.s.replset.destroy();

      // Try to callback
      try {
        callback(err);
      } catch(err) {
        // Only rethrow asynchronously when the topology is really down
        if(!self.s.replset.isConnected())
          process.nextTick(function() { throw err; })
      }
    }
  }

  // Set up listeners
  self.s.replset.once('timeout', connectErrorHandler('timeout'));
  self.s.replset.once('error', connectErrorHandler('error'));
  self.s.replset.once('close', connectErrorHandler('close'));
  self.s.replset.once('connect', connectHandler);

  // Start connection
  self.s.replset.connect(_options);
}
// Server capabilities
/**
 * Return the server capabilities, computed lazily from the last ismaster
 * result. Returns null until an ismaster document has been seen.
 */
ReplSet.prototype.capabilities = function() {
  // Build the capabilities object on first use, once an ismaster is available
  if(this.s.sCapabilities == null) {
    var ismaster = this.s.replset.lastIsMaster();
    if(ismaster == null) return null;
    this.s.sCapabilities = new ServerCapabilities(ismaster);
  }

  return this.s.sCapabilities;
}

define.classMethod('capabilities', {callback: false, promise:false, returns: [ServerCapabilities]});
// Command
// Execute a command, normalizing any readPreference option to core form first
ReplSet.prototype.command = function(ns, cmd, options, callback) {
  var opts = translateReadPreference(options);
  this.s.replset.command(ns, cmd, opts, callback);
}

define.classMethod('command', {callback: true, promise:false});
// Insert
// Insert one or more documents; delegates to the core replset topology
ReplSet.prototype.insert = function(ns, ops, options, callback) {
  var replset = this.s.replset;
  replset.insert(ns, ops, options, callback);
}

define.classMethod('insert', {callback: true, promise:false});
// Update
// Update one or more documents; delegates to the core replset topology
ReplSet.prototype.update = function(ns, ops, options, callback) {
  var replset = this.s.replset;
  replset.update(ns, ops, options, callback);
}

define.classMethod('update', {callback: true, promise:false});
// Remove
// Remove one or more documents; delegates to the core replset topology
ReplSet.prototype.remove = function(ns, ops, options, callback) {
  var replset = this.s.replset;
  replset.remove(ns, ops, options, callback);
}

define.classMethod('remove', {callback: true, promise:false});
// Destroyed
// True once the underlying topology has been destroyed
ReplSet.prototype.isDestroyed = function() {
  var replset = this.s.replset;
  return replset.isDestroyed();
}
// IsConnected
// True while the underlying topology reports a live connection
ReplSet.prototype.isConnected = function() {
  var replset = this.s.replset;
  return replset.isConnected();
}

define.classMethod('isConnected', {callback: false, promise:false, returns: [Boolean]});
// Switch the BSON parser implementation used by the topology
ReplSet.prototype.setBSONParserType = function(type) {
  var replset = this.s.replset;
  return replset.setBSONParserType(type);
}
/**
 * Create a cursor for the given namespace and command.
 *
 * @param {string} ns The fully qualified namespace (database.collection).
 * @param {object} cmd The command or query to execute.
 * @param {object} [options] Cursor options; readPreference is normalized and the disconnect-handler store attached.
 * @return {Cursor} The cursor instance produced by the core topology.
 */
ReplSet.prototype.cursor = function(ns, cmd, options) {
  // Guard against a missing options object instead of throwing a TypeError
  options = options || {};
  options = translateReadPreference(options);
  // Route operations attempted while disconnected through the shared store
  options.disconnectHandler = this.s.store;
  return this.s.replset.cursor(ns, cmd, options);
}

define.classMethod('cursor', {callback: false, promise:false, returns: [Cursor, AggregationCursor, CommandCursor]});
// Return the last ismaster document seen by the topology
ReplSet.prototype.lastIsMaster = function() {
  var replset = this.s.replset;
  return replset.lastIsMaster();
}
/**
 * Close the replicaset topology, optionally flushing buffered operations,
 * and drop all wrapper-level listeners.
 *
 * @param {boolean} [forceClosed] When true, force-flush the operation store so
 * buffered operations error out instead of waiting for a reconnect.
 */
ReplSet.prototype.close = function(forceClosed) {
  var self = this;
  this.s.replset.destroy();

  // Force-flush buffered operations so they error out rather than wait
  if(forceClosed == true) {
    this.s.storeOptions.force = forceClosed;
    this.s.store.flush();
  }

  // Drop all wrapper-level listeners so no events fire on a closed instance
  ['timeout', 'error', 'close', 'joined', 'left'].forEach(function(e) {
    self.removeAllListeners(e);
  });
}

define.classMethod('close', {callback: false, promise:false});
// Authenticate against the topology; all arguments are forwarded to mongodb-core
ReplSet.prototype.auth = function() {
  var replset = this.s.replset;
  replset.auth.apply(replset, Array.prototype.slice.call(arguments));
}

define.classMethod('auth', {callback: true, promise:false});
/**
 * Return all raw connections held by the underlying topology.
 * @method
 * @return {array} The list of active connections.
 */
ReplSet.prototype.connections = function() {
  var replset = this.s.replset;
  return replset.connections();
}

define.classMethod('connections', {callback: false, promise:false, returns:[Array]});
/**
* A replset connect event, used to verify that the connection is up and running
*
* @event ReplSet#connect
* @type {ReplSet}
*/
/**
* The replset high availability event
*
* @event ReplSet#ha
* @type {function}
* @param {string} type The stage in the high availability event (start|end)
* @param {boolean} data.norepeat This is a repeating high availability process or a single execution only
* @param {number} data.id The id for this high availability request
* @param {object} data.state An object containing the information about the current replicaset
*/
/**
* A server member left the replicaset
*
* @event ReplSet#left
* @type {function}
* @param {string} type The type of member that left (primary|secondary|arbiter)
* @param {Server} server The server object that left
*/
/**
* A server member joined the replicaset
*
* @event ReplSet#joined
* @type {function}
* @param {string} type The type of member that joined (primary|secondary|arbiter)
* @param {Server} server The server object that joined
*/
/**
* ReplSet open event, emitted when replicaset can start processing commands.
*
* @event ReplSet#open
 * @type {ReplSet}
*/
/**
* ReplSet fullsetup event, emitted when all servers in the topology have been connected to.
*
* @event ReplSet#fullsetup
 * @type {ReplSet}
*/
/**
* ReplSet close event
*
* @event ReplSet#close
* @type {object}
*/
/**
* ReplSet error event, emitted if there is an error listener.
*
* @event ReplSet#error
* @type {MongoError}
*/
/**
* ReplSet timeout event
*
* @event ReplSet#timeout
* @type {object}
*/
/**
* ReplSet parseError event
*
* @event ReplSet#parseError
* @type {object}
*/
module.exports = ReplSet;

442
node_modules/mongodb/lib/server.js generated vendored Normal file
View File

@@ -0,0 +1,442 @@
"use strict";
var EventEmitter = require('events').EventEmitter
, inherits = require('util').inherits
, CServer = require('mongodb-core').Server
, Cursor = require('./cursor')
, AggregationCursor = require('./aggregation_cursor')
, CommandCursor = require('./command_cursor')
, f = require('util').format
, ServerCapabilities = require('./topology_base').ServerCapabilities
, Store = require('./topology_base').Store
, Define = require('./metadata')
, MongoError = require('mongodb-core').MongoError
, shallowClone = require('./utils').shallowClone;
/**
* @fileOverview The **Server** class is a class that represents a single server topology and is
* used to construct connections.
*
* **Server Should not be used, use MongoClient.connect**
* @example
* var Db = require('mongodb').Db,
* Server = require('mongodb').Server,
* test = require('assert');
* // Connect using single Server
 * var db = new Db('test', new Server('localhost', 27017));
* db.open(function(err, db) {
* // Get an additional db
* db.close();
* });
*/
/**
* Creates a new Server instance
* @class
* @deprecated
* @param {string} host The host for the server, can be either an IP4, IP6 or domain socket style host.
* @param {number} [port] The server port if IP4.
* @param {object} [options=null] Optional settings.
* @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons.
* @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support)
* @param {object} [options.sslValidate=true] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function.
* @param {array} [options.sslCA=null] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslCert=null] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslKey=null] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslPass=null] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {object} [options.socketOptions=null] Socket options
* @param {boolean} [options.socketOptions.autoReconnect=false] Reconnect on error.
* @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option.
* @param {number} [options.socketOptions.keepAlive=0] TCP KeepAlive on the socket with a X ms delay before start.
* @param {number} [options.socketOptions.connectTimeoutMS=0] TCP Connection timeout setting
* @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting
* @param {number} [options.reconnectTries=30] Server attempt to reconnect #times
* @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries
* @fires Server#connect
* @fires Server#close
* @fires Server#error
* @fires Server#timeout
* @fires Server#parseError
* @fires Server#reconnect
* @return {Server} a Server instance.
*/
var Server = function(host, port, options) {
  options = options || {};
  // Permit calling without `new`
  if(!(this instanceof Server)) return new Server(host, port, options);
  EventEmitter.call(this);
  var self = this;

  // Store option defaults (-1 bufferMaxEntries means unbounded buffering)
  var storeOptions = {
    force: false
    , bufferMaxEntries: -1
  }

  // Shared global store for operations attempted while disconnected
  var store = options.store || new Store(self, storeOptions);

  // Detect if we have a socket connection (a '/' marks a domain socket path)
  if(host.indexOf('\/') != -1) {
    // Domain socket: support the (host, options) call signature
    if(port != null && typeof port == 'object') {
      options = port;
      port = null;
    }
  } else if(port == null) {
    throw MongoError.create({message: 'port must be specified', driver:true});
  }

  // Clone options
  var clonedOptions = shallowClone(options);
  clonedOptions.host = host;
  clonedOptions.port = port;

  // Reconnect (autoReconnect takes precedence over the legacy auto_reconnect)
  var reconnect = typeof options.auto_reconnect == 'boolean' ? options.auto_reconnect : true;
  reconnect = typeof options.autoReconnect == 'boolean' ? options.autoReconnect : reconnect;
  var emitError = typeof options.emitError == 'boolean' ? options.emitError : true;
  var poolSize = typeof options.poolSize == 'number' ? options.poolSize : 5;

  // Socket options passed down, translated to mongodb-core names
  if(options.socketOptions) {
    if(options.socketOptions.connectTimeoutMS) {
      this.connectTimeoutMS = options.socketOptions.connectTimeoutMS;
      clonedOptions.connectionTimeout = options.socketOptions.connectTimeoutMS;
    }
    if(options.socketOptions.socketTimeoutMS) {
      clonedOptions.socketTimeout = options.socketOptions.socketTimeoutMS;
    }

    if(typeof options.socketOptions.keepAlive == 'number') {
      clonedOptions.keepAliveInitialDelay = options.socketOptions.keepAlive;
      clonedOptions.keepAlive = true;
    }

    if(typeof options.socketOptions.noDelay == 'boolean') {
      clonedOptions.noDelay = options.socketOptions.noDelay;
    }
  }

  // Add the cursor factory function
  clonedOptions.cursorFactory = Cursor;
  clonedOptions.reconnect = reconnect;
  clonedOptions.emitError = emitError;
  clonedOptions.size = poolSize;

  // Translate the ssl options into mongodb-core/tls names
  if(clonedOptions.sslCA) clonedOptions.ca = clonedOptions.sslCA;
  if(typeof clonedOptions.sslValidate == 'boolean') clonedOptions.rejectUnauthorized = clonedOptions.sslValidate;
  if(clonedOptions.sslKey) clonedOptions.key = clonedOptions.sslKey;
  if(clonedOptions.sslCert) clonedOptions.cert = clonedOptions.sslCert;
  if(clonedOptions.sslPass) clonedOptions.passphrase = clonedOptions.sslPass;

  // Add the non connection store
  clonedOptions.disconnectHandler = store;

  // Create an instance of a server instance from mongodb-core
  var server = new CServer(clonedOptions);

  // Server capabilities
  // NOTE(review): unused local; this.s.sCapabilities is initialized to null directly
  var sCapabilities = null;

  // Define the internal properties
  this.s = {
    // Create an instance of a server instance from mongodb-core
    server: server
    // Server capabilities
    , sCapabilities: null
    // Cloned options
    , clonedOptions: clonedOptions
    // Reconnect
    , reconnect: reconnect
    // Emit error
    , emitError: emitError
    // Pool size
    , poolSize: poolSize
    // Store Options
    , storeOptions: storeOptions
    // Store
    , store: store
    // Host
    , host: host
    // Port
    , port: port
    // Options
    , options: options
  }

  // BSON property
  Object.defineProperty(this, 'bson', {
    enumerable: true, get: function() {
      return self.s.server.bson;
    }
  });

  // Last ismaster
  Object.defineProperty(this, 'isMasterDoc', {
    enumerable:true, get: function() {
      return self.s.server.lastIsMaster();
    }
  });

  // Live connection count
  // NOTE(review): this getter shadows the configured poolSize option with the
  // current number of open connections
  Object.defineProperty(this, 'poolSize', {
    enumerable:true, get: function() { return self.s.server.connections().length; }
  });

  Object.defineProperty(this, 'autoReconnect', {
    enumerable:true, get: function() { return self.s.reconnect; }
  });

  Object.defineProperty(this, 'host', {
    enumerable:true, get: function() { return self.s.host; }
  });

  Object.defineProperty(this, 'port', {
    enumerable:true, get: function() { return self.s.port; }
  });
}
// Server inherits EventEmitter so topology events (open, close, reconnect, ...) can be observed
inherits(Server, EventEmitter);

// Metadata definition used to describe this class's public API surface
var define = Server.define = new Define('Server', Server, false);

// Return the BSON parser type used by the underlying mongodb-core server.
// NOTE(review): the exact value strings come from mongodb-core — confirm there.
Server.prototype.parserType = function() {
  return this.s.server.parserType();
}

define.classMethod('parserType', {callback: false, promise:false, returns: [String]});
// Connect
/**
 * Initiate a connection against the underlying mongodb-core server and wire up
 * the topology-level event handling.
 *
 * @param {Db} db The database requesting the connection; its bufferMaxEntries
 *   is copied onto the operation store.
 * @param {object} [_options] Options forwarded to the core server connect.
 * @param {function} [callback] Called with (err) on connection failure, or
 *   (null, server) once the topology is connected.
 */
Server.prototype.connect = function(db, _options, callback) {
  var self = this;
  if('function' === typeof _options) callback = _options, _options = {};
  if(_options == null) _options = {};
  if(!('function' === typeof callback)) callback = null;
  self.s.options = _options;

  // Keep the store's buffering limit in sync with the db-level setting
  self.s.storeOptions.bufferMaxEntries = db.bufferMaxEntries;

  // Error handler used only while the initial connect is outstanding
  var connectErrorHandler = function(event) {
    return function(err) {
      // Remove all the pre-connect event handlers
      var events = ['timeout', 'error', 'close'];
      events.forEach(function(e) {
        self.s.server.removeListener(e, connectHandlers[e]);
      });

      // BUGFIX: remove the listener actually registered for 'connect'
      // (connectHandler); the previous code removed connectErrorHandler,
      // which was never registered for that event, leaking the stale
      // connect listener.
      self.s.server.removeListener('connect', connectHandler);

      // Try to callback
      try {
        callback(err);
      } catch(err) {
        // A throwing user callback must surface, not be swallowed
        process.nextTick(function() { throw err; })
      }
    }
  }

  // Post-connect handler: re-emit topology events, except 'error' which
  // would throw if this emitter has no 'error' listener
  var errorHandler = function(event) {
    return function(err) {
      if(event != 'error') {
        self.emit(event, err);
      }
    }
  }

  // On reconnect, notify listeners and replay any buffered operations
  var reconnectHandler = function(err) {
    self.emit('reconnect', self);
    self.s.store.execute();
  }

  // Destroy called on topology, perform cleanup
  var destroyHandler = function() {
    self.s.store.flush();
  }

  // Connect handler
  var connectHandler = function() {
    // Clear out all the current handlers left over
    ["timeout", "error", "close"].forEach(function(e) {
      self.s.server.removeAllListeners(e);
    });

    // Set up post-connect listeners
    self.s.server.once('timeout', errorHandler('timeout'));
    self.s.server.once('error', errorHandler('error'));
    self.s.server.on('close', errorHandler('close'));

    // Only called on destroy
    self.s.server.once('destroy', destroyHandler);

    // Emit open event
    self.emit('open', null, self);

    // Return correctly
    try {
      callback(null, self);
    } catch(err) {
      // Removed stray debug console.log; rethrow asynchronously so a
      // throwing user callback surfaces instead of being swallowed
      process.nextTick(function() { throw err; })
    }
  }

  // Set up listeners
  var connectHandlers = {
    timeout: connectErrorHandler('timeout'),
    error: connectErrorHandler('error'),
    close: connectErrorHandler('close')
  };

  // Add the event handlers
  self.s.server.once('timeout', connectHandlers.timeout);
  self.s.server.once('error', connectHandlers.error);
  self.s.server.once('close', connectHandlers.close);
  self.s.server.once('connect', connectHandler);

  // Reconnect server
  self.s.server.on('reconnect', reconnectHandler);

  // Start connection
  self.s.server.connect(_options);
}
// Server capabilities
// Derive (and memoize) the feature set supported by the connected server,
// based on the most recent ismaster response. Returns null before connect.
Server.prototype.capabilities = function() {
  // Serve the memoized value when it was already computed
  if(this.s.sCapabilities) return this.s.sCapabilities;

  // Without an ismaster result there is nothing to derive capabilities from
  var ismaster = this.s.server.lastIsMaster();
  if(ismaster == null) return null;

  // Compute once and cache for subsequent calls
  this.s.sCapabilities = new ServerCapabilities(ismaster);
  return this.s.sCapabilities;
}

define.classMethod('capabilities', {callback: false, promise:false, returns: [ServerCapabilities]});
// Command
// Execute a raw command against the server; thin delegate to mongodb-core.
// ns is the fully qualified namespace, cmd the command document.
Server.prototype.command = function(ns, cmd, options, callback) {
  this.s.server.command(ns, cmd, options, callback);
}

define.classMethod('command', {callback: true, promise:false});

// Insert
// Insert documents (ops is the array of documents); delegates to mongodb-core.
Server.prototype.insert = function(ns, ops, options, callback) {
  this.s.server.insert(ns, ops, options, callback);
}

define.classMethod('insert', {callback: true, promise:false});

// Update
// Execute update operations against the namespace; delegates to mongodb-core.
Server.prototype.update = function(ns, ops, options, callback) {
  this.s.server.update(ns, ops, options, callback);
}

define.classMethod('update', {callback: true, promise:false});

// Remove
// Execute remove operations against the namespace; delegates to mongodb-core.
Server.prototype.remove = function(ns, ops, options, callback) {
  this.s.server.remove(ns, ops, options, callback);
}

define.classMethod('remove', {callback: true, promise:false});

// IsConnected
// Whether the underlying core server currently reports itself connected.
Server.prototype.isConnected = function() {
  return this.s.server.isConnected();
}

// Whether the underlying core server has been destroyed.
Server.prototype.isDestroyed = function() {
  return this.s.server.isDestroyed();
}

define.classMethod('isConnected', {callback: false, promise:false, returns: [Boolean]});
// Cursor
/**
 * Create a new cursor against this topology.
 * Routes buffered operations through the shared disconnect store.
 *
 * @param {string} ns The fully qualified namespace.
 * @param {object} cmd The command or query to run.
 * @param {object} [options] Cursor options; created when omitted.
 * @return {Cursor} The cursor from the underlying mongodb-core server.
 */
Server.prototype.cursor = function(ns, cmd, options) {
  // BUGFIX: guard against a missing options object; previously this threw a
  // TypeError when called without options.
  options = options || {};
  // Buffered operations are replayed (or failed) via the shared store
  options.disconnectHandler = this.s.store;
  return this.s.server.cursor(ns, cmd, options);
}

define.classMethod('cursor', {callback: false, promise:false, returns: [Cursor, AggregationCursor, CommandCursor]});
// Swap the BSON parser implementation on the underlying core server.
// NOTE(review): valid `type` values are defined by mongodb-core — confirm there.
Server.prototype.setBSONParserType = function(type) {
  return this.s.server.setBSONParserType(type);
}

// Return the most recent ismaster response document (null before first connect).
Server.prototype.lastIsMaster = function() {
  return this.s.server.lastIsMaster();
}
// Close the topology: tear down core connections, and on a forced close also
// fail out every operation buffered in the store.
Server.prototype.close = function(forceClosed) {
  this.s.server.destroy();

  // Nothing more to do unless the caller demanded a hard close
  if(forceClosed != true) return;

  // Mark the store as force-closed and reject everything it buffered
  this.s.storeOptions.force = forceClosed;
  this.s.store.flush();
}

define.classMethod('close', {callback: false, promise:false});
// Authenticate against the server; forwards all arguments (mechanism,
// credentials, callback) untouched to the mongodb-core auth implementation.
Server.prototype.auth = function() {
  var args = Array.prototype.slice.call(arguments, 0);
  this.s.server.auth.apply(this.s.server, args);
}

define.classMethod('auth', {callback: true, promise:false});
/**
 * All raw connections
 * @method
 * @return {array} The raw connection objects held by the mongodb-core pool.
 */
Server.prototype.connections = function() {
  return this.s.server.connections();
}

define.classMethod('connections', {callback: false, promise:false, returns:[Array]});
/**
* Server connect event
*
* @event Server#connect
* @type {object}
*/
/**
* Server close event
*
* @event Server#close
* @type {object}
*/
/**
* Server reconnect event
*
* @event Server#reconnect
* @type {object}
*/
/**
* Server error event
*
* @event Server#error
* @type {MongoError}
*/
/**
* Server timeout event
*
* @event Server#timeout
* @type {object}
*/
/**
* Server parseError event
*
* @event Server#parseError
* @type {object}
*/
// Export the Server topology class
module.exports = Server;

152
node_modules/mongodb/lib/topology_base.js generated vendored Normal file
View File

@@ -0,0 +1,152 @@
"use strict";
var MongoError = require('mongodb-core').MongoError
, f = require('util').format;
// The store of ops: buffers operations issued while the topology is
// disconnected so they can be replayed (or failed) later.
var Store = function(topology, storeOptions) {
  var self = this;

  // Default buffering policy: never force-fail, unlimited buffered entries
  var resolvedOptions = storeOptions || {force: false, bufferMaxEntries: -1};

  // Internal state bag
  this.s = {
    storedOps: [],
    storeOptions: resolvedOptions,
    topology: topology
  };

  // Expose the number of buffered operations as a read-only property
  Object.defineProperty(this, 'length', {
    enumerable: true,
    get: function() { return self.s.storedOps.length; }
  });
}
// Buffer a wire operation (insert/update/remove/command) for later replay.
// Rejects immediately when the store is force-closed or buffering is disabled,
// and drains the whole buffer with errors once the configured limit is passed.
Store.prototype.add = function(opType, ns, ops, options, callback) {
  var storeOptions = this.s.storeOptions;
  var storedOps = this.s.storedOps;

  // A force-closed store rejects everything immediately
  if(storeOptions.force) {
    return callback(MongoError.create({message: "db closed by application", driver:true}));
  }

  // Buffering disabled entirely
  if(storeOptions.bufferMaxEntries == 0) {
    return callback(MongoError.create({message: f("no connection available for operation and number of stored operation > %s", storeOptions.bufferMaxEntries), driver:true }));
  }

  // Over the buffering limit: fail every buffered operation and drop this one
  if(storeOptions.bufferMaxEntries > 0 && storedOps.length > storeOptions.bufferMaxEntries) {
    while(storedOps.length > 0) {
      var op = storedOps.shift();
      op.c(MongoError.create({message: f("no connection available for operation and number of stored operation > %s", storeOptions.bufferMaxEntries), driver:true }));
    }
    return;
  }

  storedOps.push({t: opType, n: ns, o: ops, op: options, c: callback})
}
// Buffer a method invocation on an arbitrary object (used for cursor ops)
// so it can be replayed once the topology reconnects. Same limit policy as add().
Store.prototype.addObjectAndMethod = function(opType, object, method, params, callback) {
  var storeOptions = this.s.storeOptions;
  var storedOps = this.s.storedOps;

  // A force-closed store rejects everything immediately
  if(storeOptions.force) {
    return callback(MongoError.create({message: "db closed by application", driver:true }));
  }

  // Buffering disabled entirely
  if(storeOptions.bufferMaxEntries == 0) {
    return callback(MongoError.create({message: f("no connection available for operation and number of stored operation > %s", storeOptions.bufferMaxEntries), driver:true }));
  }

  // Over the buffering limit: fail every buffered operation and drop this one
  if(storeOptions.bufferMaxEntries > 0 && storedOps.length > storeOptions.bufferMaxEntries) {
    while(storedOps.length > 0) {
      var op = storedOps.shift();
      op.c(MongoError.create({message: f("no connection available for operation and number of stored operation > %s", storeOptions.bufferMaxEntries), driver:true }));
    }
    return;
  }

  storedOps.push({t: opType, m: method, o: object, p: params, c: callback})
}
// Fail every buffered operation with a "no connection" error, emptying the store.
Store.prototype.flush = function() {
  var storedOps = this.s.storedOps;
  while(storedOps.length > 0) {
    var op = storedOps.shift();
    op.c(MongoError.create({message: f("no connection available for operation"), driver:true }));
  }
}
// Replay all buffered operations against the (now reconnected) topology.
Store.prototype.execute = function() {
  // Detach the queue first so operations re-buffered during replay are not
  // picked up by this pass
  var ops = this.s.storedOps;
  this.s.storedOps = [];

  while(ops.length > 0) {
    var op = ops.shift();
    if(op.t == 'cursor') {
      // Cursor ops re-invoke the stored method on the stored object
      op.o[op.m].apply(op.o, op.p);
    } else {
      // Wire ops dispatch back onto the topology (insert/update/remove/command)
      this.s.topology[op.t](op.n, op.o, op.op, op.c);
    }
  }
}
// Return the raw array of buffered operations (live reference, not a copy).
Store.prototype.all = function() {
  return this.s.storedOps;
}
// Server capabilities
/**
 * Derive the feature set supported by a server from its ismaster response.
 * All derived values are exposed as read-only, enumerable properties.
 *
 * @param {object} ismaster The ismaster response document; missing wire
 *   versions are treated as 0 (pre-wire-protocol servers).
 */
var ServerCapabilities = function(ismaster) {
  // Define a read-only, enumerable property on this instance
  var setup_get_property = function(object, name, value) {
    Object.defineProperty(object, name, {
      enumerable: true
      , get: function () { return value; }
    });
  }

  // BUGFIX: default missing wire versions to 0 *before* deriving the
  // capability flags. Previously the defaults were applied after the checks,
  // so an ismaster without wire versions exposed minWireVersion as 0 yet
  // reported hasTextSearch == false, contradicting the defaulting intent.
  if(ismaster.minWireVersion == null) {
    ismaster.minWireVersion = 0;
  }

  if(ismaster.maxWireVersion == null) {
    ismaster.maxWireVersion = 0;
  }

  // Capabilities
  var aggregationCursor = false;
  var writeCommands = false;
  var textSearch = false;
  var authCommands = false;
  var listCollections = false;
  var listIndexes = false;
  // Batch size limit advertised by the server, with the historical default
  var maxNumberOfDocsInBatch = ismaster.maxWriteBatchSize || 1000;

  if(ismaster.minWireVersion >= 0) {
    textSearch = true;
  }

  if(ismaster.maxWireVersion >= 1) {
    aggregationCursor = true;
    authCommands = true;
  }

  if(ismaster.maxWireVersion >= 2) {
    writeCommands = true;
  }

  if(ismaster.maxWireVersion >= 3) {
    listCollections = true;
    listIndexes = true;
  }

  // Map up read only parameters
  setup_get_property(this, "hasAggregationCursor", aggregationCursor);
  setup_get_property(this, "hasWriteCommands", writeCommands);
  setup_get_property(this, "hasTextSearch", textSearch);
  setup_get_property(this, "hasAuthCommands", authCommands);
  setup_get_property(this, "hasListCollectionsCommand", listCollections);
  setup_get_property(this, "hasListIndexesCommand", listIndexes);
  setup_get_property(this, "minWireVersion", ismaster.minWireVersion);
  setup_get_property(this, "maxWireVersion", ismaster.maxWireVersion);
  setup_get_property(this, "maxNumberOfDocsInBatch", maxNumberOfDocsInBatch);
}
// Shared by the Server/ReplSet/Mongos topology implementations
exports.Store = Store;
exports.ServerCapabilities = ServerCapabilities;

379
node_modules/mongodb/lib/url_parser.js generated vendored Normal file
View File

@@ -0,0 +1,379 @@
"use strict";
var ReadPreference = require('./read_preference'),
parser = require('url'),
f = require('util').format;
module.exports = function(url, options) {
// Ensure we have a default options object if none set
options = options || {};
// Variables
var connection_part = '';
var auth_part = '';
var query_string_part = '';
var dbName = 'admin';
// Url parser result
var result = parser.parse(url, true);
if(result.protocol != 'mongodb:') {
throw new Error('invalid schema, expected mongodb');
}
if((result.hostname == null || result.hostname == '') && url.indexOf('.sock') == -1) {
throw new Error('no hostname or hostnames provided in connection string');
}
if(result.port == '0') {
throw new Error('invalid port (zero) with hostname');
}
if(!isNaN(parseInt(result.port, 10)) && parseInt(result.port, 10) > 65535) {
throw new Error('invalid port (larger than 65535) with hostname');
}
if(result.path
&& result.path.length > 0
&& result.path[0] != '/'
&& url.indexOf('.sock') == -1) {
throw new Error('missing delimiting slash between hosts and options');
}
if(result.query) {
for(var name in result.query) {
if(name.indexOf(':') != -1) {
throw new Error('double colon in host identifier');
}
if(result.query[name] == '') {
throw new Error('query parameter ' + name + ' is an incomplete value pair');
}
}
}
if(result.auth) {
var parts = result.auth.split(':');
if(url.indexOf(result.auth) != -1 && parts.length > 2) {
throw new Error('Username with password containing an unescaped colon');
}
if(url.indexOf(result.auth) != -1 && result.auth.indexOf('@') != -1) {
throw new Error('Username containing an unescaped at-sign');
}
}
// Remove query
var clean = url.split('?').shift();
// Extract the list of hosts
var strings = clean.split(',');
var hosts = [];
for(var i = 0; i < strings.length; i++) {
var hostString = strings[i];
if(hostString.indexOf('mongodb') != -1) {
if(hostString.indexOf('@') != -1) {
hosts.push(hostString.split('@').pop())
} else {
hosts.push(hostString.substr('mongodb://'.length));
}
} else if(hostString.indexOf('/') != -1) {
hosts.push(hostString.split('/').shift());
} else if(hostString.indexOf('/') == -1) {
hosts.push(hostString.trim());
}
}
for(var i = 0; i < hosts.length; i++) {
var r = parser.parse(f('mongodb://%s', hosts[i].trim()));
if(r.path && r.path.indexOf(':') != -1) {
throw new Error('double colon in host identifier');
}
}
// If we have a ? mark cut the query elements off
if(url.indexOf("?") != -1) {
query_string_part = url.substr(url.indexOf("?") + 1);
connection_part = url.substring("mongodb://".length, url.indexOf("?"))
} else {
connection_part = url.substring("mongodb://".length);
}
// Check if we have auth params
if(connection_part.indexOf("@") != -1) {
auth_part = connection_part.split("@")[0];
connection_part = connection_part.split("@")[1];
}
// Check if the connection string has a db
if(connection_part.indexOf(".sock") != -1) {
if(connection_part.indexOf(".sock/") != -1) {
dbName = connection_part.split(".sock/")[1];
connection_part = connection_part.split("/", connection_part.indexOf(".sock") + ".sock".length);
}
} else if(connection_part.indexOf("/") != -1) {
dbName = connection_part.split("/")[1];
connection_part = connection_part.split("/")[0];
}
// Result object
var object = {};
// Pick apart the authentication part of the string
var authPart = auth_part || '';
var auth = authPart.split(':', 2);
// Decode the URI components
auth[0] = decodeURIComponent(auth[0]);
if(auth[1]){
auth[1] = decodeURIComponent(auth[1]);
}
// Add auth to final object if we have 2 elements
if(auth.length == 2) object.auth = {user: auth[0], password: auth[1]};
// Variables used for temporary storage
var hostPart;
var urlOptions;
var servers;
var serverOptions = {socketOptions: {}};
var dbOptions = {read_preference_tags: []};
var replSetServersOptions = {socketOptions: {}};
// Add server options to final object
object.server_options = serverOptions;
object.db_options = dbOptions;
object.rs_options = replSetServersOptions;
object.mongos_options = {};
// Let's check if we are using a domain socket
if(url.match(/\.sock/)) {
// Split out the socket part
var domainSocket = url.substring(
url.indexOf("mongodb://") + "mongodb://".length
, url.lastIndexOf(".sock") + ".sock".length);
// Clean out any auth stuff if any
if(domainSocket.indexOf("@") != -1) domainSocket = domainSocket.split("@")[1];
servers = [{domain_socket: domainSocket}];
} else {
// Split up the db
hostPart = connection_part;
// Deduplicate servers
var deduplicatedServers = {};
// Parse all server results
servers = hostPart.split(',').map(function(h) {
var _host, _port, ipv6match;
//check if it matches [IPv6]:port, where the port number is optional
if ((ipv6match = /\[([^\]]+)\](?:\:(.+))?/.exec(h))) {
_host = ipv6match[1];
_port = parseInt(ipv6match[2], 10) || 27017;
} else {
//otherwise assume it's IPv4, or plain hostname
var hostPort = h.split(':', 2);
_host = hostPort[0] || 'localhost';
_port = hostPort[1] != null ? parseInt(hostPort[1], 10) : 27017;
// Check for localhost?safe=true style case
if(_host.indexOf("?") != -1) _host = _host.split(/\?/)[0];
}
// No entry returned for duplicate servr
if(deduplicatedServers[_host + "_" + _port]) return null;
deduplicatedServers[_host + "_" + _port] = 1;
// Return the mapped object
return {host: _host, port: _port};
}).filter(function(x) {
return x != null;
});
}
// Get the db name
object.dbName = dbName || 'admin';
// Split up all the options
urlOptions = (query_string_part || '').split(/[&;]/);
// Ugh, we have to figure out which options go to which constructor manually.
urlOptions.forEach(function(opt) {
if(!opt) return;
var splitOpt = opt.split('='), name = splitOpt[0], value = splitOpt[1];
// Options implementations
switch(name) {
case 'slaveOk':
case 'slave_ok':
serverOptions.slave_ok = (value == 'true');
dbOptions.slaveOk = (value == 'true');
break;
case 'maxPoolSize':
case 'poolSize':
serverOptions.poolSize = parseInt(value, 10);
replSetServersOptions.poolSize = parseInt(value, 10);
break;
case 'autoReconnect':
case 'auto_reconnect':
serverOptions.auto_reconnect = (value == 'true');
break;
case 'minPoolSize':
throw new Error("minPoolSize not supported");
case 'maxIdleTimeMS':
throw new Error("maxIdleTimeMS not supported");
case 'waitQueueMultiple':
throw new Error("waitQueueMultiple not supported");
case 'waitQueueTimeoutMS':
throw new Error("waitQueueTimeoutMS not supported");
case 'uuidRepresentation':
throw new Error("uuidRepresentation not supported");
case 'ssl':
if(value == 'prefer') {
serverOptions.ssl = value;
replSetServersOptions.ssl = value;
break;
}
serverOptions.ssl = (value == 'true');
replSetServersOptions.ssl = (value == 'true');
break;
case 'sslValidate':
serverOptions.sslValidate = (value == 'true');
replSetServerOptions.sslValidate = (value == 'true');
break;
case 'replicaSet':
case 'rs_name':
replSetServersOptions.rs_name = value;
break;
case 'reconnectWait':
replSetServersOptions.reconnectWait = parseInt(value, 10);
break;
case 'retries':
replSetServersOptions.retries = parseInt(value, 10);
break;
case 'readSecondary':
case 'read_secondary':
replSetServersOptions.read_secondary = (value == 'true');
break;
case 'fsync':
dbOptions.fsync = (value == 'true');
break;
case 'journal':
dbOptions.j = (value == 'true');
break;
case 'safe':
dbOptions.safe = (value == 'true');
break;
case 'nativeParser':
case 'native_parser':
dbOptions.native_parser = (value == 'true');
break;
case 'readConcernLevel':
dbOptions.readConcern = {level: value};
break;
case 'connectTimeoutMS':
serverOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
replSetServersOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
break;
case 'socketTimeoutMS':
serverOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
replSetServersOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
break;
case 'w':
dbOptions.w = parseInt(value, 10);
if(isNaN(dbOptions.w)) dbOptions.w = value;
break;
case 'authSource':
dbOptions.authSource = value;
break;
case 'gssapiServiceName':
dbOptions.gssapiServiceName = value;
break;
case 'authMechanism':
if(value == 'GSSAPI') {
// If no password provided decode only the principal
if(object.auth == null) {
var urlDecodeAuthPart = decodeURIComponent(authPart);
if(urlDecodeAuthPart.indexOf("@") == -1) throw new Error("GSSAPI requires a provided principal");
object.auth = {user: urlDecodeAuthPart, password: null};
} else {
object.auth.user = decodeURIComponent(object.auth.user);
}
} else if(value == 'MONGODB-X509') {
object.auth = {user: decodeURIComponent(authPart)};
}
// Only support GSSAPI or MONGODB-CR for now
if(value != 'GSSAPI'
&& value != 'MONGODB-X509'
&& value != 'MONGODB-CR'
&& value != 'DEFAULT'
&& value != 'SCRAM-SHA-1'
&& value != 'PLAIN')
throw new Error("only DEFAULT, GSSAPI, PLAIN, MONGODB-X509, SCRAM-SHA-1 or MONGODB-CR is supported by authMechanism");
// Authentication mechanism
dbOptions.authMechanism = value;
break;
case 'authMechanismProperties':
// Split up into key, value pairs
var values = value.split(',');
var o = {};
// For each value split into key, value
values.forEach(function(x) {
var v = x.split(':');
o[v[0]] = v[1];
});
// Set all authMechanismProperties
dbOptions.authMechanismProperties = o;
// Set the service name value
if(typeof o.SERVICE_NAME == 'string') dbOptions.gssapiServiceName = o.SERVICE_NAME;
break;
case 'wtimeoutMS':
dbOptions.wtimeout = parseInt(value, 10);
break;
case 'readPreference':
if(!ReadPreference.isValid(value)) throw new Error("readPreference must be either primary/primaryPreferred/secondary/secondaryPreferred/nearest");
dbOptions.readPreference = value;
break;
case 'readPreferenceTags':
// Decode the value
value = decodeURIComponent(value);
// Contains the tag object
var tagObject = {};
if(value == null || value == '') {
dbOptions.read_preference_tags.push(tagObject);
break;
}
// Split up the tags
var tags = value.split(/\,/);
for(var i = 0; i < tags.length; i++) {
var parts = tags[i].trim().split(/\:/);
tagObject[parts[0]] = parts[1];
}
// Set the preferences tags
dbOptions.read_preference_tags.push(tagObject);
break;
default:
break;
}
});
// No tags: should be null (not [])
if(dbOptions.read_preference_tags.length === 0) {
dbOptions.read_preference_tags = null;
}
// Validate if there are an invalid write concern combinations
if((dbOptions.w == -1 || dbOptions.w == 0) && (
dbOptions.journal == true
|| dbOptions.fsync == true
|| dbOptions.safe == true)) throw new Error("w set to -1 or 0 cannot be combined with safe/w/journal/fsync")
// If no read preference set it to primary
if(!dbOptions.readPreference) {
dbOptions.readPreference = 'primary';
}
// Add servers to result
object.servers = servers;
// Returned parsed object
return object;
}

234
node_modules/mongodb/lib/utils.js generated vendored Normal file
View File

@@ -0,0 +1,234 @@
"use strict";
var MongoError = require('mongodb-core').MongoError,
f = require('util').format;
// Create a new object carrying every enumerable key of obj (own or inherited).
// Nested values are shared, not copied.
var shallowClone = function(obj) {
  var clone = {};
  // for..in deliberately walks inherited enumerable keys as well as own ones
  for(var key in obj) {
    clone[key] = obj[key];
  }
  return clone;
}
// Attach a read-only, enumerable property `name` to obj that always yields `value`.
var getSingleProperty = function(obj, name, value) {
  var descriptor = {
    enumerable: true,
    get: function() {
      return value;
    }
  };
  Object.defineProperty(obj, name, descriptor);
}
// Normalize a single sort direction ('asc', 'descending', 1, '-1', ...) to 1 or -1.
// Throws on anything unrecognized.
var formatSortValue = exports.formatSortValue = function(sortDirection) {
  var normalized = ("" + sortDirection).toLowerCase();

  if(normalized == 'ascending' || normalized == 'asc' || normalized == '1') {
    return 1;
  }

  if(normalized == 'descending' || normalized == 'desc' || normalized == '-1') {
    return -1;
  }

  throw new Error("Illegal sort clause, must be of the form "
    + "[['field1', '(ascending|descending)'], "
    + "['field2', '(ascending|descending)']]");
};
// Normalize the various accepted sort specifications into an order-by object:
// null → null, 'field' → {field:1}, ['a', ['b','desc']] → {a:1, b:-1},
// a plain object passes through unchanged. Anything else throws.
var formattedOrderClause = exports.formattedOrderClause = function(sortValue) {
  if(sortValue == null) return null;

  if(Array.isArray(sortValue)) {
    // An empty array means "no ordering"
    if(sortValue.length === 0) return null;

    var orderBy = {};
    for(var i = 0; i < sortValue.length; i++) {
      var entry = sortValue[i];
      if(entry.constructor == String) {
        // Bare field name defaults to ascending
        orderBy[entry] = 1;
      } else {
        // [field, direction] pair
        orderBy[entry[0]] = formatSortValue(entry[1]);
      }
    }
    return orderBy;
  }

  if(typeof sortValue == 'object') {
    // Already in order-by form; returned as-is
    return sortValue;
  }

  if(typeof sortValue == 'string') {
    var single = {};
    single[sortValue] = 1;
    return single;
  }

  throw new Error("Illegal sort clause, must be of the form " +
    "[['field1', '(ascending|descending)'], ['field2', '(ascending|descending)']]");
};
// Validate a collection name, throwing on every form MongoDB rejects:
// non-strings, empty names, '..', '$' (outside $cmd / oplog.$main),
// leading/trailing '.', and embedded NUL bytes.
var checkCollectionName = function checkCollectionName (collectionName) {
  // Must be a string at all
  if(typeof collectionName !== 'string') {
    throw Error("collection name must be a String");
  }

  // Empty names and names containing '..' are rejected together
  if(!collectionName || collectionName.indexOf('..') !== -1) {
    throw Error("collection names cannot be empty");
  }

  // '$' is reserved, except for system namespaces like $cmd and oplog.$main
  var containsDollar = collectionName.indexOf('$') !== -1;
  if(containsDollar && collectionName.match(/((^\$cmd)|(oplog\.\$main))/) == null) {
    throw Error("collection names must not contain '$'");
  }

  // Leading or trailing '.' is invalid
  if(collectionName.match(/^\.|\.$/) != null) {
    throw Error("collection names must not start or end with '.'");
  }

  // NUL bytes are never allowed in a namespace
  if(collectionName.indexOf("\x00") !== -1) {
    throw new Error("collection names cannot contain a null character");
  }
};
// Invoke a user callback defensively: a null callback is a no-op, and any
// exception the callback throws is rethrown asynchronously on the next tick
// (returning false) instead of unwinding the driver's stack.
var handleCallback = function(callback, err, value1, value2) {
  if(callback == null) return;

  try {
    // Only forward the third value when it is truthy
    return value2 ? callback(err, value1, value2) : callback(err, value1);
  } catch(thrown) {
    process.nextTick(function() { throw thrown; });
    return false;
  }
}
/**
 * Wrap a raw Mongo error document in an Error instance, copying every own
 * field of the document onto the resulting error. Existing Error instances
 * pass through untouched.
 * @ignore
 * @api private
 */
var toError = function(error) {
  // Already a real Error: nothing to wrap
  if(error instanceof Error) return error;

  // Prefer the standard server-side message fields, falling back to the value itself
  var message = error.err || error.errmsg || error.errMessage || error;
  var wrapped = MongoError.create({message: message, driver: true});

  // Copy every own field of the original document onto the wrapper
  if(typeof error == 'object') {
    Object.keys(error).forEach(function(key) {
      wrapped[key] = error[key];
    });
  }

  return wrapped;
}
/**
 * Normalize an index hint into the form the server expects: strings pass
 * through, arrays of field names become {field: 1, ...}, plain objects are
 * shallow-copied, and anything else yields null.
 * @ignore
 */
var normalizeHintField = function normalizeHintField(hint) {
  if(typeof hint == 'string') return hint;

  if(Array.isArray(hint)) {
    var fromArray = {};
    hint.forEach(function(field) {
      fromArray[field] = 1;
    });
    return fromArray;
  }

  if(hint != null && typeof hint == 'object') {
    // Shallow copy; for..in intentionally includes inherited enumerable keys
    var copied = {};
    for(var name in hint) {
      copied[name] = hint[name];
    }
    return copied;
  }

  return null;
};
/**
 * Create index name based on field spec.
 * Accepts a field name, an array of names / [name, direction] pairs / objects,
 * or a single {field: direction} object. Returns the generated index name,
 * the keys seen (for object specs), and a {field: direction} hash.
 *
 * @ignore
 * @api private
 */
var parseIndexOptions = function(fieldOrSpec) {
  var fieldHash = {};
  var indexes = [];
  var keys;

  // Record one field/direction pair in both the name parts and the hash
  var record = function(field, direction) {
    indexes.push(field + '_' + direction);
    fieldHash[field] = direction;
  };

  if('string' == typeof fieldOrSpec) {
    // 'type'
    record(fieldOrSpec, 1);
  } else if(Array.isArray(fieldOrSpec)) {
    fieldOrSpec.forEach(function(f) {
      if('string' == typeof f) {
        // ['type'] — bare name defaults to ascending
        record(f, 1);
      } else if(Array.isArray(f)) {
        // [['location', '2d'], ['type', 1]]
        record(f[0], f[1] || 1);
      } else if(isObject(f)) {
        // [{location:'2d'}, {type:1}]
        keys = Object.keys(f);
        keys.forEach(function(k) {
          record(k, f[k]);
        });
      }
      // anything else (undefined, etc.) is deliberately ignored
    });
  } else if(isObject(fieldOrSpec)) {
    // {location:'2d', type:1}
    keys = Object.keys(fieldOrSpec);
    keys.forEach(function(key) {
      record(key, fieldOrSpec[key]);
    });
  }

  return {
    name: indexes.join("_"), keys: keys, fieldHash: fieldHash
  };
}
/**
 * Test whether a value is a plain object (reports '[object Object]' via
 * Object.prototype.toString). Arrays, null, functions and primitives all fail.
 * @ignore
 */
var isObject = exports.isObject = function (arg) {
  // Hardening: call Object.prototype.toString explicitly. The previous bare
  // `toString.call(arg)` only worked because the global object happens to
  // inherit toString, and broke if any `toString` binding was in scope.
  return '[object Object]' === Object.prototype.toString.call(arg);
}
// Project only the named debug fields out of an options object into a new one.
var debugOptions = function(debugFields, options) {
  return debugFields.reduce(function(selected, field) {
    selected[field] = options[field];
    return selected;
  }, {});
}
// Copy every option onto the command, skipping keys present in the exclude map.
// Mutates and returns `command`.
var decorateCommand = function(command, options, exclude) {
  // for..in walks inherited enumerable keys too, matching the original contract
  for(var key in options) {
    // Only copy keys the exclude map does not mention
    if(exclude[key] == null) {
      command[key] = options[key];
    }
  }
  return command;
}
// Public exports of this utility module
exports.shallowClone = shallowClone;
exports.getSingleProperty = getSingleProperty;
exports.checkCollectionName = checkCollectionName;
exports.toError = toError;
exports.formattedOrderClause = formattedOrderClause;
exports.parseIndexOptions = parseIndexOptions;
exports.normalizeHintField = normalizeHintField;
exports.handleCallback = handleCallback;
exports.decorateCommand = decorateCommand;
exports.isObject = isObject;
exports.debugOptions = debugOptions;