1
0
mirror of https://github.com/mgerb/mywebsite synced 2026-01-12 02:42:48 +00:00

updated bunch of file paths and changed the way posts are loaded

This commit is contained in:
2016-01-05 12:28:04 -06:00
parent 4bb8cae81e
commit 6ab45fe935
13249 changed files with 317868 additions and 2101398 deletions

244
node_modules/mongodb-core/lib/auth/gssapi.js generated vendored Normal file
View File

@@ -0,0 +1,244 @@
"use strict";
var f = require('util').format
, crypto = require('crypto')
, MongoError = require('../error');
// Holds the credentials (plus mechanism options) used to authenticate
// against a database, so the pool can be re-authenticated later.
function AuthSession(db, username, password, options) {
  this.db = db;
  this.username = username;
  this.password = password;
  this.options = options;
}

// Two sessions are equal when db, username and password all match;
// options are intentionally not compared.
AuthSession.prototype.equal = function(other) {
  return other.db == this.db &&
    other.username == this.username &&
    other.password == this.password;
};
// Kerberos class
// The `kerberos` native add-on is an optional dependency: when it is not
// installed these stay null and auth() fails fast with a clear error
// instead of crashing at require time.
var Kerberos = null;
var MongoAuthProcess = null;
// Try to grab the Kerberos class
try {
Kerberos = require('kerberos').Kerberos
// Authentication process for Mongo
MongoAuthProcess = require('kerberos').processes.MongoAuthProcess
} catch(err) {}
/**
 * Creates a new GSSAPI authentication mechanism
 * @class
 * @return {GSSAPI} the new mechanism; authStore collects the sessions that
 *   authenticated successfully so the pool can be re-authenticated later
 */
function GSSAPI() {
  this.authStore = [];
}
/**
 * Authenticate
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {object} options Options (supports gssapiServiceName)
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
GSSAPI.prototype.auth = function(server, pool, db, username, password, options, callback) {
  var self = this;
  // The optional kerberos module could not be loaded; GSSAPI cannot work without it
  if(Kerberos == null) return callback(new Error("Kerberos library is not installed"));
  var gssapiServiceName = options['gssapiServiceName'] || 'mongodb';
  // Authenticate every live connection in the pool
  var connections = pool.getAll();
  var count = connections.length;
  if(count == 0) return callback(null, null);
  // Number of connections that completed the GSSAPI conversation successfully
  var numberOfValidConnections = 0;
  var errorObject = null;
  while(connections.length > 0) {
    // Run the full GSSAPI conversation for a single connection
    var execute = function(connection) {
      GSSAPIInitialize(db, username, password, db, gssapiServiceName, server, connection, function(err, r) {
        count = count - 1;
        // Record the outcome for this connection
        if(err) {
          errorObject = err;
        } else if(r.result['$err']) {
          errorObject = r.result;
        } else if(r.result['errmsg']) {
          errorObject = r.result;
        } else {
          numberOfValidConnections = numberOfValidConnections + 1;
        }
        // All connections processed: success if at least one authenticated
        if(count == 0 && numberOfValidConnections > 0) {
          // Store the auth details for later reauthentication
          addAuthSession(self.authStore, new AuthSession(db, username, password, options));
          callback(null, true);
        } else if(count == 0) {
          // Fixed: the original message said "mongocr" (copy/paste leftover)
          if(errorObject == null) errorObject = new MongoError(f("failed to authenticate using gssapi"));
          callback(errorObject, false);
        }
      });
    }
    execute(connections.shift());
  }
}
//
// Initialize step
// Kicks off the kerberos conversation for one connection: initialize the
// native auth process, run the first client-side transition, then hand the
// resulting payload to the server via MongoDBGSSAPIFirstStep.
var GSSAPIInitialize = function(db, username, password, authdb, gssapiServiceName, server, connection, callback) {
// Create authenticator
var mongo_auth_process = new MongoAuthProcess(connection.host, connection.port, gssapiServiceName);
// Perform initialization
mongo_auth_process.init(username, password, function(err, context) {
if(err) return callback(err, false);
// Perform the first client-side transition with an empty input payload
mongo_auth_process.transition('', function(err, payload) {
if(err) return callback(err, false);
// Call the next db step
MongoDBGSSAPIFirstStep(mongo_auth_process, payload, db, username, password, authdb, server, connection, callback);
});
});
}
//
// Perform first step against mongodb: saslStart with the initial kerberos payload
var MongoDBGSSAPIFirstStep = function(mongo_auth_process, payload, db, username, password, authdb, server, connection, callback) {
// Build the sasl start command
var command = {
saslStart: 1
, mechanism: 'GSSAPI'
, payload: payload
, autoAuthorize: 1
};
// Execute first sasl step
server.command("$external.$cmd"
, command
, { connection: connection }, function(err, r) {
if(err) return callback(err, false);
var doc = r.result;
// Feed the server's payload back through the kerberos client
mongo_auth_process.transition(r.result.payload, function(err, payload) {
if(err) return callback(err, false);
// MongoDB API Second Step
MongoDBGSSAPISecondStep(mongo_auth_process, payload, doc, db, username, password, authdb, server, connection, callback);
});
});
}
//
// Perform second step against mongodb: saslContinue with the next kerberos payload
// (original comment wrongly said "first step")
var MongoDBGSSAPISecondStep = function(mongo_auth_process, payload, doc, db, username, password, authdb, server, connection, callback) {
// Build Authentication command to send to MongoDB
var command = {
saslContinue: 1
, conversationId: doc.conversationId
, payload: payload
};
// Execute the command
server.command("$external.$cmd"
, command
, { connection: connection }, function(err, r) {
if(err) return callback(err, false);
var doc = r.result;
// Call next transition for kerberos
mongo_auth_process.transition(doc.payload, function(err, payload) {
if(err) return callback(err, false);
// Call the last and third step
MongoDBGSSAPIThirdStep(mongo_auth_process, payload, doc, db, username, password, authdb, server, connection, callback);
});
});
}
// Perform the third and final step: saslContinue, then a last client-side
// transition before reporting the overall result to the caller
var MongoDBGSSAPIThirdStep = function(mongo_auth_process, payload, doc, db, username, password, authdb, server, connection, callback) {
// Build final command
var command = {
saslContinue: 1
, conversationId: doc.conversationId
, payload: payload
};
// Execute the command
server.command("$external.$cmd"
, command
, { connection: connection }, function(err, r) {
if(err) return callback(err, false);
mongo_auth_process.transition(null, function(err, payload) {
if(err) return callback(err, null);
callback(null, r);
});
});
}
// Append the session to the store unless an equal session is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(existing) {
    return existing.equal(session);
  });
  if (!alreadyStored) authStore.push(session);
}
/**
 * Re authenticate pool
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
GSSAPI.prototype.reauthenticate = function(server, pool, callback) {
var count = this.authStore.length;
if(count == 0) return callback(null, null);
// Iterate over all the auth details stored
// NOTE(review): individual auth errors are discarded here; the callback
// always receives (null, null) once every stored session has been retried
for(var i = 0; i < this.authStore.length; i++) {
this.auth(server, pool, this.authStore[i].db, this.authStore[i].username, this.authStore[i].password, this.authStore[i].options, function(err, r) {
count = count - 1;
// Done re-authenticating
if(count == 0) {
callback(null, null);
}
});
}
}
/**
 * This is a result from an authentication strategy
 *
 * @callback authResultCallback
 * @param {error} error An error object. Set to null if no error present
 * @param {boolean} result The result of the authentication process
 */
module.exports = GSSAPI;

160
node_modules/mongodb-core/lib/auth/mongocr.js generated vendored Normal file
View File

@@ -0,0 +1,160 @@
"use strict";
var f = require('util').format
, crypto = require('crypto')
, MongoError = require('../error');
// Holds the credentials used to authenticate against a database so the
// pool can be re-authenticated later with the same details.
function AuthSession(db, username, password) {
  this.db = db;
  this.username = username;
  this.password = password;
}

// Two sessions are equal when db, username and password all match.
AuthSession.prototype.equal = function(other) {
  return other.db == this.db &&
    other.username == this.username &&
    other.password == this.password;
};
/**
 * Creates a new MongoCR authentication mechanism
 * @class
 * @return {MongoCR} the new mechanism; authStore collects the sessions that
 *   authenticated successfully so the pool can be re-authenticated later
 */
function MongoCR() {
  this.authStore = [];
}
// Append the session to the store unless an equal session is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(existing) {
    return existing.equal(session);
  });
  if (!alreadyStored) authStore.push(session);
}
/**
 * Authenticate
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
MongoCR.prototype.auth = function(server, pool, db, username, password, callback) {
var self = this;
// Get all the connections
var connections = pool.getAll();
// Total connections
var count = connections.length;
if(count == 0) return callback(null, null);
// Number of connections that authenticated successfully
var numberOfValidConnections = 0;
var credentialsValid = false;
var errorObject = null;
// Run the getnonce/authenticate handshake once per connection
while(connections.length > 0) {
// Execute MongoCR
var executeMongoCR = function(connection) {
// Ask the server for a nonce to mix into the credential hash
server.command(f("%s.$cmd", db)
, { getnonce: 1 }
, { connection: connection }, function(err, r) {
var nonce = null;
var key = null;
// Adjust the number of connections left
// Get nonce
if(err == null) {
nonce = r.result.nonce;
// Use node md5 generator
var md5 = crypto.createHash('md5');
// Generate keys used for authentication
md5.update(username + ":mongo:" + password);
var hash_password = md5.digest('hex');
// Final key: md5(nonce + username + md5(user:mongo:password))
md5 = crypto.createHash('md5');
md5.update(nonce + username + hash_password);
key = md5.digest('hex');
}
// NOTE(review): when getnonce fails, the authenticate command below is
// still sent with null nonce/key; the server rejects it and the failure
// is recorded through the normal error path
server.command(f("%s.$cmd", db)
, { authenticate: 1, user: username, nonce: nonce, key:key}
, { connection: connection }, function(err, r) {
count = count - 1;
// If we have an error
if(err) {
errorObject = err;
} else if(r.result['$err']) {
errorObject = r.result;
} else if(r.result['errmsg']) {
errorObject = r.result;
} else {
credentialsValid = true;
numberOfValidConnections = numberOfValidConnections + 1;
}
// All connections processed: success if at least one authenticated
if(count == 0 && numberOfValidConnections > 0) {
// Store the auth details for later reauthentication
addAuthSession(self.authStore, new AuthSession(db, username, password));
// Return correct authentication
callback(null, true);
} else if(count == 0) {
if(errorObject == null) errorObject = new MongoError(f("failed to authenticate using mongocr"));
callback(errorObject, false);
}
});
});
}
// Get the connection
executeMongoCR(connections.shift());
}
}
/**
 * Re authenticate pool
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
MongoCR.prototype.reauthenticate = function(server, pool, callback) {
var count = this.authStore.length;
if(count == 0) return callback(null, null);
// Iterate over all the auth details stored
// NOTE(review): individual auth errors are discarded; the callback always
// receives (null, null) once every stored session has been retried
for(var i = 0; i < this.authStore.length; i++) {
this.auth(server, pool, this.authStore[i].db, this.authStore[i].username, this.authStore[i].password, function(err, r) {
count = count - 1;
// Done re-authenticating
if(count == 0) {
callback(null, null);
}
});
}
}
/**
 * This is a result from an authentication strategy
 *
 * @callback authResultCallback
 * @param {error} error An error object. Set to null if no error present
 * @param {boolean} result The result of the authentication process
 */
module.exports = MongoCR;

150
node_modules/mongodb-core/lib/auth/plain.js generated vendored Normal file
View File

@@ -0,0 +1,150 @@
"use strict";
var f = require('util').format
, crypto = require('crypto')
, Binary = require('bson').Binary
, MongoError = require('../error');
// Holds the credentials used to authenticate against a database so the
// pool can be re-authenticated later with the same details.
function AuthSession(db, username, password) {
  this.db = db;
  this.username = username;
  this.password = password;
}

// Two sessions are equal when db, username and password all match.
AuthSession.prototype.equal = function(other) {
  return other.db == this.db &&
    other.username == this.username &&
    other.password == this.password;
};
/**
 * Creates a new Plain authentication mechanism
 * @class
 * @return {Plain} the new mechanism; authStore collects the sessions that
 *   authenticated successfully so the pool can be re-authenticated later
 */
function Plain() {
  this.authStore = [];
}
/**
 * Authenticate
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
Plain.prototype.auth = function(server, pool, db, username, password, callback) {
  var self = this;
  // Authenticate every live connection in the pool
  var connections = pool.getAll();
  var count = connections.length;
  if(count == 0) return callback(null, null);
  // Number of connections that authenticated successfully
  var numberOfValidConnections = 0;
  var errorObject = null;
  while(connections.length > 0) {
    // Run the SASL PLAIN exchange for a single connection
    var execute = function(connection) {
      // PLAIN payload: authzid NUL authcid NUL password (authzid left empty)
      var payload = new Binary(f("\x00%s\x00%s", username, password));
      var command = {
        saslStart: 1
        , mechanism: 'PLAIN'
        , payload: payload
        , autoAuthorize: 1
      };
      server.command("$external.$cmd"
        , command
        , { connection: connection }, function(err, r) {
        count = count - 1;
        // Record the outcome for this connection
        if(err) {
          errorObject = err;
        } else if(r.result['$err']) {
          errorObject = r.result;
        } else if(r.result['errmsg']) {
          errorObject = r.result;
        } else {
          numberOfValidConnections = numberOfValidConnections + 1;
        }
        // All connections processed: success if at least one authenticated
        if(count == 0 && numberOfValidConnections > 0) {
          // Store the auth details for later reauthentication
          addAuthSession(self.authStore, new AuthSession(db, username, password));
          callback(null, true);
        } else if(count == 0) {
          // Fixed: the original message said "mongocr" (copy/paste leftover)
          if(errorObject == null) errorObject = new MongoError(f("failed to authenticate using plain"));
          callback(errorObject, false);
        }
      });
    }
    execute(connections.shift());
  }
}
// Append the session to the store unless an equal session is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(existing) {
    return existing.equal(session);
  });
  if (!alreadyStored) authStore.push(session);
}
/**
 * Re authenticate pool
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
Plain.prototype.reauthenticate = function(server, pool, callback) {
var count = this.authStore.length;
if(count == 0) return callback(null, null);
// Iterate over all the auth details stored
// NOTE(review): individual auth errors are discarded; the callback always
// receives (null, null) once every stored session has been retried
for(var i = 0; i < this.authStore.length; i++) {
this.auth(server, pool, this.authStore[i].db, this.authStore[i].username, this.authStore[i].password, function(err, r) {
count = count - 1;
// Done re-authenticating
if(count == 0) {
callback(null, null);
}
});
}
}
/**
 * This is a result from an authentication strategy
 *
 * @callback authResultCallback
 * @param {error} error An error object. Set to null if no error present
 * @param {boolean} result The result of the authentication process
 */
module.exports = Plain;

317
node_modules/mongodb-core/lib/auth/scram.js generated vendored Normal file
View File

@@ -0,0 +1,317 @@
"use strict";
var f = require('util').format
, crypto = require('crypto')
, Binary = require('bson').Binary
, MongoError = require('../error');
// Holds the credentials used to authenticate against a database so the
// pool can be re-authenticated later with the same details.
function AuthSession(db, username, password) {
  this.db = db;
  this.username = username;
  this.password = password;
}

// Two sessions are equal when db, username and password all match.
AuthSession.prototype.equal = function(other) {
  return other.db == this.db &&
    other.username == this.username &&
    other.password == this.password;
};
/**
 * Creates a new ScramSHA1 authentication mechanism
 * @class
 * @return {ScramSHA1} the new mechanism; authStore collects the sessions that
 *   authenticated successfully so the pool can be re-authenticated later
 */
function ScramSHA1() {
  this.authStore = [];
}
// Parse a SCRAM server payload of the form "k1=v1,k2=v2,..." into a dict.
// Fixed: the original kept only valueParts[1], which truncated base64
// values containing '=' padding (e.g. the server salt "s=..." or the
// server signature "v=..."). The value is re-joined on '=' instead.
var parsePayload = function(payload) {
  var dict = {};
  var parts = payload.split(',');
  for(var i = 0; i < parts.length; i++) {
    var valueParts = parts[i].split('=');
    // Everything after the first '=' belongs to the value
    dict[valueParts[0]] = valueParts.slice(1).join('=');
  }
  return dict;
}
// Compute the MongoDB credential hash: md5("<username>:mongo:<password>").
// Throws a MongoError when either argument is not a string or the
// password is empty.
var passwordDigest = function(username, password) {
  if(typeof username != 'string') throw new MongoError("username must be a string");
  if(typeof password != 'string') throw new MongoError("password must be a string");
  if(password.length == 0) throw new MongoError("password cannot be empty");
  var hasher = crypto.createHash('md5');
  hasher.update(username + ":mongo:" + password);
  return hasher.digest('hex');
}
// XOR two byte sequences, truncating the result to the shorter input.
// Non-buffer arguments are converted to Buffers first.
var xor = function(a, b) {
  if (!Buffer.isBuffer(a)) a = new Buffer(a)
  if (!Buffer.isBuffer(b)) b = new Buffer(b)
  var length = Math.min(a.length, b.length);
  var out = [];
  for (var i = 0; i < length; i++) {
    out.push(a[i] ^ b[i]);
  }
  return new Buffer(out);
}
// Create a final digest
// Computes Hi(password, salt, i) from RFC 5802: an iterated HMAC-SHA1
// fold, equivalent to PBKDF2-HMAC-SHA1 with a single output block.
var hi = function(data, salt, iterations) {
// HMAC-SHA1 keyed with the (hashed) password
var digest = function(msg) {
var hmac = crypto.createHmac('sha1', data);
hmac.update(msg);
return new Buffer(hmac.digest('base64'), 'base64');
}
// Append INT(1) (big-endian 4-byte block index) to the salt per the spec
salt = Buffer.concat([salt, new Buffer('\x00\x00\x00\x01')])
var ui = digest(salt);
var u1 = ui;
// XOR-fold the successive HMAC outputs U1..Un together
for(var i = 0; i < iterations - 1; i++) {
u1 = digest(u1);
ui = xor(ui, u1);
}
return ui;
}
/**
 * Authenticate
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
ScramSHA1.prototype.auth = function(server, pool, db, username, password, callback) {
  var self = this;
  // Authenticate every live connection in the pool
  var connections = pool.getAll();
  var count = connections.length;
  if(count == 0) return callback(null, null);
  // Number of connections that completed the SCRAM conversation successfully
  var numberOfValidConnections = 0;
  var errorObject = null;
  while(connections.length > 0) {
    // Run the SCRAM-SHA-1 conversation for a single connection
    var executeScram = function(connection) {
      // Escape '=' and ',' in the username (RFC 5802 saslname encoding).
      // Fixed: the original reassigned the outer `username`, so every
      // connection (and every reauthentication) escaped the name again,
      // compounding the substitution and corrupting the stored session.
      var processedUsername = username.replace('=', "=3D").replace(',', '=2C');
      // Random client nonce for this conversation
      var nonce = crypto.randomBytes(24).toString('base64');
      var firstBare = f("n=%s,r=%s", processedUsername, nonce);
      // saslStart command; "n,," is the GS2 header for no channel binding
      var cmd = {
        saslStart: 1
        , mechanism: 'SCRAM-SHA-1'
        , payload: new Binary(f("n,,%s", firstBare))
        , autoAuthorize: 1
      }
      // Record the outcome of one round-trip; returns false on failure
      var handleError = function(err, r) {
        if(err) {
          // NOTE: decrement preserved from the original; it can only drive
          // the counter negative, which the > 0 checks treat as failure
          numberOfValidConnections = numberOfValidConnections - 1;
          errorObject = err; return false;
        } else if(r.result['$err']) {
          errorObject = r.result; return false;
        } else if(r.result['errmsg']) {
          errorObject = r.result; return false;
        } else {
          numberOfValidConnections = numberOfValidConnections + 1;
        }
        return true
      }
      // Invoke the final callback once every connection has been processed
      var finish = function(_count, _numberOfValidConnections) {
        if(_count == 0 && _numberOfValidConnections > 0) {
          // Store the raw (unescaped) credentials for later reauthentication
          addAuthSession(self.authStore, new AuthSession(db, username, password));
          return callback(null, true);
        } else if(_count == 0) {
          if(errorObject == null) errorObject = new MongoError(f("failed to authenticate using scram"));
          return callback(errorObject, false);
        }
      }
      var handleEnd = function(_err, _r) {
        // Handle any error
        handleError(_err, _r)
        // Adjust the number of connections
        count = count - 1;
        // Execute the finish
        finish(count, numberOfValidConnections);
      }
      // Execute start sasl command
      server.command(f("%s.$cmd", db)
        , cmd, { connection: connection }, function(err, r) {
        // First round-trip failed: tally this connection and possibly finish
        if(handleError(err, r) == false) {
          count = count - 1;
          finish(count, numberOfValidConnections);
          return;
        }
        // Parse the server-first message (r=nonce, s=salt, i=iterations)
        var dict = parsePayload(r.result.payload.value())
        var iterations = parseInt(dict.i, 10);
        var salt = dict.s;
        var rnonce = dict.r;
        // Start of the client-final message ("biws" = base64("n,,"))
        var withoutProof = f("c=biws,r=%s", rnonce);
        // NOTE(review): digest uses the escaped name, as the original did on
        // its first iteration — verify against server expectations for
        // usernames containing '=' or ','
        var passwordDig = passwordDigest(processedUsername, password);
        var saltedPassword = hi(passwordDig
          , new Buffer(salt, 'base64')
          , iterations);
        // ClientKey = HMAC(SaltedPassword, "Client Key")
        var hmac = crypto.createHmac('sha1', saltedPassword);
        hmac.update(new Buffer("Client Key"));
        var clientKey = new Buffer(hmac.digest('base64'), 'base64');
        // StoredKey = H(ClientKey)
        var hash = crypto.createHash('sha1');
        hash.update(clientKey);
        var storedKey = new Buffer(hash.digest('base64'), 'base64');
        // AuthMessage = client-first-bare , server-first , client-final-without-proof
        var authMsg = [firstBare, r.result.payload.value().toString('base64'), withoutProof].join(',');
        // ClientSignature = HMAC(StoredKey, AuthMessage)
        var hmac = crypto.createHmac('sha1', storedKey);
        hmac.update(new Buffer(authMsg));
        var clientSig = new Buffer(hmac.digest('base64'), 'base64');
        // ClientProof = ClientKey XOR ClientSignature
        var clientProof = f("p=%s", new Buffer(xor(clientKey, clientSig)).toString('base64'));
        // Create client final
        var clientFinal = [withoutProof, clientProof].join(',');
        // ServerKey/ServerSignature computed as in the original (the server
        // signature is not verified here)
        var hmac = crypto.createHmac('sha1', saltedPassword);
        hmac.update(new Buffer('Server Key'))
        var serverKey = new Buffer(hmac.digest('base64'), 'base64');
        var hmac = crypto.createHmac('sha1', serverKey);
        hmac.update(new Buffer(authMsg))
        var serverSig = new Buffer(hmac.digest('base64'), 'base64');
        //
        // Send the client-final message
        var cmd = {
          saslContinue: 1
          , conversationId: r.result.conversationId
          , payload: new Binary(new Buffer(clientFinal))
        }
        //
        // Execute sasl continue
        server.command(f("%s.$cmd", db)
          , cmd, { connection: connection }, function(err, r) {
          if(r && r.result.done == false) {
            // Server wants one more (empty) round before reporting done
            var cmd = {
              saslContinue: 1
              , conversationId: r.result.conversationId
              , payload: new Buffer(0)
            }
            server.command(f("%s.$cmd", db)
              , cmd, { connection: connection }, function(err, r) {
              handleEnd(err, r);
            });
          } else {
            handleEnd(err, r);
          }
        });
      });
    }
    // Get the connection
    executeScram(connections.shift());
  }
}
// Append the session to the store unless an equal session is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(existing) {
    return existing.equal(session);
  });
  if (!alreadyStored) authStore.push(session);
}
/**
 * Re authenticate pool
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
ScramSHA1.prototype.reauthenticate = function(server, pool, callback) {
var count = this.authStore.length;
if(count == 0) return callback(null, null);
// Iterate over all the auth details stored
// NOTE(review): individual auth errors are discarded; the callback always
// receives (null, null) once every stored session has been retried
for(var i = 0; i < this.authStore.length; i++) {
this.auth(server, pool, this.authStore[i].db, this.authStore[i].username, this.authStore[i].password, function(err, r) {
count = count - 1;
// Done re-authenticating
if(count == 0) {
callback(null, null);
}
});
}
}
module.exports = ScramSHA1;

234
node_modules/mongodb-core/lib/auth/sspi.js generated vendored Normal file
View File

@@ -0,0 +1,234 @@
"use strict";
var f = require('util').format
, crypto = require('crypto')
, MongoError = require('../error');
// Holds the credentials (plus mechanism options) used to authenticate
// against a database, so the pool can be re-authenticated later.
function AuthSession(db, username, password, options) {
  this.db = db;
  this.username = username;
  this.password = password;
  this.options = options;
}

// Two sessions are equal when db, username and password all match;
// options are intentionally not compared.
AuthSession.prototype.equal = function(other) {
  return other.db == this.db &&
    other.username == this.username &&
    other.password == this.password;
};
// Kerberos class
// The `kerberos` native add-on is an optional dependency: when it is not
// installed these stay null and auth() fails fast with a clear error
// instead of crashing at require time.
var Kerberos = null;
var MongoAuthProcess = null;
// Try to grab the Kerberos class
try {
Kerberos = require('kerberos').Kerberos
// Authentication process for Mongo
MongoAuthProcess = require('kerberos').processes.MongoAuthProcess
} catch(err) {}
/**
 * Creates a new SSPI authentication mechanism
 * @class
 * @return {SSPI} the new mechanism; authStore collects the sessions that
 *   authenticated successfully so the pool can be re-authenticated later
 */
function SSPI() {
  this.authStore = [];
}
/**
 * Authenticate
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {object} options Options (supports gssapiServiceName)
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
SSPI.prototype.auth = function(server, pool, db, username, password, options, callback) {
  var self = this;
  // The optional kerberos module could not be loaded; SSPI cannot work without it
  if(Kerberos == null) return callback(new Error("Kerberos library is not installed"));
  var gssapiServiceName = options['gssapiServiceName'] || 'mongodb';
  // Authenticate every live connection in the pool
  var connections = pool.getAll();
  var count = connections.length;
  if(count == 0) return callback(null, null);
  // Number of connections that authenticated successfully
  var numberOfValidConnections = 0;
  var errorObject = null;
  while(connections.length > 0) {
    // Run the SSPI conversation for a single connection
    var execute = function(connection) {
      SSIPAuthenticate(username, password, gssapiServiceName, server, connection, function(err, r) {
        count = count - 1;
        // Record the outcome for this connection; r may be a plain boolean
        // on success, hence the typeof guards
        if(err) {
          errorObject = err;
        } else if(r && typeof r == 'object' && r.result['$err']) {
          errorObject = r.result;
        } else if(r && typeof r == 'object' && r.result['errmsg']) {
          errorObject = r.result;
        } else {
          numberOfValidConnections = numberOfValidConnections + 1;
        }
        // All connections processed: success if at least one authenticated
        if(count == 0 && numberOfValidConnections > 0) {
          // Store the auth details for later reauthentication
          addAuthSession(self.authStore, new AuthSession(db, username, password, options));
          callback(null, true);
        } else if(count == 0) {
          // Fixed: the original message said "mongocr" (copy/paste leftover)
          if(errorObject == null) errorObject = new MongoError(f("failed to authenticate using sspi"));
          callback(errorObject, false);
        }
      });
    }
    execute(connections.shift());
  }
}
// Runs the Windows SSPI/GSSAPI conversation for a single connection: a
// saslStart followed by three saslContinue round-trips, interleaved with
// client-side kerberos transitions.
// NOTE(review): "SSIP" is a long-standing typo for "SSPI"; the name is kept
// because the function is referenced by this name elsewhere in this file.
var SSIPAuthenticate = function(username, password, gssapiServiceName, server, connection, callback) {
// Build Authentication command to send to MongoDB
var command = {
saslStart: 1
, mechanism: 'GSSAPI'
, payload: ''
, autoAuthorize: 1
};
// Create authenticator
var mongo_auth_process = new MongoAuthProcess(connection.host, connection.port, gssapiServiceName);
// Execute first sasl step
server.command("$external.$cmd"
, command
, { connection: connection }, function(err, r) {
if(err) return callback(err, false);
var doc = r.result;
// Initialize the SSPI auth process with the user credentials
mongo_auth_process.init(username, password, function(err) {
if(err) return callback(err);
mongo_auth_process.transition(doc.payload, function(err, payload) {
if(err) return callback(err);
// Perform the next step against mongod
var command = {
saslContinue: 1
, conversationId: doc.conversationId
, payload: payload
};
// Execute the command
server.command("$external.$cmd"
, command
, { connection: connection }, function(err, r) {
if(err) return callback(err, false);
var doc = r.result;
mongo_auth_process.transition(doc.payload, function(err, payload) {
if(err) return callback(err);
// Perform the next step against mongod
var command = {
saslContinue: 1
, conversationId: doc.conversationId
, payload: payload
};
// Execute the command
server.command("$external.$cmd"
, command
, { connection: connection }, function(err, r) {
if(err) return callback(err, false);
var doc = r.result;
// NOTE(review): unlike the earlier steps, this transition's error is
// not checked before the final command is sent
mongo_auth_process.transition(doc.payload, function(err, payload) {
// Perform the next step against mongod
var command = {
saslContinue: 1
, conversationId: doc.conversationId
, payload: payload
};
// Execute the command
server.command("$external.$cmd"
, command
, { connection: connection }, function(err, r) {
if(err) return callback(err, false);
var doc = r.result;
// The server reports done once the kerberos handshake is complete
if(doc.done) return callback(null, true);
callback(new Error("Authentication failed"), false);
});
});
});
});
});
});
});
});
}
// Append the session to the store unless an equal session is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(existing) {
    return existing.equal(session);
  });
  if (!alreadyStored) authStore.push(session);
}
/**
 * Re authenticate pool
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
SSPI.prototype.reauthenticate = function(server, pool, callback) {
var count = this.authStore.length;
if(count == 0) return callback(null, null);
// Iterate over all the auth details stored
// NOTE(review): individual auth errors are discarded; the callback always
// receives (null, null) once every stored session has been retried
for(var i = 0; i < this.authStore.length; i++) {
this.auth(server, pool, this.authStore[i].db, this.authStore[i].username, this.authStore[i].password, this.authStore[i].options, function(err, r) {
count = count - 1;
// Done re-authenticating
if(count == 0) {
callback(null, null);
}
});
}
}
/**
 * This is a result from an authentication strategy
 *
 * @callback authResultCallback
 * @param {error} error An error object. Set to null if no error present
 * @param {boolean} result The result of the authentication process
 */
module.exports = SSPI;

145
node_modules/mongodb-core/lib/auth/x509.js generated vendored Normal file
View File

@@ -0,0 +1,145 @@
"use strict";
var f = require('util').format
, crypto = require('crypto')
, MongoError = require('../error');
// Holds the credentials used to authenticate against a database so the
// pool can be re-authenticated later with the same details.
function AuthSession(db, username, password) {
  this.db = db;
  this.username = username;
  this.password = password;
}

// Two sessions are equal when db, username and password all match.
AuthSession.prototype.equal = function(other) {
  return other.db == this.db &&
    other.username == this.username &&
    other.password == this.password;
};
/**
 * Creates a new X509 authentication mechanism
 * @class
 * @return {X509} the new mechanism; authStore collects the sessions that
 *   authenticated successfully so the pool can be re-authenticated later
 */
function X509() {
  this.authStore = [];
}
/**
 * Authenticate
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
X509.prototype.auth = function(server, pool, db, username, password, callback) {
  var self = this;
  // Authenticate every live connection in the pool
  var connections = pool.getAll();
  var count = connections.length;
  if(count == 0) return callback(null, null);
  // Number of connections that authenticated successfully
  var numberOfValidConnections = 0;
  var errorObject = null;
  while(connections.length > 0) {
    // Run the MONGODB-X509 exchange for a single connection
    var execute = function(connection) {
      // Identity is proven by the client TLS certificate, so the command
      // carries only the username
      var command = {
        authenticate: 1
        , mechanism: 'MONGODB-X509'
        , user: username
      };
      server.command("$external.$cmd"
        , command
        , { connection: connection }, function(err, r) {
        count = count - 1;
        // Record the outcome for this connection
        if(err) {
          errorObject = err;
        } else if(r.result['$err']) {
          errorObject = r.result;
        } else if(r.result['errmsg']) {
          errorObject = r.result;
        } else {
          numberOfValidConnections = numberOfValidConnections + 1;
        }
        // All connections processed: success if at least one authenticated
        if(count == 0 && numberOfValidConnections > 0) {
          // Store the auth details for later reauthentication
          addAuthSession(self.authStore, new AuthSession(db, username, password));
          callback(null, true);
        } else if(count == 0) {
          // Fixed: the original message said "mongocr" (copy/paste leftover)
          if(errorObject == null) errorObject = new MongoError(f("failed to authenticate using x509"));
          callback(errorObject, false);
        }
      });
    }
    execute(connections.shift());
  }
}
// Add the session to the store, skipping it when an equivalent session
// (per session.equal) is already present.
var addAuthSession = function(authStore, session) {
  for(var i = 0; i < authStore.length; i++) {
    // An equivalent session already exists; nothing to do.
    if(authStore[i].equal(session)) return;
  }
  authStore.push(session);
}
/**
 * Re-authenticate the pool by replaying every stored auth session.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {authResultCallback} callback Fired once every stored session has been retried
 * @return {object}
 */
X509.prototype.reauthenticate = function(server, pool, callback) {
  // Number of stored sessions still pending re-authentication.
  var remaining = this.authStore.length;
  if(remaining == 0) return callback(null, null);

  for(var i = 0; i < this.authStore.length; i++) {
    var session = this.authStore[i];
    this.auth(server, pool, session.db, session.username, session.password, function(err, r) {
      remaining = remaining - 1;
      // Fire the callback once after the final session finishes.
      if(remaining == 0) {
        callback(null, null);
      }
    });
  }
}
/**
* This is a result from a authentication strategy
*
* @callback authResultCallback
* @param {error} error An error object. Set to null if no error present
* @param {boolean} result The result of the authentication process
*/
module.exports = X509;

539
node_modules/mongodb-core/lib/connection/commands.js generated vendored Normal file
View File

@@ -0,0 +1,539 @@
"use strict";
var f = require('util').format
, Long = require('bson').Long
, setProperty = require('./utils').setProperty
, getProperty = require('./utils').getProperty
, getSingleProperty = require('./utils').getSingleProperty;
// Incrementing request id (shared by every message type in this module)
var _requestId = 0;

// Wire command operation ids
var OP_QUERY = 2004;
var OP_GETMORE = 2005;
var OP_KILL_CURSORS = 2007;

// Query flags (bit masks of the OP_QUERY flags field; bit 0 is reserved)
var OPTS_NONE = 0;
var OPTS_TAILABLE_CURSOR = 2;
var OPTS_SLAVE = 4;
var OPTS_OPLOG_REPLAY = 8;
var OPTS_NO_CURSOR_TIMEOUT = 16;
var OPTS_AWAIT_DATA = 32;
var OPTS_EXHAUST = 64;
var OPTS_PARTIAL = 128;

// Response flags (bit masks of the OP_REPLY responseFlags field).
// Fix: CursorNotFound is bit 0 (value 1) in the wire protocol — consistent
// with QUERY_FAILURE=2, SHARD_CONFIG_STALE=4, AWAIT_CAPABLE=8. It was
// previously defined as 0, which made `responseFlags & CURSOR_NOT_FOUND`
// always evaluate to 0 and Response.cursorNotFound always false.
var CURSOR_NOT_FOUND = 1;
var QUERY_FAILURE = 2;
var SHARD_CONFIG_STALE = 4;
var AWAIT_CAPABLE = 8;
/**************************************************************
* QUERY
**************************************************************/
/**
 * Wire protocol OP_QUERY message builder.
 *
 * @param {object} bson BSON serializer instance used by toBin()
 * @param {string} ns Full namespace ("db.collection"); must not contain a null byte
 * @param {object} query The query document to serialize
 * @param {object} [options] Optional settings: numberToSkip, numberToReturn,
 *   returnFieldSelector, serializeFunctions, ignoreUndefined, maxBsonSize, checkKeys
 * @throws {Error} When ns or query is missing, or ns contains \x00
 */
var Query = function(bson, ns, query, options) {
  var self = this;
  // Basic options needed to be passed in
  if(ns == null) throw new Error("ns must be specified for query");
  if(query == null) throw new Error("query must be specified for query");

  // Validate that we are not passing 0x00 in the collection name
  if(!!~ns.indexOf("\x00")) {
    throw new Error("namespace cannot contain a null character");
  }

  // Basic options
  this.bson = bson;
  this.ns = ns;
  this.query = query;

  // Ensure empty options. Fix: normalize the local `options` binding too —
  // previously only this.options was defaulted while the property reads
  // below dereferenced the raw argument, so omitting options threw a
  // TypeError instead of applying the documented defaults.
  options = options || {};
  this.options = options;

  // Additional options
  this.numberToSkip = options.numberToSkip || 0;
  this.numberToReturn = options.numberToReturn || 0;
  this.returnFieldSelector = options.returnFieldSelector || null;
  this.requestId = _requestId++;

  // Serialization options
  this.serializeFunctions = typeof options.serializeFunctions == 'boolean' ? options.serializeFunctions : false;
  this.ignoreUndefined = typeof options.ignoreUndefined == 'boolean' ? options.ignoreUndefined : false;
  this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16;
  this.checkKeys = typeof options.checkKeys == 'boolean' ? options.checkKeys : true;
  this.batchSize = self.numberToReturn;

  // Flags (all off by default; toggled by the cursor layer before toBin())
  this.tailable = false;
  this.slaveOk = false;
  this.oplogReplay = false;
  this.noCursorTimeout = false;
  this.awaitData = false;
  this.exhaust = false;
  this.partial = false;
}
//
// Assign a new request Id
Query.prototype.incRequestId = function() {
  // Take the current global request id for this query, then advance it.
  this.requestId = _requestId;
  _requestId = _requestId + 1;
}
//
// Peek at the request id that would be assigned next. Note: unlike
// getRequestId, this does NOT advance the shared counter.
Query.nextRequestId = function() {
return _requestId + 1;
}
//
// Uses a single allocated buffer for the process, avoiding multiple memory allocations
//
// Serialize this OP_QUERY message. Uses a single allocated header buffer,
// avoiding multiple memory allocations; returns an array of buffers:
// [header, query bson, optional projection bson]. All integers are written
// little endian.
Query.prototype.toBin = function() {
  var self = this;
  var buffers = [];
  var projection = null;

  // Set up the flags
  var flags = 0;
  if(this.tailable) {
    flags |= OPTS_TAILABLE_CURSOR;
  }
  if(this.slaveOk) {
    flags |= OPTS_SLAVE;
  }
  if(this.oplogReplay) {
    flags |= OPTS_OPLOG_REPLAY;
  }
  if(this.noCursorTimeout) {
    flags |= OPTS_NO_CURSOR_TIMEOUT;
  }
  if(this.awaitData) {
    flags |= OPTS_AWAIT_DATA;
  }
  if(this.exhaust) {
    flags |= OPTS_EXHAUST;
  }
  if(this.partial) {
    flags |= OPTS_PARTIAL;
  }

  // If batchSize is different to self.numberToReturn
  if(self.batchSize != self.numberToReturn) self.numberToReturn = self.batchSize;

  // Allocate write protocol header buffer
  var header = new Buffer(
    4 * 4 // Header
    + 4 // Flags
    + Buffer.byteLength(self.ns) + 1 // namespace
    + 4 // numberToSkip
    + 4 // numberToReturn
  );

  // Add header to buffers
  buffers.push(header);

  // Serialize the query
  var query = self.bson.serialize(this.query
    , this.checkKeys
    , true
    , this.serializeFunctions
    , 0, this.ignoreUndefined);

  // Add query document
  buffers.push(query);

  if(self.returnFieldSelector && Object.keys(self.returnFieldSelector).length > 0) {
    // Serialize the projection document.
    // Fix: pass the starting index (0) before ignoreUndefined so the
    // argument order matches the query serialization above; previously
    // ignoreUndefined was passed in the index position and the
    // ignoreUndefined flag was never applied to projections.
    projection = self.bson.serialize(this.returnFieldSelector
      , this.checkKeys
      , true
      , this.serializeFunctions
      , 0, this.ignoreUndefined);
    // Add projection document
    buffers.push(projection);
  }

  // Total message size
  var totalLength = header.length + query.length + (projection ? projection.length : 0);

  // Set up the index
  var index = 4;

  // Write total document length
  header[3] = (totalLength >> 24) & 0xff;
  header[2] = (totalLength >> 16) & 0xff;
  header[1] = (totalLength >> 8) & 0xff;
  header[0] = (totalLength) & 0xff;

  // Write header information requestId
  header[index + 3] = (this.requestId >> 24) & 0xff;
  header[index + 2] = (this.requestId >> 16) & 0xff;
  header[index + 1] = (this.requestId >> 8) & 0xff;
  header[index] = (this.requestId) & 0xff;
  index = index + 4;

  // Write header information responseTo (always 0 for a request)
  header[index + 3] = (0 >> 24) & 0xff;
  header[index + 2] = (0 >> 16) & 0xff;
  header[index + 1] = (0 >> 8) & 0xff;
  header[index] = (0) & 0xff;
  index = index + 4;

  // Write header information OP_QUERY
  header[index + 3] = (OP_QUERY >> 24) & 0xff;
  header[index + 2] = (OP_QUERY >> 16) & 0xff;
  header[index + 1] = (OP_QUERY >> 8) & 0xff;
  header[index] = (OP_QUERY) & 0xff;
  index = index + 4;

  // Write header information flags
  header[index + 3] = (flags >> 24) & 0xff;
  header[index + 2] = (flags >> 16) & 0xff;
  header[index + 1] = (flags >> 8) & 0xff;
  header[index] = (flags) & 0xff;
  index = index + 4;

  // Write collection name as a null terminated cstring
  index = index + header.write(this.ns, index, 'utf8') + 1;
  header[index - 1] = 0;

  // Write header information flags numberToSkip
  header[index + 3] = (this.numberToSkip >> 24) & 0xff;
  header[index + 2] = (this.numberToSkip >> 16) & 0xff;
  header[index + 1] = (this.numberToSkip >> 8) & 0xff;
  header[index] = (this.numberToSkip) & 0xff;
  index = index + 4;

  // Write header information flags numberToReturn
  header[index + 3] = (this.numberToReturn >> 24) & 0xff;
  header[index + 2] = (this.numberToReturn >> 16) & 0xff;
  header[index + 1] = (this.numberToReturn >> 8) & 0xff;
  header[index] = (this.numberToReturn) & 0xff;
  index = index + 4;

  // Return the buffers
  return buffers;
}
Query.getRequestId = function() {
  // Reserve and return a fresh request id from the shared counter.
  _requestId = _requestId + 1;
  return _requestId;
}
/**************************************************************
* GETMORE
**************************************************************/
/**
 * Wire protocol OP_GETMORE message builder.
 * @param {object} bson BSON serializer (stored for parity, not used by toBin)
 * @param {string} ns Full namespace the cursor was opened on
 * @param {Long} cursorId Server side cursor id to drain
 * @param {object} [opts] Optional settings (numberToReturn)
 */
var GetMore = function(bson, ns, cursorId, opts) {
  var settings = opts || {};
  this.bson = bson;
  this.ns = ns;
  this.cursorId = cursorId;
  this.numberToReturn = settings.numberToReturn || 0;
  this.requestId = _requestId++;
}
//
// Uses a single allocated buffer for the process, avoiding multiple memory allocations.
//
// OP_GETMORE layout (all int32/int64 values little endian):
//   messageLength | requestId | responseTo(0) | opCode(OP_GETMORE)
//   | ZERO | cstring ns | numberToReturn | int64 cursorId
GetMore.prototype.toBin = function() {
  var length = 4 + Buffer.byteLength(this.ns) + 1 + 4 + 8 + (4 * 4);
  // Create command buffer
  var index = 0;
  // Allocate buffer
  var _buffer = new Buffer(length);

  // Write header information
  // index = write32bit(index, _buffer, length);
  _buffer[index + 3] = (length >> 24) & 0xff;
  _buffer[index + 2] = (length >> 16) & 0xff;
  _buffer[index + 1] = (length >> 8) & 0xff;
  _buffer[index] = (length) & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, requestId);
  _buffer[index + 3] = (this.requestId >> 24) & 0xff;
  _buffer[index + 2] = (this.requestId >> 16) & 0xff;
  _buffer[index + 1] = (this.requestId >> 8) & 0xff;
  _buffer[index] = (this.requestId) & 0xff;
  index = index + 4;

  // responseTo is always zero for a request
  // index = write32bit(index, _buffer, 0);
  _buffer[index + 3] = (0 >> 24) & 0xff;
  _buffer[index + 2] = (0 >> 16) & 0xff;
  _buffer[index + 1] = (0 >> 8) & 0xff;
  _buffer[index] = (0) & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, OP_GETMORE);
  _buffer[index + 3] = (OP_GETMORE >> 24) & 0xff;
  _buffer[index + 2] = (OP_GETMORE >> 16) & 0xff;
  _buffer[index + 1] = (OP_GETMORE >> 8) & 0xff;
  _buffer[index] = (OP_GETMORE) & 0xff;
  index = index + 4;

  // Reserved ZERO field of the OP_GETMORE body
  // index = write32bit(index, _buffer, 0);
  _buffer[index + 3] = (0 >> 24) & 0xff;
  _buffer[index + 2] = (0 >> 16) & 0xff;
  _buffer[index + 1] = (0 >> 8) & 0xff;
  _buffer[index] = (0) & 0xff;
  index = index + 4;

  // Write collection name as a null terminated cstring
  index = index + _buffer.write(this.ns, index, 'utf8') + 1;
  _buffer[index - 1] = 0;

  // Write batch size
  // index = write32bit(index, _buffer, numberToReturn);
  _buffer[index + 3] = (this.numberToReturn >> 24) & 0xff;
  _buffer[index + 2] = (this.numberToReturn >> 16) & 0xff;
  _buffer[index + 1] = (this.numberToReturn >> 8) & 0xff;
  _buffer[index] = (this.numberToReturn) & 0xff;
  index = index + 4;

  // Write cursor id (int64 as two little endian int32 halves, low bits first)
  // index = write32bit(index, _buffer, cursorId.getLowBits());
  _buffer[index + 3] = (this.cursorId.getLowBits() >> 24) & 0xff;
  _buffer[index + 2] = (this.cursorId.getLowBits() >> 16) & 0xff;
  _buffer[index + 1] = (this.cursorId.getLowBits() >> 8) & 0xff;
  _buffer[index] = (this.cursorId.getLowBits()) & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, cursorId.getHighBits());
  _buffer[index + 3] = (this.cursorId.getHighBits() >> 24) & 0xff;
  _buffer[index + 2] = (this.cursorId.getHighBits() >> 16) & 0xff;
  _buffer[index + 1] = (this.cursorId.getHighBits() >> 8) & 0xff;
  _buffer[index] = (this.cursorId.getHighBits()) & 0xff;
  index = index + 4;

  // Return buffer
  return _buffer;
}
/**************************************************************
* KILLCURSOR
**************************************************************/
/**
 * Wire protocol OP_KILL_CURSORS message builder.
 * @param {object} bson BSON serializer (unused by this opcode)
 * @param {Array} cursorIds Server side cursor ids (Long) to close
 */
var KillCursor = function(bson, cursorIds) {
  this.cursorIds = cursorIds;
  this.requestId = _requestId++;
}
//
// Uses a single allocated buffer for the process, avoiding multiple memory allocations.
//
// OP_KILL_CURSORS layout (all int32/int64 values little endian):
//   messageLength | requestId | responseTo(0) | opCode(OP_KILL_CURSORS)
//   | ZERO | numberOfCursorIds | int64 cursorId ...
KillCursor.prototype.toBin = function() {
  var length = 4 + 4 + (4 * 4) + (this.cursorIds.length * 8);

  // Create command buffer
  var index = 0;
  var _buffer = new Buffer(length);

  // Write header information
  // index = write32bit(index, _buffer, length);
  _buffer[index + 3] = (length >> 24) & 0xff;
  _buffer[index + 2] = (length >> 16) & 0xff;
  _buffer[index + 1] = (length >> 8) & 0xff;
  _buffer[index] = (length) & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, requestId);
  _buffer[index + 3] = (this.requestId >> 24) & 0xff;
  _buffer[index + 2] = (this.requestId >> 16) & 0xff;
  _buffer[index + 1] = (this.requestId >> 8) & 0xff;
  _buffer[index] = (this.requestId) & 0xff;
  index = index + 4;

  // responseTo is always zero for a request
  // index = write32bit(index, _buffer, 0);
  _buffer[index + 3] = (0 >> 24) & 0xff;
  _buffer[index + 2] = (0 >> 16) & 0xff;
  _buffer[index + 1] = (0 >> 8) & 0xff;
  _buffer[index] = (0) & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, OP_KILL_CURSORS);
  _buffer[index + 3] = (OP_KILL_CURSORS >> 24) & 0xff;
  _buffer[index + 2] = (OP_KILL_CURSORS >> 16) & 0xff;
  _buffer[index + 1] = (OP_KILL_CURSORS >> 8) & 0xff;
  _buffer[index] = (OP_KILL_CURSORS) & 0xff;
  index = index + 4;

  // Reserved ZERO field of the OP_KILL_CURSORS body
  // index = write32bit(index, _buffer, 0);
  _buffer[index + 3] = (0 >> 24) & 0xff;
  _buffer[index + 2] = (0 >> 16) & 0xff;
  _buffer[index + 1] = (0 >> 8) & 0xff;
  _buffer[index] = (0) & 0xff;
  index = index + 4;

  // Write the number of cursor ids that follow
  // index = write32bit(index, _buffer, this.cursorIds.length);
  _buffer[index + 3] = (this.cursorIds.length >> 24) & 0xff;
  _buffer[index + 2] = (this.cursorIds.length >> 16) & 0xff;
  _buffer[index + 1] = (this.cursorIds.length >> 8) & 0xff;
  _buffer[index] = (this.cursorIds.length) & 0xff;
  index = index + 4;

  // Write all the cursor ids into the array
  for(var i = 0; i < this.cursorIds.length; i++) {
    // Write cursor id (int64 as two little endian int32 halves, low bits first)
    // index = write32bit(index, _buffer, cursorIds[i].getLowBits());
    _buffer[index + 3] = (this.cursorIds[i].getLowBits() >> 24) & 0xff;
    _buffer[index + 2] = (this.cursorIds[i].getLowBits() >> 16) & 0xff;
    _buffer[index + 1] = (this.cursorIds[i].getLowBits() >> 8) & 0xff;
    _buffer[index] = (this.cursorIds[i].getLowBits()) & 0xff;
    index = index + 4;

    // index = write32bit(index, _buffer, cursorIds[i].getHighBits());
    _buffer[index + 3] = (this.cursorIds[i].getHighBits() >> 24) & 0xff;
    _buffer[index + 2] = (this.cursorIds[i].getHighBits() >> 16) & 0xff;
    _buffer[index + 1] = (this.cursorIds[i].getHighBits() >> 8) & 0xff;
    _buffer[index] = (this.cursorIds[i].getHighBits()) & 0xff;
    index = index + 4;
  }

  // Return buffer
  return _buffer;
}
// Wraps one OP_REPLY message. The fixed 36 byte header is decoded eagerly
// here; the BSON documents themselves are decoded lazily via parse().
var Response = function(bson, data, opts) {
  opts = opts || {promoteLongs: true};
  this.parsed = false;

  //
  // Parse Header
  //
  this.index = 0;
  this.raw = data;
  this.data = data;
  this.bson = bson;
  this.opts = opts;

  // Read the message length (all header fields are little endian int32)
  this.length = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
  this.index = this.index + 4;

  // Fetch the request id for this reply
  this.requestId = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
  this.index = this.index + 4;

  // Fetch the id of the request that triggered the response
  this.responseTo = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
  this.index = this.index + 4;

  // Skip op-code field (always OP_REPLY for a server response)
  this.index = this.index + 4;

  // Unpack flags
  this.responseFlags = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
  this.index = this.index + 4;

  // Unpack the cursor (int64 stored as two little endian int32 halves)
  var lowBits = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
  this.index = this.index + 4;
  var highBits = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
  this.index = this.index + 4;
  // Create long object
  this.cursorId = new Long(lowBits, highBits);

  // Unpack the starting from
  this.startingFrom = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
  this.index = this.index + 4;

  // Unpack the number of objects returned
  this.numberReturned = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
  this.index = this.index + 4;

  // Preallocate document array (filled in by parse())
  this.documents = new Array(this.numberReturned);

  // Flag values derived from responseFlags using the bit masks defined at
  // the top of this file.
  this.cursorNotFound = (this.responseFlags & CURSOR_NOT_FOUND) != 0;
  this.queryFailure = (this.responseFlags & QUERY_FAILURE) != 0;
  this.shardConfigStale = (this.responseFlags & SHARD_CONFIG_STALE) != 0;
  this.awaitCapable = (this.responseFlags & AWAIT_CAPABLE) != 0;
  this.promoteLongs = typeof opts.promoteLongs == 'boolean' ? opts.promoteLongs : true;
}
Response.prototype.isParsed = function() {
  // True once parse() has materialized this.documents.
  return this.parsed == true;
}
// Validation buffers: hex-encoded forms of the command cursor field names
// ('firstBatch', 'nextBatch', 'id').
// NOTE(review): not referenced anywhere in this chunk of the file — confirm
// usage in the rest of the file before removing.
var firstBatch = new Buffer('firstBatch', 'utf8');
var nextBatch = new Buffer('nextBatch', 'utf8');
var cursorId = new Buffer('id', 'utf8').toString('hex');

var documentBuffers = {
  firstBatch: firstBatch.toString('hex'),
  nextBatch: nextBatch.toString('hex')
};
// Deserialize the reply body into this.documents.
//   options.raw - return documents as raw BSON buffers instead of objects
//   options.documentsReturnedIn - for command cursor replies, the field name
//     (e.g. 'firstBatch'/'nextBatch') whose embedded documents should be
//     extracted and kept in raw form
Response.prototype.parse = function(options) {
  // Don't parse again if not needed
  if(this.parsed) return;
  options = options || {};

  // Allow the return of raw documents instead of parsing
  var raw = options.raw || false;
  var documentsReturnedIn = options.documentsReturnedIn || null;

  //
  // Single document and documentsReturnedIn set
  //
  if(this.numberReturned == 1 && documentsReturnedIn != null && raw) {
    // Calculate the bson size (little endian int32 prefix of the document)
    var bsonSize = this.data[this.index] | this.data[this.index + 1] << 8 | this.data[this.index + 2] << 16 | this.data[this.index + 3] << 24;
    // Slice out the buffer containing the command result document
    var document = this.data.slice(this.index, this.index + bsonSize);
    // Set up field we wish to keep as raw
    var fieldsAsRaw = {}
    fieldsAsRaw[documentsReturnedIn] = true;
    // Set up the options
    var _options = {promoteLongs: this.opts.promoteLongs, fieldsAsRaw: fieldsAsRaw};

    // Deserialize but keep the array of documents in non-parsed form
    var doc = this.bson.deserialize(document, _options);

    // Get the documents
    this.documents = doc.cursor[documentsReturnedIn];
    this.numberReturned = this.documents.length;
    // Ensure we have a Long value cursor id
    this.cursorId = typeof doc.cursor.id == 'number'
      ? Long.fromNumber(doc.cursor.id)
      : doc.cursor.id;

    // Adjust the index
    this.index = this.index + bsonSize;

    // Set as parsed
    this.parsed = true
    return;
  }

  //
  // Parse Body
  //
  for(var i = 0; i < this.numberReturned; i++) {
    var bsonSize = this.data[this.index] | this.data[this.index + 1] << 8 | this.data[this.index + 2] << 16 | this.data[this.index + 3] << 24;
    // Parse options
    var _options = {promoteLongs: this.opts.promoteLongs};

    // If we have raw results specified slice the return document
    if(raw) {
      this.documents[i] = this.data.slice(this.index, this.index + bsonSize);
    } else {
      this.documents[i] = this.bson.deserialize(this.data.slice(this.index, this.index + bsonSize), _options);
    }

    // Adjust the index
    this.index = this.index + bsonSize;
  }

  // Set parsed
  this.parsed = true;
}
// Public wire protocol message builders and the reply parser.
module.exports = {
  Query: Query
  , GetMore: GetMore
  , Response: Response
  , KillCursor: KillCursor
}

488
node_modules/mongodb-core/lib/connection/connection.js generated vendored Normal file
View File

@@ -0,0 +1,488 @@
"use strict";
var inherits = require('util').inherits
, EventEmitter = require('events').EventEmitter
, net = require('net')
, tls = require('tls')
, f = require('util').format
, getSingleProperty = require('./utils').getSingleProperty
, debugOptions = require('./utils').debugOptions
, Response = require('./commands').Response
, MongoError = require('../error')
, Logger = require('./logger');
// Monotonically increasing id handed to each Connection instance (debug aid).
var _id = 0;

// Option keys included when logging connection options at debug level.
var debugFields = ['host', 'port', 'size', 'keepAlive', 'keepAliveInitialDelay', 'noDelay'
  , 'connectionTimeout', 'socketTimeout', 'singleBufferSerializtion', 'ssl', 'ca', 'cert'
  , 'rejectUnauthorized', 'promoteLongs', 'checkServerIdentity'];
/**
* Creates a new Connection instance
* @class
* @param {string} options.host The server host
* @param {number} options.port The server port
* @param {number} [options.size=5] Server connection pool size
* @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
* @param {number} [options.keepAliveInitialDelay=0] Initial delay before TCP keep alive enabled
* @param {boolean} [options.noDelay=true] TCP Connection no delay
* @param {number} [options.connectionTimeout=0] TCP Connection timeout setting
* @param {number} [options.socketTimeout=0] TCP Socket timeout setting
* @param {boolean} [options.singleBufferSerializtion=true] Serialize into single buffer, trade of peak memory for serialization speed
* @param {boolean} [options.ssl=false] Use SSL for connection
* @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function.
* @param {Buffer} [options.ca] SSL Certificate store binary buffer
* @param {Buffer} [options.cert] SSL Certificate binary buffer
* @param {Buffer} [options.key] SSL Key file binary buffer
* @param {string} [options.passphrase] SSL Certificate pass phrase
* @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates
* @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits
* @fires Connection#connect
* @fires Connection#close
* @fires Connection#error
* @fires Connection#timeout
* @fires Connection#parseError
* @return {Connection} A cursor instance
*/
var Connection = function(options) {
  // Add event listener
  EventEmitter.call(this);
  // Set empty if no options passed.
  // NOTE(review): `options` itself is dereferenced below (options.bson etc.),
  // so the argument is effectively required — calling with no options throws
  // a TypeError before reaching the explicit bson check. Confirm intent.
  this.options = options || {};
  // Identification information
  this.id = _id++;
  // Logger instance
  this.logger = Logger('Connection', options);
  // No bson parser passed in
  if(!options.bson) throw new Error("must pass in valid bson parser");
  // Get bson parser
  this.bson = options.bson;
  // Grouping tag used for debugging purposes
  this.tag = options.tag;
  // Message handler invoked with each parsed Response (see dataHandler)
  this.messageHandler = options.messageHandler;
  // Max BSON message size (default 64MB)
  this.maxBsonMessageSize = options.maxBsonMessageSize || (1024 * 1024 * 16 * 4);
  // Debug information
  if(this.logger.isDebug()) this.logger.debug(f('creating connection %s with options [%s]', this.id, JSON.stringify(debugOptions(debugFields, options))));
  // Default options
  this.port = options.port || 27017;
  this.host = options.host || 'localhost';
  this.keepAlive = typeof options.keepAlive == 'boolean' ? options.keepAlive : true;
  this.keepAliveInitialDelay = options.keepAliveInitialDelay || 0;
  this.noDelay = typeof options.noDelay == 'boolean' ? options.noDelay : true;
  this.connectionTimeout = options.connectionTimeout || 0;
  this.socketTimeout = options.socketTimeout || 0;
  // If connection was destroyed
  this.destroyed = false;
  // Check if we have a domain socket: a '/' in the host means it is a
  // filesystem path rather than a hostname
  this.domainSocket = this.host.indexOf('\/') != -1;
  // Serialize commands using function.
  // NOTE: 'singleBufferSerializtion' (sic) is the option's public spelling.
  this.singleBufferSerializtion = typeof options.singleBufferSerializtion == 'boolean' ? options.singleBufferSerializtion : true;
  this.serializationFunction = this.singleBufferSerializtion ? 'toBinUnified' : 'toBin';
  // SSL options
  this.ca = options.ca || null;
  this.cert = options.cert || null;
  this.key = options.key || null;
  this.passphrase = options.passphrase || null;
  this.ssl = typeof options.ssl == 'boolean' ? options.ssl : false;
  this.rejectUnauthorized = typeof options.rejectUnauthorized == 'boolean' ? options.rejectUnauthorized : true;
  this.checkServerIdentity = typeof options.checkServerIdentity == 'boolean'
    || typeof options.checkServerIdentity == 'function' ? options.checkServerIdentity : true;
  // If ssl not enabled, certificate rejection is meaningless
  if(!this.ssl) this.rejectUnauthorized = false;
  // Options forwarded to each Response constructed from incoming data
  this.responseOptions = {
    promoteLongs: typeof options.promoteLongs == 'boolean' ? options.promoteLongs : true
  }
  // Flushing
  this.flushing = false;
  this.queue = [];
  // Internal state (set by connect())
  this.connection = null;
  this.writeStream = null;
}
inherits(Connection, EventEmitter);
//
// Connection handlers
// Returns the per-connection 'error' listener bound to `self`.
var errorHandler = function(self) {
  return function(err) {
    if(self.logger.isDebug()) {
      self.logger.debug(f('connection %s for [%s:%s] errored out with [%s]', self.id, self.host, self.port, JSON.stringify(err)));
    }
    // Only emit when somebody is listening; an unhandled 'error' would throw.
    var hasListeners = self.listeners('error').length > 0;
    if(hasListeners) self.emit("error", MongoError.create(err), self);
  }
}
// Returns the per-connection 'timeout' listener bound to `self`.
var timeoutHandler = function(self) {
  return function() {
    if(self.logger.isDebug()) {
      self.logger.debug(f('connection %s for [%s:%s] timed out', self.id, self.host, self.port));
    }
    // Surface the timeout as an event carrying a descriptive error.
    var error = MongoError.create(f("connection %s to %s:%s timed out", self.id, self.host, self.port));
    self.emit("timeout", error, self);
  }
}
// Returns the per-connection 'close' listener bound to `self`. When the
// close followed an error, the 'error' event already fired, so stay silent.
var closeHandler = function(self) {
  return function(hadError) {
    if(self.logger.isDebug()) {
      self.logger.debug(f('connection %s with for [%s:%s] closed', self.id, self.host, self.port));
    }
    if(hadError) return;
    self.emit("close"
      , MongoError.create(f("connection %s to %s:%s closed", self.id, self.host, self.port))
      , self);
  }
}
// Returns the per-connection 'data' listener. Implements wire protocol
// framing over the TCP stream: accumulates chunks until a complete
// length-prefixed message (little endian int32 prefix) is available, then
// hands it to self.messageHandler wrapped in a Response. Partial length
// prefixes (< 4 bytes) are held in self.stubBuffer; partially received
// message bodies in self.buffer/self.bytesRead/self.sizeOfMessage.
var dataHandler = function(self) {
  return function(data) {
    // Parse until we are done with the data
    while(data.length > 0) {
      // If we still have bytes to read on the current message
      if(self.bytesRead > 0 && self.sizeOfMessage > 0) {
        // Calculate the amount of remaining bytes
        var remainingBytesToRead = self.sizeOfMessage - self.bytesRead;
        // Check if the current chunk contains the rest of the message
        if(remainingBytesToRead > data.length) {
          // Copy the new data into the exiting buffer (should have been allocated when we know the message size)
          data.copy(self.buffer, self.bytesRead);
          // Adjust the number of bytes read so it point to the correct index in the buffer
          self.bytesRead = self.bytesRead + data.length;
          // Reset state of buffer
          data = new Buffer(0);
        } else {
          // Copy the missing part of the data into our current buffer
          data.copy(self.buffer, self.bytesRead, 0, remainingBytesToRead);
          // Slice the overflow into a new buffer that we will then re-parse
          data = data.slice(remainingBytesToRead);
          // Emit current complete message
          try {
            var emitBuffer = self.buffer;
            // Reset state of buffer
            self.buffer = null;
            self.sizeOfMessage = 0;
            self.bytesRead = 0;
            self.stubBuffer = null;
            // Emit the buffer
            self.messageHandler(new Response(self.bson, emitBuffer, self.responseOptions), self);
          } catch(err) {
            var errorObject = {err:"socketHandler", trace:err, bin:self.buffer, parseState:{
              sizeOfMessage:self.sizeOfMessage,
              bytesRead:self.bytesRead,
              stubBuffer:self.stubBuffer}};
            // We got a parse Error fire it off then keep going
            self.emit("parseError", errorObject, self);
          }
        }
      } else {
        // Stub buffer is kept in case we don't get enough bytes to determine the
        // size of the message (< 4 bytes)
        if(self.stubBuffer != null && self.stubBuffer.length > 0) {
          // If we have enough bytes to determine the message size let's do it
          if(self.stubBuffer.length + data.length > 4) {
            // Prepad the data
            var newData = new Buffer(self.stubBuffer.length + data.length);
            self.stubBuffer.copy(newData, 0);
            data.copy(newData, self.stubBuffer.length);
            // Reassign for parsing
            data = newData;
            // Reset state of buffer
            self.buffer = null;
            self.sizeOfMessage = 0;
            self.bytesRead = 0;
            self.stubBuffer = null;
          } else {
            // Add the the bytes to the stub buffer
            var newStubBuffer = new Buffer(self.stubBuffer.length + data.length);
            // Copy existing stub buffer
            self.stubBuffer.copy(newStubBuffer, 0);
            // Copy missing part of the data
            data.copy(newStubBuffer, self.stubBuffer.length);
            // Exit parsing loop
            data = new Buffer(0);
          }
        } else {
          if(data.length > 4) {
            // Retrieve the message size
            // var sizeOfMessage = data.readUInt32LE(0);
            var sizeOfMessage = data[0] | data[1] << 8 | data[2] << 16 | data[3] << 24;
            // If we have a negative sizeOfMessage emit error and return
            if(sizeOfMessage < 0 || sizeOfMessage > self.maxBsonMessageSize) {
              var errorObject = {err:"socketHandler", trace:'', bin:self.buffer, parseState:{
                sizeOfMessage: sizeOfMessage,
                bytesRead: self.bytesRead,
                stubBuffer: self.stubBuffer}};
              // We got a parse Error fire it off then keep going
              self.emit("parseError", errorObject, self);
              return;
            }
            // Ensure that the size of message is larger than 0 and less than the max allowed
            if(sizeOfMessage > 4 && sizeOfMessage < self.maxBsonMessageSize && sizeOfMessage > data.length) {
              // Message spans multiple chunks: start accumulating
              self.buffer = new Buffer(sizeOfMessage);
              // Copy all the data into the buffer
              data.copy(self.buffer, 0);
              // Update bytes read
              self.bytesRead = data.length;
              // Update sizeOfMessage
              self.sizeOfMessage = sizeOfMessage;
              // Ensure stub buffer is null
              self.stubBuffer = null;
              // Exit parsing loop
              data = new Buffer(0);
            } else if(sizeOfMessage > 4 && sizeOfMessage < self.maxBsonMessageSize && sizeOfMessage == data.length) {
              // Chunk is exactly one complete message
              try {
                var emitBuffer = data;
                // Reset state of buffer
                self.buffer = null;
                self.sizeOfMessage = 0;
                self.bytesRead = 0;
                self.stubBuffer = null;
                // Exit parsing loop
                data = new Buffer(0);
                // Emit the message
                self.messageHandler(new Response(self.bson, emitBuffer, self.responseOptions), self);
              } catch (err) {
                var errorObject = {err:"socketHandler", trace:err, bin:self.buffer, parseState:{
                  sizeOfMessage:self.sizeOfMessage,
                  bytesRead:self.bytesRead,
                  stubBuffer:self.stubBuffer}};
                // We got a parse Error fire it off then keep going
                self.emit("parseError", errorObject, self);
              }
            } else if(sizeOfMessage <= 4 || sizeOfMessage > self.maxBsonMessageSize) {
              var errorObject = {err:"socketHandler", trace:null, bin:data, parseState:{
                sizeOfMessage:sizeOfMessage,
                bytesRead:0,
                buffer:null,
                stubBuffer:null}};
              // We got a parse Error fire it off then keep going
              self.emit("parseError", errorObject, self);
              // Clear out the state of the parser
              self.buffer = null;
              self.sizeOfMessage = 0;
              self.bytesRead = 0;
              self.stubBuffer = null;
              // Exit parsing loop
              data = new Buffer(0);
            } else {
              // Chunk contains one complete message plus the start of the next
              var emitBuffer = data.slice(0, sizeOfMessage);
              // Reset state of buffer
              self.buffer = null;
              self.sizeOfMessage = 0;
              self.bytesRead = 0;
              self.stubBuffer = null;
              // Copy rest of message
              data = data.slice(sizeOfMessage);
              // Emit the message
              self.messageHandler(new Response(self.bson, emitBuffer, self.responseOptions), self);
            }
          } else {
            // Create a buffer that contains the space for the non-complete message
            self.stubBuffer = new Buffer(data.length)
            // Copy the data to the stub buffer
            data.copy(self.stubBuffer, 0);
            // Exit parsing loop
            data = new Buffer(0);
          }
        }
      }
    }
  }
}
/**
 * Connect
 * Opens the TCP (or TLS, when options.ssl is set) socket, wires up the
 * error/timeout/close/data handlers and emits 'connect' once the socket
 * (and, for SSL, the handshake) is ready.
 * @method
 * @param {object} [_options] Per-connect overrides (currently only promoteLongs)
 */
Connection.prototype.connect = function(_options) {
  var self = this;
  _options = _options || {};
  // Check if we are overriding the promoteLongs
  if(typeof _options.promoteLongs == 'boolean') {
    self.responseOptions.promoteLongs = _options.promoteLongs;
  }

  // Create new connection instance (unix domain socket or TCP)
  self.connection = self.domainSocket
    ? net.createConnection(self.host)
    : net.createConnection(self.port, self.host);

  // Set the options for the connection
  self.connection.setKeepAlive(self.keepAlive, self.keepAliveInitialDelay);
  self.connection.setTimeout(self.connectionTimeout);
  self.connection.setNoDelay(self.noDelay);

  // If we have ssl enabled
  if(self.ssl) {
    var sslOptions = {
      socket: self.connection
      , rejectUnauthorized: self.rejectUnauthorized
    }

    if(self.ca) sslOptions.ca = self.ca;
    if(self.cert) sslOptions.cert = self.cert;
    if(self.key) sslOptions.key = self.key;
    if(self.passphrase) sslOptions.passphrase = self.passphrase;

    // Override checkServerIdentity behavior
    if(self.checkServerIdentity == false) {
      // Skip the identity check by returning undefined as per the node docs
      // https://nodejs.org/api/tls.html#tls_tls_connect_options_callback
      sslOptions.checkServerIdentity = function(servername, cert) {
        return undefined;
      }
    } else if(typeof self.checkServerIdentity == 'function') {
      sslOptions.checkServerIdentity = self.checkServerIdentity;
    }

    // Attempt SSL connection (wraps the plain socket created above)
    self.connection = tls.connect(self.port, self.host, sslOptions, function() {
      // Error on auth or skip
      if(self.connection.authorizationError && self.rejectUnauthorized) {
        return self.emit("error", self.connection.authorizationError, self, {ssl:true});
      }

      // Set socket timeout instead of connection timeout
      self.connection.setTimeout(self.socketTimeout);
      // We are done emit connect
      self.emit('connect', self);
    });
    self.connection.setTimeout(self.connectionTimeout);
  } else {
    self.connection.on('connect', function() {
      // Set socket timeout instead of connection timeout
      self.connection.setTimeout(self.socketTimeout);
      // Emit connect event
      self.emit('connect', self);
    });
  }

  // Add handlers for events
  self.connection.once('error', errorHandler(self));
  self.connection.once('timeout', timeoutHandler(self));
  self.connection.once('close', closeHandler(self));
  self.connection.on('data', dataHandler(self));
}
/**
 * Destroy the connection, half-closing the socket before tearing it down.
 * @method
 */
Connection.prototype.destroy = function() {
  var socket = this.connection;
  if(socket) {
    // Flush pending writes, then force the socket closed
    socket.end();
    socket.destroy();
  }
  this.destroyed = true;
}
/**
 * Write a serialized command to the connection.
 * @method
 * @param {Buffer|Buffer[]} buffer single buffer or ordered list of buffers
 */
Connection.prototype.write = function(buffer) {
  var self = this;
  // Trace outgoing bytes when debug logging is enabled
  if(self.logger.isDebug()) {
    var parts = Array.isArray(buffer) ? buffer : [buffer];
    for(var i = 0; i < parts.length; i++) {
      self.logger.debug(f('writing buffer [%s] to %s:%s', parts[i].toString('hex'), self.host, self.port));
    }
  }
  // Single-buffer fast path
  if(!Array.isArray(buffer)) return self.connection.write(buffer, 'binary');
  // Flush multiple buffers to the socket in order
  for(var j = 0; j < buffer.length; j++) {
    self.connection.write(buffer[j], 'binary');
  }
}
/**
 * Return the id of the connection as a string.
 * @method
 * @return {string}
 */
Connection.prototype.toString = function() {
  return String(this.id);
}
/**
 * Return a plain-object summary of the connection.
 * @method
 * @return {object} object with id, host and port
 */
Connection.prototype.toJSON = function() {
  var self = this;
  return { id: self.id, host: self.host, port: self.port };
}
/**
 * Is the connection currently usable?
 * @method
 * @return {boolean}
 */
Connection.prototype.isConnected = function() {
  // A destroyed wrapper is never connected
  if(this.destroyed) return false;
  var socket = this.connection;
  return !socket.destroyed && socket.writable;
}
/**
* A server connect event, used to verify that the connection is up and running
*
* @event Connection#connect
* @type {Connection}
*/
/**
* The server connection closed, all pool connections closed
*
* @event Connection#close
* @type {Connection}
*/
/**
* The server connection caused an error, all pool connections closed
*
* @event Connection#error
* @type {Connection}
*/
/**
* The server connection timed out, all pool connections closed
*
* @event Connection#timeout
* @type {Connection}
*/
/**
* The driver experienced an invalid message, all pool connections closed
*
* @event Connection#parseError
* @type {Connection}
*/
module.exports = Connection;

196
node_modules/mongodb-core/lib/connection/logger.js generated vendored Normal file
View File

@@ -0,0 +1,196 @@
"use strict";
var f = require('util').format
, MongoError = require('../error');
// Filters for classes
var classFilters = {};
var filteredClasses = {};
var level = null;
// Save the process id
var pid = process.pid;
// current logger
var currentLogger = null;
/**
* Creates a new Logger instance
* @class
* @param {string} className The Class name associated with the logging instance
* @param {object} [options=null] Optional settings.
* @param {Function} [options.logger=null] Custom logger function;
* @param {string} [options.loggerLevel=error] Override default global log level.
* @return {Logger} a Logger instance.
*/
var Logger = function(className, options) {
  // Allow invocation without `new`
  if(!(this instanceof Logger)) return new Logger(className, options);
  options = options || {};
  // Current reference
  var self = this;
  this.className = className;
  // Current logger: the first Logger instance to supply one wins,
  // otherwise fall back to console.log (shared module-wide state)
  if(currentLogger == null && options.logger) {
    currentLogger = options.logger;
  } else if(currentLogger == null) {
    currentLogger = console.log;
  }
  // Set level of logging, default is error (also shared module-wide)
  if(level == null) {
    level = options.loggerLevel || 'error';
  }
  // Add all class names.
  // NOTE(review): the guard reads `filteredClasses` but writes
  // `classFilters` — looks like the same map was intended on both sides;
  // confirm against upstream before changing.
  if(filteredClasses[this.className] == null) classFilters[this.className] = true;
}
/**
 * Log a message at the debug level.
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
Logger.prototype.debug = function(message, object) {
  // Explicit class filter list wins; otherwise fall back to the default map
  var classEnabled = Object.keys(filteredClasses).length > 0
    ? filteredClasses[this.className]
    : classFilters[this.className];
  if(this.isDebug() && classEnabled) {
    var dateTime = new Date().getTime();
    var msg = f("[%s-%s:%s] %s %s", 'DEBUG', this.className, pid, dateTime, message);
    var state = {
      type: 'debug', message: message, className: this.className, pid: pid, date: dateTime
    };
    if(object) state.meta = object;
    currentLogger(msg, state);
  }
}
/**
 * Log a message at the info level.
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
Logger.prototype.info = function(message, object) {
  // Explicit class filter list wins; otherwise fall back to the default map
  var classEnabled = Object.keys(filteredClasses).length > 0
    ? filteredClasses[this.className]
    : classFilters[this.className];
  if(this.isInfo() && classEnabled) {
    var dateTime = new Date().getTime();
    var msg = f("[%s-%s:%s] %s %s", 'INFO', this.className, pid, dateTime, message);
    var state = {
      type: 'info', message: message, className: this.className, pid: pid, date: dateTime
    };
    if(object) state.meta = object;
    currentLogger(msg, state);
  }
}
/**
 * Log a message at the error level.
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
Logger.prototype.error = function(message, object) {
  // Explicit class filter list wins; otherwise fall back to the default map
  var classEnabled = Object.keys(filteredClasses).length > 0
    ? filteredClasses[this.className]
    : classFilters[this.className];
  if(this.isError() && classEnabled) {
    var dateTime = new Date().getTime();
    var msg = f("[%s-%s:%s] %s %s", 'ERROR', this.className, pid, dateTime, message);
    var state = {
      type: 'error', message: message, className: this.className, pid: pid, date: dateTime
    };
    if(object) state.meta = object;
    currentLogger(msg, state);
  }
}
/**
 * Is info-level output enabled? (true at the info and debug levels)
 * @method
 * @return {boolean}
 */
Logger.prototype.isInfo = function() {
  return level === 'info' || level === 'debug';
}
/**
 * Is error-level output enabled? (true at every configured level)
 * @method
 * @return {boolean}
 */
Logger.prototype.isError = function() {
  return ['error', 'info', 'debug'].indexOf(level) !== -1;
}
/**
 * Is debug-level output enabled?
 * @method
 * @return {boolean}
 */
Logger.prototype.isDebug = function() {
  return level === 'debug';
}
/**
 * Reset the logger to its defaults: error level, no filtered classes.
 * @method
 * @return {null}
 */
Logger.reset = function() {
  level = 'error';
  filteredClasses = {};
}
/**
 * Get the module-wide logger function currently in use.
 * @method
 * @return {function}
 */
Logger.currentLogger = function() {
  return currentLogger;
}
/**
 * Replace the module-wide logger function.
 * @method
 * @param {function} logger Logger function.
 * @throws {MongoError} when the argument is not a function
 * @return {null}
 */
Logger.setCurrentLogger = function(logger) {
  if(typeof logger !== 'function') throw new MongoError("current logger must be a function");
  currentLogger = logger;
}
/**
 * Restrict logging to a set of class names.
 * @method
 * @param {string} type The type of filter (currently only 'class')
 * @param {string[]} values The class names to allow
 * @return {null}
 */
Logger.filter = function(type, values) {
  // Only the class filter is supported; anything else is a no-op
  if(type != 'class' || !Array.isArray(values)) return;
  filteredClasses = {};
  for(var i = 0; i < values.length; i++) {
    filteredClasses[values[i]] = true;
  }
}
/**
 * Set the module-wide log level.
 * @method
 * @param {string} _level one of 'debug', 'info', 'error'
 * @throws {Error} when the level is not recognized
 * @return {null}
 */
Logger.setLevel = function(_level) {
  var valid = _level == 'info' || _level == 'error' || _level == 'debug';
  if(!valid) throw new Error(f("%s is an illegal logging level", _level));
  level = _level;
}
module.exports = Logger;

321
node_modules/mongodb-core/lib/connection/pool.js generated vendored Normal file
View File

@@ -0,0 +1,321 @@
"use strict";
var inherits = require('util').inherits
, EventEmitter = require('events').EventEmitter
, Connection = require('./connection')
, Query = require('./commands').Query
, Logger = require('./logger')
, f = require('util').format;
var DISCONNECTED = 'disconnected';
var CONNECTING = 'connecting';
var CONNECTED = 'connected';
var DESTROYED = 'destroyed';
var _id = 0;
/**
* Creates a new Pool instance
* @class
* @param {string} options.host The server host
* @param {number} options.port The server port
* @param {number} [options.size=5] Server connection pool size
* @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
* @param {number} [options.keepAliveInitialDelay=0] Initial delay before TCP keep alive enabled
* @param {boolean} [options.noDelay=true] TCP Connection no delay
* @param {number} [options.connectionTimeout=0] TCP Connection timeout setting
* @param {number} [options.socketTimeout=0] TCP Socket timeout setting
* @param {boolean} [options.singleBufferSerializtion=true] Serialize into single buffer, trade of peak memory for serialization speed
* @param {boolean} [options.ssl=false] Use SSL for connection
* @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function.
* @param {Buffer} [options.ca] SSL Certificate store binary buffer
* @param {Buffer} [options.cert] SSL Certificate binary buffer
* @param {Buffer} [options.key] SSL Key file binary buffer
* @param {string} [options.passPhrase] SSL Certificate pass phrase
* @param {boolean} [options.rejectUnauthorized=false] Reject unauthorized server certificates
* @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits
* @fires Pool#connect
* @fires Pool#close
* @fires Pool#error
* @fires Pool#timeout
* @fires Pool#parseError
* @return {Pool} A cursor instance
*/
/**
 * Pool constructor (see the option list documented above).
 * Fix: the original assigned `options || {}` to this.options but kept
 * reading the raw `options` argument below, so `new Pool()` crashed with a
 * TypeError on `options.size` instead of reaching the intended
 * "must pass in valid bson parser" error.
 */
var Pool = function(options) {
  var self = this;
  // Add event listener
  EventEmitter.call(this);
  // Default the options BEFORE any property reads
  options = options || {};
  this.options = options;
  this.size = typeof options.size == 'number' && !isNaN(options.size) ? options.size : 5;
  // Message handler
  this.messageHandler = options.messageHandler;
  // No bson parser passed in
  if(!options.bson) throw new Error("must pass in valid bson parser");
  // Contains all connections
  this.connections = [];
  this.state = DISCONNECTED;
  // Round robin index
  this.index = 0;
  this.dead = false;
  // Logger instance
  this.logger = Logger('Pool', options);
  // If we are monitoring this server we will create an exclusive reserved socket for that
  this.monitoring = typeof options.monitoring == 'boolean' ? options.monitoring : false;
  // Pool id
  this.id = _id++;
  // Grouping tag used for debugging purposes
  this.tag = options.tag;
}

inherits(Pool, EventEmitter);
// Build the shared 'error' handler: the first socket failure kills the
// whole pool and re-emits the error on the pool itself.
var errorHandler = function(self) {
  return function(err, connection) {
    if(self.logger.isDebug()) self.logger.debug(f('pool [%s] errored out [%s] with connection [%s]', this.dead, JSON.stringify(err), JSON.stringify(connection)));
    if(self.dead) return;
    self.state = DISCONNECTED;
    self.dead = true;
    self.destroy();
    self.emit('error', err, self);
  }
}
// Build the shared 'timeout' handler: any socket timeout kills the whole
// pool and re-emits the timeout on the pool itself.
var timeoutHandler = function(self) {
  return function(err, connection) {
    if(self.logger.isDebug()) self.logger.debug(f('pool [%s] timed out [%s] with connection [%s]', this.dead, JSON.stringify(err), JSON.stringify(connection)));
    if(self.dead) return;
    self.state = DISCONNECTED;
    self.dead = true;
    self.destroy();
    self.emit('timeout', err, self);
  }
}
// Build the shared 'close' handler: any socket close kills the whole pool
// and re-emits the close on the pool itself.
var closeHandler = function(self) {
  return function(err, connection) {
    if(self.logger.isDebug()) self.logger.debug(f('pool [%s] closed [%s] with connection [%s]', this.dead, JSON.stringify(err), JSON.stringify(connection)));
    if(self.dead) return;
    self.state = DISCONNECTED;
    self.dead = true;
    self.destroy();
    self.emit('close', err, self);
  }
}
// Build the shared 'parseError' handler: an invalid wire message kills the
// whole pool and re-emits parseError on the pool itself.
var parseErrorHandler = function(self) {
  return function(err, connection) {
    if(self.logger.isDebug()) self.logger.debug(f('pool [%s] errored out [%s] with connection [%s]', this.dead, JSON.stringify(err), JSON.stringify(connection)));
    if(self.dead) return;
    self.state = DISCONNECTED;
    self.dead = true;
    self.destroy();
    self.emit('parseError', err, self);
  }
}
// Build the shared 'connect' handler: track each established socket and
// flag the pool connected once every socket in the pool is up.
var connectHandler = function(self) {
  return function(connection) {
    self.connections.push(connection);
    if(self.connections.length == self.size) {
      self.state = CONNECTED;
      // Done connecting
      self.emit("connect", self);
    }
  }
}
/**
 * Destroy the pool: detach all listeners and close every connection.
 * @method
 */
Pool.prototype.destroy = function() {
  this.state = DESTROYED;
  // Set dead
  this.dead = true;
  var events = ["close", "message", "error", "timeout", "parseError", "connect"];
  for(var i = 0; i < this.connections.length; i++) {
    var connection = this.connections[i];
    // Drop every listener before tearing the socket down
    for(var j = 0; j < events.length; j++) {
      connection.removeAllListeners(events[j]);
    }
    connection.destroy();
  }
}
// Scheduling primitive: prefer setImmediate, falling back to
// process.nextTick on runtimes where setImmediate does not exist.
var execute = null;
try {
  execute = setImmediate;
} catch(err) {
  execute = process.nextTick;
}
/**
 * Connect the pool: open `size` sockets, staggered by one millisecond each
 * to space out the connection attempts.
 * @method
 * @param {object} [_options] per-connect overrides forwarded to each Connection
 */
Pool.prototype.connect = function(_options) {
  var self = this;
  // Enter connecting state and clear any previous death flag
  self.state = CONNECTING;
  self.dead = false;
  for(var i = 0; i < self.size; i++) {
    // Delays run 1, 2, ..., size milliseconds
    setTimeout(function() {
      execute(function() {
        self.options.messageHandler = self.messageHandler;
        var connection = new Connection(self.options);
        // Wire up lifecycle handlers before initiating the connect
        connection.once('close', closeHandler(self));
        connection.once('error', errorHandler(self));
        connection.once('timeout', timeoutHandler(self));
        connection.once('parseError', parseErrorHandler(self));
        connection.on('connect', connectHandler(self));
        // Start connection
        connection.connect(_options);
      });
    }, i + 1);
  }
}
/**
 * Get a pool connection (round-robin). When monitoring is enabled the last
 * connection is reserved exclusively for monitoring traffic.
 * @method
 * @param {object} [options] pass {monitoring: true} to get the reserved socket
 * @return {Connection}
 */
Pool.prototype.get = function(options) {
  options = options || {};
  // Advance the round-robin pointer
  this.index = this.index + 1;
  // Single connection pool: nothing to rotate over
  if(this.connections.length == 1) return this.connections[0];
  // Monitoring request gets the reserved (last) connection
  if(this.monitoring && options.monitoring) {
    return this.connections[this.connections.length - 1];
  }
  // Exclude the reserved socket from rotation when monitoring is on
  var rotationSize = this.monitoring
    ? this.connections.length - 1
    : this.connections.length;
  this.index = this.index % rotationSize;
  return this.connections[this.index];
}
/**
 * Reduce the pool to at most the provided number of connections, detaching
 * and destroying any surplus.
 * @method
 * @param {number} maxConnections reduce the poolsize to maxConnections
 */
Pool.prototype.capConnections = function(maxConnections) {
  // Nothing to do when we are already at or under the cap
  if(this.connections.length <= maxConnections) return;
  // Split off the surplus and keep only the first maxConnections
  var surplus = this.connections.slice(maxConnections);
  this.connections = this.connections.slice(0, maxConnections);
  // Rewind the round-robin index if it now points past the end
  if(this.index >= maxConnections) {
    this.index = 0;
  }
  // Detach all listeners and close each surplus connection
  var events = ['close', 'error', 'timeout', 'parseError', 'connect'];
  for(var i = 0; i < surplus.length; i++) {
    for(var j = 0; j < events.length; j++) {
      surplus[i].removeAllListeners(events[j]);
    }
    surplus[i].destroy();
  }
}
/**
 * Get all pool connections as a shallow copy so callers cannot mutate the
 * internal list.
 * @method
 * @return {array}
 */
Pool.prototype.getAll = function() {
  return this.connections.slice(0);
}
/**
 * Is the pool connected? True only when every socket is connected AND the
 * pool state itself is CONNECTED.
 * @method
 * @return {boolean}
 */
Pool.prototype.isConnected = function() {
  var connections = this.connections;
  for(var i = 0; i < connections.length; i++) {
    if(!connections[i].isConnected()) return false;
  }
  return this.state == CONNECTED;
}
/**
 * Was the pool destroyed?
 * @method
 * @return {boolean}
 */
Pool.prototype.isDestroyed = function() {
  return this.state === DESTROYED;
}
/**
* A server connect event, used to verify that the connection is up and running
*
* @event Pool#connect
* @type {Pool}
*/
/**
* The server connection closed, all pool connections closed
*
* @event Pool#close
* @type {Pool}
*/
/**
* The server connection caused an error, all pool connections closed
*
* @event Pool#error
* @type {Pool}
*/
/**
* The server connection timed out, all pool connections closed
*
* @event Pool#timeout
* @type {Pool}
*/
/**
* The driver experienced an invalid message, all pool connections closed
*
* @event Pool#parseError
* @type {Pool}
*/
module.exports = Pool;

77
node_modules/mongodb-core/lib/connection/utils.js generated vendored Normal file
View File

@@ -0,0 +1,77 @@
"use strict";
// Define a writable boolean flag property on obj; every write keeps the
// corresponding bit in values.flags in sync with the boolean value.
// Fix: the non-boolean guard called f(...) but this module never imports
// util.format, so it raised "f is not defined" instead of the intended error.
var setProperty = function(obj, prop, flag, values) {
  Object.defineProperty(obj, prop.name, {
    enumerable:true,
    set: function(value) {
      if(typeof value != 'boolean') throw new Error(prop.name + " required a boolean");
      // Flip the bit to 1
      if(value == true) values.flags |= flag;
      // Flip the bit to 0 if it's set, otherwise ignore
      if(value == false && (values.flags & flag) == flag) values.flags ^= flag;
      prop.value = value;
    }
    , get: function() { return prop.value; }
  });
}
// Define a read-only property on obj backed by values[fieldName];
// the first read lazily triggers obj.parse() when the message has not
// been parsed yet, and an optional func post-processes the raw value.
var getProperty = function(obj, propName, fieldName, values, func) {
  Object.defineProperty(obj, propName, {
    enumerable: true,
    get: function() {
      // Not parsed yet — parse on first access
      if(values[fieldName] == null && obj.isParsed && !obj.isParsed()) {
        obj.parse();
      }
      var raw = values[fieldName];
      // Apply the optional post-processing hook
      return typeof func == 'function' ? func(raw) : raw;
    }
  });
}
// Define a read-only enumerable property on obj that always yields the
// captured value.
var getSingleProperty = function(obj, name, value) {
  Object.defineProperty(obj, name, {
    enumerable: true,
    get: function() { return value; }
  });
}
// Shallow-copy every enumerable key of fObj onto tObj (created when
// omitted) and return the target.
var copy = function(fObj, tObj) {
  var target = tObj || {};
  for(var key in fObj) {
    target[key] = fObj[key];
  }
  return target;
}
// Project the given option names out of options into a fresh object,
// used to log a safe subset of connection settings.
var debugOptions = function(debugFields, options) {
  var finaloptions = {};
  for(var i = 0; i < debugFields.length; i++) {
    var field = debugFields[i];
    finaloptions[field] = options[field];
  }
  return finaloptions;
}
/**
 * Bind a callback to the active node domain, if any.
 * Returns the callback untouched when no domain is active or no callback
 * was supplied.
 * @ignore
 */
var bindToCurrentDomain = function(callback) {
  var activeDomain = process.domain;
  if(activeDomain != null && callback != null) {
    return activeDomain.bind(callback);
  }
  return callback;
}
// Public API of this utility module
exports.setProperty = setProperty;
exports.getProperty = getProperty;
exports.getSingleProperty = getSingleProperty;
exports.copy = copy;
exports.bindToCurrentDomain = bindToCurrentDomain;
exports.debugOptions = debugOptions;

776
node_modules/mongodb-core/lib/cursor.js generated vendored Normal file
View File

@@ -0,0 +1,776 @@
"use strict";
var Long = require('bson').Long
, Logger = require('./connection/logger')
, MongoError = require('./error')
, f = require('util').format;
/**
* This is a cursor results callback
*
* @callback resultCallback
* @param {error} error An error object. Set to null if no error present
* @param {object} document
*/
/**
* @fileOverview The **Cursor** class is an internal class that embodies a cursor on MongoDB
* allowing for iteration over the results returned from the underlying query.
*
* **CURSORS Cannot directly be instantiated**
* @example
* var Server = require('mongodb-core').Server
* , ReadPreference = require('mongodb-core').ReadPreference
* , assert = require('assert');
*
* var server = new Server({host: 'localhost', port: 27017});
* // Wait for the connection event
* server.on('connect', function(server) {
* assert.equal(null, err);
*
* // Execute the write
* var cursor = _server.cursor('integration_tests.inserts_example4', {
* find: 'integration_tests.example4'
* , query: {a:1}
* }, {
* readPreference: new ReadPreference('secondary');
* });
*
* // Get the first document
* cursor.next(function(err, doc) {
* assert.equal(null, err);
* server.destroy();
* });
* });
*
* // Start connecting
* server.connect();
*/
/**
* Creates a new Cursor, not to be used directly
* @class
* @param {object} bson An instance of the BSON parser
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {{object}|Long} cmd The selector (can be a command or a cursorId)
* @param {object} [options=null] Optional settings.
* @param {object} [options.batchSize=1000] Batchsize for the operation
* @param {array} [options.documents=[]] Initial documents list for cursor
* @param {object} [options.transforms=null] Transform methods for the cursor results
* @param {function} [options.transforms.query] Transform the value returned from the initial query
* @param {function} [options.transforms.doc] Transform each document returned from Cursor.prototype.next
* @param {object} topology The server topology instance.
* @param {object} topologyOptions The server topology options.
* @return {Cursor} A cursor instance
* @property {number} cursorBatchSize The current cursorBatchSize for the cursor
* @property {number} cursorLimit The current cursorLimit for the cursor
* @property {number} cursorSkip The current cursorSkip for the cursor
*/
var Cursor = function(bson, ns, cmd, options, topology, topologyOptions) {
  options = options || {};
  // Cursor reference
  var self = this;
  // Initial query.
  // NOTE(review): this local is never assigned again in this constructor;
  // confirm it is dead code before removing.
  var query = null;

  // Cursor connection
  this.connection = null;
  // Cursor server
  this.server = null;

  // Do we have a not connected handler (used to buffer operations while
  // the topology is reconnecting)
  this.disconnectHandler = options.disconnectHandler;

  // Set local values
  this.bson = bson;
  this.ns = ns;
  this.cmd = cmd;
  this.options = options;
  this.topology = topology;

  // All internal state of the cursor lives in this single object so it can
  // be reset wholesale (see rewind)
  this.cursorState = {
      cursorId: null
    , cmd: cmd
    , documents: options.documents || []
    , cursorIndex: 0
    , dead: false
    , killed: false
    , init: false
    , notified: false
    // Explicit options win over values embedded in the command
    , limit: options.limit || cmd.limit || 0
    , skip: options.skip || cmd.skip || 0
    , batchSize: options.batchSize || cmd.batchSize || 1000
    , currentLimit: 0
    // Result field name if not a cursor (contains the array of results)
    , transforms: options.transforms
  }

  // Callback controller
  this.callbacks = null;

  // Logger
  this.logger = Logger('Cursor', options);

  //
  // Did we pass in a cursor id directly instead of a command document?
  // Promote plain numbers to Long for wire-protocol compatibility.
  if(typeof cmd == 'number') {
    this.cursorState.cursorId = Long.fromNumber(cmd);
    this.cursorState.lastCursorId = this.cursorState.cursorId;
  } else if(cmd instanceof Long) {
    this.cursorState.cursorId = cmd;
    this.cursorState.lastCursorId = cmd;
  }
}
// Batch size accessors: how many documents each getMore requests
Cursor.prototype.setCursorBatchSize = function(value) {
  this.cursorState.batchSize = value;
}

Cursor.prototype.cursorBatchSize = function() {
  return this.cursorState.batchSize;
}

// Limit accessors: cap on the total number of documents returned
Cursor.prototype.setCursorLimit = function(value) {
  this.cursorState.limit = value;
}

Cursor.prototype.cursorLimit = function() {
  return this.cursorState.limit;
}

// Skip accessors: number of documents to skip before returning results
Cursor.prototype.setCursorSkip = function(value) {
  this.cursorState.skip = value;
}

Cursor.prototype.cursorSkip = function() {
  return this.cursorState.skip;
}
//
// Execute the first query: writes the initial command to the connection and
// populates cursorState from whichever response shape comes back (command
// cursor, legacy result array, or plain find reply).
var execInitialQuery = function(self, query, cmd, options, cursorState, connection, logger, callbacks, callback) {
  if(logger.isDebug()) {
    logger.debug(f("issue initial query [%s] with flags [%s]"
      , JSON.stringify(cmd)
      , JSON.stringify(query)));
  }

  var queryCallback = function(err, result) {
    if(err) return callback(err);

    if(result.queryFailure) {
      return callback(MongoError.create(result.documents[0]), null);
    }

    // Check if we have a command cursor.
    // NOTE(review): `result.documents[0].cursor != 'string'` compares the
    // cursor value itself against the literal 'string' (almost always true);
    // it looks like `typeof` was intended — confirm against upstream before
    // changing, since the later branch does use `typeof`.
    if(Array.isArray(result.documents) && result.documents.length == 1
      && (!cmd.find || (cmd.find && cmd.virtual == false))
      && (result.documents[0].cursor != 'string'
        || result.documents[0]['$err']
        || result.documents[0]['errmsg']
        || Array.isArray(result.documents[0].result))
      ) {
      // We have a an error document return the error
      if(result.documents[0]['$err']
        || result.documents[0]['errmsg']) {
        return callback(MongoError.create(result.documents[0]), null);
      }

      // We have a cursor document
      if(result.documents[0].cursor != null
        && typeof result.documents[0].cursor != 'string') {
        var id = result.documents[0].cursor.id;
        // If we have a namespace change set the new namespace for getmores
        if(result.documents[0].cursor.ns) {
          self.ns = result.documents[0].cursor.ns;
        }
        // Promote id to long if needed
        cursorState.cursorId = typeof id == 'number' ? Long.fromNumber(id) : id;
        cursorState.lastCursorId = cursorState.cursorId;
        // If we have a firstBatch set it
        if(Array.isArray(result.documents[0].cursor.firstBatch)) {
          cursorState.documents = result.documents[0].cursor.firstBatch;//.reverse();
        }
        // Return after processing command cursor
        return callback(null, null);
      }

      // Legacy command shape: results live in the `result` array and there
      // is no server-side cursor to continue from
      if(Array.isArray(result.documents[0].result)) {
        cursorState.documents = result.documents[0].result;
        cursorState.cursorId = Long.ZERO;
        return callback(null, null);
      }
    }

    // Otherwise fall back to regular find path
    cursorState.cursorId = result.cursorId;
    cursorState.lastCursorId = result.cursorId;
    cursorState.documents = result.documents;

    // Transform the results with passed in transformation method if provided
    if(cursorState.transforms && typeof cursorState.transforms.query == 'function') {
      cursorState.documents = cursorState.transforms.query(result);
    }

    // Return callback
    callback(null, null);
  }

  // If we have a raw query decorate the function so the response parser
  // skips BSON deserialization
  if(options.raw || cmd.raw) {
    queryCallback.raw = options.raw || cmd.raw;
  }

  // Do we have documentsReturnedIn set on the query
  if(typeof query.documentsReturnedIn == 'string') {
    queryCallback.documentsReturnedIn = query.documentsReturnedIn;
  }

  // Set up callback
  callbacks.register(query.requestId, queryCallback);
  // Write the initial command out
  connection.write(query.toBin());
}
//
// Invoke a callback and, if it throws, re-raise the exception on the next
// tick so a throwing caller cannot corrupt the cursor's internal state.
var handleCallback = function(callback, err, result) {
  try {
    callback(err, result);
  } catch(callbackError) {
    // Surface as an uncaught exception asynchronously
    process.nextTick(function() {
      throw callbackError;
    });
  }
}
// Internal methods
//
// Issue the cursor's initial query on the assigned connection.
// NOTE(review): this is an inlined near-duplicate of execInitialQuery above
// (operating on `self.*` instead of parameters); keep the two in sync.
Cursor.prototype._find = function(callback) {
  var self = this;

  if(self.logger.isDebug()) {
    self.logger.debug(f("issue initial query [%s] with flags [%s]"
      , JSON.stringify(self.cmd)
      , JSON.stringify(self.query)));
  }

  var queryCallback = function(err, result) {
    if(err) return callback(err);

    if(result.queryFailure) {
      return callback(MongoError.create(result.documents[0]), null);
    }

    // Check if we have a command cursor.
    // NOTE(review): `result.documents[0].cursor != 'string'` looks like a
    // missing `typeof` (the later branch uses it); confirm upstream intent.
    if(Array.isArray(result.documents) && result.documents.length == 1
      && (!self.cmd.find || (self.cmd.find && self.cmd.virtual == false))
      && (result.documents[0].cursor != 'string'
        || result.documents[0]['$err']
        || result.documents[0]['errmsg']
        || Array.isArray(result.documents[0].result))
      ) {
      // We have a an error document return the error
      if(result.documents[0]['$err']
        || result.documents[0]['errmsg']) {
        return callback(MongoError.create(result.documents[0]), null);
      }

      // We have a cursor document
      if(result.documents[0].cursor != null
        && typeof result.documents[0].cursor != 'string') {
        var id = result.documents[0].cursor.id;
        // If we have a namespace change set the new namespace for getmores
        if(result.documents[0].cursor.ns) {
          self.ns = result.documents[0].cursor.ns;
        }
        // Promote id to long if needed
        self.cursorState.cursorId = typeof id == 'number' ? Long.fromNumber(id) : id;
        self.cursorState.lastCursorId = self.cursorState.cursorId;
        // If we have a firstBatch set it
        if(Array.isArray(result.documents[0].cursor.firstBatch)) {
          self.cursorState.documents = result.documents[0].cursor.firstBatch;//.reverse();
        }
        // Return after processing command cursor
        return callback(null, null);
      }

      // Legacy command shape: results in the `result` array, no live cursor
      if(Array.isArray(result.documents[0].result)) {
        self.cursorState.documents = result.documents[0].result;
        self.cursorState.cursorId = Long.ZERO;
        return callback(null, null);
      }
    }

    // Otherwise fall back to regular find path
    self.cursorState.cursorId = result.cursorId;
    self.cursorState.documents = result.documents;
    self.cursorState.lastCursorId = result.cursorId;

    // Transform the results with passed in transformation method if provided
    if(self.cursorState.transforms && typeof self.cursorState.transforms.query == 'function') {
      self.cursorState.documents = self.cursorState.transforms.query(result);
    }

    // Return callback
    callback(null, null);
  }

  // If we have a raw query decorate the function
  if(self.options.raw || self.cmd.raw) {
    queryCallback.raw = self.options.raw || self.cmd.raw;
  }

  // Do we have documentsReturnedIn set on the query
  if(typeof self.query.documentsReturnedIn == 'string') {
    queryCallback.documentsReturnedIn = self.query.documentsReturnedIn;
  }

  // Set up callback
  self.callbacks.register(self.query.requestId, queryCallback);
  // Write the initial command out
  self.connection.write(self.query.toBin());
}
// Fetch the next batch from the server-side cursor via the topology's
// wire protocol handler.
Cursor.prototype._getmore = function(callback) {
  if(this.logger.isDebug()) this.logger.debug(f("schedule getMore call for query [%s]", JSON.stringify(this.query)))
  // Determine if it's a raw query
  var raw = this.options.raw || this.cmd.raw;
  // Clamp the batch size so we never fetch past the user-imposed limit
  var batchSize = this.cursorState.batchSize;
  var limit = this.cursorState.limit;
  if(limit > 0 && (this.cursorState.currentLimit + batchSize) > limit) {
    batchSize = limit - this.cursorState.currentLimit;
  }
  // Delegate the wire-level getMore to the protocol handler
  this.server.wireProtocolHandler.getMore(this.bson, this.ns, this.cursorState, batchSize, raw, this.connection, this.callbacks, this.options, callback);
}
// Mark the cursor dead locally and, when a live server-side cursor exists,
// issue a killCursors command for it.
Cursor.prototype._killcursor = function(callback) {
  // Flag dead/killed and drop any buffered documents
  this.cursorState.dead = true;
  this.cursorState.killed = true;
  this.cursorState.documents = [];
  // Nothing to kill server-side without a live, initialized cursor id
  var cursorId = this.cursorState.cursorId;
  if(cursorId == null || cursorId.isZero() || this.cursorState.init == false) {
    if(callback) callback(null, null);
    return;
  }
  // Execute the killCursors command over the wire
  this.server.wireProtocolHandler.killCursor(this.bson, this.ns, cursorId, this.connection, this.callbacks, callback);
}
/**
 * Clone the cursor: a clone is a fresh cursor over the same namespace,
 * command and options.
 * @method
 * @return {Cursor}
 */
Cursor.prototype.clone = function() {
  return this.topology.cursor(this.ns, this.cmd, this.options);
}
/**
 * Checks if the cursor is dead.
 * @method
 * @return {boolean} true when the cursor is dead
 */
Cursor.prototype.isDead = function() {
  return this.cursorState.dead === true;
}
/**
 * Checks if the cursor was killed by the application.
 * @method
 * @return {boolean} true when the cursor was explicitly killed
 */
Cursor.prototype.isKilled = function() {
  return this.cursorState.killed === true;
}
/**
 * Checks if the cursor has notified its caller about its death.
 * @method
 * @return {boolean} true when the exhaustion callback has fired
 */
Cursor.prototype.isNotified = function() {
  return this.cursorState.notified === true;
}
/**
 * Number of documents buffered locally but not yet consumed.
 * @method
 * @return {number}
 */
Cursor.prototype.bufferedCount = function() {
  var state = this.cursorState;
  return state.documents.length - state.cursorIndex;
}
/**
 * Consume up to `number` buffered documents, applying the configured doc
 * transform and honoring the cursor limit (reaching the limit also kills
 * the server-side cursor).
 * @method
 * @param {number} number maximum documents to read
 * @return {Array} the consumed documents
 */
Cursor.prototype.readBufferedDocuments = function(number) {
  var state = this.cursorState;
  var unread = state.documents.length - state.cursorIndex;
  // Read at most the requested count of still-unread documents
  // (a non-numeric `number` falls through to reading everything unread)
  var length = number < unread ? number : unread;
  var elements = state.documents.slice(state.cursorIndex, state.cursorIndex + length);
  // Apply the per-document transform when one was configured
  if(state.transforms && typeof state.transforms.doc == 'function') {
    for(var i = 0; i < elements.length; i++) {
      elements[i] = state.transforms.doc(elements[i]);
    }
  }
  // Never hand back more documents than the user-imposed limit allows;
  // hitting the limit also releases the server-side cursor
  if(state.limit > 0 && (state.currentLimit + elements.length) > state.limit) {
    elements = elements.slice(0, (state.limit - state.currentLimit));
    this.kill();
  }
  // Account for what was consumed
  state.currentLimit = state.currentLimit + elements.length;
  state.cursorIndex = state.cursorIndex + elements.length;
  return elements;
}
/**
 * Kill the cursor (local state plus the server-side cursor, if any).
 * @method
 * @param {resultCallback} callback A callback function
 */
Cursor.prototype.kill = function(callback) {
  this._killcursor(callback);
}
/**
 * Reset the cursor so it can be re-executed from the start.
 * Releases any live server-side cursor first.
 * @method
 * @return {null}
 */
Cursor.prototype.rewind = function() {
  var state = this.cursorState;
  // Only an initialized cursor needs resetting
  if(!state.init) return;
  // Release the server-side cursor before clearing local state
  if(!state.dead) {
    this.kill();
  }
  state.currentLimit = 0;
  state.init = false;
  state.dead = false;
  state.killed = false;
  state.notified = false;
  state.documents = [];
  state.cursorId = null;
  state.cursorIndex = 0;
}
/**
 * If the cursor's connection has died, finish the cursor and report a
 * descriptive error; returns whether the connection was dead.
 */
var isConnectionDead = function(self, callback) {
  var connection = self.connection;
  if(connection && !connection.isConnected()) {
    // Mark the cursor finished and drop any buffered documents
    self.cursorState.notified = true;
    self.cursorState.killed = true;
    self.cursorState.documents = [];
    self.cursorState.cursorIndex = 0;
    callback(MongoError.create(f('connection to host %s:%s was destroyed', connection.host, connection.port)))
    return true;
  }

  return false;
}
/**
 * Cursor died naturally (not user initiated): mark it killed and
 * notified, clear the buffers and complete the callback with null.
 * @return {boolean} true when the dead cursor was handled
 */
var isCursorDeadButNotkilled = function(self, callback) {
  var state = self.cursorState;
  // Only applies to a dead cursor that was not explicitly killed
  if(!state.dead || state.killed) return false;

  state.notified = true;
  state.killed = true;
  state.documents = [];
  state.cursorIndex = 0;
  handleCallback(callback, null, null);
  return true;
}
/**
 * Cursor is both dead and explicitly killed by the user: calling next
 * again is an error.
 * @return {boolean} true when the error was reported
 */
var isCursorDeadAndKilled = function(self, callback) {
  var state = self.cursorState;
  if(!(state.dead && state.killed)) return false;

  handleCallback(callback, MongoError.create("cursor is dead"));
  return true;
}
/**
 * Cursor was killed by the user: notify once with a null result and
 * discard any buffered documents.
 * @return {boolean} true when the killed cursor was handled
 */
var isCursorKilled = function(self, callback) {
  var state = self.cursorState;
  if(!state.killed) return false;

  state.notified = true;
  state.documents = [];
  state.cursorIndex = 0;
  handleCallback(callback, null, null);
  return true;
}
/**
 * Mark the cursor dead and notified, clear the buffered state and
 * signal completion (null result) through the callback.
 */
var setCursorDeadAndNotified = function(self, callback) {
  var state = self.cursorState;
  state.dead = true;
  state.notified = true;
  state.documents = [];
  state.cursorIndex = 0;
  handleCallback(callback, null, null);
}
/**
 * Mark the cursor as notified, clear the buffered state and signal
 * completion (null result) through the callback.
 */
var setCursorNotified = function(self, callback) {
  var state = self.cursorState;
  state.notified = true;
  state.documents = [];
  state.cursorIndex = 0;
  handleCallback(callback, null, null);
}
/**
 * Core iteration step backing Cursor.prototype.next. Lazily initializes
 * the cursor (server, connection, wire protocol command), streams exhaust
 * replies, issues the initial query and subsequent getMores, enforces the
 * cursor limit and delivers exactly one document (or null once exhausted)
 * per invocation through the callback.
 */
var nextFunction = function(self, callback) {
  // Exhaust message and cursor already finished and notified
  if(self.cmd.exhaust && self.cursorState.notified) return;

  // We have notified about it
  if(self.cursorState.notified) {
    return callback(new Error('cursor is exhausted'));
  }

  // Cursor is killed return null
  if(isCursorKilled(self, callback)) return;

  // Cursor is dead but not marked killed, return null
  if(isCursorDeadButNotkilled(self, callback)) return;

  // We have a dead and killed cursor, attempting to call next should error
  if(isCursorDeadAndKilled(self, callback)) return;

  // We have just started the cursor
  if(!self.cursorState.init) {
    // Topology is not connected, save the call in the provided store to be
    // Executed at some point when the handler deems it's reconnected
    if(!self.topology.isConnected(self.options) && self.disconnectHandler != null) {
      return self.disconnectHandler.addObjectAndMethod('cursor', self, 'next', [callback], callback);
    }

    try {
      // Get a server
      self.server = self.topology.getServer(self.options);
      // Get a connection
      self.connection = self.server.getConnection();
      // Get the callbacks
      self.callbacks = self.server.getCallbacks();
    } catch(err) {
      return callback(err);
    }

    // Set as init
    self.cursorState.init = true;

    try {
      // Get the right wire protocol command
      self.query = self.server.wireProtocolHandler.command(self.bson, self.ns, self.cmd, self.cursorState, self.topology, self.options);
    } catch(err) {
      return callback(err);
    }
  }

  // Process exhaust messages: the server keeps pushing replies for a single
  // request, so the handler re-registers itself until cursorId reaches zero
  var processExhaustMessages = function(err, result) {
    if(err) {
      self.cursorState.dead = true;
      self.callbacks.unregister(self.query.requestId);
      return callback(err);
    }

    // Concatenate all the documents
    self.cursorState.documents = self.cursorState.documents.concat(result.documents);

    // If we have no documents left
    if(Long.ZERO.equals(result.cursorId)) {
      self.cursorState.cursorId = Long.ZERO;
      self.callbacks.unregister(self.query.requestId);
      return nextFunction(self, callback);
    }

    // Set up next listener
    self.callbacks.register(result.requestId, processExhaustMessages)

    // Initial result: record the cursor id and resume iteration
    if(self.cursorState.cursorId == null) {
      self.cursorState.cursorId = result.cursorId;
      self.cursorState.lastCursorId = result.cursorId;
      nextFunction(self, callback);
    }
  }

  // If we have exhaust
  if(self.cmd.exhaust && self.cursorState.cursorId == null) {
    // Handle all the exhaust responses
    self.callbacks.register(self.query.requestId, processExhaustMessages);
    // Write the initial command out
    return self.connection.write(self.query.toBin());
  } else if(self.cmd.exhaust && self.cursorState.cursorIndex < self.cursorState.documents.length) {
    // Drain documents already buffered by the exhaust stream
    return handleCallback(callback, null, self.cursorState.documents[self.cursorState.cursorIndex++]);
  } else if(self.cmd.exhaust && Long.ZERO.equals(self.cursorState.cursorId)) {
    self.callbacks.unregister(self.query.requestId);
    return setCursorNotified(self, callback);
  } else if(self.cmd.exhaust) {
    // Waiting on more exhaust replies to arrive; poll again shortly
    // NOTE(review): 1ms setTimeout polling loop — busy-waits until replies land
    return setTimeout(function() {
      if(Long.ZERO.equals(self.cursorState.cursorId)) return;
      nextFunction(self, callback);
    }, 1);
  }

  // If we don't have a cursorId execute the first query
  if(self.cursorState.cursorId == null) {
    // Check if connection is dead and return if not possible to
    // execute the query against the db
    if(isConnectionDead(self, callback)) return;

    // Check if topology is destroyed
    if(self.topology.isDestroyed()) return callback(new MongoError(f('connection destroyed, not possible to instantiate cursor')));

    // query, cmd, options, cursorState, callback
    self._find(function(err, r) {
      if(err) return handleCallback(callback, err, null);
      // An empty non-tailable first batch means the cursor is already done
      if(self.cursorState.documents.length == 0 && !self.cmd.tailable && !self.cmd.awaitData) {
        return setCursorNotified(self, callback);
      }

      nextFunction(self, callback);
    });
  } else if(self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
    // Ensure we kill the cursor on the server
    self.kill();
    // Set cursor in dead and notified state
    return setCursorDeadAndNotified(self, callback);
  } else if(self.cursorState.cursorIndex == self.cursorState.documents.length
      && !Long.ZERO.equals(self.cursorState.cursorId)) {
      // Local buffer drained but the server cursor is still live: page in more
      // Ensure an empty cursor state
      self.cursorState.documents = [];
      self.cursorState.cursorIndex = 0;

      // Check if topology is destroyed
      if(self.topology.isDestroyed()) return callback(new MongoError(f('connection destroyed, not possible to instantiate cursor')));

      // Check if connection is dead and return if not possible to
      // execute a getmore on this connection
      if(isConnectionDead(self, callback)) return;

      // Execute the next get more
      self._getmore(function(err, doc) {
        if(err) return handleCallback(callback, err);
        // Server signalled the cursor is fully drained
        if(self.cursorState.documents.length == 0
          && Long.ZERO.equals(self.cursorState.cursorId) && !self.cmd.tailable) {
          self.cursorState.dead = true;

          // Finished iterating over the cursor
          return setCursorDeadAndNotified(self, callback);
        }

        // Tailable cursor getMore result, notify owner about it
        // No attempt is made here to retry, this is left to the user of the
        // core module to handle to keep core simple
        if(self.cursorState.documents.length == 0 && self.cmd.tailable) {
          return handleCallback(callback, MongoError.create({
              message: "No more documents in tailed cursor"
            , tailable: self.cmd.tailable
            , awaitData: self.cmd.awaitData
          }));
        }

        if(self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
          return setCursorDeadAndNotified(self, callback);
        }

        nextFunction(self, callback);
      });
  } else if(self.cursorState.documents.length == self.cursorState.cursorIndex
    && self.cmd.tailable) {
      // Tailable cursor with an empty buffer: surface a retryable error
      return handleCallback(callback, MongoError.create({
          message: "No more documents in tailed cursor"
        , tailable: self.cmd.tailable
        , awaitData: self.cmd.awaitData
      }));
  } else if(self.cursorState.documents.length == self.cursorState.cursorIndex
    && Long.ZERO.equals(self.cursorState.cursorId)) {
      // Buffer drained and server cursor closed: iteration is complete
      setCursorDeadAndNotified(self, callback);
  } else {
    if(self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
      // Ensure we kill the cursor on the server
      self.kill();
      // Set cursor in dead and notified state
      return setCursorDeadAndNotified(self, callback);
    }

    // Increment the current cursor limit
    self.cursorState.currentLimit += 1;

    // Get the document
    var doc = self.cursorState.documents[self.cursorState.cursorIndex++];

    // Doc overflow: server returned an error document
    if(doc.$err) {
      // Ensure we kill the cursor on the server
      self.kill();
      // Set cursor in dead and notified state
      return setCursorDeadAndNotified(self, function() {
        handleCallback(callback, new MongoError(doc.$err));
      });
    }

    // Transform the doc with passed in transformation method if provided
    if(self.cursorState.transforms && typeof self.cursorState.transforms.doc == 'function') {
      doc = self.cursorState.transforms.doc(doc);
    }

    // Return the document
    handleCallback(callback, null, doc);
  }
}
/**
 * Retrieve the next document from the cursor
 * @method
 * @param {resultCallback} callback Called with (err, doc); doc is null
 *     once the cursor has been exhausted
 */
Cursor.prototype.next = function(callback) {
  nextFunction(this, callback);
}
module.exports = Cursor;

44
node_modules/mongodb-core/lib/error.js generated vendored Normal file
View File

@@ -0,0 +1,44 @@
"use strict";
/**
 * Creates a new MongoError
 * @class
 * @augments Error
 * @param {string} message The error message
 * @return {MongoError} A MongoError instance
 */
function MongoError(message) {
  this.name = 'MongoError';
  this.message = message;
  // Attach a stack trace that starts at the caller of this constructor
  Error.captureStackTrace(this, MongoError);
}

// Extend JavaScript error via a clean prototype object. The previous
// `MongoError.prototype = new Error` baked a single shared Error instance
// (and its load-time stack) into the prototype and left `constructor`
// pointing at Error.
MongoError.prototype = Object.create(Error.prototype);
MongoError.prototype.constructor = MongoError;

/**
 * Creates a new MongoError object from an Error, a string or an
 * error-shaped document (server responses use message, errmsg or $err)
 * @method
 * @param {object|string|Error} options The error options
 * @return {MongoError} A MongoError instance
 */
MongoError.create = function(options) {
  var err = null;

  if(options instanceof Error) {
    err = new MongoError(options.message);
    // Preserve the original stack trace
    err.stack = options.stack;
  } else if(typeof options == 'string') {
    err = new MongoError(options);
  } else {
    err = new MongoError(options.message || options.errmsg || options.$err || "n/a");
    // Copy all remaining fields (code, ok, tailable, ...) onto the error
    for(var name in options) {
      err[name] = options[name];
    }
  }

  return err;
}
module.exports = MongoError;

59
node_modules/mongodb-core/lib/tools/smoke_plugin.js generated vendored Normal file
View File

@@ -0,0 +1,59 @@
var fs = require('fs');
/* Note: because this plugin uses process.on('uncaughtException'), only one
* of these can exist at any given time. This plugin and anything else that
* uses process.on('uncaughtException') will conflict. */
exports.attachToRunner = function(runner, outputFile) {
var smokeOutput = { results : [] };
var runningTests = {};
var integraPlugin = {
beforeTest: function(test, callback) {
test.startTime = Date.now();
runningTests[test.name] = test;
callback();
},
afterTest: function(test, callback) {
smokeOutput.results.push({
status: test.status,
start: test.startTime,
end: Date.now(),
test_file: test.name,
exit_code: 0,
url: ""
});
delete runningTests[test.name];
callback();
},
beforeExit: function(obj, callback) {
fs.writeFile(outputFile, JSON.stringify(smokeOutput), function() {
callback();
});
}
};
// In case of exception, make sure we write file
process.on('uncaughtException', function(err) {
// Mark all currently running tests as failed
for (var testName in runningTests) {
smokeOutput.results.push({
status: "fail",
start: runningTests[testName].startTime,
end: Date.now(),
test_file: testName,
exit_code: 0,
url: ""
});
}
// write file
fs.writeFileSync(outputFile, JSON.stringify(smokeOutput));
// Standard NodeJS uncaught exception handler
console.error(err.stack);
process.exit(1);
});
runner.plugin(integraPlugin);
return integraPlugin;
};

View File

@@ -0,0 +1,37 @@
"use strict";
var setProperty = require('../connection/utils').setProperty
, getProperty = require('../connection/utils').getProperty
, getSingleProperty = require('../connection/utils').getSingleProperty;
/**
 * Creates a new CommandResult instance
 * @class
 * @param {object} result The raw command response document
 * @param {Connection} connection A connection instance associated with this result
 * @return {CommandResult} A CommandResult instance
 */
var CommandResult = function(result, connection) {
  // Raw response document and the connection that produced it
  this.result = result;
  this.connection = connection;
}

/**
 * Convert CommandResult to JSON
 * @method
 * @return {object} The raw command response document
 */
CommandResult.prototype.toJSON = function() {
  return this.result;
}

/**
 * Convert CommandResult to String representation
 * @method
 * @return {string} JSON serialization of the raw response document
 */
CommandResult.prototype.toString = function() {
  return JSON.stringify(this.toJSON());
}
module.exports = CommandResult;

1042
node_modules/mongodb-core/lib/topologies/mongos.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,106 @@
"use strict";
// Read modes that require the slaveOk wire-protocol bit to be set
var needSlaveOk = ['primaryPreferred', 'secondary', 'secondaryPreferred', 'nearest'];

/**
 * @fileOverview The **ReadPreference** class is a class that represents a MongoDB ReadPreference and is
 * used to construct connections.
 *
 * @example
 * var ReplSet = require('mongodb-core').ReplSet
 *   , ReadPreference = require('mongodb-core').ReadPreference
 *   , assert = require('assert');
 *
 * var server = new ReplSet([{host: 'localhost', port: 30000}], {setName: 'rs'});
 * // Wait for the connection event
 * server.on('connect', function(server) {
 *   var cursor = server.cursor('db.test'
 *     , {find: 'db.test', query: {}}
 *     , {readPreference: new ReadPreference('secondary')});
 *   cursor.next(function(err, doc) {
 *     server.destroy();
 *   });
 * });
 *
 * // Start connecting
 * server.connect();
 */

/**
 * Creates a new ReadPreference instance
 * @class
 * @param {string} preference A string describing the preference (primary|primaryPreferred|secondary|secondaryPreferred|nearest)
 * @param {object} tags The tags object
 * @param {object} [options] Additional read preference options
 * @property {string} preference The preference string (primary|primaryPreferred|secondary|secondaryPreferred|nearest)
 * @property {object} tags The tags object
 * @property {object} options Additional read preference options
 * @return {ReadPreference}
 */
var ReadPreference = function(preference, tags, options) {
  this.preference = preference;
  this.tags = tags;
  this.options = options;
}

/**
 * Does this read preference require the slaveOk bit to be set
 * @method
 * @return {boolean}
 */
ReadPreference.prototype.slaveOk = function() {
  // Every mode except plain primary may read from a secondary
  for(var i = 0; i < needSlaveOk.length; i++) {
    if(needSlaveOk[i] == this.preference) return true;
  }

  return false;
}

/**
 * Are the two read preferences equal (mode only; tags are not compared)
 * @method
 * @return {boolean}
 */
ReadPreference.prototype.equals = function(readPreference) {
  return this.preference == readPreference.preference;
}

/**
 * Return JSON representation in wire format ({mode, tags?})
 * @method
 * @return {Object}
 */
ReadPreference.prototype.toJSON = function() {
  var json = {mode: this.preference};
  if(Array.isArray(this.tags)) json.tags = this.tags;
  return json;
}

/**
 * Primary read preference
 * @method
 * @return {ReadPreference}
 */
ReadPreference.primary = new ReadPreference('primary');

/**
 * Primary Preferred read preference
 * @method
 * @return {ReadPreference}
 */
ReadPreference.primaryPreferred = new ReadPreference('primaryPreferred');

/**
 * Secondary read preference
 * @method
 * @return {ReadPreference}
 */
ReadPreference.secondary = new ReadPreference('secondary');

/**
 * Secondary Preferred read preference
 * @method
 * @return {ReadPreference}
 */
ReadPreference.secondaryPreferred = new ReadPreference('secondaryPreferred');

/**
 * Nearest read preference
 * @method
 * @return {ReadPreference}
 */
ReadPreference.nearest = new ReadPreference('nearest');
module.exports = ReadPreference;

1487
node_modules/mongodb-core/lib/topologies/replset.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,505 @@
"use strict";
var Logger = require('../connection/logger')
, f = require('util').format
, ObjectId = require('bson').ObjectId
, MongoError = require('../error');
var DISCONNECTED = 'disconnected';
var CONNECTING = 'connecting';
var CONNECTED = 'connected';
var DESTROYED = 'destroyed';
/**
 * Creates a new Replicaset State object, tracking which known servers are
 * currently primary, secondaries, arbiters or passives.
 * @class
 * @param {ReplSet} replSet The replicaset topology this state belongs to
 * @param {object} options Options (id, setName, connectingServers, secondaryOnlyConnectionAllowed)
 * @property {object} primary Primary property
 * @property {array} secondaries List of secondaries
 * @property {array} arbiters List of arbiters
 * @return {State} A State instance
 */
var State = function(replSet, options) {
  this.replSet = replSet;
  this.options = options;
  // Server buckets by current role
  this.secondaries = [];
  this.arbiters = [];
  this.passives = [];
  this.primary = null;
  // Initial state is disconnected
  this.state = DISCONNECTED;
  // Current electionId, used to reject stale primaries
  this.electionId = null;
  // Get a logger instance
  this.logger = Logger('ReplSet', options);
  // Unpacked options
  this.id = options.id;
  this.setName = options.setName;
  this.connectingServers = options.connectingServers;
  this.secondaryOnlyConnectionAllowed = options.secondaryOnlyConnectionAllowed;
}
/**
 * Is there at least one connected secondary
 * @method
 * @return {boolean}
 */
State.prototype.isSecondaryConnected = function() {
  return this.secondaries.some(function(server) {
    return server.isConnected();
  });
}
/**
 * Is there a connected primary
 * @method
 * @return {boolean}
 */
State.prototype.isPrimaryConnected = function() {
  var primary = this.primary;
  return primary != null && primary.isConnected();
}
/**
 * Is the given address the current primary
 * @method
 * @param {string} address Server address
 * @return {boolean}
 */
State.prototype.isPrimary = function(address) {
  var primary = this.primary;
  if(primary == null) return false;
  return primary.equals(address);
}
/**
 * Is the given address currently registered as a secondary
 * @method
 * @param {string} address Server address
 * @return {boolean}
 */
State.prototype.isSecondary = function(address) {
  return this.secondaries.some(function(server) {
    return server.equals(address);
  });
}
/**
 * Is the given address currently registered as a passive member
 * @method
 * @param {string} address Server address
 * @return {boolean}
 */
State.prototype.isPassive = function(address) {
  return this.passives.some(function(server) {
    return server.equals(address);
  });
}
/**
 * Does the replicaset state contain this server in any role
 * @method
 * @param {string} address Server address
 * @return {boolean}
 */
State.prototype.contains = function(address) {
  var matches = function(server) {
    return server.equals(address);
  };

  if(this.primary && matches(this.primary)) return true;
  if(this.secondaries.some(matches)) return true;
  if(this.arbiters.some(matches)) return true;
  return this.passives.some(matches);
}
/**
 * Drop references to servers that are no longer connected.
 * NOTE(review): the passives list is not pruned here — confirm intentional.
 * @method
 */
State.prototype.clean = function() {
  var connected = function(server) {
    return server.isConnected();
  };

  if(this.primary != null && !connected(this.primary)) {
    this.primary = null;
  }

  // Keep only live secondaries and arbiters
  this.secondaries = this.secondaries.filter(connected);
  this.arbiters = this.arbiters.filter(connected);
}
/**
 * Destroy the state, tearing down all tracked server connections.
 * NOTE(review): passives that are not also listed as secondaries are not
 * destroyed here — verify against callers.
 * @method
 */
State.prototype.destroy = function() {
  this.state = DESTROYED;

  var shutdown = function(server) {
    server.destroy();
  };

  if(this.primary) this.primary.destroy();
  this.secondaries.forEach(shutdown);
  this.arbiters.forEach(shutdown);
}
/**
 * Remove server from state
 * @method
 * @param {Server} server Server to remove
 * @return {string} Type of server removed (primary|secondary|passive|arbiter),
 *     classified by the server's last cached isMaster document when it was
 *     not found in the arbiter list
 */
State.prototype.remove = function(server) {
  // Clear the primary slot when it matches
  if(this.primary && this.primary.equals(server)) {
    this.primary = null;
  }

  var length = this.arbiters.length;
  // Filter out the server from the arbiters
  this.arbiters = this.arbiters.filter(function(s) {
    return !s.equals(server);
  });
  if(this.arbiters.length < length) return 'arbiter';

  var length = this.passives.length;
  // Filter out the server from the passives
  this.passives = this.passives.filter(function(s) {
    return !s.equals(server);
  });

  // We have removed a passive
  if(this.passives.length < length)  {
    // Ensure we removed it from the list of secondaries as well if it exists
    this.secondaries = this.secondaries.filter(function(s) {
      return !s.equals(server);
    });
  }

  // Filter out the server from the secondaries
  this.secondaries = this.secondaries.filter(function(s) {
    return !s.equals(server);
  });

  // Get the isMaster: the return value is derived from the server's own
  // last known role, not from which list it was actually removed from
  var isMaster = server.lastIsMaster();
  // Return primary if the server was primary
  if(isMaster.ismaster) return 'primary';
  if(isMaster.secondary) return 'secondary';
  if(isMaster.passive) return 'passive';
  return 'arbiter';
}
/**
 * Get the tracked server instance (primary or secondary) matching an address
 * @method
 * @param {string} server Server address
 * @return {Server} The matching server, or undefined when unknown
 */
State.prototype.get = function(server) {
  // Candidates are the primary (when set) followed by all secondaries
  var candidates = this.primary ? [this.primary] : [];
  candidates = candidates.concat(this.secondaries);

  for(var i = 0; i < candidates.length; i++) {
    if(candidates[i].equals(server)) {
      return candidates[i];
    }
  }
}
/**
 * Get all the data-bearing servers in the set (primary first), optionally
 * including arbiters
 * @method
 * @param {boolean} [options.includeArbiters] Include arbiters in returned server list
 * @return {array}
 */
State.prototype.getAll = function(options) {
  options = options || {};

  var servers = this.primary ? [this.primary] : [];
  servers = servers.concat(this.secondaries);

  // Include the arbiters on request
  if(options.includeArbiters) {
    servers = servers.concat(this.arbiters);
  }

  return servers;
}
/**
 * All raw connections across the tracked servers (primary first)
 * @method
 * @param {boolean} [options.includeArbiters] Include arbiters' connections as well
 * @return {array}
 */
State.prototype.getAllConnections = function(options) {
  var connections = [];

  // getAll yields primary, secondaries and (optionally) arbiters in the
  // same order the connections were previously concatenated
  this.getAll(options || {}).forEach(function(server) {
    connections = connections.concat(server.connections());
  });

  return connections;
}
/**
 * Return JSON object describing the current set membership
 * @method
 * @return {object} {primary: address|null, secondaries: [address, ...]}
 */
State.prototype.toJSON = function() {
  var secondaries = this.secondaries.map(function(server) {
    return server.lastIsMaster().me
  });

  return {
    primary: this.primary ? this.primary.lastIsMaster().me : null
    , secondaries: secondaries
  }
}
/**
 * Returns the last known ismaster document for this set, preferring the
 * primary and falling back to the first secondary
 * @method
 * @return {object} The ismaster document, or {} when no server is known
 */
State.prototype.lastIsMaster = function() {
  var source = this.primary || this.secondaries[0];
  return source ? source.lastIsMaster() : {};
}
/**
 * Promote a server to primary
 * @method
 * @param {Server} server Server we wish to promote
 */
State.prototype.promotePrimary = function(server) {
  var known = this.get(server);

  // Server does not exist in the state, install it as the new primary
  if(known == null) {
    this.primary = server;
    return;
  }

  // Known server: detach the existing instance from its current role,
  // then make that instance the primary
  this.remove(known);
  this.primary = known;
}
/**
 * Append a server to a role list unless an equal server is already present
 * @param {array} list Target role list
 * @param {Server} server Server to insert
 * @return {boolean} true when the server was added
 */
var add = function(list, server) {
  var exists = list.some(function(entry) {
    return entry.equals(server);
  });

  if(exists) return false;
  list.push(server);
  return true;
}
/**
 * Add server to the list of secondaries
 * @method
 * @param {Server} server Server we wish to add
 * @return {boolean} true when the server was not already present
 */
State.prototype.addSecondary = function(server) {
  return add(this.secondaries, server);
}

/**
 * Add server to the list of arbiters
 * @method
 * @param {Server} server Server we wish to add
 * @return {boolean} true when the server was not already present
 */
State.prototype.addArbiter = function(server) {
  return add(this.arbiters, server);
}

/**
 * Add server to the list of passives
 * @method
 * @param {Server} server Server we wish to add
 * @return {boolean} true when the server was not already present
 */
State.prototype.addPassive = function(server) {
  return add(this.passives, server);
}
/**
 * Lexicographically compare two ObjectId values by their byte representation.
 * @param {ObjectId} id1 Left hand id
 * @param {ObjectId} id2 Right hand id
 * @return {number} -1 when id1 < id2, 0 when equal, 1 when id1 > id2
 */
var compareObjectIds = function(id1, id2) {
  // Buffer.from is the non-deprecated, non-unsafe constructor; fall back to
  // new Buffer only on legacy runtimes that predate it. The old
  // reference-equality fast path (`a === b`) was dead code: two freshly
  // allocated Buffers are never the same object.
  var a = Buffer.from ? Buffer.from(id1.toHexString(), 'hex') : new Buffer(id1.toHexString(), 'hex');
  var b = Buffer.from ? Buffer.from(id2.toHexString(), 'hex') : new Buffer(id2.toHexString(), 'hex');

  if(typeof Buffer.compare === 'function') {
    return Buffer.compare(a, b);
  }

  // Manual lexicographic comparison for runtimes without Buffer.compare:
  // compare byte-by-byte, falling back to length comparison on a shared prefix
  var x = a.length;
  var y = b.length;
  var len = Math.min(x, y);

  for (var i = 0; i < len; i++) {
    if (a[i] !== b[i]) {
      break;
    }
  }

  if (i !== len) {
    x = a[i];
    y = b[i];
  }

  return x < y ? -1 : y < x ? 1 : 0;
}
/**
 * Update the state given a specific ismaster result: classifies the source
 * server as primary/secondary/passive/arbiter, validates the set name,
 * tracks electionIds to reject stale primaries, and emits topology events
 * ('joined', 'left', 'connect', 'reconnect', 'error') on the replset.
 * @method
 * @param {object} ismaster IsMaster result
 * @param {Server} server IsMaster Server source
 * @return {boolean} true when the update was applied to the state
 */
State.prototype.update = function(ismaster, server) {
  var self = this;

  // Not in a known connection valid state
  if((!ismaster.ismaster && !ismaster.secondary && !ismaster.arbiterOnly) || !Array.isArray(ismaster.hosts)) {
    // Remove the state
    var result = self.remove(server);
    if(self.state == CONNECTED)  {
      if(self.logger.isInfo()) self.logger.info(f('[%s] removing %s from set', self.id, ismaster.me));
      // NOTE(review): remove() already ran above (its result is unused); this
      // second call re-classifies an already-removed server — confirm intended
      self.replSet.emit('left', self.remove(server), server);
    }

    return false;
  }

  // Set the setName if it's not set from the first server
  if(self.setName == null && ismaster.setName) {
    if(self.logger.isInfo()) self.logger.info(f('[%s] setting setName to %s', self.id, ismaster.setName));
    self.setName = ismaster.setName;
  }

  // Check if the replicaset name matches the provided one
  if(ismaster.setName && self.setName != ismaster.setName) {
    if(self.logger.isError()) self.logger.error(f('[%s] server in replset %s is not part of the specified setName %s', self.id, ismaster.setName, self.setName));
    self.remove(server);
    self.replSet.emit('error', new MongoError("provided setName for Replicaset Connection does not match setName found in server seedlist"));

    return false;
  }

  // Log information
  if(self.logger.isInfo()) self.logger.info(f('[%s] updating replicaset state %s', self.id, JSON.stringify(this)));

  // It's a master set it
  if(ismaster.ismaster && self.setName == ismaster.setName && !self.isPrimary(ismaster.me)) {
    // Check if the electionId is not null: only accept equal or newer
    // electionIds so a stale primary cannot reclaim the slot
    if(ismaster.electionId instanceof ObjectId && self.electionId instanceof ObjectId) {
      if(compareObjectIds(self.electionId, ismaster.electionId) == -1) {
        self.electionId = ismaster.electionId;
      } else if(compareObjectIds(self.electionId, ismaster.electionId) == 0) {
        self.electionId = ismaster.electionId;
      } else {
        return false;
      }
    }

    // Initial electionId
    if(ismaster.electionId instanceof ObjectId && self.electionId == null) {
      self.electionId = ismaster.electionId;
    }

    // Promote to primary
    self.promotePrimary(server);
    // Log change of primary
    if(self.logger.isInfo()) self.logger.info(f('[%s] promoting %s to primary', self.id, ismaster.me));
    // Emit primary
    self.replSet.emit('joined', 'primary', this.primary);

    // We are connected
    if(self.state == CONNECTING) {
      self.state = CONNECTED;
      self.replSet.emit('connect', self.replSet);
    } else {
      self.state = CONNECTED;
      self.replSet.emit('reconnect', server);
    }
  } else if(!ismaster.ismaster && self.setName == ismaster.setName
    && ismaster.arbiterOnly) {
      if(self.addArbiter(server)) {
        if(self.logger.isInfo()) self.logger.info(f('[%s] promoting %s to arbiter', self.id, ismaster.me));
        self.replSet.emit('joined', 'arbiter', server);
        return true;
      };

      return false;
  } else if(!ismaster.ismaster && self.setName == ismaster.setName
    && ismaster.secondary && ismaster.passive) {
      if(self.addPassive(server) && self.addSecondary(server)) {
        if(self.logger.isInfo()) self.logger.info(f('[%s] promoting %s to passive', self.id, ismaster.me));
        self.replSet.emit('joined', 'passive', server);

        // If we have secondaryOnlyConnectionAllowed and just a passive it's
        // still a valid connection
        if(self.secondaryOnlyConnectionAllowed && self.state == CONNECTING) {
          self.state = CONNECTED;
          self.replSet.emit('connect', self.replSet);
        }

        return true;
      };

      return false;
  } else if(!ismaster.ismaster && self.setName == ismaster.setName
    && ismaster.secondary) {
      if(self.addSecondary(server)) {
        if(self.logger.isInfo()) self.logger.info(f('[%s] promoting %s to secondary', self.id, ismaster.me));
        self.replSet.emit('joined', 'secondary', server);

        // A secondary alone is a valid connection when secondary-only
        // connections are allowed
        if(self.secondaryOnlyConnectionAllowed && self.state == CONNECTING) {
          self.state = CONNECTED;
          self.replSet.emit('connect', self.replSet);
        }

        return true;
      };

      return false;
  }

  // Return update applied
  return true;
}
module.exports = State;

1264
node_modules/mongodb-core/lib/topologies/server.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

93
node_modules/mongodb-core/lib/topologies/session.js generated vendored Normal file
View File

@@ -0,0 +1,93 @@
"use strict";
var inherits = require('util').inherits
, f = require('util').format
, EventEmitter = require('events').EventEmitter;
/**
 * Creates a new Authentication Session
 * @class
 * @param {object} [options] Options for the session
 * @param {{Server}|{ReplSet}|{Mongos}} topology The topology instance underpinning the session
 */
var Session = function(options, topology) {
  // Hold on to the configuration and the underlying topology
  this.options = options;
  this.topology = topology;

  // Sessions are event emitters
  EventEmitter.call(this);
}

inherits(Session, EventEmitter);
/**
 * Execute a command against the topology this session wraps
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {object} cmd The command hash
 * @param {object} [options.readPreference] Specify read preference if command supports it
 * @param {object} [options.connection] Specify connection object to execute command against
 * @param {opResultCallback} callback A callback function
 */
Session.prototype.command = function(ns, cmd, options, callback) {
  // Straight delegation; the session adds no behavior of its own here
  this.topology.command(ns, cmd, options, callback);
}
/**
 * Insert one or more documents through the underlying topology
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {array} ops An array of documents to insert
 * @param {boolean} [options.ordered=true] Execute in order or out of order
 * @param {object} [options.writeConcern={}] Write concern for the operation
 * @param {opResultCallback} callback A callback function
 */
Session.prototype.insert = function(ns, ops, options, callback) {
  // Straight delegation to the topology
  this.topology.insert(ns, ops, options, callback);
}
/**
 * Perform one or more update operations through the underlying topology
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {array} ops An array of updates
 * @param {boolean} [options.ordered=true] Execute in order or out of order
 * @param {object} [options.writeConcern={}] Write concern for the operation
 * @param {opResultCallback} callback A callback function
 */
Session.prototype.update = function(ns, ops, options, callback) {
  // Straight delegation to the topology
  this.topology.update(ns, ops, options, callback);
}
/**
 * Perform one or more remove operations through the underlying topology
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {array} ops An array of removes
 * @param {boolean} [options.ordered=true] Execute in order or out of order
 * @param {object} [options.writeConcern={}] Write concern for the operation
 * @param {opResultCallback} callback A callback function
 */
Session.prototype.remove = function(ns, ops, options, callback) {
  // Straight delegation to the topology
  this.topology.remove(ns, ops, options, callback);
}
/**
 * Create a cursor through the underlying topology
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {{object}|{Long}} cmd Can be either a command returning a cursor or a cursorId
 * @param {object} [options.batchSize=0] Batchsize for the operation
 * @param {array} [options.documents=[]] Initial documents list for cursor
 * @param {boolean} [options.tailable=false] Tailable flag set
 * @param {boolean} [options.oplogReply=false] oplogReply flag set
 * @param {boolean} [options.awaitdata=false] awaitdata flag set
 * @param {boolean} [options.exhaust=false] exhaust flag set
 * @param {boolean} [options.partial=false] partial flag set
 * @return {Cursor} The cursor created by the topology
 */
Session.prototype.cursor = function(ns, cmd, options) {
  // Straight delegation to the topology
  return this.topology.cursor(ns, cmd, options);
}
module.exports = Session;

View File

@@ -0,0 +1,276 @@
"use strict";
var Logger = require('../../connection/logger')
, EventEmitter = require('events').EventEmitter
, inherits = require('util').inherits
, f = require('util').format;
/**
 * Creates a new Ping read preference strategy instance
 * @class
 * @param {object} [options] Strategy options
 * @param {number} [options.pingInterval=10000] Ping interval to check the response time to the different servers
 * @param {number} [options.acceptableLatency=15] Acceptable latency for selecting a server for reading (in milliseconds)
 * @param {boolean} [options.debug=false] Expose the collected ping data via the `data` property
 * @return {Ping} A Ping strategy instance
 */
var Ping = function(options) {
  // Add event listener
  EventEmitter.call(this);

  // Normalize options once up front: the previous code defaulted
  // `options || {}` inside the state object but still dereferenced the raw
  // argument (options.pingInterval etc.), crashing on `new Ping()`
  options = options || {};

  // Contains the ping state
  this.s = {
    // Contains all the ping data
    pings: {}
    // Options (already defaulted to an empty object)
    , options: options
    // Logger
    , logger: Logger('Ping', options)
    // Ping interval
    , pingInterval: options.pingInterval || 10000
    , acceptableLatency: options.acceptableLatency || 15
    // Debug options
    , debug: typeof options.debug == 'boolean' ? options.debug : false
    // Index
    , index: 0
    // Current ping time
    , lastPing: null
  }

  // Log the options set
  if(this.s.logger.isDebug()) this.s.logger.debug(f('ping strategy interval [%s], acceptableLatency [%s]', this.s.pingInterval, this.s.acceptableLatency));

  // If we have enabled debug
  if(this.s.debug) {
    // Add access to the read Preference Strategies
    Object.defineProperty(this, 'data', {
      enumerable: true, get: function() { return this.s.pings; }
    });
  }
}

inherits(Ping, EventEmitter);
/**
 * Keep only the servers whose ismaster tag set satisfies every tag
 * requested by the read preference. A null tag set matches everything.
 * @ignore
 */
var filterByTags = function(readPreference, servers) {
  var tags = readPreference.tags;
  // No tag constraints: every candidate qualifies
  if(tags == null) return servers;

  // A server matches when each requested tag is present with the same value
  return servers.filter(function(server) {
    var serverTag = server.lastIsMaster().tags || {};
    return Object.keys(tags).every(function(name) {
      return serverTag[name] == tags[name];
    });
  });
}
/**
 * Pick a server for reading, preferring the lowest measured ping latency
 * within an acceptable-latency window, round-robining among candidates.
 * @method
 * @param {State} set The current replicaset state object
 * @param {ReadPreference} readPreference The current readPreference object
 * @return {Server} the selected server, the primary as a fallback, or null
 */
Ping.prototype.pickServer = function(set, readPreference) {
  var self = this;
  // Candidate pool is the primary plus all secondaries
  var servers = [];
  if(set.primary) {
    servers.push(set.primary);
  }

  for(var i = 0; i < set.secondaries.length; i++) {
    servers.push(set.secondaries[i]);
  }

  // Filter by tags
  servers = filterByTags(readPreference, servers);

  // Transform the candidates into {name, time} pairs; servers without a
  // measurement yet default to a latency of 0
  var serverList = [];
  for(var i = 0; i < servers.length; i++) {
    serverList.push({name: servers[i].name, time: self.s.pings[servers[i].name] || 0});
  }

  // Sort ascending by ping time. The comparator must return a signed number;
  // the previous boolean comparator (a.time > b.time) never returned a
  // negative value and produced an incorrect, engine-dependent ordering.
  serverList.sort(function(a, b) {
    return a.time - b.time;
  });

  // Locate lowest time (picked servers are lowest time + acceptable latency margin)
  var lowest = serverList.length > 0 ? serverList[0].time : 0;

  // Filter by latency window
  serverList = serverList.filter(function(s) {
    return s.time <= lowest + self.s.acceptableLatency;
  });

  // No servers, default to primary
  if(serverList.length == 0 && set.primary) {
    if(self.s.logger.isInfo()) self.s.logger.info(f('picked primary server [%s]', set.primary.name));
    return set.primary;
  } else if(serverList.length == 0) {
    return null
  }

  // We picked first server
  if(self.s.logger.isInfo()) self.s.logger.info(f('picked server [%s] with ping latency [%s]', serverList[0].name, serverList[0].time));

  // Advance the round-robin index over the acceptable candidates
  self.s.index = (self.s.index + 1) % serverList.length;

  // Return the server matching the selected entry
  return set.get(serverList[self.s.index].name);
}
/**
 * Start of an operation
 * @method
 * @param {Server} server The server the operation is running against
 * @param {object} query The operation running
 * @param {Date} date The start time of the operation
 * @return {object}
 */
Ping.prototype.startOperation = function(server, query, date) {
  // Intentionally a no-op: this strategy measures latency via explicit
  // ismaster pings (see connect/ha), not by timing user operations.
}
/**
 * End of an operation
 * @method
 * @param {Server} server The server the operation is running against
 * @param {error} err An error from the operation
 * @param {object} result The result from the operation
 * @param {Date} date The start time of the operation
 * @return {object}
 */
Ping.prototype.endOperation = function(server, err, result, date) {
  // Intentionally a no-op: see startOperation.
}
/**
 * High availability process running: ping every known server with ismaster,
 * record per-server latency, and invoke the callback once all pings return.
 * Skipped entirely (callback fires immediately) if the last sweep was less
 * than pingInterval milliseconds ago.
 * @method
 * @param {Mongos|ReplSet} topology The topology to emit the 'ping' event on
 * @param {State} state The current replicaset state object
 * @param {resultCallback} callback The callback to return the result from the function
 * @return {object}
 */
Ping.prototype.ha = function(topology, state, callback) {
  var self = this;
  var servers = state.getAll();
  var count = servers.length;

  // No servers return
  if(servers.length == 0) return callback(null, null);

  // Return if we have not yet reached the ping interval
  if(self.s.lastPing != null) {
    var diff = new Date().getTime() - self.s.lastPing.getTime();
    if(diff < self.s.pingInterval) return callback(null, null);
  }

  // Execute operation; count is decremented once per completed ping
  var operation = function(_server) {
    var start = new Date();
    // Execute ping against server
    _server.command('system.$cmd', {ismaster:1}, function(err, r) {
      count = count - 1;
      // Record the measured round-trip latency for this server
      var time = new Date().getTime() - start.getTime();
      self.s.pings[_server.name] = time;
      // Log info for debug
      if(self.s.logger.isDebug()) self.s.logger.debug(f('ha latency for server [%s] is [%s] ms', _server.name, time));

      // We are done with all the servers
      if(count == 0) {
        // Emit ping event (err/r reflect whichever ping completed last)
        topology.emit('ping', err, r ? r.result : null);
        // Update the last ping time
        self.s.lastPing = new Date();
        // Return
        callback(null, null);
      }
    });
  }

  // Let's ping all servers
  while(servers.length > 0) {
    operation(servers.shift());
  }
}
// Drop the recorded latency for a server that is no longer usable, so
// pickServer cannot select it based on stale data
var removeServer = function(self, server) {
  delete self.s.pings[server.name];
}
/**
 * Server connection closed
 * @method
 * @param {Server} server The server that closed
 */
Ping.prototype.close = function(server) {
  // Forget the latency measurement for the closed server
  delete this.s.pings[server.name];
}
/**
 * Server connection errored out
 * @method
 * @param {Server} server The server that errored out
 */
Ping.prototype.error = function(server) {
  // Forget the latency measurement for the errored server
  delete this.s.pings[server.name];
}
/**
 * Server connection timeout
 * @method
 * @param {Server} server The server that timed out
 */
Ping.prototype.timeout = function(server) {
  // Forget the latency measurement for the timed-out server
  delete this.s.pings[server.name];
}
/**
 * Server connection happened: take an initial latency sample for the new
 * server with a single ismaster ping.
 * @method
 * @param {Server} server The server that connected
 * @param {resultCallback} callback The callback to return the result from the function
 */
Ping.prototype.connect = function(server, callback) {
  var self = this;
  // Get the command start date
  var start = new Date();
  // Execute ping against server
  server.command('system.$cmd', {ismaster:1}, function(err, r) {
    // Record the round-trip time as the server's initial latency sample
    var time = new Date().getTime() - start.getTime();
    self.s.pings[server.name] = time;
    // Log info for debug
    if(self.s.logger.isDebug()) self.s.logger.debug(f('connect latency for server [%s] is [%s] ms', server.name, time));
    // Set last ping
    self.s.lastPing = new Date();
    // Done, return (errors from the ping are intentionally not propagated)
    callback(null, null);
  });
}
/**
* This is a result from a readPreference strategy
*
* @callback readPreferenceResultCallback
* @param {error} error An error object. Set to null if no error present
* @param {Server} server The server picked by the strategy
*/
module.exports = Ping;

View File

@@ -0,0 +1,587 @@
"use strict";
var Insert = require('./commands').Insert
, Update = require('./commands').Update
, Remove = require('./commands').Remove
, Query = require('../connection/commands').Query
, copy = require('../connection/utils').copy
, KillCursor = require('../connection/commands').KillCursor
, GetMore = require('../connection/commands').GetMore
, Query = require('../connection/commands').Query
, ReadPreference = require('../topologies/read_preference')
, f = require('util').format
, CommandResult = require('../topologies/command_result')
, MongoError = require('../error')
, Long = require('bson').Long;
// Write concern fields
var writeConcernFields = ['w', 'wtimeout', 'j', 'fsync'];
var WireProtocol = function() {}
//
// Needs to support legacy mass insert as well as ordered/unordered legacy
// emulation
//
// Insert one or more documents using legacy OP_INSERT messages, emulating
// ordered/unordered bulk semantics with per-op getLastError round trips.
WireProtocol.prototype.insert = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  options = options || {};
  // Default is ordered execution
  var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
  // Normalize a single document to a one-element batch
  // (the previously declared `legacy` local was never used and was removed)
  ops = Array.isArray(ops) ? ops : [ops];

  // Legacy emulation cannot express batches beyond 1000 documents
  if(ops.length > 1000) return callback(new MongoError("exceeded maximum write batch size of 1000"));

  // Write concern
  var writeConcern = options.writeConcern || {w:1};

  // Unordered (or fire-and-forget w:0) writes skip per-op error chaining
  if(!ordered || writeConcern.w == 0) {
    return executeUnordered('insert', Insert, ismaster, ns, bson, pool, callbacks, ops, options, callback);
  }

  return executeOrdered('insert', Insert, ismaster, ns, bson, pool, callbacks, ops, options, callback);
}
// Update one or more documents using legacy OP_UPDATE messages, emulating
// ordered/unordered bulk semantics with per-op getLastError round trips.
WireProtocol.prototype.update = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  options = options || {};
  // Default is ordered execution
  var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
  // Normalize a single update to a one-element batch
  ops = Array.isArray(ops) ? ops :[ops];

  // Write concern
  var writeConcern = options.writeConcern || {w:1};

  // We are unordered (or w:0 fire-and-forget)
  if(!ordered || writeConcern.w == 0) {
    return executeUnordered('update', Update, ismaster, ns, bson, pool, callbacks, ops, options, callback);
  }

  return executeOrdered('update', Update, ismaster, ns, bson, pool, callbacks, ops, options, callback);
}
// Remove one or more documents using legacy OP_DELETE messages, emulating
// ordered/unordered bulk semantics with per-op getLastError round trips.
WireProtocol.prototype.remove = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  options = options || {};
  // Default is ordered execution
  var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
  // Normalize a single remove to a one-element batch
  ops = Array.isArray(ops) ? ops :[ops];

  // Write concern
  var writeConcern = options.writeConcern || {w:1};

  // We are unordered (or w:0 fire-and-forget)
  if(!ordered || writeConcern.w == 0) {
    return executeUnordered('remove', Remove, ismaster, ns, bson, pool, callbacks, ops, options, callback);
  }

  return executeOrdered('remove', Remove, ismaster, ns, bson, pool, callbacks, ops, options, callback);
}
// Kill a server-side cursor by id. OP_KILL_CURSORS has no server reply, so
// the callback is invoked immediately.
WireProtocol.prototype.killCursor = function(bson, ns, cursorId, connection, callbacks, callback) {
  // Create a kill cursor command
  var killCursor = new KillCursor(bson, [cursorId]);
  // Execute the kill cursor command only if the connection is still usable
  if(connection && connection.isConnected()) connection.write(killCursor.toBin());
  // Set cursor to 0
  // NOTE(review): this reassigns the local parameter only; it has no effect
  // on the caller's cursor id
  cursorId = Long.ZERO;
  // Return to caller
  if(callback) callback(null, null);
}
// Fetch the next batch for an open cursor via OP_GET_MORE and update the
// shared cursorState (documents, cursorId) in place before calling back.
WireProtocol.prototype.getMore = function(bson, ns, cursorState, batchSize, raw, connection, callbacks, options, callback) {
  // Create getMore command
  var getMore = new GetMore(bson, ns, cursorState.cursorId, {numberToReturn: batchSize});

  // Query callback
  var queryCallback = function(err, r) {
    if(err) return callback(err);

    // If we have a timed out query or a cursor that was killed
    // (CursorNotFound response flag, bit 0)
    if((r.responseFlags & (1 << 0)) != 0) {
      return callback(new MongoError("cursor killed or timed out"), null);
    }

    // Ensure we have a Long value cursor id
    var cursorId = typeof r.cursorId == 'number'
      ? Long.fromNumber(r.cursorId)
      : r.cursorId;

    // Set all the values
    cursorState.documents = r.documents;
    cursorState.cursorId = cursorId;

    // Return
    callback(null);
  }

  // If we have a raw query decorate the function (skips BSON deserialization)
  if(raw) {
    queryCallback.raw = raw;
  }

  // Register a callback
  callbacks.register(getMore.requestId, queryCallback);
  // Write out the getMore command
  connection.write(getMore.toBin());
}
// Translate a cursor-establishing command into a wire protocol Query object.
// Returns undefined when cursorState.cursorId is already set (cursor already
// open; continuation happens via getMore).
WireProtocol.prototype.command = function(bson, ns, cmd, cursorState, topology, options) {
  // Establish type of command
  if(cmd.find) {
    return setupClassicFind(bson, ns, cmd, cursorState, topology, options)
  } else if(cursorState.cursorId != null) {
    // Existing cursor id: nothing to set up (intentionally returns undefined)
  } else if(cmd) {
    return setupCommand(bson, ns, cmd, cursorState, topology, options);
  } else {
    throw new MongoError(f("command %s does not return a cursor", JSON.stringify(cmd)));
  }
}
//
// Execute a find command by rewriting it as a legacy OP_QUERY message,
// translating modern find options into $-prefixed special modifiers.
var setupClassicFind = function(bson, ns, cmd, cursorState, topology, options) {
  // Resolve the read preference (defaults to primary); accept string shorthand
  var readPreference = options.readPreference || new ReadPreference('primary');
  if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
  if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
  // Does the cmd have a readPreference (command-level overrides option-level)
  if(cmd.readPreference) {
    readPreference = cmd.readPreference;
  }

  // Ensure we have at least some options
  // NOTE(review): options has already been dereferenced above, so this guard
  // only protects the code below when options was provided
  options = options || {};
  // Set the optional batchSize
  cursorState.batchSize = cmd.batchSize || cursorState.batchSize;
  var numberToReturn = 0;

  // Unpack the limit and batchSize values; a negative numberToReturn asks the
  // server to close the cursor after the first batch
  if(cursorState.limit == 0) {
    numberToReturn = cursorState.batchSize;
  } else if(cursorState.limit < 0 || cursorState.limit < cursorState.batchSize || (cursorState.limit > 0 && cursorState.batchSize == 0)) {
    numberToReturn = cursorState.limit;
  } else {
    numberToReturn = cursorState.batchSize;
  }

  var numberToSkip = cursorState.skip || 0;
  // Build actual find command
  var findCmd = {};
  // Using special modifier (query must be wrapped under $query if so)
  var usesSpecialModifier = false;

  // We have a Mongos topology, check if we need to add a readPreference
  if(topology.type == 'mongos' && readPreference) {
    findCmd['$readPreference'] = readPreference.toJSON();
    usesSpecialModifier = true;
  }

  // Add special modifiers to the query
  if(cmd.sort) findCmd['orderby'] = cmd.sort, usesSpecialModifier = true;
  if(cmd.hint) findCmd['$hint'] = cmd.hint, usesSpecialModifier = true;
  if(cmd.snapshot) findCmd['$snapshot'] = cmd.snapshot, usesSpecialModifier = true;
  if(cmd.returnKey) findCmd['$returnKey'] = cmd.returnKey, usesSpecialModifier = true;
  if(cmd.maxScan) findCmd['$maxScan'] = cmd.maxScan, usesSpecialModifier = true;
  if(cmd.min) findCmd['$min'] = cmd.min, usesSpecialModifier = true;
  if(cmd.max) findCmd['$max'] = cmd.max, usesSpecialModifier = true;
  if(cmd.showDiskLoc) findCmd['$showDiskLoc'] = cmd.showDiskLoc, usesSpecialModifier = true;
  if(cmd.comment) findCmd['$comment'] = cmd.comment, usesSpecialModifier = true;
  if(cmd.maxTimeMS) findCmd['$maxTimeMS'] = cmd.maxTimeMS, usesSpecialModifier = true;

  if(cmd.explain) {
    // nToReturn must be 0 (match all) or negative (match N and close cursor)
    // nToReturn > 0 will give explain results equivalent to limit(0)
    numberToReturn = -Math.abs(cmd.limit || 0);
    usesSpecialModifier = true;
    findCmd['$explain'] = true;
  }

  // If we have a special modifier, nest the filter under $query
  if(usesSpecialModifier) {
    findCmd['$query'] = cmd.query;
  } else {
    findCmd = cmd.query;
  }

  // Throw on majority readConcern passed in (not supported here)
  if(cmd.readConcern && cmd.readConcern.level != 'local') {
    throw new MongoError(f('server find command does not support a readConcern level of %s', cmd.readConcern.level));
  }

  // Remove readConcern, ensure no failing commands (copy first so the
  // caller's cmd object is not mutated)
  if(cmd.readConcern) {
    cmd = copy(cmd);
    delete cmd['readConcern'];
  }

  // Set up the serialize and ignoreUndefined fields
  var serializeFunctions = typeof options.serializeFunctions == 'boolean'
    ? options.serializeFunctions : false;
  var ignoreUndefined = typeof options.ignoreUndefined == 'boolean'
    ? options.ignoreUndefined : false;

  // Build Query object
  var query = new Query(bson, ns, findCmd, {
    numberToSkip: numberToSkip, numberToReturn: numberToReturn
    , checkKeys: false, returnFieldSelector: cmd.fields
    , serializeFunctions: serializeFunctions, ignoreUndefined: ignoreUndefined
  });

  // Set query flags
  query.slaveOk = readPreference.slaveOk();

  // Set up the option bits for wire protocol
  if(typeof cmd.tailable == 'boolean') query.tailable = cmd.tailable;
  if(typeof cmd.oplogReplay == 'boolean') query.oplogReplay = cmd.oplogReplay;
  if(typeof cmd.noCursorTimeout == 'boolean') query.noCursorTimeout = cmd.noCursorTimeout;
  if(typeof cmd.awaitData == 'boolean') query.awaitData = cmd.awaitData;
  if(typeof cmd.exhaust == 'boolean') query.exhaust = cmd.exhaust;
  if(typeof cmd.partial == 'boolean') query.partial = cmd.partial;

  // Return the query
  return query;
}
//
// Set up a command cursor: wrap an arbitrary command as an OP_QUERY against
// the database's $cmd collection.
var setupCommand = function(bson, ns, cmd, cursorState, topology, options) {
  // Guard must run before options is dereferenced (it previously ran after
  // options.readPreference was already read, so a missing options threw)
  options = options || {};

  // Resolve the read preference (defaults to primary); accept string shorthand
  var readPreference = options.readPreference || new ReadPreference('primary');
  if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
  if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');

  // Does the cmd have a readPreference (command-level overrides option-level)
  if(cmd.readPreference) {
    readPreference = cmd.readPreference;
  }

  // Final query: shallow copy of the command so the caller's object is
  // never mutated
  var finalCmd = {};
  for(var name in cmd) {
    finalCmd[name] = cmd[name];
  }

  // Build command namespace
  var parts = ns.split(/\./);

  // Throw on non-local readConcern passed in (not supported here)
  if(cmd.readConcern && cmd.readConcern.level != 'local') {
    throw new MongoError(f('server %s command does not support a readConcern level of %s', JSON.stringify(cmd), cmd.readConcern.level));
  }

  // Strip readConcern from the command actually sent. Previously the field
  // was deleted from the caller's cmd (mutating it) after finalCmd had
  // already copied it, so it still went out on the wire.
  if(finalCmd.readConcern) delete finalCmd['readConcern'];

  // Serialize functions
  var serializeFunctions = typeof options.serializeFunctions == 'boolean'
    ? options.serializeFunctions : false;

  // Set up the serialize and ignoreUndefined fields
  var ignoreUndefined = typeof options.ignoreUndefined == 'boolean'
    ? options.ignoreUndefined : false;

  // We have a Mongos topology, check if we need to add a readPreference
  if(topology.type == 'mongos'
    && readPreference
    && readPreference.preference != 'primary') {
    finalCmd = {
      '$query': finalCmd,
      '$readPreference': readPreference.toJSON()
    };
  }

  // Build Query object against <db>.$cmd, expecting a single reply document
  var query = new Query(bson, f('%s.$cmd', parts.shift()), finalCmd, {
    numberToSkip: 0, numberToReturn: -1
    , checkKeys: false, serializeFunctions: serializeFunctions
    , ignoreUndefined: ignoreUndefined
  });

  // Set query flags
  query.slaveOk = readPreference.slaveOk();

  // Return the query
  return query;
}
/**
 * Bind a callback to the active Node domain, when one exists.
 * Returns the callback unchanged if there is no active domain or no callback.
 * @ignore
 */
var bindToCurrentDomain = function(callback) {
  var domain = process.domain;
  // Nothing to bind (or nothing to bind to): hand the callback back untouched
  if(domain == null || callback == null) return callback;
  return domain.bind(callback);
}
// Decide whether a getLastError round trip is required for this write
// concern. An empty object means "server default", which still needs an
// acknowledgement; w:0 alone (fire-and-forget) does not.
var hasWriteConcern = function(writeConcern) {
  if(Object.keys(writeConcern).length == 0) return true;
  return Boolean(writeConcern.w
    || writeConcern.wtimeout
    || writeConcern.j == true
    || writeConcern.fsync == true);
}
// Shallow-copy only the recognized write concern fields, skipping any that
// are null or undefined (w:0 and j:false are preserved).
var cloneWriteConcern = function(writeConcern) {
  var fields = ['w', 'wtimeout', 'j', 'fsync'];
  var wc = {};
  for(var i = 0; i < fields.length; i++) {
    if(writeConcern[fields[i]] != null) wc[fields[i]] = writeConcern[fields[i]];
  }
  return wc;
}
//
// Aggregate up all the results
//
// Folds the per-operation getLastError documents from a legacy bulk write
// into a single write-command-shaped result (ok/n/upserted/writeErrors/
// writeConcernError), wrapped in a CommandResult.
var aggregateWriteOperationResults = function(opType, ops, results, connection) {
  var finalResult = { ok: 1, n: 0 }

  // Map all the results coming back
  for(var i = 0; i < results.length; i++) {
    var result = results[i];
    var op = ops[i];

    // Lazily create the upserted list the first time an upsert (or an
    // update that did not match an existing document) is seen
    if((result.upserted || (result.updatedExisting == false)) && finalResult.upserted == null) {
      finalResult.upserted = [];
    }

    // Push the upserted document to the list of upserted values
    if(result.upserted) {
      finalResult.upserted.push({index: i, _id: result.upserted});
    }

    // We have an upsert where we passed in a _id (server reports n:1 but no
    // generated id; take the _id from the original update's query)
    if(result.updatedExisting == false && result.n == 1 && result.upserted == null) {
      finalResult.upserted.push({index: i, _id: op.q._id});
    }

    // We have an insert command (legacy insert getLastError reports n:0)
    if(result.ok == 1 && opType == 'insert' && result.err == null) {
      finalResult.n = finalResult.n + 1;
    }

    // We have a command error
    // (precedence: (result != null && result.ok == 0) || result.err || result.errmsg)
    if(result != null && result.ok == 0 || result.err || result.errmsg) {
      if(result.ok == 0) finalResult.ok = 0;
      finalResult.code = result.code;
      finalResult.errmsg = result.errmsg || result.err || result.errMsg;

      // Check if we have a write error (known per-document server error
      // codes, e.g. 11000/11001 duplicate key); anything else is surfaced
      // as a write concern error
      if(result.code == 11000
        || result.code == 11001
        || result.code == 12582
        || result.code == 16544
        || result.code == 16538
        || result.code == 16542
        || result.code == 14
        || result.code == 13511) {
        if(finalResult.writeErrors == null) finalResult.writeErrors = [];
        finalResult.writeErrors.push({
          index: i
          , code: result.code
          , errmsg: result.errmsg || result.err || result.errMsg
        });
      } else {
        finalResult.writeConcernError = {
          code: result.code
          , errmsg: result.errmsg || result.err || result.errMsg
        }
      }
    } else if(typeof result.n == 'number') {
      finalResult.n += result.n;
    } else {
      finalResult.n += 1;
    }

    // Result as expected
    if(result != null && result.lastOp) finalResult.lastOp = result.lastOp;
  }

  // Return finalResult aggregated results
  return new CommandResult(finalResult, connection);
}
//
// Execute all inserts in an ordered manner
//
// Writes one op at a time, following each with a getLastError round trip;
// the chain stops at the first error so ordered bulk semantics hold.
var executeOrdered = function(opType ,command, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  var _ops = ops.slice(0);
  // Bind to current domain
  callback = bindToCurrentDomain(callback);
  // Collect all the getLastErrors
  var getLastErrors = [];

  // Execute an operation; recursion through the getLastError callback
  // serializes the ops
  var executeOp = function(list, _callback) {
    // Get a pool connection
    var connection = pool.get();
    // No more items in the list
    if(list.length == 0) {
      return process.nextTick(function() {
        _callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
      });
    }

    // Get the first operation
    var doc = list.shift();

    // Create an insert command
    var op = new command(Query.getRequestId(), ismaster, bson, ns, [doc], options);

    // Write concern
    var optionWriteConcern = options.writeConcern || {w:1};
    // Final write concern
    var writeConcern = cloneWriteConcern(optionWriteConcern);

    // Get the db name
    var db = ns.split('.').shift();

    // Error out if no connection available
    if(connection == null)
      return _callback(new MongoError("no connection available"));

    try {
      // Execute the insert
      connection.write(op.toBin());

      // If write concern 0 don't fire getLastError
      // NOTE(review): when hasWriteConcern is false no getLastError is sent
      // and the chain does not advance; callers only reach the ordered path
      // with an acknowledged write concern -- confirm before relying on w:0 here
      if(hasWriteConcern(writeConcern)) {
        var getLastErrorCmd = {getlasterror: 1};
        // Merge all the fields
        for(var i = 0; i < writeConcernFields.length; i++) {
          if(writeConcern[writeConcernFields[i]] != null)
            getLastErrorCmd[writeConcernFields[i]] = writeConcern[writeConcernFields[i]];
        }

        // Create a getLastError command
        var getLastErrorOp = new Query(bson, f("%s.$cmd", db), getLastErrorCmd, {numberToReturn: -1});
        // Write the lastError message
        connection.write(getLastErrorOp.toBin());
        // Register the callback
        callbacks.register(getLastErrorOp.requestId, function(err, result) {
          if(err) return callback(err);
          // Get the document
          var doc = result.documents[0];
          // Save the getLastError document
          getLastErrors.push(doc);
          // If we have an error terminate (ordered semantics: stop the chain)
          if(doc.ok == 0 || doc.err || doc.errmsg) return callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
          // Execute the next op in the list
          executeOp(list, callback);
        });
      }
    } catch(err) {
      if(typeof err == 'string') err = new MongoError(err);
      // We have a serialization error, rewrite as a write error to have same behavior as modern
      // write commands
      getLastErrors.push({ ok: 1, errmsg: err.message, code: 14 });
      // Return due to an error
      process.nextTick(function() {
        callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
      });
    }
  }

  // Execute the operations
  executeOp(_ops, callback);
}
// Execute the legacy writes without ordering guarantees: every op is written
// immediately, each followed by its own getLastError (unless w:0), and the
// results are aggregated once all getLastError replies have arrived.
var executeUnordered = function(opType, command, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  // Bind to current domain
  callback = bindToCurrentDomain(callback);
  // Total operations to write
  var totalOps = ops.length;
  // Collect all the getLastErrors
  var getLastErrors = [];

  // Write concern
  var optionWriteConcern = options.writeConcern || {w:1};
  // Final write concern
  var writeConcern = cloneWriteConcern(optionWriteConcern);
  // Driver level error
  var error;

  // Execute all the operations
  for(var i = 0; i < ops.length; i++) {
    // Create an insert command
    var op = new command(Query.getRequestId(), ismaster, bson, ns, [ops[i]], options);
    // Get db name
    var db = ns.split('.').shift();

    // Get a pool connection
    var connection = pool.get();
    // Error out if no connection available. Fix: this previously invoked the
    // undefined identifier `_callback`, throwing a ReferenceError instead of
    // reporting pool exhaustion to the caller.
    if(connection == null) {
      return process.nextTick(function() {
        callback(new MongoError("no connection available"));
      });
    }

    try {
      // Execute the insert
      connection.write(op.toBin());

      // If write concern 0 don't fire getLastError
      if(hasWriteConcern(writeConcern)) {
        var getLastErrorCmd = {getlasterror: 1};
        // Merge all the fields
        for(var j = 0; j < writeConcernFields.length; j++) {
          if(writeConcern[writeConcernFields[j]] != null)
            getLastErrorCmd[writeConcernFields[j]] = writeConcern[writeConcernFields[j]];
        }

        // Create a getLastError command
        var getLastErrorOp = new Query(bson, f("%s.$cmd", db), getLastErrorCmd, {numberToReturn: -1});
        // Write the lastError message
        connection.write(getLastErrorOp.toBin());

        // Give the result from getLastError the right index (closure over i)
        var callbackOp = function(_index) {
          return function(err, result) {
            if(err) error = err;
            // Update the number of operations executed
            totalOps = totalOps - 1;
            // Save the getLastError document
            if(!err) getLastErrors[_index] = result.documents[0];
            // Check if we are done
            if(totalOps == 0) {
              process.nextTick(function() {
                if(error) return callback(error);
                callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
              });
            }
          }
        }

        // Register the callback
        callbacks.register(getLastErrorOp.requestId, callbackOp(i));
      }
    } catch(err) {
      if(typeof err == 'string') err = new MongoError(err);
      // Update the number of operations executed
      totalOps = totalOps - 1;
      // We have a serialization error, rewrite as a write error to have same behavior as modern
      // write commands
      getLastErrors[i] = { ok: 1, errmsg: err.message, code: 14 };
      // Check if we are done
      if(totalOps == 0) {
        callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
      }
    }
  }

  // Empty w:0 return (no getLastError round trips were scheduled)
  if(writeConcern
    && writeConcern.w == 0 && callback) {
    callback(null, null);
  }
}
module.exports = WireProtocol;

View File

@@ -0,0 +1,329 @@
"use strict";
var Insert = require('./commands').Insert
, Update = require('./commands').Update
, Remove = require('./commands').Remove
, Query = require('../connection/commands').Query
, copy = require('../connection/utils').copy
, KillCursor = require('../connection/commands').KillCursor
, GetMore = require('../connection/commands').GetMore
, Query = require('../connection/commands').Query
, ReadPreference = require('../topologies/read_preference')
, f = require('util').format
, CommandResult = require('../topologies/command_result')
, MongoError = require('../error')
, Long = require('bson').Long;
var WireProtocol = function() {}
//
// Execute a write operation using a 2.6+ write command (insert/update/delete)
// issued against the database's $cmd namespace.
var executeWrite = function(topology, type, opsField, ns, ops, options, callback) {
  // NOTE(review): the message hard-codes "insert" even for update/delete batches
  if(ops.length == 0) throw new MongoError("insert must contain at least one document");
  if(typeof options == 'function') {
    callback = options;
    options = {};
  }

  // Split the ns up to get db and collection
  var p = ns.split(".");
  var d = p.shift();
  // Options
  var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
  var writeConcern = options.writeConcern || {};

  // return skeleton: { <type>: <collection>, <opsField>: ops, ordered: ... }
  var writeCommand = {};
  writeCommand[type] = p.join('.');
  writeCommand[opsField] = ops;
  writeCommand.ordered = ordered;

  // Did we specify a write concern (empty object falls back to server default)
  if(writeConcern && Object.keys(writeConcern).length > 0) {
    writeCommand.writeConcern = writeConcern;
  }

  // Options object
  var opts = {};
  // Only inserted documents need key validation (no $/dot keys)
  if(type == 'insert') opts.checkKeys = true;
  // Ensure we support serialization of functions
  if(options.serializeFunctions) opts.serializeFunctions = options.serializeFunctions;
  if(options.ignoreUndefined) opts.ignoreUndefined = options.ignoreUndefined;
  // Execute command
  topology.command(f("%s.$cmd", d), writeCommand, opts, callback);
}
//
// Needs to support legacy mass insert as well as ordered/unordered legacy
// emulation
//
// Insert one or more documents using the 2.6+ insert write command.
WireProtocol.prototype.insert = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  executeWrite(topology, 'insert', 'documents', ns, ops, options, callback);
}
// Update one or more documents using the 2.6+ update write command.
WireProtocol.prototype.update = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  executeWrite(topology, 'update', 'updates', ns, ops, options, callback);
}
// Remove one or more documents using the 2.6+ delete write command.
WireProtocol.prototype.remove = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  executeWrite(topology, 'delete', 'deletes', ns, ops, options, callback);
}
// Kill a server-side cursor by id. OP_KILL_CURSORS has no server reply, so
// the callback is invoked immediately.
WireProtocol.prototype.killCursor = function(bson, ns, cursorId, connection, callbacks, callback) {
  // Create a kill cursor command
  var killCursor = new KillCursor(bson, [cursorId]);
  // Execute the kill cursor command only if the connection is still usable
  if(connection && connection.isConnected()) connection.write(killCursor.toBin());
  // (Removed a dead `cursorId = Long.ZERO` reassignment: it only rebound the
  // local parameter and could never affect the caller's cursor id.)
  // Return to caller
  if(callback) callback(null, null);
}
// Fetch the next batch for an open cursor via OP_GET_MORE and update the
// shared cursorState (documents, cursorId) in place before calling back.
WireProtocol.prototype.getMore = function(bson, ns, cursorState, batchSize, raw, connection, callbacks, options, callback) {
  // Create getMore command
  var getMore = new GetMore(bson, ns, cursorState.cursorId, {numberToReturn: batchSize});

  // Query callback
  var queryCallback = function(err, r) {
    if(err) return callback(err);

    // If we have a timed out query or a cursor that was killed
    // (CursorNotFound response flag, bit 0)
    if((r.responseFlags & (1 << 0)) != 0) {
      return callback(new MongoError("cursor killed or timed out"), null);
    }

    // Ensure we have a Long value cursor id
    var cursorId = typeof r.cursorId == 'number'
      ? Long.fromNumber(r.cursorId)
      : r.cursorId;

    // Set all the values
    cursorState.documents = r.documents;
    cursorState.cursorId = cursorId;

    // Return
    callback(null);
  }

  // If we have a raw query decorate the function (skips BSON deserialization)
  if(raw) {
    queryCallback.raw = raw;
  }

  // Register a callback
  callbacks.register(getMore.requestId, queryCallback);
  // Write out the getMore command
  connection.write(getMore.toBin());
}
// Translate a cursor-establishing command into a wire protocol Query object.
// Returns undefined when cursorState.cursorId is already set (cursor already
// open; continuation happens via getMore).
WireProtocol.prototype.command = function(bson, ns, cmd, cursorState, topology, options) {
  // Establish type of command
  if(cmd.find) {
    return setupClassicFind(bson, ns, cmd, cursorState, topology, options)
  } else if(cursorState.cursorId != null) {
    // Existing cursor id: nothing to set up (intentionally returns undefined)
  } else if(cmd) {
    return setupCommand(bson, ns, cmd, cursorState, topology, options);
  } else {
    throw new MongoError(f("command %s does not return a cursor", JSON.stringify(cmd)));
  }
}
//
// Execute a find command by rewriting it as a legacy OP_QUERY message,
// translating modern find options into $-prefixed special modifiers.
var setupClassicFind = function(bson, ns, cmd, cursorState, topology, options) {
  // Resolve the read preference (defaults to primary); accept string shorthand
  var readPreference = options.readPreference || new ReadPreference('primary');
  if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
  if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
  // Does the cmd have a readPreference (command-level overrides option-level)
  if(cmd.readPreference) {
    readPreference = cmd.readPreference;
  }

  // Ensure we have at least some options
  // NOTE(review): options has already been dereferenced above, so this guard
  // only protects the code below when options was provided
  options = options || {};
  // Set the optional batchSize
  cursorState.batchSize = cmd.batchSize || cursorState.batchSize;
  var numberToReturn = 0;

  // Unpack the limit and batchSize values; a negative numberToReturn asks the
  // server to close the cursor after the first batch
  if(cursorState.limit == 0) {
    numberToReturn = cursorState.batchSize;
  } else if(cursorState.limit < 0 || cursorState.limit < cursorState.batchSize || (cursorState.limit > 0 && cursorState.batchSize == 0)) {
    numberToReturn = cursorState.limit;
  } else {
    numberToReturn = cursorState.batchSize;
  }

  var numberToSkip = cursorState.skip || 0;
  // Build actual find command
  var findCmd = {};
  // Using special modifier (query must be wrapped under $query if so)
  var usesSpecialModifier = false;

  // We have a Mongos topology, check if we need to add a readPreference
  if(topology.type == 'mongos' && readPreference) {
    findCmd['$readPreference'] = readPreference.toJSON();
    usesSpecialModifier = true;
  }

  // Add special modifiers to the query
  if(cmd.sort) findCmd['orderby'] = cmd.sort, usesSpecialModifier = true;
  if(cmd.hint) findCmd['$hint'] = cmd.hint, usesSpecialModifier = true;
  if(cmd.snapshot) findCmd['$snapshot'] = cmd.snapshot, usesSpecialModifier = true;
  if(cmd.returnKey) findCmd['$returnKey'] = cmd.returnKey, usesSpecialModifier = true;
  if(cmd.maxScan) findCmd['$maxScan'] = cmd.maxScan, usesSpecialModifier = true;
  if(cmd.min) findCmd['$min'] = cmd.min, usesSpecialModifier = true;
  if(cmd.max) findCmd['$max'] = cmd.max, usesSpecialModifier = true;
  if(cmd.showDiskLoc) findCmd['$showDiskLoc'] = cmd.showDiskLoc, usesSpecialModifier = true;
  if(cmd.comment) findCmd['$comment'] = cmd.comment, usesSpecialModifier = true;
  if(cmd.maxTimeMS) findCmd['$maxTimeMS'] = cmd.maxTimeMS, usesSpecialModifier = true;

  if(cmd.explain) {
    // nToReturn must be 0 (match all) or negative (match N and close cursor)
    // nToReturn > 0 will give explain results equivalent to limit(0)
    numberToReturn = -Math.abs(cmd.limit || 0);
    usesSpecialModifier = true;
    findCmd['$explain'] = true;
  }

  // If we have a special modifier, nest the filter under $query
  if(usesSpecialModifier) {
    findCmd['$query'] = cmd.query;
  } else {
    findCmd = cmd.query;
  }

  // Throw on majority readConcern passed in (not supported here)
  if(cmd.readConcern && cmd.readConcern.level != 'local') {
    throw new MongoError(f('server find command does not support a readConcern level of %s', cmd.readConcern.level));
  }

  // Remove readConcern, ensure no failing commands (copy first so the
  // caller's cmd object is not mutated)
  if(cmd.readConcern) {
    cmd = copy(cmd);
    delete cmd['readConcern'];
  }

  // Serialize functions
  var serializeFunctions = typeof options.serializeFunctions == 'boolean'
    ? options.serializeFunctions : false;
  var ignoreUndefined = typeof options.ignoreUndefined == 'boolean'
    ? options.ignoreUndefined : false;

  // Build Query object
  var query = new Query(bson, ns, findCmd, {
    numberToSkip: numberToSkip, numberToReturn: numberToReturn
    , checkKeys: false, returnFieldSelector: cmd.fields
    , serializeFunctions: serializeFunctions
    , ignoreUndefined: ignoreUndefined
  });

  // Set query flags
  query.slaveOk = readPreference.slaveOk();

  // Set up the option bits for wire protocol
  if(typeof cmd.tailable == 'boolean') {
    query.tailable = cmd.tailable;
  }

  if(typeof cmd.oplogReplay == 'boolean') {
    query.oplogReplay = cmd.oplogReplay;
  }

  if(typeof cmd.noCursorTimeout == 'boolean') {
    query.noCursorTimeout = cmd.noCursorTimeout;
  }

  if(typeof cmd.awaitData == 'boolean') {
    query.awaitData = cmd.awaitData;
  }

  if(typeof cmd.exhaust == 'boolean') {
    query.exhaust = cmd.exhaust;
  }

  if(typeof cmd.partial == 'boolean') {
    query.partial = cmd.partial;
  }

  // Return the query
  return query;
}
//
// Build the wire Query for a generic command cursor (2.6 wire protocol).
// Strips readConcern (unsupported before 3.2) and wraps the command in
// $query/$readPreference when targeting a mongos with a non-primary mode.
var setupCommand = function(bson, ns, cmd, cursorState, topology, options) {
  // Resolve the read preference: accept a string mode or a ReadPreference instance.
  var readPreference = options.readPreference || new ReadPreference('primary');
  if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
  if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
  // A readPreference on the command itself takes precedence
  if(cmd.readPreference) {
    readPreference = cmd.readPreference;
  }
  // Set empty options object
  options = options || {}
  // Shallow-copy the command so the wrapper below does not clobber the caller's object
  var finalCmd = {};
  for(var name in cmd) {
    finalCmd[name] = cmd[name];
  }
  // First ns segment is the database; commands run against <db>.$cmd
  var parts = ns.split(/\./);
  // Serialization options (both default to false)
  var serializeFunctions = typeof options.serializeFunctions == 'boolean'
    ? options.serializeFunctions : false;
  var ignoreUndefined = typeof options.ignoreUndefined == 'boolean'
    ? options.ignoreUndefined : false;
  // Throw on non-local readConcern: pre-3.2 servers do not support it
  if(cmd.readConcern && cmd.readConcern.level != 'local') {
    throw new MongoError(f('server %s command does not support a readConcern level of %s', JSON.stringify(cmd), cmd.readConcern.level));
  }
  // FIX: previously readConcern was deleted only from the caller's cmd AFTER it
  // had already been copied into finalCmd, so the unsupported field was still
  // sent to the server. Strip it from the outgoing copy as well.
  if(cmd.readConcern) {
    delete cmd['readConcern'];
    delete finalCmd['readConcern'];
  }
  // We have a Mongos topology, check if we need to add a readPreference
  if(topology.type == 'mongos'
    && readPreference
    && readPreference.preference != 'primary') {
    finalCmd = {
      '$query': finalCmd,
      '$readPreference': readPreference.toJSON()
    };
  }
  // Build Query object; commands always use numberToReturn -1 (single result document)
  var query = new Query(bson, f('%s.$cmd', parts.shift()), finalCmd, {
    numberToSkip: 0, numberToReturn: -1
    , checkKeys: false, serializeFunctions: serializeFunctions
    , ignoreUndefined: ignoreUndefined
  });
  // Set query flags
  query.slaveOk = readPreference.slaveOk();
  // Return the query
  return query;
}
/**
* @ignore
*/
var bindToCurrentDomain = function(callback) {
  // Preserve the active Node domain (legacy error-handling context) across
  // async boundaries by binding the callback to it when one is present;
  // otherwise hand the callback back unchanged.
  var activeDomain = process.domain;
  if(activeDomain != null && callback != null) {
    return activeDomain.bind(callback);
  }
  return callback;
}
module.exports = WireProtocol;

View File

@@ -0,0 +1,523 @@
"use strict";
var Insert = require('./commands').Insert
, Update = require('./commands').Update
, Remove = require('./commands').Remove
, Query = require('../connection/commands').Query
, copy = require('../connection/utils').copy
, KillCursor = require('../connection/commands').KillCursor
, GetMore = require('../connection/commands').GetMore
, Query = require('../connection/commands').Query
, ReadPreference = require('../topologies/read_preference')
, f = require('util').format
, CommandResult = require('../topologies/command_result')
, MongoError = require('../error')
, Long = require('bson').Long;
// Wire protocol implementation for MongoDB 3.2+ servers (command-based
// find/getMore/killCursors). Retains a reference to the legacy protocol
// implementation, used as a fallback for features the command form cannot
// express (e.g. exhaust cursors — see command()).
var WireProtocol = function(legacyWireProtocol) {
  // Fallback protocol handler delegated to by command() for exhaust cursors.
  this.legacyWireProtocol = legacyWireProtocol;
}
//
// Execute a write operation (insert/update/delete) as a write command
// against <db>.$cmd.
//
// @param topology  Topology exposing command(ns, cmd, options, callback).
// @param type      Command name: 'insert', 'update' or 'delete'.
// @param opsField  Field carrying the ops: 'documents', 'updates' or 'deletes'.
// @param ns        Full namespace '<db>.<collection>'.
// @param ops       Non-empty array of operation documents.
// @param options   { ordered, writeConcern, bypassDocumentValidation,
//                    serializeFunctions, ignoreUndefined } — all optional;
//                    may be omitted entirely (callback in its place).
// @param callback  Invoked with the command result.
var executeWrite = function(topology, type, opsField, ns, ops, options, callback) {
  // FIX: the error message previously always said "insert", even when the
  // operation was an update or delete.
  if(ops.length == 0) throw new MongoError(f("%s must contain at least one document", type));
  if(typeof options == 'function') {
    callback = options;
    options = {};
  }
  // Split the ns up to get db and collection
  var p = ns.split(".");
  var d = p.shift();
  // Ordered defaults to true per the write-command spec
  var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
  var writeConcern = options.writeConcern;
  // Build the write command skeleton, e.g. { insert: 'coll', documents: [...], ordered: true }
  var writeCommand = {};
  writeCommand[type] = p.join('.');
  writeCommand[opsField] = ops;
  writeCommand.ordered = ordered;
  // Only attach a writeConcern when one was actually provided
  if(writeConcern && Object.keys(writeConcern).length > 0) {
    writeCommand.writeConcern = writeConcern;
  }
  // Do we have bypassDocumentValidation set, then enable it on the write command
  if(typeof options.bypassDocumentValidation == 'boolean') {
    writeCommand.bypassDocumentValidation = options.bypassDocumentValidation;
  }
  // Options object; key validation ($-prefixed keys) only applies to inserts
  var opts = {};
  if(type == 'insert') opts.checkKeys = true;
  // Ensure we support serialization of functions
  if(options.serializeFunctions) opts.serializeFunctions = options.serializeFunctions;
  if(options.ignoreUndefined) opts.ignoreUndefined = options.ignoreUndefined;
  // Execute command
  topology.command(f("%s.$cmd", d), writeCommand, opts, callback);
}
//
// Write-command wrappers. Each delegates to executeWrite with the matching
// command name and operations field. The ismaster/bson/pool/callbacks
// parameters are unused here; presumably kept for signature parity with the
// legacy wire protocol implementation — TODO confirm against callers.
//
WireProtocol.prototype.insert = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  // Insert: operations travel in the command's `documents` field.
  executeWrite(topology, 'insert', 'documents', ns, ops, options, callback);
}
WireProtocol.prototype.update = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  // Update: operations travel in the command's `updates` field.
  executeWrite(topology, 'update', 'updates', ns, ops, options, callback);
}
WireProtocol.prototype.remove = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  // Delete: operations travel in the command's `deletes` field.
  executeWrite(topology, 'delete', 'deletes', ns, ops, options, callback);
}
// Kill a server-side cursor using the killCursors command.
//
// @param bson        BSON serializer instance.
// @param ns          Full namespace '<db>.<collection>'.
// @param cursorId    Id of the cursor to kill.
// @param connection  Connection to write the command on (skipped if not connected).
// @param callbacks   Callback registry keyed by wire requestId.
// @param callback    Optional; receives (err, resultDocument).
WireProtocol.prototype.killCursor = function(bson, ns, cursorId, connection, callbacks, callback) {
  // Build command namespace: first ns segment is the database
  var parts = ns.split(/\./);
  var commandns = f('%s.$cmd', parts.shift());
  // Create killCursors command
  var killcursorCmd = {
    killCursors: parts.join('.'),
    cursors: [cursorId]
  }
  // Build Query object
  var query = new Query(bson, commandns, killcursorCmd, {
    numberToSkip: 0, numberToReturn: -1
    , checkKeys: false, returnFieldSelector: null
  });
  // Set query flags
  query.slaveOk = true;
  // Execute the kill cursor command; silently skip when disconnected
  if(connection && connection.isConnected()) {
    connection.write(query.toBin());
  }
  // Kill cursor callback; every exit path tolerates a missing callback
  var killCursorCallback = function(err, r) {
    if(err) {
      if(typeof callback != 'function') return;
      return callback(err);
    }
    // Bit 0 of responseFlags is CursorNotFound
    if((r.responseFlags & (1 << 0)) != 0) {
      if(typeof callback != 'function') return;
      return callback(new MongoError("cursor killed or timed out"), null);
    }
    if(!Array.isArray(r.documents) || r.documents.length == 0) {
      if(typeof callback != 'function') return;
      // FIX: previously referenced the undefined variable cursorState.cursorId
      // (ReferenceError on this path) and called the result a "getMore" result.
      return callback(new MongoError(f('invalid killCursors result returned for cursor id %s', cursorId)));
    }
    // Return the result
    if(typeof callback == 'function') {
      callback(null, r.documents[0]);
    }
  }
  // Register a callback
  callbacks.register(query.requestId, killCursorCallback);
}
// Fetch the next batch for an open cursor via the getMore command.
//
// @param bson        BSON serializer instance.
// @param ns          Full namespace '<db>.<collection>'.
// @param cursorState Mutable cursor state; documents/cursorId are updated in place.
// @param batchSize   Requested batch size (absolute value is sent).
// @param raw         When truthy, hand back the raw response documents.
// @param connection  Connection to write the command on.
// @param callbacks   Callback registry keyed by wire requestId.
// @param options     May carry a readPreference (validated only).
// @param callback    Receives (err, resultDocument | rawDocuments).
WireProtocol.prototype.getMore = function(bson, ns, cursorState, batchSize, raw, connection, callbacks, options, callback) {
  // Validate the read preference; the query itself is always sent slaveOk
  // on the connection that owns the cursor.
  var readPreference = options.readPreference || new ReadPreference('primary');
  if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
  if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
  // Build command namespace: first ns segment is the database
  var parts = ns.split(/\./);
  var commandns = f('%s.$cmd', parts.shift());
  // FIX: removed an unused maxTimeMS local that was computed here but never
  // applied to the command.
  // Create getMore command
  var getMoreCmd = {
    getMore: cursorState.cursorId,
    collection: parts.join('.'),
    batchSize: Math.abs(batchSize)
  }
  // Tailable awaitData cursors may bound how long the server blocks
  if(cursorState.cmd.tailable
    && typeof cursorState.cmd.maxAwaitTimeMS == 'number') {
    getMoreCmd.maxTimeMS = cursorState.cmd.maxAwaitTimeMS;
  }
  // Build Query object
  var query = new Query(bson, commandns, getMoreCmd, {
    numberToSkip: 0, numberToReturn: -1
    , checkKeys: false, returnFieldSelector: null
  });
  // Set query flags
  query.slaveOk = true;
  // Query callback
  var queryCallback = function(err, r) {
    if(err) return callback(err);
    // Bit 0 of responseFlags is CursorNotFound: killed or timed out server-side
    if((r.responseFlags & (1 << 0)) != 0) {
      return callback(new MongoError("cursor killed or timed out"), null);
    }
    if(!Array.isArray(r.documents) || r.documents.length == 0)
      return callback(new MongoError(f('invalid getMore result returned for cursor id %s', cursorState.cursorId)));
    // We have an error detected
    if(r.documents[0].ok == 0) {
      return callback(MongoError.create(r.documents[0]));
    }
    // Raw, return all the extracted documents
    if(raw) {
      cursorState.documents = r.documents;
      cursorState.cursorId = r.cursorId;
      return callback(null, r.documents);
    }
    // Ensure we have a Long value cursor id
    var cursorId = typeof r.documents[0].cursor.id == 'number'
      ? Long.fromNumber(r.documents[0].cursor.id)
      : r.documents[0].cursor.id;
    // Set all the values
    cursorState.documents = r.documents[0].cursor.nextBatch;
    cursorState.cursorId = cursorId;
    // Return the result
    callback(null, r.documents[0]);
  }
  // If we have a raw query decorate the function
  if(raw) {
    queryCallback.raw = raw;
  }
  // Add the result field needed
  queryCallback.documentsReturnedIn = 'nextBatch';
  // Register a callback
  callbacks.register(query.requestId, queryCallback);
  // Write out the getMore command
  connection.write(query.toBin());
}
// Build the wire Query for a cursor-producing command, dispatching on the
// shape of cmd. Returns a Query, or undefined when a cursor is already open.
WireProtocol.prototype.command = function(bson, ns, cmd, cursorState, topology, options) {
  // Establish type of command
  if(cmd.find) {
    // Exhaust cursors cannot be expressed with the 3.2 find command; delegate
    // to the legacy OP_QUERY implementation.
    if(cmd.exhaust) {
      return this.legacyWireProtocol.command(bson, ns, cmd, cursorState, topology, options);
    }
    // Create the find command
    var query = executeFindCommand(bson, ns, cmd, cursorState, topology, options)
    // NOTE(review): mutates the caller's cmd object; setting virtual to false
    // presumably signals downstream code not to treat it specially — confirm.
    cmd.virtual = false;
    // Signal the documents are in the firstBatch value
    query.documentsReturnedIn = 'firstBatch';
    // Return the query
    return query;
  } else if(cursorState.cursorId != null) {
    // Cursor already established: intentionally empty — falls through and
    // returns undefined (batch fetching goes through getMore, presumably
    // driven by the cursor itself — TODO confirm caller behaviour).
  } else if(cmd) {
    return setupCommand(bson, ns, cmd, cursorState, topology, options);
  } else {
    throw new MongoError(f("command %s does not return a cursor", JSON.stringify(cmd)));
  }
}
// // Command
// {
// find: ns
// , query: <object>
// , limit: <n>
// , fields: <object>
// , skip: <n>
// , hint: <string>
// , explain: <boolean>
// , snapshot: <boolean>
// , batchSize: <n>
// , returnKey: <boolean>
// , maxScan: <n>
// , min: <n>
// , max: <n>
// , showDiskLoc: <boolean>
// , comment: <string>
// , maxTimeMS: <n>
// , raw: <boolean>
// , readPreference: <ReadPreference>
// , tailable: <boolean>
// , oplogReplay: <boolean>
// , noCursorTimeout: <boolean>
// , awaitdata: <boolean>
// , exhaust: <boolean>
// , partial: <boolean>
// }
// FIND/GETMORE SPEC
// {
// “find”: <string>,
// “filter”: { ... },
// “sort”: { ... },
// “projection”: { ... },
// “hint”: { ... },
// “skip”: <int>,
// “limit”: <int>,
// “batchSize”: <int>,
// “singleBatch”: <bool>,
// “comment”: <string>,
// “maxScan”: <int>,
// “maxTimeMS”: <int>,
// “max”: { ... },
// “min”: { ... },
// “returnKey”: <bool>,
// “showRecordId”: <bool>,
// “snapshot”: <bool>,
// “tailable”: <bool>,
// “oplogReplay”: <bool>,
// “noCursorTimeout”: <bool>,
// “awaitData”: <bool>,
// “partial”: <bool>,
// “$readPreference”: { ... }
// }
//
// Translate a legacy-style query cmd object into a 3.2 `find` command and
// build the wire Query for it (target shape documented in the FIND/GETMORE
// SPEC comment above). Mutates cursorState.batchSize.
var executeFindCommand = function(bson, ns, cmd, cursorState, topology, options) {
  // Resolve the read preference: string mode, instance, or default primary
  var readPreference = options.readPreference || new ReadPreference('primary');
  if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
  if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
  // A readPreference on the command itself takes precedence
  if(cmd.readPreference) {
    readPreference = cmd.readPreference;
  }
  // Ensure we have at least some options
  options = options || {};
  // Set the optional batchSize
  cursorState.batchSize = cmd.batchSize || cursorState.batchSize;
  // Build command namespace: first ns segment is the database
  var parts = ns.split(/\./);
  // Command namespace
  var commandns = f('%s.$cmd', parts.shift());
  // Build actual find command
  var findCmd = {
    find: parts.join('.')
  };
  // If a filter was provided, it becomes the find command's filter field
  if(cmd.query) findCmd.filter = cmd.query;
  // Sort value
  var sortValue = cmd.sort;
  // Sort may be an Array: either a single ['field', dir] pair or an array of
  // such pairs; normalise to a { field: 1|-1 } object either way.
  if(Array.isArray(sortValue)) {
    var sortObject = {};
    if(sortValue.length > 0 && !Array.isArray(sortValue[0])) {
      // Single ['field', direction] pair
      var sortDirection = sortValue[1];
      // Translate the sort order text
      if(sortDirection == 'asc') {
        sortDirection = 1;
      } else if(sortDirection == 'desc') {
        sortDirection = -1;
      }
      // Set the sort order
      sortObject[sortValue[0]] = sortDirection;
    } else {
      // Array of [field, direction] pairs
      for(var i = 0; i < sortValue.length; i++) {
        var sortDirection = sortValue[i][1];
        // Translate the sort order text
        if(sortDirection == 'asc') {
          sortDirection = 1;
        } else if(sortDirection == 'desc') {
          sortDirection = -1;
        }
        // Set the sort order
        sortObject[sortValue[i][0]] = sortDirection;
      }
    }
    sortValue = sortObject;
  };
  // Add sort to command
  if(cmd.sort) findCmd.sort = sortValue;
  // Legacy `fields` maps to the find command's `projection`
  if(cmd.fields) findCmd.projection = cmd.fields;
  // Add a hint to the command
  if(cmd.hint) findCmd.hint = cmd.hint;
  // Add a skip
  if(cmd.skip) findCmd.skip = cmd.skip;
  // Add a limit
  if(cmd.limit) findCmd.limit = cmd.limit;
  // Add a batchSize
  if(typeof cmd.batchSize == 'number') findCmd.batchSize = Math.abs(cmd.batchSize);
  // A negative legacy limit means "return at most N in a single batch then
  // close the cursor": translate to limit + singleBatch
  if(cmd.limit < 0) {
    findCmd.limit = Math.abs(cmd.limit);
    findCmd.singleBatch = true;
  }
  // If we have comment set
  if(cmd.comment) findCmd.comment = cmd.comment;
  // If we have maxScan
  if(cmd.maxScan) findCmd.maxScan = cmd.maxScan;
  // If we have maxTimeMS set
  if(cmd.maxTimeMS) findCmd.maxTimeMS = cmd.maxTimeMS;
  // If we have min
  if(cmd.min) findCmd.min = cmd.min;
  // If we have max
  if(cmd.max) findCmd.max = cmd.max;
  // If we have returnKey set
  if(cmd.returnKey) findCmd.returnKey = cmd.returnKey;
  // Legacy `showDiskLoc` maps to the find command's `showRecordId`
  if(cmd.showDiskLoc) findCmd.showRecordId = cmd.showDiskLoc;
  // If we have snapshot set
  if(cmd.snapshot) findCmd.snapshot = cmd.snapshot;
  // If we have tailable set
  if(cmd.tailable) findCmd.tailable = cmd.tailable;
  // If we have oplogReplay set
  if(cmd.oplogReplay) findCmd.oplogReplay = cmd.oplogReplay;
  // If we have noCursorTimeout set
  if(cmd.noCursorTimeout) findCmd.noCursorTimeout = cmd.noCursorTimeout;
  // Both legacy spellings (awaitData/awaitdata) are honoured; the lowercase
  // one wins when both are set.
  if(cmd.awaitData) findCmd.awaitData = cmd.awaitData;
  if(cmd.awaitdata) findCmd.awaitData = cmd.awaitdata;
  // If we have partial set
  if(cmd.partial) findCmd.partial = cmd.partial;
  // If we have explain, we need to rewrite the find command
  // to wrap it in the explain command
  if(cmd.explain) {
    findCmd = {
      explain: findCmd
    }
  }
  // Did we provide a readConcern
  if(cmd.readConcern) findCmd.readConcern = cmd.readConcern;
  // Set up the serialize and ignoreUndefined fields
  var serializeFunctions = typeof options.serializeFunctions == 'boolean'
    ? options.serializeFunctions : false;
  var ignoreUndefined = typeof options.ignoreUndefined == 'boolean'
    ? options.ignoreUndefined : false;
  // Mongos with a non-primary read preference needs the $query/$readPreference wrapper
  if(topology.type == 'mongos'
    && readPreference
    && readPreference.preference != 'primary') {
    findCmd = {
      '$query': findCmd,
      '$readPreference': readPreference.toJSON()
    };
  }
  // Build Query object; commands always use numberToReturn -1 (single result document)
  var query = new Query(bson, commandns, findCmd, {
    numberToSkip: 0, numberToReturn: -1
    , checkKeys: false, returnFieldSelector: null
    , serializeFunctions: serializeFunctions, ignoreUndefined: ignoreUndefined
  });
  // Set query flags
  query.slaveOk = readPreference.slaveOk();
  // Return the query
  return query;
}
//
// Build the wire Query for a generic (non-find) command cursor against
// <db>.$cmd, wrapping in $query/$readPreference for mongos when the read
// preference is not primary.
var setupCommand = function(bson, ns, cmd, cursorState, topology, options) {
  // Resolve the read preference, accepting either a string mode or an instance.
  var readPreference = options.readPreference || new ReadPreference('primary');
  if(typeof readPreference == 'string') {
    readPreference = new ReadPreference(readPreference);
  }
  if(!(readPreference instanceof ReadPreference)) {
    throw new MongoError('readPreference must be a ReadPreference instance');
  }
  // Set empty options object
  options = options || {}
  // Shallow-copy the command so the wrapper below never mutates the caller's object.
  var finalCmd = {};
  for(var key in cmd) {
    finalCmd[key] = cmd[key];
  }
  // First ns segment is the database; commands run against <db>.$cmd.
  var nsParts = ns.split(/\./);
  var commandNs = f('%s.$cmd', nsParts.shift());
  // Serialization options (both default to false).
  var serializeFunctions = typeof options.serializeFunctions == 'boolean'
    ? options.serializeFunctions : false;
  var ignoreUndefined = typeof options.ignoreUndefined == 'boolean'
    ? options.ignoreUndefined : false;
  // Mongos with a non-primary read preference needs the wrapped form.
  var wrapForMongos = topology.type == 'mongos'
    && readPreference
    && readPreference.preference != 'primary';
  if(wrapForMongos) {
    finalCmd = {
      '$query': finalCmd,
      '$readPreference': readPreference.toJSON()
    };
  }
  // Commands always use numberToReturn -1 (single result document).
  var query = new Query(bson, commandNs, finalCmd, {
    numberToSkip: 0, numberToReturn: -1
    , checkKeys: false, serializeFunctions: serializeFunctions
    , ignoreUndefined: ignoreUndefined
  });
  query.slaveOk = readPreference.slaveOk();
  return query;
}
/**
* @ignore
*/
var bindToCurrentDomain = function(callback) {
  // Bind the callback to the active Node domain (legacy error-handling
  // context) when both exist; otherwise return the callback untouched.
  var activeDomain = process.domain;
  if(activeDomain != null && callback != null) {
    return activeDomain.bind(callback);
  }
  return callback;
}
module.exports = WireProtocol;

357
node_modules/mongodb-core/lib/wireprotocol/commands.js generated vendored Normal file
View File

@@ -0,0 +1,357 @@
"use strict";
var MongoError = require('../error');
// Wire command operation ids
var OP_UPDATE = 2001;
var OP_INSERT = 2002;
var OP_DELETE = 2006;
// In-memory representation of a legacy OP_INSERT wire message.
//
// @param requestId  Wire request id.
// @param ismaster   Server ismaster result (supplies max bson/message sizes, used by toBin).
// @param bson       BSON serializer instance.
// @param ns         Full namespace '<db>.<collection>'.
// @param documents  Non-empty array of documents to insert.
// @param options    { serializeFunctions, ignoreUndefined, checkKeys, continueOnError } — all optional.
var Insert = function(requestId, ismaster, bson, ns, documents, options) {
  // Validate required arguments up front.
  if(ns == null) throw new MongoError("ns must be specified for query");
  if(!Array.isArray(documents) || documents.length == 0) throw new MongoError("documents array must contain at least one document to insert");
  // A null byte would terminate the namespace cstring early on the wire.
  if(!!~ns.indexOf("\x00")) {
    throw new MongoError("namespace cannot contain a null character");
  }
  // Internal state.
  this.requestId = requestId;
  this.bson = bson;
  this.ns = ns;
  this.documents = documents;
  this.ismaster = ismaster;
  // Normalise options: only boolean values are honoured, everything else
  // falls back to the defaults (checkKeys defaults to true, the rest false).
  var opts = options || {};
  this.serializeFunctions = opts.serializeFunctions === true;
  this.ignoreUndefined = opts.ignoreUndefined === true;
  this.checkKeys = opts.checkKeys !== false;
  this.continueOnError = opts.continueOnError === true;
  // Bit 0 of the OP_INSERT flags word is ContinueOnError.
  this.flags = this.continueOnError ? 1 : 0;
}
// To Binary.
// Serialize this insert into OP_INSERT wire format and return an array of
// Buffers: [16-byte header + int32 flags + cstring ns][doc1]...[docN].
// Throws when a single document exceeds maxBsonObjectSize or the whole
// message exceeds maxMessageSizeBytes.
// NOTE(review): new Buffer(n) is deprecated and returns uninitialized memory;
// every byte of the header is overwritten below, so nothing leaks here.
Insert.prototype.toBin = function() {
  // Contains all the buffers to be written
  var buffers = [];
  // Header buffer: message header, flags word, and the namespace cstring
  var header = new Buffer(
    4 * 4 // Header
    + 4 // Flags
    + Buffer.byteLength(this.ns) + 1 // namespace
  );
  // Add header to buffers
  buffers.push(header);
  // Total length of the message
  var totalLength = header.length;
  // Serialize all the documents
  for(var i = 0; i < this.documents.length; i++) {
    var buffer = this.bson.serialize(this.documents[i]
      , this.checkKeys
      , true
      , this.serializeFunctions
      , 0, this.ignoreUndefined);
    // Document is larger than maxBsonObjectSize, terminate serialization
    if(buffer.length > this.ismaster.maxBsonObjectSize) {
      throw new MongoError("Document exceeds maximum allowed bson size of " + this.ismaster.maxBsonObjectSize + " bytes");
    }
    // Add to total length of wire protocol message
    totalLength = totalLength + buffer.length;
    // Add to buffer
    buffers.push(buffer);
  }
  // Command is larger than maxMessageSizeBytes terminate serialization
  if(totalLength > this.ismaster.maxMessageSizeBytes) {
    throw new MongoError("Command exceeds maximum message size of " + this.ismaster.maxMessageSizeBytes + " bytes");
  }
  // All header fields are written little-endian, byte by byte
  var index = 0;
  // messageLength
  header[index + 3] = (totalLength >> 24) & 0xff;
  header[index + 2] = (totalLength >> 16) & 0xff;
  header[index + 1] = (totalLength >> 8) & 0xff;
  header[index] = (totalLength) & 0xff;
  index = index + 4;
  // requestID
  header[index + 3] = (this.requestId >> 24) & 0xff;
  header[index + 2] = (this.requestId >> 16) & 0xff;
  header[index + 1] = (this.requestId >> 8) & 0xff;
  header[index] = (this.requestId) & 0xff;
  index = index + 4;
  // responseTo: always 0 for requests
  header[index + 3] = (0 >> 24) & 0xff;
  header[index + 2] = (0 >> 16) & 0xff;
  header[index + 1] = (0 >> 8) & 0xff;
  header[index] = (0) & 0xff;
  index = index + 4;
  // opCode: OP_INSERT
  header[index + 3] = (OP_INSERT >> 24) & 0xff;
  header[index + 2] = (OP_INSERT >> 16) & 0xff;
  header[index + 1] = (OP_INSERT >> 8) & 0xff;
  header[index] = (OP_INSERT) & 0xff;
  index = index + 4;
  // Flags (bit 0 = ContinueOnError)
  header[index + 3] = (this.flags >> 24) & 0xff;
  header[index + 2] = (this.flags >> 16) & 0xff;
  header[index + 1] = (this.flags >> 8) & 0xff;
  header[index] = (this.flags) & 0xff;
  index = index + 4;
  // Write collection name as a NUL-terminated cstring
  index = index + header.write(this.ns, index, 'utf8') + 1;
  header[index - 1] = 0;
  // Return the buffers
  return buffers;
}
// In-memory representation of a legacy OP_UPDATE wire message.
//
// @param requestId  Wire request id.
// @param ismaster   Server ismaster result (kept for toBin, mirrors Insert).
// @param bson       BSON serializer instance.
// @param ns         Full namespace '<db>.<collection>'.
// @param update     Array whose FIRST element { q, u, upsert, multi } is used
//                   (one update per OP_UPDATE message).
// @param options    { serializeFunctions, ignoreUndefined, checkKeys } — all optional.
var Update = function(requestId, ismaster, bson, ns, update, options) {
  // Basic options needed to be passed in
  if(ns == null) throw new MongoError("ns must be specified for query");
  // Internal state.
  this.requestId = requestId;
  this.bson = bson;
  this.ns = ns;
  this.ismaster = ismaster;
  // Normalise options: only boolean values are honoured, defaults are false.
  var opts = options || {};
  this.serializeFunctions = opts.serializeFunctions === true;
  this.ignoreUndefined = opts.ignoreUndefined === true;
  this.checkKeys = opts.checkKeys === true;
  // Unpack the single update operation document.
  var op = update[0];
  this.upsert = op.upsert === true;
  this.multi = op.multi === true;
  this.q = op.q;
  this.u = op.u;
  // OP_UPDATE flags word: bit 0 = Upsert, bit 1 = MultiUpdate.
  this.flags = (this.upsert ? 1 : 0) | (this.multi ? 2 : 0);
}
// To Binary.
// Serialize this update into OP_UPDATE wire format and return an array of
// Buffers: [16-byte header + int32 ZERO + cstring ns + int32 flags][selector][update].
// NOTE(review): unlike Insert.toBin there is no max bson/message size check
// here — presumably enforced elsewhere; confirm before relying on it.
Update.prototype.toBin = function() {
  // Contains all the buffers to be written
  var buffers = [];
  // Header buffer: message header, reserved ZERO, namespace cstring, flags word
  var header = new Buffer(
    4 * 4 // Header
    + 4 // ZERO
    + Buffer.byteLength(this.ns) + 1 // namespace
    + 4 // Flags
  );
  // Add header to buffers
  buffers.push(header);
  // Total length of the message
  var totalLength = header.length;
  // Serialize the selector
  var selector = this.bson.serialize(this.q
    , this.checkKeys
    , true
    , this.serializeFunctions
    , 0, this.ignoreUndefined);
  buffers.push(selector);
  totalLength = totalLength + selector.length;
  // Serialize the update
  var update = this.bson.serialize(this.u
    , this.checkKeys
    , true
    , this.serializeFunctions
    , 0, this.ignoreUndefined);
  buffers.push(update);
  totalLength = totalLength + update.length;
  // All header fields are written little-endian, byte by byte
  var index = 0;
  // messageLength
  header[index + 3] = (totalLength >> 24) & 0xff;
  header[index + 2] = (totalLength >> 16) & 0xff;
  header[index + 1] = (totalLength >> 8) & 0xff;
  header[index] = (totalLength) & 0xff;
  index = index + 4;
  // requestID
  header[index + 3] = (this.requestId >> 24) & 0xff;
  header[index + 2] = (this.requestId >> 16) & 0xff;
  header[index + 1] = (this.requestId >> 8) & 0xff;
  header[index] = (this.requestId) & 0xff;
  index = index + 4;
  // responseTo: always 0 for requests
  header[index + 3] = (0 >> 24) & 0xff;
  header[index + 2] = (0 >> 16) & 0xff;
  header[index + 1] = (0 >> 8) & 0xff;
  header[index] = (0) & 0xff;
  index = index + 4;
  // opCode: OP_UPDATE
  header[index + 3] = (OP_UPDATE >> 24) & 0xff;
  header[index + 2] = (OP_UPDATE >> 16) & 0xff;
  header[index + 1] = (OP_UPDATE >> 8) & 0xff;
  header[index] = (OP_UPDATE) & 0xff;
  index = index + 4;
  // Reserved ZERO field
  header[index + 3] = (0 >> 24) & 0xff;
  header[index + 2] = (0 >> 16) & 0xff;
  header[index + 1] = (0 >> 8) & 0xff;
  header[index] = (0) & 0xff;
  index = index + 4;
  // Write collection name as a NUL-terminated cstring
  index = index + header.write(this.ns, index, 'utf8') + 1;
  header[index - 1] = 0;
  // Flags (bit 0 = Upsert, bit 1 = MultiUpdate)
  header[index + 3] = (this.flags >> 24) & 0xff;
  header[index + 2] = (this.flags >> 16) & 0xff;
  header[index + 1] = (this.flags >> 8) & 0xff;
  header[index] = (this.flags) & 0xff;
  index = index + 4;
  // Return the buffers
  return buffers;
}
// In-memory representation of a legacy OP_DELETE wire message.
//
// @param requestId  Wire request id.
// @param ismaster   Server ismaster result (kept for toBin, mirrors Insert).
// @param bson       BSON serializer instance.
// @param ns         Full namespace '<db>.<collection>'.
// @param remove     Array whose FIRST element { q, limit } is used
//                   (one delete per OP_DELETE message).
// @param options    { serializeFunctions, ignoreUndefined, checkKeys } — all optional.
var Remove = function(requestId, ismaster, bson, ns, remove, options) {
  // Basic options needed to be passed in
  if(ns == null) throw new MongoError("ns must be specified for query");
  // Internal state.
  this.requestId = requestId;
  this.bson = bson;
  this.ns = ns;
  this.ismaster = ismaster;
  // Normalise options: only boolean values are honoured, defaults are false.
  var opts = options || {};
  this.serializeFunctions = opts.serializeFunctions === true;
  this.ignoreUndefined = opts.ignoreUndefined === true;
  this.checkKeys = opts.checkKeys === true;
  // Unpack the single delete operation document; limit defaults to 1.
  var op = remove[0];
  this.limit = typeof op.limit == 'number' ? op.limit : 1;
  this.q = op.q;
  // OP_DELETE flags word: bit 0 (SingleRemove) is set when limit is exactly 1.
  this.flags = this.limit == 1 ? 1 : 0;
}
// To Binary.
// Serialize this delete into OP_DELETE wire format and return an array of
// Buffers: [16-byte header + int32 ZERO + cstring ns + int32 flags][selector].
// NOTE(review): unlike Insert.toBin there is no max bson/message size check
// here — presumably enforced elsewhere; confirm before relying on it.
Remove.prototype.toBin = function() {
  // Contains all the buffers to be written
  var buffers = [];
  // Header buffer: message header, reserved ZERO, namespace cstring, flags word
  var header = new Buffer(
    4 * 4 // Header
    + 4 // ZERO
    + Buffer.byteLength(this.ns) + 1 // namespace
    + 4 // Flags
  );
  // Add header to buffers
  buffers.push(header);
  // Total length of the message
  var totalLength = header.length;
  // Serialize the selector
  var selector = this.bson.serialize(this.q
    , this.checkKeys
    , true
    , this.serializeFunctions
    , 0, this.ignoreUndefined);
  buffers.push(selector);
  totalLength = totalLength + selector.length;
  // All header fields are written little-endian, byte by byte
  var index = 0;
  // messageLength
  header[index + 3] = (totalLength >> 24) & 0xff;
  header[index + 2] = (totalLength >> 16) & 0xff;
  header[index + 1] = (totalLength >> 8) & 0xff;
  header[index] = (totalLength) & 0xff;
  index = index + 4;
  // requestID
  header[index + 3] = (this.requestId >> 24) & 0xff;
  header[index + 2] = (this.requestId >> 16) & 0xff;
  header[index + 1] = (this.requestId >> 8) & 0xff;
  header[index] = (this.requestId) & 0xff;
  index = index + 4;
  // responseTo: always 0 for requests
  header[index + 3] = (0 >> 24) & 0xff;
  header[index + 2] = (0 >> 16) & 0xff;
  header[index + 1] = (0 >> 8) & 0xff;
  header[index] = (0) & 0xff;
  index = index + 4;
  // opCode: OP_DELETE
  header[index + 3] = (OP_DELETE >> 24) & 0xff;
  header[index + 2] = (OP_DELETE >> 16) & 0xff;
  header[index + 1] = (OP_DELETE >> 8) & 0xff;
  header[index] = (OP_DELETE) & 0xff;
  index = index + 4;
  // Reserved ZERO field
  header[index + 3] = (0 >> 24) & 0xff;
  header[index + 2] = (0 >> 16) & 0xff;
  header[index + 1] = (0 >> 8) & 0xff;
  header[index] = (0) & 0xff;
  index = index + 4;
  // Write collection name as a NUL-terminated cstring
  index = index + header.write(this.ns, index, 'utf8') + 1;
  header[index - 1] = 0;
  // Flags (bit 0 = SingleRemove) — previous comment incorrectly said "Write ZERO"
  header[index + 3] = (this.flags >> 24) & 0xff;
  header[index + 2] = (this.flags >> 16) & 0xff;
  header[index + 1] = (this.flags >> 8) & 0xff;
  header[index] = (this.flags) & 0xff;
  index = index + 4;
  // Return the buffers
  return buffers;
}
module.exports = {
Insert: Insert
, Update: Update
, Remove: Remove
}