mirror of https://github.com/mgerb/mywebsite synced 2026-01-13 03:02:49 +00:00

updated bunch of file paths and changed the way posts are loaded

2016-01-05 12:28:04 -06:00
parent 719ae331ae
commit c96a84d0ff
13249 changed files with 317868 additions and 2101398 deletions

View File

@@ -0,0 +1,587 @@
"use strict";
var Insert = require('./commands').Insert
, Update = require('./commands').Update
, Remove = require('./commands').Remove
, Query = require('../connection/commands').Query
, copy = require('../connection/utils').copy
, KillCursor = require('../connection/commands').KillCursor
, GetMore = require('../connection/commands').GetMore
, ReadPreference = require('../topologies/read_preference')
, f = require('util').format
, CommandResult = require('../topologies/command_result')
, MongoError = require('../error')
, Long = require('bson').Long;
// Write concern fields
var writeConcernFields = ['w', 'wtimeout', 'j', 'fsync'];
var WireProtocol = function() {}
//
// Needs to support legacy mass insert as well as ordered/unordered legacy
// emulation
//
WireProtocol.prototype.insert = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
options = options || {};
// Default is ordered execution
var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
var legacy = typeof options.legacy == 'boolean' ? options.legacy : false;
ops = Array.isArray(ops) ? ops :[ops];
// If we have more than 1000 ops, fail
if(ops.length > 1000) return callback(new MongoError("exceeded maximum write batch size of 1000"));
// Write concern
var writeConcern = options.writeConcern || {w:1};
// We are unordered
if(!ordered || writeConcern.w == 0) {
return executeUnordered('insert', Insert, ismaster, ns, bson, pool, callbacks, ops, options, callback);
}
return executeOrdered('insert', Insert, ismaster, ns, bson, pool, callbacks, ops, options, callback);
}
WireProtocol.prototype.update = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
options = options || {};
// Default is ordered execution
var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
ops = Array.isArray(ops) ? ops :[ops];
// Write concern
var writeConcern = options.writeConcern || {w:1};
// We are unordered
if(!ordered || writeConcern.w == 0) {
return executeUnordered('update', Update, ismaster, ns, bson, pool, callbacks, ops, options, callback);
}
return executeOrdered('update', Update, ismaster, ns, bson, pool, callbacks, ops, options, callback);
}
WireProtocol.prototype.remove = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
options = options || {};
// Default is ordered execution
var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
ops = Array.isArray(ops) ? ops :[ops];
// Write concern
var writeConcern = options.writeConcern || {w:1};
// We are unordered
if(!ordered || writeConcern.w == 0) {
return executeUnordered('remove', Remove, ismaster, ns, bson, pool, callbacks, ops, options, callback);
}
return executeOrdered('remove', Remove, ismaster, ns, bson, pool, callbacks, ops, options, callback);
}
WireProtocol.prototype.killCursor = function(bson, ns, cursorId, connection, callbacks, callback) {
// Create a kill cursor command
var killCursor = new KillCursor(bson, [cursorId]);
// Execute the kill cursor command
if(connection && connection.isConnected()) connection.write(killCursor.toBin());
// Set cursor to 0
cursorId = Long.ZERO;
// Return to caller
if(callback) callback(null, null);
}
WireProtocol.prototype.getMore = function(bson, ns, cursorState, batchSize, raw, connection, callbacks, options, callback) {
// Create getMore command
var getMore = new GetMore(bson, ns, cursorState.cursorId, {numberToReturn: batchSize});
// Query callback
var queryCallback = function(err, r) {
if(err) return callback(err);
// If we have a timed out query or a cursor that was killed
if((r.responseFlags & (1 << 0)) != 0) {
return callback(new MongoError("cursor killed or timed out"), null);
}
// Ensure we have a Long value cursor id
var cursorId = typeof r.cursorId == 'number'
? Long.fromNumber(r.cursorId)
: r.cursorId;
// Set all the values
cursorState.documents = r.documents;
cursorState.cursorId = cursorId;
// Return
callback(null);
}
// If we have a raw query decorate the function
if(raw) {
queryCallback.raw = raw;
}
// Register a callback
callbacks.register(getMore.requestId, queryCallback);
// Write out the getMore command
connection.write(getMore.toBin());
}
WireProtocol.prototype.command = function(bson, ns, cmd, cursorState, topology, options) {
// Establish type of command
if(cmd.find) {
return setupClassicFind(bson, ns, cmd, cursorState, topology, options)
} else if(cursorState.cursorId != null) {
} else if(cmd) {
return setupCommand(bson, ns, cmd, cursorState, topology, options);
} else {
throw new MongoError(f("command %s does not return a cursor", JSON.stringify(cmd)));
}
}
//
// Execute a find command
var setupClassicFind = function(bson, ns, cmd, cursorState, topology, options) {
var readPreference = options.readPreference || new ReadPreference('primary');
if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
// Does the cmd have a readPreference
if(cmd.readPreference) {
readPreference = cmd.readPreference;
}
// Ensure we have at least some options
options = options || {};
// Set the optional batchSize
cursorState.batchSize = cmd.batchSize || cursorState.batchSize;
var numberToReturn = 0;
// Unpack the limit and batchSize values
if(cursorState.limit == 0) {
numberToReturn = cursorState.batchSize;
} else if(cursorState.limit < 0 || cursorState.limit < cursorState.batchSize || (cursorState.limit > 0 && cursorState.batchSize == 0)) {
numberToReturn = cursorState.limit;
} else {
numberToReturn = cursorState.batchSize;
}
var numberToSkip = cursorState.skip || 0;
// Build actual find command
var findCmd = {};
// Using special modifier
var usesSpecialModifier = false;
// We have a Mongos topology, check if we need to add a readPreference
if(topology.type == 'mongos' && readPreference) {
findCmd['$readPreference'] = readPreference.toJSON();
usesSpecialModifier = true;
}
// Add special modifiers to the query
if(cmd.sort) findCmd['orderby'] = cmd.sort, usesSpecialModifier = true;
if(cmd.hint) findCmd['$hint'] = cmd.hint, usesSpecialModifier = true;
if(cmd.snapshot) findCmd['$snapshot'] = cmd.snapshot, usesSpecialModifier = true;
if(cmd.returnKey) findCmd['$returnKey'] = cmd.returnKey, usesSpecialModifier = true;
if(cmd.maxScan) findCmd['$maxScan'] = cmd.maxScan, usesSpecialModifier = true;
if(cmd.min) findCmd['$min'] = cmd.min, usesSpecialModifier = true;
if(cmd.max) findCmd['$max'] = cmd.max, usesSpecialModifier = true;
if(cmd.showDiskLoc) findCmd['$showDiskLoc'] = cmd.showDiskLoc, usesSpecialModifier = true;
if(cmd.comment) findCmd['$comment'] = cmd.comment, usesSpecialModifier = true;
if(cmd.maxTimeMS) findCmd['$maxTimeMS'] = cmd.maxTimeMS, usesSpecialModifier = true;
if(cmd.explain) {
// nToReturn must be 0 (match all) or negative (match N and close cursor)
// nToReturn > 0 will give explain results equivalent to limit(0)
numberToReturn = -Math.abs(cmd.limit || 0);
usesSpecialModifier = true;
findCmd['$explain'] = true;
}
// If we have a special modifier
if(usesSpecialModifier) {
findCmd['$query'] = cmd.query;
} else {
findCmd = cmd.query;
}
// Throw on majority readConcern passed in
if(cmd.readConcern && cmd.readConcern.level != 'local') {
throw new MongoError(f('server find command does not support a readConcern level of %s', cmd.readConcern.level));
}
// Remove readConcern, ensure no failing commands
if(cmd.readConcern) {
cmd = copy(cmd);
delete cmd['readConcern'];
}
// Set up the serialize and ignoreUndefined fields
var serializeFunctions = typeof options.serializeFunctions == 'boolean'
? options.serializeFunctions : false;
var ignoreUndefined = typeof options.ignoreUndefined == 'boolean'
? options.ignoreUndefined : false;
// Build Query object
var query = new Query(bson, ns, findCmd, {
numberToSkip: numberToSkip, numberToReturn: numberToReturn
, checkKeys: false, returnFieldSelector: cmd.fields
, serializeFunctions: serializeFunctions, ignoreUndefined: ignoreUndefined
});
// Set query flags
query.slaveOk = readPreference.slaveOk();
// Set up the option bits for wire protocol
if(typeof cmd.tailable == 'boolean') query.tailable = cmd.tailable;
if(typeof cmd.oplogReplay == 'boolean') query.oplogReplay = cmd.oplogReplay;
if(typeof cmd.noCursorTimeout == 'boolean') query.noCursorTimeout = cmd.noCursorTimeout;
if(typeof cmd.awaitData == 'boolean') query.awaitData = cmd.awaitData;
if(typeof cmd.exhaust == 'boolean') query.exhaust = cmd.exhaust;
if(typeof cmd.partial == 'boolean') query.partial = cmd.partial;
// Return the query
return query;
}
//
// Set up a command cursor
var setupCommand = function(bson, ns, cmd, cursorState, topology, options) {
var readPreference = options.readPreference || new ReadPreference('primary');
if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
// Does the cmd have a readPreference
if(cmd.readPreference) {
readPreference = cmd.readPreference;
}
// Set empty options object
options = options || {}
// Final query
var finalCmd = {};
for(var name in cmd) {
finalCmd[name] = cmd[name];
}
// Build command namespace
var parts = ns.split(/\./);
// Throw on majority readConcern passed in
if(cmd.readConcern && cmd.readConcern.level != 'local') {
throw new MongoError(f('server %s command does not support a readConcern level of %s', JSON.stringify(cmd), cmd.readConcern.level));
}
// Remove readConcern, ensure no failing commands
if(cmd.readConcern) delete cmd['readConcern'];
// Serialize functions
var serializeFunctions = typeof options.serializeFunctions == 'boolean'
? options.serializeFunctions : false;
// Set up the serialize and ignoreUndefined fields
var ignoreUndefined = typeof options.ignoreUndefined == 'boolean'
? options.ignoreUndefined : false;
// We have a Mongos topology, check if we need to add a readPreference
if(topology.type == 'mongos'
&& readPreference
&& readPreference.preference != 'primary') {
finalCmd = {
'$query': finalCmd,
'$readPreference': readPreference.toJSON()
};
}
// Build Query object
var query = new Query(bson, f('%s.$cmd', parts.shift()), finalCmd, {
numberToSkip: 0, numberToReturn: -1
, checkKeys: false, serializeFunctions: serializeFunctions
, ignoreUndefined: ignoreUndefined
});
// Set query flags
query.slaveOk = readPreference.slaveOk();
// Return the query
return query;
}
/**
* @ignore
*/
var bindToCurrentDomain = function(callback) {
var domain = process.domain;
if(domain == null || callback == null) {
return callback;
} else {
return domain.bind(callback);
}
}
var hasWriteConcern = function(writeConcern) {
if(writeConcern.w
|| writeConcern.wtimeout
|| writeConcern.j == true
|| writeConcern.fsync == true
|| Object.keys(writeConcern).length == 0) {
return true;
}
return false;
}
var cloneWriteConcern = function(writeConcern) {
var wc = {};
if(writeConcern.w != null) wc.w = writeConcern.w;
if(writeConcern.wtimeout != null) wc.wtimeout = writeConcern.wtimeout;
if(writeConcern.j != null) wc.j = writeConcern.j;
if(writeConcern.fsync != null) wc.fsync = writeConcern.fsync;
return wc;
}
//
// Aggregate up all the results
//
var aggregateWriteOperationResults = function(opType, ops, results, connection) {
var finalResult = { ok: 1, n: 0 }
// Map all the results coming back
for(var i = 0; i < results.length; i++) {
var result = results[i];
var op = ops[i];
if((result.upserted || (result.updatedExisting == false)) && finalResult.upserted == null) {
finalResult.upserted = [];
}
// Push the upserted document to the list of upserted values
if(result.upserted) {
finalResult.upserted.push({index: i, _id: result.upserted});
}
// We have an upsert where we passed in a _id
if(result.updatedExisting == false && result.n == 1 && result.upserted == null) {
finalResult.upserted.push({index: i, _id: op.q._id});
}
// We have an insert command
if(result.ok == 1 && opType == 'insert' && result.err == null) {
finalResult.n = finalResult.n + 1;
}
// We have a command error
if(result != null && result.ok == 0 || result.err || result.errmsg) {
if(result.ok == 0) finalResult.ok = 0;
finalResult.code = result.code;
finalResult.errmsg = result.errmsg || result.err || result.errMsg;
// Check if we have a write error
if(result.code == 11000
|| result.code == 11001
|| result.code == 12582
|| result.code == 16544
|| result.code == 16538
|| result.code == 16542
|| result.code == 14
|| result.code == 13511) {
if(finalResult.writeErrors == null) finalResult.writeErrors = [];
finalResult.writeErrors.push({
index: i
, code: result.code
, errmsg: result.errmsg || result.err || result.errMsg
});
} else {
finalResult.writeConcernError = {
code: result.code
, errmsg: result.errmsg || result.err || result.errMsg
}
}
} else if(typeof result.n == 'number') {
finalResult.n += result.n;
} else {
finalResult.n += 1;
}
// Result as expected
if(result != null && result.lastOp) finalResult.lastOp = result.lastOp;
}
// Return finalResult aggregated results
return new CommandResult(finalResult, connection);
}
//
// Execute all inserts in an ordered manner
//
var executeOrdered = function(opType, command, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
var _ops = ops.slice(0);
// Bind to current domain
callback = bindToCurrentDomain(callback);
// Collect all the getLastErrors
var getLastErrors = [];
// Execute an operation
var executeOp = function(list, _callback) {
// Get a pool connection
var connection = pool.get();
// No more items in the list
if(list.length == 0) {
return process.nextTick(function() {
_callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
});
}
// Get the first operation
var doc = list.shift();
// Create an insert command
var op = new command(Query.getRequestId(), ismaster, bson, ns, [doc], options);
// Write concern
var optionWriteConcern = options.writeConcern || {w:1};
// Final write concern
var writeConcern = cloneWriteConcern(optionWriteConcern);
// Get the db name
var db = ns.split('.').shift();
// Error out if no connection available
if(connection == null)
return _callback(new MongoError("no connection available"));
try {
// Execute the insert
connection.write(op.toBin());
// If write concern 0 don't fire getLastError
if(hasWriteConcern(writeConcern)) {
var getLastErrorCmd = {getlasterror: 1};
// Merge all the fields
for(var i = 0; i < writeConcernFields.length; i++) {
if(writeConcern[writeConcernFields[i]] != null)
getLastErrorCmd[writeConcernFields[i]] = writeConcern[writeConcernFields[i]];
}
// Create a getLastError command
var getLastErrorOp = new Query(bson, f("%s.$cmd", db), getLastErrorCmd, {numberToReturn: -1});
// Write the lastError message
connection.write(getLastErrorOp.toBin());
// Register the callback
callbacks.register(getLastErrorOp.requestId, function(err, result) {
if(err) return callback(err);
// Get the document
var doc = result.documents[0];
// Save the getLastError document
getLastErrors.push(doc);
// If we have an error terminate
if(doc.ok == 0 || doc.err || doc.errmsg) return callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
// Execute the next op in the list
executeOp(list, callback);
});
}
} catch(err) {
if(typeof err == 'string') err = new MongoError(err);
// We have a serialization error, rewrite as a write error to have same behavior as modern
// write commands
getLastErrors.push({ ok: 1, errmsg: err.message, code: 14 });
// Return due to an error
process.nextTick(function() {
callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
});
}
}
// Execute the operations
executeOp(_ops, callback);
}
var executeUnordered = function(opType, command, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
// Bind to current domain
callback = bindToCurrentDomain(callback);
// Total operations to write
var totalOps = ops.length;
// Collect all the getLastErrors
var getLastErrors = [];
// Write concern
var optionWriteConcern = options.writeConcern || {w:1};
// Final write concern
var writeConcern = cloneWriteConcern(optionWriteConcern);
// Driver level error
var error;
// Execute all the operations
for(var i = 0; i < ops.length; i++) {
// Create an insert command
var op = new command(Query.getRequestId(), ismaster, bson, ns, [ops[i]], options);
// Get db name
var db = ns.split('.').shift();
// Get a pool connection
var connection = pool.get();
// Error out if no connection available
if(connection == null) {
return process.nextTick(function() {
callback(new MongoError("no connection available"));
});
}
try {
// Execute the insert
connection.write(op.toBin());
// If write concern 0 don't fire getLastError
if(hasWriteConcern(writeConcern)) {
var getLastErrorCmd = {getlasterror: 1};
// Merge all the fields
for(var j = 0; j < writeConcernFields.length; j++) {
if(writeConcern[writeConcernFields[j]] != null)
getLastErrorCmd[writeConcernFields[j]] = writeConcern[writeConcernFields[j]];
}
// Create a getLastError command
var getLastErrorOp = new Query(bson, f("%s.$cmd", db), getLastErrorCmd, {numberToReturn: -1});
// Write the lastError message
connection.write(getLastErrorOp.toBin());
// Give the result from getLastError the right index
var callbackOp = function(_index) {
return function(err, result) {
if(err) error = err;
// Update the number of operations executed
totalOps = totalOps - 1;
// Save the getLastError document
if(!err) getLastErrors[_index] = result.documents[0];
// Check if we are done
if(totalOps == 0) {
process.nextTick(function() {
if(error) return callback(error);
callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
});
}
}
}
// Register the callback
callbacks.register(getLastErrorOp.requestId, callbackOp(i));
}
} catch(err) {
if(typeof err == 'string') err = new MongoError(err);
// Update the number of operations executed
totalOps = totalOps - 1;
// We have a serialization error, rewrite as a write error to have same behavior as modern
// write commands
getLastErrors[i] = { ok: 1, errmsg: err.message, code: 14 };
// Check if we are done
if(totalOps == 0) {
callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
}
}
}
// Empty w:0 return
if(writeConcern
&& writeConcern.w == 0 && callback) {
callback(null, null);
}
}
module.exports = WireProtocol;
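For context, a minimal sketch (not part of the diff) of how the legacy path above assembles the getLastError command that is written after each operation whenever a write concern is in effect. The helper name buildGetLastError and the sample write concern are assumptions for illustration only.

// Illustrative sketch, mirroring the merge loop in executeOrdered/executeUnordered above
var writeConcernFields = ['w', 'wtimeout', 'j', 'fsync'];

function buildGetLastError(writeConcern) {
  var cmd = { getlasterror: 1 };
  // Copy only the recognized write concern fields onto the command
  for(var i = 0; i < writeConcernFields.length; i++) {
    if(writeConcern[writeConcernFields[i]] != null) {
      cmd[writeConcernFields[i]] = writeConcern[writeConcernFields[i]];
    }
  }
  return cmd;
}

// Prints { getlasterror: 1, w: 'majority', wtimeout: 5000 }
console.log(buildGetLastError({ w: 'majority', wtimeout: 5000 }));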

View File

@@ -0,0 +1,329 @@
"use strict";
var Insert = require('./commands').Insert
, Update = require('./commands').Update
, Remove = require('./commands').Remove
, Query = require('../connection/commands').Query
, copy = require('../connection/utils').copy
, KillCursor = require('../connection/commands').KillCursor
, GetMore = require('../connection/commands').GetMore
, ReadPreference = require('../topologies/read_preference')
, f = require('util').format
, CommandResult = require('../topologies/command_result')
, MongoError = require('../error')
, Long = require('bson').Long;
var WireProtocol = function() {}
//
// Execute a write operation
var executeWrite = function(topology, type, opsField, ns, ops, options, callback) {
if(ops.length == 0) throw new MongoError("insert must contain at least one document");
if(typeof options == 'function') {
callback = options;
options = {};
}
// Split the ns up to get db and collection
var p = ns.split(".");
var d = p.shift();
// Options
var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
var writeConcern = options.writeConcern || {};
// return skeleton
var writeCommand = {};
writeCommand[type] = p.join('.');
writeCommand[opsField] = ops;
writeCommand.ordered = ordered;
// Did we specify a write concern
if(writeConcern && Object.keys(writeConcern).length > 0) {
writeCommand.writeConcern = writeConcern;
}
// Options object
var opts = {};
if(type == 'insert') opts.checkKeys = true;
// Ensure we support serialization of functions
if(options.serializeFunctions) opts.serializeFunctions = options.serializeFunctions;
if(options.ignoreUndefined) opts.ignoreUndefined = options.ignoreUndefined;
// Execute command
topology.command(f("%s.$cmd", d), writeCommand, opts, callback);
}
//
// Needs to support legacy mass insert as well as ordered/unordered legacy
// emulation
//
WireProtocol.prototype.insert = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
executeWrite(topology, 'insert', 'documents', ns, ops, options, callback);
}
WireProtocol.prototype.update = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
executeWrite(topology, 'update', 'updates', ns, ops, options, callback);
}
WireProtocol.prototype.remove = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
executeWrite(topology, 'delete', 'deletes', ns, ops, options, callback);
}
WireProtocol.prototype.killCursor = function(bson, ns, cursorId, connection, callbacks, callback) {
// Create a kill cursor command
var killCursor = new KillCursor(bson, [cursorId]);
// Execute the kill cursor command
if(connection && connection.isConnected()) connection.write(killCursor.toBin());
// Set cursor to 0
cursorId = Long.ZERO;
// Return to caller
if(callback) callback(null, null);
}
WireProtocol.prototype.getMore = function(bson, ns, cursorState, batchSize, raw, connection, callbacks, options, callback) {
// Create getMore command
var getMore = new GetMore(bson, ns, cursorState.cursorId, {numberToReturn: batchSize});
// Query callback
var queryCallback = function(err, r) {
if(err) return callback(err);
// If we have a timed out query or a cursor that was killed
if((r.responseFlags & (1 << 0)) != 0) {
return callback(new MongoError("cursor killed or timed out"), null);
}
// Ensure we have a Long value cursor id
var cursorId = typeof r.cursorId == 'number'
? Long.fromNumber(r.cursorId)
: r.cursorId;
// Set all the values
cursorState.documents = r.documents;
cursorState.cursorId = cursorId;
// Return
callback(null);
}
// If we have a raw query decorate the function
if(raw) {
queryCallback.raw = raw;
}
// Register a callback
callbacks.register(getMore.requestId, queryCallback);
// Write out the getMore command
connection.write(getMore.toBin());
}
WireProtocol.prototype.command = function(bson, ns, cmd, cursorState, topology, options) {
// Establish type of command
if(cmd.find) {
return setupClassicFind(bson, ns, cmd, cursorState, topology, options)
} else if(cursorState.cursorId != null) {
} else if(cmd) {
return setupCommand(bson, ns, cmd, cursorState, topology, options);
} else {
throw new MongoError(f("command %s does not return a cursor", JSON.stringify(cmd)));
}
}
//
// Execute a find command
var setupClassicFind = function(bson, ns, cmd, cursorState, topology, options) {
var readPreference = options.readPreference || new ReadPreference('primary');
if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
// Does the cmd have a readPreference
if(cmd.readPreference) {
readPreference = cmd.readPreference;
}
// Ensure we have at least some options
options = options || {};
// Set the optional batchSize
cursorState.batchSize = cmd.batchSize || cursorState.batchSize;
var numberToReturn = 0;
// Unpack the limit and batchSize values
if(cursorState.limit == 0) {
numberToReturn = cursorState.batchSize;
} else if(cursorState.limit < 0 || cursorState.limit < cursorState.batchSize || (cursorState.limit > 0 && cursorState.batchSize == 0)) {
numberToReturn = cursorState.limit;
} else {
numberToReturn = cursorState.batchSize;
}
var numberToSkip = cursorState.skip || 0;
// Build actual find command
var findCmd = {};
// Using special modifier
var usesSpecialModifier = false;
// We have a Mongos topology, check if we need to add a readPreference
if(topology.type == 'mongos' && readPreference) {
findCmd['$readPreference'] = readPreference.toJSON();
usesSpecialModifier = true;
}
// Add special modifiers to the query
if(cmd.sort) findCmd['orderby'] = cmd.sort, usesSpecialModifier = true;
if(cmd.hint) findCmd['$hint'] = cmd.hint, usesSpecialModifier = true;
if(cmd.snapshot) findCmd['$snapshot'] = cmd.snapshot, usesSpecialModifier = true;
if(cmd.returnKey) findCmd['$returnKey'] = cmd.returnKey, usesSpecialModifier = true;
if(cmd.maxScan) findCmd['$maxScan'] = cmd.maxScan, usesSpecialModifier = true;
if(cmd.min) findCmd['$min'] = cmd.min, usesSpecialModifier = true;
if(cmd.max) findCmd['$max'] = cmd.max, usesSpecialModifier = true;
if(cmd.showDiskLoc) findCmd['$showDiskLoc'] = cmd.showDiskLoc, usesSpecialModifier = true;
if(cmd.comment) findCmd['$comment'] = cmd.comment, usesSpecialModifier = true;
if(cmd.maxTimeMS) findCmd['$maxTimeMS'] = cmd.maxTimeMS, usesSpecialModifier = true;
if(cmd.explain) {
// nToReturn must be 0 (match all) or negative (match N and close cursor)
// nToReturn > 0 will give explain results equivalent to limit(0)
numberToReturn = -Math.abs(cmd.limit || 0);
usesSpecialModifier = true;
findCmd['$explain'] = true;
}
// If we have a special modifier
if(usesSpecialModifier) {
findCmd['$query'] = cmd.query;
} else {
findCmd = cmd.query;
}
// Throw on majority readConcern passed in
if(cmd.readConcern && cmd.readConcern.level != 'local') {
throw new MongoError(f('server find command does not support a readConcern level of %s', cmd.readConcern.level));
}
// Remove readConcern, ensure no failing commands
if(cmd.readConcern) {
cmd = copy(cmd);
delete cmd['readConcern'];
}
// Serialize functions
var serializeFunctions = typeof options.serializeFunctions == 'boolean'
? options.serializeFunctions : false;
var ignoreUndefined = typeof options.ignoreUndefined == 'boolean'
? options.ignoreUndefined : false;
// Build Query object
var query = new Query(bson, ns, findCmd, {
numberToSkip: numberToSkip, numberToReturn: numberToReturn
, checkKeys: false, returnFieldSelector: cmd.fields
, serializeFunctions: serializeFunctions
, ignoreUndefined: ignoreUndefined
});
// Set query flags
query.slaveOk = readPreference.slaveOk();
// Set up the option bits for wire protocol
if(typeof cmd.tailable == 'boolean') {
query.tailable = cmd.tailable;
}
if(typeof cmd.oplogReplay == 'boolean') {
query.oplogReplay = cmd.oplogReplay;
}
if(typeof cmd.noCursorTimeout == 'boolean') {
query.noCursorTimeout = cmd.noCursorTimeout;
}
if(typeof cmd.awaitData == 'boolean') {
query.awaitData = cmd.awaitData;
}
if(typeof cmd.exhaust == 'boolean') {
query.exhaust = cmd.exhaust;
}
if(typeof cmd.partial == 'boolean') {
query.partial = cmd.partial;
}
// Return the query
return query;
}
//
// Set up a command cursor
var setupCommand = function(bson, ns, cmd, cursorState, topology, options) {
var readPreference = options.readPreference || new ReadPreference('primary');
if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
// Does the cmd have a readPreference
if(cmd.readPreference) {
readPreference = cmd.readPreference;
}
// Set empty options object
options = options || {}
// Final query
var finalCmd = {};
for(var name in cmd) {
finalCmd[name] = cmd[name];
}
// Build command namespace
var parts = ns.split(/\./);
// Serialize functions
var serializeFunctions = typeof options.serializeFunctions == 'boolean'
? options.serializeFunctions : false;
var ignoreUndefined = typeof options.ignoreUndefined == 'boolean'
? options.ignoreUndefined : false;
// Throw on majority readConcern passed in
if(cmd.readConcern && cmd.readConcern.level != 'local') {
throw new MongoError(f('server %s command does not support a readConcern level of %s', JSON.stringify(cmd), cmd.readConcern.level));
}
// Remove readConcern, ensure no failing commands
if(cmd.readConcern) delete cmd['readConcern'];
// We have a Mongos topology, check if we need to add a readPreference
if(topology.type == 'mongos'
&& readPreference
&& readPreference.preference != 'primary') {
finalCmd = {
'$query': finalCmd,
'$readPreference': readPreference.toJSON()
};
}
// Build Query object
var query = new Query(bson, f('%s.$cmd', parts.shift()), finalCmd, {
numberToSkip: 0, numberToReturn: -1
, checkKeys: false, serializeFunctions: serializeFunctions
, ignoreUndefined: ignoreUndefined
});
// Set query flags
query.slaveOk = readPreference.slaveOk();
// Return the query
return query;
}
/**
* @ignore
*/
var bindToCurrentDomain = function(callback) {
var domain = process.domain;
if(domain == null || callback == null) {
return callback;
} else {
return domain.bind(callback);
}
}
module.exports = WireProtocol;
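A minimal sketch (not part of the diff) of the write command document that executeWrite above builds for the 2.6-style command path. The helper name buildWriteCommand and the sample namespace and documents are illustrative assumptions.

// Illustrative sketch of the command shape produced by executeWrite above
function buildWriteCommand(type, opsField, ns, ops, options) {
  options = options || {};
  // Split the namespace; the command itself is sent to <db>.$cmd
  var parts = ns.split('.');
  parts.shift();
  var cmd = {};
  cmd[type] = parts.join('.');
  cmd[opsField] = ops;
  cmd.ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
  if(options.writeConcern && Object.keys(options.writeConcern).length > 0) {
    cmd.writeConcern = options.writeConcern;
  }
  return cmd;
}

// Prints { insert: 'users', documents: [ { name: 'a' } ], ordered: true, writeConcern: { w: 1 } }
console.log(buildWriteCommand('insert', 'documents', 'test.users', [{ name: 'a' }], { writeConcern: { w: 1 } }));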

View File

@@ -0,0 +1,523 @@
"use strict";
var Insert = require('./commands').Insert
, Update = require('./commands').Update
, Remove = require('./commands').Remove
, Query = require('../connection/commands').Query
, copy = require('../connection/utils').copy
, KillCursor = require('../connection/commands').KillCursor
, GetMore = require('../connection/commands').GetMore
, ReadPreference = require('../topologies/read_preference')
, f = require('util').format
, CommandResult = require('../topologies/command_result')
, MongoError = require('../error')
, Long = require('bson').Long;
var WireProtocol = function(legacyWireProtocol) {
this.legacyWireProtocol = legacyWireProtocol;
}
//
// Execute a write operation
var executeWrite = function(topology, type, opsField, ns, ops, options, callback) {
if(ops.length == 0) throw new MongoError("insert must contain at least one document");
if(typeof options == 'function') {
callback = options;
options = {};
}
// Split the ns up to get db and collection
var p = ns.split(".");
var d = p.shift();
// Options
var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
var writeConcern = options.writeConcern;
// return skeleton
var writeCommand = {};
writeCommand[type] = p.join('.');
writeCommand[opsField] = ops;
writeCommand.ordered = ordered;
// Did we specify a write concern
if(writeConcern && Object.keys(writeConcern).length > 0) {
writeCommand.writeConcern = writeConcern;
}
// Do we have bypassDocumentValidation set, then enable it on the write command
if(typeof options.bypassDocumentValidation == 'boolean') {
writeCommand.bypassDocumentValidation = options.bypassDocumentValidation;
}
// Options object
var opts = {};
if(type == 'insert') opts.checkKeys = true;
// Ensure we support serialization of functions
if(options.serializeFunctions) opts.serializeFunctions = options.serializeFunctions;
if(options.ignoreUndefined) opts.ignoreUndefined = options.ignoreUndefined;
// Execute command
topology.command(f("%s.$cmd", d), writeCommand, opts, callback);
}
//
// Needs to support legacy mass insert as well as ordered/unordered legacy
// emulation
//
WireProtocol.prototype.insert = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
executeWrite(topology, 'insert', 'documents', ns, ops, options, callback);
}
WireProtocol.prototype.update = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
executeWrite(topology, 'update', 'updates', ns, ops, options, callback);
}
WireProtocol.prototype.remove = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
executeWrite(topology, 'delete', 'deletes', ns, ops, options, callback);
}
WireProtocol.prototype.killCursor = function(bson, ns, cursorId, connection, callbacks, callback) {
// Build command namespace
var parts = ns.split(/\./);
// Command namespace
var commandns = f('%s.$cmd', parts.shift());
// Create getMore command
var killcursorCmd = {
killCursors: parts.join('.'),
cursors: [cursorId]
}
// Build Query object
var query = new Query(bson, commandns, killcursorCmd, {
numberToSkip: 0, numberToReturn: -1
, checkKeys: false, returnFieldSelector: null
});
// Set query flags
query.slaveOk = true;
// Execute the kill cursor command
if(connection && connection.isConnected()) {
connection.write(query.toBin());
}
// Kill cursor callback
var killCursorCallback = function(err, r) {
if(err) {
if(typeof callback != 'function') return;
return callback(err);
}
// If we have a timed out query or a cursor that was killed
if((r.responseFlags & (1 << 0)) != 0) {
if(typeof callback != 'function') return;
return callback(new MongoError("cursor killed or timed out"), null);
}
if(!Array.isArray(r.documents) || r.documents.length == 0) {
if(typeof callback != 'function') return;
return callback(new MongoError(f('invalid killCursors result returned for cursor id %s', cursorId)));
}
// Return the result
if(typeof callback == 'function') {
callback(null, r.documents[0]);
}
}
// Register a callback
callbacks.register(query.requestId, killCursorCallback);
}
WireProtocol.prototype.getMore = function(bson, ns, cursorState, batchSize, raw, connection, callbacks, options, callback) {
var readPreference = options.readPreference || new ReadPreference('primary');
if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
// Build command namespace
var parts = ns.split(/\./);
// Command namespace
var commandns = f('%s.$cmd', parts.shift());
// Check if we have an maxTimeMS set
var maxTimeMS = typeof cursorState.cmd.maxTimeMS == 'number' ? cursorState.cmd.maxTimeMS : 3000;
// Create getMore command
var getMoreCmd = {
getMore: cursorState.cursorId,
collection: parts.join('.'),
batchSize: Math.abs(batchSize)
}
if(cursorState.cmd.tailable
&& typeof cursorState.cmd.maxAwaitTimeMS == 'number') {
getMoreCmd.maxTimeMS = cursorState.cmd.maxAwaitTimeMS;
}
// Build Query object
var query = new Query(bson, commandns, getMoreCmd, {
numberToSkip: 0, numberToReturn: -1
, checkKeys: false, returnFieldSelector: null
});
// Set query flags
query.slaveOk = true;
// Query callback
var queryCallback = function(err, r) {
if(err) return callback(err);
// If we have a timed out query or a cursor that was killed
if((r.responseFlags & (1 << 0)) != 0) {
return callback(new MongoError("cursor killed or timed out"), null);
}
if(!Array.isArray(r.documents) || r.documents.length == 0)
return callback(new MongoError(f('invalid getMore result returned for cursor id %s', cursorState.cursorId)));
// We have an error detected
if(r.documents[0].ok == 0) {
return callback(MongoError.create(r.documents[0]));
}
// Raw, return all the extracted documents
if(raw) {
cursorState.documents = r.documents;
cursorState.cursorId = r.cursorId;
return callback(null, r.documents);
}
// Ensure we have a Long value cursor id
var cursorId = typeof r.documents[0].cursor.id == 'number'
? Long.fromNumber(r.documents[0].cursor.id)
: r.documents[0].cursor.id;
// Set all the values
cursorState.documents = r.documents[0].cursor.nextBatch;
cursorState.cursorId = cursorId;
// Return the result
callback(null, r.documents[0]);
}
// If we have a raw query decorate the function
if(raw) {
queryCallback.raw = raw;
}
// Add the result field needed
queryCallback.documentsReturnedIn = 'nextBatch';
// Register a callback
callbacks.register(query.requestId, queryCallback);
// Write out the getMore command
connection.write(query.toBin());
}
WireProtocol.prototype.command = function(bson, ns, cmd, cursorState, topology, options) {
// Establish type of command
if(cmd.find) {
if(cmd.exhaust) {
return this.legacyWireProtocol.command(bson, ns, cmd, cursorState, topology, options);
}
// Create the find command
var query = executeFindCommand(bson, ns, cmd, cursorState, topology, options)
// Mark the cmd as a real (non-virtual) command
cmd.virtual = false;
// Signal the documents are in the firstBatch value
query.documentsReturnedIn = 'firstBatch';
// Return the query
return query;
} else if(cursorState.cursorId != null) {
} else if(cmd) {
return setupCommand(bson, ns, cmd, cursorState, topology, options);
} else {
throw new MongoError(f("command %s does not return a cursor", JSON.stringify(cmd)));
}
}
// Command
// {
// find: ns
// , query: <object>
// , limit: <n>
// , fields: <object>
// , skip: <n>
// , hint: <string>
// , explain: <boolean>
// , snapshot: <boolean>
// , batchSize: <n>
// , returnKey: <boolean>
// , maxScan: <n>
// , min: <n>
// , max: <n>
// , showDiskLoc: <boolean>
// , comment: <string>
// , maxTimeMS: <n>
// , raw: <boolean>
// , readPreference: <ReadPreference>
// , tailable: <boolean>
// , oplogReplay: <boolean>
// , noCursorTimeout: <boolean>
// , awaitdata: <boolean>
// , exhaust: <boolean>
// , partial: <boolean>
// }
// FIND/GETMORE SPEC
// {
// “find”: <string>,
// “filter”: { ... },
// “sort”: { ... },
// “projection”: { ... },
// “hint”: { ... },
// “skip”: <int>,
// “limit”: <int>,
// “batchSize”: <int>,
// “singleBatch”: <bool>,
// “comment”: <string>,
// “maxScan”: <int>,
// “maxTimeMS”: <int>,
// “max”: { ... },
// “min”: { ... },
// “returnKey”: <bool>,
// “showRecordId”: <bool>,
// “snapshot”: <bool>,
// “tailable”: <bool>,
// “oplogReplay”: <bool>,
// “noCursorTimeout”: <bool>,
// “awaitData”: <bool>,
// “partial”: <bool>,
// “$readPreference”: { ... }
// }
//
// Execute a find command
var executeFindCommand = function(bson, ns, cmd, cursorState, topology, options) {
var readPreference = options.readPreference || new ReadPreference('primary');
if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
// Does the cmd have a readPreference
if(cmd.readPreference) {
readPreference = cmd.readPreference;
}
// Ensure we have at least some options
options = options || {};
// Set the optional batchSize
cursorState.batchSize = cmd.batchSize || cursorState.batchSize;
// Build command namespace
var parts = ns.split(/\./);
// Command namespace
var commandns = f('%s.$cmd', parts.shift());
// Build actual find command
var findCmd = {
find: parts.join('.')
};
// If we provided a filter
if(cmd.query) findCmd.filter = cmd.query;
// Sort value
var sortValue = cmd.sort;
// Handle issue of sort being an Array
if(Array.isArray(sortValue)) {
var sortObject = {};
if(sortValue.length > 0 && !Array.isArray(sortValue[0])) {
var sortDirection = sortValue[1];
// Translate the sort order text
if(sortDirection == 'asc') {
sortDirection = 1;
} else if(sortDirection == 'desc') {
sortDirection = -1;
}
// Set the sort order
sortObject[sortValue[0]] = sortDirection;
} else {
for(var i = 0; i < sortValue.length; i++) {
var sortDirection = sortValue[i][1];
// Translate the sort order text
if(sortDirection == 'asc') {
sortDirection = 1;
} else if(sortDirection == 'desc') {
sortDirection = -1;
}
// Set the sort order
sortObject[sortValue[i][0]] = sortDirection;
}
}
sortValue = sortObject;
};
// Add sort to command
if(cmd.sort) findCmd.sort = sortValue;
// Add a projection to the command
if(cmd.fields) findCmd.projection = cmd.fields;
// Add a hint to the command
if(cmd.hint) findCmd.hint = cmd.hint;
// Add a skip
if(cmd.skip) findCmd.skip = cmd.skip;
// Add a limit
if(cmd.limit) findCmd.limit = cmd.limit;
// Add a batchSize
if(typeof cmd.batchSize == 'number') findCmd.batchSize = Math.abs(cmd.batchSize);
// Check if we wish to have a singleBatch
if(cmd.limit < 0) {
findCmd.limit = Math.abs(cmd.limit);
findCmd.singleBatch = true;
}
// If we have comment set
if(cmd.comment) findCmd.comment = cmd.comment;
// If we have maxScan
if(cmd.maxScan) findCmd.maxScan = cmd.maxScan;
// If we have maxTimeMS set
if(cmd.maxTimeMS) findCmd.maxTimeMS = cmd.maxTimeMS;
// If we have min
if(cmd.min) findCmd.min = cmd.min;
// If we have max
if(cmd.max) findCmd.max = cmd.max;
// If we have returnKey set
if(cmd.returnKey) findCmd.returnKey = cmd.returnKey;
// If we have showDiskLoc set
if(cmd.showDiskLoc) findCmd.showRecordId = cmd.showDiskLoc;
// If we have snapshot set
if(cmd.snapshot) findCmd.snapshot = cmd.snapshot;
// If we have tailable set
if(cmd.tailable) findCmd.tailable = cmd.tailable;
// If we have oplogReplay set
if(cmd.oplogReplay) findCmd.oplogReplay = cmd.oplogReplay;
// If we have noCursorTimeout set
if(cmd.noCursorTimeout) findCmd.noCursorTimeout = cmd.noCursorTimeout;
// If we have awaitData set
if(cmd.awaitData) findCmd.awaitData = cmd.awaitData;
if(cmd.awaitdata) findCmd.awaitData = cmd.awaitdata;
// If we have partial set
if(cmd.partial) findCmd.partial = cmd.partial;
// If we have explain, we need to rewrite the find command
// to wrap it in the explain command
if(cmd.explain) {
findCmd = {
explain: findCmd
}
}
// Did we provide a readConcern
if(cmd.readConcern) findCmd.readConcern = cmd.readConcern;
// Set up the serialize and ignoreUndefined fields
var serializeFunctions = typeof options.serializeFunctions == 'boolean'
? options.serializeFunctions : false;
var ignoreUndefined = typeof options.ignoreUndefined == 'boolean'
? options.ignoreUndefined : false;
// We have a Mongos topology, check if we need to add a readPreference
if(topology.type == 'mongos'
&& readPreference
&& readPreference.preference != 'primary') {
findCmd = {
'$query': findCmd,
'$readPreference': readPreference.toJSON()
};
}
// Build Query object
var query = new Query(bson, commandns, findCmd, {
numberToSkip: 0, numberToReturn: -1
, checkKeys: false, returnFieldSelector: null
, serializeFunctions: serializeFunctions, ignoreUndefined: ignoreUndefined
});
// Set query flags
query.slaveOk = readPreference.slaveOk();
// Return the query
return query;
}
//
// Set up a command cursor
var setupCommand = function(bson, ns, cmd, cursorState, topology, options) {
var readPreference = options.readPreference || new ReadPreference('primary');
if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
// Set empty options object
options = options || {}
// Final query
var finalCmd = {};
for(var name in cmd) {
finalCmd[name] = cmd[name];
}
// Build command namespace
var parts = ns.split(/\./);
// Serialize functions
var serializeFunctions = typeof options.serializeFunctions == 'boolean'
? options.serializeFunctions : false;
// Set up the serialize and ignoreUndefined fields
var ignoreUndefined = typeof options.ignoreUndefined == 'boolean'
? options.ignoreUndefined : false;
// We have a Mongos topology, check if we need to add a readPreference
if(topology.type == 'mongos'
&& readPreference
&& readPreference.preference != 'primary') {
finalCmd = {
'$query': finalCmd,
'$readPreference': readPreference.toJSON()
};
}
// Build Query object
var query = new Query(bson, f('%s.$cmd', parts.shift()), finalCmd, {
numberToSkip: 0, numberToReturn: -1
, checkKeys: false, serializeFunctions: serializeFunctions
, ignoreUndefined: ignoreUndefined
});
// Set query flags
query.slaveOk = readPreference.slaveOk();
// Return the query
return query;
}
/**
* @ignore
*/
var bindToCurrentDomain = function(callback) {
var domain = process.domain;
if(domain == null || callback == null) {
return callback;
} else {
return domain.bind(callback);
}
}
module.exports = WireProtocol;
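A minimal sketch (not part of the diff) of the legacy-sort translation performed by executeFindCommand above: array-style sorts such as ['name', 'asc'] or [['a', 1], ['b', 'desc']] are rewritten into the document form the find command expects. The helper name translateSort is an illustrative assumption.

// Illustrative sketch of the sort handling in executeFindCommand above
function translateSort(sortValue) {
  if(!Array.isArray(sortValue)) return sortValue;
  var sortObject = {};
  if(sortValue.length > 0 && !Array.isArray(sortValue[0])) {
    // Single ['field', direction] pair
    var dir = sortValue[1];
    if(dir == 'asc') dir = 1;
    else if(dir == 'desc') dir = -1;
    sortObject[sortValue[0]] = dir;
  } else {
    // Array of [field, direction] pairs
    for(var i = 0; i < sortValue.length; i++) {
      var d = sortValue[i][1];
      if(d == 'asc') d = 1;
      else if(d == 'desc') d = -1;
      sortObject[sortValue[i][0]] = d;
    }
  }
  return sortObject;
}

// Prints { name: 1, age: -1 }
console.log(translateSort([['name', 'asc'], ['age', 'desc']]));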

node_modules/mongodb-core/lib/wireprotocol/commands.js
View File

@@ -0,0 +1,357 @@
"use strict";
var MongoError = require('../error');
// Wire command operation ids
var OP_UPDATE = 2001;
var OP_INSERT = 2002;
var OP_DELETE = 2006;
var Insert = function(requestId, ismaster, bson, ns, documents, options) {
// Basic options needed to be passed in
if(ns == null) throw new MongoError("ns must be specified for query");
if(!Array.isArray(documents) || documents.length == 0) throw new MongoError("documents array must contain at least one document to insert");
// Validate that we are not passing 0x00 in the collection name
if(!!~ns.indexOf("\x00")) {
throw new MongoError("namespace cannot contain a null character");
}
// Set internal
this.requestId = requestId;
this.bson = bson;
this.ns = ns;
this.documents = documents;
this.ismaster = ismaster;
// Ensure empty options
options = options || {};
// Unpack options
this.serializeFunctions = typeof options.serializeFunctions == 'boolean' ? options.serializeFunctions : false;
this.ignoreUndefined = typeof options.ignoreUndefined == 'boolean' ? options.ignoreUndefined : false;
this.checkKeys = typeof options.checkKeys == 'boolean' ? options.checkKeys : true;
this.continueOnError = typeof options.continueOnError == 'boolean' ? options.continueOnError : false;
// Set flags
this.flags = this.continueOnError ? 1 : 0;
}
// To Binary
Insert.prototype.toBin = function() {
// Contains all the buffers to be written
var buffers = [];
// Header buffer
var header = new Buffer(
4 * 4 // Header
+ 4 // Flags
+ Buffer.byteLength(this.ns) + 1 // namespace
);
// Add header to buffers
buffers.push(header);
// Total length of the message
var totalLength = header.length;
// Serialize all the documents
for(var i = 0; i < this.documents.length; i++) {
var buffer = this.bson.serialize(this.documents[i]
, this.checkKeys
, true
, this.serializeFunctions
, 0, this.ignoreUndefined);
// Document is larger than maxBsonObjectSize, terminate serialization
if(buffer.length > this.ismaster.maxBsonObjectSize) {
throw new MongoError("Document exceeds maximum allowed bson size of " + this.ismaster.maxBsonObjectSize + " bytes");
}
// Add to total length of wire protocol message
totalLength = totalLength + buffer.length;
// Add to buffer
buffers.push(buffer);
}
// Command is larger than maxMessageSizeBytes terminate serialization
if(totalLength > this.ismaster.maxMessageSizeBytes) {
throw new MongoError("Command exceeds maximum message size of " + this.ismaster.maxMessageSizeBytes + " bytes");
}
// Add all the metadata
var index = 0;
// Write header length
header[index + 3] = (totalLength >> 24) & 0xff;
header[index + 2] = (totalLength >> 16) & 0xff;
header[index + 1] = (totalLength >> 8) & 0xff;
header[index] = (totalLength) & 0xff;
index = index + 4;
// Write header requestId
header[index + 3] = (this.requestId >> 24) & 0xff;
header[index + 2] = (this.requestId >> 16) & 0xff;
header[index + 1] = (this.requestId >> 8) & 0xff;
header[index] = (this.requestId) & 0xff;
index = index + 4;
// responseTo (always zero for requests)
header[index + 3] = (0 >> 24) & 0xff;
header[index + 2] = (0 >> 16) & 0xff;
header[index + 1] = (0 >> 8) & 0xff;
header[index] = (0) & 0xff;
index = index + 4;
// Operation
header[index + 3] = (OP_INSERT >> 24) & 0xff;
header[index + 2] = (OP_INSERT >> 16) & 0xff;
header[index + 1] = (OP_INSERT >> 8) & 0xff;
header[index] = (OP_INSERT) & 0xff;
index = index + 4;
// Flags
header[index + 3] = (this.flags >> 24) & 0xff;
header[index + 2] = (this.flags >> 16) & 0xff;
header[index + 1] = (this.flags >> 8) & 0xff;
header[index] = (this.flags) & 0xff;
index = index + 4;
// Write collection name
index = index + header.write(this.ns, index, 'utf8') + 1;
header[index - 1] = 0;
// Return the buffers
return buffers;
}
var Update = function(requestId, ismaster, bson, ns, update, options) {
// Basic options needed to be passed in
if(ns == null) throw new MongoError("ns must be specified for query");
// Ensure empty options
options = options || {};
// Set internal
this.requestId = requestId;
this.bson = bson;
this.ns = ns;
this.ismaster = ismaster;
// Unpack options
this.serializeFunctions = typeof options.serializeFunctions == 'boolean' ? options.serializeFunctions : false;
this.ignoreUndefined = typeof options.ignoreUndefined == 'boolean' ? options.ignoreUndefined : false;
this.checkKeys = typeof options.checkKeys == 'boolean' ? options.checkKeys : false;
// Unpack the update document
this.upsert = typeof update[0].upsert == 'boolean' ? update[0].upsert : false;
this.multi = typeof update[0].multi == 'boolean' ? update[0].multi : false;
this.q = update[0].q;
this.u = update[0].u;
// Create flag value
this.flags = this.upsert ? 1 : 0;
this.flags = this.multi ? this.flags | 2 : this.flags;
}
// To Binary
Update.prototype.toBin = function() {
// Contains all the buffers to be written
var buffers = [];
// Header buffer
var header = new Buffer(
4 * 4 // Header
+ 4 // ZERO
+ Buffer.byteLength(this.ns) + 1 // namespace
+ 4 // Flags
);
// Add header to buffers
buffers.push(header);
// Total length of the message
var totalLength = header.length;
// Serialize the selector
var selector = this.bson.serialize(this.q
, this.checkKeys
, true
, this.serializeFunctions
, 0, this.ignoreUndefined);
buffers.push(selector);
totalLength = totalLength + selector.length;
// Serialize the update
var update = this.bson.serialize(this.u
, this.checkKeys
, true
, this.serializeFunctions
, 0, this.ignoreUndefined);
buffers.push(update);
totalLength = totalLength + update.length;
// Index in header buffer
var index = 0;
// Write header length
header[index + 3] = (totalLength >> 24) & 0xff;
header[index + 2] = (totalLength >> 16) & 0xff;
header[index + 1] = (totalLength >> 8) & 0xff;
header[index] = (totalLength) & 0xff;
index = index + 4;
// Write header requestId
header[index + 3] = (this.requestId >> 24) & 0xff;
header[index + 2] = (this.requestId >> 16) & 0xff;
header[index + 1] = (this.requestId >> 8) & 0xff;
header[index] = (this.requestId) & 0xff;
index = index + 4;
// responseTo (always zero for requests)
header[index + 3] = (0 >> 24) & 0xff;
header[index + 2] = (0 >> 16) & 0xff;
header[index + 1] = (0 >> 8) & 0xff;
header[index] = (0) & 0xff;
index = index + 4;
// Operation
header[index + 3] = (OP_UPDATE >> 24) & 0xff;
header[index + 2] = (OP_UPDATE >> 16) & 0xff;
header[index + 1] = (OP_UPDATE >> 8) & 0xff;
header[index] = (OP_UPDATE) & 0xff;
index = index + 4;
// Write ZERO
header[index + 3] = (0 >> 24) & 0xff;
header[index + 2] = (0 >> 16) & 0xff;
header[index + 1] = (0 >> 8) & 0xff;
header[index] = (0) & 0xff;
index = index + 4;
// Write collection name
index = index + header.write(this.ns, index, 'utf8') + 1;
header[index - 1] = 0;
// Flags
header[index + 3] = (this.flags >> 24) & 0xff;
header[index + 2] = (this.flags >> 16) & 0xff;
header[index + 1] = (this.flags >> 8) & 0xff;
header[index] = (this.flags) & 0xff;
index = index + 4;
// Return the buffers
return buffers;
}
var Remove = function(requestId, ismaster, bson, ns, remove, options) {
// Basic options needed to be passed in
if(ns == null) throw new MongoError("ns must be specified for query");
// Ensure empty options
options = options || {};
// Set internal
this.requestId = requestId;
this.bson = bson;
this.ns = ns;
this.ismaster = ismaster;
// Unpack options
this.serializeFunctions = typeof options.serializeFunctions == 'boolean' ? options.serializeFunctions : false;
this.ignoreUndefined = typeof options.ignoreUndefined == 'boolean' ? options.ignoreUndefined : false;
this.checkKeys = typeof options.checkKeys == 'boolean' ? options.checkKeys : false;
// Unpack the update document
this.limit = typeof remove[0].limit == 'number' ? remove[0].limit : 1;
this.q = remove[0].q;
// Create flag value
this.flags = this.limit == 1 ? 1 : 0;
}
// To Binary
Remove.prototype.toBin = function() {
// Contains all the buffers to be written
var buffers = [];
// Header buffer
var header = new Buffer(
4 * 4 // Header
+ 4 // ZERO
+ Buffer.byteLength(this.ns) + 1 // namespace
+ 4 // Flags
);
// Add header to buffers
buffers.push(header);
// Total length of the message
var totalLength = header.length;
// Serialize the selector
var selector = this.bson.serialize(this.q
, this.checkKeys
, true
, this.serializeFunctions
, 0, this.ignoreUndefined);
buffers.push(selector);
totalLength = totalLength + selector.length;
// Index in header buffer
var index = 0;
// Write header length
header[index + 3] = (totalLength >> 24) & 0xff;
header[index + 2] = (totalLength >> 16) & 0xff;
header[index + 1] = (totalLength >> 8) & 0xff;
header[index] = (totalLength) & 0xff;
index = index + 4;
// Write header requestId
header[index + 3] = (this.requestId >> 24) & 0xff;
header[index + 2] = (this.requestId >> 16) & 0xff;
header[index + 1] = (this.requestId >> 8) & 0xff;
header[index] = (this.requestId) & 0xff;
index = index + 4;
// responseTo (always zero for requests)
header[index + 3] = (0 >> 24) & 0xff;
header[index + 2] = (0 >> 16) & 0xff;
header[index + 1] = (0 >> 8) & 0xff;
header[index] = (0) & 0xff;
index = index + 4;
// Operation
header[index + 3] = (OP_DELETE >> 24) & 0xff;
header[index + 2] = (OP_DELETE >> 16) & 0xff;
header[index + 1] = (OP_DELETE >> 8) & 0xff;
header[index] = (OP_DELETE) & 0xff;
index = index + 4;
// Write ZERO
header[index + 3] = (0 >> 24) & 0xff;
header[index + 2] = (0 >> 16) & 0xff;
header[index + 1] = (0 >> 8) & 0xff;
header[index] = (0) & 0xff;
index = index + 4;
// Write collection name
index = index + header.write(this.ns, index, 'utf8') + 1;
header[index - 1] = 0;
// Flags
header[index + 3] = (this.flags >> 24) & 0xff;
header[index + 2] = (this.flags >> 16) & 0xff;
header[index + 1] = (this.flags >> 8) & 0xff;
header[index] = (this.flags) & 0xff;
index = index + 4;
// Return the buffers
return buffers;
}
module.exports = {
Insert: Insert
, Update: Update
, Remove: Remove
}
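
A minimal sketch (not part of the diff) of the byte-level writes the toBin methods above perform: every message starts with a 16-byte header of four little-endian int32 values (messageLength, requestId, responseTo, opCode), written one byte at a time. The helper writeInt32LE and the example values are illustrative assumptions.

// Illustrative sketch of the header layout used by Insert/Update/Remove.toBin above
function writeInt32LE(buffer, value, index) {
  buffer[index + 3] = (value >> 24) & 0xff;
  buffer[index + 2] = (value >> 16) & 0xff;
  buffer[index + 1] = (value >> 8) & 0xff;
  buffer[index] = value & 0xff;
  return index + 4;
}

var OP_INSERT = 2002;
var header = new Buffer(16);
var index = 0;
var totalLength = 16; // header-only example; real messages add flags, namespace and BSON documents
index = writeInt32LE(header, totalLength, index); // messageLength
index = writeInt32LE(header, 1, index);           // requestId (example value)
index = writeInt32LE(header, 0, index);           // responseTo, zero for requests
index = writeInt32LE(header, OP_INSERT, index);   // opCode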