Index: library/mongodb/README
===================================================================
diff -u -re5d5a53bdd5482a7173ac81cff141906ac23cf32 -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68
--- library/mongodb/README	(.../README)	(revision e5d5a53bdd5482a7173ac81cff141906ac23cf32)
+++ library/mongodb/README	(.../README)	(revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68)
@@ -4,26 +4,44 @@
 Ingredients:
   https://github.com/mongodb/mongo
   https://github.com/mongodb/mongo-c-driver
+  https://github.com/mongodb/libbson
 
 The current version is tested with
 - MongoDB v2.4.9
-- mongodb-c-driver v0.8.1
+- mongo-c-driver 0.90.1
+- libbson 0.4.3
 
 Compile or obtain mongodb (the database).
+
+Compile or obtain libbson (the binary JSON library).
+
+   cd /usr/local/src
+   git clone https://github.com/mongodb/libbson
+   cd libbson
+   sh autogen.sh
+   make
+   sudo make install
+
 Compile or obtain the mongo-c-driver (client interface)
+
+   cd /usr/local/src
    git clone https://github.com/mongodb/mongo-c-driver
    cd mongo-c-driver
-   git checkout v0.8.1
+   export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig
    make
-   make install
+   sudo make install
 
-Assume, Tcl is under /usr/local/ns/lib and the source of the
-mongo-c-driver is under /usr/local/src/mongo-c-driver/,
-then configure the nsf interface via the following
-command from nsf*/library/mongodb/
+Assume the following installation directories:
+ - Tcl:            /usr/local/ns/lib/
+ - mongo-c-driver: /usr/local/src/mongo-c-driver/
+ - libbson:        /usr/local/src/libbson
+Then configure the mongodb nsf interface via the following
+command in the directory nsf*/library/mongodb/
 
-  ./configure --with-tcl=/usr/local/ns/lib --with-nsf=../../ \
-      --with-mongodb=/usr/local/src/mongo-c-driver/src/,/usr/local/src/mongo-c-driver
+   ./configure --with-tcl=/usr/local/ns/lib/ --with-nsf=../../ \
+      --with-mongoc=/usr/local/src/mongo-c-driver/mongoc/,/usr/local/src/mongo-c-driver/.libs \
+      --with-bson=/usr/local/src/libbson/bson,/usr/local/src/libbson/.libs \
+      --enable-threads --enable-symbols --prefix=/usr/local/ns
 
 In order to run the sample script,
@@ -51,97 +69,33 @@
 This example script is using the higher level object
 oriented interface for nx (nx::mongo).
 
-* Further sample-scripts:
+  After running this script, you can
+  check the content in MongoDB:
 
-    ./nxsh library/mongodb/example-nx-bi.tcl
-    ./nxsh library/mongodb/example-nx-reference-one.tcl
-    ./nxsh library/mongodb/example-nx-reference-many.tcl
-    ./nxsh library/mongodb/example-nsf-gridfs.tcl
+  % mongo
+  MongoDB shell version: 2.4.9
+  connecting to: test
+  > use tutorial
+  switched to db tutorial
+  > db.persons.find();
+  { "_id" : ObjectId("530c6e4649686ad16e261f81"), "name" : "Gustaf", "projects" : "nsf", "age" : 53 }
+  { "_id" : ObjectId("530c6e4649686ad16e261f82"), "name" : "Stefan", "projects" : "nsf" }
+  { "_id" : ObjectId("530c6e4649686ad16e261f83"), "name" : "Victor", "a" : [ "x", "y" ], "age" : 31 }
+  { "_id" : ObjectId("530c6e4649686ad16e261f84"), "name" : "Joe", "projects" : "abc", "age" : 23, "classes" : [ DBRef("courses", ObjectId("100000000000000000000000")) ] }
+  { "_id" : ObjectId("530c6e4649686ad16e261f85"), "name" : "Franz", "info" : { "x" : 203, "y" : 102 }, "age" : 29, "projects" : "gtat" }
+  { "_id" : ObjectId("530c6e4649686ad16e261f86"), "name" : "Selim", "ts" : Timestamp(1302945037, 1), "d" : ISODate("2011-04-16T09:53:39.279Z") }
+  >
 
-After running the scripts, you should see output like the one below.
+* Further sample-scripts: - % /usr/local/bin/mongo - MongoDB shell version: 2.4.4-pre- - connecting to: test - > use tutorial - switched to db tutorial - > db.persons.find(); + ./nxsh library/mongodb/tests/nx-bi.test + ./nxsh library/mongodb/tests/nx-reference-one.test + ./nxsh library/mongodb/tests/nx-reference-many.test + ./nxsh library/mongodb/tests/nx-rep.test + ./nxsh library/mongodb/tests/nx-serialize.test + ./nxsh library/mongodb/tests/nsf-gridfs.test + -gustaf neumann -=============================================================== -~/src/nsf-2.0.0% ./nxsh library/mongodb/example-nsf-mongo.tcl -/usr/local/src/tcl8.5.9/unix/tclsh -Inserting a few tuples - -Create an index on name (ascending) - -Full content -_id oid 4d9b0d56e7b0887e00000000 name string Joe projects string abc age integer 23 -_id oid 4d9b0d56e7b0887e00000001 name string Gustaf projects string nsf age integer 53 -_id oid 4d9b0d56e7b0887e00000002 name string Stefan projects string nsf -_id oid 4d9b0d56e7b0887e00000003 name string Franz info object {x integer 203 y integer 102} age integer 29 -_id oid 4d9b0d56e7b0887e00000004 name string Victor a array {0 string x 1 string y} age integer 31 - -Project members of nsf sorted by name -_id oid 4d9b0d56e7b0887e00000001 name string Gustaf projects string nsf age integer 53 -_id oid 4d9b0d56e7b0887e00000002 name string Stefan projects string nsf - -Age > 30 -_id oid 4d9b0d56e7b0887e00000001 name string Gustaf projects string nsf age integer 53 -_id oid 4d9b0d56e7b0887e00000004 name string Victor a array {0 string x 1 string y} age integer 31 - -Array 'a' contains 'x' -_id oid 4d9b0d56e7b0887e00000004 name string Victor a array {0 string x 1 string y} age integer 31 - -Embedded object has some value (info.y > 100) -_id oid 4d9b0d56e7b0887e00000003 name string Franz info object {x integer 203 y integer 102} age integer 29 - -Nsfmongo Exit -=============================================================== - - -~/src/nsf-2.0.0% ./nxsh library/mongodb/example-nx-mongo.tcl - -Query: {$query} object {name string Gustaf} -_id oid 4d9c224135018d4500000000 name string Gustaf age integer 53 projects array {0 string nsf} - -Query: {$query} object {name string Gustaf} -_id oid 4d9c224135018d4500000000 name string Gustaf age integer 55 projects array {0 string xowiki 1 string nsf} - -Project members of nsf: -Query: {$query} object {projects string nsf} -_id oid 4d9c224135018d4500000000 name string Gustaf age integer 55 projects array {0 string xowiki 1 string nsf} -_id oid 4d9c224135018d4500000001 name string Stefan projects array {0 string nsf} - ::nsf::__#5: Gustaf - ::nsf::__#6: Stefan - -All Persons sorted by name (ascending): -Query: {$query} object {} {$orderby} object {name int 1} -_id oid 4d9c224135018d4500000003 name string Franz age integer 29 projects array {0 string gtat 1 string annobackend 2 string abc} -_id oid 4d9c224135018d4500000000 name string Gustaf age integer 55 projects array {0 string xowiki 1 string nsf} -_id oid 4d9c224135018d4500000002 name string Joe age integer 23 projects array {0 string abc} -_id oid 4d9c224135018d4500000001 name string Stefan projects array {0 string nsf} - ::nsf::__#7: Franz - ::nsf::__#8: Gustaf - ::nsf::__#9: Joe - ::nsf::__#A: Stefan - -Members of Projects != 'abc' nsf sorted by name desc and age: -Query: {$query} object {projects object {{$ne} string abc}} {$orderby} object {name int -1 age int 1} -_id oid 4d9c224135018d4500000001 name string Stefan projects array {0 string nsf} -_id oid 4d9c224135018d4500000000 name string Gustaf age 
integer 55 projects array {0 string xowiki 1 string nsf} - ::nsf::__#B: Stefan - ::nsf::__#C: Gustaf - -Find persons age > 30: -Query: {$query} object {age object {{$gt} integer 30}} -_id oid 4d9c224135018d4500000000 name string Gustaf age integer 55 projects array {0 string xowiki 1 string nsf} - ::nsf::__#D: Gustaf - -Find oldies: -Query: {$query} object {age object {{$gt} integer 30}} -_id oid 4d9c224135018d4500000000 name string Gustaf age integer 55 projects array {0 string xowiki 1 string nsf} - ::nsf::__#E: Gustaf -Nsfmongo Exit Index: library/mongodb/configure =================================================================== diff -u -r4940f1317b9827162d7a0d28c74da0758ffe2d29 -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/configure (.../configure) (revision 4940f1317b9827162d7a0d28c74da0758ffe2d29) +++ library/mongodb/configure (.../configure) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -727,8 +727,10 @@ ac_subst_files='' ac_user_opts=' enable_option_checking -with_mongodb +with_mongoc +with_bson with_nsf +enable_development with_tcl with_tclinclude enable_threads @@ -1358,6 +1360,8 @@ --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] + --enable-development build nsf with development support (intensive + runtime checking, etc.; default: disabled) --enable-threads build with threads --enable-shared build and link with shared libraries (default: on) --enable-64bit enable 64bit support (default: off) @@ -1369,9 +1373,12 @@ Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) - --with-mongodb=MONGO_INCLUDE_DIR,MONGO_LIB_DIR + --with-mongoc=MONGOC_INCLUDE_DIR,MONGOC_LIB_DIR + absolute path to mongo.h and optionally the path to the library, + --without-mongoc disables build of the mongo interface + --with-bson=BSON_INCLUDE_DIR,BSON_LIB_DIR absolute path to bson.h and optionally the path to the library, - --without-mongodb disables build of the mongo interface + --without-bson disables build of the mongo interface --with-nsf=DIR_CONTAINING_NSFCONFIG_SH absolute path to nsfConfig.sh, --without-nsf disables, but this is pointless @@ -2314,23 +2321,39 @@ # specify some extra flags #-------------------------------------------------------------------- -# Check whether --with-mongodb was given. -if test "${with_mongodb+set}" = set; then : - withval=$with_mongodb; with_mongodb=$withval +# Check whether --with-mongoc was given. +if test "${with_mongoc+set}" = set; then : + withval=$with_mongoc; with_mongoc=$withval else - with_mongodb=no + with_mongoc=no fi +# Check whether --with-bson was given. +if test "${with_bson+set}" = set; then : + withval=$with_bson; with_bson=$withval +else + with_bson=no +fi + + # Check whether --with-nsf was given. if test "${with_nsf+set}" = set; then : withval=$with_nsf; with_nsf=$withval else as_fn_error $? "--with-nsf is required" "$LINENO" 5 fi +# Check whether --enable-development was given. +if test "${enable_development+set}" = set; then : + enableval=$enable_development; enable_development=$enableval +else + enable_development=no +fi + + #-------------------------------------------------------------------- # Load the tclConfig.sh file #-------------------------------------------------------------------- @@ -4827,35 +4850,53 @@ # This defines PKG(_STUB)_SOURCES, PKG(_STUB)_OBJECTS, PKG_HEADERS # and PKG_TCL_SOURCES. 
#----------------------------------------------------------------------- -if test ! "${with_mongodb}" = no; then - MONGO_INC_DIR="`echo $with_mongodb |cut -f1 -d,`" - MONGO_LIB_DIR="`echo $with_mongodb |cut -f2 -d, -s`" +if test ! "${with_mongoc}" = no; then + MONGOC_INC_DIR="`echo $with_mongoc |cut -f1 -d,`" + MONGOC_LIB_DIR="`echo $with_mongoc |cut -f2 -d, -s`" fi -if test -z "$MONGO_INC_DIR" ; then +if test ! "${with_bson}" = no; then + BSON_INC_DIR="`echo $with_bson |cut -f1 -d,`" + BSON_LIB_DIR="`echo $with_bson |cut -f2 -d, -s`" +fi + +mongo_h_ok=1 +if test -z "$MONGOC_INC_DIR" ; then mongo_h_ok=0 MONGO_INC_SPEC="" -else - MONGO_INC_SPEC="-I${MONGO_INC_DIR}" - echo "Checking ${MONGO_INC_DIR}/bson.h" - if test -f "${MONGO_INC_DIR}/bson.h" ; then - mongo_h_ok=1 - else +fi +if test -z "$BSON_INC_DIR" ; then + mongo_h_ok=0 + MONGO_INC_SPEC="" +fi + +if test "${mongo_h_ok}" = "1" ; then + MONGO_INC_SPEC="-I${MONGOC_INC_DIR} -I${BSON_INC_DIR}" + echo "Checking ${MONGOC_INC_DIR}/mongoc.h" + if test ! -f "${MONGOC_INC_DIR}/mongoc.h" ; then mongo_h_ok=0 fi + echo "Checking ${BSON_INC_DIR}/bson.h" + if test ! -f "${BSON_INC_DIR}/bson.h" ; then + mongo_h_ok=0 + fi fi if test "${mongo_h_ok}" = "0" ; then as_fn_error $? " - Could not locate bson.h on your machine to build the nsf mongo interface. + Could not locate bson.h and mongo.h on your machine to build the nsf mongo interface. " "$LINENO" 5 fi -if test -z "${MONGO_LIB_DIR}" ; then +if test -z "${MONGOC_LIB_DIR}" ; then MONGO_LIB_SPEC="" else - MONGO_LIB_SPEC="-L${MONGO_LIB_DIR}" + MONGO_LIB_SPEC="-L${MONGOC_LIB_DIR}" fi +if test ! -z "${BSON_LIB_DIR}" ; then + MONGO_LIB_SPEC="${MONGO_LIB_SPEC} -L${BSON_LIB_DIR}" +fi + #echo "MONGO include spec = '${MONGO_INC_SPEC}'" #echo "MONGO lib spec = '${MONGO_LIB_SPEC}'" @@ -4921,8 +4962,9 @@ #TEA_ADD_LIBS([$NSF_BUILD_STUB_LIB_SPEC $MONGO_LIB_SPEC -Wl,-rpath,${MONGO_LIB_DIR} -L${MONGO_LIB_DIR} -lmongoc -lbson]) +#TEA_ADD_LIBS([$NSF_BUILD_STUB_LIB_SPEC $MONGO_LIB_SPEC -L${MONGO_LIB_DIR} -lmongoc -lbson]) - vars="$NSF_BUILD_STUB_LIB_SPEC $MONGO_LIB_SPEC -L${MONGO_LIB_DIR} -lmongoc -lbson" + vars="$NSF_BUILD_STUB_LIB_SPEC $MONGO_LIB_SPEC -lmongoc-1.0 -lbson-1.0" for i in $vars; do if test "${TEA_PLATFORM}" = "windows" -a "$GCC" = "yes" ; then # Convert foo.lib to -lfoo for GCC. No-op if not *.lib @@ -7529,7 +7571,13 @@ $as_echo "#define USE_TCL_STUBS 1" >>confdefs.h #AC_DEFINE(USE_TK_STUBS) +if test "$enable_development" = yes; then +$as_echo "#define NSF_DEVELOPMENT 1" >>confdefs.h + +fi + + #-------------------------------------------------------------------- # This macro generates a line to use when building a library. 
It # depends on values set by the TEA_ENABLE_SHARED, TEA_ENABLE_SYMBOLS, Index: library/mongodb/configure.ac =================================================================== diff -u -r4940f1317b9827162d7a0d28c74da0758ffe2d29 -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/configure.ac (.../configure.ac) (revision 4940f1317b9827162d7a0d28c74da0758ffe2d29) +++ library/mongodb/configure.ac (.../configure.ac) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -35,18 +35,28 @@ #-------------------------------------------------------------------- # specify some extra flags #-------------------------------------------------------------------- -AC_ARG_WITH(mongodb, - [ --with-mongodb=MONGO_INCLUDE_DIR[,MONGO_LIB_DIR] +AC_ARG_WITH(mongoc, + [ --with-mongoc=MONGOC_INCLUDE_DIR[,MONGOC_LIB_DIR] + absolute path to mongo.h and optionally the path to the library, + --without-mongoc disables build of the mongo interface], + [with_mongoc=$withval], [with_mongoc=no]) +AC_ARG_WITH(bson, + [ --with-bson=BSON_INCLUDE_DIR[,BSON_LIB_DIR] absolute path to bson.h and optionally the path to the library, - --without-mongodb disables build of the mongo interface], - [with_mongodb=$withval], [with_mongodb=no]) + --without-bson disables build of the mongo interface], + [with_bson=$withval], [with_bson=no]) AC_ARG_WITH(nsf, [ --with-nsf=DIR_CONTAINING_NSFCONFIG_SH absolute path to nsfConfig.sh, --without-nsf disables, but this is pointless], [with_nsf=$withval], [AC_MSG_ERROR([--with-nsf is required])]) +AC_ARG_ENABLE(development, + AC_HELP_STRING([--enable-development], + [build nsf with development support (intensive runtime checking, etc.; default: disabled)]), + [enable_development=$enableval], [enable_development=no]) + #-------------------------------------------------------------------- # Load the tclConfig.sh file #-------------------------------------------------------------------- @@ -86,35 +96,53 @@ # This defines PKG(_STUB)_SOURCES, PKG(_STUB)_OBJECTS, PKG_HEADERS # and PKG_TCL_SOURCES. #----------------------------------------------------------------------- -if test ! "${with_mongodb}" = no; then - MONGO_INC_DIR="`echo $with_mongodb |cut -f1 -d,`" - MONGO_LIB_DIR="`echo $with_mongodb |cut -f2 -d, -s`" +if test ! "${with_mongoc}" = no; then + MONGOC_INC_DIR="`echo $with_mongoc |cut -f1 -d,`" + MONGOC_LIB_DIR="`echo $with_mongoc |cut -f2 -d, -s`" fi -if test -z "$MONGO_INC_DIR" ; then +if test ! "${with_bson}" = no; then + BSON_INC_DIR="`echo $with_bson |cut -f1 -d,`" + BSON_LIB_DIR="`echo $with_bson |cut -f2 -d, -s`" +fi + +mongo_h_ok=1 +if test -z "$MONGOC_INC_DIR" ; then mongo_h_ok=0 MONGO_INC_SPEC="" -else - MONGO_INC_SPEC="-I${MONGO_INC_DIR}" - echo "Checking ${MONGO_INC_DIR}/bson.h" - if test -f "${MONGO_INC_DIR}/bson.h" ; then - mongo_h_ok=1 - else +fi +if test -z "$BSON_INC_DIR" ; then + mongo_h_ok=0 + MONGO_INC_SPEC="" +fi + +if test "${mongo_h_ok}" = "1" ; then + MONGO_INC_SPEC="-I${MONGOC_INC_DIR} -I${BSON_INC_DIR}" + echo "Checking ${MONGOC_INC_DIR}/mongoc.h" + if test ! -f "${MONGOC_INC_DIR}/mongoc.h" ; then mongo_h_ok=0 fi + echo "Checking ${BSON_INC_DIR}/bson.h" + if test ! -f "${BSON_INC_DIR}/bson.h" ; then + mongo_h_ok=0 + fi fi if test "${mongo_h_ok}" = "0" ; then AC_MSG_ERROR([ - Could not locate bson.h on your machine to build the nsf mongo interface. + Could not locate bson.h and mongo.h on your machine to build the nsf mongo interface. 
]) fi - -if test -z "${MONGO_LIB_DIR}" ; then + +if test -z "${MONGOC_LIB_DIR}" ; then MONGO_LIB_SPEC="" else - MONGO_LIB_SPEC="-L${MONGO_LIB_DIR}" + MONGO_LIB_SPEC="-L${MONGOC_LIB_DIR}" fi - + +if test ! -z "${BSON_LIB_DIR}" ; then + MONGO_LIB_SPEC="${MONGO_LIB_SPEC} -L${BSON_LIB_DIR}" +fi + #echo "MONGO include spec = '${MONGO_INC_SPEC}'" #echo "MONGO lib spec = '${MONGO_LIB_SPEC}'" @@ -129,7 +157,8 @@ TEA_ADD_HEADERS([]) TEA_ADD_INCLUDES([-I${with_nsf}/generic ${NSF_BUILD_INCLUDE_SPEC} ${MONGO_INC_SPEC}]) #TEA_ADD_LIBS([$NSF_BUILD_STUB_LIB_SPEC $MONGO_LIB_SPEC -Wl,-rpath,${MONGO_LIB_DIR} -L${MONGO_LIB_DIR} -lmongoc -lbson]) -TEA_ADD_LIBS([$NSF_BUILD_STUB_LIB_SPEC $MONGO_LIB_SPEC -L${MONGO_LIB_DIR} -lmongoc -lbson]) +#TEA_ADD_LIBS([$NSF_BUILD_STUB_LIB_SPEC $MONGO_LIB_SPEC -L${MONGO_LIB_DIR} -lmongoc -lbson]) +TEA_ADD_LIBS([$NSF_BUILD_STUB_LIB_SPEC $MONGO_LIB_SPEC -lmongoc-1.0 -lbson-1.0]) TEA_ADD_CFLAGS([]) TEA_ADD_STUB_SOURCES([]) TEA_ADD_TCL_SOURCES([]) @@ -211,7 +240,11 @@ AC_DEFINE(USE_TCL_STUBS) #AC_DEFINE(USE_TK_STUBS) +if test "$enable_development" = yes; then + AC_DEFINE(NSF_DEVELOPMENT, 1, [Are we building with development support?]) +fi + #-------------------------------------------------------------------- # This macro generates a line to use when building a library. It # depends on values set by the TEA_ENABLE_SHARED, TEA_ENABLE_SYMBOLS, Index: library/mongodb/mongoAPI.decls =================================================================== diff -u -r05b2776a0ecbc0453ae96bbfa9d94315e466f3f5 -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/mongoAPI.decls (.../mongoAPI.decls) (revision 05b2776a0ecbc0453ae96bbfa9d94315e466f3f5) +++ library/mongodb/mongoAPI.decls (.../mongoAPI.decls) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -8,38 +8,44 @@ cmd "::mongo" } array set ptrConverter { - mongo 1 - gridfs 1 - gridfile 1 - mongo_cursor 1 + mongoc_client_t 1 + mongoc_collection_t 1 + mongoc_cursor_t 1 + mongoc_gridfs_file_t 1 + mongoc_gridfs_t 1 } cmd close NsfMongoClose { - {-argName "conn" -required 1 -type mongo -withObj 1} + {-argName "conn" -required 1 -type mongoc_client_t -withObj 1} } cmd connect NsfMongoConnect { - {-argName "-replica-set" -required 0 -nrargs 1} - {-argName "-server" -required 0 -nrargs 1 -type tclobj} - {-argName "-timeout" -required 0 -nrargs 1 -type int32} + {-argName "-uri" -required 0 -nrargs 1} } cmd run NsfMongoRunCmd { {-argName "-nocomplain" -required 0 -nrargs 0} - {-argName "conn" -required 1 -type mongo} + {-argName "conn" -required 1 -type mongoc_client_t} {-argName "db" -required 1} {-argName "cmd" -required 1 -type tclobj} } -cmd count NsfMongoCount { - {-argName "conn" -required 1 -type mongo} - {-argName "namespace" -required 1} +# +# collection +# +cmd "collection::close" NsfCollectionClose { + {-argName "collection" -required 1 -type mongoc_collection_t -withObj 1} +} +cmd "collection::count" NsfMongoCollectionCount { + {-argName "collection" -required 1 -type mongoc_collection_t} {-argName "query" -required 1 -type tclobj} } - -cmd index NsfMongoIndex { - {-argName "conn" -required 1 -type mongo} - {-argName "namespace" -required 1} +cmd "collection::delete" NsfMongoCollectionDelete { + {-argName "collection" -required 1 -type mongoc_collection_t} + {-argName "condition" -required 1 -type tclobj} +} +cmd "collection::index" NsfMongoCollectionIndex { + {-argName "collection" -required 1 -type mongoc_collection_t} {-argName "attributes" -required 1 -type tclobj} {-argName "-name" -required 0 -nrargs 1} {-argName 
"-background" -required 0 -nrargs 0} @@ -48,31 +54,25 @@ {-argName "-ttl" -required 0 -nrargs 1 -type int32} {-argName "-unique" -required 0 -nrargs 0} } - -cmd insert NsfMongoInsert { - {-argName "conn" -required 1 -type mongo} - {-argName "namespace" -required 1} +cmd "collection::insert" NsfMongoCollectionInsert { + {-argName "collection" -required 1 -type mongoc_collection_t} {-argName "values" -required 1 -type tclobj} } - -cmd query NsfMongoQuery { - {-argName "conn" -required 1 -type mongo} - {-argName "namespace" -required 1} +cmd collection::open NsfCollectionOpen { + {-argName "conn" -required 1 -type mongoc_client_t} + {-argName "dbname" -required 1} + {-argName "collectionname" -required 1} +} +cmd "collection::query" NsfMongoCollectionQuery { + {-argName "collection" -required 1 -type mongoc_collection_t} {-argName "query" -required 1 -type tclobj} {-argName "-atts" -required 0 -nrargs 1 -type tclobj} - {-argName "-limit" -required 0 -nrargs 1 -type int32} - {-argName "-skip" -required 0 -nrargs 1 -type int32} + {-argName "-limit" -required 0 -type int32} + {-argName "-skip" -required 0 -type int32} } -cmd remove NsfMongoRemove { - {-argName "conn" -required 1 -type mongo} - {-argName "namespace" -required 1} - {-argName "condition" -required 1 -type tclobj} -} - -cmd update NsfMongoUpdate { - {-argName "conn" -required 1 -type mongo} - {-argName "namespace" -required 1} +cmd "collection::update" NsfMongoCollectionUpdate { + {-argName "collection" -required 1 -type mongoc_collection_t} {-argName "cond" -required 1 -type tclobj} {-argName "values" -required 1 -type tclobj} {-argName "-upsert" -required 0 -nrargs 0} @@ -83,8 +83,7 @@ # Cursor # cmd cursor::find NsfMongoCursorFind { - {-argName "conn" -required 1 -type mongo} - {-argName "namespace" -required 1} + {-argName "collection" -required 1 -type mongoc_collection_t} {-argName "query" -required 1 -type tclobj} {-argName "-atts" -required 0 -nrargs 1 -type tclobj} {-argName "-limit" -required 0 -type int32} @@ -93,64 +92,77 @@ {-argName "-awaitdata" -required 0 -nrargs 0} } cmd cursor::next NsfMongoCursorNext { - {-argName "cursor" -required 1 -type mongo_cursor} + {-argName "cursor" -required 1 -type mongoc_cursor_t} } cmd cursor::close NsfMongoCursorClose { - {-argName "cursor" -required 1 -type mongo_cursor -withObj 1} + {-argName "cursor" -required 1 -type mongoc_cursor_t -withObj 1} } # # GridFS # +cmd gridfs::close NsfMongoGridFSClose { + {-argName "gfs" -required 1 -type mongoc_gridfs_t -withObj 1} +} + cmd gridfs::open NsfMongoGridFSOpen { - {-argName "conn" -required 1 -type mongo} + {-argName "conn" -required 1 -type mongoc_client_t} {-argName "dbname" -required 1} {-argName "prefix" -required 1} } -cmd gridfs::store_file NsfMongoGridFSStoreFile { - {-argName "gfs" -required 1 -type gridfs} - {-argName "filename" -required 1} - {-argName "remotename" -required 1} + +# +# GridFile commands operating on GridFS +# + +cmd gridfile::create NsfMongoGridFileCreate { + {-argName "-source" -required 1 -typeName "gridfilesource" -type "file|string"} + {-argName "gfs" -required 1 -type mongoc_gridfs_t} + {-argName "value" -required 1} + {-argName "name" -required 1} {-argName "contenttype" -required 1} } -cmd gridfs::remove_file NsfMongoGridFSRemoveFile { - {-argName "gfs" -required 1 -type gridfs} - {-argName "filename" -required 1} +cmd "gridfile::delete" NsfMongoGridFileDelete { + {-argName "gfs" -required 1 -type mongoc_gridfs_t} + {-argName "query" -required 1 -type tclobj} } - -cmd gridfs::close NsfMongoGridFSClose { 
- {-argName "gfs" -required 1 -type gridfs -withObj 1} +cmd "gridfile::open" NsfMongoGridFileOpen { + {-argName "gfs" -required 1 -type mongoc_gridfs_t} + {-argName "query" -required 1 -type tclobj} } + # # GridFile # cmd "gridfile::close" NsfMongoGridFileClose { - {-argName "file" -required 1 -type gridfile -withObj 1} + {-argName "gridfile" -required 1 -type mongoc_gridfs_file_t -withObj 1} } - cmd "gridfile::get_contentlength" NsfMongoGridFileGetContentlength { - {-argName "file" -required 1 -type gridfile} + {-argName "gridfile" -required 1 -type mongoc_gridfs_file_t} } cmd "gridfile::get_contenttype" NsfMongoGridFileGetContentType { - {-argName "file" -required 1 -type gridfile} + {-argName "gridfile" -required 1 -type mongoc_gridfs_file_t} } cmd "gridfile::get_metadata" NsfMongoGridFileGetMetaData { - {-argName "file" -required 1 -type gridfile} + {-argName "gridfile" -required 1 -type mongoc_gridfs_file_t} } -cmd "gridfile::open" NsfMongoGridFileOpen { - {-argName "fs" -required 1 -type gridfs} - {-argName "filename" -required 1} -} cmd "gridfile::read" NsfMongoGridFileRead { - {-argName "file" -required 1 -type gridfile} + {-argName "gridfile" -required 1 -type mongoc_gridfs_file_t} {-argName "size" -required 1 -type int32} } cmd "gridfile::seek" NsfMongoGridFileSeek { - {-argName "file" -required 1 -type gridfile} + {-argName "gridfile" -required 1 -type mongoc_gridfs_file_t} {-argName "offset" -required 1 -type int32} } + +# +# Local variables: +# mode: tcl +# tcl-indent-level: 2 +# indent-tabs-mode: nil +# End: Index: library/mongodb/mongoAPI.h =================================================================== diff -u -r05b2776a0ecbc0453ae96bbfa9d94315e466f3f5 -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/mongoAPI.h (.../mongoAPI.h) (revision 05b2776a0ecbc0453ae96bbfa9d94315e466f3f5) +++ library/mongodb/mongoAPI.h (.../mongoAPI.h) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -64,89 +64,154 @@ +enum GridfilesourceIdx {GridfilesourceNULL, GridfilesourceFileIdx, GridfilesourceStringIdx}; +static int ConvertToGridfilesource(Tcl_Interp *interp, Tcl_Obj *objPtr, Nsf_Param CONST *pPtr, + ClientData *clientData, Tcl_Obj **outObjPtr) { + int index, result; + static CONST char *opts[] = {"file", "string", NULL}; + (void)pPtr; + result = Tcl_GetIndexFromObj(interp, objPtr, opts, "gridfilesource", 0, &index); + *clientData = (ClientData) INT2PTR(index + 1); + *outObjPtr = objPtr; + return result; +} + + + static Nsf_EnumeratorConverterEntry enumeratorConverterEntries[] = { + {ConvertToGridfilesource, "file|string"}, + {NULL, NULL} +}; + + /* just to define the symbol */ -static Nsf_methodDefinition method_definitions[24]; +static Nsf_methodDefinition method_definitions[26]; static CONST char *method_command_namespace_names[] = { "::mongo" }; +static int NsfCollectionCloseStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); +static int NsfCollectionOpenStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoCloseStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); +static int NsfMongoCollectionCountStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); +static int NsfMongoCollectionDeleteStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); +static int NsfMongoCollectionIndexStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); +static int 
NsfMongoCollectionInsertStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); +static int NsfMongoCollectionQueryStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); +static int NsfMongoCollectionUpdateStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoConnectStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); -static int NsfMongoCountStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoCursorCloseStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoCursorFindStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoCursorNextStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoGridFSCloseStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoGridFSOpenStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); -static int NsfMongoGridFSRemoveFileStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); -static int NsfMongoGridFSStoreFileStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoGridFileCloseStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); +static int NsfMongoGridFileCreateStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); +static int NsfMongoGridFileDeleteStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoGridFileGetContentTypeStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoGridFileGetContentlengthStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoGridFileGetMetaDataStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoGridFileOpenStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoGridFileReadStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoGridFileSeekStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); -static int NsfMongoIndexStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); -static int NsfMongoInsertStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); -static int NsfMongoQueryStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); -static int NsfMongoRemoveStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); static int NsfMongoRunCmdStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); -static int NsfMongoUpdateStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv []); -static int NsfMongoClose(Tcl_Interp *interp, mongo *connPtr, Tcl_Obj *connObj); -static int NsfMongoConnect(Tcl_Interp *interp, CONST char *withReplica_set, Tcl_Obj *withServer, int withTimeout); -static int NsfMongoCount(Tcl_Interp *interp, mongo *connPtr, CONST char *namespace, Tcl_Obj *query); -static int NsfMongoCursorClose(Tcl_Interp *interp, mongo_cursor *cursorPtr, Tcl_Obj *cursorObj); -static int NsfMongoCursorFind(Tcl_Interp *interp, mongo *connPtr, CONST char 
*namespace, Tcl_Obj *query, Tcl_Obj *withAtts, int withLimit, int withSkip, int withTailable, int withAwaitdata); -static int NsfMongoCursorNext(Tcl_Interp *interp, mongo_cursor *cursorPtr); -static int NsfMongoGridFSClose(Tcl_Interp *interp, gridfs *gfsPtr, Tcl_Obj *gfsObj); -static int NsfMongoGridFSOpen(Tcl_Interp *interp, mongo *connPtr, CONST char *dbname, CONST char *prefix); -static int NsfMongoGridFSRemoveFile(Tcl_Interp *interp, gridfs *gfsPtr, CONST char *filename); -static int NsfMongoGridFSStoreFile(Tcl_Interp *interp, gridfs *gfsPtr, CONST char *filename, CONST char *remotename, CONST char *contenttype); -static int NsfMongoGridFileClose(Tcl_Interp *interp, gridfile *filePtr, Tcl_Obj *fileObj); -static int NsfMongoGridFileGetContentType(Tcl_Interp *interp, gridfile *filePtr); -static int NsfMongoGridFileGetContentlength(Tcl_Interp *interp, gridfile *filePtr); -static int NsfMongoGridFileGetMetaData(Tcl_Interp *interp, gridfile *filePtr); -static int NsfMongoGridFileOpen(Tcl_Interp *interp, gridfs *fsPtr, CONST char *filename); -static int NsfMongoGridFileRead(Tcl_Interp *interp, gridfile *filePtr, int size); -static int NsfMongoGridFileSeek(Tcl_Interp *interp, gridfile *filePtr, int offset); -static int NsfMongoIndex(Tcl_Interp *interp, mongo *connPtr, CONST char *namespace, Tcl_Obj *attributes, CONST char *withName, int withBackground, int withDropdups, int withSparse, int withTtl, int withUnique); -static int NsfMongoInsert(Tcl_Interp *interp, mongo *connPtr, CONST char *namespace, Tcl_Obj *values); -static int NsfMongoQuery(Tcl_Interp *interp, mongo *connPtr, CONST char *namespace, Tcl_Obj *query, Tcl_Obj *withAtts, int withLimit, int withSkip); -static int NsfMongoRemove(Tcl_Interp *interp, mongo *connPtr, CONST char *namespace, Tcl_Obj *condition); -static int NsfMongoRunCmd(Tcl_Interp *interp, int withNocomplain, mongo *connPtr, CONST char *db, Tcl_Obj *cmd); -static int NsfMongoUpdate(Tcl_Interp *interp, mongo *connPtr, CONST char *namespace, Tcl_Obj *cond, Tcl_Obj *values, int withUpsert, int withAll); +static int NsfCollectionClose(Tcl_Interp *interp, mongoc_collection_t *collectionPtr, Tcl_Obj *collectionObj); +static int NsfCollectionOpen(Tcl_Interp *interp, mongoc_client_t *connPtr, CONST char *dbname, CONST char *collectionname); +static int NsfMongoClose(Tcl_Interp *interp, mongoc_client_t *connPtr, Tcl_Obj *connObj); +static int NsfMongoCollectionCount(Tcl_Interp *interp, mongoc_collection_t *collectionPtr, Tcl_Obj *query); +static int NsfMongoCollectionDelete(Tcl_Interp *interp, mongoc_collection_t *collectionPtr, Tcl_Obj *condition); +static int NsfMongoCollectionIndex(Tcl_Interp *interp, mongoc_collection_t *collectionPtr, Tcl_Obj *attributes, CONST char *withName, int withBackground, int withDropdups, int withSparse, int withTtl, int withUnique); +static int NsfMongoCollectionInsert(Tcl_Interp *interp, mongoc_collection_t *collectionPtr, Tcl_Obj *values); +static int NsfMongoCollectionQuery(Tcl_Interp *interp, mongoc_collection_t *collectionPtr, Tcl_Obj *query, Tcl_Obj *withAtts, int withLimit, int withSkip); +static int NsfMongoCollectionUpdate(Tcl_Interp *interp, mongoc_collection_t *collectionPtr, Tcl_Obj *cond, Tcl_Obj *values, int withUpsert, int withAll); +static int NsfMongoConnect(Tcl_Interp *interp, CONST char *withUri); +static int NsfMongoCursorClose(Tcl_Interp *interp, mongoc_cursor_t *cursorPtr, Tcl_Obj *cursorObj); +static int NsfMongoCursorFind(Tcl_Interp *interp, mongoc_collection_t *collectionPtr, Tcl_Obj *query, Tcl_Obj *withAtts, int 
withLimit, int withSkip, int withTailable, int withAwaitdata); +static int NsfMongoCursorNext(Tcl_Interp *interp, mongoc_cursor_t *cursorPtr); +static int NsfMongoGridFSClose(Tcl_Interp *interp, mongoc_gridfs_t *gfsPtr, Tcl_Obj *gfsObj); +static int NsfMongoGridFSOpen(Tcl_Interp *interp, mongoc_client_t *connPtr, CONST char *dbname, CONST char *prefix); +static int NsfMongoGridFileClose(Tcl_Interp *interp, mongoc_gridfs_file_t *gridfilePtr, Tcl_Obj *gridfileObj); +static int NsfMongoGridFileCreate(Tcl_Interp *interp, int withSource, mongoc_gridfs_t *gfsPtr, CONST char *value, CONST char *name, CONST char *contenttype); +static int NsfMongoGridFileDelete(Tcl_Interp *interp, mongoc_gridfs_t *gfsPtr, Tcl_Obj *query); +static int NsfMongoGridFileGetContentType(Tcl_Interp *interp, mongoc_gridfs_file_t *gridfilePtr); +static int NsfMongoGridFileGetContentlength(Tcl_Interp *interp, mongoc_gridfs_file_t *gridfilePtr); +static int NsfMongoGridFileGetMetaData(Tcl_Interp *interp, mongoc_gridfs_file_t *gridfilePtr); +static int NsfMongoGridFileOpen(Tcl_Interp *interp, mongoc_gridfs_t *gfsPtr, Tcl_Obj *query); +static int NsfMongoGridFileRead(Tcl_Interp *interp, mongoc_gridfs_file_t *gridfilePtr, int size); +static int NsfMongoGridFileSeek(Tcl_Interp *interp, mongoc_gridfs_file_t *gridfilePtr, int offset); +static int NsfMongoRunCmd(Tcl_Interp *interp, int withNocomplain, mongoc_client_t *connPtr, CONST char *db, Tcl_Obj *cmd); enum { + NsfCollectionCloseIdx, + NsfCollectionOpenIdx, NsfMongoCloseIdx, + NsfMongoCollectionCountIdx, + NsfMongoCollectionDeleteIdx, + NsfMongoCollectionIndexIdx, + NsfMongoCollectionInsertIdx, + NsfMongoCollectionQueryIdx, + NsfMongoCollectionUpdateIdx, NsfMongoConnectIdx, - NsfMongoCountIdx, NsfMongoCursorCloseIdx, NsfMongoCursorFindIdx, NsfMongoCursorNextIdx, NsfMongoGridFSCloseIdx, NsfMongoGridFSOpenIdx, - NsfMongoGridFSRemoveFileIdx, - NsfMongoGridFSStoreFileIdx, NsfMongoGridFileCloseIdx, + NsfMongoGridFileCreateIdx, + NsfMongoGridFileDeleteIdx, NsfMongoGridFileGetContentTypeIdx, NsfMongoGridFileGetContentlengthIdx, NsfMongoGridFileGetMetaDataIdx, NsfMongoGridFileOpenIdx, NsfMongoGridFileReadIdx, NsfMongoGridFileSeekIdx, - NsfMongoIndexIdx, - NsfMongoInsertIdx, - NsfMongoQueryIdx, - NsfMongoRemoveIdx, - NsfMongoRunCmdIdx, - NsfMongoUpdateIdx + NsfMongoRunCmdIdx } NsfMethods; static int +NsfCollectionCloseStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { + ParseContext pc; + (void)clientData; + + if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], + method_definitions[NsfCollectionCloseIdx].paramDefs, + method_definitions[NsfCollectionCloseIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + &pc) == TCL_OK)) { + mongoc_collection_t *collectionPtr = (mongoc_collection_t *)pc.clientData[0]; + + assert(pc.status == 0); + return NsfCollectionClose(interp, collectionPtr,pc.objv[0]); + + } else { + return TCL_ERROR; + } +} + +static int +NsfCollectionOpenStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { + ParseContext pc; + (void)clientData; + + if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], + method_definitions[NsfCollectionOpenIdx].paramDefs, + method_definitions[NsfCollectionOpenIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + &pc) == TCL_OK)) { + mongoc_client_t *connPtr = (mongoc_client_t *)pc.clientData[0]; + CONST char *dbname = (CONST char *)pc.clientData[1]; + CONST char *collectionname = (CONST char *)pc.clientData[2]; + + assert(pc.status == 0); + return NsfCollectionOpen(interp, connPtr, 
dbname, collectionname); + + } else { + return TCL_ERROR; + } +} + +static int NsfMongoCloseStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; @@ -155,7 +220,7 @@ method_definitions[NsfMongoCloseIdx].paramDefs, method_definitions[NsfMongoCloseIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - mongo *connPtr = (mongo *)pc.clientData[0]; + mongoc_client_t *connPtr = (mongoc_client_t *)pc.clientData[0]; assert(pc.status == 0); return NsfMongoClose(interp, connPtr,pc.objv[0]); @@ -166,488 +231,516 @@ } static int -NsfMongoConnectStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoCollectionCountStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoConnectIdx].paramDefs, - method_definitions[NsfMongoConnectIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoCollectionCountIdx].paramDefs, + method_definitions[NsfMongoCollectionCountIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - CONST char *withReplica_set = (CONST char *)pc.clientData[0]; - Tcl_Obj *withServer = (Tcl_Obj *)pc.clientData[1]; - int withTimeout = (int )PTR2INT(pc.clientData[2]); + mongoc_collection_t *collectionPtr = (mongoc_collection_t *)pc.clientData[0]; + Tcl_Obj *query = (Tcl_Obj *)pc.clientData[1]; assert(pc.status == 0); - return NsfMongoConnect(interp, withReplica_set, withServer, withTimeout); + return NsfMongoCollectionCount(interp, collectionPtr, query); } else { return TCL_ERROR; } } static int -NsfMongoCountStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoCollectionDeleteStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoCountIdx].paramDefs, - method_definitions[NsfMongoCountIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoCollectionDeleteIdx].paramDefs, + method_definitions[NsfMongoCollectionDeleteIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - mongo *connPtr = (mongo *)pc.clientData[0]; - CONST char *namespace = (CONST char *)pc.clientData[1]; - Tcl_Obj *query = (Tcl_Obj *)pc.clientData[2]; + mongoc_collection_t *collectionPtr = (mongoc_collection_t *)pc.clientData[0]; + Tcl_Obj *condition = (Tcl_Obj *)pc.clientData[1]; assert(pc.status == 0); - return NsfMongoCount(interp, connPtr, namespace, query); + return NsfMongoCollectionDelete(interp, collectionPtr, condition); } else { return TCL_ERROR; } } static int -NsfMongoCursorCloseStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoCollectionIndexStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoCursorCloseIdx].paramDefs, - method_definitions[NsfMongoCursorCloseIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoCollectionIndexIdx].paramDefs, + method_definitions[NsfMongoCollectionIndexIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - mongo_cursor *cursorPtr = (mongo_cursor *)pc.clientData[0]; + mongoc_collection_t *collectionPtr = (mongoc_collection_t *)pc.clientData[0]; + Tcl_Obj *attributes = 
(Tcl_Obj *)pc.clientData[1]; + CONST char *withName = (CONST char *)pc.clientData[2]; + int withBackground = (int )PTR2INT(pc.clientData[3]); + int withDropdups = (int )PTR2INT(pc.clientData[4]); + int withSparse = (int )PTR2INT(pc.clientData[5]); + int withTtl = (int )PTR2INT(pc.clientData[6]); + int withUnique = (int )PTR2INT(pc.clientData[7]); assert(pc.status == 0); - return NsfMongoCursorClose(interp, cursorPtr,pc.objv[0]); + return NsfMongoCollectionIndex(interp, collectionPtr, attributes, withName, withBackground, withDropdups, withSparse, withTtl, withUnique); } else { return TCL_ERROR; } } static int -NsfMongoCursorFindStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoCollectionInsertStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoCursorFindIdx].paramDefs, - method_definitions[NsfMongoCursorFindIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoCollectionInsertIdx].paramDefs, + method_definitions[NsfMongoCollectionInsertIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - mongo *connPtr = (mongo *)pc.clientData[0]; - CONST char *namespace = (CONST char *)pc.clientData[1]; - Tcl_Obj *query = (Tcl_Obj *)pc.clientData[2]; - Tcl_Obj *withAtts = (Tcl_Obj *)pc.clientData[3]; - int withLimit = (int )PTR2INT(pc.clientData[4]); - int withSkip = (int )PTR2INT(pc.clientData[5]); - int withTailable = (int )PTR2INT(pc.clientData[6]); - int withAwaitdata = (int )PTR2INT(pc.clientData[7]); + mongoc_collection_t *collectionPtr = (mongoc_collection_t *)pc.clientData[0]; + Tcl_Obj *values = (Tcl_Obj *)pc.clientData[1]; assert(pc.status == 0); - return NsfMongoCursorFind(interp, connPtr, namespace, query, withAtts, withLimit, withSkip, withTailable, withAwaitdata); + return NsfMongoCollectionInsert(interp, collectionPtr, values); } else { return TCL_ERROR; } } static int -NsfMongoCursorNextStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoCollectionQueryStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoCursorNextIdx].paramDefs, - method_definitions[NsfMongoCursorNextIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoCollectionQueryIdx].paramDefs, + method_definitions[NsfMongoCollectionQueryIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - mongo_cursor *cursorPtr = (mongo_cursor *)pc.clientData[0]; + mongoc_collection_t *collectionPtr = (mongoc_collection_t *)pc.clientData[0]; + Tcl_Obj *query = (Tcl_Obj *)pc.clientData[1]; + Tcl_Obj *withAtts = (Tcl_Obj *)pc.clientData[2]; + int withLimit = (int )PTR2INT(pc.clientData[3]); + int withSkip = (int )PTR2INT(pc.clientData[4]); assert(pc.status == 0); - return NsfMongoCursorNext(interp, cursorPtr); + return NsfMongoCollectionQuery(interp, collectionPtr, query, withAtts, withLimit, withSkip); } else { return TCL_ERROR; } } static int -NsfMongoGridFSCloseStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoCollectionUpdateStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoGridFSCloseIdx].paramDefs, - 
method_definitions[NsfMongoGridFSCloseIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoCollectionUpdateIdx].paramDefs, + method_definitions[NsfMongoCollectionUpdateIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - gridfs *gfsPtr = (gridfs *)pc.clientData[0]; + mongoc_collection_t *collectionPtr = (mongoc_collection_t *)pc.clientData[0]; + Tcl_Obj *cond = (Tcl_Obj *)pc.clientData[1]; + Tcl_Obj *values = (Tcl_Obj *)pc.clientData[2]; + int withUpsert = (int )PTR2INT(pc.clientData[3]); + int withAll = (int )PTR2INT(pc.clientData[4]); assert(pc.status == 0); - return NsfMongoGridFSClose(interp, gfsPtr,pc.objv[0]); + return NsfMongoCollectionUpdate(interp, collectionPtr, cond, values, withUpsert, withAll); } else { return TCL_ERROR; } } static int -NsfMongoGridFSOpenStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoConnectStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoGridFSOpenIdx].paramDefs, - method_definitions[NsfMongoGridFSOpenIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoConnectIdx].paramDefs, + method_definitions[NsfMongoConnectIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - mongo *connPtr = (mongo *)pc.clientData[0]; - CONST char *dbname = (CONST char *)pc.clientData[1]; - CONST char *prefix = (CONST char *)pc.clientData[2]; + CONST char *withUri = (CONST char *)pc.clientData[0]; assert(pc.status == 0); - return NsfMongoGridFSOpen(interp, connPtr, dbname, prefix); + return NsfMongoConnect(interp, withUri); } else { return TCL_ERROR; } } static int -NsfMongoGridFSRemoveFileStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoCursorCloseStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoGridFSRemoveFileIdx].paramDefs, - method_definitions[NsfMongoGridFSRemoveFileIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoCursorCloseIdx].paramDefs, + method_definitions[NsfMongoCursorCloseIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - gridfs *gfsPtr = (gridfs *)pc.clientData[0]; - CONST char *filename = (CONST char *)pc.clientData[1]; + mongoc_cursor_t *cursorPtr = (mongoc_cursor_t *)pc.clientData[0]; assert(pc.status == 0); - return NsfMongoGridFSRemoveFile(interp, gfsPtr, filename); + return NsfMongoCursorClose(interp, cursorPtr,pc.objv[0]); } else { return TCL_ERROR; } } static int -NsfMongoGridFSStoreFileStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoCursorFindStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoGridFSStoreFileIdx].paramDefs, - method_definitions[NsfMongoGridFSStoreFileIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoCursorFindIdx].paramDefs, + method_definitions[NsfMongoCursorFindIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - gridfs *gfsPtr = (gridfs *)pc.clientData[0]; - CONST char *filename = (CONST char *)pc.clientData[1]; - CONST char *remotename = (CONST char *)pc.clientData[2]; - CONST char *contenttype = (CONST char 
*)pc.clientData[3]; + mongoc_collection_t *collectionPtr = (mongoc_collection_t *)pc.clientData[0]; + Tcl_Obj *query = (Tcl_Obj *)pc.clientData[1]; + Tcl_Obj *withAtts = (Tcl_Obj *)pc.clientData[2]; + int withLimit = (int )PTR2INT(pc.clientData[3]); + int withSkip = (int )PTR2INT(pc.clientData[4]); + int withTailable = (int )PTR2INT(pc.clientData[5]); + int withAwaitdata = (int )PTR2INT(pc.clientData[6]); assert(pc.status == 0); - return NsfMongoGridFSStoreFile(interp, gfsPtr, filename, remotename, contenttype); + return NsfMongoCursorFind(interp, collectionPtr, query, withAtts, withLimit, withSkip, withTailable, withAwaitdata); } else { return TCL_ERROR; } } static int -NsfMongoGridFileCloseStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoCursorNextStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoGridFileCloseIdx].paramDefs, - method_definitions[NsfMongoGridFileCloseIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoCursorNextIdx].paramDefs, + method_definitions[NsfMongoCursorNextIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - gridfile *filePtr = (gridfile *)pc.clientData[0]; + mongoc_cursor_t *cursorPtr = (mongoc_cursor_t *)pc.clientData[0]; assert(pc.status == 0); - return NsfMongoGridFileClose(interp, filePtr,pc.objv[0]); + return NsfMongoCursorNext(interp, cursorPtr); } else { return TCL_ERROR; } } static int -NsfMongoGridFileGetContentTypeStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoGridFSCloseStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoGridFileGetContentTypeIdx].paramDefs, - method_definitions[NsfMongoGridFileGetContentTypeIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoGridFSCloseIdx].paramDefs, + method_definitions[NsfMongoGridFSCloseIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - gridfile *filePtr = (gridfile *)pc.clientData[0]; + mongoc_gridfs_t *gfsPtr = (mongoc_gridfs_t *)pc.clientData[0]; assert(pc.status == 0); - return NsfMongoGridFileGetContentType(interp, filePtr); + return NsfMongoGridFSClose(interp, gfsPtr,pc.objv[0]); } else { return TCL_ERROR; } } static int -NsfMongoGridFileGetContentlengthStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoGridFSOpenStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoGridFileGetContentlengthIdx].paramDefs, - method_definitions[NsfMongoGridFileGetContentlengthIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoGridFSOpenIdx].paramDefs, + method_definitions[NsfMongoGridFSOpenIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - gridfile *filePtr = (gridfile *)pc.clientData[0]; + mongoc_client_t *connPtr = (mongoc_client_t *)pc.clientData[0]; + CONST char *dbname = (CONST char *)pc.clientData[1]; + CONST char *prefix = (CONST char *)pc.clientData[2]; assert(pc.status == 0); - return NsfMongoGridFileGetContentlength(interp, filePtr); + return NsfMongoGridFSOpen(interp, connPtr, dbname, prefix); } else { return TCL_ERROR; } } static 
int -NsfMongoGridFileGetMetaDataStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoGridFileCloseStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoGridFileGetMetaDataIdx].paramDefs, - method_definitions[NsfMongoGridFileGetMetaDataIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoGridFileCloseIdx].paramDefs, + method_definitions[NsfMongoGridFileCloseIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - gridfile *filePtr = (gridfile *)pc.clientData[0]; + mongoc_gridfs_file_t *gridfilePtr = (mongoc_gridfs_file_t *)pc.clientData[0]; assert(pc.status == 0); - return NsfMongoGridFileGetMetaData(interp, filePtr); + return NsfMongoGridFileClose(interp, gridfilePtr,pc.objv[0]); } else { return TCL_ERROR; } } static int -NsfMongoGridFileOpenStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoGridFileCreateStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoGridFileOpenIdx].paramDefs, - method_definitions[NsfMongoGridFileOpenIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoGridFileCreateIdx].paramDefs, + method_definitions[NsfMongoGridFileCreateIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - gridfs *fsPtr = (gridfs *)pc.clientData[0]; - CONST char *filename = (CONST char *)pc.clientData[1]; + int withSource = (int )PTR2INT(pc.clientData[0]); + mongoc_gridfs_t *gfsPtr = (mongoc_gridfs_t *)pc.clientData[1]; + CONST char *value = (CONST char *)pc.clientData[2]; + CONST char *name = (CONST char *)pc.clientData[3]; + CONST char *contenttype = (CONST char *)pc.clientData[4]; assert(pc.status == 0); - return NsfMongoGridFileOpen(interp, fsPtr, filename); + return NsfMongoGridFileCreate(interp, withSource, gfsPtr, value, name, contenttype); } else { return TCL_ERROR; } } static int -NsfMongoGridFileReadStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoGridFileDeleteStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoGridFileReadIdx].paramDefs, - method_definitions[NsfMongoGridFileReadIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoGridFileDeleteIdx].paramDefs, + method_definitions[NsfMongoGridFileDeleteIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - gridfile *filePtr = (gridfile *)pc.clientData[0]; - int size = (int )PTR2INT(pc.clientData[1]); + mongoc_gridfs_t *gfsPtr = (mongoc_gridfs_t *)pc.clientData[0]; + Tcl_Obj *query = (Tcl_Obj *)pc.clientData[1]; assert(pc.status == 0); - return NsfMongoGridFileRead(interp, filePtr, size); + return NsfMongoGridFileDelete(interp, gfsPtr, query); } else { return TCL_ERROR; } } static int -NsfMongoGridFileSeekStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoGridFileGetContentTypeStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoGridFileSeekIdx].paramDefs, - 
method_definitions[NsfMongoGridFileSeekIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoGridFileGetContentTypeIdx].paramDefs, + method_definitions[NsfMongoGridFileGetContentTypeIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - gridfile *filePtr = (gridfile *)pc.clientData[0]; - int offset = (int )PTR2INT(pc.clientData[1]); + mongoc_gridfs_file_t *gridfilePtr = (mongoc_gridfs_file_t *)pc.clientData[0]; assert(pc.status == 0); - return NsfMongoGridFileSeek(interp, filePtr, offset); + return NsfMongoGridFileGetContentType(interp, gridfilePtr); } else { return TCL_ERROR; } } static int -NsfMongoIndexStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoGridFileGetContentlengthStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoIndexIdx].paramDefs, - method_definitions[NsfMongoIndexIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoGridFileGetContentlengthIdx].paramDefs, + method_definitions[NsfMongoGridFileGetContentlengthIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - mongo *connPtr = (mongo *)pc.clientData[0]; - CONST char *namespace = (CONST char *)pc.clientData[1]; - Tcl_Obj *attributes = (Tcl_Obj *)pc.clientData[2]; - CONST char *withName = (CONST char *)pc.clientData[3]; - int withBackground = (int )PTR2INT(pc.clientData[4]); - int withDropdups = (int )PTR2INT(pc.clientData[5]); - int withSparse = (int )PTR2INT(pc.clientData[6]); - int withTtl = (int )PTR2INT(pc.clientData[7]); - int withUnique = (int )PTR2INT(pc.clientData[8]); + mongoc_gridfs_file_t *gridfilePtr = (mongoc_gridfs_file_t *)pc.clientData[0]; assert(pc.status == 0); - return NsfMongoIndex(interp, connPtr, namespace, attributes, withName, withBackground, withDropdups, withSparse, withTtl, withUnique); + return NsfMongoGridFileGetContentlength(interp, gridfilePtr); } else { return TCL_ERROR; } } static int -NsfMongoInsertStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoGridFileGetMetaDataStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoInsertIdx].paramDefs, - method_definitions[NsfMongoInsertIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoGridFileGetMetaDataIdx].paramDefs, + method_definitions[NsfMongoGridFileGetMetaDataIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - mongo *connPtr = (mongo *)pc.clientData[0]; - CONST char *namespace = (CONST char *)pc.clientData[1]; - Tcl_Obj *values = (Tcl_Obj *)pc.clientData[2]; + mongoc_gridfs_file_t *gridfilePtr = (mongoc_gridfs_file_t *)pc.clientData[0]; assert(pc.status == 0); - return NsfMongoInsert(interp, connPtr, namespace, values); + return NsfMongoGridFileGetMetaData(interp, gridfilePtr); } else { return TCL_ERROR; } } static int -NsfMongoQueryStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoGridFileOpenStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoQueryIdx].paramDefs, - method_definitions[NsfMongoQueryIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + 
method_definitions[NsfMongoGridFileOpenIdx].paramDefs, + method_definitions[NsfMongoGridFileOpenIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - mongo *connPtr = (mongo *)pc.clientData[0]; - CONST char *namespace = (CONST char *)pc.clientData[1]; - Tcl_Obj *query = (Tcl_Obj *)pc.clientData[2]; - Tcl_Obj *withAtts = (Tcl_Obj *)pc.clientData[3]; - int withLimit = (int )PTR2INT(pc.clientData[4]); - int withSkip = (int )PTR2INT(pc.clientData[5]); + mongoc_gridfs_t *gfsPtr = (mongoc_gridfs_t *)pc.clientData[0]; + Tcl_Obj *query = (Tcl_Obj *)pc.clientData[1]; assert(pc.status == 0); - return NsfMongoQuery(interp, connPtr, namespace, query, withAtts, withLimit, withSkip); + return NsfMongoGridFileOpen(interp, gfsPtr, query); } else { return TCL_ERROR; } } static int -NsfMongoRemoveStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoGridFileReadStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoRemoveIdx].paramDefs, - method_definitions[NsfMongoRemoveIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoGridFileReadIdx].paramDefs, + method_definitions[NsfMongoGridFileReadIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - mongo *connPtr = (mongo *)pc.clientData[0]; - CONST char *namespace = (CONST char *)pc.clientData[1]; - Tcl_Obj *condition = (Tcl_Obj *)pc.clientData[2]; + mongoc_gridfs_file_t *gridfilePtr = (mongoc_gridfs_file_t *)pc.clientData[0]; + int size = (int )PTR2INT(pc.clientData[1]); assert(pc.status == 0); - return NsfMongoRemove(interp, connPtr, namespace, condition); + return NsfMongoGridFileRead(interp, gridfilePtr, size); } else { return TCL_ERROR; } } static int -NsfMongoRunCmdStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoGridFileSeekStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoRunCmdIdx].paramDefs, - method_definitions[NsfMongoRunCmdIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoGridFileSeekIdx].paramDefs, + method_definitions[NsfMongoGridFileSeekIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - int withNocomplain = (int )PTR2INT(pc.clientData[0]); - mongo *connPtr = (mongo *)pc.clientData[1]; - CONST char *db = (CONST char *)pc.clientData[2]; - Tcl_Obj *cmd = (Tcl_Obj *)pc.clientData[3]; + mongoc_gridfs_file_t *gridfilePtr = (mongoc_gridfs_file_t *)pc.clientData[0]; + int offset = (int )PTR2INT(pc.clientData[1]); assert(pc.status == 0); - return NsfMongoRunCmd(interp, withNocomplain, connPtr, db, cmd); + return NsfMongoGridFileSeek(interp, gridfilePtr, offset); } else { return TCL_ERROR; } } static int -NsfMongoUpdateStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { +NsfMongoRunCmdStub(ClientData clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[]) { ParseContext pc; (void)clientData; if (likely(ArgumentParse(interp, objc, objv, NULL, objv[0], - method_definitions[NsfMongoUpdateIdx].paramDefs, - method_definitions[NsfMongoUpdateIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, + method_definitions[NsfMongoRunCmdIdx].paramDefs, + method_definitions[NsfMongoRunCmdIdx].nrParameters, 0, NSF_ARGPARSE_BUILTIN, &pc) == TCL_OK)) { - mongo *connPtr = (mongo 
*)pc.clientData[0]; - CONST char *namespace = (CONST char *)pc.clientData[1]; - Tcl_Obj *cond = (Tcl_Obj *)pc.clientData[2]; - Tcl_Obj *values = (Tcl_Obj *)pc.clientData[3]; - int withUpsert = (int )PTR2INT(pc.clientData[4]); - int withAll = (int )PTR2INT(pc.clientData[5]); + int withNocomplain = (int )PTR2INT(pc.clientData[0]); + mongoc_client_t *connPtr = (mongoc_client_t *)pc.clientData[1]; + CONST char *db = (CONST char *)pc.clientData[2]; + Tcl_Obj *cmd = (Tcl_Obj *)pc.clientData[3]; assert(pc.status == 0); - return NsfMongoUpdate(interp, connPtr, namespace, cond, values, withUpsert, withAll); + return NsfMongoRunCmd(interp, withNocomplain, connPtr, db, cmd); } else { return TCL_ERROR; } } -static Nsf_methodDefinition method_definitions[24] = { -{"::mongo::close", NsfMongoCloseStub, 1, { - {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongo",NULL,NULL,NULL,NULL,NULL}} +static Nsf_methodDefinition method_definitions[26] = { +{"::mongo::collection::close", NsfCollectionCloseStub, 1, { + {"collection", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_collection_t",NULL,NULL,NULL,NULL,NULL}} }, -{"::mongo::connect", NsfMongoConnectStub, 3, { - {"-replica-set", 0, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"-server", 0, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"-timeout", 0, 1, Nsf_ConvertTo_Int32, NULL,NULL,"int32",NULL,NULL,NULL,NULL,NULL}} +{"::mongo::collection::open", NsfCollectionOpenStub, 3, { + {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_client_t",NULL,NULL,NULL,NULL,NULL}, + {"dbname", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, + {"collectionname", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} }, -{"::mongo::count", NsfMongoCountStub, 3, { - {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongo",NULL,NULL,NULL,NULL,NULL}, - {"namespace", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, +{"::mongo::close", NsfMongoCloseStub, 1, { + {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_client_t",NULL,NULL,NULL,NULL,NULL}} +}, +{"::mongo::collection::count", NsfMongoCollectionCountStub, 2, { + {"collection", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_collection_t",NULL,NULL,NULL,NULL,NULL}, {"query", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} }, +{"::mongo::collection::delete", NsfMongoCollectionDeleteStub, 2, { + {"collection", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_collection_t",NULL,NULL,NULL,NULL,NULL}, + {"condition", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} +}, +{"::mongo::collection::index", NsfMongoCollectionIndexStub, 8, { + {"collection", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_collection_t",NULL,NULL,NULL,NULL,NULL}, + {"attributes", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, + {"-name", 0, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, + {"-background", 0, 0, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, + {"-dropdups", 0, 0, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, + {"-sparse", 0, 0, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, + {"-ttl", 0, 1, Nsf_ConvertTo_Int32, NULL,NULL,"int32",NULL,NULL,NULL,NULL,NULL}, + {"-unique", 0, 0, 
Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} +}, +{"::mongo::collection::insert", NsfMongoCollectionInsertStub, 2, { + {"collection", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_collection_t",NULL,NULL,NULL,NULL,NULL}, + {"values", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} +}, +{"::mongo::collection::query", NsfMongoCollectionQueryStub, 5, { + {"collection", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_collection_t",NULL,NULL,NULL,NULL,NULL}, + {"query", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, + {"-atts", 0, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, + {"-limit", 0, 1, Nsf_ConvertTo_Int32, NULL,NULL,"int32",NULL,NULL,NULL,NULL,NULL}, + {"-skip", 0, 1, Nsf_ConvertTo_Int32, NULL,NULL,"int32",NULL,NULL,NULL,NULL,NULL}} +}, +{"::mongo::collection::update", NsfMongoCollectionUpdateStub, 5, { + {"collection", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_collection_t",NULL,NULL,NULL,NULL,NULL}, + {"cond", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, + {"values", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, + {"-upsert", 0, 0, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, + {"-all", 0, 0, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} +}, +{"::mongo::connect", NsfMongoConnectStub, 1, { + {"-uri", 0, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} +}, {"::mongo::cursor::close", NsfMongoCursorCloseStub, 1, { - {"cursor", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongo_cursor",NULL,NULL,NULL,NULL,NULL}} + {"cursor", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_cursor_t",NULL,NULL,NULL,NULL,NULL}} }, -{"::mongo::cursor::find", NsfMongoCursorFindStub, 8, { - {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongo",NULL,NULL,NULL,NULL,NULL}, - {"namespace", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, +{"::mongo::cursor::find", NsfMongoCursorFindStub, 7, { + {"collection", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_collection_t",NULL,NULL,NULL,NULL,NULL}, {"query", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, {"-atts", 0, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, {"-limit", 0, 1, Nsf_ConvertTo_Int32, NULL,NULL,"int32",NULL,NULL,NULL,NULL,NULL}, @@ -656,92 +749,56 @@ {"-awaitdata", 0, 0, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} }, {"::mongo::cursor::next", NsfMongoCursorNextStub, 1, { - {"cursor", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongo_cursor",NULL,NULL,NULL,NULL,NULL}} + {"cursor", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_cursor_t",NULL,NULL,NULL,NULL,NULL}} }, {"::mongo::gridfs::close", NsfMongoGridFSCloseStub, 1, { - {"gfs", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"gridfs",NULL,NULL,NULL,NULL,NULL}} + {"gfs", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_gridfs_t",NULL,NULL,NULL,NULL,NULL}} }, {"::mongo::gridfs::open", NsfMongoGridFSOpenStub, 3, { - {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongo",NULL,NULL,NULL,NULL,NULL}, + {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_client_t",NULL,NULL,NULL,NULL,NULL}, {"dbname", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, 
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, {"prefix", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} }, -{"::mongo::gridfs::remove_file", NsfMongoGridFSRemoveFileStub, 2, { - {"gfs", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"gridfs",NULL,NULL,NULL,NULL,NULL}, - {"filename", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} +{"::mongo::gridfile::close", NsfMongoGridFileCloseStub, 1, { + {"gridfile", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_gridfs_file_t",NULL,NULL,NULL,NULL,NULL}} }, -{"::mongo::gridfs::store_file", NsfMongoGridFSStoreFileStub, 4, { - {"gfs", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"gridfs",NULL,NULL,NULL,NULL,NULL}, - {"filename", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"remotename", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, +{"::mongo::gridfile::create", NsfMongoGridFileCreateStub, 5, { + {"-source", NSF_ARG_REQUIRED|NSF_ARG_IS_ENUMERATION, 1, ConvertToGridfilesource, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, + {"gfs", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_gridfs_t",NULL,NULL,NULL,NULL,NULL}, + {"value", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, + {"name", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, {"contenttype", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} }, -{"::mongo::gridfile::close", NsfMongoGridFileCloseStub, 1, { - {"file", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"gridfile",NULL,NULL,NULL,NULL,NULL}} +{"::mongo::gridfile::delete", NsfMongoGridFileDeleteStub, 2, { + {"gfs", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_gridfs_t",NULL,NULL,NULL,NULL,NULL}, + {"query", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} }, {"::mongo::gridfile::get_contenttype", NsfMongoGridFileGetContentTypeStub, 1, { - {"file", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"gridfile",NULL,NULL,NULL,NULL,NULL}} + {"gridfile", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_gridfs_file_t",NULL,NULL,NULL,NULL,NULL}} }, {"::mongo::gridfile::get_contentlength", NsfMongoGridFileGetContentlengthStub, 1, { - {"file", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"gridfile",NULL,NULL,NULL,NULL,NULL}} + {"gridfile", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_gridfs_file_t",NULL,NULL,NULL,NULL,NULL}} }, {"::mongo::gridfile::get_metadata", NsfMongoGridFileGetMetaDataStub, 1, { - {"file", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"gridfile",NULL,NULL,NULL,NULL,NULL}} + {"gridfile", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_gridfs_file_t",NULL,NULL,NULL,NULL,NULL}} }, {"::mongo::gridfile::open", NsfMongoGridFileOpenStub, 2, { - {"fs", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"gridfs",NULL,NULL,NULL,NULL,NULL}, - {"filename", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} + {"gfs", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_gridfs_t",NULL,NULL,NULL,NULL,NULL}, + {"query", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} }, {"::mongo::gridfile::read", NsfMongoGridFileReadStub, 2, { - {"file", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"gridfile",NULL,NULL,NULL,NULL,NULL}, + {"gridfile", 
NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_gridfs_file_t",NULL,NULL,NULL,NULL,NULL}, {"size", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Int32, NULL,NULL,"int32",NULL,NULL,NULL,NULL,NULL}} }, {"::mongo::gridfile::seek", NsfMongoGridFileSeekStub, 2, { - {"file", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"gridfile",NULL,NULL,NULL,NULL,NULL}, + {"gridfile", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_gridfs_file_t",NULL,NULL,NULL,NULL,NULL}, {"offset", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Int32, NULL,NULL,"int32",NULL,NULL,NULL,NULL,NULL}} }, -{"::mongo::index", NsfMongoIndexStub, 9, { - {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongo",NULL,NULL,NULL,NULL,NULL}, - {"namespace", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"attributes", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"-name", 0, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"-background", 0, 0, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"-dropdups", 0, 0, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"-sparse", 0, 0, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"-ttl", 0, 1, Nsf_ConvertTo_Int32, NULL,NULL,"int32",NULL,NULL,NULL,NULL,NULL}, - {"-unique", 0, 0, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} -}, -{"::mongo::insert", NsfMongoInsertStub, 3, { - {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongo",NULL,NULL,NULL,NULL,NULL}, - {"namespace", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"values", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} -}, -{"::mongo::query", NsfMongoQueryStub, 6, { - {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongo",NULL,NULL,NULL,NULL,NULL}, - {"namespace", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"query", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"-atts", 0, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"-limit", 0, 1, Nsf_ConvertTo_Int32, NULL,NULL,"int32",NULL,NULL,NULL,NULL,NULL}, - {"-skip", 0, 1, Nsf_ConvertTo_Int32, NULL,NULL,"int32",NULL,NULL,NULL,NULL,NULL}} -}, -{"::mongo::remove", NsfMongoRemoveStub, 3, { - {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongo",NULL,NULL,NULL,NULL,NULL}, - {"namespace", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"condition", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} -}, {"::mongo::run", NsfMongoRunCmdStub, 4, { {"-nocomplain", 0, 0, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongo",NULL,NULL,NULL,NULL,NULL}, + {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongoc_client_t",NULL,NULL,NULL,NULL,NULL}, {"db", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, {"cmd", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} -}, -{"::mongo::update", NsfMongoUpdateStub, 6, { - {"conn", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Pointer, NULL,NULL,"mongo",NULL,NULL,NULL,NULL,NULL}, - {"namespace", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"cond", NSF_ARG_REQUIRED, 1, 
Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"values", NSF_ARG_REQUIRED, 1, Nsf_ConvertTo_Tclobj, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"-upsert", 0, 0, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}, - {"-all", 0, 0, Nsf_ConvertTo_String, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL}} },{NULL} }; Index: library/mongodb/nsfmongo.c =================================================================== diff -u -re5d5a53bdd5482a7173ac81cff141906ac23cf32 -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/nsfmongo.c (.../nsfmongo.c) (revision e5d5a53bdd5482a7173ac81cff141906ac23cf32) +++ library/mongodb/nsfmongo.c (.../nsfmongo.c) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -18,8 +18,7 @@ #include #include #include "bson.h" -#include "mongo.h" -#include +#include "mongoc.h" #include #include @@ -28,45 +27,46 @@ /* * Define the counters to generate nice symbols for pointer converter */ -static int gridfsCount = 0; static int gridfileCount = 0; -static int mongoCount = 0; -static int cursorCount = 0; +static int gridfsCount = 0; +static int mongoClientCount = 0; +static int mongoCollectionCount = 0; +static int mongoCursorCount = 0; typedef enum { - NSF_BSON_INT, - NSF_BSON_LONG, - NSF_BSON_DATE, - NSF_BSON_DOUBLE, + NSF_BSON_ARRAY, NSF_BSON_BOOL, - NSF_BSON_REGEX, - NSF_BSON_STRING, + NSF_BSON_INT32, + NSF_BSON_INT64, + NSF_BSON_DATE_TIME, + NSF_BSON_DOCUMENT, + NSF_BSON_DOUBLE, NSF_BSON_MINKEY, NSF_BSON_MAXKEY, NSF_BSON_NULL, NSF_BSON_OID, + NSF_BSON_REGEX, + NSF_BSON_STRING, NSF_BSON_TIMESTAMP, - NSF_BSON_OBJECT, - NSF_BSON_ARRAY, NSF_BSON_UNKNOWN } nsfMongoTypes; static char * NsfMongoGlobalStrings[] = { - "integer", - "long", - "date", - "double", + "array", "boolean", - "regex", - "string", + "int32", + "int64", + "date_time", + "document", + "double", "minkey", "maxkey", "null", "oid", + "regex", + "string", "timestamp", - "object", - "array", "unknown", NULL }; @@ -94,8 +94,6 @@ NsfObject *object; } ParseContext; -#define NSF_ARG_REQUIRED 0x000001 - #define nr_elements(arr) ((int) (sizeof(arr) / sizeof(arr[0]))) #define ObjStr(obj) (obj)->bytes ? (obj)->bytes : Tcl_GetString(obj) #ifdef UNUSED @@ -157,7 +155,7 @@ * * BsonToList -- * - * Convert a bson structure to a tagged list. Each value field is + * Convert a bson_t structure to a tagged list. Each value field is * preceded by a tag denoting its bson type. 
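Note on the value representation used by the Tcl commands registered above: BSON documents still travel between Tcl and the driver as flat lists of name/tag/value triples, but the tags now follow the libbson type names (int32, int64, date_time, document, ...) instead of the earlier integer, long, date and object. A small sketch of such a tagged list; field names and values are purely illustrative:

    set person {
      name     string   Gustaf
      age      int32    53
      projects array    {0 string nsf 1 string xowiki}
      info     document {x int32 203 y int32 102}
    }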
* * Results: @@ -169,55 +167,80 @@ *---------------------------------------------------------------------- */ Tcl_Obj * -BsonToList(Tcl_Interp *interp, const char *data , int depth) { - bson_iterator i; +BsonToList(Tcl_Interp *interp, const bson_t *data , int depth) { + bson_iter_t i; char oidhex[25]; Tcl_Obj *resultObj, *elemObj; - bson_iterator_from_buffer( &i , data ); + bson_iter_init( &i , data ); resultObj = Tcl_NewListObj(0, NULL); - while ( bson_iterator_next( &i ) ){ - bson_type t = bson_iterator_type( &i ); + while ( bson_iter_next( &i ) ){ + bson_type_t t = bson_iter_type( &i ); nsfMongoTypes tag; const char *key; if ( t == 0 ) break; - key = bson_iterator_key( &i ); + key = bson_iter_key( &i ); /*fprintf(stderr, "BsonToList: key %s t %d string %d\n", key, t, bson_string);*/ switch ( t ){ - case BSON_INT: tag = NSF_BSON_INT; elemObj = Tcl_NewIntObj(bson_iterator_int( &i )); break; - case BSON_LONG: tag = NSF_BSON_LONG; elemObj = Tcl_NewLongObj(bson_iterator_long( &i )); break; - case BSON_DATE: tag = NSF_BSON_DATE; elemObj = Tcl_NewLongObj(bson_iterator_date( &i )); break; - case BSON_DOUBLE: tag = NSF_BSON_DOUBLE; elemObj = Tcl_NewDoubleObj(bson_iterator_double( &i )); break; - case BSON_BOOL: tag = NSF_BSON_BOOL; elemObj = Tcl_NewBooleanObj(bson_iterator_bool( &i )); break; - case BSON_REGEX: tag = NSF_BSON_REGEX; elemObj = Tcl_NewStringObj(bson_iterator_regex( &i ), -1); break; - case BSON_STRING: tag = NSF_BSON_STRING; elemObj = Tcl_NewStringObj(bson_iterator_string( &i ), -1); break; - case BSON_MINKEY: tag = NSF_BSON_MINKEY; elemObj = Tcl_NewStringObj("null", 4); break; - case BSON_MAXKEY: tag = NSF_BSON_MAXKEY; elemObj = Tcl_NewStringObj("null", 4); break; - case BSON_NULL: tag = NSF_BSON_NULL; elemObj = Tcl_NewStringObj("null", 4); break; - case BSON_OID: { + case BSON_TYPE_INT32: tag = NSF_BSON_INT32; elemObj = Tcl_NewIntObj(bson_iter_int32( &i )); break; + case BSON_TYPE_INT64: tag = NSF_BSON_INT64; elemObj = Tcl_NewLongObj(bson_iter_int64( &i )); break; + case BSON_TYPE_DATE_TIME: tag = NSF_BSON_DATE_TIME; elemObj = Tcl_NewLongObj(bson_iter_date_time( &i )); break; + case BSON_TYPE_DOUBLE: tag = NSF_BSON_DOUBLE; elemObj = Tcl_NewDoubleObj(bson_iter_double( &i )); break; + case BSON_TYPE_BOOL: tag = NSF_BSON_BOOL; elemObj = Tcl_NewBooleanObj(bson_iter_bool( &i )); break; + case BSON_TYPE_REGEX: { + const char *options = NULL; /* TODO: not handled */ + tag = NSF_BSON_REGEX; elemObj = Tcl_NewStringObj(bson_iter_regex( &i, &options ), -1); + break; + } + case BSON_TYPE_UTF8: { + uint32_t utf8_len; + const char *string = bson_iter_utf8( &i, &utf8_len); + /*fprintf(stderr, "append UTF8: <%s> %d\n", string, utf8_len);*/ + tag = NSF_BSON_STRING; elemObj = Tcl_NewStringObj(string, utf8_len); + break; + } + case BSON_TYPE_MINKEY: tag = NSF_BSON_MINKEY; elemObj = Tcl_NewStringObj("null", 4); break; + case BSON_TYPE_MAXKEY: tag = NSF_BSON_MAXKEY; elemObj = Tcl_NewStringObj("null", 4); break; + case BSON_TYPE_NULL: tag = NSF_BSON_NULL; elemObj = Tcl_NewStringObj("null", 4); break; + case BSON_TYPE_OID: { tag = NSF_BSON_OID; - bson_oid_to_string(bson_iterator_oid(&i), oidhex); + bson_oid_to_string(bson_iter_oid(&i), oidhex); elemObj = Tcl_NewStringObj(oidhex, -1); break; } - case BSON_TIMESTAMP: { - bson_timestamp_t ts; + case BSON_TYPE_TIMESTAMP: { + uint32_t timestamp, increment; tag = NSF_BSON_TIMESTAMP; - ts = bson_iterator_timestamp( &i ); + bson_iter_timestamp( &i, ×tamp, &increment ); elemObj = Tcl_NewListObj(0, NULL); - Tcl_ListObjAppendElement(interp, elemObj, 
Tcl_NewIntObj(ts.t)); - Tcl_ListObjAppendElement(interp, elemObj, Tcl_NewIntObj(ts.i)); + Tcl_ListObjAppendElement(interp, elemObj, Tcl_NewIntObj(timestamp)); + Tcl_ListObjAppendElement(interp, elemObj, Tcl_NewIntObj(increment)); break; } - case BSON_OBJECT: - case BSON_ARRAY: - tag = t == BSON_OBJECT ? NSF_BSON_OBJECT : NSF_BSON_ARRAY; - elemObj = BsonToList(interp, bson_iterator_value( &i ) , depth + 1 ); + case BSON_TYPE_DOCUMENT: { + const uint8_t *docbuf = NULL; + uint32_t doclen = 0; + bson_t b; + tag = NSF_BSON_DOCUMENT; + bson_iter_document (&i, &doclen, &docbuf); + bson_init_static(&b, docbuf, doclen); + elemObj = BsonToList(interp, &b , depth + 1 ); break; + } + case BSON_TYPE_ARRAY: { + const uint8_t *docbuf = NULL; + uint32_t doclen = 0; + bson_t b; + tag = NSF_BSON_ARRAY; + bson_iter_array(&i, &doclen, &docbuf); + bson_init_static (&b, docbuf, doclen); + elemObj = BsonToList(interp, &b , depth + 1 ); + break; + } default: tag = NSF_BSON_UNKNOWN; elemObj = Tcl_NewStringObj("", 0); @@ -250,34 +273,38 @@ * *---------------------------------------------------------------------- */ -bson_type +bson_type_t BsonTagToType(Tcl_Interp *interp, char *tag) { char firstChar = *tag; switch (firstChar) { - case 'a': /* array */ return BSON_ARRAY; - case 'b': /* bool */ return BSON_BOOL; + case 'a': /* array */ return BSON_TYPE_ARRAY; + case 'b': /* bool */ return BSON_TYPE_BOOL; case 'd': - if (*(tag + 1) == 'a') /* date */ return BSON_DATE; - if (*(tag + 1) == 'o') /* double */ return BSON_DOUBLE; - case 'i': /* integer */ return BSON_INT; - case 'l': /* long */ return BSON_LONG; + if (*(tag + 1) == 'a') /* date */ return BSON_TYPE_DATE_TIME; + if (*(tag + 1) == 'o' && *(tag + 2) == 'c') /* document */ return BSON_TYPE_DOCUMENT; + if (*(tag + 1) == 'o' && *(tag + 2) == 'u') /* double */ return BSON_TYPE_DOUBLE; + break; + case 'i': /* int32|64 */ + if (*(tag + 1) == 'n' && *(tag + 2) == 't' && *(tag + 3) == '3') return BSON_TYPE_INT32; + if (*(tag + 1) == 'n' && *(tag + 2) == 't' && *(tag + 3) == '6') return BSON_TYPE_INT64; + if (*(tag + 1) == 'n' && *(tag + 2) == 't') return BSON_TYPE_INT32; + break; case 'm': - if (*(tag + 1) == 'i') /* minkey */ return BSON_MINKEY; - if (*(tag + 1) == 'a') /* maxkey */ return BSON_MAXKEY; + if (*(tag + 1) == 'i') /* minkey */ return BSON_TYPE_MINKEY; + if (*(tag + 1) == 'a') /* maxkey */ return BSON_TYPE_MAXKEY; break; - case 'n': /* null */ return BSON_NULL; + case 'n': /* null */ return BSON_TYPE_NULL; case 'o': - if (*(tag + 1) == 'i') /* oid */ return BSON_OID; - if (*(tag + 1) == 'b') /* object */ return BSON_OBJECT; + if (*(tag + 1) == 'i') /* oid */ return BSON_TYPE_OID; break; - case 'r': /* regex */ return BSON_REGEX; - case 's': /* string */ return BSON_STRING; - case 't': /* timestamp */ return BSON_TIMESTAMP; + case 'r': /* regex */ return BSON_TYPE_REGEX; + case 's': /* string */ return BSON_TYPE_UTF8; + case 't': /* timestamp */ return BSON_TYPE_TIMESTAMP; } NsfLog(interp, NSF_LOG_WARN, "BsonTagToType: Treat unknown tag '%s' as string", tag); - return BSON_STRING; + return BSON_TYPE_UTF8; } /* @@ -296,127 +323,132 @@ *---------------------------------------------------------------------- */ static int -BsonAppend(Tcl_Interp *interp, bson *bbPtr, char *name, char *tag, Tcl_Obj *value) { +BsonAppend(Tcl_Interp *interp, bson_t *bbPtr, char *name, char *tag, Tcl_Obj *value) { int result = TCL_OK; - bson_type t = BsonTagToType(interp, tag); + bson_type_t t = BsonTagToType(interp, tag); + int keyLength = strlen(name); /*fprintf(stderr, 
"BsonAppend: add name %s tag %s value '%s'\n", name, tag, ObjStr(value));*/ switch ( t ){ - case BSON_STRING: - bson_append_string(bbPtr, name, ObjStr(value)); + case BSON_TYPE_UTF8: { + const char* string = ObjStr(value); + bson_append_utf8(bbPtr, name, keyLength, string, strlen(string)); break; - case BSON_INT: { - int v; + } + case BSON_TYPE_INT32: { + int32_t v; result = Tcl_GetIntFromObj(interp, value, &v); if (result != TCL_OK) break; - bson_append_int(bbPtr, name, v); + bson_append_int32(bbPtr, name, keyLength, v); break; } - case BSON_DOUBLE: { + case BSON_TYPE_DOUBLE: { double v; result = Tcl_GetDoubleFromObj(interp, value, &v); if (result != TCL_OK) break; - bson_append_double(bbPtr, name, v); + bson_append_double(bbPtr, name, keyLength, v); break; } - case BSON_BOOL: { + case BSON_TYPE_BOOL: { int v; result = Tcl_GetBooleanFromObj(interp, value, &v); if (result != TCL_OK) break; - bson_append_bool(bbPtr, name, v); + bson_append_bool(bbPtr, name, keyLength, v); break; } - case BSON_LONG: { + case BSON_TYPE_INT64: { long v; result = Tcl_GetLongFromObj(interp, value, &v); if (result != TCL_OK) break; - bson_append_long(bbPtr, name, v); + bson_append_int64(bbPtr, name, keyLength, v); break; } - case BSON_MAXKEY: - bson_append_maxkey(bbPtr, name); + case BSON_TYPE_MAXKEY: + bson_append_maxkey(bbPtr, name, keyLength); break; - case BSON_MINKEY: - bson_append_minkey(bbPtr, name); + case BSON_TYPE_MINKEY: + bson_append_minkey(bbPtr, name, keyLength); break; - case BSON_NULL: { - bson_append_null(bbPtr, name); + case BSON_TYPE_NULL: { + bson_append_null(bbPtr, name, keyLength); break; } - case BSON_OID: { + case BSON_TYPE_OID: { bson_oid_t v; - bson_oid_from_string(&v, ObjStr(value)); - bson_append_oid(bbPtr, name, &v); + bson_oid_init_from_string(&v, ObjStr(value)); + bson_append_oid(bbPtr, name, keyLength, &v); break; } - case BSON_REGEX: { + case BSON_TYPE_REGEX: { char *opts = ""; /* TODO: how to handle regex opts? 
*/ - bson_append_regex(bbPtr, name, ObjStr(value), opts); + bson_append_regex(bbPtr, name, keyLength, ObjStr(value), opts); break; } - case BSON_DATE: { + case BSON_TYPE_DATE_TIME: { long v; result = Tcl_GetLongFromObj(interp, value, &v); if (result != TCL_OK) break; - bson_append_date(bbPtr, name, v); + bson_append_date_time(bbPtr, name, keyLength, v); break; } - case BSON_TIMESTAMP: { - bson_timestamp_t v; + case BSON_TYPE_TIMESTAMP: { + int timestamp, increment, objc = 0; Tcl_Obj **objv; - int objc = 0; + result = Tcl_ListObjGetElements(interp, value, &objc, &objv); if (result != TCL_OK || objc != 2) { return NsfPrintError(interp, "invalid timestamp: %s", ObjStr(value)); } - result = Tcl_GetIntFromObj(interp, objv[0], &v.t); + result = Tcl_GetIntFromObj(interp, objv[0], ×tamp); if (result == TCL_OK) { - result = Tcl_GetIntFromObj(interp, objv[1], &v.i); + result = Tcl_GetIntFromObj(interp, objv[1], &increment); } if (result != TCL_OK) break; - bson_append_timestamp(bbPtr, name, &v); + bson_append_timestamp(bbPtr, name, keyLength, timestamp, increment); break; } - case BSON_OBJECT: - case BSON_ARRAY: { - int i, objc; + case BSON_TYPE_DOCUMENT: + case BSON_TYPE_ARRAY: { + int i, objc; Tcl_Obj **objv; + bson_t child, *childPtr = &child; result = Tcl_ListObjGetElements(interp, value, &objc, &objv); if (result != TCL_OK || objc % 3 != 0) { return NsfPrintError(interp, "invalid %s value contain multiple of 3 elements %s", tag, ObjStr(value)); } - if (t == BSON_OBJECT) { - bson_append_start_object(bbPtr, name); + if (t == BSON_TYPE_DOCUMENT) { + bson_append_document_begin(bbPtr, name, keyLength, childPtr); } else { - bson_append_start_array(bbPtr, name); + bson_append_array_begin(bbPtr, name, keyLength, childPtr); } for (i = 0; i< objc; i += 3) { /*fprintf(stderr, "value %s, i %d, [0]: %s, [1]: %s, [2]: %s\n", ObjStr(value), i, ObjStr(objv[i]), ObjStr(objv[i+1]), ObjStr(objv[i+2]));*/ - result = BsonAppend(interp, bbPtr, ObjStr(objv[i]), ObjStr(objv[i+1]), objv[i+2]); + result = BsonAppend(interp, childPtr, ObjStr(objv[i]), ObjStr(objv[i+1]), objv[i+2]); if (result != TCL_OK) break; } - /* - * finish_object works for arrays and objects - */ - bson_append_finish_object(bbPtr); + if (t == BSON_TYPE_DOCUMENT) { + bson_append_document_end(bbPtr, childPtr); + } else { + bson_append_array_end(bbPtr, childPtr); + } break; } - case BSON_BINDATA: - case BSON_DBREF: - case BSON_CODE: - case BSON_SYMBOL: - case BSON_CODEWSCOPE: + case BSON_TYPE_BINARY: + case BSON_TYPE_DBPOINTER: + case BSON_TYPE_CODE: + case BSON_TYPE_SYMBOL: + case BSON_TYPE_CODEWSCOPE: return NsfPrintError(interp, "tag %s not handled yet", tag); break; - case BSON_UNDEFINED: - case BSON_EOO: + case BSON_TYPE_UNDEFINED: + case BSON_TYPE_EOD: break; /* no default here, to get the warning to the compilation log for the time being */ @@ -440,7 +472,7 @@ *---------------------------------------------------------------------- */ static int -BsonAppendObjv(Tcl_Interp *interp, bson *bPtr, int objc, Tcl_Obj **objv) { +BsonAppendObjv(Tcl_Interp *interp, bson_t *bPtr, int objc, Tcl_Obj **objv) { int i; bson_init(bPtr); @@ -451,138 +483,59 @@ /*fprintf(stderr, "adding pair '%s' (%s) '%s'\n", name, tag, ObjStr(value));*/ BsonAppend(interp, bPtr, name, tag, value); } - bson_finish(bPtr); return TCL_OK; } -static char * -ErrorMsg(int status) { - switch (status) { - case MONGO_CONN_NO_SOCKET: return "Could not create socket"; - case MONGO_CONN_FAIL: return "An error occured while calling connect()"; - case MONGO_CONN_ADDR_FAIL: return "An error 
occured while calling getaddrinfo()"; - case MONGO_CONN_NOT_MASTER: return "Connected to a non-master node (read-only)"; - case MONGO_CONN_BAD_SET_NAME: return "Given replica set name doesn't match this replica set"; - case MONGO_CONN_NO_PRIMARY: return "Can't find primary in replica set"; - - case MONGO_IO_ERROR: return "An error occurred while reading or writing on the socket"; - case MONGO_READ_SIZE_ERROR: return "The response is not the expected length"; - case MONGO_COMMAND_FAILED: return "The command returned with 'ok' value of 0"; - case MONGO_BSON_INVALID: return "BSON not valid for the specified op"; - case MONGO_BSON_NOT_FINISHED: return "BSON object has not been finished"; - - default: return "Unknown error (maybe mongodb server not running)"; - } -} - /*********************************************************************** * Define the api functions ***********************************************************************/ /* cmd close NsfMongoClose { - {-argName "conn" -required 1 -type mongo -withObj 1} + {-argName "conn" -required 1 -type mongoc_client_t -withObj 1} } */ static int -NsfMongoClose(Tcl_Interp *interp, mongo *connPtr, Tcl_Obj *connObj) { +NsfMongoClose(Tcl_Interp *interp, mongoc_client_t *clientPtr, Tcl_Obj *clientObj) { - if (connPtr) { - mongo_destroy(connPtr); - Nsf_PointerDelete(ObjStr(connObj), connPtr, 1); + if (clientPtr) { + mongoc_client_destroy(clientPtr); + Nsf_PointerDelete(ObjStr(clientObj), clientPtr, 0); } return TCL_OK; } /* cmd connect NsfMongoConnect { - {-argName "-replica-set" -required 0 -nrargs 1} - {-argName "-server" -required 0 -nrargs 1 -type tclobj} - {-argName "-timeout" -required 0 -nrargs 1 -type int32} + {-argName "-uri" -required 1 -nrargs 1} } */ static int -NsfMongoConnect(Tcl_Interp *interp, CONST char *replicaSet, Tcl_Obj *server, int withTimeout) { - char channelName[80], *buffer = NULL; - mongo_host_port host_port; - int status, objc = 0; - mongo *connPtr; - Tcl_Obj **objv; +NsfMongoConnect(Tcl_Interp *interp, CONST char *uri) { + char channelName[80]; + mongoc_client_t *clientPtr; - if (server) { - int result = Tcl_ListObjGetElements(interp, server, &objc, &objv); - if (result != TCL_OK) { - return NsfPrintError(interp, "The provided servers are not a well-formed list"); - } + if (uri == NULL) { + uri = "mongodb://127.0.0.1:27017/"; } + clientPtr = mongoc_client_new(uri); - connPtr = (mongo *)ckalloc(sizeof(mongo)); - - if (objc == 0) { - /* - * No -server argument or an empty list was provided; use the - * mongo default values. - */ - status = mongo_client( connPtr, "127.0.0.1", 27017 ); - - } else if (objc == 1 && replicaSet == NULL) { - /* - * A single element was provided to -server, we have no replica - * set specified. - */ - mongo_parse_host(ObjStr(objv[0]), &host_port); - status = mongo_client( connPtr, host_port.host, host_port.port ); - if (buffer) {ckfree(buffer);} - - } else if (replicaSet) { - /* - * A list of 1 or more server was provided together with a replica - * set. 
- */ - int i; - - mongo_replset_init( connPtr, replicaSet ); - - for (i = 0; i < objc; i++) { - mongo_parse_host(ObjStr(objv[i]), &host_port); - mongo_replset_add_seed(connPtr, host_port.host, host_port.port ); - if (buffer) {ckfree(buffer);} - } - - status = mongo_replset_connect( connPtr ); - - } else { - ckfree((char *)connPtr); - return NsfPrintError(interp, "A list of servers was provided, but not name for the replica set"); + if (clientPtr == NULL) { + return NsfPrintError(interp, "failed to parse Mongo URI"); } /* - * Process the status from either mongo_connect() or - * mongo_replset_connect(). - */ - if (status != MONGO_OK) { - ckfree((char *)connPtr); - return NsfPrintError(interp, ErrorMsg(status)); - } - - if (withTimeout > 0) { - /* - * setting connection timeout - measured in milliseconds - */ - if (mongo_set_op_timeout(connPtr, withTimeout) != MONGO_OK) { - ckfree((char *)connPtr); - return NsfPrintError(interp, "setting connection timeout failed"); - } - } - - /* * Make an entry in the symbol table and return entry name it as * result. */ - Nsf_PointerAdd(interp, channelName, "mongo", connPtr); + if (Nsf_PointerAdd(interp, channelName, "mongoc_client_t", clientPtr) != TCL_OK) { + mongoc_client_destroy(clientPtr); + return TCL_ERROR; + } + Tcl_SetObjResult(interp, Tcl_NewStringObj(channelName, -1)); return TCL_OK; @@ -591,51 +544,104 @@ /* cmd run NsfMongoRunCmd { {-argName "-nocomplain" -required 0 -nrargs 0} - {-argName "conn" -required 1 -type mongo} + {-argName "conn" -required 1 -type mongoc_client_t} {-argName "db" -required 1} {-argName "cmd" -required 1 -type tclobj} } */ static int -NsfMongoRunCmd(Tcl_Interp *interp, int withNocomplain, mongo *connPtr, CONST char *db, Tcl_Obj *cmdObj) { +NsfMongoRunCmd(Tcl_Interp *interp, int withNocomplain, mongoc_client_t *clientPtr, + CONST char *db, Tcl_Obj *cmdObj) { int result, objc; Tcl_Obj **objv; - bson cmd[1], out[1]; + bson_t cmd[1], out[1]; + mongoc_read_prefs_t *readPrefsPtr = NULL; /* TODO: not used */ + bson_error_t bsonError; result = Tcl_ListObjGetElements(interp, cmdObj, &objc, &objv); if (result != TCL_OK || (objc % 3 != 0)) { return NsfPrintError(interp, "%s: must contain a multiple of 3 elements", ObjStr(cmdObj)); } BsonAppendObjv(interp, cmd, objc, objv); - mongo_clear_errors( connPtr ); - result = mongo_run_command( connPtr, db, cmd, out ); + /*mongo_clear_errors( connPtr );*/ + result = mongoc_client_command_simple( clientPtr, db, cmd, readPrefsPtr, out, &bsonError); bson_destroy( cmd ); - if (withNocomplain == 0 && result != MONGO_OK) { - fprintf(stderr, "run result %d\n", result); - return NsfPrintError(interp, "mongo::run: command '%s' returned an unknown error", ObjStr(cmdObj)); + if (withNocomplain == 0 && result == 0) { + return NsfPrintError(interp, "mongo::run: command '%s' returned error: %s", ObjStr(cmdObj), bsonError.message); } - Tcl_SetObjResult(interp, Tcl_NewIntObj(result == MONGO_OK)); + Tcl_SetObjResult(interp, Tcl_NewIntObj(result)); return TCL_OK; } +/* +cmd collection::open NsfCollectionOpen { + {-argName "conn" -required 1 -type mongoc_client_t} + {-argName "dbname" -required 1} + {-argName "collectionname" -required 1} +} +*/ +int +NsfCollectionOpen(Tcl_Interp *interp, + mongoc_client_t *clientPtr, + const char *dbName, + const char *collectionName) { + int result = TCL_ERROR; + mongoc_collection_t *collectionPtr; + collectionPtr = mongoc_client_get_collection(clientPtr, dbName, collectionName); + if (collectionPtr != NULL) { + char buffer[80]; + + if (Nsf_PointerAdd(interp, buffer, 
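As a usage sketch for the reworked connection handling (run, e.g., from ./nxsh with the nsfmongo extension loaded): a connection is now opened via a single MongoDB URI, the former -server/-replica-set/-timeout options are gone, and collections are opened and closed as separate handles. The URI, database and collection names below are examples only; the handle names returned by the pointer converter will differ:

    set mongoConn  [::mongo::connect -uri mongodb://127.0.0.1:27017/]
    ::mongo::run -nocomplain $mongoConn admin {ping int32 1}
    set collection [::mongo::collection::open $mongoConn tutorial persons]
    # ... work with $collection ...
    ::mongo::collection::close $collection
    ::mongo::close $mongoConn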
"mongoc_collection_t", collectionPtr) == TCL_OK) { + Tcl_SetObjResult(interp, Tcl_NewStringObj(buffer, -1)); + result = TCL_OK; + } else { + mongoc_collection_destroy(collectionPtr); + result = TCL_ERROR; + } + } + + if (collectionPtr == NULL) { + result = NsfPrintError(interp, + "collection::open: could not open collection: %s.%s", + dbName, collectionName); + } + + return result; +} + /* -cmd query NsfMongoCount { - {-argName "conn" -required 1 -type mongo} - {-argName "namespace" -required 1} +cmd collection::close NsfCollectionClose { + {-argName "collection" -required 1 -type mongoc_collection_t -withObj 1} +} +*/ +static int +NsfCollectionClose(Tcl_Interp *interp, mongoc_collection_t *collectionPtr, Tcl_Obj *clientObj) { + if (collectionPtr) { + mongoc_collection_destroy(collectionPtr); + Nsf_PointerDelete(ObjStr(clientObj), collectionPtr, 0); + } + return TCL_OK; +} + +/* +cmd collection::count NsfMongoCollectionCount { + {-argName "collection" -required 1 -type mongoc_collection_t} {-argName "query" -required 1 -type tclobj} } */ static int -NsfMongoCount(Tcl_Interp *interp, mongo *connPtr, CONST char *namespace, Tcl_Obj *queryObj) { +NsfMongoCollectionCount(Tcl_Interp *interp, + mongoc_collection_t *collectionPtr, + Tcl_Obj *queryObj) { int objc, result; Tcl_Obj **objv; - char *db, *collection; - int count, length; - bson query[1]; + int count; + bson_t query[1]; + bson_error_t bsonError; result = Tcl_ListObjGetElements(interp, queryObj, &objc, &objv); if (result != TCL_OK || (objc % 3 != 0)) { @@ -644,33 +650,65 @@ BsonAppendObjv(interp, query, objc, objv); - length = strlen(namespace)+1; - db = ckalloc(length); - memcpy(db, namespace, length); - collection = strchr(db, '.'); + if (collectionPtr != NULL) { + count = mongoc_collection_count(collectionPtr, + 0 /* query flags */, query, + 0 /*skip */, 0 /*limit */, + NULL /* read preferences */, + &bsonError); + fprintf(stderr, "count returns %d \n", count); + if (count == -1) { + bson_destroy( query ); + return NsfPrintError(interp, "mongo::collection::count: error: %s", bsonError.message); + } - if (collection != NULL) { - /* successful */ - *collection = '\0'; - collection ++; - count = mongo_count(connPtr, db, collection, query); } else { count = 0; } bson_destroy( query ); - ckfree(db); Tcl_SetObjResult(interp, Tcl_NewIntObj(count)); return TCL_OK; } +/* +cmd "collection::delete" NsfMongoCollectionDelete { + {-argName "collection" -required 1 -type mongoc_collection_t} + {-argName "condition" -required 1 -type tclobj} +} +*/ +static int +NsfMongoCollectionDelete(Tcl_Interp *interp, + mongoc_collection_t *collectionPtr, + Tcl_Obj *conditionObj) { + int objc, result = TCL_OK, status; + Tcl_Obj **objv; + bson_t query[1]; + bson_error_t bsonError; + mongoc_delete_flags_t deleteFlags = 0; /* TODO: not handled */ + /* MONGOC_DELETE_SINGLE_REMOVE = 1 << 0,**/ + const mongoc_write_concern_t *writeConcern = NULL; /* TODO: not handled yet */ + result = Tcl_ListObjGetElements(interp, conditionObj, &objc, &objv); + if (result != TCL_OK || (objc % 3 != 0)) { + return NsfPrintError(interp, "%s: must contain a multiple of 3 elements", ObjStr(conditionObj)); + } + + BsonAppendObjv(interp, query, objc, objv); + status = mongoc_collection_delete(collectionPtr, deleteFlags, query, writeConcern, &bsonError); + + if (status == 0) { + result = NsfPrintError(interp, "mongo::collection::delete: error: %s", bsonError.message); + } + bson_destroy(query); + return result; +} + /* -cmd index NsfMongoIndex { - {-argName "conn" -required 1 -type mongo} - 
{-argName "namespace" -required 1} +cmd "collection::index" NsfMongoCollectionIndex { + {-argName "collection" -required 1 -type mongoc_collection_t} {-argName "attributes" -required 1 -type tclobj} {-argName "-name" -required 0 -nrargs 1} {-argName "-background" -required 0 -nrargs 0} @@ -681,64 +719,79 @@ } */ - static int -NsfMongoIndex(Tcl_Interp *interp, - mongo *connPtr, - CONST char *namespace, - Tcl_Obj *attributesObj, - CONST char *withName, - int withBackground, - int withDropdups, - int withSparse, - int withTtl, - int withUnique) { - bson_bool_t success; - int objc, result, options = 0; +NsfMongoCollectionIndex(Tcl_Interp *interp, + mongoc_collection_t *collectionPtr, + Tcl_Obj *attributesObj, + CONST char *withName, + int withBackground, + int withDropdups, + int withSparse, + int withTtl, + int withUnique) { + int success = 0; + int objc, result; Tcl_Obj **objv; - bson keys[1], out[1]; + bson_t keys[1]; + bson_error_t bsonError; + mongoc_index_opt_t options; result = Tcl_ListObjGetElements(interp, attributesObj, &objc, &objv); if (result != TCL_OK || (objc % 3 != 0)) { return NsfPrintError(interp, "%s: must contain a multiple of 3 elements", ObjStr(attributesObj)); } + BsonAppendObjv(interp, keys, objc, objv); - if (withBackground) {options |= MONGO_INDEX_BACKGROUND;} - if (withDropdups) {options |= MONGO_INDEX_DROP_DUPS;} - if (withSparse) {options |= MONGO_INDEX_SPARSE;} - if (withUnique) {options |= MONGO_INDEX_UNIQUE;} - success = mongo_create_index(connPtr, namespace, keys, withName, options, withTtl, out); + mongoc_index_opt_init(&options); + + if (withBackground) {options.background = 1;} + if (withDropdups) {options.drop_dups = 1;} + if (withSparse) {options.sparse = 1;} + if (withUnique) {options.unique = 1;} + if (withTtl) {options.expire_after_seconds = withTtl;} + if (withName) {options.name = withName;} + /* TODO: not handled: is_initialized, v, weights, default_language, laguage_override, padding */ + + success = mongoc_collection_ensure_index(collectionPtr, keys, &options, &bsonError); + bson_destroy(keys); - /* TODO: examples in mongo-client do not touch out; do we have to do - something about it? 
*/ - Tcl_SetObjResult(interp, Tcl_NewBooleanObj(success == MONGO_OK)); + Tcl_SetObjResult(interp, Tcl_NewBooleanObj(success)); return TCL_OK; } - /* -cmd insert NsfMongoInsert { - {-argName "conn" -required 1 -type mongo} - {-argName "namespace" -required 1} +cmd "collection::insert" NsfMongoCollectionInsert { + {-argName "collection" -required 1 -type mongoc_collection_t} {-argName "values" -required 1 -type tclobj} } */ -static int NsfMongoInsert(Tcl_Interp *interp, mongo *connPtr, CONST char *namespace, Tcl_Obj *valuesObj) { +static int NsfMongoCollectionInsert(Tcl_Interp *interp, + mongoc_collection_t *collectionPtr, + Tcl_Obj *valuesObj) { int i, objc, result; Tcl_Obj **objv; - bson b[1]; + bson_t b[1]; + bson_oid_t oid; + bson_error_t bsonError; + mongoc_insert_flags_t insertFlags = MONGOC_INSERT_NO_VALIDATE; /* otherwise, we can't insert a DBRef */ + /* TODO: insertFlags not handled: + MONGOC_INSERT_NONE = 0, + MONGOC_INSERT_CONTINUE_ON_ERROR = 1 << 0, + MONGOC_INSERT_NO_VALIDATE = 1 << 31, + */ + const mongoc_write_concern_t *writeConcern = NULL; /* TODO: not handled yet */ result = Tcl_ListObjGetElements(interp, valuesObj, &objc, &objv); if (result != TCL_OK || (objc % 3 != 0)) { return NsfPrintError(interp, "%s: must contain a multiple of 3 elements", ObjStr(valuesObj)); } - //bson_init(buf); bson_init(b); - bson_append_new_oid(b, "_id"); + bson_oid_init(&oid, NULL); + bson_append_oid(b, "_id", 3, &oid); for (i = 0; i < objc; i += 3) { char *name = ObjStr(objv[i]); @@ -748,40 +801,41 @@ BsonAppend(interp, b, name, tag, value); } - bson_finish(b); - /* for the time being, no write_concern (last arg of mongo_insert()) */ - result = mongo_insert(connPtr, namespace, b, NULL); + result = mongoc_collection_insert(collectionPtr, insertFlags, b, writeConcern, &bsonError); - if (result == MONGO_ERROR) { - result = NsfPrintError(interp, ErrorMsg(connPtr->err)); + if (result == 0) { + bson_destroy(b); + return NsfPrintError(interp, "mongo::collection::insert: error: %s", bsonError.message); } else { - Tcl_Obj *resultObj = BsonToList(interp, b->data, 0); - Tcl_SetObjResult(interp, resultObj); + Tcl_SetObjResult(interp, BsonToList(interp, b, 0)); result = TCL_OK; + bson_destroy(b); } - bson_destroy(b); return result; } /* -cmd query NsfMongoQuery { - {-argName "conn" -required 1 -type mongo} - {-argName "namespace" -required 1} +cmd collection::query NsfMongoCollectionQuery { + {-argName "collection" -required 1 -type mongoc_collection_t} {-argName "query" -required 1 -type tclobj} {-argName "-atts" -required 0 -nrargs 1 -type tclobj} {-argName "-limit" -required 0 -type int32} {-argName "-skip" -required 0 -type int32} } */ static int -NsfMongoQuery(Tcl_Interp *interp, mongo *connPtr, CONST char *namespace, - Tcl_Obj *queryObj, Tcl_Obj *withAttsObj, - int withLimit, int withSkip) { +NsfMongoCollectionQuery(Tcl_Interp *interp, + mongoc_collection_t *collectionPtr, + Tcl_Obj *queryObj, Tcl_Obj *withAttsObj, + int withLimit, int withSkip) { int objc1, objc2, result; Tcl_Obj **objv1, **objv2, *resultObj; - mongo_cursor *cursor; - bson query[1]; - bson atts[1]; + mongoc_cursor_t *cursor; + bson_t query[1]; + bson_t atts[1]; + const bson_t *nextPtr; + mongoc_query_flags_t queryFlags = 0; /* TODO: not handled */ + mongoc_read_prefs_t *readPrefs = NULL; /* TODO: not handled */ /*fprintf(stderr, "NsfMongoQuery: namespace %s withLimit %d withSkip %d\n", namespace, withLimit, withSkip);*/ @@ -801,20 +855,23 @@ /* fprintf(stderr, "query # %d, atts # %d\n", objc1, objc2); */ BsonAppendObjv(interp, query, 
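The former ::mongo::insert, ::mongo::index and ::mongo::count calls turn into collection-level commands that take a collection handle instead of a connection plus a "db.collection" namespace string. A rough sketch against the collection handle from the previous example; the field names are illustrative:

    ::mongo::collection::insert $collection {name string Gustaf projects string nsf age int32 53}
    ::mongo::collection::index  $collection {name int32 1} -name name_idx -background
    puts [::mongo::collection::count $collection {projects string nsf}]
    puts [::mongo::collection::query $collection {projects string nsf} -limit 10]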
objc1, objv1); - BsonAppendObjv(interp, atts, objc2, objv2); + BsonAppendObjv(interp, atts, objc2, objv2); resultObj = Tcl_NewListObj(0, NULL); /* * The last field of mongo_find is options, semantics are described here * http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol#MongoWireProtocol-OPQUERY */ - cursor = mongo_find( connPtr, namespace, query, atts, withLimit, withSkip, 0 ); - while( mongo_cursor_next( cursor ) == MONGO_OK ) { - Tcl_ListObjAppendElement(interp, resultObj, BsonToList(interp, (&cursor->current)->data, 0)); + cursor = mongoc_collection_find( collectionPtr, queryFlags, + withSkip, withLimit, 0 /* batch_size */, + query, atts, readPrefs); + + while( mongoc_cursor_next( cursor, &nextPtr ) == 1 ) { + Tcl_ListObjAppendElement(interp, resultObj, BsonToList(interp, nextPtr, 0)); } - mongo_cursor_destroy( cursor ); + mongoc_cursor_destroy( cursor ); bson_destroy( query ); bson_destroy( atts ); @@ -823,50 +880,28 @@ return TCL_OK; } -/* -cmd remove NsfMongoRemove { - {-argName "conn" -required 1 -type mongo} - {-argName "namespace" -required 1} - {-argName "condition" -required 1 -type tclobj} -} -*/ -static int -NsfMongoRemove(Tcl_Interp *interp, mongo *connPtr, CONST char *namespace, Tcl_Obj *conditionObj) { - int objc, result, status; - Tcl_Obj **objv; - bson query[1]; - result = Tcl_ListObjGetElements(interp, conditionObj, &objc, &objv); - if (result != TCL_OK || (objc % 3 != 0)) { - return NsfPrintError(interp, "%s: must contain a multiple of 3 elements", ObjStr(conditionObj)); - } - - BsonAppendObjv(interp, query, objc, objv); - /* for the time being, no write_concern (last arg of mongo_remove()) */ - status = mongo_remove(connPtr, namespace, query, NULL); - - Tcl_SetObjResult(interp, Tcl_NewIntObj(status == MONGO_OK)); - - bson_destroy(query); - return TCL_OK; -} - /* -cmd insert NsfMongoUpdate { - {-argName "conn" -required 1 -type mongo} - {-argName "namespace" -required 1} +cmd "collection::update" NsfMongoCollectionUpdate { + {-argName "collection" -required 1 -type mongoc_collection_t} {-argName "cond" -required 1 -type tclobj} {-argName "values" -required 1 -type tclobj} {-argName "-upsert" -required 0 -nrargs 0} {-argName "-all" -required 0 -nrargs 0} } */ static int -NsfMongoUpdate(Tcl_Interp *interp, mongo *connPtr, CONST char *namespace, - Tcl_Obj *conditionObj, Tcl_Obj *valuesObj, int withUpsert, int withAll) { - int objc, result, mongorc, options = 0; +NsfMongoCollectionUpdate(Tcl_Interp *interp, + mongoc_collection_t *collectionPtr, + Tcl_Obj *conditionObj, Tcl_Obj *valuesObj, + int withUpsert, int withAll) { + + const mongoc_write_concern_t *writeConcern = NULL; /* TODO: not handled yet */ + mongoc_update_flags_t updateFlags = MONGOC_UPDATE_NO_VALIDATE; /* for dbrefs */ + bson_error_t bsonError; + bson_t cond[1], values[1]; + int objc, result = TCL_OK, success; Tcl_Obj **objv; - bson cond[1], values[1]; result = Tcl_ListObjGetElements(interp, conditionObj, &objc, &objv); if (result != TCL_OK || (objc % 3 != 0)) { @@ -883,13 +918,14 @@ BsonAppendObjv(interp, values, objc, objv); - if (withUpsert) {options |= 1;} - if (withAll) {options |= 2;} + if (withUpsert) {updateFlags |= MONGOC_UPDATE_UPSERT;} + if (withAll) {updateFlags |= MONGOC_UPDATE_MULTI_UPDATE;} - /* for the time being, no write_concern (last arg of mongo_update()) */ - mongorc = mongo_update(connPtr, namespace, cond, values, options, NULL); + success = mongoc_collection_update(collectionPtr, updateFlags, cond, values, writeConcern, &bsonError); - Tcl_SetObjResult(interp, 
Tcl_NewBooleanObj(mongorc == MONGO_OK)); + if (success == 0) { + result = NsfPrintError(interp, "mongo::collection::update: error: %s", bsonError.message); + } return TCL_OK; } @@ -899,8 +935,7 @@ ***********************************************************************/ /* cmd cursor::find NsfMongoCursorFind { - {-argName "conn" -required 1 -type mongo} - {-argName "namespace" -required 1} + {-argName "collection" -required 1 -type mongoc_collection_t} {-argName "query" -required 1 -type tclobj} {-argName "-atts" -required 0 -nrargs 1 -type tclobj} {-argName "-limit" -required 0 -type int32} @@ -910,15 +945,18 @@ } */ static int -NsfMongoCursorFind(Tcl_Interp *interp, mongo *connPtr, CONST char *namespace, - Tcl_Obj *queryObj, Tcl_Obj *withAttsObj, +NsfMongoCursorFind(Tcl_Interp *interp, + mongoc_collection_t *collectionPtr, + Tcl_Obj *queryObj, Tcl_Obj *withAttsObj, int withLimit, int withSkip, int withTailable, int withAwaitdata) { - int objc1, objc2, result, options = 0; + int objc1, objc2, result; + mongoc_query_flags_t queryFlags = 0; Tcl_Obj **objv1, **objv2; - mongo_cursor *cursor; - bson query[1]; - bson atts[1]; + mongoc_cursor_t *cursor; + bson_t query[1]; + bson_t atts[1]; + mongoc_read_prefs_t *readPrefsPtr = NULL; /* TODO: not used */ /*fprintf(stderr, "NsfMongoQuery: namespace %s withLimit %d withSkip %d\n", namespace, withLimit, withSkip);*/ @@ -944,52 +982,65 @@ * http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol#MongoWireProtocol-OPQUERY */ if (withTailable) { - options |= MONGO_TAILABLE; + queryFlags |= MONGOC_QUERY_TAILABLE_CURSOR; } if (withAwaitdata) { - options |= MONGO_AWAIT_DATA; + queryFlags |= MONGOC_QUERY_AWAIT_DATA; } - cursor = mongo_find( connPtr, namespace, query, atts, withLimit, withSkip, options); - + /* TODO: query flags: + MONGOC_QUERY_SLAVE_OK = 1 << 2, + MONGOC_QUERY_OPLOG_REPLAY = 1 << 3, + MONGOC_QUERY_NO_CURSOR_TIMEOUT = 1 << 4, + MONGOC_QUERY_EXHAUST = 1 << 6, + MONGOC_QUERY_PARTIAL = 1 << 7, + */ + cursor = mongoc_collection_find(collectionPtr, queryFlags, + withSkip, withLimit, 0 /*TODO missing batch_size*/, + query, atts, readPrefsPtr); if (cursor) { char buffer[80]; - Nsf_PointerAdd(interp, buffer, "mongo_cursor", cursor); - Tcl_SetObjResult(interp, Tcl_NewStringObj(buffer, -1)); + if (Nsf_PointerAdd(interp, buffer, "mongoc_cursor_t", cursor) == TCL_OK) { + Tcl_SetObjResult(interp, Tcl_NewStringObj(buffer, -1)); + } else { + mongoc_cursor_destroy( cursor ); + result = TCL_ERROR; + } } else { Tcl_ResetResult(interp); } bson_destroy( query ); bson_destroy( atts ); - return TCL_OK; + return result; } /* cmd cursor::next NsfMongoCursorNext { - {-argName "cursor" -required 1 -type mongo_cursor} + {-argName "cursor" -required 1 -type mongoc_cursor_t} } */ static int -NsfMongoCursorNext(Tcl_Interp *interp, mongo_cursor *cursor) { +NsfMongoCursorNext(Tcl_Interp *interp, mongoc_cursor_t *cursor) { int result; + const bson_t *nextPtr; - result = mongo_cursor_next( cursor ); - if (result == MONGO_OK) { - Tcl_SetObjResult(interp, BsonToList(interp, (&cursor->current)->data, 0)); + result = mongoc_cursor_next( cursor, &nextPtr ); + if (result == 1) { + Tcl_SetObjResult(interp, BsonToList(interp, nextPtr, 0)); } return TCL_OK; } /* cmd cursor::close NsfMongoCursorClose { - {-argName "cursor" -required 1 -type mongo_cursor -withObj 1} + {-argName "cursor" -required 1 -type mongoc_cursor_t -withObj 1} } */ static int -NsfMongoCursorClose(Tcl_Interp *interp, mongo_cursor *cursor, Tcl_Obj *cursorObj) { +NsfMongoCursorClose(Tcl_Interp *interp,
mongoc_cursor_t *cursor, Tcl_Obj *cursorObj) { - mongo_cursor_destroy( cursor ); + mongoc_cursor_destroy( cursor ); Nsf_PointerDelete(ObjStr(cursorObj), cursor, 0); return TCL_OK; } @@ -1000,86 +1051,240 @@ /*********************************************************************** * GridFS interface ***********************************************************************/ + /* +cmd gridfs::close NsfMongoGridFSClose { + {-argName "gfs" -required 1 -type mongoc_gridfs_t -withObj 1} +} +*/ +static int +NsfMongoGridFSClose(Tcl_Interp *interp, mongoc_gridfs_t *gridfsPtr, Tcl_Obj *gridfsObj) { + + mongoc_gridfs_destroy(gridfsPtr); + Nsf_PointerDelete(ObjStr(gridfsObj), gridfsPtr, 0); + + return TCL_OK; +} + +/* cmd gridfs::open NsfMongoGridFSOpen { - {-argName "conn" -required 1 -type mongo} + {-argName "conn" -required 1 -type mongoc_client_t} {-argName "dbname" -required 1} {-argName "prefix" -required 1} } */ static int -NsfMongoGridFSOpen(Tcl_Interp *interp, mongo *connPtr, +NsfMongoGridFSOpen(Tcl_Interp *interp, mongoc_client_t *clientPtr, CONST char *dbname, CONST char *prefix) { char buffer[80]; - gridfs *gfsPtr; + int result = TCL_OK; + bson_error_t bsonError; + mongoc_gridfs_t *gfsPtr; - gfsPtr = (gridfs *)ckalloc(sizeof(gridfs)); - gridfs_init(connPtr, dbname, prefix, gfsPtr); + gfsPtr = mongoc_client_get_gridfs(clientPtr, dbname, prefix, &bsonError); - Nsf_PointerAdd(interp, buffer, "gridfs", gfsPtr); - Tcl_SetObjResult(interp, Tcl_NewStringObj(buffer, -1)); + if (gfsPtr == NULL) { + return NsfPrintError(interp, "mongo::gridfs::open: error: %s", bsonError.message); + } - return TCL_OK; + if (Nsf_PointerAdd(interp, buffer, "mongoc_gridfs_t", gfsPtr) == TCL_OK) { + Tcl_SetObjResult(interp, Tcl_NewStringObj(buffer, -1)); + } else { + mongoc_gridfs_destroy(gfsPtr); + result = TCL_ERROR; + } + + return result; } +/*********************************************************************** + * GridFile interface operating on GridFS + * + * Currently we need a few private gridfs functions since the new + * c-driver has less functionality than the old one. + ***********************************************************************/ +#define MONGOC_INSIDE 1 +#include "mongoc-gridfs-private.h" +#include "mongoc-gridfs-file-private.h" +#undef MONGOC_INSIDE +#define MONGOC_GRIDFS_READ_CHUNK 4096*4 + /* -cmd gridfs::remove_file NsfMongoGridFSRemoveFile { - {-argName "gfs" -required 1 -type tclobj} - {-argName "filename" -required 1} +cmd gridfile::create NsfMongoGridFileCreate { + {-argName "-source" -required 1 -typeName "gridfilesource" -type "file|string"} + {-argName "gfs" -required 1 -type mongoc_gridfs_t} + {-argName "value" -required 1} + {-argName "name" -required 1} + {-argName "contenttype" -required 1} } */ static int -NsfMongoGridFSRemoveFile(Tcl_Interp *interp, gridfs *gridfsPtr, - CONST char *filename) { - int status; +NsfMongoGridFileCreate(Tcl_Interp *interp, int withSource, + mongoc_gridfs_t *gridfsPtr, + CONST char *value, CONST char *name, + CONST char *contenttype) { + int n, result = TCL_OK; + mongoc_gridfs_file_opt_t fileOpts = {NULL}; + mongoc_gridfs_file_t *gridFile; - /* the current interfaces does not return a status !
*/ - status = gridfs_remove_filename(gridfsPtr, filename); + fprintf(stderr, "0 withSource %d\n", withSource); + if (withSource == GridfilesourceNULL) { + withSource = GridfilesourceFileIdx; + } + fprintf(stderr, "1 withSource %d\n", withSource); - Tcl_SetObjResult(interp, Tcl_NewIntObj(status == MONGO_OK)); + fileOpts.filename = name; + fileOpts.content_type = contenttype; + /* + const char *md5; + const char *filename; + const char *content_type; + const bson_t *aliases; + const bson_t *metadata; + uint32_t chunk_size; + */ + gridFile = mongoc_gridfs_create_file(gridfsPtr, &fileOpts); - return TCL_OK; + if (withSource == GridfilesourceFileIdx) { + uint8_t buf[MONGOC_GRIDFS_READ_CHUNK]; + struct iovec iov = { buf, 0 }; + int fd = open(value, O_RDONLY); + + for (;; ) { + n = read(fd, iov.iov_base, MONGOC_GRIDFS_READ_CHUNK); + if (n > 0) { + iov.iov_len = n; + n = mongoc_gridfs_file_writev(gridFile, &iov, 1, 0); + } else if (n == 0) { + break; + } else { + result = TCL_ERROR; + break; + } + } + close(fd); + } else { + struct iovec iov = { (char *)value, strlen(value) }; + mongoc_gridfs_file_writev(gridFile, &iov, 1, 0); + } + if (result == TCL_OK) { + mongoc_gridfs_file_save(gridFile); + } + + mongoc_gridfs_file_destroy(gridFile); + + Tcl_SetObjResult(interp, Tcl_NewIntObj(result == TCL_OK)); + + return result; } + /* -cmd gridfs::store_file NsfMongoGridFSStoreFile { - {-argName "gfs" -required 1 -type gridfs} - {-argName "filename" -required 1} - {-argName "remotename" -required 1} - {-argName "contenttype" -required 1} +cmd "gridfile::delete" NsfMongoGridFileDelete { + {-argName "gfs" -required 1 -type mongoc_gridfs_t} + {-argName "query" -required 1 -type tclobj} } */ static int -NsfMongoGridFSStoreFile(Tcl_Interp *interp, gridfs *gridfsPtr, - CONST char *filename, CONST char *remotename, - CONST char *contenttype) { - int flags = 0; // TODO: add/handle flags - int result = gridfs_store_file(gridfsPtr, filename, remotename, contenttype, flags); +NsfMongoGridFileDelete(Tcl_Interp *interp, + mongoc_gridfs_t *gridfsPtr, + Tcl_Obj *queryObj) { + bson_t query[1]; + mongoc_cursor_t *files; + bson_iter_t it[1]; + bson_oid_t id; + bson_t b[1]; + const bson_t *nextPtr; + bson_error_t bsonError; + Tcl_Obj **objv; + int objc, result; - /* currently, we do not get the bson structure; - Tcl_SetObjResult(interp, BsonToList(interp, b.data, 0));*/ + result = Tcl_ListObjGetElements(interp, queryObj, &objc, &objv); + if (result != TCL_OK || (objc % 3 != 0)) { + return NsfPrintError(interp, "%s: must contain a multiple of 3 elements", ObjStr(queryObj)); + } - Tcl_SetObjResult(interp, Tcl_NewIntObj(result == MONGO_OK)); + BsonAppendObjv(interp, query, objc, objv); + files = mongoc_collection_find( gridfsPtr->files, 0, + 0, 0, 0 /* batch_size */, + query, NULL, NULL); + bson_destroy(query); + /* files should be a valid cursor even if the file doesn't exist */ + if ( files == NULL ) { + return NsfPrintError(interp, "gridfs::remove_file: invalid cursor for files"); + } + + /* Remove each file and it's chunks from files named filename */ + while (mongoc_cursor_next(files, &nextPtr)) { + bson_iter_init_find(it, nextPtr, "_id"); + id = *bson_iter_oid(it); + + /* Remove the file with the specified id */ + bson_init(b); + bson_append_oid(b, "_id", 3, &id); + mongoc_collection_delete(gridfsPtr->files, 0, b, NULL, &bsonError); + bson_destroy(b); + + /* Remove all chunks from the file with the specified id */ + bson_init(b); + bson_append_oid(b, "files_id", 8, &id); + mongoc_collection_delete(gridfsPtr->chunks, 0, b, 
NULL, &bsonError); + bson_destroy(b); + } + + mongoc_cursor_destroy(files); return TCL_OK; } /* -cmd gridfs::close NsfMongoGridFSClose { - {-argName "gfs" -required 1 -type gridfs -withObj 1} +cmd gridfile::open NsfMongoGridFileOpen { + {-argName "gfs" -required 1 -type mongoc_gridfs_t} + {-argName "query" -required 1 -type tclobj} } */ static int -NsfMongoGridFSClose(Tcl_Interp *interp, gridfs *gridfsPtr, Tcl_Obj *gridfsObj) { +NsfMongoGridFileOpen(Tcl_Interp *interp, + mongoc_gridfs_t *gridfsPtr, + Tcl_Obj *queryObj) { + mongoc_gridfs_file_t* gridFilePtr; + bson_error_t bsonError; + int result = TCL_OK, objc; + bson_t query[1]; + Tcl_Obj **objv; - gridfs_destroy(gridfsPtr); - Nsf_PointerDelete(ObjStr(gridfsObj), gridfsPtr, 1); + /*fprintf(stderr, "NsfMongoQuery: namespace %s withLimit %d withSkip %d\n", + namespace, withLimit, withSkip);*/ - return TCL_OK; + result = Tcl_ListObjGetElements(interp, queryObj, &objc, &objv); + if (result != TCL_OK || (objc % 3 != 0)) { + return NsfPrintError(interp, "%s: must contain a multiple of 3 elements", ObjStr(queryObj)); + } + + BsonAppendObjv(interp, query, objc, objv); + + gridFilePtr = mongoc_gridfs_find_one(gridfsPtr, query, &bsonError); + + if (gridFilePtr != NULL) { + char buffer[80]; + + if (Nsf_PointerAdd(interp, buffer, "mongoc_gridfs_file_t", gridFilePtr) == TCL_OK) { + Tcl_SetObjResult(interp, Tcl_NewStringObj(buffer, -1)); + } else { + mongoc_gridfs_file_destroy(gridFilePtr); + result = TCL_ERROR; + } + } else { + Tcl_ResetResult(interp); + } + + bson_destroy(query); + return result; } + /*********************************************************************** * GridFile interface * @@ -1089,123 +1294,97 @@ /* cmd gridfile::close NsfMongoGridFileClose { - {-argName "file" -required 1 -type gridfile -withObj 1} + {-argName "gridfile" -required 1 -type mongoc_gridfs_file_t -withObj 1} } */ static int -NsfMongoGridFileClose(Tcl_Interp *interp, gridfile* gridFilePtr, Tcl_Obj *gridFileObj) { +NsfMongoGridFileClose(Tcl_Interp *interp, mongoc_gridfs_file_t* gridFilePtr, Tcl_Obj *gridFileObj) { - gridfile_destroy(gridFilePtr); - Nsf_PointerDelete(ObjStr(gridFileObj), gridFilePtr, 1); + mongoc_gridfs_file_destroy(gridFilePtr); + Nsf_PointerDelete(ObjStr(gridFileObj), gridFilePtr, 0); return TCL_OK; } /* cmd gridfile::get_contentlength NsfMongoGridFileGetContentlength { - {-argName "gridfile" -required 1 -type gridfile} + {-argName "gridfile" -required 1 -type mongoc_gridfs_file_t} } */ static int -NsfMongoGridFileGetContentlength(Tcl_Interp *interp, gridfile* gridFilePtr) { - gridfs_offset len; +NsfMongoGridFileGetContentlength(Tcl_Interp *interp, mongoc_gridfs_file_t* gridFilePtr) { + int64_t len; - len = gridfile_get_contentlength(gridFilePtr); + len = mongoc_gridfs_file_get_length(gridFilePtr); Tcl_SetObjResult(interp, Tcl_NewLongObj(len)); return TCL_OK; } /* cmd gridfile::get_contenttype NsfMongoGridFileGetContentType { - {-argName "gridfile" -required 1 -type gridfile} + {-argName "gridfile" -required 1 -type mongoc_gridfs_file_t} } */ static int -NsfMongoGridFileGetContentType(Tcl_Interp *interp, gridfile* gridFilePtr) { - CONST char *contentType; +NsfMongoGridFileGetContentType(Tcl_Interp *interp, mongoc_gridfs_file_t* gridFilePtr) { - contentType = gridfile_get_contenttype(gridFilePtr); - Tcl_SetObjResult(interp, Tcl_NewStringObj(contentType, -1)); + Tcl_SetObjResult(interp, Tcl_NewStringObj(gridFilePtr->bson_content_type, -1)); return TCL_OK; } /* cmd gridfile::get_metadata NsfMongoGridFileGetMetaData { - {-argName "gridfile" -required 1 
-type tclgridfile* gridFilePtrobj} + {-argName "gridfile" -required 1 -type mongoc_gridfs_file_t} } */ static int -NsfMongoGridFileGetMetaData(Tcl_Interp *interp, gridfile* gridFilePtr) { - bson b; - bson_bool_t copyData = 0; // TODO: what does this +NsfMongoGridFileGetMetaData(Tcl_Interp *interp, mongoc_gridfs_file_t* gridFilePtr) { - gridfile_get_metadata(gridFilePtr, &b, copyData); - Tcl_SetObjResult(interp, BsonToList(interp, b.data, 0)); + Tcl_SetObjResult(interp, BsonToList(interp, &gridFilePtr->bson_metadata, 0)); return TCL_OK; } /* -cmd gridfile::open NsfMongoGridFileOpen { - {-argName "gfs" -required 1 -type gridfs} - {-argName "filename" -required 1} -} -*/ -static int -NsfMongoGridFileOpen(Tcl_Interp *interp, gridfs *gridfsPtr, CONST char *filename) { - gridfile* gridFilePtr; - int result; - - gridFilePtr = (gridfile *)ckalloc(sizeof(gridfile)); - result = gridfs_find_filename(gridfsPtr, filename, gridFilePtr); - - if (result == MONGO_OK) { - char buffer[80]; - Nsf_PointerAdd(interp, buffer, "gridfile", gridFilePtr); - Tcl_SetObjResult(interp, Tcl_NewStringObj(buffer, -1)); - } else { - ckfree((char *)gridFilePtr); - Tcl_ResetResult(interp); - } - - return TCL_OK; -} - -/* cmd gridfile::read NsfMongoGridFileRead { - {-argName "gridfile" -required 1 -type gridfile} + {-argName "gridfile" -required 1 -type mongoc_gridfs_file_t} {-argName "size" -required 1 -type int} } */ static int -NsfMongoGridFileRead(Tcl_Interp *interp, gridfile *gridFilePtr, int size) { +NsfMongoGridFileRead(Tcl_Interp *interp, mongoc_gridfs_file_t *gridFilePtr, int size) { int readSize; - char *buffer; + Tcl_Obj *resultObj = Tcl_NewByteArrayObj(NULL, size); + struct iovec iov = { NULL, size }; - buffer = ckalloc(size); - readSize = gridfile_read_buffer(gridFilePtr, buffer, size); - Tcl_SetObjResult(interp, Tcl_NewStringObj(buffer, readSize)); - ckfree(buffer); + iov.iov_base = Tcl_SetByteArrayLength(resultObj, size); + readSize = mongoc_gridfs_file_readv(gridFilePtr, &iov, 1, + 0 /* min_bytes */, + 0 /* timeout_msec */); + /*fprintf(stderr, "NsfMongoGridFileRead want %d got %d\n", size, readSize);*/ + Tcl_SetByteArrayLength(resultObj, readSize); + Tcl_SetObjResult(interp, resultObj); + return TCL_OK; } /* cmd "gridfile::seek" NsfMongoGridFileSeek { - {-argName "file" -required 1 -type gridfile} + {-argName "gridfile" -required 1 -type mongoc_gridfs_file_t} {-argName "offset" -required 1 -type int32} } */ static int -NsfMongoGridFileSeek(Tcl_Interp *interp, gridfile *gridFilePtr, int offset) { - int pos; +NsfMongoGridFileSeek(Tcl_Interp *interp, mongoc_gridfs_file_t *gridFilePtr, int offset) { + int result; - pos = gridfile_seek(gridFilePtr, offset); - Tcl_SetObjResult(interp, Tcl_NewIntObj(pos)); + /* TODO: whence SEEK_SET, SEEK_CUR or SEEK_END; implementation of SEEK_END looks incorrect */ + result = mongoc_gridfs_file_seek(gridFilePtr, offset, SEEK_SET); - return TCL_OK; + return result < 0 ? 
TCL_ERROR : TCL_OK; } /*********************************************************************** @@ -1271,11 +1450,16 @@ /* * register the pointer converter */ - Nsf_PointerTypeRegister(interp, "gridfs", &gridfsCount); - Nsf_PointerTypeRegister(interp, "gridfile", &gridfileCount); - Nsf_PointerTypeRegister(interp, "mongo", &mongoCount); - Nsf_PointerTypeRegister(interp, "mongo_cursor", &cursorCount); + Nsf_PointerTypeRegister(interp, "mongoc_client_t", &mongoClientCount); + Nsf_PointerTypeRegister(interp, "mongoc_collection_t", &mongoCollectionCount); + Nsf_PointerTypeRegister(interp, "mongoc_cursor_t", &mongoCursorCount); + Nsf_PointerTypeRegister(interp, "mongoc_gridfs_file_t", &gridfileCount); + Nsf_PointerTypeRegister(interp, "mongoc_gridfs_t", &gridfsCount); + for (i=0; i < nr_elements(method_command_namespace_names); i++) { + Tcl_CreateNamespace(interp, method_command_namespace_names[i], 0, (Tcl_NamespaceDeleteProc *)NULL); + } + /* create all method commands (will use the namespaces above) */ for (i=0; i < nr_elements(method_definitions)-1; i++) { Tcl_CreateObjCommand(interp, method_definitions[i].methodName, method_definitions[i].proc, 0, 0); @@ -1291,3 +1475,13 @@ { return Nsfmongo_Init(interp); } + +/* + * Local Variables: + * mode: c + * c-basic-offset: 2 + * fill-column: 78 + * indent-tabs-mode: nil + * End: + */ + Index: library/mongodb/nx-mongo.tcl =================================================================== diff -u -r4940f1317b9827162d7a0d28c74da0758ffe2d29 -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/nx-mongo.tcl (.../nx-mongo.tcl) (revision 4940f1317b9827162d7a0d28c74da0758ffe2d29) +++ library/mongodb/nx-mongo.tcl (.../nx-mongo.tcl) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -9,7 +9,7 @@ # todo: how to handle multiple connections; currently we have a single, global connection # todo: all references are currently auto-fetched. make this optional -# todo: If "emebds" or "references" are used, the object must be of +# todo: If "embeds" or "references" are used, the object must be of # the specified classes, no subclasses allowed # todo: extend the query language syntax, e.g. regexp, ... 
# todo: handle remove for non-multivalued embedded objects @@ -25,6 +25,7 @@ ::nx::Object create ::nx::mongo::db { :object property db :object property mongoConn + :object property gridFsName :public object method connect {{-db test} args} { if {[info exists :db]} { @@ -37,8 +38,17 @@ set :db $db set :mongoConn [::mongo::connect {*}$args] } - :public object method close {} { - ::mongo::close ${:mongoConn} + + :public object method close {} { + if {[info exists :gridFs]} { + ::nsf::log notice "nx::mongo: auto close gridfs" + :gridfs close + } + foreach {ns coll} [array get :collection] { + ::nsf::log notice "nx::mongo: auto close collection $ns $coll" + ::mongo::collection::close $coll + } + ::mongo::close ${:mongoConn} unset :db :mongoConn } @@ -48,17 +58,124 @@ ::mongo::close ${:mongoConn} } } + :public object method collection {ns} { + set key :collection($ns) + if {[info exists $key]} {return [set $key]} + if {[regexp {^([^.]+)[.](.+)$} $ns _ db coll]} { + return [set $key [mongo::collection::open ${:mongoConn} $db $coll]] + } + error "invalid mongo namespace '$ns'" + } - :public object method count {args} {::mongo::count ${:mongoConn} {*}$args} - :public object method index {args} {::mongo::index ${:mongoConn} {*}$args} - :public object method insert {args} {::mongo::insert ${:mongoConn} {*}$args} - :public object method remove {args} {::mongo::remove ${:mongoConn} {*}$args} - :public object method query {args} {::mongo::query ${:mongoConn} {*}$args} - :public object method update {args} {::mongo::update ${:mongoConn} {*}$args} - :public object method "drop collection" {name} {::mongo::run -nocomplain ${:mongoConn} ${:db} [list drop string $name]} - :public object method "drop database" {} {::mongo::run -nocomplain ${:mongoConn} ${:db} [list dropDatabase integer 1]} - :public object method "reset error" {} {::mongo::run -nocomplain ${:mongoConn} ${:db} [list reseterror integer 1]} + :public object method count {ns args} {::mongo::collection::count [:collection $ns] {*}$args} + :public object method index {ns args} {::mongo::collection::index [:collection $ns] {*}$args} + :public object method insert {ns args} {::mongo::collection::insert [:collection $ns] {*}$args} + :public object method delete {ns args} {::mongo::collection::delete [:collection $ns] {*}$args} + :public object method query {ns args} {::mongo::collection::query [:collection $ns] {*}$args} + :public object method update {ns args} {::mongo::collection::update [:collection $ns] {*}$args} + :public object method "drop collection" {name} { + ::mongo::run -nocomplain ${:mongoConn} ${:db} [list drop string $name] + } + :public object method "drop database" {} { + ::mongo::run -nocomplain ${:mongoConn} ${:db} [list dropDatabase integer 1] + } + :public object method "reset error" {} { + ::mongo::run -nocomplain ${:mongoConn} ${:db} [list reseterror integer 1] + } :public object method is_oid {string} {expr {[string length $string] == 24}} + + # + # GridFS + # + :object property gridFs + + :public object method "gridfs open" {{name fs}} { + if {[info exists :gridFsName]} { + if {${:gridFsName} eq $name} {return ${:gridFs}} + :gridfs close + } + set :gridFsName $name + set :gridFs [::mongo::gridfs::open ${:mongoConn} ${:db} $name] + } + + :public object method "gridfs close" {} { + ::mongo::gridfs::close ${:gridFs} + unset :gridFs :gridFsName + } + + :public object method "gridfs store_file" {local remote {mime text/plain}} { + ::mongo::gridfs::store_file ${:gridFs} $local $remote $mime + } + :public object method "gridfs 
store_string" {string remote {mime text/plain}} { + ::mongo::gridfs::store_string ${:gridFs} $string $remote $mime + } + + :public object method "gridfs list" {name} { + if {[string first * $name] == -1} { + set info [::mongo::query ${:mongoConn} ${:db}.${:gridFsName}.files \ + [list \$query document [list filename string $name]] \ + -limit 1] + return [lindex $info 0] + } else { + ns_log notice "::mongo::query ${:mongoConn} ${:db}.${:gridFsName}.files" + set info [::mongo::query ${:mongoConn} ${:db}.${:gridFsName}.files {}] + return $info + } + } + + :public object method "gridfs update" {id bson} { + ::mongo::update ${:mongoConn} ${:db}.${:gridFsName}.files \ + [list _id oid $id] $bson + } + + :public object method "file content" {name} { + set f [mongo::gridfile::open ${:gridFs} $name] + set content "" + while {1} { + append content [set chunk [mongo::gridfile::read $f 4096]] + if {[string length $chunk] < 4096} { + break + } + } + mongo::gridfile::close $f + return $content + } + + :public object method "gridfs set attribute" {file attribute value} { + set info [::nx::mongo::db gridfs list $file] + if {$info eq ""} {error "no such file <$file> stored in gridfs"} + foreach {att type v} $info { dict set d $att $v } + if {[dict exists $d $attribute] && [dict get $d $attribute] eq $value} { + # right value, nothing to do + return + } elseif {[dict exists $d $attribute]} { + # wrong value replace it + set bson {} + foreach {att type v} $info { + if {$att eq $attribute} { + lappend bson $att $type $value + } else { + lappend bson $att $type $v + } + } + } else { + #no such value, add it + lappend bson {*}$info $attribute string $value + } + nx::mongo::db gridfs update [dict get $d _id] $bson + } + + :public object method "gridfs map" {file url} { + ::nx::mongo::db gridfs set attribute $file url $url + } + :public object method "gridfs mapped" {url} { + set info [::mongo::query ${:mongoConn} ${:db}.${:gridFsName}.files \ + [list \$query document [list url string $url]] \ + -limit 1] + return [lindex $info 0] + } + + } ####################################################################### @@ -120,8 +237,8 @@ set result [list] foreach {pos type v} $value {lappend result [:bson decode $type $v]} return $result - } elseif {$bsontype eq "object"} { - #puts stderr "*** we have an object '$value', [:serialize]" + } elseif {$bsontype eq "document"} { + #puts stderr "*** we have an document '$value', [:serialize]" if {${:type} eq "embedded" && [info exists :arg]} { #puts stderr "*** we have an embed class = ${:arg}" set value [${:arg} bson create $value] @@ -132,7 +249,7 @@ set value [:bson deref ${:arg} $value] #puts stderr "*** bson deref ${:arg} ==> $value" } else { - error "don't know how to decode object with value '$value'; [:serialize]" + error "don't know how to decode document with value '$value'; [:serialize]" } } return $value @@ -161,15 +278,16 @@ :method "bson encodeValue" {value} { if {${:mongotype} eq "embedded_object"} { - return [list object [$value bson encode]] + #puts "embedded_object <$value>" + return [list document [$value bson encode]] } elseif {${:mongotype} eq "referenced_object"} { if {![::nsf::var::exists $value _id]} { :log "autosave $value to obtain an object_id" $value save } set _id [$value cget -_id] set cls [$value info class] - return [list object [list \ + return [list document [list \ {$ref} string [$cls cget -mongo_collection] \ {$id} oid $_id \ {$db} string [$cls cget -mongo_db]]] @@ -296,10 +414,10 @@ switch $op { "=" {lappend bson $att [$slot cget -mongotype] 
$value} ">" - "<" - "<=" - ">=" - "!=" { - lappend bson $att object [list [:get relop $op] [$slot cget -mongotype] $value] + lappend bson $att document [list [:get relop $op] [$slot cget -mongotype] $value] } "in" - "all" { - lappend bson $att object [list [:get relop $op] {*}[$slot bson encode -array $value]] + lappend bson $att document [list [:get relop $op] {*}[$slot bson encode -array $value]] } default {error "unknown operator $op"} } @@ -311,15 +429,15 @@ :method "bson query" {{-cond ""} {-orderby ""}} { #puts "bson query -cond <$cond> -orderby <$orderby>" set bson [:bson cond $cond] - set result [list \$query object $bson] + set result [list \$query document $bson] if {[llength $orderby] > 0} { set bson [list] foreach attspec $orderby { lassign $attspec att direction lappend bson $att int [expr {$direction eq "desc" ? -1 : 1}] } - lappend result \$orderby object $bson + lappend result \$orderby document $bson } #puts "bson query -cond <$cond> -orderby <$orderby> => $result" return $result @@ -381,9 +499,9 @@ set result [list] foreach {name type value} $list { switch $type { - object { lappend result "\{ [:bson pp -indent $indent $value] \}" } - array { lappend result "\[ [:bson pp_array -indent $indent $value] \]" } - default { lappend result [list $value]} + document { lappend result "\{ [:bson pp -indent $indent $value] \}" } + array { lappend result "\[ [:bson pp_array -indent $indent $value] \]" } + default { lappend result [list $value]} } } return [join $result ", "] @@ -395,9 +513,9 @@ foreach {name type value} $list { set prefix "\n[string repeat { } $indent]$name: " switch $type { - object { lappend result "$prefix\{ [:bson pp -indent $nextIndent $value] \}" } - array { lappend result "$prefix\[ [:bson pp_array -indent $nextIndent $value] \]" } - default { lappend result $prefix[list $value]} + document { lappend result "$prefix\{ [:bson pp -indent $nextIndent $value] \}" } + array { lappend result "$prefix\[ [:bson pp_array -indent $nextIndent $value] \]" } + default { lappend result $prefix[list $value]} } } return [join $result ", "] @@ -448,26 +566,26 @@ :public method pretty_variables {} { set vars {} foreach p [lmap handle [lsort [:info variables]] {::nx::Object info variable parameter $handle}] { - if {[regexp {^([^:]+):(.*)$} $p _ name options]} { - set resultOptions {} - set opts [split $options ,] - if {[lindex $opts 0] eq "embedded"} { - set resultOpts {} - foreach opt $opts { - switch -glob $opt { - slot=* {continue} - arg=* {lappend resultOpts type=[string range $opt 4 end]} - default {lappend resultOpts $opt} - } - } - lappend vars $name:[join $resultOpts ,] - continue - } - } - lappend vars $p + if {[regexp {^([^:]+):(.*)$} $p _ name options]} { + set resultOptions {} + set opts [split $options ,] + if {[lindex $opts 0] eq "embedded"} { + set resultOpts {} + foreach opt $opts { + switch -glob $opt { + slot=* {continue} + arg=* {lappend resultOpts type=[string range $opt 4 end]} + default {lappend resultOpts $opt} + } + } + lappend vars $name:[join $resultOpts ,] + continue + } + } + lappend vars $p } return $vars - } + } # # index method @@ -513,7 +631,7 @@ -atts [:bson atts $atts] \ -limit 1] 0] if {$tuple eq ""} { - return "" + return "" } if {$instance ne ""} {set instance [:uplevel [list ::nsf::object::qualify $instance]]} return [:bson create -name $instance $tuple] @@ -548,7 +666,7 @@ {-limit} {-skip} {-puts:boolean 1} - } { + } { set opts [list] if {[info exists limit]} {lappend opts -limit $limit} if {[info exists skip]} {lappend opts -skip $skip} @@ 
-626,7 +744,7 @@ set body {} set c 0 foreach {k v} [$obj eval [list array get :$name]] { - lappend body [incr c] object [list k string $k v string $v] + lappend body [incr c] document [list k string $k v string $v] } return [list array $body] } @@ -646,7 +764,7 @@ dict for {k v} [$obj eval [list set :$name]] { lappend body $k string $v } - return [list object $body] + return [list document $body] } :public method "bson rep decode dict" {slot name bsontype value} { set result "" @@ -750,7 +868,7 @@ #puts "delete a non-embedded entry" if {[info exists :_id]} { set mongo_ns [[:info class] cget -mongo_ns] - ::nx::mongo::db remove $mongo_ns [list _id oid ${:_id}] + ::nx::mongo::db delete $mongo_ns [list _id oid ${:_id}] } else { error "[self]: object does not contain an _id; it can't be delete from the mongo db." } Index: library/mongodb/tests/nsf-gridfs.test =================================================================== diff -u -rdddc2aa68fb12f66f1b01089b66b607e719b4eb7 -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/tests/nsf-gridfs.test (.../nsf-gridfs.test) (revision dddc2aa68fb12f66f1b01089b66b607e719b4eb7) +++ library/mongodb/tests/nsf-gridfs.test (.../nsf-gridfs.test) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -25,22 +25,22 @@ # # First, as usual, open the connection to the mongo db # -? {set mongoConn [::mongo::connect]} mongo:0 +? {set mongoConn [::mongo::connect]} mongoc_client_t:0 # # Open a GridFS in the mongo datbase "myfs" and use the usual prefix # "fs", such GridFS names the collections "fs.chunks" and "fs.files". # -? {set gridFS [::mongo::gridfs::open $mongoConn myfs fs]} gridfs:0 +? {set gridFS [::mongo::gridfs::open $mongoConn myfs fs]} mongoc_gridfs_t:0 - +set dir [file dirname [file dirname [info script]]] set fn README # gridfs::remove_file removes all files with the specified name # multiple store operations create "revisions" with different uploadDates -::mongo::gridfs::remove_file $gridFS $fn +::mongo::gridfile::delete $gridFS [list filename string $fn] -# make sure, nothing else is stored there. -::mongo::remove $mongoConn myfs.fs.files {} +# get the fs.files collection +set mongoColl [mongo::collection::open $mongoConn myfs fs.files] # # The current version of gridfs_store_file() is quite unfriendly, @@ -49,43 +49,52 @@ # # Store a known file: # -? {::mongo::gridfs::store_file $gridFS $fn $fn text/plain} 1 +? {::mongo::gridfile::create -source file $gridFS $dir/$fn $fn text/plain} 1 # -# Open a grid file, get some of its properties, and read it in chunks -# of 500 bytes, and close it finally. -# -? {set f [mongo::gridfile::open $gridFS README]} gridfile:0 +# Open grid file, get some of its properties, and read it in chunks +# of 1000 bytes, and close it finally. + +? {set f [mongo::gridfile::open $gridFS {filename string README}]} mongoc_gridfs_file_t:0 ? {mongo::gridfile::get_metadata $f} "" -? {mongo::gridfile::get_contentlength $f} [file size README] +? {mongo::gridfile::get_contentlength $f} [file size $dir/README] ? {mongo::gridfile::get_contenttype $f} text/plain + ? { set chunks 0 while {1} { - set chunk [mongo::gridfile::read $f 500] - if {[string length $chunk] < 500} { + set chunk [mongo::gridfile::read $f 1000] + puts "... read chunk length [string length $chunk]" + if {[string length $chunk] > 0} { incr chunks } + if {[string length $chunk] < 1000} { break } - incr chunks } set chunks -} 11 +} 4 ? {mongo::gridfile::close $f} "" # # Access the files stored in the gridfs via plain query interface. 
# (should be just one) puts "\nAll Files:" -? {llength [::mongo::query $mongoConn myfs.fs.files {}]} 1 +? {llength [::mongo::collection::query $mongoColl {}]} 1 +# store one more copy +? {::mongo::gridfile::create -source file $gridFS $dir/$fn $fn text/plain} 1 + +# we should have now two entries: +? {llength [::mongo::collection::query $mongoColl {}]} 2 +puts [join [::mongo::collection::query $mongoColl {}] \n] + # # Get the file named README from the gridfs via plain query interface # -? {set files [::mongo::query $mongoConn myfs.fs.files \ - [list \$query object {filename string README}] \ +? {set files [::mongo::collection::query $mongoColl \ + [list \$query document {filename string README}] \ -limit 1] llength [lindex $files 0] -} 24 +} 18 # # Extract the oid from the bson attributes @@ -101,19 +110,27 @@ # Add a dc:creator to the bson attributes # and update the entry in the gridfs # -? {::mongo::update $mongoConn myfs.fs.files [list _id oid $oid] \ - [concat [lindex $files 0] [list metadata object {dc:creator string "Gustaf Neumann"}]] -} 1 +? {::mongo::collection::update $mongoColl [list _id oid $oid] \ + [concat [lindex $files 0] [list metadata document {dc:creator string "Gustaf Neumann"}]] +} "" # # Now we can use the gridfs interface to obtain the additional # metadata as well # -set f [mongo::gridfile::open $gridFS README] +set f [mongo::gridfile::open $gridFS [list _id oid $oid]] ? {mongo::gridfile::get_metadata $f} "dc:creator string {Gustaf Neumann}" mongo::gridfile::close $f # # close everything # ::mongo::gridfs::close $gridFS +::mongo::collection::close $mongoColl ::mongo::close $mongoConn + +# +# Local variables: +# mode: tcl +# tcl-indent-level: 2 +# indent-tabs-mode: nil +# End: Index: library/mongodb/tests/nsf-mongo.test =================================================================== diff -u -r05b2776a0ecbc0453ae96bbfa9d94315e466f3f5 -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/tests/nsf-mongo.test (.../nsf-mongo.test) (revision 05b2776a0ecbc0453ae96bbfa9d94315e466f3f5) +++ library/mongodb/tests/nsf-mongo.test (.../nsf-mongo.test) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -17,75 +17,101 @@ # > db.persons.find(); # -#set mongoConn [::mongo::connect -server 127.0.0.1:27017] -set mongoConn [::mongo::connect] +#set mongoConn [::mongo::connect -uri mongodb://127.0.0.1:27017/] +? {set mongoConn [::mongo::connect]} "mongoc_client_t:0" puts "Connection: $mongoConn" + + if {1} { - #::mongo::remove $mongoConn tutorial.persons {} + + #::mongo::collection::delete $mongoConn tutorial.persons {} # Drop old potenially old collection and # recreate it as a capped collection ::mongo::run -nocomplain $mongoConn tutorial {drop string persons} puts "\nCreate a capped collection:" ? {::mongo::run $mongoConn tutorial { create string persons capped bool 1 - size int 100000 + size int32 100000 }} 1 + ? {set mongoColl [::mongo::collection::open $mongoConn tutorial persons]} "mongoc_collection_t:0" + puts "Collection: $mongoColl" + + ? {mongo::collection::count $mongoColl {}} 0 + + puts "\nInserting a few tuples" + ? {llength [::mongo::collection::insert $mongoColl \ + [list name string Gustaf projects string nsf age int32 53]]} "12" + + ? {mongo::collection::count $mongoColl {}} 1 + + ::mongo::collection::insert $mongoColl \ + [list name string Stefan projects string nsf] + ::mongo::collection::insert $mongoColl \ + [list name string Victor a array {0 string "x" 1 string "y"} age int 31] + ? 
{ - set r [::mongo::insert $mongoConn tutorial.persons [list name string Joe projects string abc age int 23 \ - classes array {0 object {$ref string courses $id oid 1}}]] + set r [::mongo::collection::insert $mongoColl \ + [list name string Joe \ + projects string abc \ + age int32 23 \ + classes array {0 document {$ref string courses $id oid 1}}]] string match "_id oid *" $r } 1 - ::mongo::insert $mongoConn tutorial.persons [list name string Gustaf projects string nsf age int 53] - ::mongo::insert $mongoConn tutorial.persons [list name string Stefan projects string nsf] - ::mongo::insert $mongoConn tutorial.persons [list name string Franz info object {x int 203 y int 102} age int 29 projects string gtat] - ::mongo::insert $mongoConn tutorial.persons [list name string Victor a array {0 string "x" 1 string "y"} age int 31] - ::mongo::insert $mongoConn tutorial.persons [list name string Selim ts timestamp {1302945037 1} d date 1302947619279] + ::mongo::collection::insert $mongoColl \ + [list name string Franz info document {x int 203 y int 102} age int 29 projects string gtat] + ::mongo::collection::insert $mongoColl \ + [list name string Selim ts timestamp {1302945037 1} d date 1302947619279] + ? {mongo::collection::count $mongoColl {}} 6 + puts stderr "\nCreate an index on name (ascending)" - ? {::mongo::index $mongoConn tutorial.persons [list name int 1]} 1 + ? {::mongo::collection::index $mongoColl [list name int 1]} 1 } puts stderr "\nFull content" -? {llength [::mongo::query $mongoConn tutorial.persons {}]} 6 +? {llength [::mongo::collection::query $mongoColl {}]} 6 puts stderr "\nProject members" ? { - llength [::mongo::query $mongoConn tutorial.persons \ - [list \$query object {projects string nsf} \$orderby object {name int 1}]] + llength [::mongo::collection::query $mongoColl \ + [list \$query document {projects string nsf} \$orderby document {name int 1}]] } 2 puts stderr "\nProject members of nsf sorted by name" ? { - set r [lindex [::mongo::query $mongoConn tutorial.persons \ - [list \$query object {projects string nsf} \$orderby object {name int 1}]] 0] + set r [lindex [::mongo::collection::query $mongoColl \ + [list \$query document {projects string nsf} \$orderby document {name int 1}]] 0] string match *Gustaf* $r } 1 puts stderr "\nAge > 30 (all atts)" ? { - set r [::mongo::query $mongoConn tutorial.persons [list \$query object {age object {$gt int 30}}]] + set r [::mongo::collection::query $mongoColl \ + [list \$query document {age document {$gt int 30}}]] set _ [llength $r]-[llength [lindex $r 0]] } 2-12 - + puts stderr "\nAge > 30 (only atts name and age, aside of _id)" ? { - set r [::mongo::query $mongoConn tutorial.persons [list \$query object {age object {$gt int 30}}] \ + set r [::mongo::collection::query $mongoColl \ + [list \$query document {age document {$gt int 30}}] \ -atts {name int 1 age int 1}] set _ [llength $r]-[llength [lindex $r 0]] } 2-9 puts stderr "\nCount Age > 30" -? {::mongo::count $mongoConn tutorial.persons {age object {$gt int 30}}} 2 +? {::mongo::collection::count $mongoColl {age document {$gt int 30}}} 2 puts stderr "\nAge > 30 (all atts, via cursor interface)" ? 
{ - set cursor [::mongo::cursor::find $mongoConn tutorial.persons [list \$query object {age object {$gt int 30}}]] + set cursor [::mongo::cursor::find $mongoColl \ + [list \$query document {age document {$gt int 30}}]] puts "Cursor: $cursor" set r0 [::mongo::cursor::next $cursor] set r1 [::mongo::cursor::next $cursor] @@ -96,7 +122,8 @@ puts stderr "\nAge > 30 (all atts, via cursor interface, tailable)" ? { - set cursor [::mongo::cursor::find $mongoConn tutorial.persons [list \$query object {age object {$gt int 30}}] -tailable] + set cursor [::mongo::cursor::find $mongoColl \ + [list \$query document {age document {$gt int 30}}] -tailable] if {$cursor ne ""} { set r "" while {1} { @@ -110,7 +137,8 @@ puts stderr "\nEmpty result (via cursor interface)" ? { - set cursor [::mongo::cursor::find $mongoConn tutorial.persons [list \$query object {age object {$gt int 300}}]] + set cursor [::mongo::cursor::find $mongoColl \ + [list \$query document {age document {$gt int 300}}]] if {$cursor ne ""} { set r {} while {1} { @@ -123,13 +151,26 @@ } 0 puts stderr "\nArray 'a' contains 'x'" -? {llength [::mongo::query $mongoConn tutorial.persons [list \$query object {a string "x"}]]} 1 +? {llength [::mongo::collection::query $mongoColl \ + [list \$query document {a string "x"}]]} 1 -puts stderr "\nEmbedded object has some value (info.y > 100)" -? {llength [::mongo::query $mongoConn tutorial.persons [list \$query object {info.y object {$gt int 100}}]]} 1 +puts stderr "\nEmbedded document has some value (info.y > 100)" +? {llength [::mongo::collection::query $mongoColl \ + [list \$query document {info.y document {$gt int 100}}]]} 1 puts stderr "\nProjects in {nsf gtat}" -? { llength [::mongo::query $mongoConn tutorial.persons \ - [list \$query object {projects object {$in array {0 string nsf 1 string gtat}}}]]} 3 +? 
{ llength [::mongo::collection::query $mongoColl \ + [list \$query document {projects document {$in array {0 string nsf 1 string gtat}}}]]} 3 +puts stderr "\nClose collecton $mongoColl" +::mongo::collection::close $mongoColl + +puts stderr "\nClose connection $mongoConn" ::mongo::close $mongoConn + +# +# Local variables: +# mode: tcl +# tcl-indent-level: 2 +# indent-tabs-mode: nil +# End: Index: library/mongodb/tests/nx-bi.test =================================================================== diff -u -r5c3834b15078b31970db26d0c65030ed1f66b18d -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/tests/nx-bi.test (.../nx-bi.test) (revision 5c3834b15078b31970db26d0c65030ed1f66b18d) +++ library/mongodb/tests/nx-bi.test (.../nx-bi.test) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -141,7 +141,8 @@ Posting show -puts stderr ====EXIT +nx::mongo::db close + ###################################################################### # Output ###################################################################### @@ -179,4 +180,12 @@ # author: {John S.}, # ts: {05-Nov-09 10:33}, # tags: [ nsf, nx, finance, economy ] -# } \ No newline at end of file +# } +###################################################################### + +# +# Local variables: +# mode: tcl +# tcl-indent-level: 2 +# indent-tabs-mode: nil +# End: Index: library/mongodb/tests/nx-mongo.test =================================================================== diff -u -r5c3834b15078b31970db26d0c65030ed1f66b18d -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/tests/nx-mongo.test (.../nx-mongo.test) (revision 5c3834b15078b31970db26d0c65030ed1f66b18d) +++ library/mongodb/tests/nx-mongo.test (.../nx-mongo.test) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -11,12 +11,20 @@ #nsf::configure debug 2 # Establish connection to the database -? {::nx::mongo::db connect -db "tutorial"} mongo:0 +? {::nx::mongo::db connect -db "tutorial"} mongoc_client_t:0 # Make sure, we start always from scratch -nx::mongo::db remove tutorial.persons {} -if {[::mongo::count [::nx::mongo::db cget -mongoConn] tutorial.persons {}] > 0} { +nx::mongo::db delete tutorial.persons {} + +# +# Create or lookup collection handle; the first operation is a create, +# the second a lookup. +# +? {::nx::mongo::db collection tutorial.persons} "mongoc_collection_t:0" +? {::nx::mongo::db collection tutorial.persons} "mongoc_collection_t:0" + +if {[::mongo::collection::count [::nx::mongo::db collection tutorial.persons] {}] > 0} { # when we create a capped colletion, we have to use "drop collection" to get rid of it. nx::mongo::db drop collection persons } @@ -122,4 +130,13 @@ ? {llength [set persons [Person find oldies]]} 1 ? {lsort [lmap p $persons {$p cget -name}]} "Gustaf" +# check autoclosing +nx::mongo::db close +# +# Local variables: +# mode: tcl +# tcl-indent-level: 2 +# indent-tabs-mode: nil +# End: + Index: library/mongodb/tests/nx-reference-many.test =================================================================== diff -u -r5c3834b15078b31970db26d0c65030ed1f66b18d -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/tests/nx-reference-many.test (.../nx-reference-many.test) (revision 5c3834b15078b31970db26d0c65030ed1f66b18d) +++ library/mongodb/tests/nx-reference-many.test (.../nx-reference-many.test) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -10,7 +10,7 @@ package require nx::test # Establish connection to the database -? {::nx::mongo::db connect -db "tutorial"} mongo:0 +? 
{::nx::mongo::db connect -db "tutorial"} mongoc_client_t:0 # Make sure, we start always from scratch nx::mongo::db drop collection groups @@ -160,5 +160,13 @@ # _id: 51fa2ea113760b0000000006, # name: stefan2 # } +###################################################################### +# +# Local variables: +# mode: tcl +# tcl-indent-level: 2 +# indent-tabs-mode: nil +# End: + Index: library/mongodb/tests/nx-reference-one.test =================================================================== diff -u -r5c3834b15078b31970db26d0c65030ed1f66b18d -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/tests/nx-reference-one.test (.../nx-reference-one.test) (revision 5c3834b15078b31970db26d0c65030ed1f66b18d) +++ library/mongodb/tests/nx-reference-one.test (.../nx-reference-one.test) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -11,7 +11,7 @@ package require nx::test # Establish connection to the database -? {::nx::mongo::db connect -db "tutorial"} mongo:0 +? {::nx::mongo::db connect -db "tutorial"} mongoc_client_t:0 # Make sure, we start always from scratch nx::mongo::db drop collection users @@ -144,3 +144,11 @@ # name: SmithR # } # + +###################################################################### +# +# Local variables: +# mode: tcl +# tcl-indent-level: 2 +# indent-tabs-mode: nil +# End: Index: library/mongodb/tests/nx-rep.test =================================================================== diff -u -r4940f1317b9827162d7a0d28c74da0758ffe2d29 -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/tests/nx-rep.test (.../nx-rep.test) (revision 4940f1317b9827162d7a0d28c74da0758ffe2d29) +++ library/mongodb/tests/nx-rep.test (.../nx-rep.test) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -53,7 +53,7 @@ set body {} set c 0 foreach {k v} [$obj eval [list array get :$name]] { - lappend body [incr c] object [list k string $k v string $v] + lappend body [incr c] document [list k string $k v string $v] } return [list array $body] } @@ -73,7 +73,7 @@ dict for {k v} [$obj eval [list set :$name]] { lappend body $k string $v } - return [list object $body] + return [list document $body] } :public method "bson rep decode dict" {slot name bsontype value} { set result "" @@ -133,7 +133,7 @@ Foo show -puts stderr ====EXIT +puts stderr "====EXIT [info script]" ###################################################################### # Output ###################################################################### @@ -151,3 +151,10 @@ # title: {Hello World}, # tags: [ a ] # } +###################################################################### +# +# Local variables: +# mode: tcl +# tcl-indent-level: 2 +# indent-tabs-mode: nil +# End: Index: library/mongodb/tests/nx-serialize.test =================================================================== diff -u -r4940f1317b9827162d7a0d28c74da0758ffe2d29 -rcef3de5c4f65e767d0c66389bacc77bc3c2e5a68 --- library/mongodb/tests/nx-serialize.test (.../nx-serialize.test) (revision 4940f1317b9827162d7a0d28c74da0758ffe2d29) +++ library/mongodb/tests/nx-serialize.test (.../nx-serialize.test) (revision cef3de5c4f65e767d0c66389bacc77bc3c2e5a68) @@ -172,5 +172,10 @@ ? {Foo countArrays} 2 -exit - +###################################################################### +# +# Local variables: +# mode: tcl +# tcl-indent-level: 2 +# indent-tabs-mode: nil +# End:
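
A compact usage sketch of the reworked collection-based Tcl interface follows, pieced
together from the calls exercised in nsf-mongo.test above. It is only a sketch: it
assumes the nsfmongo extension is already loaded and a mongod instance is reachable
via the default URI; the database/collection names ("tutorial", "persons") and the
sample tuple are taken from the test data above.

   # Sketch only: assumes the nsfmongo extension is loaded and a local
   # mongod is reachable via the default URI.
   set mongoConn [::mongo::connect]
   set mongoColl [::mongo::collection::open $mongoConn tutorial persons]

   # insert a tuple and verify that it arrived
   ::mongo::collection::insert $mongoColl \
       [list name string Gustaf projects string nsf age int32 53]
   puts "count: [::mongo::collection::count $mongoColl {}]"

   # one-shot query ...
   puts [::mongo::collection::query $mongoColl \
             [list \$query document {projects string nsf}]]

   # ... or iterate via a cursor; cursor::next returns "" when exhausted
   set cursor [::mongo::cursor::find $mongoColl \
                   [list \$query document {projects string nsf}]]
   while {[set tuple [::mongo::cursor::next $cursor]] ne ""} {
       puts $tuple
   }
   ::mongo::cursor::close $cursor

   # release the handles
   ::mongo::collection::close $mongoColl
   ::mongo::close $mongoConn

Note that, unlike the old namespace-string interface, every data operation now takes a
collection handle obtained via ::mongo::collection::open (or, at the nx level, via the
new "::nx::mongo::db collection" method); the tests close these handles explicitly,
while the nx::mongo layer closes its cached collection handles automatically in its
close method.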