summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJörg Frings-Fürst <debian@jff-webhosting.net>2014-12-02 10:06:21 +0100
committerJörg Frings-Fürst <debian@jff-webhosting.net>2014-12-02 10:06:21 +0100
commitfd841e416881cc0392e61ec312c1870f3a0004bd (patch)
tree8357ba56e79d614ba57f722e7878b853591dc339
Initial import of libmongo-client version 0.1.8-2
-rw-r--r--.gitignore37
-rw-r--r--.travis.yml17
-rw-r--r--Doxyfile.in151
-rw-r--r--LICENSE202
-rw-r--r--Makefile.am11
-rw-r--r--NEWS341
-rw-r--r--README.md69
-rw-r--r--configure.ac147
-rw-r--r--debian/README.source19
-rw-r--r--debian/changelog161
-rw-r--r--debian/compat1
-rw-r--r--debian/control81
-rw-r--r--debian/copyright52
-rw-r--r--debian/libmongo-client-dev.install4
-rw-r--r--debian/libmongo-client-doc.doc-base11
-rw-r--r--debian/libmongo-client-doc.install4
-rw-r--r--debian/libmongo-client-doc.links1
-rw-r--r--debian/libmongo-client-doc.lintian-overrides2
-rw-r--r--debian/libmongo-client0.docs1
-rw-r--r--debian/libmongo-client0.install1
-rw-r--r--debian/libmongo-client0.symbols9
-rwxr-xr-xdebian/rules49
-rw-r--r--debian/source/format1
-rw-r--r--debian/source/options1
-rw-r--r--debian/watch11
-rw-r--r--docs/Makefile.am2
-rw-r--r--docs/tutorial/Makefile.am13
-rw-r--r--docs/tutorial/examples/GNUmakefile36
-rw-r--r--docs/tutorial/examples/tut_bson_build.c81
-rw-r--r--docs/tutorial/examples/tut_bson_build.json16
-rw-r--r--docs/tutorial/examples/tut_bson_traverse.c123
-rw-r--r--docs/tutorial/examples/tut_hl_client.c107
-rw-r--r--docs/tutorial/examples/tut_json2bson.c132
-rw-r--r--docs/tutorial/examples/tut_mongo_sync.c273
-rw-r--r--docs/tutorial/examples/tut_mongo_sync_cmd_create.c82
-rw-r--r--docs/tutorial/examples/tut_mongo_sync_cmd_custom.c81
-rw-r--r--docs/tutorial/examples/tut_mongo_sync_cmd_index_create.c54
-rw-r--r--docs/tutorial/tut_bson.h10
-rw-r--r--docs/tutorial/tut_bson_build.h62
-rw-r--r--docs/tutorial/tut_bson_traverse.h135
-rw-r--r--docs/tutorial/tut_hl_client.h86
-rw-r--r--docs/tutorial/tut_json2bson.h97
-rw-r--r--docs/tutorial/tut_mongo_sync.h16
-rw-r--r--docs/tutorial/tut_mongo_sync_cmd_create.h53
-rw-r--r--docs/tutorial/tut_mongo_sync_cmd_custom.h64
-rw-r--r--docs/tutorial/tut_mongo_sync_cmd_index_create.h66
-rw-r--r--docs/tutorial/tut_mongo_sync_connect.h49
-rw-r--r--docs/tutorial/tut_mongo_sync_insert.h46
-rw-r--r--docs/tutorial/tut_mongo_sync_query.h67
-rw-r--r--docs/tutorial/tut_mongo_sync_query_complex.h43
-rw-r--r--docs/tutorial/tutorial.h34
-rw-r--r--examples/Makefile.am8
-rw-r--r--examples/bson-inspect.c341
-rw-r--r--examples/gridfs.c413
-rw-r--r--examples/mongo-dump.c224
-rw-r--r--m4/.placeholder0
-rw-r--r--src/Makefile.am51
-rw-r--r--src/bson.c1251
-rw-r--r--src/bson.h856
-rw-r--r--src/compat.c108
-rw-r--r--src/compat.h50
-rw-r--r--src/libmongo-client.pc.in12
-rw-r--r--src/libmongo-client.ver163
-rw-r--r--src/libmongo-macros.h51
-rw-r--r--src/libmongo-private.h276
-rw-r--r--src/mongo-client.c331
-rw-r--r--src/mongo-client.h116
-rw-r--r--src/mongo-sync-cursor.c118
-rw-r--r--src/mongo-sync-cursor.h103
-rw-r--r--src/mongo-sync-pool.c269
-rw-r--r--src/mongo-sync-pool.h133
-rw-r--r--src/mongo-sync.c2155
-rw-r--r--src/mongo-sync.h640
-rw-r--r--src/mongo-utils.c197
-rw-r--r--src/mongo-utils.h121
-rw-r--r--src/mongo-wire.c645
-rw-r--r--src/mongo-wire.h433
-rw-r--r--src/mongo.h49
-rw-r--r--src/sync-gridfs-chunk.c329
-rw-r--r--src/sync-gridfs-chunk.h134
-rw-r--r--src/sync-gridfs-stream.c507
-rw-r--r--src/sync-gridfs-stream.h141
-rw-r--r--src/sync-gridfs.c345
-rw-r--r--src/sync-gridfs.h193
-rw-r--r--tests/Makefile.am241
-rw-r--r--tests/README28
-rwxr-xr-xtests/coverage.sh43
-rw-r--r--tests/func/bson/f_weird_types.c71
-rw-r--r--tests/func/bson/huge_doc.c51
-rw-r--r--tests/func/mongo/client/f_client_big_packet.c57
-rw-r--r--tests/func/mongo/sync-cursor/f_sync_cursor_iterate.c88
-rw-r--r--tests/func/mongo/sync-cursor/f_sync_cursor_tailable.c115
-rw-r--r--tests/func/mongo/sync-gridfs-chunk/f_sync_gridfs_chunk.c499
-rw-r--r--tests/func/mongo/sync-gridfs-stream/f_sync_gridfs_stream.c501
-rw-r--r--tests/func/mongo/sync-pool/f_sync_pool.c169
-rw-r--r--tests/func/mongo/sync/f_sync_auto_reauth.c58
-rw-r--r--tests/func/mongo/sync/f_sync_auto_reconnect.c61
-rw-r--r--tests/func/mongo/sync/f_sync_auto_reconnect_cache.c107
-rw-r--r--tests/func/mongo/sync/f_sync_conn_seed_add.c58
-rw-r--r--tests/func/mongo/sync/f_sync_invalid_getlasterror.c27
-rw-r--r--tests/func/mongo/sync/f_sync_max_insert_size.c69
-rw-r--r--tests/func/mongo/sync/f_sync_oidtest.c44
-rw-r--r--tests/func/mongo/sync/f_sync_safe_mode.c112
-rw-r--r--tests/func/mongo/sync/f_sync_safe_mode_cache.c131
-rw-r--r--tests/func/mongo/sync/f_sync_write_error.c52
-rw-r--r--tests/libtap/Makefile.am4
-rw-r--r--tests/libtap/tap.c298
-rw-r--r--tests/libtap/tap.h85
-rw-r--r--tests/libtap/test.c183
-rw-r--r--tests/libtap/test.h84
-rw-r--r--tests/perf/bson/p_bson_find.c43
-rwxr-xr-xtests/runall17
-rw-r--r--tests/test_cleanup.c31
-rw-r--r--tests/tools/coverage-report-entry.pl70
-rw-r--r--tests/tools/coverage-report.pl125
-rw-r--r--tests/tools/coverage-report.xsl235
-rw-r--r--tests/unit/bson/bson_append_array.c65
-rw-r--r--tests/unit/bson/bson_append_binary.c56
-rw-r--r--tests/unit/bson/bson_append_boolean.c43
-rw-r--r--tests/unit/bson/bson_append_document.c67
-rw-r--r--tests/unit/bson/bson_append_double.c41
-rw-r--r--tests/unit/bson/bson_append_int32.c40
-rw-r--r--tests/unit/bson/bson_append_int64.c41
-rw-r--r--tests/unit/bson/bson_append_js_code.c66
-rw-r--r--tests/unit/bson/bson_append_js_code_w_scope.c79
-rw-r--r--tests/unit/bson/bson_append_null.c40
-rw-r--r--tests/unit/bson/bson_append_oid.c43
-rw-r--r--tests/unit/bson/bson_append_regexp.c45
-rw-r--r--tests/unit/bson/bson_append_string.c61
-rw-r--r--tests/unit/bson/bson_append_symbol.c61
-rw-r--r--tests/unit/bson/bson_append_timestamp.c41
-rw-r--r--tests/unit/bson/bson_append_utc_datetime.c41
-rw-r--r--tests/unit/bson/bson_build.c70
-rw-r--r--tests/unit/bson/bson_build_full.c71
-rw-r--r--tests/unit/bson/bson_cursor_find.c39
-rw-r--r--tests/unit/bson/bson_cursor_find_next.c33
-rw-r--r--tests/unit/bson/bson_cursor_get_array.c44
-rw-r--r--tests/unit/bson/bson_cursor_get_binary.c60
-rw-r--r--tests/unit/bson/bson_cursor_get_boolean.c43
-rw-r--r--tests/unit/bson/bson_cursor_get_document.c43
-rw-r--r--tests/unit/bson/bson_cursor_get_double.c43
-rw-r--r--tests/unit/bson/bson_cursor_get_int32.c43
-rw-r--r--tests/unit/bson/bson_cursor_get_int64.c45
-rw-r--r--tests/unit/bson/bson_cursor_get_javascript.c43
-rw-r--r--tests/unit/bson/bson_cursor_get_javascript_w_scope.c57
-rw-r--r--tests/unit/bson/bson_cursor_get_oid.c43
-rw-r--r--tests/unit/bson/bson_cursor_get_regex.c52
-rw-r--r--tests/unit/bson/bson_cursor_get_string.c43
-rw-r--r--tests/unit/bson/bson_cursor_get_symbol.c43
-rw-r--r--tests/unit/bson/bson_cursor_get_timestamp.c43
-rw-r--r--tests/unit/bson/bson_cursor_get_utc_datetime.c43
-rw-r--r--tests/unit/bson/bson_cursor_key.c30
-rw-r--r--tests/unit/bson/bson_cursor_new.c28
-rw-r--r--tests/unit/bson/bson_cursor_next.c42
-rw-r--r--tests/unit/bson/bson_cursor_type.c30
-rw-r--r--tests/unit/bson/bson_cursor_type_as_string.c31
-rw-r--r--tests/unit/bson/bson_empty.c22
-rw-r--r--tests/unit/bson/bson_find.c34
-rw-r--r--tests/unit/bson/bson_new.c28
-rw-r--r--tests/unit/bson/bson_new_from_data.c46
-rw-r--r--tests/unit/bson/bson_reset.c27
-rw-r--r--tests/unit/bson/bson_type_as_string.c40
-rw-r--r--tests/unit/bson/bson_validate_key.c36
-rw-r--r--tests/unit/mongo/client/connect.c34
-rw-r--r--tests/unit/mongo/client/connection_get_requestid.c44
-rw-r--r--tests/unit/mongo/client/connection_set_timeout.c33
-rw-r--r--tests/unit/mongo/client/disconnect.c32
-rw-r--r--tests/unit/mongo/client/packet_recv.c56
-rw-r--r--tests/unit/mongo/client/packet_send.c75
-rw-r--r--tests/unit/mongo/sync-cursor/sync_cursor_free.c34
-rw-r--r--tests/unit/mongo/sync-cursor/sync_cursor_get_data.c51
-rw-r--r--tests/unit/mongo/sync-cursor/sync_cursor_new.c40
-rw-r--r--tests/unit/mongo/sync-cursor/sync_cursor_next.c40
-rw-r--r--tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_cursor_get_chunk.c15
-rw-r--r--tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_cursor_new.c19
-rw-r--r--tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_free.c16
-rw-r--r--tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_new_from_buffer.c71
-rw-r--r--tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_find.c38
-rw-r--r--tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_close.c41
-rw-r--r--tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_find.c36
-rw-r--r--tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_new.c43
-rw-r--r--tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_read.c44
-rw-r--r--tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_seek.c65
-rw-r--r--tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_write.c50
-rw-r--r--tests/unit/mongo/sync-gridfs/sync_gridfs_file_get_metadata.c23
-rw-r--r--tests/unit/mongo/sync-gridfs/sync_gridfs_free.c35
-rw-r--r--tests/unit/mongo/sync-gridfs/sync_gridfs_get_set_chunk_size.c33
-rw-r--r--tests/unit/mongo/sync-gridfs/sync_gridfs_list.c34
-rw-r--r--tests/unit/mongo/sync-gridfs/sync_gridfs_new.c54
-rw-r--r--tests/unit/mongo/sync-gridfs/sync_gridfs_remove.c34
-rw-r--r--tests/unit/mongo/sync-pool/sync_pool_free.c11
-rw-r--r--tests/unit/mongo/sync-pool/sync_pool_new.c19
-rw-r--r--tests/unit/mongo/sync-pool/sync_pool_pick.c11
-rw-r--r--tests/unit/mongo/sync-pool/sync_pool_return.c22
-rw-r--r--tests/unit/mongo/sync/sync_cmd_authenticate.c112
-rw-r--r--tests/unit/mongo/sync/sync_cmd_authenticate_cache.c60
-rw-r--r--tests/unit/mongo/sync/sync_cmd_count.c119
-rw-r--r--tests/unit/mongo/sync/sync_cmd_create.c78
-rw-r--r--tests/unit/mongo/sync/sync_cmd_custom.c100
-rw-r--r--tests/unit/mongo/sync/sync_cmd_delete.c135
-rw-r--r--tests/unit/mongo/sync/sync_cmd_drop.c93
-rw-r--r--tests/unit/mongo/sync/sync_cmd_exists.c85
-rw-r--r--tests/unit/mongo/sync/sync_cmd_get_last_error.c35
-rw-r--r--tests/unit/mongo/sync/sync_cmd_get_last_error_full.c35
-rw-r--r--tests/unit/mongo/sync/sync_cmd_get_more.c135
-rw-r--r--tests/unit/mongo/sync/sync_cmd_index_create.c62
-rw-r--r--tests/unit/mongo/sync/sync_cmd_index_drop.c51
-rw-r--r--tests/unit/mongo/sync/sync_cmd_index_drop_all.c49
-rw-r--r--tests/unit/mongo/sync/sync_cmd_insert.c78
-rw-r--r--tests/unit/mongo/sync/sync_cmd_insert_n.c100
-rw-r--r--tests/unit/mongo/sync/sync_cmd_is_master.c65
-rw-r--r--tests/unit/mongo/sync/sync_cmd_kill_cursors.c123
-rw-r--r--tests/unit/mongo/sync/sync_cmd_ping.c81
-rw-r--r--tests/unit/mongo/sync/sync_cmd_query.c125
-rw-r--r--tests/unit/mongo/sync/sync_cmd_reset_error.c31
-rw-r--r--tests/unit/mongo/sync/sync_cmd_update.c97
-rw-r--r--tests/unit/mongo/sync/sync_cmd_user_add.c95
-rw-r--r--tests/unit/mongo/sync/sync_cmd_user_add_with_roles.c89
-rw-r--r--tests/unit/mongo/sync/sync_cmd_user_remove.c92
-rw-r--r--tests/unit/mongo/sync/sync_conn_seed_add.c24
-rw-r--r--tests/unit/mongo/sync/sync_conn_seed_add_cache.c31
-rw-r--r--tests/unit/mongo/sync/sync_connect.c22
-rw-r--r--tests/unit/mongo/sync/sync_connect_cache.c42
-rw-r--r--tests/unit/mongo/sync/sync_connect_from_cache_enforce_primary.c47
-rw-r--r--tests/unit/mongo/sync/sync_disconnect.c22
-rw-r--r--tests/unit/mongo/sync/sync_get_set_auto_reconnect.c39
-rw-r--r--tests/unit/mongo/sync/sync_get_set_max_insert_size.c44
-rw-r--r--tests/unit/mongo/sync/sync_get_set_safe_mode.c38
-rw-r--r--tests/unit/mongo/sync/sync_get_set_slaveok.c38
-rw-r--r--tests/unit/mongo/sync/sync_reconnect.c143
-rw-r--r--tests/unit/mongo/utils/oid_as_string.c26
-rw-r--r--tests/unit/mongo/utils/oid_init.c19
-rw-r--r--tests/unit/mongo/utils/oid_new.c49
-rw-r--r--tests/unit/mongo/utils/oid_new_with_time.c46
-rw-r--r--tests/unit/mongo/utils/parse_addr.c244
-rw-r--r--tests/unit/mongo/wire/cmd_custom.c67
-rw-r--r--tests/unit/mongo/wire/cmd_delete.c73
-rw-r--r--tests/unit/mongo/wire/cmd_get_more.c50
-rw-r--r--tests/unit/mongo/wire/cmd_insert.c83
-rw-r--r--tests/unit/mongo/wire/cmd_insert_n.c95
-rw-r--r--tests/unit/mongo/wire/cmd_kill_cursors.c58
-rw-r--r--tests/unit/mongo/wire/cmd_query.c117
-rw-r--r--tests/unit/mongo/wire/cmd_update.c97
-rw-r--r--tests/unit/mongo/wire/packet_get_set_data.c65
-rw-r--r--tests/unit/mongo/wire/packet_get_set_header.c58
-rw-r--r--tests/unit/mongo/wire/packet_get_set_header_raw.c56
-rw-r--r--tests/unit/mongo/wire/packet_new.c20
-rw-r--r--tests/unit/mongo/wire/reply_packet_get_data.c52
-rw-r--r--tests/unit/mongo/wire/reply_packet_get_header.c54
-rw-r--r--tests/unit/mongo/wire/reply_packet_get_nth_document.c68
250 files changed, 25022 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4ddce5a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,37 @@
+*~
+*.o
+*.la
+*.pc
+*.lo
+Makefile.in
+Makefile
+.deps/
+.libs/
+/aclocal.m4
+/autom4te.cache/
+/autoscan.log
+/compile
+/config.guess
+/config.h.in
+/config.h
+/config.log
+/config.status
+/config.sub
+/configure
+/configure.scan
+/depcomp
+/Doxyfile
+/examples/bson-inspect
+/examples/gridfs
+/examples/mongo-dump
+/install-sh
+/libtool
+/ltmain.sh
+/m4/*.m4
+/missing
+/tests/*.ok
+/docs/html/
+/docs/tutorial/examples/tut/
+/b/
+/.pc/
+/stamp-h1
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..81f8d5b
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,17 @@
+language: c
+install:
+ - sudo apt-get update -qq
+ - sudo apt-get install -qq libglib2.0-dev perl
+before_script:
+ - install -d m4
+ - autoreconf -i
+script:
+ - ./configure
+ - make
+ - "export TEST_PRIMARY=localhost:27017"
+ - make check
+compiler:
+ - gcc
+ - clang
+services:
+ - mongodb
diff --git a/Doxyfile.in b/Doxyfile.in
new file mode 100644
index 0000000..69e5e6e
--- /dev/null
+++ b/Doxyfile.in
@@ -0,0 +1,151 @@
+# Doxyfile 1.7.1 -*- conf-unix -*-
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+DOXYFILE_ENCODING = UTF-8
+PROJECT_NAME = libmongo-client
+PROJECT_NUMBER = @VERSION@
+
+OUTPUT_DIRECTORY = docs
+OUTPUT_LANGUAGE = English
+
+FULL_PATH_NAMES = YES
+STRIP_FROM_PATH = @top_srcdir@
+SHORT_NAMES = NO
+
+JAVADOC_AUTOBRIEF = YES
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+TYPEDEF_HIDES_STRUCT = YES
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+EXTRACT_ALL = NO
+EXTRACT_PRIVATE = YES
+EXTRACT_STATIC = NO
+EXTRACT_LOCAL_CLASSES = YES
+
+HIDE_UNDOC_MEMBERS = NO
+INTERNAL_DOCS = YES
+
+GENERATE_TODOLIST = YES
+GENERATE_TESTLIST = YES
+GENERATE_BUGLIST = YES
+GENERATE_DEPRECATEDLIST= YES
+
+SHOW_USED_FILES = NO
+SHOW_FILES = NO
+SHOW_NAMESPACES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+QUIET = YES
+
+WARNINGS = YES
+WARN_IF_UNDOCUMENTED = YES
+WARN_IF_DOC_ERROR = YES
+WARN_NO_PARAMDOC = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+INPUT = @top_srcdir@/src/ @top_srcdir@/docs/ @top_srcdir@/docs/tutorial/
+INPUT_ENCODING = UTF-8
+
+EXAMPLE_PATH = @top_srcdir@/docs/tutorial/examples/
+EXAMPLE_PATTERNS = *.c *.json
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+SOURCE_BROWSER = NO
+INLINE_SOURCES = NO
+VERBATIM_HEADERS = NO
+
+STRIP_CODE_COMMENTS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+GENERATE_HTML = YES
+GENERATE_TREEVIEW = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+GENERATE_LATEX = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+GENERATE_RTF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+GENERATE_MAN = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+GENERATE_XML = NO
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+GENERATE_PERLMOD = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+ENABLE_PREPROCESSING = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+CLASS_DIAGRAMS = NO
+CLASS_GRAPH = NO
+COLLABORATION_GRAPH = YES
+GROUP_GRAPHS = YES
+INCLUDE_GRAPH = NO
+INCLUDED_BY_GRAPH = NO
+CALL_GRAPH = NO
+CALLER_GRAPH = NO
+GRAPHICAL_HIERARCHY = YES
+DIRECTORY_GRAPH = YES
+
+HIDE_UNDOC_RELATIONS = YES
+
+HAVE_DOT = YES
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 0000000..0a30aa0
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,11 @@
+SUBDIRS = docs src tests examples
+
+ACLOCAL_AMFLAGS = -I m4 --install
+EXTRA_DIST = NEWS README.md m4/.placeholder
+
+coverage:
+ @echo "Making $@ in src"
+ ($(am__cd) src && $(MAKE) $(AM_MAKEFLAGS) $@)
+
+doxygen:
+ $(AM_V_GEN)doxygen
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..14e7ca6
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,341 @@
+#+STARTUP: indent showall -*- org -*-
+
+* 0.1.8 - <2014-05-22 Thu>
+
+** Feature: Connection recovery cache
+
+This new feature makes the replica set support easier to use, and more
+robust. New functions include: `mongo_sync_conn_recovery_cache_new`,
+`mongo_sync_conn_recovery_cache_free`,
+`mongo_sync_conn_recovery_cache_discard`,
+`mongo_sync_conn_recovery_cache_seed_add`, and
+`mongo_sync_connect_recovery_cache`.
+
+** New function: mongo_sync_cmd_get_last_error_full
+
+The new `mongo_sync_cmd_get_last_error_full` function can be used to
+retrieve the last server-side error as a full BSON object, not just
+the textual error message.
+
+** New function: mongo_sync_conn_get_last_error
+
+Returns the last cached error message.
+
+** Portability: Test suite improvements
+
+The test suite was improved to work properly on even more platforms.
+
+** Bugfix: Support reconnecting to Unix domain sockets
+
+When a replica-set is set up using unix domain sockets, reconnecting
+failed. This has been corrected in this version.
+
+** Bugfix: Fix random reconnect failures
+
+When reconnecting, only close the old connection if it differs from
+the new one.
+
+** Bugfix: Memory leak in an error-case
+
+When sending a packet to MongoDB, while enforcing being connected to a
+master node, if ensuring that connection failed, memory was leaked.
+The hole has been plugged.
+
+* 0.1.7.1 - <2014-03-12 Wed>
+
+** Portability: Support HP-UX
+
+Compatibility code was added to make libmongo-client portable to HP-UX.
+
+** Portability: prove made optional
+
+On platforms where the `prove' utility is not readily available, a
+custom test runner can be used by setting the PROVE environment
+variable when running `make check'.
+
+** Bugfix: Safe-mode robustness improvement
+
+When safe mode is turned on, the code now verifies not only that
+getLastError() doesn't return an error, but also verifies that the
+command succeeds, too.
+
+* 0.1.7 - <2013-12-10 Tue>
+
+** Feature: Automatic re-authentication
+
+This release adds support for automatically re-authenticating when
+reconnecting, by storing the credentials used for the last
+authentication in a safe, memory-locked area.
+
+** New function: mongo_sync_cmd_user_add_with_roles()
+
+A new function was added to create users with their roles already set
+at the same time.
+
+** Bugfix: Enable subdir-objects for automake
+
+Since we have sources that reside in a subdirectory, to avoid
+conflicts and a lot of automake warnings, enable the subdir-objects
+automake option.
+
+* 0.1.6.3 - <2013-08-27 Tue>
+
+Another bugfix release with a couple of important bugfixes, and a few
+minor improvements here and there.
+
+** Improvement: configure.ac updated to modern autotools
+
+The configure.ac script was updated to not use obsolete macros, and to
+work with automake 1.13+ (where the aforementioned macros were
+removed).
+
+** Bugfix: Fix chunked GridFS file retrieval
+
+When retrieving files via the chunked GridFS API, order the chunks by
+their number, so reassembling them will succeed.
+
+** Bugfix: Stop crashing in case verification fails
+
+When safe-mode is enabled, but getLastError() fails, the library
+crashed due to an uninitialised pointer. This has been fixed in this
+release.
+
+* 0.1.6.2 - <2012-12-21 Fri>
+
+Another bugfix release with mostly minor improvements and
+documentation updates.
+
+** Feature: Support for the Binary subtype in GridFS
+
+The GridFS support in the library only supported the "Generic"
+subtype until now, but with this release, the (obsolete) "Binary"
+subtype is supported as well.
+
+** Bugfix: Plugged a memory leak in the GridFS code
+
+The GridFS code (the chunked file constructor, in particular) leaked
+the generated ObjectId. This has been corrected.
+
+** Documentation: New tutorials
+
+New tutorials were written for showing how to handle indexes, and how
+to run custom commands.
+
+* 0.1.6.1 - <2012-10-14 Sun>
+
+This is a quick bugfix release, that restores ABI compatibility with
+versions prior to 0.1.6, and cleans up a few other minor issues.
+
+* 0.1.6 - <2012-10-14 Sun>
+
+With this release, thanks to Conrad Hoffmann <ch@bitfehler.net>, it is
+now possible to connect to mongodb via unix sockets.
+
+See the documentation and the updated mongo-dump example for details.
+
+* 0.1.5 - <2012-04-13 Fri>
+
+This is a bugfix release, without any new functionality. It is
+expected that this will be the last release before a complete overhaul
+of the library.
+
+** Bugfix: Restore glib 2.12 compatibility
+
+The examples used features not available in glib 2.12, they were
+changed to not do that, and work with this old version too.
+
+** Bugfix: Do not accept invalid index specifications
+
+When creating an index, the library now bails out early with an error
+in case the index specification is not acceptable.
+
+** Bugfix: Fix glib sanity check when glib is on a non-standard path
+
+In case glib was installed to a path that is not on the compiler's
+default search path, the sanity check at configure time failed. This
+has been corrected.
+
+** Bugfix: bson_cursor_find() & co. must match the whole key
+
+Due to a silly matching logic in bson_cursor_find(), anything that
+built on it (bson_find() included) was able to match keys of which the
+sought name was a prefix.
+
+This is now fixed, and the find functions will correctly match the
+whole key.
+
+** Bugfix: Fixed OID generation on 64-bit big-endian machines
+
+Due to a rounding error, OID generation on certain 64-bit big-endian
+platforms resulted in the timestamp part always being zeroed out,
+which made OIDs generated on such platforms useless.
+
+The rounding error was eliminated.
+
+* 0.1.4 - <2011-08-27 Sat>
+
+This release is a minor update, with neither new functionality, nor
+any bugfixes, except in the packaging.
+
+It does come with slightly more documentation, however.
+
+* 0.1.3 - <2011-07-19 Tue>
+
+** New feature: GridFS support.
+This version introduces a set of GridFS APIs, to ease working with
+GridFS, be that retrieving files chunk by chunk, listing and removing
+files, or offering a file-like streaming API.
+
+** New function: mongo_sync_cmd_create()
+A new helper function was introduced to aid us in creating capped and
+pre-allocated collections.
+
+** New function: mongo_sync_cmd_exists()
+Another new helper function to aid in retrieving information about a
+collection - whether it exists, and if so, with what parameters.
+
+** New function: mongo_util_oid_as_string()
+When one needs to print the value of an ObjectId in human-readable
+hexadecimal format, this new function is the one to turn to.
+
+** New index options: background & sparse.
+It is now possible to create sparse indexes, and create them in the
+background, using two new option flags: MONGO_INDEX_BACKGROUND and
+MONGO_INDEX_SPARSE.
+
+* 0.1.2 - <2011-07-01 Fri>
+
+** Bugfix: mongo_packet_recv() waits for all data.
+Previously, mongo_packet_recv() did not wait for all data to arrive,
+and returned whatever already arrived at the time of reading.
+
+This has been corrected, mongo_packet_recv() correctly blocks now.
+
+** Implemented support for passive secondaries.
+Support was added for passive secondaries, they're now automatically
+discovered too.
+
+** New feature: Index handling functions
+Implemented some helper functions to create and delete indexes.
+
+** New function: bson_cursor_find()
+Combining the powers of bson_find() and bson_cursor_find_next(), this
+new function can find a key anywhere in a BSON object, yet, maintains
+the ability to continue a previous scan.
+
+** New function: mongo_connection_set_timeout()
+On systems that support it (most modern systems should), sets a
+timeout for send and receive operations. Setting the timeout to zero
+clears the timeout, and these calls will block forever.
+
+The timeout is not preserved across reconnects, if using the Sync
+API, however.
+
+** Removed dependency on OpenSSL
+Instead of using OpenSSL's MD5 functions, use the checksum
+capabilities of glib 2.16 and newer. This halves the dependencies of
+the library!
+
+* 0.1.1 - <2011-06-16 Thu>
+
+** Cursor-based query iterator API
+The new cursor-based query iterator API makes it possible to grab the
+results of a mongo_sync_cmd_query(), and conveniently iterate over the
+results, hiding the get_more() calls underneath.
+
+The implementation is clever enough to only query the database when it
+needs to, and when it does, query in bulk (asking for the same amount
+of documents that the last query returned).
+
+Thanks to Federico Rodriguez <fed.rod@gmail.com> for the suggestion.
+
+** New function: bson_validate_key()
+With this new function, one can validate BSON keys: whether they can
+contain dots (allowed when using dot-notation, forbidden otherwise),
+or whether they start with a '$' sign.
+
+It is up to the application developer to decide when to validate a
+key, and what restrictions make sense in the given context.
+
+The function does NOT do UTF-8 validation, that is completely left up
+to the application developer.
+
+** New function: bson_cursor_find_next()
+When we know that key G always comes after key A, but we do not care
+how far apart they may be, and we do not want to use a separate cursor
+(due to, for example, performance reasons), this new function can
+help: unlike bson_find(), this will start scanning from the current
+cursor position.
+
+** New function: bson_stream_doc_size()
+A little helper function to help determine the size of a BSON document
+when it's only available as a bytestream. This is mostly for use with
+bson_new_from_data().
+
+** Symbol versioning
+The symbols of the library can now be versioned, if configured with
+the --with-versioned-symbols configure flag (not enabled by default).
+
+* 0.1.0 - <2011-05-25 Wed>
+
+** Shared library
+The configure script now defaults to enabling shared library building
+by default (along with a static library).
+
+** Bug fixes
+The pkg-config file was setting an incorrect include directory in its
+Cflags. This is now fixed.
+
+** C++ Compatibility
+The public headers are now guarded by extern "C" {} wrappers, so that
+they can be included in C++ projects.
+
+** Performance enhancements
+A minor performance issue was corrected in the BSON code, that had the
+possibility of forcing unnecessary memory allocations. The library
+now allocates the proper amount of memory at the soonest time
+possible, so it doesn't have to grow it later on unnecessarily.
+
+Another performance issue was corrected in the BSON library:
+bson_find() was comparing key names in BSON objects with the sought
+for key using strcmp(), which was unacceptably slow when dealing with
+BSON objects that have a lot of keys. We now use memcmp(), which means
+we don't have to traverse the sought for key all the time.
+
+* 0.0.2 - <2011-05-07 Sat>
+
+** Replica set seeding support
+It is now possible to add seeds to a connection, so that the library
+can try connecting to those in case automatic discovery fails (or if
+the seeds are hidden).
+
+** Failover redesign
+Instead of preemptively pinging & checking for a master before each
+and every command, the library does so only upon errors. This way,
+when everything's working as expected, there is no extra
+overhead. Yet, if things go wrong, failover will still work.
+
+By design, automatic failover only occurs when an error is detected
+during a write operation. When an error occurs during read, it will be
+propagated back to the application.
+
+Automatic failover is disabled by default, and can be turned on via
+the mongo_sync_conn_set_auto_reconnect() function.
+
+** safe-mode support
+With safe mode enabled, extra care will be taken to ensure that data
+gets to the server, and that the library does everything within its
+power to maintain a connection.
+
+This means that after insert and update operations, the library will
+issue a getLastError command, and only return successfully if that
+command did not signal an error.
+
+Safe-mode also enables the previously default preemptive connection
+checks (along with the post-mortem failover).
+
+Safe-mode is off by default.
+
+* 0.0.1 - <2011-04-10 Sun>
+
+Initial public release.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..1cedd09
--- /dev/null
+++ b/README.md
@@ -0,0 +1,69 @@
+libmongo-client
+===============
+
+[![Build Status](https://travis-ci.org/algernon/libmongo-client.png?branch=master)](https://travis-ci.org/algernon/libmongo-client)
+
+This is an alternative C driver for [MongoDB][mongodb], with slightly
+different goals than the official one:
+
+libmongo-client is meant to be a stable (API, ABI and quality alike),
+clean, well documented and well tested shared library, that strives to
+make the most common use cases as convenient as possible.
+
+Features
+--------
+
+The main purpose of this library is to provide a well maintained
+implementation, that suits a wider range of needs than the original:
+
+* Well documented, easy, clean and stable API.
+* Comprehensive test suite, with over 90% code coverage, and
+ increasing.
+* Ability to easily construct mongodb commands, to be sent at a later
+ time (comes in handy when one is trying to write from a separate
+ thread).
+* ReplicaSet support, with support for automatic reconnecting and
+ discovery.
+* Safe-mode support, to optionally enable extra safety checks on
+ writes, that make sure the server accepted the write.
+* Convenient APIs to work with BSON objects and MongoDB documents.
+* Connection pooling.
+
+Apart from these, the library is meant to be used as a shared library,
+therefore API and ABI compatibility will be maintained, and only
+broken when absolutely necessary - in which case, a sufficiently
+bumped version will be branched off.
+
+Requirements
+------------
+
+Apart from [glib][glib] (with header files - usually found in a
+development package - installed), there are no other hard
+dependencies. Though, one will need [Perl][perl] (with a suitable
+version of Test::Harness, along with the prove utility) to run the
+test suite.
+
+To build the documentation, [Doxygen][doxygen] will be needed too.
+
+Installation
+------------
+
+The library follows the usual autotools way of installation (one will
+need libtool 2.2+ to build from git!):
+
+ $ git clone git://github.com/algernon/libmongo-client.git
+ $ cd libmongo-client
+ $ autoreconf -i
+ $ ./configure && make && make install
+
+License
+-------
+
+Although the code is not based on any other driver, it is released
+under the same Apache License, version 2.0 (included as the file
+LICENSE).
+
+ [mongodb]: http://www.mongodb.org/
+ [glib]: http://developer.gnome.org/glib/
+ [perl]: http://www.perl.org/
+ [doxygen]: http://www.stack.nl/~dimitri/doxygen/
diff --git a/configure.ac b/configure.ac
new file mode 100644
index 0000000..f698ec2
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,147 @@
+dnl Process this file with autoconf to produce a configure script.
+dnl
+
+AC_INIT([libmongo-client],[0.1.8],[algernon@madhouse-project.org],[libmongo-client],[https://github.com/algernon/libmongo-client])
+AM_INIT_AUTOMAKE([1.9 tar-ustar foreign no-dist-gzip dist-xz subdir-objects])
+
+dnl ***************************************************************************
+dnl dependencies
+
+GLIB_MIN_VERSION="2.12.0"
+OPENSSL_MIN_VERSION="0.9.8"
+
+dnl ***************************************************************************
+dnl Initial setup
+
+AC_CONFIG_MACRO_DIR([m4])
+m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
+AC_CONFIG_HEADERS([config.h])
+
+dnl ***************************************************************************
+dnl Checks for programs.
+AC_PROG_CC
+AC_PROG_CC_C_O
+
+AC_PROG_MAKE_SET
+PKG_PROG_PKG_CONFIG
+LT_INIT([shared])
+
+dnl ***************************************************************************
+dnl Miscellaneous headers
+dnl ***************************************************************************
+
+AC_HEADER_STDC
+
+dnl ***************************************************************************
+dnl Header checks
+dnl ***************************************************************************
+AC_CHECK_HEADERS([arpa/inet.h fcntl.h netinet/in.h sys/socket.h netdb.h])
+
+AC_EGREP_HEADER([MSG_NOSIGNAL], [sys/socket.h], AC_DEFINE([HAVE_MSG_NOSIGNAL], [1], [Define to 1 when your system supports MSG_NOSIGNAL]))
+
+dnl ***************************************************************************
+dnl Checks for libraries
+AC_CHECK_FUNC(socket,,
+ AC_CHECK_LIB(socket, socket))
+
+AC_FUNC_MMAP
+AC_TYPE_OFF_T
+AC_TYPE_PID_T
+AC_TYPE_SIZE_T
+AC_CHECK_FUNCS(memset socket getaddrinfo munmap strtol strerror)
+
+dnl ***************************************************************************
+dnl GLib headers/libraries
+dnl ***************************************************************************
+
+PKG_CHECK_MODULES(GLIB, glib-2.0 >= $GLIB_MIN_VERSION,,)
+
+old_CPPFLAGS=$CPPFLAGS
+CPPFLAGS="$GLIB_CFLAGS"
+old_LDFLAGS=$LDFLAGS
+LDFLAGS="$LDFLAGS $GLIB_LIBS"
+
+AC_CACHE_CHECK(sanity checking Glib headers,
+ blb_cv_glib_sane,
+[AC_RUN_IFELSE([AC_LANG_SOURCE([[
+#include <glib.h>
+
+int main()
+{
+ if (sizeof(long) != GLIB_SIZEOF_LONG)
+ return 1;
+ return 0;
+}
+]])],[blb_cv_glib_sane=yes],[blb_cv_glib_sane=no],[blb_cv_glib_sane=yes])])
+CPPFLAGS=$old_CPPFLAGS
+LDFLAGS=$old_LDFLAGS
+
+if test "x$blb_cv_glib_sane" = "xno"; then
+ AC_MSG_ERROR([Glib headers inconsistent with current compiler setting. You might be using 32 bit Glib with a 64 bit compiler, check PKG_CONFIG_PATH])
+fi
+
+dnl Check for g_checksum_new
+old_LIBS=$LIBS
+
+dnl to make sure we're using glib from the proper path
+LIBS=$GLIB_LIBS
+AC_CHECK_LIB(glib-2.0, g_checksum_new, [glib_checksum="yes"; with_openssl="0"], [glib_checksum="no"; with_openssl="1"])
+LIBS=$old_LIBS
+if test "x$glib_checksum" = "xno"; then
+ dnl Using the compat stuff disables symbol versioning
+ symbol_versioning=no
+ PKG_CHECK_MODULES(OPENSSL, openssl >= $OPENSSL_MIN_VERSION,, OPENSSL_LIBS="")
+ if test "x$OPENSSL_LIBS" = "x"; then
+ AC_MSG_ERROR([OpenSSL is required when glib-2.0 << 2.16.0])
+ fi
+fi
+
+AC_DEFINE_UNQUOTED(WITH_OPENSSL, $with_openssl, [Compile with OpenSSL])
+
+dnl ***************************************************************************
+dnl misc features to be enabled
+dnl ***************************************************************************
+AC_C_INLINE
+
+# Check for linker version script support.
+if test "x$symbol_versioning" != "xno"; then
+ ac_save_LDFLAGS=$LDFLAGS
+ AC_CACHE_CHECK(whether $LD supports symbol version scripts,
+ ac_cv_prog_ld_version_script,
+ [ac_cv_prog_ld_version_script=no
+ echo "TEST { local: *; };" > conftest.ver
+ LDFLAGS="$LDFLAGS -Wl,--version-script,conftest.ver"
+ _AC_LINK_IFELSE([AC_LANG_PROGRAM()],
+ [ac_cv_prog_ld_version_script=yes], [])
+ ])
+ LDFLAGS=$ac_save_LDFLAGS
+fi
+AM_CONDITIONAL([HAVE_VERSIONING], [test x$ac_cv_prog_ld_version_script = xyes])
+
+enable_value () {
+ case "$1" in
+ yes|y*)
+ echo 1
+ ;;
+ *)
+ echo 0
+ ;;
+ esac
+}
+
+AC_DEFINE_UNQUOTED(VERSIONED_SYMBOLS, `enable_value ${ac_cv_prog_ld_version_script}`,
+ [Define to 1 if symbol versioning is enabled])
+AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [package name])
+AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [version number])
+
+AC_CONFIG_FILES([Doxyfile
+ Makefile
+ docs/Makefile
+ docs/tutorial/Makefile
+ examples/Makefile
+ src/Makefile
+ src/libmongo-client.pc
+ tests/Makefile
+ tests/libtap/Makefile
+])
+AC_OUTPUT
diff --git a/debian/README.source b/debian/README.source
new file mode 100644
index 0000000..d390955
--- /dev/null
+++ b/debian/README.source
@@ -0,0 +1,19 @@
+Working with the Debianized libmongo-client
+===========================================
+
+The package is meant to be built from a git checkout, the original
+tarballs are git snapshots as well.
+
+Since I'm also upstream for the package, the upstream branch for
+git-buildpackage has been set to the 'stable' branch, while the Debian
+branch remained the default: 'debian'. This is set in the
+debian/gbp.conf file. If one wants to build a snapshot of master, that
+file will need an edit too.
+
+Other than this, whenever a new upload is finalised, the stable branch
+gets merged into the debian branch, and all is well: the Debian branch
+should contain the debianised sources, with all changes already
+applied, based on whichever upstream source is appropriate.
+
+--
+Gergely Nagy <algernon@madhouse-project.org>
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..758dd5c
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,161 @@
+libmongo-client (0.1.8-2) unstable; urgency=medium
+
+ * New Maintainer (Closes: #770801).
+ * debian/control:
+ - Set myself as maintainer.
+ - Bump Standards-Version to 3.9.6 (no changes required).
+ - Change Vcs-* to collab-maint since old Vcs is no longer available.
+ * debian/copyright:
+ - Add missing license for tests/tools/*.
+ - Add myself to the list of authors for debian/*.
+ * debian/rules:
+ - Remove useless override_dh_builddeb: xz is now standard.
+ - Remove oldstyle debhelper parts.
+ * debian/watch:
+ - disabled because upstream repository contains debian tarballs.
+
+ -- Jörg Frings-Fürst <debian@jff-webhosting.net> Tue, 02 Dec 2014 09:59:39 +0100
+
+libmongo-client (0.1.8-1) unstable; urgency=medium
+
+ * New upstream release.
+ * debian/symbols updated with the new LMC_0.1.8 version tag.
+ * debian/copyright updated to include 2014 in copyright years.
+ * debian/rules updated to drop --with-versioned-symbols: it is
+ automatically enabled now, if supported.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Thu, 22 May 2014 13:05:18 +0200
+
+libmongo-client (0.1.7.1-1) unstable; urgency=medium
+
+ * New upstream release.
+ + Improves the robustness of safe mode.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Fri, 14 Mar 2014 11:04:43 +0100
+
+libmongo-client (0.1.7-1) unstable; urgency=low
+
+ * New upstream release.
+ + Automatic re-authentication support, even when using automatic
+ failover with replicasets.
+ * debian/symbols updated with the new LMC_0.1.7 version tag.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Sun, 15 Dec 2013 11:37:03 +0100
+
+libmongo-client (0.1.6.3-2) unstable; urgency=low
+
+ * debian/copyright: Point to /usr/share/common-licenses/Apache-2.0 for
+ the full text of the license. (Thanks, lintian!)
+ * Bumped Standards-Version to 3.9.5, no changes necessary.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Tue, 05 Nov 2013 10:38:33 +0100
+
+libmongo-client (0.1.6.3-1) unstable; urgency=low
+
+ * New upstream release.
+ + Fixed chunked GridFS file retrieval, to keep the chunk order.
+ + Fixed a crash that happened when verification in safe-mode failed.
+ * Bumped Standards-Version to 3.9.4, no changes necessary.
+ * debian/copyright: Updated the copyright years.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Tue, 27 Aug 2013 10:10:37 +0200
+
+libmongo-client (0.1.6.2-1) unstable; urgency=low
+
+ * New upstream release.
+ + Minor memory leak fix in the GridFS code
+ + Support for the obsolete Binary subtype in GridFS files
+ + Documentation improvements
+ * Don't muck with m4/ during build, it is included upstream now.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Fri, 21 Dec 2012 12:35:43 +0100
+
+libmongo-client (0.1.6.1-3) unstable; urgency=low
+
+ * Make libmongo-client-dev depend on libglib2.0-dev. Thanks Michael
+ Biebl. (Closes: #690992)
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Sat, 20 Oct 2012 08:30:09 +0200
+
+libmongo-client (0.1.6.1-2) unstable; urgency=low
+
+ * Really build with verbose messages enabled, the test suite too.
+ * Install and clean the m4/ directory too during build, to make
+ backporting easier.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Mon, 15 Oct 2012 08:38:07 +0200
+
+libmongo-client (0.1.6.1-1) unstable; urgency=low
+
+ * New upstream bugfix release.
+ + Restores ABI compatibility with versions prior to 0.1.6.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Sun, 14 Oct 2012 23:15:47 +0200
+
+libmongo-client (0.1.6-1) unstable; urgency=low
+
+ * New upstream release.
+ * Build with verbose messages enabled.
+ * Use xz compression for source & binaries too.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Sun, 14 Oct 2012 21:18:39 +0200
+
+libmongo-client (0.1.5-1) unstable; urgency=low
+
+ * New upstream bugfix release.
+ + Fixes build on s390x, sparc64 and ppc64.
+ * Bump debhelper build-dependency to >= 9~
+ * Update debian/copyright to copyright-format-1.0.
+ * Bump Standards-Version to 3.9.3 (no other changes necessary)
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Fri, 13 Apr 2012 18:37:21 +0200
+
+libmongo-client (0.1.4-3) unstable; urgency=low
+
+ * Build-Depend on debhelper (>= 8.9.7~) for -arch/-indep override
+ support.
+ * Move doxygen & graphviz to Build-Depends-Indep, and only build docs
+ when building -indep. This'll save quite a bit of disk space on
+ buildds. Thanks Laszlo Boszormenyi <gcs@debian.hu> for the suggestion.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Wed, 14 Sep 2011 23:08:53 +0200
+
+libmongo-client (0.1.4-2) unstable; urgency=low
+
+ * Build a libmongo-client-doc package, with API docs, the tutorial and
+ examples. Based on a patch from Guido Günther <agx@sigxcpu.org>
+ (Closes: #639940).
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Sat, 03 Sep 2011 23:55:10 +0200
+
+libmongo-client (0.1.4-1) unstable; urgency=low
+
+ * New upstream release.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Sat, 27 Aug 2011 13:26:17 +0200
+
+libmongo-client (0.1.3-1) unstable; urgency=low
+
+ * New upstream release.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Tue, 19 Jul 2011 19:48:33 +0200
+
+libmongo-client (0.1.2-1) unstable; urgency=low
+
+ * New upstream release.
+ * Converted to multi-arch.
+ * Use DEP-5 format debian/copyright.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Fri, 01 Jul 2011 11:08:57 +0200
+
+libmongo-client (0.1.1-1) unstable; urgency=low
+
+ * New upstream release.
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Thu, 16 Jun 2011 12:25:16 +0200
+
+libmongo-client (0.1.0-1) unstable; urgency=low
+
+ * Initial release (Closes: #626969)
+
+ -- Gergely Nagy <algernon@madhouse-project.org> Wed, 25 May 2011 11:00:05 +0200
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..e922a67
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,81 @@
+Source: libmongo-client
+Priority: extra
+Maintainer: Jörg Frings-Fürst <debian@jff-webhosting.net>
+Build-Depends:
+ debhelper (>= 9~),
+ libglib2.0-dev, perl,
+ dh-autoreconf
+Build-Depends-Indep:
+ doxygen,
+ graphviz
+Standards-Version: 3.9.6
+Section: libs
+Homepage: https://github.com/algernon/libmongo-client
+Vcs-Browser: http://anonscm.debian.org/cgit/collab-maint/libmongo-client.git
+Vcs-Git: git://anonscm.debian.org/collab-maint/libmongo-client.git
+
+Package: libmongo-client0
+Architecture: any
+Multi-Arch: same
+Pre-Depends: ${misc:Pre-Depends}
+Depends:
+ ${shlibs:Depends},
+ ${misc:Depends}
+Description: Alternate C driver for the MongoDB document-oriented datastore
+ MongoDB is a high-performance, open source, schema-free
+ document-oriented data store.
+ .
+ This library provides an alternative C language driver, focusing on
+ stability, ease of use, striving to make the most common use cases as
+ convenient as possible.
+ .
+ Among its features are:
+ .
+ * Well documented, easy, clean and stable API.
+ * Support for asynchronous operation.
+ * ReplicaSet support, with support for automatic reconnecting and
+ discovery.
+ * Safe-mode support, to optionally enable extra safety checks on
+ writes.
+
+Package: libmongo-client-dev
+Section: libdevel
+Architecture: any
+Depends:
+ libmongo-client0 (= ${binary:Version}),
+ pkg-config,
+ libglib2.0-dev,
+ ${misc:Depends}
+Description: Development files for the alternate C driver for MongoDB
+ libmongo-client is an alternative C language driver to the MongoDB
+ document-oriented datastore.
+ .
+ This package is needed to compile programs against libmongo-client0,
+ as only it includes the header files and static libraries needed for
+ compiling.
+
+Package: libmongo-client0-dbg
+Section: debug
+Architecture: any
+Multi-Arch: same
+Depends:
+ libmongo-client0 (= ${binary:Version}),
+ ${misc:Depends}
+Description: Alternate C driver for MongoDB (debugging symbols)
+ libmongo-client is an alternative C language driver to the MongoDB
+ document-oriented datastore.
+ .
+ This package contains detached debugging symbols.
+ .
+ Most people will not need this package.
+
+Package: libmongo-client-doc
+Section: doc
+Architecture: all
+Depends: ${misc:Depends}
+Recommends: libjs-jquery
+Description: Documentation for the alternate C driver for MongoDB
+ libmongo-client is an alternative C language driver to the MongoDB
+ document-oriented datastore.
+ .
+ This package contains the API documentation, tutorials and examples.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..b613245
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,52 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: libmongo-client
+Upstream-Contact: Gergely Nagy <algernon@balabit.hu>
+Source: git://git.madhouse-project.org/projects/libmongo-client.git
+Copyright: Copyright (C) 2011-2014 Gergely Nagy <algernon@balabit.hu>
+License: Apache-2.0
+
+Files: *
+Copyright: 2011-2014 Gergely Nagy <algernon@balabit.hu>
+License: Apache-2.0
+
+Files: tests/tools/*
+Copyright: 2006 Daniel Berrange
+License: GPL-2+
+
+Files: debian/*
+Copyright: 2011-2014 Gergely Nagy <algernon@madhouse-project.org>
+ 2014 Jörg Frings-Fürst <debian@jff-webhosting.net>
+License: Apache-2.0
+
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems, the complete text of the Apache License can be found in
+ the file `/usr/share/common-licenses/Apache-2.0'.
+
+License: GPL-2+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ (at your option) any later version.
+ .
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ .
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ .
+ The complete text of the GNU General Public License
+ can be found in /usr/share/common-licenses/GPL-2 file.
diff --git a/debian/libmongo-client-dev.install b/debian/libmongo-client-dev.install
new file mode 100644
index 0000000..76f28fa
--- /dev/null
+++ b/debian/libmongo-client-dev.install
@@ -0,0 +1,4 @@
+usr/include/*
+usr/lib/*/lib*.a
+usr/lib/*/lib*.so
+usr/lib/*/pkgconfig/*
diff --git a/debian/libmongo-client-doc.doc-base b/debian/libmongo-client-doc.doc-base
new file mode 100644
index 0000000..02aca77
--- /dev/null
+++ b/debian/libmongo-client-doc.doc-base
@@ -0,0 +1,11 @@
+Document: libmongo-client
+Title: libmongo-client documentation
+Author: Gergely Nagy <algernon@madhouse-project.org>
+Abstract: API documentation and tutorial for the libmongo-client
+ library, an alternative C language driver for the MongoDB document
+ store.
+Section: Programming/C
+
+Format: HTML
+Index: /usr/share/doc/libmongo-client-doc/html/index.html
+Files: /usr/share/doc/libmongo-client-doc/html/*.html
diff --git a/debian/libmongo-client-doc.install b/debian/libmongo-client-doc.install
new file mode 100644
index 0000000..45dc8ae
--- /dev/null
+++ b/debian/libmongo-client-doc.install
@@ -0,0 +1,4 @@
+build-tree/docs/html/* usr/share/doc/libmongo-client0/html/
+docs/tutorial/examples/tut_* usr/share/doc/libmongo-client0/examples/tutorial/
+docs/tutorial/examples/GNUmakefile usr/share/doc/libmongo-client0/examples/tutorial/
+examples/*.c usr/share/doc/libmongo-client0/examples/
diff --git a/debian/libmongo-client-doc.links b/debian/libmongo-client-doc.links
new file mode 100644
index 0000000..23b7085
--- /dev/null
+++ b/debian/libmongo-client-doc.links
@@ -0,0 +1 @@
+/usr/share/javascript/jquery/jquery.js usr/share/doc/libmongo-client0/html/jquery.js
diff --git a/debian/libmongo-client-doc.lintian-overrides b/debian/libmongo-client-doc.lintian-overrides
new file mode 100644
index 0000000..3cf1246
--- /dev/null
+++ b/debian/libmongo-client-doc.lintian-overrides
@@ -0,0 +1,2 @@
+# doxygen generated stuff, it's not feasible to remove the duplicates.
+libmongo-client-doc: duplicate-files *
diff --git a/debian/libmongo-client0.docs b/debian/libmongo-client0.docs
new file mode 100644
index 0000000..edc0071
--- /dev/null
+++ b/debian/libmongo-client0.docs
@@ -0,0 +1 @@
+NEWS
diff --git a/debian/libmongo-client0.install b/debian/libmongo-client0.install
new file mode 100644
index 0000000..3ddde58
--- /dev/null
+++ b/debian/libmongo-client0.install
@@ -0,0 +1 @@
+usr/lib/*/lib*.so.*
diff --git a/debian/libmongo-client0.symbols b/debian/libmongo-client0.symbols
new file mode 100644
index 0000000..b3d154c
--- /dev/null
+++ b/debian/libmongo-client0.symbols
@@ -0,0 +1,9 @@
+libmongo-client.so.0 #PACKAGE# #MINVER#
+ (symver)LMC_0.1.0 0.1.0
+ (symver)LMC_0.1.0_INTERNAL 0.1.0
+ (symver)LMC_0.1.1 0.1.1
+ (symver)LMC_0.1.2 0.1.2
+ (symver)LMC_0.1.3 0.1.3
+ (symver)LMC_0.1.6 0.1.6
+ (symver)LMC_0.1.7 0.1.7
+ (symver)LMC_0.1.8 0.1.8
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..c4ddaa7
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,49 @@
+#!/usr/bin/make -f
+# -*- makefile -*-
+
+export DH_OPTIONS += -O-Bbuild-tree
+
+UPSTREAM_VERSION=$(shell dpkg-parsechangelog | sed -n '/\(Version:\)/{s/^Version:[[:space:]]\+\([0-9]\+:\)\?\(.*\)/\2/p}' | rev | cut -d- -f 2- | rev)
+
+%:
+ dh $@ --with autoreconf
+
+##
+# Arch dependent overrides
+##
+override_dh_autoreconf-arch:
+ dh_autoreconf
+
+override_dh_auto_configure-arch:
+ dh_auto_configure -- --enable-shared
+
+override_dh_auto_build-arch:
+ dh_auto_build -- V=1
+
+override_dh_auto_test-arch:
+ dh_auto_test -- V=1
+
+##
+# Arch independent overrides
+##
+override_dh_autoreconf-indep override_dh_auto_configure-indep: ;
+override_dh_auto_test-indep override_dh_auto_install-indep: ;
+override_dh_auto_build-indep:
+ install -d build-tree/docs/html
+ cd build-tree && sed -e "s,@VERSION@,${UPSTREAM_VERSION},g" \
+ -e "s,@top_srcdir@,../,g" \
+ <../Doxyfile.in >Doxyfile
+ cd build-tree && doxygen
+
+##
+# Overrides common to both
+##
+override_dh_installdocs:
+ dh_installdocs --link-doc=libmongo-client0
+
+override_dh_compress:
+ dh_compress -Xusr/share/doc/libmongo-client0/examples/ \
+ -Xusr/share/doc/libmongo-client0/html/
+
+override_dh_strip:
+ dh_strip --dbg-package=libmongo-client0-dbg
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000..163aaf8
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/debian/source/options b/debian/source/options
new file mode 100644
index 0000000..b7bc1f2
--- /dev/null
+++ b/debian/source/options
@@ -0,0 +1 @@
+compression = "xz"
diff --git a/debian/watch b/debian/watch
new file mode 100644
index 0000000..c324c04
--- /dev/null
+++ b/debian/watch
@@ -0,0 +1,11 @@
+# Compulsory line, this is a version 3 file
+version=3
+
+#
+# Watch file disabled because the github repository
+# contains debian tarballs
+#
+
+#opts=filenamemangle=s/.+\/v?(\d\S*)\.tar\.gz/libmongo-client-$1\.tar\.gz/ \
+# https://github.com/algernon/libmongo-client/tags .*/v?(\d\S*)\.tar\.gz
+
diff --git a/docs/Makefile.am b/docs/Makefile.am
new file mode 100644
index 0000000..11996c1
--- /dev/null
+++ b/docs/Makefile.am
@@ -0,0 +1,2 @@
+SUBDIRS = tutorial
+
diff --git a/docs/tutorial/Makefile.am b/docs/tutorial/Makefile.am
new file mode 100644
index 0000000..4ebd301
--- /dev/null
+++ b/docs/tutorial/Makefile.am
@@ -0,0 +1,13 @@
+EXTRA_DIST = tutorial.h \
+ examples/GNUmakefile \
+ tut_bson.h \
+ tut_bson_build.h examples/tut_bson_build.c examples/tut_bson_build.json \
+ tut_bson_traverse.h examples/tut_bson_traverse.c \
+ tut_mongo_sync.h \
+ tut_mongo_sync_connect.h tut_mongo_sync_insert.h \
+ tut_mongo_sync_query.h tut_mongo_sync_query_complex.h \
+ examples/tut_mongo_sync.c \
+ tut_mongo_sync_cmd_create.h \
+ examples/tut_mongo_sync_cmd_create.c \
+ tut_hl_client.h examples/tut_hl_client.c \
+ tut_json2bson.h examples/tut_json2bson.c
diff --git a/docs/tutorial/examples/GNUmakefile b/docs/tutorial/examples/GNUmakefile
new file mode 100644
index 0000000..01b5363
--- /dev/null
+++ b/docs/tutorial/examples/GNUmakefile
@@ -0,0 +1,36 @@
+# NOTE: This Makefile assumes that a recent enough version of
+# libmongo-client is installed!
+#
+# It will NOT work in the build directory, without an installed
+# libmongo-client library.
+
+TUTORIAL_PROGRAMS = tut/bson_build \
+ tut/bson_traverse \
+ tut/mongo_sync \
+ tut/mongo_sync_cmd_create \
+ tut/mongo_sync_cmd_custom \
+ tut/mongo_sync_cmd_index_create \
+ tut/hl_client \
+ tut/json2bson
+
+LMC_CFLAGS = $(shell pkg-config --cflags libmongo-client)
+LMC_LIBS = $(shell pkg-config --libs libmongo-client)
+
+JSON_C_CFLAGS = $(shell pkg-config --cflags json)
+JSON_C_LIBS = $(shell pkg-config --libs json)
+
+TUT_CFLAGS = ${LMC_CFLAGS}
+TUT_LIBS = ${LMC_LIBS}
+
+all: ${TUTORIAL_PROGRAMS}
+clean:
+ rm -f ${TUTORIAL_PROGRAMS}
+ -rmdir tut/
+
+tut/json2bson: TUT_CFLAGS += ${JSON_C_CFLAGS}
+tut/json2bson: TUT_LIBS += ${JSON_C_LIBS}
+${TUTORIAL_PROGRAMS}: tut/%: tut_%.c
+ @install -d tut
+ ${CC} ${TUT_CFLAGS} ${CFLAGS} $< ${TUT_LIBS} ${LDFLAGS} -o $@
+
+.PHONY: all clean
diff --git a/docs/tutorial/examples/tut_bson_build.c b/docs/tutorial/examples/tut_bson_build.c
new file mode 100644
index 0000000..2624310
--- /dev/null
+++ b/docs/tutorial/examples/tut_bson_build.c
@@ -0,0 +1,81 @@
+#include <mongo.h>
+
+#include <string.h>
+#include <stdio.h>
+
+int
+main (void)
+{
+ bson *b_new, *b_builder, *b_builder_full;
+ bson *page1, *page2, *pages;
+
+ page1 = bson_new ();
+ bson_append_string (page1, "title", "BSON tutorial", -1);
+ bson_append_string (page1, "content", "...", -1);
+ bson_append_int32 (page1, "importance", 1);
+ bson_finish (page1);
+
+ page2 = bson_new ();
+ bson_append_string (page2, "title", "Some other thing", -1);
+ bson_append_string (page2, "content", "...", -1);
+ bson_append_int32 (page2, "importance", 0);
+ bson_finish (page2);
+
+ pages = bson_new ();
+ bson_append_document (pages, "1", page1);
+ bson_append_document (pages, "2", page2);
+ bson_finish (pages);
+
+ b_new = bson_new ();
+ bson_append_string (b_new, "author", "Gergely Nagy", -1);
+ bson_append_array (b_new, "pages", pages);
+ bson_append_boolean (b_new, "inline", TRUE);
+ bson_finish (b_new);
+
+ b_builder = bson_build (BSON_TYPE_STRING, "author", "Gergely Nagy", -1,
+ BSON_TYPE_ARRAY, "pages", pages,
+ BSON_TYPE_BOOLEAN, "inline", TRUE,
+ BSON_TYPE_NONE);
+ bson_finish (b_builder);
+
+ b_builder_full = bson_build_full
+ (BSON_TYPE_STRING, "author", FALSE, "Gergely Nagy", -1,
+ BSON_TYPE_ARRAY, "pages", TRUE,
+ bson_build_full (BSON_TYPE_DOCUMENT, "1", TRUE,
+ bson_build (BSON_TYPE_STRING, "title", "BSON tutorial", -1,
+ BSON_TYPE_STRING, "content", "...", -1,
+ BSON_TYPE_INT32, "importance", 1,
+ BSON_TYPE_NONE),
+ BSON_TYPE_DOCUMENT, "2", TRUE,
+ bson_build (BSON_TYPE_STRING, "title", "Some other thing", -1,
+ BSON_TYPE_STRING, "content", "...", -1,
+ BSON_TYPE_INT32, "importance", 0,
+ BSON_TYPE_NONE),
+ BSON_TYPE_NONE),
+ BSON_TYPE_BOOLEAN, "inline", FALSE, TRUE,
+ BSON_TYPE_NONE);
+ bson_finish (b_builder_full);
+
+ if (bson_size (b_new) != bson_size (b_builder) ||
+ bson_size (b_new) != bson_size (b_builder_full))
+ {
+ fprintf (stderr, "There's something fishy: the three BSON objects have different sizes");
+ return 1;
+ }
+
+ if (memcmp (bson_data (b_new), bson_data (b_builder), bson_size (b_new)) != 0 ||
+ memcmp (bson_data (b_new), bson_data (b_builder_full), bson_size (b_new)) != 0)
+ {
+ fprintf (stderr, "The BSON objects do not match. Something smells.");
+ return 1;
+ }
+
+ bson_free (b_builder_full);
+ bson_free (b_builder);
+ bson_free (b_new);
+ bson_free (pages);
+ bson_free (page2);
+ bson_free (page1);
+
+ return 0;
+}
diff --git a/docs/tutorial/examples/tut_bson_build.json b/docs/tutorial/examples/tut_bson_build.json
new file mode 100644
index 0000000..078cf53
--- /dev/null
+++ b/docs/tutorial/examples/tut_bson_build.json
@@ -0,0 +1,16 @@
+{
+ author: "Gergely Nagy",
+ pages: [
+ {
+ title: "BSON tutorial",
+ content: "...",
+ importance: 1
+ },
+ {
+ title: "Some other thing",
+ content: "...",
+ importance: 0
+ }
+ ],
+ inline: true
+}
diff --git a/docs/tutorial/examples/tut_bson_traverse.c b/docs/tutorial/examples/tut_bson_traverse.c
new file mode 100644
index 0000000..4be7b1d
--- /dev/null
+++ b/docs/tutorial/examples/tut_bson_traverse.c
@@ -0,0 +1,123 @@
+#include <mongo.h>
+
+#include <string.h>
+#include <stdio.h>
+
+bson *
+tut_bson (void)
+{
+ bson *b;
+
+ b = bson_build_full
+ (BSON_TYPE_STRING, "author", FALSE, "Gergely Nagy", -1,
+ BSON_TYPE_ARRAY, "pages", TRUE,
+ bson_build_full (BSON_TYPE_DOCUMENT, "1", TRUE,
+ bson_build (BSON_TYPE_STRING, "title", "BSON tutorial", -1,
+ BSON_TYPE_STRING, "content", "...", -1,
+ BSON_TYPE_INT32, "importance", 1,
+ BSON_TYPE_NONE),
+ BSON_TYPE_DOCUMENT, "2", TRUE,
+ bson_build (BSON_TYPE_STRING, "title", "Some other thing", -1,
+ BSON_TYPE_STRING, "content", "...", -1,
+ BSON_TYPE_INT32, "importance", 0,
+ BSON_TYPE_NONE),
+ BSON_TYPE_NONE),
+ BSON_TYPE_BOOLEAN, "inline", FALSE, TRUE,
+ BSON_TYPE_NONE);
+ bson_finish (b);
+
+ return b;
+}
+
+int
+main (void)
+{
+ bson *doc;
+ bson_cursor *c, *c_arr, *c_page;
+
+ bson *v_doc, *v_array;
+ gboolean v_bool;
+ const gchar *v_str;
+
+ doc = tut_bson ();
+
+ c = bson_find (doc, "author");
+ bson_cursor_get_string (c, &v_str);
+ printf ("Author: %s\n", v_str);
+
+ bson_cursor_next (c);
+ bson_cursor_next (c);
+
+ bson_cursor_get_boolean (c, &v_bool);
+ printf ("inline: %s\n", (v_bool) ? "TRUE" : "FALSE");
+
+ bson_cursor_free (c);
+
+ c = bson_find (doc, "author");
+ bson_cursor_get_string (c, &v_str);
+ bson_cursor_free (c);
+ c = bson_find (doc, "inline");
+ bson_cursor_get_boolean (c, &v_bool);
+ bson_cursor_free (c);
+
+ printf ("Author: %s; inline: %s; (bson_find)\n",
+ v_str, (v_bool) ? "TRUE" : "FALSE");
+
+ c = bson_find (doc, "author");
+ bson_cursor_get_string (c, &v_str);
+ while (bson_cursor_next (c))
+ {
+ if (strcmp (bson_cursor_key (c), "inline") == 0)
+ {
+ bson_cursor_get_boolean (c, &v_bool);
+ break;
+ }
+ }
+ bson_cursor_free (c);
+
+ printf ("Author: %s; inline: %s; (bson_cursor_next)\n",
+ v_str, (v_bool) ? "TRUE" : "FALSE");
+
+ c = bson_find (doc, "author");
+ bson_cursor_get_string (c, &v_str);
+ bson_cursor_find_next (c, "inline");
+ bson_cursor_get_boolean (c, &v_bool);
+ bson_cursor_free (c);
+
+ printf ("Author: %s; inline: %s; (bson_cursor_find_next)\n",
+ v_str, (v_bool) ? "TRUE" : "FALSE");
+
+ c = bson_find (doc, "pages");
+ bson_cursor_find (c, "inline");
+ bson_cursor_get_boolean (c, &v_bool);
+ bson_cursor_find (c, "author");
+ bson_cursor_get_string (c, &v_str);
+ bson_cursor_free (c);
+
+ printf ("Author: %s; inline: %s; (bson_cursor_find)\n",
+ v_str, (v_bool) ? "TRUE" : "FALSE");
+
+ c = bson_cursor_new (doc);
+ while (bson_cursor_next (c))
+ {
+ printf ("Key: %s; type=%s\n", bson_cursor_key (c),
+ bson_cursor_type_as_string (c));
+ }
+ bson_cursor_free (c);
+
+ c = bson_find (doc, "pages");
+ bson_cursor_get_array (c, &v_array);
+ c_arr = bson_find (v_array, "2");
+ bson_cursor_get_document (c_arr, &v_doc);
+ c_page = bson_find (v_doc, "title");
+ bson_cursor_get_string (c_page, &v_str);
+
+ bson_cursor_free (c_page);
+ bson_cursor_free (c_arr);
+ bson_cursor_free (c);
+
+ printf ("Title of the 2nd page in the pages array: %s\n", v_str);
+
+ bson_free (doc);
+ return 0;
+}
diff --git a/docs/tutorial/examples/tut_hl_client.c b/docs/tutorial/examples/tut_hl_client.c
new file mode 100644
index 0000000..68ceb8f
--- /dev/null
+++ b/docs/tutorial/examples/tut_hl_client.c
@@ -0,0 +1,107 @@
+#include <mongo.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+
+static void
+do_inserts (mongo_sync_connection *conn)
+{
+ bson *base;
+ gint i;
+
+ base = bson_build
+ (BSON_TYPE_STRING, "tutorial-program", "tut_hl_client.c", -1,
+ BSON_TYPE_INT32, "the answer to life, the universe and everything", 42,
+ BSON_TYPE_NONE);
+ bson_finish (base);
+
+ for (i = 0; i < 1000; i++)
+ {
+ bson *n;
+
+ n = bson_new_from_data (bson_data (base), bson_size (base) - 1);
+ bson_append_int32 (n, "counter", i);
+ bson_finish (n);
+
+ if (!mongo_sync_cmd_insert (conn, "lmc.tutorial", n, NULL))
+ {
+ fprintf (stderr, "Error inserting document %d: %s\n", i,
+ strerror (errno));
+ exit (1);
+ }
+ bson_free (n);
+ }
+ bson_free (base);
+}
+
+static void
+do_query (mongo_sync_connection *conn)
+{
+ mongo_sync_cursor *c;
+ bson *query;
+ gchar *error = NULL;
+
+ query = bson_build
+ (BSON_TYPE_STRING, "tutorial-program", "tut_hl_client.c", -1,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+
+ c = mongo_sync_cursor_new (conn, "lmc.tutorial",
+ mongo_sync_cmd_query (conn, "lmc.tutorial", 0,
+ 0, 10, query, NULL));
+ if (!c)
+ {
+ fprintf (stderr, "Error creating the query cursor: %s\n",
+ strerror (errno));
+ exit (1);
+ }
+ bson_free (query);
+
+ while (mongo_sync_cursor_next (c))
+ {
+ bson *b = mongo_sync_cursor_get_data (c);
+ bson_cursor *bc;
+ gint32 cnt;
+
+ if (!b)
+ {
+ int e = errno;
+
+ mongo_sync_cmd_get_last_error (conn, "lmc", &error);
+ fprintf (stderr, "Error retrieving cursor data: %s\n",
+ (error) ? error : strerror (e));
+ exit (1);
+ }
+
+ bc = bson_find (b, "counter");
+ bson_cursor_get_int32 (bc, &cnt);
+ printf ("\rCounter: %d", cnt);
+
+ bson_cursor_free (bc);
+ bson_free (b);
+ }
+ printf ("\n");
+
+ mongo_sync_cursor_free (c);
+}
+
+int
+main (void)
+{
+ mongo_sync_connection *conn;
+
+ conn = mongo_sync_connect ("localhost", 27017, FALSE);
+ if (!conn)
+ {
+ fprintf (stderr, "Connection failed: %s\n", strerror (errno));
+ return 1;
+ }
+
+ do_inserts (conn);
+ do_query (conn);
+
+ mongo_sync_disconnect (conn);
+ return 0;
+}
diff --git a/docs/tutorial/examples/tut_json2bson.c b/docs/tutorial/examples/tut_json2bson.c
new file mode 100644
index 0000000..3ad5b9a
--- /dev/null
+++ b/docs/tutorial/examples/tut_json2bson.c
@@ -0,0 +1,132 @@
+#define __STRICT_ANSI__ 1
+
+#include <bson.h>
+#include <json.h>
+
+#include <stdio.h>
+#include <unistd.h>
+#include <glib.h>
+
+static bson *json_to_bson (struct json_object *json);
+
+static void
+json_key_to_bson_key (bson *b, void *val,
+ const gchar *key)
+{
+ switch (json_object_get_type (val))
+ {
+ case json_type_boolean:
+ bson_append_boolean (b, key, json_object_get_boolean (val));
+ break;
+ case json_type_double:
+ bson_append_double (b, key, json_object_get_double (val));
+ break;
+ case json_type_int:
+ bson_append_int32 (b, key, json_object_get_int (val));
+ break;
+ case json_type_string:
+ bson_append_string (b, key, json_object_get_string (val), -1);
+ break;
+ case json_type_object:
+ {
+ bson *sub;
+
+ sub = json_to_bson (val);
+ bson_append_document (b, key, sub);
+ bson_free (sub);
+ break;
+ }
+ case json_type_array:
+ {
+ gint pos;
+ bson *sub;
+
+ sub = bson_new ();
+
+ for (pos = 0; pos < json_object_array_length (val); pos++)
+ {
+ gchar *nk = g_strdup_printf ("%d", pos);
+
+ json_key_to_bson_key (sub, json_object_array_get_idx (val, pos),
+ nk);
+ g_free (nk);
+ }
+ bson_finish (sub);
+
+ bson_append_array (b, key, sub);
+ bson_free (sub);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static void
+json_to_bson_foreach (bson *b, struct json_object *json)
+{
+ json_object_object_foreach (json, key, val)
+ {
+ json_key_to_bson_key (b, val, key);
+ }
+}
+
+static bson *
+json_to_bson (struct json_object *json)
+{
+ bson *b;
+
+ b = bson_new ();
+ json_to_bson_foreach (b, json);
+ bson_finish (b);
+
+ return b;
+}
+
+int
+main (int argc, char **argv)
+{
+ GIOChannel *input;
+ GString *json_str;
+ GError *error = NULL;
+ struct json_tokener *tokener;
+
+ input = g_io_channel_unix_new (0);
+
+ json_str = g_string_new (NULL);
+ tokener = json_tokener_new ();
+
+ while (g_io_channel_read_line_string (input, json_str,
+ NULL, &error) == G_IO_STATUS_NORMAL)
+ {
+ struct json_object *json;
+ bson *bson;
+
+ json_tokener_reset (tokener);
+
+ json = json_tokener_parse_ex (tokener, json_str->str, json_str->len);
+ if (!json)
+ {
+ fprintf (stderr, "Error parsing json: %s\n", json_str->str);
+ break;
+ }
+
+ if (json_object_get_type (json) != json_type_object)
+ {
+ fprintf (stderr,
+ "Error: json's top-level object is not object: %s\n",
+ json_str->str);
+ json_object_put (json);
+ break;
+ }
+
+ bson = json_to_bson (json);
+ json_object_put (json);
+
+ write (1, bson_data (bson), bson_size (bson));
+
+ bson_free (bson);
+ }
+
+ return 0;
+}
diff --git a/docs/tutorial/examples/tut_mongo_sync.c b/docs/tutorial/examples/tut_mongo_sync.c
new file mode 100644
index 0000000..ff27560
--- /dev/null
+++ b/docs/tutorial/examples/tut_mongo_sync.c
@@ -0,0 +1,273 @@
+#include <mongo.h>
+#include <errno.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+void
+tut_sync_connect (void)
+{
+ mongo_sync_connection *conn;
+
+ conn = mongo_sync_connect ("localhost", 27017, TRUE);
+ if (!conn)
+ {
+ perror ("mongo_sync_connect()");
+ exit (1);
+ }
+ mongo_sync_disconnect (conn);
+}
+
+void
+tut_sync_connect_replica (void)
+{
+ mongo_sync_connection *conn;
+
+ conn = mongo_sync_connect ("mongo-master", 27017, TRUE);
+ if (!conn)
+ {
+ perror ("mongo_sync_connect()");
+ return;
+ }
+
+ if (!mongo_sync_conn_set_auto_reconnect (conn, TRUE))
+ {
+ perror ("mongo_sync_conn_set_auto_reconnect()");
+ return;
+ }
+
+ if (!mongo_sync_conn_seed_add (conn, "mongo-replica", 27017))
+ {
+ perror ("mongo_sync_conn_seed_add()");
+ return;
+ }
+ if (!mongo_sync_conn_seed_add (conn, "mongo-replica-2", 27017))
+ {
+ perror ("mongo_sync_conn_seed_add()");
+ return;
+ }
+
+ mongo_sync_disconnect (conn);
+}
+
+void
+tut_sync_insert (void)
+{
+ mongo_sync_connection *conn;
+ bson *doc1, *doc2, *doc3;
+
+ conn = mongo_sync_connect ("localhost", 27017, FALSE);
+ if (!conn)
+ {
+ perror ("mongo_sync_connect()");
+ exit (1);
+ }
+
+ doc1 = bson_build (BSON_TYPE_STRING, "hello", "world", -1,
+ BSON_TYPE_INT32, "the_final_answer", 42,
+ BSON_TYPE_BOOLEAN, "yes?", FALSE,
+ BSON_TYPE_INT32, "n", 1,
+ BSON_TYPE_NONE);
+ bson_finish (doc1);
+
+ if (!mongo_sync_cmd_insert (conn, "tutorial.docs", doc1, NULL))
+ {
+ perror ("mongo_sync_cmd_insert()");
+ exit (1);
+ }
+
+ doc2 = bson_build (BSON_TYPE_INT32, "n", 2,
+ BSON_TYPE_BOOLEAN, "yes?", FALSE,
+ BSON_TYPE_STRING, "hello", "dolly", -1,
+ BSON_TYPE_NONE);
+ bson_finish (doc2);
+
+ doc3 = bson_build (BSON_TYPE_INT32, "n", 3,
+ BSON_TYPE_STRING, "hello", "nurse", -1,
+ BSON_TYPE_BOOLEAN, "yes?", TRUE,
+ BSON_TYPE_NONE);
+ bson_finish (doc3);
+
+ if (!mongo_sync_cmd_insert (conn, "tutorial.docs", doc2, doc3, NULL))
+ {
+ perror ("mongo_sync_cmd_insert()");
+ exit (1);
+ }
+
+ bson_free (doc3);
+ bson_free (doc2);
+ bson_free (doc1);
+
+ mongo_sync_disconnect (conn);
+}
+
+void
+tut_sync_query_simple (void)
+{
+ mongo_sync_connection *conn;
+ mongo_packet *p;
+ mongo_sync_cursor *cursor;
+ bson *query;
+ gint i = 0;
+
+ conn = mongo_sync_connect ("localhost", 27017, FALSE);
+ if (!conn)
+ {
+ perror ("mongo_sync_connect()");
+ exit (1);
+ }
+
+ query = bson_new ();
+ bson_finish (query);
+
+ p = mongo_sync_cmd_query (conn, "tutorial.docs", 0,
+ 0, 10, query, NULL);
+ if (!p)
+ {
+ perror ("mongo_sync_cmd_query()");
+ exit (1);
+ }
+ bson_free (query);
+
+ cursor = mongo_sync_cursor_new (conn, "tutorial.docs", p);
+ if (!cursor)
+ {
+ perror ("mongo_sync_cursor_new()");
+ exit (1);
+ }
+
+ while (mongo_sync_cursor_next (cursor))
+ {
+ bson *result = mongo_sync_cursor_get_data (cursor);
+ bson_cursor *c;
+
+ if (!result)
+ {
+ perror ("mongo_sync_cursor_get_data()");
+ exit (1);
+ }
+
+ printf ("Keys in document #%d:\n", i);
+ c = bson_cursor_new (result);
+ while (bson_cursor_next (c))
+ printf ("\t%s\n", bson_cursor_key (c));
+
+ i++;
+ bson_cursor_free (c);
+ bson_free (result);
+ }
+
+ mongo_sync_cursor_free (cursor);
+ mongo_sync_disconnect (conn);
+}
+
+void
+tut_sync_query_complex (void)
+{
+ mongo_sync_connection *conn;
+ mongo_packet *p;
+ mongo_sync_cursor *cursor;
+ bson *query, *select;
+ gint i = 0;
+
+ conn = mongo_sync_connect ("localhost", 27017, FALSE);
+ if (!conn)
+ {
+ perror ("mongo_sync_connect()");
+ exit (1);
+ }
+
+ query = bson_build_full (BSON_TYPE_DOCUMENT, "$query", TRUE,
+ bson_build (BSON_TYPE_BOOLEAN, "yes?", FALSE,
+ BSON_TYPE_NONE),
+ BSON_TYPE_DOCUMENT, "$orderby", TRUE,
+ bson_build (BSON_TYPE_INT32, "n", 1,
+ BSON_TYPE_NONE),
+ BSON_TYPE_NONE);
+ bson_finish (query);
+
+ select = bson_build (BSON_TYPE_INT32, "hello", 1,
+ BSON_TYPE_INT32, "n", 1,
+ BSON_TYPE_INT32, "yes?", 1,
+ BSON_TYPE_NONE);
+ bson_finish (select);
+
+ p = mongo_sync_cmd_query (conn, "tutorial.docs", 0,
+ 0, 10, query, select);
+ if (!p)
+ {
+ perror ("mongo_sync_cmd_query()");
+ exit (1);
+ }
+ bson_free (query);
+ bson_free (select);
+
+ cursor = mongo_sync_cursor_new (conn, "tutorial.docs", p);
+ if (!cursor)
+ {
+ perror ("mongo_sync_cursor_new()");
+ exit (1);
+ }
+
+ while (mongo_sync_cursor_next (cursor))
+ {
+ const char *hello;
+ gint32 n;
+ gboolean yes;
+
+ bson *result;
+ bson_cursor *c;
+
+ result = mongo_sync_cursor_get_data (cursor);
+ if (!result)
+ {
+ perror ("mongo_sync_cursor_get_data()");
+ exit (1);
+ }
+
+ c = bson_find (result, "hello");
+ if (!bson_cursor_get_string (c, &hello))
+ {
+ perror ("bson_cursor_get_string()");
+ exit (1);
+ }
+ bson_cursor_free (c);
+
+ c = bson_find (result, "n");
+ if (!bson_cursor_get_int32 (c, &n))
+ {
+ perror ("bson_cursor_get_int32()");
+ exit (1);
+ }
+ bson_cursor_free (c);
+
+ c = bson_find (result, "yes?");
+ if (!bson_cursor_get_boolean (c, &yes))
+ {
+ perror ("bson_cursor_get_boolean()");
+ exit (1);
+ }
+ bson_cursor_free (c);
+
+ printf ("Document #%d: hello=%s; n=%d; yes?=%s\n",
+ i, hello, n, (yes) ? "TRUE" : "FALSE");
+
+ bson_free (result);
+ i++;
+ }
+
+ mongo_sync_cursor_free (cursor);
+ mongo_sync_disconnect (conn);
+}
+
+int
+main (int argc, char *argv[])
+{
+ tut_sync_connect ();
+ tut_sync_connect_replica ();
+ tut_sync_insert ();
+ tut_sync_query_simple ();
+ tut_sync_query_complex ();
+
+ return 0;
+}
diff --git a/docs/tutorial/examples/tut_mongo_sync_cmd_create.c b/docs/tutorial/examples/tut_mongo_sync_cmd_create.c
new file mode 100644
index 0000000..9b31c91
--- /dev/null
+++ b/docs/tutorial/examples/tut_mongo_sync_cmd_create.c
@@ -0,0 +1,82 @@
+#include <mongo.h>
+
+#include <errno.h>
+#include <stdio.h>
+
+static void
+print_coll_info (bson *info)
+{
+ bson_cursor *c = NULL;
+ bson *options = NULL;
+
+ const gchar *name;
+ gboolean capped = FALSE;
+ gint64 size = -1;
+ gint64 max = -1;
+
+ c = bson_find (info, "name");
+ bson_cursor_get_string (c, &name);
+ bson_cursor_find (c, "options");
+
+ bson_cursor_get_document (c, &options);
+
+ printf ("Options for %s:\n", name);
+
+ bson_cursor_free (c);
+ bson_free (info);
+
+ c = bson_find (options, "capped");
+ bson_cursor_get_boolean (c, &capped);
+ bson_cursor_free (c);
+
+ c = bson_find (options, "size");
+ bson_cursor_get_int64 (c, &size);
+ bson_cursor_free (c);
+
+ c = bson_find (options, "max");
+ bson_cursor_get_int64 (c, &max);
+ bson_cursor_free (c);
+
+ bson_free (options);
+
+ printf ("\tCapped: %s\n", (capped) ? "yes" : "no");
+ if (size > 0)
+ printf ("\tSize : %lu\n", size);
+ if (max > 0)
+ printf ("\tMax : %lu\n", max);
+ printf ("\n");
+}
+
+int
+main (void)
+{
+ mongo_sync_connection *conn;
+
+ conn = mongo_sync_connect ("localhost", 27017, FALSE);
+ if (!conn)
+ {
+ fprintf (stderr, "Connection failed: %s\n", strerror (errno));
+ return 1;
+ }
+
+ mongo_sync_cmd_create (conn, "lmc", "cmd_create", MONGO_COLLECTION_DEFAULTS);
+ print_coll_info (mongo_sync_cmd_exists (conn, "lmc", "cmd_create"));
+
+ mongo_sync_cmd_create (conn, "lmc", "cmd_create_capped",
+ MONGO_COLLECTION_CAPPED, 655360);
+ print_coll_info (mongo_sync_cmd_exists (conn, "lmc", "cmd_create_capped"));
+
+ mongo_sync_cmd_create (conn, "lmc", "cmd_create_capped_max",
+ MONGO_COLLECTION_CAPPED | MONGO_COLLECTION_CAPPED_MAX,
+ 655360, 100);
+ print_coll_info (mongo_sync_cmd_exists (conn, "lmc",
+ "cmd_create_capped_max"));
+
+ mongo_sync_cmd_create (conn, "lmc", "cmd_create_sized",
+ MONGO_COLLECTION_SIZED, 655360);
+ print_coll_info (mongo_sync_cmd_exists (conn, "lmc", "cmd_create_sized"));
+
+ mongo_sync_disconnect (conn);
+
+ return 0;
+}
diff --git a/docs/tutorial/examples/tut_mongo_sync_cmd_custom.c b/docs/tutorial/examples/tut_mongo_sync_cmd_custom.c
new file mode 100644
index 0000000..4e48b18
--- /dev/null
+++ b/docs/tutorial/examples/tut_mongo_sync_cmd_custom.c
@@ -0,0 +1,81 @@
+#include <mongo.h>
+
+#include <errno.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+int
+main (void)
+{
+ mongo_sync_connection *conn;
+ mongo_packet *p;
+ mongo_sync_cursor *cursor;
+ bson *eval;
+
+ conn = mongo_sync_connect ("localhost", 27017, FALSE);
+ if (!conn)
+ {
+ perror ("mongo_sync_connect()");
+ exit (1);
+ }
+
+ eval = bson_build_full (BSON_TYPE_JS_CODE, "$eval", FALSE,
+ "function(x){return x + 4.2;}", -1,
+ BSON_TYPE_ARRAY, "args", TRUE,
+ bson_build (BSON_TYPE_INT32, "0", 1,
+ BSON_TYPE_NONE),
+ BSON_TYPE_NONE);
+ bson_finish (eval);
+
+ p = mongo_sync_cmd_custom (conn, "test", eval);
+
+ if (!p)
+ {
+ gchar *error = NULL;
+
+ mongo_sync_cmd_get_last_error (conn, "test", &error);
+ fprintf (stderr, "Can't run db.eval: %s\n", error);
+ g_free (error);
+
+ exit (1);
+ }
+
+ cursor = mongo_sync_cursor_new (conn, "test", p);
+
+ if (!cursor)
+ {
+ perror ("mongo_sync_cursor_new()");
+ exit (1);
+ }
+
+ while (mongo_sync_cursor_next (cursor))
+ {
+ bson *result;
+ bson_cursor *c;
+ gdouble r;
+
+ result = mongo_sync_cursor_get_data (cursor);
+ if (!result)
+ {
+ perror ("mongo_sync_cursor_get_data()");
+ exit (1);
+ }
+
+ c = bson_find (result, "retval");
+ if (!bson_cursor_get_double (c, &r))
+ {
+ perror ("bson_cursor_get_double()");
+ exit (1);
+ }
+ bson_cursor_free (c);
+ bson_free (result);
+
+ printf ("Result: %2.1f\n", r);
+ }
+
+ mongo_sync_cursor_free (cursor);
+ mongo_sync_disconnect (conn);
+
+ return 0;
+}
diff --git a/docs/tutorial/examples/tut_mongo_sync_cmd_index_create.c b/docs/tutorial/examples/tut_mongo_sync_cmd_index_create.c
new file mode 100644
index 0000000..0e2f0b5
--- /dev/null
+++ b/docs/tutorial/examples/tut_mongo_sync_cmd_index_create.c
@@ -0,0 +1,54 @@
+#include <mongo.h>
+
+#include <errno.h>
+#include <stdio.h>
+
+static void
+create_and_verify_index(mongo_sync_connection *conn,
+ bson *index)
+{
+ if (!mongo_sync_cmd_index_create (conn, "lmc.indexed", index,
+ MONGO_INDEX_UNIQUE | MONGO_INDEX_DROP_DUPS |
+ MONGO_INDEX_SPARSE))
+ {
+ gchar *error = NULL;
+ int e = errno;
+
+ mongo_sync_cmd_get_last_error (conn, "lmc.indexed", &error);
+ fprintf (stderr, "Can't create indexes: %s\n", error ? error : strerror (e));
+ g_free (error);
+ }
+ else
+ printf ("Index successfully created!\n");
+}
+
+int
+main (void)
+{
+ mongo_sync_connection *conn;
+ bson *invalid_index, *index;
+
+ invalid_index = bson_build (BSON_TYPE_STRING, "name", "", -1,
+ BSON_TYPE_NONE);
+ bson_finish (invalid_index);
+
+ index = bson_build (BSON_TYPE_INT32, "name", 1,
+ BSON_TYPE_NONE);
+ bson_finish (index);
+
+ conn = mongo_sync_connect ("localhost", 27017, FALSE);
+ if (!conn)
+ {
+ fprintf (stderr, "Connection failed: %s\n", strerror (errno));
+ return 1;
+ }
+
+ create_and_verify_index (conn, invalid_index);
+ create_and_verify_index (conn, index);
+
+ bson_free (invalid_index);
+ bson_free (index);
+ mongo_sync_disconnect (conn);
+
+ return 0;
+}
diff --git a/docs/tutorial/tut_bson.h b/docs/tutorial/tut_bson.h
new file mode 100644
index 0000000..973f12d
--- /dev/null
+++ b/docs/tutorial/tut_bson.h
@@ -0,0 +1,10 @@
+/** @page tut_bson Working with BSON objects
+ *
+ * In this section, we'll cover the basics of working with BSON
+ * objects in a few big steps. Working with BSON is fairly
+ * straightforward, so we will not be going into much detail here.
+ *
+ * Contents:
+ * - @subpage tut_bson_build
+ * - @subpage tut_bson_traverse
+ */
diff --git a/docs/tutorial/tut_bson_build.h b/docs/tutorial/tut_bson_build.h
new file mode 100644
index 0000000..5c5eb54
--- /dev/null
+++ b/docs/tutorial/tut_bson_build.h
@@ -0,0 +1,62 @@
+/** @page tut_bson_build Building BSON objects
+ *
+ * Our first task will be to build a BSON document, which we can later
+ * insert into MongoDB. For this example, we want something more
+ * complex than a simple "Hello World"-style object, so we can
+ * showcase all the interesting functions of the BSON API.
+ *
+ * Let's build a document that would look like this, if we were writing
+ * JSON:
+ * @verbinclude tut_bson_build.json
+ *
+ * @dontinclude tut_bson_build.c
+ *
+ * First we start by including the main libmongo-client header. It's
+ * convenient to include the whole lot instead of including the used
+ * headers one by one, unless one's embedding only parts of the
+ * library.
+ * @until mongo.h
+ *
+ * @until {
+ *
+ * We'll be building the same BSON object in various different ways,
+ * so we declare a few more variables than we'd normally need.
+ * @until pages
+ *
+ * Next, we create the two pages:
+ * @until bson_finish (page2)
+ *
+ * Then we construct the "pages" array. Do note how we set the key to
+ * "1" and "2", and how pages is just a document! This is because in
+ * BSON, an array is a document that has a special type, and where
+ * keys are numbers.
+ * @until bson_finish (pages)
+ *
+ * Finally, now that we have all the subdocuments ready, we build up
+ * our main object:
+ * @until bson_finish (b_new)
+ *
+ * And that's about it! But surely, there is an easier way to do
+ * this... And indeed, there is, using bson_build():
+ * @until bson_finish (b_builder)
+ *
+ * Much cleaner, but still, we had to create the pages array in three
+ * steps beforehand. Couldn't we do it in one gigantic function call
+ * instead?
+ * @until bson_finish (b_builder_full)
+ *
+ * Wonderful! We have three BSON objects created now, in three
+ * different ways! But are they the same? That's really easy to figure
+ * out. As a quick check, we can compare their sizes: if they do not
+ * match, we can bail out fast:
+ * @until }
+ *
+ * Or, we can do a more expensive comparison, and compare the data:
+ * @until }
+ *
+ * And now that we are done, we free up the resources we allocated.
+ * @until bson_free (page1)
+ *
+ *
+ * @until }
+ */
diff --git a/docs/tutorial/tut_bson_traverse.h b/docs/tutorial/tut_bson_traverse.h
new file mode 100644
index 0000000..712ff71
--- /dev/null
+++ b/docs/tutorial/tut_bson_traverse.h
@@ -0,0 +1,135 @@
+/** @page tut_bson_traverse Traversing BSON objects
+ *
+ * Now that we can build BSON objects (see the @ref tut_bson_build
+ * "previous section"), it is time that we learn how to find things in
+ * them!
+ *
+ * We will use the same JSON structure as in the previous example.
+ *
+ * @dontinclude tut_bson_traverse.c
+ * First, we include our stuff, and create a function that gives us a
+ * BSON object to work with:
+ * @until return b
+ * @line }
+ *
+ * @until {
+ * First, we need a variable to hold our BSON object, and a cursor,
+ * with which we can find the keys we're looking for.
+ * @until bson_cursor
+ *
+ * Then we need a few variables to store the retrieved information
+ * in. Extracting data out of a BSON object involves storing them in a
+ * variable of our own.
+ * @until v_str
+ *
+ * @until doc = tut_bson
+ *
+ * In order to find something in a BSON object, we must know the key's
+ * name. There are two ways to find information within a BSON object:
+ * bson_find() and bson_cursor_next() and its friends.
+ *
+ * The first takes a BSON object, and returns a cursor that points to
+ * the key we wanted to find (or to NULL, if it was not found, of
+ * course). We can then extract that value, and either close the
+ * cursor, or use bson_cursor_next() to iterate over the rest of the
+ * keys.
+ *
+ * The advantage of bson_find() is that we can jump to a specific key
+ * easily, but at the cost of having to use a new cursor every time,
+ * which in turn will start searching from the beginning of a BSON
+ * document.
+ *
+ * On the other hand, bson_cursor_next() remembers its position, and
+ * jumping to the next key is straightforward. It can also be used in
+ * conjunction with bson_find() to locate the first key we're
+ * interested in, and move on to the next until we're done.
+ *
+ * For our first traversal example, lets say we want to see who the
+ * author of our BSON object is! We can extract that information by
+ * locating the "author" key, and retrieving the string contents:
+ * @until printf
+ *
+ * Now, if we also want to know whether the 'inline' key is set to
+ * true or false, we have two options: if we know the exact order of
+ * keys, we can use the appropriate amount of bson_cursor_next()
+ * calls:
+ * @until printf
+ *
+ * But this is not only ugly, it's very sensitive to the order of the
+ * keys, and it's hard to understand as well, without being familiar
+ * with the document's structure.
+ *
+ * So, while it is a little more inconvenient, it's better to find the
+ * key in some other way. But first, let's close our cursor, as we'll
+ * need to start again anyway...
+ * @until bson_cursor_free
+ *
+ * First, we'll go the easy route, and use bson_find():
+ * @until printf
+ *
+ * Now, the downside of this, is that we ran through the BSON object
+ * twice. Well, once and a little, since the "author" key was the
+ * first. But using bson_find() this way has the potential of
+ * traversing through a document multiple times.
+ *
+ * Now, if we know the one key always comes after another, then we can
+ * use bson_cursor_next() to help us find it, without having to
+ * restart from the beginning. We do this by extracting the key name
+ * from the cursor, and comparing it to whatever we're searching for:
+ * @until printf
+ *
+ * Now the above is still picky about key order, but it's at least
+ * more flexible: we can put any number of keys between author and
+ * inline, and it will work. In most cases, that's good enough. In
+ * every other case, where we have no idea about key ordering,
+ * bson_find() is still there for us.
+ *
+ * Even better, the library itself provides a function that does
+ * something similar: it takes a cursor and a key name, and attempts
+ * to find the key past the cursor's current position. So if we
+ * already have the author, we can use this function to find the next
+ * inline key as well:
+ * @until printf
+ *
+ * However, if we suspect that a key might come after the current
+ * position, but we're not sure, and still want to find the key with
+ * as little code as possible, we shall not despair! The
+ * bson_cursor_find() function does just that: it will try to find the
+ * key starting from the current position, and wrap over once if it
+ * can't.
+ *
+ * Let's see how it works! First, we find a key in the middle:
+ * @until bson_find
+ *
+ * Now that we have a key in the middle, lets find a key after it:
+ * @until get_boolean
+ *
+ * And once we have that - and we're at the end of our BSON object -,
+ * let's try finding the author key as well:
+ * @until get_string
+ *
+ * That works, just like that! To verify, we go ahead and print the
+ * results, which should be the same as it was in the previous
+ * examples:
+ *
+ * @until cursor_free
+ *
+ * @until printf
+ *
+ * One thing we did not explore yet, is bson_cursor_new(): this will
+ * create a new cursor, and point to the very beginning of our BSON
+ * object, just before the first key. Thus, the first
+ * bson_cursor_next() done afterwards will yield the first key.
+ * @until bson_cursor_free
+ *
+ * Very well! But what if we want to see the title of the second page
+ * within the pages array? Unfortunately, that is a bit more work to
+ * accomplish: we'll need to extract the pages array from our
+ * document, and then extract its second element, and then we can find
+ * stuff in that:
+ * @until printf
+ *
+ * And that concludes our BSON traversing tutorial!
+ *
+ * @until }
+ */
diff --git a/docs/tutorial/tut_hl_client.h b/docs/tutorial/tut_hl_client.h
new file mode 100644
index 0000000..8b8e806
--- /dev/null
+++ b/docs/tutorial/tut_hl_client.h
@@ -0,0 +1,86 @@
+/** @page tut_hl_client A full-blown application
+ *
+ * As the next step of our tutorial, we will write a full blown
+ * application. While it does not solve any real-life problems, and
+ * what it does is entirely pointless, it nevertheless is a good
+ * example to showcase certain patterns one is likely to run into
+ * while developing with libmongo-client.
+ *
+ * @dontinclude tut_hl_client.c
+ * @until string.h
+ *
+ * Our first task is to add a handful of items to our test
+ * collection. We'll have two static keys, and one that's different
+ * for each key.
+ * @until gint i
+ *
+ * First, we'll build a base BSON object:
+ * @until bson_finish
+ *
+ * Then, we create a copy, append a counter element to the object,
+ * insert it, and do this a thousand times over.
+ * @until bson_free
+ * @until }
+ *
+ * This was pretty simple, wasn't it? And we even have error handling!
+ * Lets finish this function up, and move on.
+ *
+ * @until }
+ *
+ * Next up comes the interesting part: doing queries. We will use the
+ * @ref mongo_sync_cursor "cursor API" to iterate over all our
+ * results, hiding the gory details of database access behind its
+ * convenience layer.
+ *
+ * @until {
+ *
+ * We'll need a couple of things: a cursor, a query, and a string to
+ * store error messages in, if any.
+ *
+ * @until error
+ *
+ * Before we can query the database, we must build a query object:
+ * @until bson_finish
+ *
+ * Once that is done, we create a cursor, cleverly embedding the
+ * mongo_sync_cmd_query() call into the constructor:
+ * @until bson_free
+ *
+ * Again, we properly handle errors. It is very important to not just
+ * blindly assume things will work. While the library tries its best
+ * to handle invalid data gracefully, it's easy to get lost between
+ * the layers when one forgets to handle error cases at the
+ * appropriate level.
+ *
+ * But I digress, lets get back to our program!
+ *
+ * We have a nice little query cursor, it's time to loop through the
+ * database, extract the counter from the current BSON object, and
+ * move on:
+ * @until }
+ *
+ * At this point, we have the current document in the @a b variable,
+ * handled the error case, and as such, we're ready to dig deep into
+ * the BSON object!
+ * @until printf
+ *
+ * And once we're done working with the BSON object, we free the
+ * cursor, and the object, and continue the loop.
+ * @until }
+ *
+ * And in the end, we emit a newline, and free the cursor to wrap up
+ * our query routine.
+ * @until }
+ *
+ * All that is left now, is the glue that holds this together, and
+ * connects to MongoDB:
+ * @until }
+ * @until }
+ *
+ * I believe that does not need any further explanation.
+ *
+ * As an exercise, one can add another feature: dropping the temporary
+ * collection on error. Or perhaps, count the number of documents
+ * returned, and see if and how the count changes between subsequent
+ * runs of the test program.
+ */
diff --git a/docs/tutorial/tut_json2bson.h b/docs/tutorial/tut_json2bson.h
new file mode 100644
index 0000000..f263234
--- /dev/null
+++ b/docs/tutorial/tut_json2bson.h
@@ -0,0 +1,97 @@
+/** @page tut_json2bson JSON to BSON converter
+ *
+ * Now that we have a basic grasp of the library, we'll write a
+ * solution to a real life problem: converting JSON to BSON.
+ *
+ * Our program will expect correctly formatted JSON, in condensed
+ * one-line format, and will output a BSON document for each line of
+ * JSON received.
+ *
+ * @dontinclude tut_json2bson.c
+ * @until glib.h
+ *
+ * First, we forward declare the json_to_bson() function, because
+ * we'll recursively use it later on:
+ * @until json_to_bson
+ *
+ * Next, we create the heart of the program, a function that takes a
+ * BSON object, a value and a key, and appends the key-value pair to
+ * the bson object, with the correct type.
+ * @until {
+ *
+ * We do this by checking the JSON object's type, and acting upon it:
+ * @until switch
+ * @until {
+ *
+ * The boolean, double, integer and string types are easy: we just use
+ * the appropriate bson_append_*() function:
+ * @until break
+ * @until break
+ * @until break
+ * @until break
+ *
+ * Converting a JSON object to BSON is a bit more complicated, yet,
+ * straightforward nevertheless:
+ * @until }
+ *
+ * This is one of the reasons we needed to forward-declare
+ * json_to_bson(): we're using it to turn the JSON value into BSON,
+ * and append it as a subdocument.
+ *
+ * Next up: arrays! This is even trickier than sub-documents, as we
+ * need to iterate over the elements, and append each
+ * individually. But, trickier as it may be, it's still
+ * straightforward:
+ * @until }
+ * @until }
+ *
+ * Anything else, we ignore:
+ * @until break
+ *
+ * @until }
+ *
+ * @until }
+ *
+ * And to bind this together with JSON-C's API, we need two more
+ * functions. The first one will simply iterate over a JSON object,
+ * and call the function we wrote above:
+ * @until }
+ * @until }
+ *
+ * The next one is another wrapper around the former: it creates a
+ * BSON document, calls the foreach method, then finishes the BSON
+ * object and we're done:
+ * @until }
+ *
+ * We're almost done! All that is left is writing our program's entry
+ * point: something that will read the input, turn it into BSON, and
+ * write it out:
+ *
+ * @until json_tokener
+ *
+ * We do some setting up, creating a new IO channel, and a JSON
+ * tokenizer:
+ * @until tokener =
+ *
+ * Then, until we have something to read...
+ * @until {
+ * @until bson
+ *
+ * We reset the tokenizer before parsing another line, then parse the
+ * JSON we received:
+ * @until }
+ *
+ * If we received something other than a JSON object, we can't turn
+ * that into BSON, so we write an error to STDERR, and skip this line:
+ * @until }
+ *
+ * Otherwise, we turn it into BSON, and write it to STDOUT:
+ * @until bson_free
+ *
+ * @until }
+ *
+ * And that was our program, a very simple application that turns each
+ * line of JSON into BSON.
+ *
+ * @until }
+ */
diff --git a/docs/tutorial/tut_mongo_sync.h b/docs/tutorial/tut_mongo_sync.h
new file mode 100644
index 0000000..bc86a7e
--- /dev/null
+++ b/docs/tutorial/tut_mongo_sync.h
@@ -0,0 +1,16 @@
+/** @page tut_mongo_sync Working with the Mongo Sync API
+ *
+ * In this section we'll be going over various parts of the
+ * synchronous API provided by libmongo-client. From connecting to a
+ * single host, through replica sets to performing various more
+ * complex operations.
+ *
+ * Contents:
+ * - @subpage tut_mongo_sync_connect
+ * - @subpage tut_mongo_sync_insert
+ * - @subpage tut_mongo_sync_query
+ * - @subpage tut_mongo_sync_query_complex
+ * - @subpage tut_mongo_sync_cmd_create
+ * - @subpage tut_mongo_sync_cmd_index_create
+ * - @subpage tut_mongo_sync_cmd_custom
+ */
diff --git a/docs/tutorial/tut_mongo_sync_cmd_create.h b/docs/tutorial/tut_mongo_sync_cmd_create.h
new file mode 100644
index 0000000..f05940d
--- /dev/null
+++ b/docs/tutorial/tut_mongo_sync_cmd_create.h
@@ -0,0 +1,53 @@
+/** @page tut_mongo_sync_cmd_create Creating collections
+ *
+ * In this simple example we'll learn how to explicitly create
+ * collections with the library, be those normal collections, capped
+ * ones or simply preallocated.
+ *
+ * Our application will attempt to create a normal collection, a
+ * capped one, a capped one that's also capped on the number of
+ * entries, and a pre-allocated (but uncapped) collection.
+ *
+ * It will print these properties of the collections as well, so that
+ * we can verify that the creation did indeed work.
+ *
+ * @dontinclude tut_mongo_sync_cmd_create.c
+ * @until stdio.h
+ *
+ * First of all, we need a function that prints the collection
+ * properties. Because we're lazy, it will take a BSON object, as
+ * returned by mongo_sync_cmd_exists().
+ *
+ * The output of said command is a BSON object that has a @a name
+ * field, which is the full name of the collection, the database part
+ * included; and an @a options subdocument, which lists various
+ * options specified during creating, such as cappedness, size and
+ * maximum number of elements.
+ *
+ * Our very simple function will extract all these and print what's
+ * appropriate. It will also free the BSON object it was given, so
+ * that we don't leak memory.
+ * @until printf ("\n")
+ * @until }
+ *
+ * With that done, lets get down to business, and create the
+ * collections, after connecting to the server, of course.
+ * @until }
+ *
+ * First we create a completely normal collection, with the default
+ * settings:
+ * @until print_coll_info
+ *
+ * Then a capped collection:
+ * @until print_coll_info
+ *
+ * Followed by another capped collection, one that is also capped by
+ * the number of elements, not only by size:
+ * @until print_coll_info
+ *
+ * And finally, we create a pre-allocated collection:
+ * @until print_coll_info
+ *
+ * And that's about it, really.
+ * @until }
+ */
diff --git a/docs/tutorial/tut_mongo_sync_cmd_custom.h b/docs/tutorial/tut_mongo_sync_cmd_custom.h
new file mode 100644
index 0000000..0b224b2
--- /dev/null
+++ b/docs/tutorial/tut_mongo_sync_cmd_custom.h
@@ -0,0 +1,64 @@
+/** @page tut_mongo_sync_cmd_custom Running custom commands
+ *
+ * Sometimes it is necessary to run custom commands against a
+ * database, commands for which the library does not provide a
+ * convenience wrapper. In this tutorial, we will explore how to
+ * run server-side evaluations, using the @a $eval command.
+ *
+ * @dontinclude tut_mongo_sync_cmd_custom.c
+ * @until stdlib.h
+ *
+ * @until eval
+ *
+ * First, we connect to the database, and create a BSON object that
+ * will hold our command, one that creates a function server side,
+ * that takes one argument, and returns the argument plus 4.2. The
+ * BSON object will also set up the arguments passed to this function,
+ * which, in our case, will be the number @a 1.
+ *
+ * @line conn =
+ * @until }
+ *
+ * @line eval =
+ * @until bson_finish
+ *
+ * Once we have the connection and the query established, it is time
+ * to send the command:
+ *
+ * @line p =
+ * @until }
+ *
+ * We then create a cursor from the returned packet, and iterate over
+ * it (in case there are multiple documents returned - which will not
+ * be the case here):
+ *
+ * @line cursor =
+ * @until }
+ *
+ * @until gdouble
+ *
+ * We want to retrieve each document, and find the @a retval key in
+ * them, because that's where @a $eval returns the results to us.
+ *
+ * @line result =
+ * @until }
+ * @until }
+ *
+ * At this point, we have successfully extracted the data, so we can
+ * free up the BSON and cursor objects.
+ *
+ * @line bson_cursor_free
+ * @until bson_free
+ *
+ * And finally, print the result:
+ *
+ * @until printf
+ *
+ * @until }
+ *
+ * And that's it! We clean up, disconnect, and that's all there is to
+ * running custom commands!
+ *
+ * @line mongo_sync_cursor_free
+ * @until }
+ */
diff --git a/docs/tutorial/tut_mongo_sync_cmd_index_create.h b/docs/tutorial/tut_mongo_sync_cmd_index_create.h
new file mode 100644
index 0000000..22842e0
--- /dev/null
+++ b/docs/tutorial/tut_mongo_sync_cmd_index_create.h
@@ -0,0 +1,66 @@
+/** @page tut_mongo_sync_cmd_index_create Creating indexes
+ *
+ * This example will show how to properly create indexes, how to
+ * verify its success, and will also give hints on how to prepare the
+ * BSON used to create the index.
+ *
+ * @dontinclude tut_mongo_sync_cmd_index_create.c
+ * @until stdio.h
+ *
+ * We will be creating and verifying multiple indexes, so lets create
+ * a function that takes a connection, a prepared index, and does the
+ * create and verify magic.
+ *
+ * @line static void
+ * @until }
+ * @until }
+ *
+ * This will create the index, and if it succeeds, write that to
+ * stdout. If it fails, it will try to query the last error, and print
+ * that to stderr.
+ *
+ * All we have to do past this point, is to build a few index
+ * specifications in BSON, and see what happens:
+ *
+ * @line int
+ * @until bson
+ *
+ * @line invalid
+ * @until bson_finish
+ *
+ * The first index spec we create will have a single index field, @a
+ * name, where we set the value to an empty string. However - as we
+ * will soon see - this is not a valid specification, as MongoDB does
+ * not accept string-typed fields in the index spec.
+ *
+ * @line index
+ * @until bson_finish
+ *
+ * Armed with the knowledge that strings are not going to work, we
+ * turn to our trusty old integers. Integers (32-bit integers at that,
+ * there really is no need to use a 64-bit value here) are the best
+ * fit for the type of an index field, because one can tell MongoDB
+ * the sort order (with negative or positive numbers) with them easily.
+ *
+ * @line conn =
+ * @until }
+ *
+ * We now have two index specs in BSON, and an established connection,
+ * lets see what happens!
+ *
+ * @line create_and_verify
+ * @until conn, index
+ *
+ * The first will - as explained above - fail, the second will
+ * succeed.
+ *
+ * And that is all it takes to create simple indexes! We now free up
+ * our BSON objects and disconnect, and the tutorial program is all
+ * done and finished.
+ *
+ * @line bson_free
+ * @until disconnect
+ *
+ * @line return
+ * @until }
+ */
diff --git a/docs/tutorial/tut_mongo_sync_connect.h b/docs/tutorial/tut_mongo_sync_connect.h
new file mode 100644
index 0000000..1fb8b4a
--- /dev/null
+++ b/docs/tutorial/tut_mongo_sync_connect.h
@@ -0,0 +1,49 @@
+/** @page tut_mongo_sync_connect Connecting to MongoDB
+ *
+ * The next step in our journey is to connect to MongoDB: to a single
+ * server and to a replicaset alike.
+ *
+ * Our first task is to connect to a MongoDB server, listening
+ * on localhost's 27017 port. And we don't care whether it is a master
+ * or a secondary, so we set the slave_ok argument to TRUE:
+ *
+ * @dontinclude tut_mongo_sync.c
+ * @skip void
+ * @until mongo_sync_disconnect
+ * @until }
+ *
+ * It's fairly straightforward: error handling is done using errno
+ * values, because that saves us from inventing our own statuses:
+ * POSIX provides us with plenty, and the C library uses errno
+ * extensively well. So does libmongo-client!
+ *
+ * Next up, is connecting to a replicaset:
+ *
+ * @until return;
+ * @until }
+ *
+ * Wait a minute! Does this look familiar? Isn't it @b exactly the
+ * same as in the former example (apart from the host name)? It sure
+ * is! There really is no difference between connecting to a single
+ * server and to a replica set when using the Sync API. It hides all
+ * the boring things from the user.
+ *
+ * However, if the server is a replicaset, we can add seeds: seeds are
+ * hosts that are not listed in the replicaset's public config
+ * (meaning they're hidden), but we still want to be able to use them,
+ * if so need be.
+ *
+ * It's not necessary to add seeds, if the replica set itself
+ * advertises secondaries: the library will discover those, and
+ * reconnect to them, if automatic reconnection is turned on. Let's
+ * just do that!
+ *
+ * @until }
+ *
+ * Then we can add the seeds:
+ * @until }
+ * @until }
+ *
+ * And that's about it! We wrap up our function, and we're done!
+ * @until }
+ */
diff --git a/docs/tutorial/tut_mongo_sync_insert.h b/docs/tutorial/tut_mongo_sync_insert.h
new file mode 100644
index 0000000..aaf0754
--- /dev/null
+++ b/docs/tutorial/tut_mongo_sync_insert.h
@@ -0,0 +1,46 @@
+/** @page tut_mongo_sync_insert Inserting documents into MongoDB
+ *
+ * Now that we know how to connect, it is time to take another step
+ * forward, and put something into our database. We already learned
+ * how to build BSON objects in @ref tut_bson_build, so lets put that
+ * knowledge together, and insert a document!
+ *
+ * @dontinclude tut_mongo_sync.c
+ * @skip tut_sync_connect_replica
+ * @skip }
+ * @skip void
+ * @until {
+ *
+ * First, we define a couple of variables, a connection, and three documents:
+ *
+ * @until bson
+ *
+ * Then we do our routine connect:
+ * @until }
+ *
+ * And then build a BSON object, as we've learned earlier:
+ * @until bson_finish
+ *
+ * Now we have a connection, and we have a document, it's time to get
+ * dirty, and insert:
+ *
+ * @until }
+ *
+ * One might wonder what that @c NULL is at the end of
+ * mongo_sync_cmd_insert()'s argument list: it's the @a sentinel. The
+ * value that marks the end of the documents we want to insert. It is
+ * needed, because the insert command can take any number of
+ * documents, and it will try to insert them in bulk.
+ *
+ * Lets try that, and build two more documents:
+ * @until bson_finish (doc3)
+ *
+ * Then we insert the two new documents into the same collection, at
+ * the same time:
+ * @until }
+ *
+ * And we're done! It's that straightforward. All we have left is
+ * cleaning up!
+ *
+ * @until }
+ */
diff --git a/docs/tutorial/tut_mongo_sync_query.h b/docs/tutorial/tut_mongo_sync_query.h
new file mode 100644
index 0000000..ebed5a1
--- /dev/null
+++ b/docs/tutorial/tut_mongo_sync_query.h
@@ -0,0 +1,67 @@
+/** @page tut_mongo_sync_query Querying documents
+ *
+ * We can connect, insert, and we still vaguely remember how to build
+ * BSON objects: how about we go out and query the database this time?
+ *
+ * @dontinclude tut_mongo_sync.c
+ * @skip tut_sync_insert
+ * @skip }
+ * @skip void
+ * @until {
+ *
+ * First, we define a couple of variables: a connection, a packet, a
+ * cursor, a BSON object and a counter:
+ *
+ * @until gint i
+ *
+ * We'll use the packet as an intermediate step between querying and
+ * making a cursor. We'll see why later. The query will be used to
+ * limit the documents queried, and the counter is just for pretty
+ * printing.
+ *
+ * Then we do our routine connect:
+ * @until }
+ *
+ * And then build a BSON object, an empty one, because an empty query
+ * means we're interested in all of the documents!
+ * @until bson_finish
+ *
+ * Now we have a connection, and we have a query, lets tell MongoDB
+ * what we want:
+ *
+ * @until bson_free
+ *
+ * The first two parameters are obvious. The third is a set of flags -
+ * but we don't use any right now. Then comes the number of documents
+ * to skip, and the number of documents to return, followed by a
+ * query, and an optional field selector, which we just left empty
+ * (meaning we want all fields returned).
+ *
+ * There's more than one way to figure out the data returned by a
+ * query: we can either use the returned packet as-is, and extract
+ * data from it using the low-level mongo_wire family of functions. Or
+ * we can make a cursor out of this packet, and iterate over the
+ * elements:
+ *
+ * @until }
+ *
+ * @until }
+ *
+ * The first thing we do inside of the loop is to get the data from
+ * the cursor - or bail out with an error if we can't.
+ *
+ * @until printf
+ *
+ * Then we proceed to make a BSON cursor, and print all the keys that
+ * belong to the document.
+ *
+ * Once that's done, we free the resources we used, and continue along
+ * the loop, until our cursor signals the end of the query.
+ *
+ * @until printf
+ *
+ * @until }
+ *
+ * Then we clean up and go home:
+ * @until }
+ */
diff --git a/docs/tutorial/tut_mongo_sync_query_complex.h b/docs/tutorial/tut_mongo_sync_query_complex.h
new file mode 100644
index 0000000..145ab0e
--- /dev/null
+++ b/docs/tutorial/tut_mongo_sync_query_complex.h
@@ -0,0 +1,43 @@
+/** @page tut_mongo_sync_query_complex Querying documents, part two
+ *
+ * We learned how to make simple queries in the previous section,
+ * we'll be brave and do something much more advanced this time: we'll
+ * limit the query to documents that have their @c "yes?" field set to
+ * @a FALSE, and sort the results by the @c "n" field, in ascending
+ * order.
+ *
+ * @dontinclude tut_mongo_sync.c
+ * @skip tut_sync_query_simple
+ * @skip }
+ * @skip void
+ * @until {
+ *
+ * @until gint i
+ *
+ * @until }
+ *
+ * After our routine connect, we build the query and select BSON
+ * objects:
+ *
+ * @until bson_finish (select)
+ *
+ * Then we launch the query:
+ * @until bson_free (select)
+ *
+ * And make a cursor, just like last time:
+ * @until }
+ *
+ * And that's pretty much the bulk of what we wanted to do: we just
+ * constructed our query and select BSON objects appropriately, and
+ * mongo_sync_cmd_query() does the rest.
+ *
+ * But just to make sure our results are sane, we iterate over the
+ * returned documents, and print the fields we're interested in:
+ *
+ * @until i++
+ * @until }
+ *
+ * And when that is done, all that is left, is to clean up after
+ * ourselves:
+ * @until }
+ */
diff --git a/docs/tutorial/tutorial.h b/docs/tutorial/tutorial.h
new file mode 100644
index 0000000..e136071
--- /dev/null
+++ b/docs/tutorial/tutorial.h
@@ -0,0 +1,34 @@
+/** @page tutorial Tutorial
+ *
+ * These pages will attempt to guide one through the libmongo-client
+ * library, starting from the basic BSON building blocks, through the
+ * low level wire protocol API, until the highest level synchronous
+ * API.
+ *
+ * The documentation assumes a reasonable amount of C knowledge, and
+ * basic familiarity with MongoDB concepts.
+ *
+ * The example programs can be found in the @c docs/tut/examples
+ * directory in the source tree, along with a Makefile. Should one
+ * want to compile the examples, or modified versions of them, by
+ * hand, the following command should work:
+ *
+ * @verbatim
+$ cc $(pkg-config --cflags libmongo-client) tut_bson_build.c $(pkg-config --libs libmongo-client) -o tut_bson_build
+@endverbatim
+ *
+ * Contents:
+ * - @subpage tut_bson
+ * - @ref tut_bson_build
+ * - @ref tut_bson_traverse
+ * - @subpage tut_mongo_sync
+ * - @ref tut_mongo_sync_connect
+ * - @ref tut_mongo_sync_insert
+ * - @ref tut_mongo_sync_query
+ * - @ref tut_mongo_sync_query_complex
+ * - @ref tut_mongo_sync_cmd_create
+ * - @ref tut_mongo_sync_cmd_index_create
+ * - @ref tut_mongo_sync_cmd_custom
+ * - @subpage tut_hl_client
+ * - @subpage tut_json2bson
+ */
diff --git a/examples/Makefile.am b/examples/Makefile.am
new file mode 100644
index 0000000..0cff31f
--- /dev/null
+++ b/examples/Makefile.am
@@ -0,0 +1,8 @@
+noinst_PROGRAMS = mongo-dump bson-inspect gridfs
+
+AM_CFLAGS = -I$(top_srcdir)/src/ @GLIB_CFLAGS@
+LDADD = $(top_builddir)/src/libmongo-client.la @GLIB_LIBS@
+
+mongo_dump_SOURCES = mongo-dump.c
+bson_inspect_SOURCES = bson-inspect.c
+gridfs_SOURCES = gridfs.c
diff --git a/examples/bson-inspect.c b/examples/bson-inspect.c
new file mode 100644
index 0000000..841dc45
--- /dev/null
+++ b/examples/bson-inspect.c
@@ -0,0 +1,341 @@
+/* bson-inspect.c - BSON inspector, example application.
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <glib.h>
+#include <bson.h>
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+/* Emit two spaces of indentation per nesting level on stdout.
+ *
+ * In non-verbose (condensed) mode the whole document is printed on a
+ * single line, so no indentation is emitted at all. */
+static void
+_indent (gint level, gboolean verbose)
+{
+  gint i;
+
+  if (!verbose)
+    return;
+
+  for (i = 1; i <= level; i++)
+    printf ("  ");
+}
+
+/* Dump a single BSON document to stdout in a JSON-like notation.
+ *
+ * @b:        the finished BSON object to dump.
+ * @ilevel:   current indentation depth (used in verbose mode only).
+ * @verbose:  if TRUE, pretty-print across multiple lines with
+ *            indentation and type/size comments; otherwise emit a
+ *            condensed single-line representation.
+ * @as_array: if TRUE, the document is a BSON array, so the (numeric)
+ *            element keys are not printed.
+ *
+ * Sub-documents and arrays are handled by recursing with an
+ * incremented indent level; element types with no printable
+ * representation implemented here are shown as "<unimplemented>". */
+static void
+bson_dump (bson *b, gint ilevel, gboolean verbose, gboolean as_array)
+{
+  bson_cursor *c;
+  gboolean first = TRUE;
+
+  c = bson_cursor_new (b);
+  while (bson_cursor_next (c))
+    {
+      /* Separate elements with a comma; the first element of a
+       * document gets no leading separator. */
+      if (!first)
+        {
+          printf (", ");
+          if (verbose)
+            printf ("\n");
+        }
+      first = FALSE;
+      if (verbose)
+        {
+          _indent (ilevel, verbose);
+          /* The +10 skips the "BSON_TYPE_" prefix of the type name. */
+          printf ("/* type='%s'; */\n",
+                  bson_cursor_type_as_string (c) + 10);
+        }
+      _indent (ilevel, verbose);
+      if (!as_array)
+        {
+          printf ("\"%s\" : ", bson_cursor_key (c));
+        }
+      switch (bson_cursor_type (c))
+        {
+        case BSON_TYPE_DOUBLE:
+          {
+            gdouble d;
+            bson_cursor_get_double (c, &d);
+            printf ("%f", d);
+            break;
+          }
+        case BSON_TYPE_STRING:
+          {
+            const gchar *s;
+            gchar *s2;
+            bson_cursor_get_string (c, &s);
+            /* Escape control characters and quotes for JSON-like output. */
+            s2 = g_strescape (s, NULL);
+            printf ("\"%s\"", s2);
+            g_free (s2);
+            break;
+          }
+        case BSON_TYPE_OID:
+          {
+            const guint8 *oid;
+            gint j;
+            bson_cursor_get_oid (c, &oid);
+            printf ("ObjectId( \"");
+            /* ObjectIds are always 12 bytes; print them as hex. */
+            for (j = 0; j < 12; j++)
+              printf ("%02x", oid[j]);
+            printf ("\" )");
+            break;
+          }
+        case BSON_TYPE_BOOLEAN:
+          {
+            gboolean b;
+            bson_cursor_get_boolean (c, &b);
+            printf ((b) ? "true" : "false");
+            break;
+          }
+        case BSON_TYPE_REGEXP:
+          {
+            const gchar *r, *o;
+            gchar *r2, *o2;
+            bson_cursor_get_regex (c, &r, &o);
+            r2 = g_strescape (r, NULL);
+            o2 = g_strescape (o, NULL);
+            printf ("Regex(\"/%s/%s\")", r2, o2);
+            g_free (r2);
+            g_free (o2);
+            break;
+          }
+        case BSON_TYPE_NULL:
+          {
+            printf ("null");
+            break;
+          }
+        case BSON_TYPE_JS_CODE:
+          {
+            const gchar *js;
+            gchar *js2;
+            bson_cursor_get_javascript (c, &js);
+            js2 = g_strescape (js, NULL);
+            printf ("%s", js2);
+            g_free (js2);
+            break;
+          }
+        case BSON_TYPE_SYMBOL:
+          {
+            const gchar *s;
+            gchar *s2;
+            bson_cursor_get_symbol (c, &s);
+            s2 = g_strescape (s, NULL);
+            printf ("%s", s2);
+            g_free (s2);
+            break;
+          }
+        case BSON_TYPE_INT32:
+          {
+            gint32 l32;
+            bson_cursor_get_int32 (c, &l32);
+            printf ("%d", l32);
+            break;
+          }
+        case BSON_TYPE_INT64:
+          {
+            gint64 l64;
+            bson_cursor_get_int64 (c, &l64);
+            printf ("%" G_GINT64_FORMAT, l64);
+            break;
+          }
+        case BSON_TYPE_DOCUMENT:
+          {
+            /* Recurse into the sub-document with a deeper indent. */
+            bson *sd;
+            bson_cursor_get_document (c, &sd);
+            printf ("{ ");
+            if (verbose)
+              printf ("/* size='%d' */\n", bson_size (sd));
+            bson_dump (sd, ilevel + 1, verbose, FALSE);
+            if (verbose)
+              {
+                printf ("\n");
+                _indent (ilevel, verbose);
+                printf ("}");
+              }
+            else
+              printf (" }");
+            bson_free (sd);
+            break;
+          }
+        case BSON_TYPE_ARRAY:
+          {
+            /* Arrays recurse like documents, but with as_array=TRUE so
+             * the numeric keys are suppressed. */
+            bson *sa;
+
+            bson_cursor_get_array (c, &sa);
+            printf ("[ ");
+            if (verbose)
+              printf ("/* size='%d' */\n", bson_size (sa));
+            bson_dump (sa, ilevel + 1, verbose, TRUE);
+            if (verbose)
+              {
+                printf ("\n");
+                _indent (ilevel, verbose);
+                printf ("]");
+              }
+            else
+              printf (" ]");
+            bson_free (sa);
+            break;
+          }
+        case BSON_TYPE_BINARY:
+          {
+            /* Binary data is printed MongoDB extended-JSON style:
+             * { "$binary" : "<base64>", "$type" : "<subtype>" } */
+            const guint8 *data;
+            gint32 size;
+            bson_binary_subtype t;
+            gchar *b64;
+
+            bson_cursor_get_binary (c, &t, &data, &size);
+            b64 = g_base64_encode (data, size);
+            printf ("{ ");
+            if (verbose)
+              {
+                printf ("/* size='%d' */\n", size);
+                _indent (ilevel + 1, verbose);
+              }
+            printf ("\"$binary\" : \"%s\",", b64);
+            if (verbose)
+              {
+                printf ("\n");
+                _indent (ilevel + 1, verbose);
+              }
+            else
+              printf (" ");
+            printf ("\"$type\" : \"%02d\"", t);
+            if (verbose)
+              {
+                printf ("\n");
+                _indent (ilevel, verbose);
+              }
+            else
+              printf (" ");
+            printf ("}");
+            g_free (b64);
+            break;
+          }
+        case BSON_TYPE_JS_CODE_W_SCOPE:
+        case BSON_TYPE_UNDEFINED:
+        case BSON_TYPE_UTC_DATETIME:
+        case BSON_TYPE_DBPOINTER:
+        case BSON_TYPE_TIMESTAMP:
+        case BSON_TYPE_MIN:
+        case BSON_TYPE_MAX:
+        default:
+          printf ("\"<unimplemented>\"");
+          break;
+        }
+    }
+  bson_cursor_free (c);
+}
+
+/* Entry point: parse command-line options, mmap() the BSON dump file
+ * given as the first positional argument, and dump every document in
+ * it to stdout via bson_dump().
+ *
+ * Returns 0 on success; exits with status 1 on option-parsing, file
+ * or mapping errors. */
+int
+main (int argc, char *argv[])
+{
+  int fd;
+  off_t offs = 0;
+  bson *b;
+  guint8 *data;
+  struct stat st;
+  gint64 i = 1;
+  GOptionContext *context;
+  gboolean verbose = FALSE;
+  GError *error = NULL;
+
+  GOptionEntry entries[] =
+    {
+      { "verbose", 'v', 0, G_OPTION_ARG_NONE, &verbose,
+        "Be verbose", NULL },
+      { NULL, 0, 0, 0, NULL, NULL, NULL }
+    };
+
+  context = g_option_context_new ("- inspect a BSON dump");
+  g_option_context_add_main_entries (context, entries, "bson-inspect");
+  if (!g_option_context_parse (context, &argc, &argv, &error))
+    {
+      g_print ("option parsing failed: %s\n", error->message);
+      exit (1);
+    }
+
+  if (argc < 2)
+    {
+      /* No file argument: re-run the parser with --help, which makes
+       * GLib print the usage text before we bail out. */
+      gchar **nargv;
+      argc = 2;
+
+      nargv = g_new (gchar *, 3);
+      nargv[0] = argv[0];
+      nargv[1] = "--help";
+      nargv[2] = NULL;
+
+      g_option_context_parse (context, &argc, (gchar ***)&nargv, &error);
+
+      exit (1);
+    }
+
+  fd = open (argv[1], O_RDONLY);
+  if (fd == -1)
+    {
+      fprintf (stderr, "Error opening file '%s': %s\n",
+               argv[1], strerror (errno));
+      exit (1);
+    }
+  if (fstat (fd, &st) != 0)
+    {
+      fprintf (stderr, "Error fstat()ing file '%s': %s\n",
+               argv[1], strerror (errno));
+      close (fd);
+      exit (1);
+    }
+
+  data = mmap (NULL, (size_t)st.st_size, PROT_READ, MAP_SHARED, fd, 0);
+  if (data == MAP_FAILED)
+    {
+      fprintf (stderr, "Error mmap()ing file '%s': %s\n",
+               argv[1], strerror (errno));
+      close (fd);
+      exit (1);
+    }
+
+  /* Walk the mapped file document by document: each iteration reads
+   * the size prefix at the current offset, wraps that many bytes in a
+   * bson object, dumps it, and advances past it. */
+  while (offs < st.st_size)
+    {
+      b = bson_new_from_data ((const guint8 *)(data + offs),
+                              bson_stream_doc_size (data, offs) - 1);
+      bson_finish (b);
+      offs += bson_size (b);
+
+      if (verbose)
+        /* i is a gint64, so use the signed 64-bit format macro;
+         * G_GUINT64_FORMAT would mismatch the argument type. */
+        printf ("/* Document #%" G_GINT64_FORMAT "; size='%d' */\n", i,
+                bson_size (b));
+      printf ("{ ");
+      if (verbose)
+        printf ("\n");
+      bson_dump (b, 1, verbose, FALSE);
+      if (verbose)
+        printf ("\n}\n");
+      else
+        printf (" }\n");
+      if (verbose)
+        printf ("\n");
+
+      bson_free (b);
+      i++;
+    }
+  munmap (data, st.st_size);
+  close (fd);
+  /* Release the option context we allocated at startup. */
+  g_option_context_free (context);
+
+  return 0;
+}
diff --git a/examples/gridfs.c b/examples/gridfs.c
new file mode 100644
index 0000000..2d19aee
--- /dev/null
+++ b/examples/gridfs.c
@@ -0,0 +1,413 @@
+/* gridfs.c - A GridFS utility; example application
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <mongo.h>
+
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+/* Command-line configuration shared by all gridfs sub-commands. */
+typedef struct
+{
+  gchar *host;          /* Server host to connect to. */
+  gint port;            /* Server port (default 27017, set in main()). */
+  gchar *db;            /* Database name. */
+  gchar *coll;          /* Collection name. */
+  gchar *ns;            /* Namespace "db.coll", built in main(). */
+  gboolean verbose;     /* Emit progress messages on stderr (see VLOG). */
+  gboolean slaveok;     /* Allow connecting to slave servers. */
+  gboolean master_sync; /* Reconnect to the replica master after connect. */
+} config_t;
+
+#define VLOG(...) { if (config->verbose) fprintf (stderr, __VA_ARGS__); }
+
+void
+mongo_gridfs_error (int e)
+{
+ fprintf (stderr, "Error encountered: %s\n", strerror (e));
+ exit (1);
+}
+
+/* Open the GridFS namespace described by @config.
+ *
+ * Any failure is fatal: mongo_gridfs_error() prints and exits, so the
+ * return value is always a valid handle.  The caller releases it with
+ * mongo_sync_gridfs_free(). */
+mongo_sync_gridfs *
+mongo_gridfs_connect (config_t *config)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+
+  VLOG ("Connecting to %s:%d/%s.%s...\n", config->host, config->port,
+        config->db, config->coll);
+
+  conn = mongo_sync_connect (config->host, config->port, config->slaveok);
+  if (!conn)
+    mongo_gridfs_error (errno);
+
+  if (config->master_sync)
+    {
+      /* Swap the connection for one pointing at the replica master. */
+      VLOG ("Syncing to master...\n");
+      conn = mongo_sync_reconnect (conn, TRUE);
+      if (!conn)
+        mongo_gridfs_error (errno);
+    }
+
+  gfs = mongo_sync_gridfs_new (conn, config->ns);
+  if (!gfs)
+    mongo_gridfs_error (errno);
+
+  return gfs;
+}
+
+/* Download a GridFS file to disk: "get GRIDFS_FILENAME OUTPUT_FILENAME".
+ *
+ * Looks the file up by its "filename" attribute and streams its chunks
+ * into the output file.  All errors are fatal and exit the process. */
+void
+mongo_gridfs_get (config_t *config, gint argc, gchar *argv[])
+{
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_chunked_file *gfile;
+  mongo_sync_cursor *cursor;
+  gint64 n = 0;   /* chunk counter, for progress output only */
+  bson *query;
+  int fd;
+
+  gchar *gfn, *ofn;
+
+  /* argv = { prog, "get", GRIDFS_FILENAME, OUTPUT_FILENAME } */
+  if (argc < 4)
+    {
+      fprintf (stderr, "Usage: %s get GRIDFS_FILENAME OUTPUT_FILENAME\n",
+               argv[0]);
+      exit (1);
+    }
+  gfn = argv[2];
+  ofn = argv[3];
+
+  gfs = mongo_gridfs_connect (config);
+
+  VLOG ("Trying to find '%s'...\n", gfn);
+
+  query = bson_build (BSON_TYPE_STRING, "filename", gfn, -1,
+                      BSON_TYPE_NONE);
+  bson_finish (query);
+  gfile = mongo_sync_gridfs_chunked_find (gfs, query);
+  if (!gfile)
+    mongo_gridfs_error (errno);
+  bson_free (query);
+
+  VLOG ("Opening output file '%s'...\n", ofn);
+  fd = open (ofn, O_RDWR | O_CREAT | O_TRUNC, 0600);
+  if (fd == -1)
+    {
+      fprintf (stderr, "Error opening output file '%s': %s\n",
+               ofn, strerror (errno));
+      exit (1);
+    }
+
+  VLOG ("Writing '%s' -> '%s' (%" G_GINT64_FORMAT " bytes in %" G_GINT64_FORMAT
+        " chunks)\n", gfn, ofn,
+        mongo_sync_gridfs_file_get_length (gfile),
+        mongo_sync_gridfs_file_get_chunks (gfile));
+
+  /* NOTE(review): the (0, 0) pair presumably selects all chunks of the
+   * file — confirm against mongo_sync_gridfs_chunked_file_cursor_new(). */
+  cursor = mongo_sync_gridfs_chunked_file_cursor_new (gfile, 0, 0);
+  if (!cursor)
+    mongo_gridfs_error (errno);
+
+  while (mongo_sync_cursor_next (cursor))
+    {
+      gint32 size;
+      guint8 *data;
+
+      VLOG ("\rWriting chunk %" G_GINT64_FORMAT "...", n++);
+
+      data = mongo_sync_gridfs_chunked_file_cursor_get_chunk (cursor, &size);
+      if (!data)
+        mongo_gridfs_error (errno);
+
+      /* A short write is treated as fatal. */
+      if (write (fd, data, size) != size)
+        {
+          perror ("write()");
+          exit (1);
+        }
+      g_free (data);
+    }
+  mongo_sync_cursor_free (cursor);
+  mongo_sync_gridfs_chunked_file_free (gfile);
+
+  close (fd);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+
+  VLOG("\n");
+}
+
+/* Upload a local file into GridFS: "put INPUT_FILENAME GRIDFS_FILENAME".
+ *
+ * The input file is mmap()ed read-only and handed to the chunked GridFS
+ * writer in one piece.  All errors are fatal and exit the process. */
+void
+mongo_gridfs_put (config_t *config, gint argc, gchar *argv[])
+{
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_chunked_file *gfile;
+  bson *meta;
+  int fd;
+  guint8 *data;
+  struct stat st;
+
+  gchar *gfn, *ifn, *oid_s;
+
+  /* argv = { prog, "put", INPUT_FILENAME, GRIDFS_FILENAME } */
+  if (argc < 4)
+    {
+      fprintf (stderr, "Usage: %s put INPUT_FILENAME GRIDFS_FILENAME\n",
+               argv[0]);
+      exit (1);
+    }
+  ifn = argv[2];
+  gfn = argv[3];
+
+  mongo_util_oid_init (0);
+
+  gfs = mongo_gridfs_connect (config);
+
+  VLOG ("Opening input file: '%s'...\n", ifn);
+  fd = open (ifn, O_RDONLY);
+  /* open() signals failure with -1; the old "!fd" test missed every
+   * failure and rejected the valid descriptor 0. */
+  if (fd == -1)
+    {
+      fprintf (stderr, "Error opening input file: %s\n",
+               strerror (errno));
+      exit (1);
+    }
+  if (fstat (fd, &st) != 0)
+    {
+      fprintf (stderr, "Error stat'ing the input file: %s\n",
+               strerror (errno));
+      exit (1);
+    }
+
+  data = mmap (NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+  if (data == MAP_FAILED)
+    {
+      /* A failed mapping is fatal; the old code fell through and used
+       * MAP_FAILED as the upload buffer. */
+      fprintf (stderr, "Error mmapping the input file: %s\n",
+               strerror (errno));
+      close (fd);
+      exit (1);
+    }
+
+  meta = bson_build (BSON_TYPE_STRING, "filename", gfn, -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  VLOG ("Uploading '%s' -> '%s'...\n", ifn, gfn);
+
+  gfile = mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, meta,
+                                                          data, st.st_size);
+  if (!gfile)
+    mongo_gridfs_error (errno);
+  bson_free (meta);
+  munmap (data, st.st_size);
+  close (fd);  /* descriptor no longer needed once the mapping is gone */
+
+  oid_s = mongo_util_oid_as_string (mongo_sync_gridfs_file_get_id (gfile));
+  printf ("Uploaded file: %s (_id: %s; md5 = %s)\n", gfn,
+          oid_s,
+          mongo_sync_gridfs_file_get_md5 (gfile));
+
+  g_free (oid_s);
+  mongo_sync_gridfs_chunked_file_free (gfile);
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* List every file in the GridFS namespace, one JSON-ish line each.
+ * With --verbose, also lists any non-standard metadata keys. */
+void
+mongo_gridfs_list (config_t *config)
+{
+  mongo_sync_cursor *cursor;
+  mongo_sync_gridfs *gfs;
+
+  gfs = mongo_gridfs_connect (config);
+
+  cursor = mongo_sync_gridfs_list (gfs, NULL);
+
+  while (mongo_sync_cursor_next (cursor))
+    {
+      bson *meta = mongo_sync_cursor_get_data (cursor);
+      bson_cursor *c;
+      const guint8 *oid = NULL;
+      gint32 i32, chunk_size;
+      gint64 length, date;
+      const gchar *md5, *filename = NULL;
+      gchar *oid_s;
+
+      /* bson_cursor_get_oid() hands back a pointer into the document.
+       * The previous "const guint8 oid[12]" plus "(const guint8 **)&oid"
+       * cast stored a pointer on top of the array and only worked by
+       * accident; a plain pointer out-parameter is the correct form. */
+      c = bson_find (meta, "_id");
+      if (!bson_cursor_get_oid (c, &oid))
+        mongo_gridfs_error (errno);
+
+      /* "length" may be stored as either int32 or int64. */
+      bson_cursor_find (c, "length");
+      if (!bson_cursor_get_int32 (c, &i32))
+        {
+          if (!bson_cursor_get_int64 (c, &length))
+            mongo_gridfs_error (errno);
+        }
+      else
+        length = i32;
+
+      bson_cursor_find (c, "chunkSize");
+      if (!bson_cursor_get_int32 (c, &chunk_size))
+        mongo_gridfs_error (errno);
+
+      bson_cursor_find (c, "uploadDate");
+      if (!bson_cursor_get_utc_datetime (c, &date))
+        mongo_gridfs_error (errno);
+
+      bson_cursor_find (c, "md5");
+      if (!bson_cursor_get_string (c, &md5))
+        mongo_gridfs_error (errno);
+
+      /* "filename" is optional; ignore a failed lookup. */
+      bson_cursor_find (c, "filename");
+      bson_cursor_get_string (c, &filename);
+
+      bson_cursor_free (c);
+
+      oid_s = mongo_util_oid_as_string (oid);
+      printf ("{ _id: ObjectID(\"%s\"), length: %" G_GINT64_FORMAT
+              ", chunkSize: %i, uploadDate: %"
+              G_GINT64_FORMAT ", md5: \"%s\"",
+
+              oid_s, length, chunk_size, date, md5);
+      g_free (oid_s);
+
+      if (filename)
+        printf (", filename: \"%s\"", filename);
+      printf (" }\n");
+
+      if (config->verbose)
+        {
+          /* Walk the whole metadata document and report keys that are
+           * not part of the standard GridFS file schema. */
+          c = bson_cursor_new (meta);
+          printf ("\tExtra metadata: [ ");
+          while (bson_cursor_next (c))
+            {
+              if (strcmp (bson_cursor_key (c), "_id") &&
+                  strcmp (bson_cursor_key (c), "length") &&
+                  strcmp (bson_cursor_key (c), "chunkSize") &&
+                  strcmp (bson_cursor_key (c), "uploadDate") &&
+                  strcmp (bson_cursor_key (c), "md5") &&
+                  strcmp (bson_cursor_key (c), "filename"))
+                {
+                  printf ("%s (%s), ", bson_cursor_key (c),
+                          bson_cursor_type_as_string (c));
+                }
+            }
+          bson_cursor_free (c);
+          printf ("]\n");
+        }
+      /* NOTE(review): meta is never freed here; if
+       * mongo_sync_cursor_get_data() transfers ownership (as the
+       * mongo-dump example assumes by freeing its result), this leaks
+       * one bson per listed file — confirm against the library docs. */
+    }
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Delete the GridFS file(s) matching a filename: "remove GRIDFS_FILENAME". */
+void
+mongo_gridfs_remove (config_t *config, gint argc, gchar *argv[])
+{
+  mongo_sync_gridfs *gfs;
+  bson *query;
+  gchar *filename;
+  gboolean removed;
+
+  if (argc < 3)
+    {
+      fprintf (stderr, "Usage: %s remove GRIDFS_FILENAME\n", argv[0]);
+      exit (1);
+    }
+  filename = argv[2];
+
+  gfs = mongo_gridfs_connect (config);
+
+  VLOG ("Deleting file: '%s'...\n", filename);
+
+  /* Match on the "filename" attribute, exactly as "get" does. */
+  query = bson_build (BSON_TYPE_STRING, "filename", filename, -1,
+                      BSON_TYPE_NONE);
+  bson_finish (query);
+
+  removed = mongo_sync_gridfs_remove (gfs, query);
+  if (removed)
+    {
+      VLOG ("\tDeleted\n");
+    }
+  else
+    {
+      VLOG ("\tFailed: %s\n", strerror (errno));
+    }
+  bson_free (query);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Entry point: parse global options, build the namespace, and dispatch
+ * on the sub-command (get / put / list / remove). */
+int
+main (int argc, char *argv[])
+{
+  GError *error = NULL;
+  GOptionContext *context;
+  config_t config = {
+    NULL, 27017, NULL, NULL, NULL, FALSE, FALSE, FALSE
+  };
+
+  GOptionEntry entries[] =
+    {
+      { "host", 'h', 0, G_OPTION_ARG_STRING, &config.host,
+        "Host to connect to", "HOST" },
+      { "port", 'p', 0, G_OPTION_ARG_INT, &config.port, "Port", "PORT" },
+      { "db", 'd', 0, G_OPTION_ARG_STRING, &config.db, "Database", "DB" },
+      { "collection", 'c', 0, G_OPTION_ARG_STRING, &config.coll, "Collection",
+        "COLL" },
+      { "verbose", 'v', 0, G_OPTION_ARG_NONE, &config.verbose,
+        "Be verbose", NULL },
+      { "slave-ok", 's', 0, G_OPTION_ARG_NONE, &config.slaveok,
+        "Connecting to slaves is ok", NULL },
+      { "master-sync", 'm', 0, G_OPTION_ARG_NONE, &config.master_sync,
+        "Reconnect to the replica master", NULL },
+      { NULL, 0, 0, 0, NULL, NULL, NULL }
+    };
+
+  context = g_option_context_new ("- GridFS utility");
+  g_option_context_add_main_entries (context, entries, "mongo-dump");
+  if (!g_option_context_parse (context, &argc, &argv, &error))
+    {
+      g_print ("option parsing failed: %s\n", error->message);
+      exit (1);
+    }
+
+  /* Missing mandatory settings or no sub-command: re-parse with a
+   * synthetic --help argv so GOption prints the help screen. */
+  if (!config.host || !config.port || !config.db ||
+      !config.coll || argc < 2)
+    {
+      gchar **nargv;
+      argc = 2;
+
+      nargv = g_new (gchar *, 3);
+      nargv[0] = argv[0];
+      nargv[1] = "--help";
+      nargv[2] = NULL;
+
+      g_option_context_parse (context, &argc, (gchar ***)&nargv, &error);
+
+      exit (1);
+    }
+  config.ns = g_strdup_printf ("%s.%s", config.db, config.coll);
+
+  if (g_ascii_strcasecmp (argv[1], "get") == 0)
+    mongo_gridfs_get (&config, argc, argv);
+  else if (g_ascii_strcasecmp (argv[1], "put") == 0)
+    mongo_gridfs_put (&config, argc, argv);
+  else if (g_ascii_strcasecmp (argv[1], "list") == 0)
+    mongo_gridfs_list (&config);
+  else if (g_ascii_strcasecmp (argv[1], "remove") == 0)
+    mongo_gridfs_remove (&config, argc, argv);
+  else
+    {
+      /* Previously, unknown sub-commands were silently ignored and the
+       * program exited 0; report them and fail instead. */
+      fprintf (stderr, "%s: unknown command '%s'\n", argv[0], argv[1]);
+      g_free (config.ns);
+      g_option_context_free (context);
+      return 1;
+    }
+
+  g_free (config.ns);
+  g_option_context_free (context);
+
+  return 0;
+}
diff --git a/examples/mongo-dump.c b/examples/mongo-dump.c
new file mode 100644
index 0000000..7e6419f
--- /dev/null
+++ b/examples/mongo-dump.c
@@ -0,0 +1,224 @@
+/* mongo-dump.c - MongoDB database dumper; example application.
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <mongo.h>
+
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+/* Command-line configuration for mongo-dump. */
+typedef struct
+{
+  gchar *addr;          /* Server address; shown without port when
+                           port == MONGO_CONN_LOCAL (see mongo_dump). */
+  gint port;            /* Server port (default 27017, set in main()). */
+  gchar *db;            /* Database name. */
+  gchar *coll;          /* Collection name. */
+  gchar *output;        /* Output filename; "-" means stdout. */
+  gchar *ns;            /* Namespace "db.coll", built in main(). */
+  gboolean verbose;     /* Emit progress messages on stderr (see VLOG). */
+  gboolean slaveok;     /* Allow connecting to slave servers. */
+  gboolean master_sync; /* Reconnect to the replica master after connect. */
+} config_t;
+
+#define VLOG(...) { if (config->verbose) fprintf (stderr, __VA_ARGS__); }
+
+/* Dump every document of config->ns as raw, concatenated BSON to
+ * config->output ("-" means stdout).  All errors are fatal. */
+void
+mongo_dump (config_t *config)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_cursor *cursor;
+  bson *b;
+  int fd;
+
+  glong cnt, pos = 0;
+
+  gchar *error = NULL;
+  int e;
+
+  if (config->port == MONGO_CONN_LOCAL)
+    {
+      VLOG ("Connecting to %s/%s.%s...\n", config->addr, config->db,
+            config->coll);
+    }
+  else
+    {
+      VLOG ("Connecting to %s:%d/%s.%s...\n", config->addr, config->port,
+            config->db, config->coll);
+    }
+  conn = mongo_sync_connect (config->addr, config->port, config->slaveok);
+
+  if (!conn)
+    {
+      /* No connection exists here, so asking the server for
+       * getLastError (as the old code did, on a NULL conn) cannot
+       * work; report the OS-level error instead. */
+      fprintf (stderr, "Error connecting to %s:%d: %s\n", config->addr,
+               config->port, strerror (errno));
+      exit (1);
+    }
+
+  if (config->master_sync)
+    {
+      VLOG ("Syncing to master...\n");
+      conn = mongo_sync_reconnect (conn, TRUE);
+      if (!conn)
+        {
+          /* conn is NULL on this path too; getLastError is unusable. */
+          fprintf (stderr, "Error reconnecting to the master of %s:%d: %s\n",
+                   config->addr, config->port, strerror (errno));
+          exit (1);
+        }
+    }
+
+  VLOG ("Counting documents...\n");
+  cnt = mongo_sync_cmd_count (conn, config->db, config->coll, NULL);
+  if (cnt < 0)
+    {
+      e = errno;
+
+      mongo_sync_cmd_get_last_error (conn, config->db, &error);
+      fprintf (stderr, "Error counting documents in %s.%s: %s\n",
+               config->db, config->coll, (error) ? error : strerror (e));
+      g_free (error);
+      mongo_sync_disconnect (conn);
+      exit (1);
+    }
+
+  VLOG ("Opening output file '%s'...\n", config->output);
+  if (strcmp (config->output, "-") == 0)
+    fd = 1;  /* "-" selects stdout */
+  else
+    {
+      fd = open (config->output, O_RDWR | O_CREAT | O_TRUNC, 0600);
+      if (fd == -1)
+        {
+          fprintf (stderr, "Error opening output file '%s': %s\n",
+                   config->output, strerror (errno));
+          mongo_sync_disconnect (conn);
+          exit (1);
+        }
+    }
+
+  VLOG ("Launching initial query...\n");
+  b = bson_new ();
+  bson_finish (b);
+  cursor = mongo_sync_cursor_new (conn, config->ns,
+                                  mongo_sync_cmd_query (conn, config->ns,
+                                                        MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT,
+                                                        0, 10, b, NULL));
+  bson_free (b);
+
+  while ((pos < cnt) && mongo_sync_cursor_next (cursor))
+    {
+      /* "doc" instead of re-declaring "b": avoids shadowing the outer
+       * query object. */
+      bson *doc = mongo_sync_cursor_get_data (cursor);
+      pos++;
+
+      if (!doc)
+        {
+          e = errno;
+
+          mongo_sync_cmd_get_last_error (conn, config->db, &error);
+          fprintf (stderr, "Error advancing the cursor: %s\n",
+                   (error) ? error : strerror (e));
+          g_free (error);
+          mongo_sync_disconnect (conn);
+          exit (1);
+        }
+
+      if (pos % 10 == 0)
+        VLOG ("\rDumping... %03.2f%%", (pos * 1.0) / (cnt * 1.0) * 100);
+
+      /* Documents are written back-to-back; the BSON length prefix is
+       * enough to split them again on restore. */
+      if (write (fd, bson_data (doc), bson_size (doc)) != bson_size (doc))
+        {
+          perror ("write()");
+          exit (1);
+        }
+      bson_free (doc);
+    }
+  /* The old final line computed (pos / cnt) * 100 in integer
+   * arithmetic (always 0%) and divided by zero on an empty
+   * collection. */
+  VLOG ("\rDumping... %03.2f%%\n", (cnt > 0) ? (pos * 100.0) / cnt : 100.0);
+
+  mongo_sync_cursor_free (cursor);
+
+  close (fd);
+  mongo_sync_disconnect (conn);
+}
+
+/* Entry point: parse the command line, validate it, and run the dump. */
+int
+main (int argc, char *argv[])
+{
+  GError *parse_error = NULL;
+  GOptionContext *context;
+  config_t config = {
+    NULL, 27017, NULL, NULL, NULL, NULL, FALSE, FALSE, FALSE
+  };
+
+  GOptionEntry entries[] =
+    {
+      { "addr", 'a', 0, G_OPTION_ARG_STRING, &config.addr,
+        "Address to connect to", "ADDRESS" },
+      { "port", 'p', 0, G_OPTION_ARG_INT, &config.port, "Port", "PORT" },
+      { "db", 'd', 0, G_OPTION_ARG_STRING, &config.db, "Database", "DB" },
+      { "collection", 'c', 0, G_OPTION_ARG_STRING, &config.coll, "Collection",
+        "COLL" },
+      { "verbose", 'v', 0, G_OPTION_ARG_NONE, &config.verbose,
+        "Be verbose", NULL },
+      { "output", 'o', 0, G_OPTION_ARG_STRING, &config.output,
+        "Output", "FILENAME" },
+      { "slave-ok", 's', 0, G_OPTION_ARG_NONE, &config.slaveok,
+        "Connecting to slaves is ok", NULL },
+      { "master-sync", 'm', 0, G_OPTION_ARG_NONE, &config.master_sync,
+        "Reconnect to the replica master", NULL },
+      { NULL, 0, 0, 0, NULL, NULL, NULL }
+    };
+
+  context = g_option_context_new ("- dump a complete mongo collection");
+  g_option_context_add_main_entries (context, entries, "mongo-dump");
+  if (!g_option_context_parse (context, &argc, &argv, &parse_error))
+    {
+      g_print ("option parsing failed: %s\n", parse_error->message);
+      exit (1);
+    }
+
+  /* addr, a non-zero port, db, coll and output are all mandatory;
+   * otherwise re-parse with a synthetic --help argv so GOption prints
+   * the help screen. */
+  if (!config.addr || !config.port || !config.db ||
+      !config.coll || !config.output)
+    {
+      gchar **help_argv;
+      argc = 2;
+
+      help_argv = g_new (gchar *, 3);
+      help_argv[0] = argv[0];
+      help_argv[1] = "--help";
+      help_argv[2] = NULL;
+
+      g_option_context_parse (context, &argc, (gchar ***)&help_argv,
+                              &parse_error);
+
+      exit (1);
+    }
+
+  config.ns = g_strdup_printf ("%s.%s", config.db, config.coll);
+  mongo_dump (&config);
+
+  g_free (config.ns);
+  g_option_context_free (context);
+
+  return 0;
+}
diff --git a/m4/.placeholder b/m4/.placeholder
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/m4/.placeholder
diff --git a/src/Makefile.am b/src/Makefile.am
new file mode 100644
index 0000000..243cb84
--- /dev/null
+++ b/src/Makefile.am
@@ -0,0 +1,51 @@
+LMC_CURRENT = 5
+LMC_REVISION = 0
+LMC_AGE = 5
+
+lib_LTLIBRARIES = libmongo-client.la
+libmongo_client_la_LIBADD = @GLIB_LIBS@ @OPENSSL_LIBS@
+libmongo_client_la_CFLAGS = @GLIB_CFLAGS@ @OPENSSL_CFLAGS@
+libmongo_client_la_LDFLAGS = -version-info ${LMC_CURRENT}:${LMC_REVISION}:${LMC_AGE}
+
+libmongo_client_la_SOURCES = \
+ compat.c compat.h \
+ bson.c bson.h \
+ mongo-wire.c mongo-wire.h \
+ mongo-client.c mongo-client.h \
+ mongo-utils.c mongo-utils.h \
+ mongo-sync.c mongo-sync.h \
+ mongo-sync-cursor.c mongo-sync-cursor.h \
+ mongo-sync-pool.c mongo-sync-pool.h \
+ sync-gridfs.c sync-gridfs.h \
+ sync-gridfs-chunk.c sync-gridfs-chunk.h \
+ sync-gridfs-stream.c sync-gridfs-stream.h \
+ mongo.h \
+ libmongo-private.h libmongo-macros.h
+
+libmongo_client_includedir = $(includedir)/mongo-client
+libmongo_client_include_HEADERS = \
+ bson.h mongo-wire.h mongo-client.h mongo-utils.h \
+ mongo-sync.h mongo-sync-cursor.h mongo-sync-pool.h \
+ sync-gridfs.h sync-gridfs-chunk.h sync-gridfs-stream.h \
+ mongo.h
+
+if HAVE_VERSIONING
+libmongo_client_la_LDFLAGS += \
+ -Wl,--version-script,$(top_srcdir)/src/libmongo-client.ver
+libmongo_client_la_DEPENDENCIES = ${top_srcdir}/src/libmongo-client.ver
+endif
+
+pkgconfigdir = $(libdir)/pkgconfig
+pkgconfig_DATA = libmongo-client.pc
+
+CLEANFILES = *.gcda *.gcno *.gcov
+CLEANDIRS = coverage
+
+coverage:
+ $(AM_V_GEN)
+ $(AM_V_at) SOURCES="$(SOURCES)" builddir="$(builddir)" srcdir="$(srcdir)" top_srcdir="$(top_srcdir)" $(top_srcdir)/tests/coverage.sh
+
+clean-local:
+ -test -z "$(CLEANDIRS)" || rm -rf "$(CLEANDIRS)"
+
+.PHONY: coverage
diff --git a/src/bson.c b/src/bson.c
new file mode 100644
index 0000000..845f6de
--- /dev/null
+++ b/src/bson.c
@@ -0,0 +1,1251 @@
+/* bson.c - libmongo-client's BSON implementation
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/bson.c
+ * Implementation of the BSON API.
+ */
+
+#include <glib.h>
+#include <errno.h>
+#include <string.h>
+#include <stdarg.h>
+
+#include "bson.h"
+#include "libmongo-macros.h"
+#include "libmongo-private.h"
+
+/** @internal BSON cursor structure.
+ *
+ * NOTE(review): the cursor stores a borrowed pointer to @a obj — it
+ * does not copy the object, so the BSON object must outlive the
+ * cursor; confirm with the users of bson_cursor_new().
+ */
+struct _bson_cursor
+{
+  const bson *obj; /**< The BSON object this is a cursor for. */
+  const gchar *key; /**< Pointer within the BSON object to the
+                       current key. */
+  size_t pos; /**< Position within the BSON object, pointing at the
+                 element type. */
+  size_t value_pos; /**< The start of the value within the BSON
+                       object, pointing right after the end of the
+                       key. */
+};
+
+/** @internal Append a byte to a BSON stream.
+ *
+ * @param b is the BSON stream to append to.
+ * @param byte is the byte to append.
+ */
+static inline void
+_bson_append_byte (bson *b, const guint8 byte)
+{
+  GByteArray *arr;
+
+  /* g_byte_array_append() may relocate the buffer; keep the returned
+   * pointer. */
+  arr = g_byte_array_append (b->data, &byte, sizeof (guint8));
+  b->data = arr;
+}
+
+/** @internal Append a 32-bit integer to a BSON stream.
+ *
+ * @param b is the BSON stream to append to.
+ * @param i is the integer to append.
+ */
+static inline void
+_bson_append_int32 (bson *b, const gint32 i)
+{
+  GByteArray *arr;
+
+  /* The value is appended verbatim; callers byteswap with
+   * GINT32_TO_LE beforehand where the wire format requires it. */
+  arr = g_byte_array_append (b->data, (const guint8 *)&i, sizeof (i));
+  b->data = arr;
+}
+
+/** @internal Append a 64-bit integer to a BSON stream.
+ *
+ * @param b is the BSON stream to append to.
+ * @param i is the integer to append.
+ */
+static inline void
+_bson_append_int64 (bson *b, const gint64 i)
+{
+  GByteArray *arr;
+
+  /* The value is appended verbatim; callers byteswap with
+   * GINT64_TO_LE beforehand where the wire format requires it. */
+  arr = g_byte_array_append (b->data, (const guint8 *)&i, sizeof (i));
+  b->data = arr;
+}
+
+/** @internal Append an element header to a BSON stream.
+ *
+ * The element header is a single byte, signaling the type of the
+ * element, followed by a NULL-terminated C string: the key (element)
+ * name.
+ *
+ * @param b is the BSON object to append to.
+ * @param type is the element type to append.
+ * @param name is the key name.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+static inline gboolean
+_bson_append_element_header (bson *b, bson_type type, const gchar *name)
+{
+  /* Refuse NULL arguments and objects already closed by
+   * bson_finish(). */
+  if (b == NULL || name == NULL || b->finished)
+    return FALSE;
+
+  /* One type byte, then the key as a NUL-terminated C string. */
+  _bson_append_byte (b, (guint8) type);
+  b->data = g_byte_array_append (b->data, (const guint8 *)name,
+                                 strlen (name) + 1);
+
+  return TRUE;
+}
+
+/** @internal Append a string-like element to a BSON object.
+ *
+ * There are a few string-like elements in the BSON spec that differ
+ * only in type, not in structure. This convenience function is used
+ * to append them with the appropriate type.
+ *
+ * @param b is the BSON object to append to.
+ * @param type is the string-like type to append.
+ * @param name is the key name.
+ * @param val is the value to append.
+ * @param length is the length of the value.
+ *
+ * @note Passing @a -1 as length will use the full length of @a
+ * val.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+static gboolean
+_bson_append_string_element (bson *b, bson_type type, const gchar *name,
+                             const gchar *val, gint32 length)
+{
+  size_t len;
+
+  /* Reject NULL values and nonsensical lengths (0, or below -1). */
+  if (!val || !length || length < -1)
+    return FALSE;
+
+  /* len counts the terminating NUL, as the BSON string header
+   * requires.
+   * NOTE(review): when length != -1, len - 1 bytes are copied from
+   * val without checking that val really is that long — confirm no
+   * caller passes a length larger than the actual string. */
+  len = (length != -1) ? (size_t)length + 1: strlen (val) + 1;
+
+  if (!_bson_append_element_header (b, type, name))
+    return FALSE;
+
+  /* String header: total byte count including the NUL, little-endian. */
+  _bson_append_int32 (b, GINT32_TO_LE (len));
+
+  b->data = g_byte_array_append (b->data, (const guint8 *)val, len - 1);
+  _bson_append_byte (b, 0); /* explicit terminator */
+
+  return TRUE;
+}
+
+/** @internal Append a document-like element to a BSON object.
+ *
+ * Arrays and documents are both similar, and differ very little:
+ * different type, and arrays have restrictions on key names (which
+ * are not enforced by this library).
+ *
+ * This convenience function can append both types.
+ *
+ * @param b is the BSON object to append to.
+ * @param type is the document-like type to append.
+ * @param name is the key name.
+ * @param doc is the document-like object to append.
+ *
+ * @note The @a doc must be a finished BSON object.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+static gboolean
+_bson_append_document_element (bson *b, bson_type type, const gchar *name,
+                               const bson *doc)
+{
+  gint32 doc_size;
+
+  /* bson_size() is negative for NULL or unfinished documents. */
+  doc_size = bson_size (doc);
+  if (doc_size < 0)
+    return FALSE;
+
+  if (!_bson_append_element_header (b, type, name))
+    return FALSE;
+
+  /* The embedded document is copied verbatim, length prefix and all. */
+  b->data = g_byte_array_append (b->data, bson_data (doc), doc_size);
+  return TRUE;
+}
+
+/** @internal Append a 64-bit integer to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param type is the int64-like type to append.
+ * @param name is the key name.
+ * @param i is the 64-bit value to append.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+static inline gboolean
+_bson_append_int64_element (bson *b, bson_type type, const gchar *name,
+                            gint64 i)
+{
+  if (_bson_append_element_header (b, type, name) == FALSE)
+    return FALSE;
+
+  /* The payload is stored little-endian. */
+  _bson_append_int64 (b, GINT64_TO_LE (i));
+  return TRUE;
+}
+
+/********************
+ * Public interface *
+ ********************/
+
+const gchar *
+bson_type_as_string (bson_type type)
+{
+  /* Table-driven mapping; unknown types yield NULL, exactly as the
+   * switch-based version did. */
+  static const struct
+  {
+    bson_type type;
+    const gchar *name;
+  } names[] =
+    {
+      { BSON_TYPE_NONE, "BSON_TYPE_NONE" },
+      { BSON_TYPE_DOUBLE, "BSON_TYPE_DOUBLE" },
+      { BSON_TYPE_STRING, "BSON_TYPE_STRING" },
+      { BSON_TYPE_DOCUMENT, "BSON_TYPE_DOCUMENT" },
+      { BSON_TYPE_ARRAY, "BSON_TYPE_ARRAY" },
+      { BSON_TYPE_BINARY, "BSON_TYPE_BINARY" },
+      { BSON_TYPE_UNDEFINED, "BSON_TYPE_UNDEFINED" },
+      { BSON_TYPE_OID, "BSON_TYPE_OID" },
+      { BSON_TYPE_BOOLEAN, "BSON_TYPE_BOOLEAN" },
+      { BSON_TYPE_UTC_DATETIME, "BSON_TYPE_UTC_DATETIME" },
+      { BSON_TYPE_NULL, "BSON_TYPE_NULL" },
+      { BSON_TYPE_REGEXP, "BSON_TYPE_REGEXP" },
+      { BSON_TYPE_DBPOINTER, "BSON_TYPE_DBPOINTER" },
+      { BSON_TYPE_JS_CODE, "BSON_TYPE_JS_CODE" },
+      { BSON_TYPE_SYMBOL, "BSON_TYPE_SYMBOL" },
+      { BSON_TYPE_JS_CODE_W_SCOPE, "BSON_TYPE_JS_CODE_W_SCOPE" },
+      { BSON_TYPE_INT32, "BSON_TYPE_INT32" },
+      { BSON_TYPE_TIMESTAMP, "BSON_TYPE_TIMESTAMP" },
+      { BSON_TYPE_INT64, "BSON_TYPE_INT64" },
+      { BSON_TYPE_MIN, "BSON_TYPE_MIN" },
+      { BSON_TYPE_MAX, "BSON_TYPE_MAX" }
+    };
+  gsize i;
+
+  for (i = 0; i < G_N_ELEMENTS (names); i++)
+    {
+      if (names[i].type == type)
+        return names[i].name;
+    }
+  return NULL;
+}
+
+bson *
+bson_new (void)
+{
+  /* An empty, unfinished BSON object; equivalent to a zero size hint. */
+  return bson_new_sized (0);
+}
+
+bson *
+bson_new_sized (gint32 size)
+{
+  bson *obj;
+
+  obj = g_new0 (bson, 1);
+  /* Reserve the caller's size hint plus room for the 4-byte length
+   * prefix and the trailing NUL, then write a length placeholder that
+   * bson_finish() patches later. */
+  obj->data = g_byte_array_sized_new (size + sizeof (gint32) +
+                                      sizeof (guint8));
+  _bson_append_int32 (obj, 0);
+
+  return obj;
+}
+
+bson *
+bson_new_from_data (const guint8 *data, gint32 size)
+{
+  bson *obj;
+
+  if (data == NULL || size <= 0)
+    return NULL;
+
+  obj = g_new0 (bson, 1);
+  /* One extra byte is reserved so that bson_finish() can append the
+   * closing NUL without reallocating. */
+  obj->data = g_byte_array_sized_new (size + sizeof (guint8));
+  obj->data = g_byte_array_append (obj->data, data, size);
+
+  return obj;
+}
+
+/** @internal Add a single element of any type to a BSON object.
+ *
+ * Used internally by bson_build() and bson_build_full(), this
+ * function adds a single element of any supported type to the target
+ * BSON object.
+ *
+ * @param b is the target BSON object.
+ * @param type is the element type to add.
+ * @param name is the key name.
+ * @param free_after signals whether to free the values after adding
+ * them.
+ * @param ap is the list of remaining parameters.
+ *
+ * @returns TRUE in @a single_result on success, FALSE otherwise.
+ */
+/* NOTE(review): comment only — the macro body below is unchanged.
+ * This must remain a macro rather than a helper function: it reports
+ * its result through the caller's local `single_result', and it
+ * consumes values with va_arg() so that `ap' advances in the caller's
+ * frame (passing a va_list into a function and using it there leaves
+ * the caller's copy in an unusable state). */
+#define _bson_build_add_single(b,type,name,free_after,ap)  \
+  {  \
+    single_result = TRUE;  \
+    switch (type)  \
+      {  \
+      case BSON_TYPE_NONE:  \
+      case BSON_TYPE_UNDEFINED:  \
+      case BSON_TYPE_DBPOINTER:  \
+        single_result = FALSE;  \
+        break;  \
+      case BSON_TYPE_MIN:  \
+      case BSON_TYPE_MAX:  \
+      default:  \
+        single_result = FALSE;  \
+        break;  \
+      case BSON_TYPE_DOUBLE:  \
+        {  \
+          gdouble d = (gdouble)va_arg (ap, gdouble);  \
+          bson_append_double (b, name, d);  \
+          break;  \
+        }  \
+      case BSON_TYPE_STRING:  \
+        {  \
+          gchar *s = (gchar *)va_arg (ap, gpointer);  \
+          gint32 l = (gint32)va_arg (ap, gint32);  \
+          bson_append_string (b, name, s, l);  \
+          if (free_after)  \
+            g_free (s);  \
+          break;  \
+        }  \
+      case BSON_TYPE_DOCUMENT:  \
+        {  \
+          bson *d = (bson *)va_arg (ap, gpointer);  \
+          if (free_after && bson_size (d) < 0)  \
+            bson_finish (d);  \
+          bson_append_document (b, name, d);  \
+          if (free_after)  \
+            bson_free (d);  \
+          break;  \
+        }  \
+      case BSON_TYPE_ARRAY:  \
+        {  \
+          bson *d = (bson *)va_arg (ap, gpointer);  \
+          if (free_after && bson_size (d) < 0)  \
+            bson_finish (d);  \
+          bson_append_array (b, name, d);  \
+          if (free_after)  \
+            bson_free (d);  \
+          break;  \
+        }  \
+      case BSON_TYPE_BINARY:  \
+        {  \
+          bson_binary_subtype s =  \
+            (bson_binary_subtype)va_arg (ap, guint);  \
+          guint8 *d = (guint8 *)va_arg (ap, gpointer);  \
+          gint32 l = (gint32)va_arg (ap, gint32);  \
+          bson_append_binary (b, name, s, d, l);  \
+          if (free_after)  \
+            g_free (d);  \
+          break;  \
+        }  \
+      case BSON_TYPE_OID:  \
+        {  \
+          guint8 *oid = (guint8 *)va_arg (ap, gpointer);  \
+          bson_append_oid (b, name, oid);  \
+          if (free_after)  \
+            g_free (oid);  \
+          break;  \
+        }  \
+      case BSON_TYPE_BOOLEAN:  \
+        {  \
+          gboolean v = (gboolean)va_arg (ap, guint);  \
+          bson_append_boolean (b, name, v);  \
+          break;  \
+        }  \
+      case BSON_TYPE_UTC_DATETIME:  \
+        {  \
+          gint64 ts = (gint64)va_arg (ap, gint64);  \
+          bson_append_utc_datetime (b, name, ts);  \
+          break;  \
+        }  \
+      case BSON_TYPE_NULL:  \
+        {  \
+          bson_append_null (b, name);  \
+          break;  \
+        }  \
+      case BSON_TYPE_REGEXP:  \
+        {  \
+          gchar *r = (gchar *)va_arg (ap, gpointer);  \
+          gchar *o = (gchar *)va_arg (ap, gpointer);  \
+          bson_append_regex (b, name, r, o);  \
+          if (free_after)  \
+            {  \
+              g_free (r);  \
+              g_free (o);  \
+            }  \
+          break;  \
+        }  \
+      case BSON_TYPE_JS_CODE:  \
+        {  \
+          gchar *s = (gchar *)va_arg (ap, gpointer);  \
+          gint32 l = (gint32)va_arg (ap, gint32);  \
+          bson_append_javascript (b, name, s, l);  \
+          if (free_after)  \
+            g_free (s);  \
+          break;  \
+        }  \
+      case BSON_TYPE_SYMBOL:  \
+        {  \
+          gchar *s = (gchar *)va_arg (ap, gpointer);  \
+          gint32 l = (gint32)va_arg (ap, gint32);  \
+          bson_append_symbol (b, name, s, l);  \
+          if (free_after)  \
+            g_free (s);  \
+          break;  \
+        }  \
+      case BSON_TYPE_JS_CODE_W_SCOPE:  \
+        {  \
+          gchar *s = (gchar *)va_arg (ap, gpointer);  \
+          gint32 l = (gint32)va_arg (ap, gint32);  \
+          bson *scope = (bson *)va_arg (ap, gpointer);  \
+          if (free_after && bson_size (scope) < 0)  \
+            bson_finish (scope);  \
+          bson_append_javascript_w_scope (b, name, s, l, scope);  \
+          if (free_after)  \
+            bson_free (scope);  \
+          break;  \
+        }  \
+      case BSON_TYPE_INT32:  \
+        {  \
+          gint32 l = (gint32)va_arg (ap, gint32);  \
+          bson_append_int32 (b, name, l);  \
+          break;  \
+        }  \
+      case BSON_TYPE_TIMESTAMP:  \
+        {  \
+          gint64 ts = (gint64)va_arg (ap, gint64);  \
+          bson_append_timestamp (b, name, ts);  \
+          break;  \
+        }  \
+      case BSON_TYPE_INT64:  \
+        {  \
+          gint64 l = (gint64)va_arg (ap, gint64);  \
+          bson_append_int64 (b, name, l);  \
+          break;  \
+        }  \
+      }  \
+  }
+
+bson *
+bson_build (bson_type type, const gchar *name, ...)
+{
+  va_list ap;
+  bson_type t;
+  const gchar *n;
+  bson *b;
+  gboolean single_result; /* written by _bson_build_add_single() */
+
+  b = bson_new ();
+  va_start (ap, name);
+  /* Append the first (type, name, value...) group. */
+  _bson_build_add_single (b, type, name, FALSE, ap);
+
+  if (!single_result)
+    {
+      bson_free (b);
+      va_end (ap);
+      return NULL;
+    }
+
+  /* Keep consuming (type, name, value...) groups; a 0 type terminates
+   * the argument list.  Any unsupported type aborts and returns NULL. */
+  while ((t = (bson_type)va_arg (ap, gint)))
+    {
+      n = (const gchar *)va_arg (ap, gpointer);
+      _bson_build_add_single (b, t, n, FALSE, ap);
+      if (!single_result)
+        {
+          bson_free (b);
+          va_end (ap);
+          return NULL;
+        }
+    }
+  va_end (ap);
+
+  return b;
+}
+
+bson *
+bson_build_full (bson_type type, const gchar *name, gboolean free_after, ...)
+{
+  va_list ap;
+  bson_type t;
+  const gchar *n;
+  gboolean f;
+  bson *b;
+  gboolean single_result; /* written by _bson_build_add_single() */
+
+  b = bson_new ();
+  va_start (ap, free_after);
+  _bson_build_add_single (b, type, name, free_after, ap);
+  if (!single_result)
+    {
+      bson_free (b);
+      va_end (ap);
+      return NULL;
+    }
+
+  /* Unlike bson_build(), every further group carries its own
+   * free_after flag: (type, name, free_after, value...).  A 0 type
+   * terminates the list. */
+  while ((t = (bson_type)va_arg (ap, gint)))
+    {
+      n = (const gchar *)va_arg (ap, gpointer);
+      f = (gboolean)va_arg (ap, gint);
+      _bson_build_add_single (b, t, n, f, ap);
+      if (!single_result)
+        {
+          bson_free (b);
+          va_end (ap);
+          return NULL;
+        }
+    }
+  va_end (ap);
+
+  return b;
+}
+
+gboolean
+bson_finish (bson *b)
+{
+  gint32 size;
+
+  if (!b)
+    return FALSE;
+
+  /* Finishing an already-finished object is a no-op. */
+  if (b->finished)
+    return TRUE;
+
+  /* Close the document with the trailing NUL... */
+  _bson_append_byte (b, 0);
+
+  /* ...and patch the 4-byte little-endian length prefix at the front.
+   * memcpy replaces the old "*(gint32 *)&data[0]" store, which
+   * accessed the guint8 buffer through an incompatible pointer type
+   * (a strict-aliasing violation). */
+  size = GINT32_TO_LE ((gint32) (b->data->len));
+  memcpy (&b->data->data[0], &size, sizeof (size));
+
+  b->finished = TRUE;
+
+  return TRUE;
+}
+
+gint32
+bson_size (const bson *b)
+{
+  /* -1 marks both "no object" and "not yet finished". */
+  return (b && b->finished) ? (gint32) b->data->len : -1;
+}
+
+const guint8 *
+bson_data (const bson *b)
+{
+  /* The raw byte stream is only valid once the object is finished. */
+  return (b && b->finished) ? b->data->data : NULL;
+}
+
+gboolean
+bson_reset (bson *b)
+{
+  if (b == NULL)
+    return FALSE;
+
+  /* Drop the contents and re-emit the length placeholder, returning
+   * the object to its freshly-constructed, unfinished state. */
+  g_byte_array_set_size (b->data, 0);
+  _bson_append_int32 (b, 0);
+  b->finished = FALSE;
+
+  return TRUE;
+}
+
+void
+bson_free (bson *b)
+{
+  /* NULL is accepted for convenience, like g_free(). */
+  if (!b)
+    return;
+
+  if (b->data)
+    g_byte_array_free (b->data, TRUE);
+  g_free (b);
+}
+
+gboolean
+bson_validate_key (const gchar *key, gboolean forbid_dots,
+                   gboolean no_dollar)
+{
+  gboolean valid = TRUE;
+
+  /* A NULL key is an error, reported via errno. */
+  if (key == NULL)
+    {
+      errno = EINVAL;
+      return FALSE;
+    }
+  errno = 0;
+
+  if (no_dollar && key[0] == '$')
+    valid = FALSE;
+  else if (forbid_dots && strchr (key, '.') != NULL)
+    valid = FALSE;
+
+  return valid;
+}
+
+/*
+ * Append elements
+ */
+
+gboolean
+bson_append_double (bson *b, const gchar *name, gdouble val)
+{
+  /* Convert to the little-endian wire representation up front. */
+  gdouble le_val = GDOUBLE_TO_LE (val);
+
+  if (!_bson_append_element_header (b, BSON_TYPE_DOUBLE, name))
+    return FALSE;
+
+  b->data = g_byte_array_append (b->data, (const guint8 *)&le_val,
+                                 sizeof (le_val));
+  return TRUE;
+}
+
+gboolean
+bson_append_string (bson *b, const gchar *name, const gchar *val,
+                    gint32 length)
+{
+  /* length == -1 means "use strlen (val)". */
+  return _bson_append_string_element (b, BSON_TYPE_STRING, name, val, length);
+}
+
/* Append a finished BSON document as an embedded document element. */
gboolean
bson_append_document (bson *b, const gchar *name, const bson *doc)
{
  return _bson_append_document_element (b, BSON_TYPE_DOCUMENT, name, doc);
}
+
/* Append a finished BSON document as an array element. Array key
 * conventions (numeric, increasing) are the caller's responsibility. */
gboolean
bson_append_array (bson *b, const gchar *name, const bson *array)
{
  return _bson_append_document_element (b, BSON_TYPE_ARRAY, name, array);
}
+
+gboolean
+bson_append_binary (bson *b, const gchar *name, bson_binary_subtype subtype,
+ const guint8 *data, gint32 size)
+{
+ if (!data || !size || size <= 0)
+ return FALSE;
+
+ if (!_bson_append_element_header (b, BSON_TYPE_BINARY, name))
+ return FALSE;
+
+ _bson_append_int32 (b, GINT32_TO_LE (size));
+ _bson_append_byte (b, (guint8)subtype);
+
+ b->data = g_byte_array_append (b->data, data, size);
+ return TRUE;
+}
+
/* Append a 12-byte ObjectID element. The caller must guarantee that
 * oid points to at least 12 bytes of data. */
gboolean
bson_append_oid (bson *b, const gchar *name, const guint8 *oid)
{
  if (!oid)
    return FALSE;

  if (!_bson_append_element_header (b, BSON_TYPE_OID, name))
    return FALSE;

  /* ObjectIDs have a fixed 12-byte size on the wire. */
  b->data = g_byte_array_append (b->data, oid, 12);
  return TRUE;
}
+
/* Append a boolean element, encoded as a single byte. */
gboolean
bson_append_boolean (bson *b, const gchar *name, gboolean value)
{
  if (!_bson_append_element_header (b, BSON_TYPE_BOOLEAN, name))
    return FALSE;

  _bson_append_byte (b, (guint8)value);
  return TRUE;
}
+
/* Append a UTC datetime element (milliseconds since the Unix epoch,
 * stored as a little-endian int64 by the shared helper). */
gboolean
bson_append_utc_datetime (bson *b, const gchar *name, gint64 ts)
{
  return _bson_append_int64_element (b, BSON_TYPE_UTC_DATETIME, name, ts);
}
+
/* Append a NULL element: just the element header, no payload. */
gboolean
bson_append_null (bson *b, const gchar *name)
{
  return _bson_append_element_header (b, BSON_TYPE_NULL, name);
}
+
+gboolean
+bson_append_regex (bson *b, const gchar *name, const gchar *regexp,
+ const gchar *options)
+{
+ if (!regexp || !options)
+ return FALSE;
+
+ if (!_bson_append_element_header (b, BSON_TYPE_REGEXP, name))
+ return FALSE;
+
+ b->data = g_byte_array_append (b->data, (const guint8 *)regexp,
+ strlen (regexp) + 1);
+ b->data = g_byte_array_append (b->data, (const guint8 *)options,
+ strlen (options) + 1);
+
+ return TRUE;
+}
+
/* Append a Javascript code element. @len may be -1 to use the full
 * NUL-terminated length of @js. */
gboolean
bson_append_javascript (bson *b, const gchar *name, const gchar *js,
                        gint32 len)
{
  return _bson_append_string_element (b, BSON_TYPE_JS_CODE, name, js, len);
}
+
/* Append a symbol element. @len may be -1 to use the full
 * NUL-terminated length of @symbol. */
gboolean
bson_append_symbol (bson *b, const gchar *name, const gchar *symbol,
                    gint32 len)
{
  return _bson_append_string_element (b, BSON_TYPE_SYMBOL, name, symbol, len);
}
+
/* Append a Javascript-with-scope element.
 * Wire layout: int32 total size, then a string element (int32 length
 * + code + NUL), then the finished scope document. */
gboolean
bson_append_javascript_w_scope (bson *b, const gchar *name,
                                const gchar *js, gint32 len,
                                const bson *scope)
{
  gint size;
  size_t length;

  /* The scope must be a finished document; len == -1 means "use the
   * full NUL-terminated length of js". */
  if (!js || !scope || bson_size (scope) < 0 || len < -1)
    return FALSE;

  if (!_bson_append_element_header (b, BSON_TYPE_JS_CODE_W_SCOPE, name))
    return FALSE;

  /* length counts the code's terminating NUL as well. */
  length = (len != -1) ? (size_t)len + 1: strlen (js) + 1;

  /* Total size: code string + its length prefix + the outer size
   * field itself + the scope document. */
  size = length + sizeof (gint32) + sizeof (gint32) + bson_size (scope);

  _bson_append_int32 (b, GINT32_TO_LE (size));

  /* Append the JS code */
  _bson_append_int32 (b, GINT32_TO_LE (length));
  b->data = g_byte_array_append (b->data, (const guint8 *)js, length - 1);
  _bson_append_byte (b, 0);

  /* Append the scope */
  b->data = g_byte_array_append (b->data, bson_data (scope),
                                 bson_size (scope));

  return TRUE;
}
+
+gboolean
+bson_append_int32 (bson *b, const gchar *name, gint32 i)
+{
+ if (!_bson_append_element_header (b, BSON_TYPE_INT32, name))
+ return FALSE;
+
+ _bson_append_int32 (b, GINT32_TO_LE (i));
+ return TRUE;
+ }
+
/* Append a timestamp element (4 bytes increment + 4 bytes timestamp,
 * packed into a gint64 by the caller; see header note). */
gboolean
bson_append_timestamp (bson *b, const gchar *name, gint64 ts)
{
  return _bson_append_int64_element (b, BSON_TYPE_TIMESTAMP, name, ts);
}
+
/* Append an 8-byte integer element, stored little-endian by the
 * shared helper. */
gboolean
bson_append_int64 (bson *b, const gchar *name, gint64 i)
{
  return _bson_append_int64_element (b, BSON_TYPE_INT64, name, i);
}
+
+/*
+ * Find & retrieve data
+ */
+bson_cursor *
+bson_cursor_new (const bson *b)
+{
+ bson_cursor *c;
+
+ if (bson_size (b) == -1)
+ return NULL;
+
+ c = (bson_cursor *)g_new0 (bson_cursor, 1);
+ c->obj = b;
+
+ return c;
+}
+
/* Free a cursor. The cursor does not own the underlying BSON object,
 * so only the cursor itself is released; g_free (NULL) is a no-op. */
void
bson_cursor_free (bson_cursor *c)
{
  g_free (c);
}
+
+/** @internal Figure out the block size of a given type.
+ *
+ * Provided a #bson_type and some raw data, figures out the length of
+ * the block, counted from right after the element name's position.
+ *
+ * @param type is the type of object we need the size for.
+ * @param data is the raw data (starting right after the element's
+ * name).
+ *
+ * @returns The size of the block, or -1 on error.
+ */
static gint32
_bson_get_block_size (bson_type type, const guint8 *data)
{
  glong l;

  switch (type)
    {
    /* 4-byte length prefix + string data (length includes the NUL). */
    case BSON_TYPE_STRING:
    case BSON_TYPE_JS_CODE:
    case BSON_TYPE_SYMBOL:
      return bson_stream_doc_size (data, 0) + sizeof (gint32);
    /* The leading int32 is the full size of these elements. */
    case BSON_TYPE_DOCUMENT:
    case BSON_TYPE_ARRAY:
    case BSON_TYPE_JS_CODE_W_SCOPE:
      return bson_stream_doc_size (data, 0);
    case BSON_TYPE_DOUBLE:
      return sizeof (gdouble);
    /* int32 payload length + 1 subtype byte + payload. */
    case BSON_TYPE_BINARY:
      return bson_stream_doc_size (data, 0) +
        sizeof (gint32) + sizeof (guint8);
    case BSON_TYPE_OID:
      return 12;
    case BSON_TYPE_BOOLEAN:
      return 1;
    case BSON_TYPE_UTC_DATETIME:
    case BSON_TYPE_TIMESTAMP:
    case BSON_TYPE_INT64:
      return sizeof (gint64);
    /* These types carry no payload at all. */
    case BSON_TYPE_NULL:
    case BSON_TYPE_UNDEFINED:
    case BSON_TYPE_MIN:
    case BSON_TYPE_MAX:
      return 0;
    /* Two consecutive NUL-terminated strings: pattern + options. */
    case BSON_TYPE_REGEXP:
      l = strlen((gchar *)data);
      return l + strlen((gchar *)(data + l + 1)) + 2;
    case BSON_TYPE_INT32:
      return sizeof (gint32);
    /* Deprecated: string (with length prefix) + 12-byte ObjectID. */
    case BSON_TYPE_DBPOINTER:
      return bson_stream_doc_size (data, 0) + sizeof (gint32) + 12;
    case BSON_TYPE_NONE:
    default:
      return -1;
    }
}
+
/* Advance the cursor to the next element; returns FALSE at the end
 * of the document (or on error), leaving the cursor in place. */
gboolean
bson_cursor_next (bson_cursor *c)
{
  const guint8 *d;
  gint32 pos, bs;

  if (!c)
    return FALSE;

  d = bson_data (c->obj);

  if (c->pos == 0)
    /* Fresh cursor: jump to the first element, right past the
     * 4-byte document length prefix. */
    pos = sizeof (guint32);
  else
    {
      /* Skip over the current element's value block. */
      bs = _bson_get_block_size (bson_cursor_type (c), d + c->value_pos);
      if (bs == -1)
        return FALSE;
      pos = c->value_pos + bs;
    }

  /* Stop at the document's trailing NUL byte. */
  if (pos >= bson_size (c->obj) - 1)
    return FALSE;

  c->pos = pos;
  /* Element layout: 1 type byte, NUL-terminated key, then value. */
  c->key = (gchar *) &d[c->pos + 1];
  c->value_pos = c->pos + strlen (c->key) + 2;

  return TRUE;
}
+
/** @internal Scan a BSON object for a key within [start_pos, end_pos).
 *
 * On a match, dest_c is positioned at the element and TRUE is
 * returned. With wrap_over set, the scan restarts once from the
 * first element up to start_pos before giving up.
 */
static inline gboolean
_bson_cursor_find (const bson *b, const gchar *name, size_t start_pos,
                   gint32 end_pos, gboolean wrap_over, bson_cursor *dest_c)
{
  gint32 pos = start_pos, bs;
  const guint8 *d;
  gint32 name_len;

  name_len = strlen (name);

  d = bson_data (b);

  while (pos < end_pos)
    {
      /* Element layout: 1 type byte, NUL-terminated key, value. */
      bson_type t = (bson_type) d[pos];
      const gchar *key = (gchar *) &d[pos + 1];
      gint32 key_len = strlen (key);
      gint32 value_pos = pos + key_len + 2;

      /* Compare lengths first so memcmp never over-reads. */
      if (key_len == name_len && memcmp (key, name, key_len) == 0)
        {
          dest_c->obj = b;
          dest_c->key = key;
          dest_c->pos = pos;
          dest_c->value_pos = value_pos;

          return TRUE;
        }
      bs = _bson_get_block_size (t, &d[value_pos]);
      if (bs == -1)
        return FALSE;
      pos = value_pos + bs;
    }

  /* Optionally retry from the first element up to where we started. */
  if (wrap_over)
    return _bson_cursor_find (b, name, sizeof (gint32), start_pos,
                              FALSE, dest_c);

  return FALSE;
}
+
/* Move the cursor to the named key, scanning forward from the
 * current position and wrapping around to the start if needed. */
gboolean
bson_cursor_find (bson_cursor *c, const gchar *name)
{
  if (!c || !name)
    return FALSE;

  return _bson_cursor_find (c->obj, name, c->pos, bson_size (c->obj) - 1,
                            TRUE, c);
}
+
/* Move the cursor to the named key, scanning forward only (no
 * wrap-around); the cursor stays put when the key is not found. */
gboolean
bson_cursor_find_next (bson_cursor *c, const gchar *name)
{
  if (!c || !name)
    return FALSE;

  return _bson_cursor_find (c->obj, name, c->pos, bson_size (c->obj) - 1,
                            FALSE, c);
}
+
+bson_cursor *
+bson_find (const bson *b, const gchar *name)
+{
+ bson_cursor *c;
+
+ if (bson_size (b) == -1 || !name)
+ return NULL;
+
+ c = bson_cursor_new (b);
+ if (_bson_cursor_find (b, name, sizeof (gint32), bson_size (c->obj) - 1,
+ FALSE, c))
+ return c;
+ bson_cursor_free (c);
+ return NULL;
+}
+
+bson_type
+bson_cursor_type (const bson_cursor *c)
+{
+ if (!c || c->pos < sizeof (gint32))
+ return BSON_TYPE_NONE;
+
+ return (bson_type)(bson_data (c->obj)[c->pos]);
+}
+
+const gchar *
+bson_cursor_type_as_string (const bson_cursor *c)
+{
+ if (!c || c->pos < sizeof (gint32))
+ return NULL;
+
+ return bson_type_as_string (bson_cursor_type (c));
+}
+
+const gchar *
+bson_cursor_key (const bson_cursor *c)
+{
+ if (!c)
+ return NULL;
+
+ return c->key;
+}
+
/** @internal Convenience macro to verify a cursor's type.
 *
 * Verifies that the cursor's type is the same as the type requested
 * by the caller, and returns FALSE if there is a mismatch.
 *
 * @note The macro expands to a bare if-return, so it can only be
 * used inside functions returning gboolean, and cannot be wrapped
 * in do-while(0) because of the early return. bson_cursor_type()
 * tolerates a NULL cursor, so callers need no separate NULL check
 * on @a c.
 */
#define BSON_CURSOR_CHECK_TYPE(c,type) \
  if (bson_cursor_type(c) != type) \
    return FALSE;
+
+gboolean
+bson_cursor_get_string (const bson_cursor *c, const gchar **dest)
+{
+ if (!dest)
+ return FALSE;
+
+ BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_STRING);
+
+ *dest = (gchar *)(bson_data (c->obj) + c->value_pos + sizeof (gint32));
+
+ return TRUE;
+}
+
/* Retrieve a double value, converting from the little-endian wire
 * representation. memcpy avoids unaligned access. */
gboolean
bson_cursor_get_double (const bson_cursor *c, gdouble *dest)
{
  if (!dest)
    return FALSE;

  BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_DOUBLE);

  memcpy (dest, bson_data (c->obj) + c->value_pos, sizeof (gdouble));
  *dest = GDOUBLE_FROM_LE (*dest);

  return TRUE;
}
+
/* Retrieve an embedded document as a newly allocated, finished bson
 * object; the caller owns *dest and must bson_free() it. */
gboolean
bson_cursor_get_document (const bson_cursor *c, bson **dest)
{
  bson *b;
  gint32 size;

  if (!dest)
    return FALSE;

  BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_DOCUMENT);

  /* Copy only the payload: strip the 4-byte length prefix and the
   * trailing NUL, both of which bson_finish() will regenerate. */
  size = bson_stream_doc_size (bson_data(c->obj), c->value_pos) -
    sizeof (gint32) - 1;
  b = bson_new_sized (size);
  b->data = g_byte_array_append (b->data,
                                 bson_data (c->obj) + c->value_pos +
                                 sizeof (gint32), size);
  bson_finish (b);

  *dest = b;

  return TRUE;
}
+
/* Retrieve an array as a newly allocated, finished bson object; the
 * caller owns *dest and must bson_free() it. Mirrors
 * bson_cursor_get_document(), differing only in the expected type. */
gboolean
bson_cursor_get_array (const bson_cursor *c, bson **dest)
{
  bson *b;
  gint32 size;

  if (!dest)
    return FALSE;

  BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_ARRAY);

  /* Copy only the payload: strip the 4-byte length prefix and the
   * trailing NUL, both of which bson_finish() will regenerate. */
  size = bson_stream_doc_size (bson_data(c->obj), c->value_pos) -
    sizeof (gint32) - 1;
  b = bson_new_sized (size);
  b->data = g_byte_array_append (b->data,
                                 bson_data (c->obj) + c->value_pos +
                                 sizeof (gint32), size);
  bson_finish (b);

  *dest = b;

  return TRUE;
}
+
/* Retrieve a binary element's subtype, payload pointer and size.
 * *data aims into the object's buffer and must not be freed. */
gboolean
bson_cursor_get_binary (const bson_cursor *c,
                        bson_binary_subtype *subtype,
                        const guint8 **data, gint32 *size)
{
  if (!subtype || !size || !data)
    return FALSE;

  BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_BINARY);

  /* Wire layout: int32 payload size, 1 subtype byte, payload. */
  *size = bson_stream_doc_size (bson_data(c->obj), c->value_pos);
  *subtype = (bson_binary_subtype)(bson_data (c->obj)[c->value_pos +
                                                      sizeof (gint32)]);
  *data = (guint8 *)(bson_data (c->obj) + c->value_pos + sizeof (gint32) + 1);

  return TRUE;
}
+
+gboolean
+bson_cursor_get_oid (const bson_cursor *c, const guint8 **dest)
+{
+ if (!dest)
+ return FALSE;
+
+ BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_OID);
+
+ *dest = (guint8 *)(bson_data (c->obj) + c->value_pos);
+
+ return TRUE;
+}
+
/* Retrieve a boolean value, stored as a single byte on the wire. */
gboolean
bson_cursor_get_boolean (const bson_cursor *c, gboolean *dest)
{
  if (!dest)
    return FALSE;

  BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_BOOLEAN);

  *dest = (gboolean)(bson_data (c->obj) + c->value_pos)[0];

  return TRUE;
}
+
/* Retrieve a UTC datetime (milliseconds since the Unix epoch),
 * converting from the little-endian wire representation. */
gboolean
bson_cursor_get_utc_datetime (const bson_cursor *c,
                              gint64 *dest)
{
  if (!dest)
    return FALSE;

  BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_UTC_DATETIME);

  /* memcpy avoids unaligned access into the byte stream. */
  memcpy (dest, bson_data (c->obj) + c->value_pos, sizeof (gint64));
  *dest = GINT64_FROM_LE (*dest);

  return TRUE;
}
+
/* Retrieve a regexp element: pattern and option strings. Both
 * pointers aim into the object's buffer and must not be freed. */
gboolean
bson_cursor_get_regex (const bson_cursor *c, const gchar **regex,
                       const gchar **options)
{
  if (!regex || !options)
    return FALSE;

  BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_REGEXP);

  /* The options string starts right after the pattern's NUL. */
  *regex = (gchar *)(bson_data (c->obj) + c->value_pos);
  *options = (gchar *)(*regex + strlen(*regex) + 1);

  return TRUE;
}
+
/* Retrieve a Javascript code string, skipping the 4-byte length
 * prefix; the pointer aims into the object's buffer. */
gboolean
bson_cursor_get_javascript (const bson_cursor *c, const gchar **dest)
{
  if (!dest)
    return FALSE;

  BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_JS_CODE);

  *dest = (gchar *)(bson_data (c->obj) + c->value_pos + sizeof (gint32));

  return TRUE;
}
+
/* Retrieve a symbol string, skipping the 4-byte length prefix; the
 * pointer aims into the object's buffer. */
gboolean
bson_cursor_get_symbol (const bson_cursor *c, const gchar **dest)
{
  if (!dest)
    return FALSE;

  BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_SYMBOL);

  *dest = (gchar *)(bson_data (c->obj) + c->value_pos + sizeof (gint32));

  return TRUE;
}
+
/* Retrieve a Javascript-with-scope element: *js aims into the
 * object's buffer, *scope is a newly allocated, finished bson object
 * that the caller owns and must bson_free(). */
gboolean
bson_cursor_get_javascript_w_scope (const bson_cursor *c,
                                    const gchar **js,
                                    bson **scope)
{
  bson *b;
  gint32 size, docpos;

  if (!js || !scope)
    return FALSE;

  BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_JS_CODE_W_SCOPE);

  /* Offset of the scope document, relative to value_pos: the code
   * string's length plus the two int32 prefixes (total size and
   * string length). */
  docpos = bson_stream_doc_size (bson_data (c->obj),
                                 c->value_pos + sizeof (gint32)) +
    sizeof (gint32) * 2;
  /* Copy the scope's payload only; bson_finish() regenerates the
   * length prefix and trailing NUL. */
  size = bson_stream_doc_size (bson_data (c->obj), c->value_pos + docpos) -
    sizeof (gint32) - 1;
  b = bson_new_sized (size);
  b->data = g_byte_array_append (b->data,
                                 bson_data (c->obj) + c->value_pos + docpos +
                                 sizeof (gint32), size);
  bson_finish (b);

  *scope = b;
  /* The code string starts after the two int32 prefixes. */
  *js = (gchar *)(bson_data (c->obj) + c->value_pos + sizeof (gint32) * 2);

  return TRUE;
}
+
+gboolean
+bson_cursor_get_int32 (const bson_cursor *c, gint32 *dest)
+{
+ if (!dest)
+ return FALSE;
+
+ BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_INT32);
+
+ memcpy (dest, bson_data (c->obj) + c->value_pos, sizeof (gint32));
+ *dest = GINT32_FROM_LE (*dest);
+
+ return TRUE;
+}
+
/* Retrieve a timestamp (increment + seconds packed in a gint64),
 * converting from the little-endian wire representation. */
gboolean
bson_cursor_get_timestamp (const bson_cursor *c, gint64 *dest)
{
  if (!dest)
    return FALSE;

  BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_TIMESTAMP);

  /* memcpy avoids unaligned access into the byte stream. */
  memcpy (dest, bson_data (c->obj) + c->value_pos, sizeof (gint64));
  *dest = GINT64_FROM_LE (*dest);

  return TRUE;
}
+
/* Retrieve a 64-bit integer, converting from the little-endian wire
 * representation. */
gboolean
bson_cursor_get_int64 (const bson_cursor *c, gint64 *dest)
{
  if (!dest)
    return FALSE;

  BSON_CURSOR_CHECK_TYPE (c, BSON_TYPE_INT64);

  /* memcpy avoids unaligned access into the byte stream. */
  memcpy (dest, bson_data (c->obj) + c->value_pos, sizeof (gint64));
  *dest = GINT64_FROM_LE (*dest);

  return TRUE;
}
diff --git a/src/bson.h b/src/bson.h
new file mode 100644
index 0000000..9349ea9
--- /dev/null
+++ b/src/bson.h
@@ -0,0 +1,856 @@
+/* bson.h - libmongo-client's BSON implementation
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/bson.h
+ * The BSON API's public header.
+ */
+
+#ifndef LIBMONGO_CLIENT_BSON_H
+#define LIBMONGO_CLIENT_BSON_H 1
+
+#include <glib.h>
+#include <string.h>
+
+G_BEGIN_DECLS
+
+/** @defgroup bson_mod BSON
+ *
+ * The types, functions and everything else within this module is
+ * meant to allow one to work with BSON objects easily.
+ *
+ * @addtogroup bson_mod
+ * @{
+ */
+
+/** @defgroup bson_types Types
+ *
+ * @addtogroup bson_types
+ * @{
+ */
+
+/** An opaque BSON object.
+ * A BSON object represents a full BSON document, as specified at
+ * http://bsonspec.org/.
+ *
+ * Each object has two states: open and finished. While the document
+ * is open, it can be appended to, but it cannot be read from. While
+ * it is finished, it can be read from, and iterated over, but cannot
+ * be appended to.
+ */
+typedef struct _bson bson;
+
+/** Opaque BSON cursor.
+ * Cursors are used to represent a single entry within a BSON object,
+ * and to help iterating over said document.
+ */
+typedef struct _bson_cursor bson_cursor;
+
+/** Supported BSON object types.
+ */
+typedef enum
+ {
+ BSON_TYPE_NONE = 0, /**< Only used for errors */
+ BSON_TYPE_DOUBLE = 0x01, /**< 8byte double */
+ BSON_TYPE_STRING, /**< 4byte length + NULL terminated string */
+ BSON_TYPE_DOCUMENT, /**< 4byte length + NULL terminated document */
+ BSON_TYPE_ARRAY, /**< 4byte length + NULL terminated document */
+ BSON_TYPE_BINARY, /**< 4byte length + 1byte subtype + data */
+ BSON_TYPE_UNDEFINED, /* Deprecated*/
+ BSON_TYPE_OID, /**< 12byte ObjectID */
+ BSON_TYPE_BOOLEAN, /**< 1byte boolean value */
+ BSON_TYPE_UTC_DATETIME, /**< 8byte timestamp; milliseconds since
+ Unix epoch */
+ BSON_TYPE_NULL, /**< NULL value, No following data. */
+ BSON_TYPE_REGEXP, /**< Two NULL terminated C strings, the regex
+ itself, and the options. */
+ BSON_TYPE_DBPOINTER, /* Deprecated */
+ BSON_TYPE_JS_CODE, /**< 4byte length + NULL terminated string */
+ BSON_TYPE_SYMBOL, /**< 4byte length + NULL terminated string */
+ BSON_TYPE_JS_CODE_W_SCOPE, /**< 4byte length, followed by a
+ string and a document */
+ BSON_TYPE_INT32, /**< 4byte integer */
+ BSON_TYPE_TIMESTAMP, /**< 4bytes increment + 4bytes timestamp */
+ BSON_TYPE_INT64, /**< 8byte integer */
+ BSON_TYPE_MIN = 0xff,
+ BSON_TYPE_MAX = 0x7f
+ } bson_type;
+
+/** Return a type's stringified name.
+ *
+ * @param type is the type to stringify.
+ *
+ * @returns The stringified type, or NULL on error.
+ */
+const gchar *bson_type_as_string (bson_type type);
+
+/** Supported BSON binary subtypes.
+ */
+typedef enum
+ {
+ BSON_BINARY_SUBTYPE_GENERIC = 0x00, /**< The Generic subtype, the
+ default. */
+ BSON_BINARY_SUBTYPE_FUNCTION = 0x01, /**< Binary representation
+ of a function. */
+ BSON_BINARY_SUBTYPE_BINARY = 0x02, /**< Obsolete, do not use. */
+ BSON_BINARY_SUBTYPE_UUID = 0x03, /**< Binary representation of an
+ UUID. */
+ BSON_BINARY_SUBTYPE_MD5 = 0x05, /**< Binary representation of an
+ MD5 sum. */
+ BSON_BINARY_SUBTYPE_USER_DEFINED = 0x80 /**< User defined data,
+ nothing's known about
+ the structure. */
+ } bson_binary_subtype;
+
+/** @} */
+
+/** @defgroup bson_object_access Object Access
+ *
+ * Functions that operate on whole BSON objects.
+ *
+ * @addtogroup bson_object_access
+ * @{
+ */
+
+/** Create a new BSON object.
+ *
+ * @note The created object will have no memory pre-allocated for data,
+ * resulting in possibly more reallocations than necessary when
+ * appending elements.
+ *
+ * @note If at all possible, use bson_new_sized() instead.
+ *
+ * @returns A newly allocated object, or NULL on error.
+ */
+bson *bson_new (void);
+
+/** Create a new BSON object, preallocating a given amount of space.
+ *
+ * Creates a new BSON object, pre-allocating @a size bytes of space
+ * for the data.
+ *
+ * @param size is the space to pre-allocate for data.
+ *
+ * @note It is not an error to pre-allocate either less, or more space
+ * than what will really end up being added. Pre-allocation does not
+ * set the size of the final object, it is merely a hint, a way to
+ * help the system avoid memory reallocations.
+ *
+ * @returns A newly allocated object, or NULL on error.
+ */
+bson *bson_new_sized (gint32 size);
+
+/** Create a BSON object from existing data.
+ *
+ * In order to be able to parse existing BSON, one must load it up
+ * into a bson object - and this function does just that.
+ *
+ * @note Objects created by this function are not final objects, in
+ * order to be able to extend them. As such, when importing existing
+ * BSON data, which are terminated by a zero byte, specify the size as
+ * one smaller than the original data stream.
+ *
+ * @note This is because bson_finish() will append a zero byte, thus
+ * one would end up with an invalid document if it had an extra one.
+ *
+ * @param data is the BSON byte stream to import.
+ * @param size is the size of the byte stream.
+ *
+ * @returns A newly allocated object, with a copy of @a data as its
+ * contents.
+ */
+bson *bson_new_from_data (const guint8 *data, gint32 size);
+
+/** Build a BSON object in one go, with full control.
+ *
+ * This function can be used to build a BSON object in one simple
+ * step, chaining all the elements together (including sub-documents,
+ * created by this same function - more about that later).
+ *
+ * One has to specify the type, the key name, and whether he wants to
+ * see the added object free'd after addition. Each element type is
+ * freed appropriately, and documents and arrays are finished before
+ * addition, if they're to be freed afterwards.
+ *
+ * This way of operation allows one to build a full BSON object, even
+ * with embedded documents, without leaking memory.
+ *
+ * After the three required parameters, one will need to list the data
+ * itself, in the same order as one would if he'd add with the
+ * bson_append family of functions.
+ *
+ * The list must be closed with a #BSON_TYPE_NONE element, and the @a
+ * name and @a free_after parameters are not needed for the closing
+ * entry.
+ *
+ * @param type is the element type we'll be adding.
+ * @param name is the key name.
+ * @param free_after determines whether the original variable will be
+ * freed after adding it to the BSON object.
+ *
+ * @returns A newly allocated, unfinished BSON object, which must be
+ * finalized and freed, once not needed anymore, by the caller. Or
+ * NULL on error.
+ */
+bson *bson_build_full (bson_type type, const gchar *name,
+ gboolean free_after, ...);
+
+/** Build a BSON object in one go.
+ *
+ * Very similar to bson_build_full(), so much so, that it's exactly
+ * the same, except that the @a free_after parameter is always FALSE,
+ * and must not be specified in this case.
+ *
+ * @param type is the element type we'll be adding.
+ * @param name is the key name.
+ *
+ * @returns A newly allocated, unfinished BSON object, which must be
+ * finalized and freed, once not needed anymore, by the caller. Or
+ * NULL on error.
+ */
+bson *bson_build (bson_type type, const gchar *name, ...);
+
+/** Finish a BSON object.
+ *
+ * Terminate a BSON object. This includes appending the trailing zero
+ * byte and finalising the length of the object.
+ *
+ * The object cannot be appended to after it is finalised.
+ *
+ * @param b is the BSON object to close & finish.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_finish (bson *b);
+
+/** Reset a BSON object.
+ *
+ * Resetting a BSON object clears the finished status, and sets its
+ * size to zero. Resetting is most useful when one wants to keep the
+ * already allocated memory around for reuse.
+ *
+ * @param b is the BSON object to reset.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_reset (bson *b);
+
+/** Free the memory associated with a BSON object.
+ *
+ * Frees up all memory associated with a BSON object. The variable
+ * shall not be used afterwards.
+ *
+ * @param b is the BSON object to free.
+ */
+void bson_free (bson *b);
+
+/** Return the size of a finished BSON object.
+ *
+ * @param b is the finished BSON object.
+ *
+ * @returns The size of the document, or -1 on error.
+ *
+ * @note Trying to get the size of a BSON object that has not been
+ * closed by bson_finish() is considered an error.
+ */
+gint32 bson_size (const bson *b);
+
+/** Return the raw bytestream form of the BSON object.
+ *
+ * @param b is the BSON object to retrieve data from.
+ *
+ * @returns The raw datastream or NULL on error. The stream shall
+ * not be freed.
+ *
+ * @note Trying to retrieve the data of an unfinished BSON object is
+ * considered an error.
+ */
+const guint8 *bson_data (const bson *b);
+
+/** Validate a BSON key.
+ *
+ * Verifies that a given key is a valid BSON field name. Depending on
+ * context (togglable by the boolean flags) this means that the string
+ * must either be free of dots, or must not start with a dollar sign.
+ *
+ * @param key is the field name to validate.
+ * @param forbid_dots toggles whether to disallow dots in the name
+ * altogether.
+ * @param no_dollar toggles whether to forbid key names starting with
+ * a dollar sign.
+ *
+ * @returns TRUE if the field name is found to be valid, FALSE
+ * otherwise.
+ *
+ * @note This function does NOT do UTF-8 validation. That is left up
+ * to the application.
+ */
+gboolean bson_validate_key (const gchar *key, gboolean forbid_dots,
+ gboolean no_dollar);
+
+/** Reads out the 32-bit documents size from a BSON bytestream.
+ *
+ * This function can be used when reading data from a stream, and one
+ * wants to build a BSON object from the bytestream: for
+ * bson_new_from_data(), one needs the length. This function provides
+ * that.
+ *
+ * @param doc is the byte stream to check the size of.
+ * @param pos is the position in the bytestream to start reading at.
+ *
+ * @returns The size of the document at the appropriate position.
+ *
+ * @note The byte stream is expected to be in little-endian byte
+ * order.
+ */
static __inline__ gint32 bson_stream_doc_size (const guint8 *doc, gint32 pos)
{
  gint32 size;

  /* memcpy avoids unaligned reads; the stream is little-endian. */
  memcpy (&size, doc + pos, sizeof (gint32));
  return GINT32_FROM_LE (size);
}
+
+/** @} */
+
+/** @defgroup bson_append Appending
+ *
+ * @brief Functions to append various kinds of elements to existing
+ * BSON objects.
+ *
+ * Every such function expects the BSON object to be open, and will
+ * return FALSE immediately if it finds that the object has had
+ * bson_finish() called on it before.
+ *
+ * The only way to append to a finished BSON object is to @a clone it
+ * with bson_new_from_data(), and append to the newly created object.
+ *
+ * @addtogroup bson_append
+ * @{
+ */
+
+/** Append a string to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param val is the value to append.
+ * @param length is the length of value. Use @a -1 to use the full
+ * string supplied as @a val.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_string (bson *b, const gchar *name, const gchar *val,
+ gint32 length);
+
+/** Append a double to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param d is the double value to append.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_double (bson *b, const gchar *name, gdouble d);
+
+/** Append a BSON document to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param doc is the BSON document to append.
+ *
+ * @note @a doc MUST be a finished BSON document.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_document (bson *b, const gchar *name, const bson *doc);
+
+/** Append a BSON array to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param array is the BSON array to append.
+ *
+ * @note @a array MUST be a finished BSON document.
+ *
+ * @note The difference between plain documents and arrays - as far as
+ * this library is concerned, and apart from the type - is that array
+ * keys must be numbers in increasing order. However, no extra care is
+ * taken to verify that: it is the responsibility of the caller to set
+ * the array up appropriately.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_array (bson *b, const gchar *name, const bson *array);
+
+/** Append a BSON binary blob to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param subtype is the BSON binary subtype to use.
+ * @param data is a pointer to the blob data.
+ * @param size is the size of the blob.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_binary (bson *b, const gchar *name,
+ bson_binary_subtype subtype,
+ const guint8 *data, gint32 size);
+
+/** Append an ObjectID to a BSON object.
+ *
+ * ObjectIDs are 12 byte values, the first four being a timestamp in
+ * big endian byte order, the next three a machine ID, then two bytes
+ * for the PID, and finally three bytes of sequence number, in big
+ * endian byte order again.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param oid is the ObjectID to append.
+ *
+ * @note The OID must be 12 bytes long, and formatting it
+ * appropriately is the responsibility of the caller.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_oid (bson *b, const gchar *name, const guint8 *oid);
+
+/** Append a boolean to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param value is the boolean value to append.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_boolean (bson *b, const gchar *name, gboolean value);
+
+/** Append an UTC datetime to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param ts is the UTC timestamp: the number of milliseconds since
+ * the Unix epoch.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_utc_datetime (bson *b, const gchar *name, gint64 ts);
+
+/** Append a NULL value to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_null (bson *b, const gchar *name);
+
+/** Append a regexp object to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param regexp is the regexp string itself.
+ * @param options represents the regexp options, serialised to a
+ * string.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_regex (bson *b, const gchar *name, const gchar *regexp,
+ const gchar *options);
+
+/** Append Javascript code to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param js is the javascript code as a C string.
+ * @param len is the length of the code, use @a -1 to use the full
+ * length of the string supplied in @a js.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_javascript (bson *b, const gchar *name, const gchar *js,
+ gint32 len);
+
+/** Append a symbol to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param symbol is the symbol to append.
+ * @param len is the length of the code, use @a -1 to use the full
+ * length of the string supplied in @a symbol.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_symbol (bson *b, const gchar *name, const gchar *symbol,
+ gint32 len);
+
+/** Append Javascript code (with scope) to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param js is the javascript code as a C string.
+ * @param len is the length of the code, use @a -1 to use the full
+ * length of the string supplied in @a js.
+ * @param scope is scope to evaluate the javascript code in.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_javascript_w_scope (bson *b, const gchar *name,
+ const gchar *js, gint32 len,
+ const bson *scope);
+
+/** Append a 32-bit integer to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param i is the integer to append.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_int32 (bson *b, const gchar *name, gint32 i);
+
+/** Append a timestamp to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param ts is the timestamp to append.
+ *
+ * @note The ts param should consist of 4 bytes of increment,
+ * followed by 4 bytes of timestamp. It is the responsibility of the
+ * caller to set the variable up appropriately.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_timestamp (bson *b, const gchar *name, gint64 ts);
+
+/** Append a 64-bit integer to a BSON object.
+ *
+ * @param b is the BSON object to append to.
+ * @param name is the key name.
+ * @param i is the integer to append.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_append_int64 (bson *b, const gchar *name, gint64 i);
+
+/** @} */
+
+/** @defgroup bson_cursor Cursor & Retrieval
+ *
+ * This section documents the cursors, and the data retrieval
+ * functions. Each and every function here operates on finished BSON
+ * objects, and will return with an error if passed an open object.
+ *
+ * Data can be retrieved from cursors, which in turn point to a
+ * specific part of the BSON object.
+ *
+ * The idea is to place the cursor to the appropriate key first, then
+ * retrieve the data stored there. Trying to retrieve data that is of
+ * different type than what the cursor points to results in an error.
+ *
+ * Functions to iterate to the next key, and retrieve the current
+ * key's name are also provided.
+ *
+ * @addtogroup bson_cursor
+ * @{
+ */
+
+/** Create a new cursor.
+ *
+ * Creates a new cursor, and positions it to the beginning of the
+ * supplied BSON object.
+ *
+ * @param b is the BSON object to create a cursor for.
+ *
+ * @returns A newly allocated cursor, or NULL on error.
+ */
+bson_cursor *bson_cursor_new (const bson *b);
+
+/** Create a new cursor positioned at a given key.
+ *
+ * Creates a new cursor, and positions it to the supplied key within
+ * the BSON object.
+ *
+ * @param b is the BSON object to create a cursor for.
+ * @param name is the key name to position to.
+ *
+ * @returns A newly allocated cursor, or NULL on error.
+ */
+bson_cursor *bson_find (const bson *b, const gchar *name);
+
+/** Delete a cursor, and free up all resources used by it.
+ *
+ * @param c is the cursor to free.
+ */
+void bson_cursor_free (bson_cursor *c);
+
+/** Position the cursor to the next key.
+ *
+ * @param c is the cursor to move forward.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_next (bson_cursor *c);
+
+/** Move the cursor to a given key, past the current one.
+ *
+ * Scans the BSON object past the current key, in search for the
+ * specified one, and positions the cursor there if found, leaves it
+ * in place if not.
+ *
+ * @param c is the cursor to move forward.
+ * @param name is the key name to position to.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_find_next (bson_cursor *c, const gchar *name);
+
+/** Move the cursor to a given key
+ *
+ * Like bson_cursor_find_next(), this function will start scanning the
+ * BSON object at the current position. If the key is not found after
+ * it, it will wrap over and search up to the original position.
+ *
+ * @param c is the cursor to move.
+ * @param name is the key name to position to.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_find (bson_cursor *c, const gchar *name);
+
+/** Determine the type of the current element.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ *
+ * @returns The type of the element, or #BSON_TYPE_NONE on error.
+ */
+bson_type bson_cursor_type (const bson_cursor *c);
+
+/** Retrieve the type of the current element, as string.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ *
+ * @returns The type of the element, as string, or NULL on error.
+ *
+ * @note The string points to an internal structure, it should not be
+ * freed or modified.
+ */
+const gchar *bson_cursor_type_as_string (const bson_cursor *c);
+
+/** Determine the name of the current element's key.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ *
+ * @returns The name of the key, or NULL on error.
+ *
+ * @note The name is a pointer to an internal string, one must NOT
+ * free it.
+ */
+const gchar *bson_cursor_key (const bson_cursor *c);
+
+/** Get the value stored at the cursor, as string.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param dest is a pointer to a variable where the value can be
+ * stored.
+ *
+ * @note The @a dest pointer will be set to point to an internal
+ * structure, and must not be freed or modified by the caller.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_string (const bson_cursor *c, const gchar **dest);
+
+/** Get the value stored at the cursor, as a double.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param dest is a pointer to a variable where the value can be
+ * stored.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_double (const bson_cursor *c, gdouble *dest);
+
+/** Get the value stored at the cursor, as a BSON document.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param dest is a pointer to a variable where the value can be
+ * stored.
+ *
+ * @note The @a dest pointer will be a newly allocated, finished
+ * object: it is the responsibility of the caller to free it.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_document (const bson_cursor *c, bson **dest);
+
+/** Get the value stored at the cursor, as a BSON array.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param dest is a pointer to a variable where the value can be
+ * stored.
+ *
+ * @note The @a dest pointer will be a newly allocated, finished
+ * object: it is the responsibility of the caller to free it.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_array (const bson_cursor *c, bson **dest);
+
+/** Get the value stored at the cursor, as binary data.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param subtype is a pointer to store the binary subtype at.
+ * @param data is a pointer to where the data shall be stored.
+ * @param size is a pointer to store the size at.
+ *
+ * @note The @a data pointer will be pointing to an internal
+ * structure, it must not be freed or modified.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_binary (const bson_cursor *c,
+ bson_binary_subtype *subtype,
+ const guint8 **data, gint32 *size);
+
+/** Get the value stored at the cursor, as an ObjectID.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param dest is a pointer to a variable where the value can be
+ * stored.
+ *
+ * @note The @a dest pointer will be set to point to an internal
+ * structure, and must not be freed or modified by the caller.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_oid (const bson_cursor *c, const guint8 **dest);
+
+/** Get the value stored at the cursor, as a boolean.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param dest is a pointer to a variable where the value can be
+ * stored.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_boolean (const bson_cursor *c, gboolean *dest);
+
+/** Get the value stored at the cursor, as an UTC datetime.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param dest is a pointer to a variable where the value can be
+ * stored.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_utc_datetime (const bson_cursor *c, gint64 *dest);
+
+/** Get the value stored at the cursor, as a regexp.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param regex is a pointer to a variable where the regex can be
+ * stored.
+ * @param options is a pointer to a variable where the options can be
+ * stored.
+ *
+ * @note Both the @a regex and @a options pointers will be set to
+ * point to an internal structure, and must not be freed or modified
+ * by the caller.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_regex (const bson_cursor *c, const gchar **regex,
+ const gchar **options);
+
+/** Get the value stored at the cursor, as javascript code.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param dest is a pointer to a variable where the value can be
+ * stored.
+ *
+ * @note The @a dest pointer will be set to point to an internal
+ * structure, and must not be freed or modified by the caller.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_javascript (const bson_cursor *c, const gchar **dest);
+
+/** Get the value stored at the cursor, as a symbol.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param dest is a pointer to a variable where the value can be
+ * stored.
+ *
+ * @note The @a dest pointer will be set to point to an internal
+ * structure, and must not be freed or modified by the caller.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_symbol (const bson_cursor *c, const gchar **dest);
+
+/** Get the value stored at the cursor, as javascript code w/ scope.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param js is a pointer to a variable where the javascript code can
+ * be stored.
+ * @param scope is a pointer to a variable where the scope can be
+ * stored.
+ *
+ * @note The @a scope pointer will be a newly allocated, finished
+ * BSON object: it is the responsibility of the caller to free it.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_javascript_w_scope (const bson_cursor *c,
+ const gchar **js,
+ bson **scope);
+
+/** Get the value stored at the cursor, as a 32-bit integer.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param dest is a pointer to a variable where the value can be
+ * stored.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_int32 (const bson_cursor *c, gint32 *dest);
+
+/** Get the value stored at the cursor, as a timestamp.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param dest is a pointer to a variable where the value can be
+ * stored.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_timestamp (const bson_cursor *c, gint64 *dest);
+
+/** Get the value stored at the cursor, as a 64-bit integer.
+ *
+ * @param c is the cursor pointing at the appropriate element.
+ * @param dest is a pointer to a variable where the value can be
+ * stored.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean bson_cursor_get_int64 (const bson_cursor *c, gint64 *dest);
+
+/** @} */
+
+/** @} */
+
+G_END_DECLS
+
+#endif
diff --git a/src/compat.c b/src/compat.c
new file mode 100644
index 0000000..d0b1be4
--- /dev/null
+++ b/src/compat.c
@@ -0,0 +1,108 @@
+/* compat.c - Various compatibility functions
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "config.h"
+
+#if WITH_OPENSSL
+
+#include "compat.h"
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <openssl/md5.h>
+
+struct _GChecksum
+{
+ GChecksumType type;
+ char hex_digest[33];
+
+ MD5_CTX context;
+};
+
+GChecksum *
+g_checksum_new (GChecksumType checksum_type)
+{
+ GChecksum *chk;
+
+ if (checksum_type != G_CHECKSUM_MD5)
+ {
+ errno = ENOSYS;
+ return NULL;
+ }
+
+ chk = calloc (1, sizeof (GChecksum));
+ chk->type = checksum_type;
+
+ MD5_Init (&chk->context);
+
+ return chk;
+}
+
+void
+g_checksum_free (GChecksum *checksum)
+{
+ if (checksum)
+ free (checksum);
+}
+
+void
+g_checksum_update (GChecksum *checksum,
+ const unsigned char *data,
+ ssize_t length)
+{
+ size_t l = length;
+
+ if (!checksum || !data || length == 0)
+ {
+ errno = EINVAL;
+ return;
+ }
+ errno = 0;
+
+ if (length < 0)
+ l = strlen ((const char *)data);
+
+ MD5_Update (&checksum->context, (const void *)data, l);
+}
+
+const char *
+g_checksum_get_string (GChecksum *checksum)
+{
+ unsigned char digest[16];
+ static const char hex[16] =
+ {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'a', 'b', 'c', 'd', 'e', 'f'};
+ int i;
+
+ if (!checksum)
+ {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ MD5_Final (digest, &checksum->context);
+
+ for (i = 0; i < 16; i++)
+ {
+ checksum->hex_digest[2 * i] = hex[(digest[i] & 0xf0) >> 4];
+ checksum->hex_digest[2 * i + 1] = hex[digest[i] & 0x0f];
+ }
+ checksum->hex_digest[32] = '\0';
+
+ return checksum->hex_digest;
+}
+
+#endif /* WITH_OPENSSL */
diff --git a/src/compat.h b/src/compat.h
new file mode 100644
index 0000000..f5ab52f
--- /dev/null
+++ b/src/compat.h
@@ -0,0 +1,50 @@
+/* compat.h - Various compatibility functions
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBMONGO_COMPAT_H
+#define LIBMONGO_COMPAT_H 1
+
+#include "config.h"
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#if WITH_OPENSSL
+
+#include <openssl/md5.h>
+
+typedef enum {
+ G_CHECKSUM_MD5,
+ G_CHECKSUM_SHA1,
+ G_CHECKSUM_SHA256
+} GChecksumType;
+
+typedef struct _GChecksum GChecksum;
+
+GChecksum *g_checksum_new (GChecksumType checksum_type);
+void g_checksum_free (GChecksum *checksum);
+void g_checksum_update (GChecksum *checksum,
+ const unsigned char *data,
+ ssize_t length);
+const char *g_checksum_get_string (GChecksum *checksum);
+
+#endif /* WITH_OPENSSL */
+
+#ifndef MSG_WAITALL
+#define MSG_WAITALL 0x40
+#endif
+
+#endif
diff --git a/src/libmongo-client.pc.in b/src/libmongo-client.pc.in
new file mode 100644
index 0000000..ce3a783
--- /dev/null
+++ b/src/libmongo-client.pc.in
@@ -0,0 +1,12 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libmongo-client
+Version: @VERSION@
+Description: MongoDB client library
+URL: https://github.com/algernon/libmongo-client
+Requires.private: glib-2.0
+Libs: -L${libdir} -lmongo-client
+Cflags: -I${includedir}/mongo-client
diff --git a/src/libmongo-client.ver b/src/libmongo-client.ver
new file mode 100644
index 0000000..58f08f1
--- /dev/null
+++ b/src/libmongo-client.ver
@@ -0,0 +1,163 @@
+LMC_0.1.0_INTERNAL {
+ global:
+ mongo_wire_cmd_kill_cursors_va;
+ mongo_wire_packet_get_header_raw;
+ mongo_wire_packet_set_header_raw;
+ local:
+ *;
+};
+
+LMC_0.1.0 {
+ bson_append_array;
+ bson_append_binary;
+ bson_append_boolean;
+ bson_append_document;
+ bson_append_double;
+ bson_append_int32;
+ bson_append_int64;
+ bson_append_javascript;
+ bson_append_javascript_w_scope;
+ bson_append_null;
+ bson_append_oid;
+ bson_append_regex;
+ bson_append_string;
+ bson_append_symbol;
+ bson_append_timestamp;
+ bson_append_utc_datetime;
+ bson_build;
+ bson_build_full;
+ bson_cursor_free;
+ bson_cursor_get_array;
+ bson_cursor_get_binary;
+ bson_cursor_get_boolean;
+ bson_cursor_get_document;
+ bson_cursor_get_double;
+ bson_cursor_get_int32;
+ bson_cursor_get_int64;
+ bson_cursor_get_javascript;
+ bson_cursor_get_javascript_w_scope;
+ bson_cursor_get_oid;
+ bson_cursor_get_regex;
+ bson_cursor_get_string;
+ bson_cursor_get_symbol;
+ bson_cursor_get_timestamp;
+ bson_cursor_get_utc_datetime;
+ bson_cursor_key;
+ bson_cursor_new;
+ bson_cursor_next;
+ bson_cursor_type;
+ bson_cursor_type_as_string;
+ bson_data;
+ bson_find;
+ bson_finish;
+ bson_free;
+ bson_new;
+ bson_new_from_data;
+ bson_new_sized;
+ bson_reset;
+ bson_size;
+ bson_type_as_string;
+ mongo_connection_get_requestid;
+ mongo_disconnect;
+ mongo_packet_recv;
+ mongo_packet_send;
+ mongo_sync_cmd_authenticate;
+ mongo_sync_cmd_count;
+ mongo_sync_cmd_custom;
+ mongo_sync_cmd_delete;
+ mongo_sync_cmd_drop;
+ mongo_sync_cmd_get_last_error;
+ mongo_sync_cmd_get_more;
+ mongo_sync_cmd_insert;
+ mongo_sync_cmd_insert_n;
+ mongo_sync_cmd_is_master;
+ mongo_sync_cmd_kill_cursors;
+ mongo_sync_cmd_ping;
+ mongo_sync_cmd_query;
+ mongo_sync_cmd_reset_error;
+ mongo_sync_cmd_update;
+ mongo_sync_cmd_user_add;
+ mongo_sync_cmd_user_remove;
+ mongo_sync_conn_get_auto_reconnect;
+ mongo_sync_conn_get_max_insert_size;
+ mongo_sync_conn_get_safe_mode;
+ mongo_sync_conn_get_slaveok;
+ mongo_sync_conn_seed_add;
+ mongo_sync_conn_set_auto_reconnect;
+ mongo_sync_conn_set_max_insert_size;
+ mongo_sync_conn_set_safe_mode;
+ mongo_sync_conn_set_slaveok;
+ mongo_sync_disconnect;
+ mongo_sync_pool_free;
+ mongo_sync_pool_new;
+ mongo_sync_pool_pick;
+ mongo_sync_pool_return;
+ mongo_sync_reconnect;
+ mongo_util_oid_init;
+ mongo_util_oid_new;
+ mongo_util_oid_new_with_time;
+ mongo_util_parse_addr;
+ mongo_wire_cmd_custom;
+ mongo_wire_cmd_delete;
+ mongo_wire_cmd_get_more;
+ mongo_wire_cmd_insert;
+ mongo_wire_cmd_insert_n;
+ mongo_wire_cmd_kill_cursors;
+ mongo_wire_cmd_kill_cursors_va;
+ mongo_wire_cmd_query;
+ mongo_wire_cmd_update;
+ mongo_wire_packet_free;
+ mongo_wire_packet_get_data;
+ mongo_wire_packet_get_header;
+ mongo_wire_packet_get_header_raw;
+ mongo_wire_packet_new;
+ mongo_wire_packet_set_data;
+ mongo_wire_packet_set_header;
+ mongo_wire_packet_set_header_raw;
+ mongo_wire_reply_packet_get_data;
+ mongo_wire_reply_packet_get_header;
+ mongo_wire_reply_packet_get_nth_document;
+} LMC_0.1.0_INTERNAL;
+
+LMC_0.1.1 {
+ bson_validate_key;
+ bson_cursor_find_next;
+ bson_stream_doc_size;
+ mongo_sync_cursor_*;
+} LMC_0.1.0;
+
+LMC_0.1.2 {
+ bson_cursor_find;
+ mongo_connection_set_timeout;
+ mongo_sync_cmd_index_*;
+} LMC_0.1.1;
+
+LMC_0.1.3 {
+ mongo_sync_gridfs_*;
+ mongo_sync_cmd_create;
+ mongo_sync_cmd_exists;
+ mongo_util_oid_as_string;
+} LMC_0.1.2;
+
+LMC_0.1.6 {
+ global:
+ mongo_connect;
+ mongo_sync_connect;
+ local:
+ mongo_tcp_connect;
+ mongo_sync_connect_0_1_0;
+} LMC_0.1.3;
+
+LMC_0.1.7 {
+ mongo_sync_cmd_user_add_with_roles;
+} LMC_0.1.6;
+
+LMC_0.1.8 {
+ mongo_sync_conn_recovery_cache_new;
+ mongo_sync_conn_recovery_cache_free;
+ mongo_sync_conn_recovery_cache_discard;
+ mongo_sync_conn_recovery_cache_seed_add;
+ mongo_sync_connect_recovery_cache;
+ mongo_sync_conn_get_last_error;
+ mongo_sync_cmd_get_last_error_full;
+} LMC_0.1.7;
diff --git a/src/libmongo-macros.h b/src/libmongo-macros.h
new file mode 100644
index 0000000..644fbe8
--- /dev/null
+++ b/src/libmongo-macros.h
@@ -0,0 +1,51 @@
+/* libmongo-macros.h - helper macros for libmongo-client.
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBMONGO_MACROS_H
+#define LIBMONGO_MACROS_H 1
+
+#include <glib.h>
+
+inline static gdouble
+GDOUBLE_SWAP_LE_BE(gdouble in)
+{
+ union
+ {
+ guint64 i;
+ gdouble d;
+ } u;
+
+ u.d = in;
+ u.i = GUINT64_SWAP_LE_BE (u.i);
+ return u.d;
+}
+
+#if G_BYTE_ORDER == G_LITTLE_ENDIAN
+#define GDOUBLE_TO_LE(val) ((gdouble) (val))
+#define GDOUBLE_TO_BE(val) (GDOUBLE_SWAP_LE_BE (val))
+
+#elif G_BYTE_ORDER == G_BIG_ENDIAN
+#define GDOUBLE_TO_LE(val) (GDOUBLE_SWAP_LE_BE (val))
+#define GDOUBLE_TO_BE(val) ((gdouble) (val))
+
+#else /* !G_LITTLE_ENDIAN && !G_BIG_ENDIAN */
+#error unknown ENDIAN type
+#endif /* !G_LITTLE_ENDIAN && !G_BIG_ENDIAN */
+
+#define GDOUBLE_FROM_LE(val) (GDOUBLE_TO_LE (val))
+#define GDOUBLE_FROM_BE(val) (GDOUBLE_TO_BE (val))
+
+#endif
diff --git a/src/libmongo-private.h b/src/libmongo-private.h
new file mode 100644
index 0000000..e13f0da
--- /dev/null
+++ b/src/libmongo-private.h
@@ -0,0 +1,276 @@
+/* libmongo-private.h - private headers for libmongo-client
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file libmongo-private.h
+ *
+ * Private types and functions, for internal use in libmongo-client only.
+ */
+
+#ifndef LIBMONGO_PRIVATE_H
+#define LIBMONGO_PRIVATE_H 1
+
+#include "mongo.h"
+#include "compat.h"
+
+/** @internal BSON structure.
+ */
+struct _bson
+{
+ GByteArray *data; /**< The actual data of the BSON object. */
+ gboolean finished; /**< Flag to indicate whether the object is open
+ or finished. */
+};
+
+/** @internal Mongo Connection state object. */
+struct _mongo_connection
+{
+ gint fd; /**< The file descriptor associated with the connection. */
+ gint32 request_id; /**< The last sent command's requestID. */
+};
+
+/** @internal Mongo Replica Set object. */
+typedef struct _replica_set
+{
+ GList *seeds; /**< Replica set seeds, as a list of strings. */
+ GList *hosts; /**< Replica set members, as a list of strings. */
+ gchar *primary; /**< The replica master, if any. */
+} replica_set; /**< Replica Set properties. */
+
+/** @internal MongoDb Authentication Credentials object.
+ * These values are mlock()'ed.
+ */
+typedef struct _auth_credentials
+{
+ gchar *db; /**< The database to authenticate against. */
+ gchar *user; /**< The username to authenticate with. */
+ gchar *pw; /**< The password to authenticate with. */
+} auth_credentials;
+
+/** @internal Connection Recovery Cache for MongoDb. */
+struct _mongo_sync_conn_recovery_cache
+{
+ replica_set rs; /**< The replica set. */
+ auth_credentials auth; /**< The authentication credentials.*/
+};
+
+/** @internal Synchronous connection object. */
+struct _mongo_sync_connection
+{
+ mongo_connection super; /**< The parent object. */
+ gboolean slaveok; /**< Whether queries against slave nodes are
+ acceptable. */
+ gboolean safe_mode; /**< Safe-mode signal flag. */
+ gboolean auto_reconnect; /**< Auto-reconnect flag. */
+
+ gchar *last_error; /**< The last error from the server, caught
+ during queries. */
+ gint32 max_insert_size; /**< Maximum number of bytes an insert
+ command can be before being split to
+ smaller chunks. Used for bulk inserts. */
+
+ replica_set rs; /**< Replica set. */
+ auth_credentials auth; /**< Authentication credentials. */
+
+ mongo_sync_conn_recovery_cache *recovery_cache; /**< Reference to the externally managed recovery cache. */
+};
+
+/** @internal MongoDB cursor object.
+ *
+ * The cursor object can be used to conveniently iterate over a query
+ * result set.
+ */
+struct _mongo_sync_cursor
+{
+ mongo_sync_connection *conn; /**< The connection associated with
+ the cursor. Owned by the caller. */
+ gchar *ns; /**< The namespace of the cursor. */
+ mongo_packet *results; /**< The current result set, as a mongo
+ packet. */
+
+ gint32 offset; /**< Offset of the cursor within the active result
+ set. */
+ mongo_reply_packet_header ph; /**< The reply headers extracted from
+ the active result set. */
+};
+
+/** @internal Synchronous pool connection object. */
+struct _mongo_sync_pool_connection
+{
+ mongo_sync_connection super; /**< The parent object. */
+
+ gint pool_id; /**< ID of the connection. */
+ gboolean in_use; /**< Whether the object is in use or not. */
+};
+
+/** @internal GridFS object */
+struct _mongo_sync_gridfs
+{
+ mongo_sync_connection *conn; /**< Connection the object is
+ associated to. */
+
+ struct
+ {
+ gchar *prefix; /**< The namespace prefix. */
+ gchar *files; /**< The file metadata namespace. */
+ gchar *chunks; /**< The chunk namespace. */
+
+ gchar *db; /**< The database part of the namespace. */
+ } ns; /**< Namespaces */
+
+ gint32 chunk_size; /**< The default chunk size. */
+};
+
+/** @internal GridFS file types. */
+typedef enum
+{
+ LMC_GRIDFS_FILE_CHUNKED, /**< Chunked file. */
+ LMC_GRIDFS_FILE_STREAM_READER, /**< Streamed file, reader. */
+ LMC_GRIDFS_FILE_STREAM_WRITER, /**< Streamed file, writer. */
+} _mongo_gridfs_type;
+
+/** @internal GridFS common file properties.
+ *
+ * This is shared between chunked and streamed files.
+ */
+typedef struct
+{
+ gint32 chunk_size; /**< Maximum chunk size for this file. */
+ gint64 length; /**< Total length of the file. */
+
+ union
+ {
+ /** Chunked file data. */
+ struct
+ {
+ const guint8 *oid; /**< The file's ObjectID. */
+ const gchar *md5; /**< MD5 sum of the file. */
+ gint64 date; /**< The upload date. */
+ bson *metadata; /**< Full file metadata, including user-set
+ keys. */
+ };
+
+ /** Streamed file data */
+ struct
+ {
+ gint64 offset; /**< Offset we're into the file. */
+ gint64 current_chunk; /**< The current chunk we're on. */
+ guint8 *id; /**< A copy of the file's ObjectID. */
+ };
+ };
+
+ _mongo_gridfs_type type; /**< The type of the GridFS file. */
+} mongo_sync_gridfs_file_common;
+
+/** @internal GridFS file object. */
+struct _mongo_sync_gridfs_chunked_file
+{
+ mongo_sync_gridfs_file_common meta; /**< The file metadata. */
+ mongo_sync_gridfs *gfs; /**< The GridFS the file is on. */
+};
+
+/** @internal GridFS file stream object. */
+struct _mongo_sync_gridfs_stream
+{
+ mongo_sync_gridfs_file_common file; /**< Common file data. */
+ mongo_sync_gridfs *gfs; /**< The GridFS the file is on. */
+
+ /** Reader & Writer structure union.
+ */
+ union
+ {
+ /** Reader-specific data.
+ */
+ struct
+ {
+ bson *bson; /**< The current chunk as BSON. */
+
+ /** Chunk state information.
+ */
+ struct
+ {
+ const guint8 *data; /**< The current chunk data, pointing
+ into ->reader.bson. */
+ gint32 start_offset; /**< Offset to start reading data from,
+ needed to support the binary subtype. */
+ gint32 size; /**< Size of the current chunk. */
+ gint32 offset; /**< Offset we're into the chunk. */
+ } chunk;
+ } reader;
+
+ /** Writer-specific data.
+ */
+ struct
+ {
+ bson *metadata; /**< Copy of the user-supplied metadata. */
+ guint8 *buffer; /**< The current output buffer. */
+ gint32 buffer_offset; /**< Offset into the output buffer. */
+
+ GChecksum *checksum; /**< The running checksum of the output
+ file. */
+ } writer;
+ };
+};
+
+/** @internal Construct a kill cursors command, using a va_list.
+ *
+ * @param id is the sequence id.
+ * @param n is the number of cursors to delete.
+ * @param ap is the va_list of cursors to kill.
+ *
+ * @note One must supply exactly @a n number of cursor IDs.
+ *
+ * @returns A newly allocated packet, or NULL on error. It is the
+ * responsibility of the caller to free the packet once it is not used
+ * anymore.
+ */
+mongo_packet *mongo_wire_cmd_kill_cursors_va (gint32 id, gint32 n,
+ va_list ap);
+
+/** @internal Get the header data of a packet, without conversion.
+ *
+ * Retrieve the mongo packet's header data, but do not convert the
+ * values from little-endian. Use only when the source has the data in
+ * the right byte order already.
+ *
+ * @param p is the packet which header we seek.
+ * @param header is a pointer to a variable which will hold the data.
+ *
+ * @note Allocating the @a header is the responsibility of the caller.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean
+mongo_wire_packet_get_header_raw (const mongo_packet *p,
+ mongo_packet_header *header);
+
+/** @internal Set the header data of a packet, without conversion.
+ *
+ * Override the mongo packet's header data, but do not convert the
+ * values from little-endian. Use only when the source has the data in
+ * the right byte order already.
+ *
+ * @note No sanity checks are done, use this function with great care.
+ *
+ * @param p is the packet whose header we want to override.
+ * @param header is the header structure to use.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean
+mongo_wire_packet_set_header_raw (mongo_packet *p,
+ const mongo_packet_header *header);
+
+#endif
diff --git a/src/mongo-client.c b/src/mongo-client.c
new file mode 100644
index 0000000..a46cc0d
--- /dev/null
+++ b/src/mongo-client.c
@@ -0,0 +1,331 @@
+/* mongo-client.c - libmongo-client user API
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/mongo-client.c
+ * MongoDB client API implementation.
+ */
+
+#include "config.h"
+#include "mongo-client.h"
+#include "bson.h"
+#include "mongo-wire.h"
+#include "libmongo-private.h"
+
+#include <glib.h>
+
+#include <string.h>
+#include <arpa/inet.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netdb.h>
+#include <sys/uio.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#ifndef HAVE_MSG_NOSIGNAL
+#define MSG_NOSIGNAL 0
+#endif
+
+static const int one = 1;
+
+/* Open a TCP connection to @host:@port and wrap it in a newly
+ * allocated mongo_connection object.
+ *
+ * Returns NULL with errno set on failure: EINVAL for a NULL host,
+ * EADDRNOTAVAIL when no resolved address could be connected to.
+ */
+mongo_connection *
+mongo_tcp_connect (const char *host, int port)
+{
+  struct addrinfo *res = NULL, *r;
+  struct addrinfo hints;
+  int e, fd = -1;
+  gchar *port_s;
+  mongo_connection *conn;
+
+  if (!host)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  memset (&hints, 0, sizeof (hints));
+  hints.ai_socktype = SOCK_STREAM;
+
+#ifdef __linux__
+  /* Only return address families actually configured on this host. */
+  hints.ai_flags = AI_ADDRCONFIG;
+#endif
+
+  port_s = g_strdup_printf ("%d", port);
+  e = getaddrinfo (host, port_s, &hints, &res);
+  if (e != 0)
+    {
+      /* NOTE(review): getaddrinfo() reports failures through its
+       * return value (gai_strerror), not errno; the errno preserved
+       * here may be unrelated to the resolver failure — confirm
+       * intended error reporting. */
+      int err = errno;
+
+      g_free (port_s);
+      errno = err;
+      return NULL;
+    }
+  g_free (port_s);
+
+  /* Try each resolved address in turn until one connects. */
+  for (r = res; r != NULL; r = r->ai_next)
+    {
+      fd = socket (r->ai_family, r->ai_socktype, r->ai_protocol);
+      if (fd != -1 && connect (fd, r->ai_addr, r->ai_addrlen) == 0)
+        break;
+      if (fd != -1)
+        {
+          close (fd);
+          fd = -1;
+        }
+    }
+  freeaddrinfo (res);
+
+  if (fd == -1)
+    {
+      errno = EADDRNOTAVAIL;
+      return NULL;
+    }
+
+  /* Best-effort: lower latency for the small request/reply packets
+     MongoDB exchanges; failure here is deliberately ignored. */
+  setsockopt (fd, IPPROTO_TCP, TCP_NODELAY, (char *)&one, sizeof (one));
+
+  conn = g_new0 (mongo_connection, 1);
+  conn->fd = fd;
+
+  return conn;
+}
+
+/** @internal Connect to a MongoDB server over a unix domain socket.
+ *
+ * @param path is the filesystem path of the socket.
+ *
+ * @returns A newly allocated mongo_connection, or NULL with errno set
+ * (EINVAL for a NULL path, ENAMETOOLONG when the path does not fit in
+ * sun_path, EADDRNOTAVAIL when the connection failed).
+ */
+static mongo_connection *
+mongo_unix_connect (const char *path)
+{
+  int fd = -1;
+  mongo_connection *conn;
+  struct sockaddr_un remote;
+
+  if (!path || strlen (path) >= sizeof (remote.sun_path))
+    {
+      errno = path ? ENAMETOOLONG : EINVAL;
+      return NULL;
+    }
+
+  fd = socket (AF_UNIX, SOCK_STREAM, 0);
+  if (fd == -1)
+    {
+      errno = EADDRNOTAVAIL;
+      return NULL;
+    }
+
+  /* Zero the whole address first: some platforms have extra fields
+     (e.g. sun_len) that must not contain stack garbage. */
+  memset (&remote, 0, sizeof (remote));
+  remote.sun_family = AF_UNIX;
+  strncpy (remote.sun_path, path, sizeof (remote.sun_path) - 1);
+  if (connect (fd, (struct sockaddr *)&remote, sizeof (remote)) == -1)
+    {
+      close (fd);
+      errno = EADDRNOTAVAIL;
+      return NULL;
+    }
+
+  conn = g_new0 (mongo_connection, 1);
+  conn->fd = fd;
+
+  return conn;
+}
+
+mongo_connection *
+mongo_connect (const char *address, int port)
+{
+  /* A port of MONGO_CONN_LOCAL marks @address as a unix socket path;
+   * anything else is a regular host/port pair. */
+  return (port == MONGO_CONN_LOCAL)
+    ? mongo_unix_connect (address)
+    : mongo_tcp_connect (address, port);
+}
+
+#if VERSIONED_SYMBOLS
+__asm__(".symver mongo_tcp_connect,mongo_connect@LMC_0.1.0");
+#endif
+
+void
+mongo_disconnect (mongo_connection *conn)
+{
+  if (conn == NULL)
+    {
+      errno = ENOTCONN;
+      return;
+    }
+
+  /* Close the underlying socket, if any, then release the object
+   * itself. */
+  if (conn->fd >= 0)
+    close (conn->fd);
+
+  g_free (conn);
+  errno = 0;
+}
+
+/* Send a fully assembled packet over @conn.
+ *
+ * The raw (still little-endian) header and the payload are gathered
+ * into a single sendmsg() call.
+ *
+ * NOTE(review): a short (partial) send makes this return FALSE
+ * without retrying the remainder; with SO_SNDTIMEO set this can
+ * leave the stream desynchronized — confirm intended semantics.
+ */
+gboolean
+mongo_packet_send (mongo_connection *conn, const mongo_packet *p)
+{
+  const guint8 *data;
+  gint32 data_size;
+  mongo_packet_header h;
+  struct iovec iov[2];
+  struct msghdr msg;
+
+  if (!conn)
+    {
+      errno = ENOTCONN;
+      return FALSE;
+    }
+  if (!p)
+    {
+      errno = EINVAL;
+      return FALSE;
+    }
+
+  if (conn->fd < 0)
+    {
+      errno = EBADF;
+      return FALSE;
+    }
+
+  /* The packet stores its header in wire byte order already, so the
+     raw accessor is used — no conversion needed before sending. */
+  if (!mongo_wire_packet_get_header_raw (p, &h))
+    return FALSE;
+
+  data_size = mongo_wire_packet_get_data (p, &data);
+
+  if (data_size == -1)
+    return FALSE;
+
+  /* Gather header + payload so both go out in one syscall. */
+  iov[0].iov_base = (void *)&h;
+  iov[0].iov_len = sizeof (h);
+  iov[1].iov_base = (void *)data;
+  iov[1].iov_len = data_size;
+
+  memset (&msg, 0, sizeof (struct msghdr));
+  msg.msg_iov = iov;
+  msg.msg_iovlen = 2;
+
+  if (sendmsg (conn->fd, &msg, MSG_NOSIGNAL) != (gint32)sizeof (h) + data_size)
+    return FALSE;
+
+  /* Remember the id so the reply can be matched later.
+     NOTE(review): h.id is still in wire (little-endian) byte order
+     here, while mongo_packet_recv stores a host-order id — on
+     big-endian hosts these would disagree; verify. */
+  conn->request_id = h.id;
+
+  return TRUE;
+}
+
+/* Read a single packet from @conn.
+ *
+ * Reads the header with MSG_WAITALL, converts it to host byte order,
+ * validates the advertised length, then reads and attaches the
+ * payload.
+ *
+ * @returns A newly allocated packet (caller frees), or NULL with
+ * errno set (ENOTCONN, EBADF, EPROTO for a bogus length, or the
+ * error of the failing syscall).
+ */
+mongo_packet *
+mongo_packet_recv (mongo_connection *conn)
+{
+  mongo_packet *p;
+  guint8 *data;
+  guint32 size;
+  mongo_packet_header h;
+
+  if (!conn)
+    {
+      errno = ENOTCONN;
+      return NULL;
+    }
+
+  if (conn->fd < 0)
+    {
+      errno = EBADF;
+      return NULL;
+    }
+
+  memset (&h, 0, sizeof (h));
+  if (recv (conn->fd, &h, sizeof (mongo_packet_header),
+            MSG_NOSIGNAL | MSG_WAITALL) != sizeof (mongo_packet_header))
+    {
+      return NULL;
+    }
+
+  h.length = GINT32_FROM_LE (h.length);
+  h.id = GINT32_FROM_LE (h.id);
+  h.resp_to = GINT32_FROM_LE (h.resp_to);
+  h.opcode = GINT32_FROM_LE (h.opcode);
+
+  /* Sanity-check the length before using it: a corrupt or malicious
+     header with length < sizeof(header) would otherwise underflow
+     the unsigned subtraction below and trigger a huge allocation. */
+  if (h.length < (gint32)sizeof (mongo_packet_header))
+    {
+      errno = EPROTO;
+      return NULL;
+    }
+
+  p = mongo_wire_packet_new ();
+
+  if (!mongo_wire_packet_set_header_raw (p, &h))
+    {
+      int e = errno;
+
+      mongo_wire_packet_free (p);
+      errno = e;
+      return NULL;
+    }
+
+  size = h.length - sizeof (mongo_packet_header);
+  data = g_new0 (guint8, size);
+  if ((guint32)recv (conn->fd, data, size, MSG_NOSIGNAL | MSG_WAITALL) != size)
+    {
+      int e = errno;
+
+      g_free (data);
+      mongo_wire_packet_free (p);
+      errno = e;
+      return NULL;
+    }
+
+  if (!mongo_wire_packet_set_data (p, data, size))
+    {
+      int e = errno;
+
+      g_free (data);
+      mongo_wire_packet_free (p);
+      errno = e;
+      return NULL;
+    }
+
+  g_free (data);
+
+  return p;
+}
+
+gint32
+mongo_connection_get_requestid (const mongo_connection *conn)
+{
+  /* Report the request id of the last packet sent on @conn. */
+  if (conn == NULL)
+    {
+      errno = ENOTCONN;
+      return -1;
+    }
+
+  return conn->request_id;
+}
+
+/* Set SO_RCVTIMEO and SO_SNDTIMEO on @conn's socket.
+ *
+ * @param conn is the connection to configure.
+ * @param timeout is the timeout in milliseconds (must be >= 0).
+ *
+ * @returns TRUE on success, FALSE with errno set otherwise.
+ */
+gboolean
+mongo_connection_set_timeout (mongo_connection *conn, gint timeout)
+{
+  struct timeval tv;
+
+  if (!conn)
+    {
+      errno = ENOTCONN;
+      return FALSE;
+    }
+  /* Consistency with the other connection functions: reject a
+     connection whose socket is already gone, instead of letting
+     setsockopt fail on fd -1. */
+  if (conn->fd < 0)
+    {
+      errno = EBADF;
+      return FALSE;
+    }
+  if (timeout < 0)
+    {
+      errno = ERANGE;
+      return FALSE;
+    }
+
+  /* Split milliseconds into the seconds/microseconds timeval pair. */
+  tv.tv_sec = timeout / 1000;
+  tv.tv_usec = (timeout % 1000) * 1000;
+
+  if (setsockopt (conn->fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof (tv)) == -1)
+    return FALSE;
+  if (setsockopt (conn->fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof (tv)) == -1)
+    return FALSE;
+  return TRUE;
+}
diff --git a/src/mongo-client.h b/src/mongo-client.h
new file mode 100644
index 0000000..d31b273
--- /dev/null
+++ b/src/mongo-client.h
@@ -0,0 +1,116 @@
+/* mongo-client.h - libmongo-client user API
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/mongo-client.h
+ * MongoDB client API public header.
+ */
+
+#ifndef LIBMONGO_CLIENT_H
+#define LIBMONGO_CLIENT_H 1
+
+#include <bson.h>
+#include <mongo-wire.h>
+
+#include <glib.h>
+
+G_BEGIN_DECLS
+
+/** @defgroup mongo_client Mongo Client
+ *
+ * @addtogroup mongo_client
+ * @{
+ */
+
+/** Opaque MongoDB connection object type. */
+typedef struct _mongo_connection mongo_connection;
+
+/** Constant to signal that a connection is local (unix socket).
+ *
+ * When passed to mongo_connect() or mongo_sync_connect() as the port
+ * parameter, it signals that the address is to be interpreted as a
+ * unix socket path, not a hostname or IP.
+ */
+#define MONGO_CONN_LOCAL -1
+
+/** Connect to a MongoDB server.
+ *
+ * Connects to a single MongoDB server.
+ *
+ * @param address is the address of the server (IP or unix socket path).
+ * @param port is the port to connect to, or #MONGO_CONN_LOCAL if
+ * address is a unix socket.
+ *
+ * @returns A newly allocated mongo_connection object or NULL on
+ * error. It is the responsibility of the caller to free it once it is
+ * not used anymore.
+ */
+mongo_connection *mongo_connect (const char *address, int port);
+
+/** Disconnect from a MongoDB server.
+ *
+ * @param conn is the connection object to disconnect from.
+ *
+ * @note This also frees up the object.
+ */
+void mongo_disconnect (mongo_connection *conn);
+
+/** Sends an assembled command packet to MongoDB.
+ *
+ * @param conn is the connection to use for sending.
+ * @param p is the packet to send.
+ *
+ * @returns TRUE on success, when the whole packet was sent, FALSE
+ * otherwise.
+ */
+gboolean mongo_packet_send (mongo_connection *conn, const mongo_packet *p);
+
+/** Receive a packet from MongoDB.
+ *
+ * @param conn is the connection to use for receiving.
+ *
+ * @returns A response packet, or NULL upon error.
+ */
+mongo_packet *mongo_packet_recv (mongo_connection *conn);
+
+/** Get the last requestID from a connection object.
+ *
+ * @param conn is the connection to get the requestID from.
+ *
+ * @returns The last requestID used, or -1 on error.
+ */
+gint32 mongo_connection_get_requestid (const mongo_connection *conn);
+
+/** Set a timeout for read/write operations on a connection
+ *
+ * On systems that support it, set a timeout for read/write operations
+ * on a socket.
+ *
+ * @param conn is the connection to set a timeout on.
+ * @param timeout is the timeout to set, in milliseconds.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ *
+ * @note The timeout is not preserved across reconnects when using
+ * the Sync API. It only applies to the active connection, and
+ * nothing else.
+ */
+gboolean mongo_connection_set_timeout (mongo_connection *conn, gint timeout);
+
+/** @} */
+
+G_END_DECLS
+
+#endif
diff --git a/src/mongo-sync-cursor.c b/src/mongo-sync-cursor.c
new file mode 100644
index 0000000..b2492be
--- /dev/null
+++ b/src/mongo-sync-cursor.c
@@ -0,0 +1,118 @@
+/* mongo-sync-cursor.c - libmongo-client cursor API on top of Sync
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/mongo-sync-cursor.c
+ * MongoDB Cursor API implementation.
+ */
+
+#include "config.h"
+#include "mongo.h"
+#include "libmongo-private.h"
+
+#include <errno.h>
+
+/* Create a cursor wrapping an existing reply @packet.
+ *
+ * Ownership of @packet transfers to the cursor (also on failure).
+ *
+ * @returns A newly allocated cursor, or NULL with errno set
+ * (ENOTCONN, EINVAL, or EPROTO when @packet is not a valid reply).
+ */
+mongo_sync_cursor *
+mongo_sync_cursor_new (mongo_sync_connection *conn, const gchar *ns,
+                       mongo_packet *packet)
+{
+  mongo_sync_cursor *c;
+
+  if (!conn)
+    {
+      errno = ENOTCONN;
+      return NULL;
+    }
+  if (!ns || !packet)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  c = g_new0 (mongo_sync_cursor, 1);
+  c->conn = conn;
+  c->ns = g_strdup (ns);
+  c->results = packet;
+  c->offset = -1;
+
+  /* Reject packets that are not valid replies: previously this
+     return value was ignored, leaving ph zeroed and the failure
+     undetected. The packet is owned by the cursor, so it is freed
+     here as documented. */
+  if (!mongo_wire_reply_packet_get_header (c->results, &c->ph))
+    {
+      mongo_wire_packet_free (c->results);
+      g_free (c->ns);
+      g_free (c);
+      errno = EPROTO;
+      return NULL;
+    }
+
+  return c;
+}
+
+/* Advance @cursor to the next document.
+ *
+ * The offset moves within the currently held reply packet; once the
+ * batch is exhausted, a get_more is issued on the server-side cursor
+ * and iteration continues in the fresh batch.
+ *
+ * Returns TRUE when the cursor points at a valid document, FALSE on
+ * error or exhaustion (errno left as set by mongo_sync_cmd_get_more
+ * in that case).
+ */
+gboolean
+mongo_sync_cursor_next (mongo_sync_cursor *cursor)
+{
+  if (!cursor)
+    {
+      errno = EINVAL;
+      return FALSE;
+    }
+  errno = 0;
+
+  /* offset is -1 before the first document, so this branch also
+     handles stepping into a freshly fetched batch. */
+  if (cursor->offset >= cursor->ph.returned - 1)
+    {
+      gint32 ret = cursor->ph.returned;
+      gint64 cid = cursor->ph.cursor_id;
+
+      /* The old batch must be freed before the reply to get_more
+         replaces it. */
+      mongo_wire_packet_free (cursor->results);
+      cursor->offset = -1;
+      cursor->results = mongo_sync_cmd_get_more (cursor->conn, cursor->ns,
+                                                 ret, cid);
+      if (!cursor->results)
+        return FALSE;
+      mongo_wire_reply_packet_get_header (cursor->results, &cursor->ph);
+    }
+  cursor->offset++;
+  return TRUE;
+}
+
+void
+mongo_sync_cursor_free (mongo_sync_cursor *cursor)
+{
+  if (cursor == NULL)
+    {
+      errno = ENOTCONN;
+      return;
+    }
+  errno = 0;
+
+  /* Ask the server to drop its side of the cursor before releasing
+   * the client-side resources. */
+  mongo_sync_cmd_kill_cursors (cursor->conn, 1, cursor->ph.cursor_id);
+
+  g_free (cursor->ns);
+  mongo_wire_packet_free (cursor->results);
+  g_free (cursor);
+}
+
+bson *
+mongo_sync_cursor_get_data (mongo_sync_cursor *cursor)
+{
+  bson *doc = NULL;
+
+  if (cursor == NULL)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  /* Documents are numbered from 1 in the reply packet, while the
+   * cursor offset is 0-based — hence the +1. */
+  if (!mongo_wire_reply_packet_get_nth_document (cursor->results,
+                                                 cursor->offset + 1,
+                                                 &doc))
+    {
+      errno = ERANGE;
+      return NULL;
+    }
+
+  bson_finish (doc);
+  return doc;
+}
diff --git a/src/mongo-sync-cursor.h b/src/mongo-sync-cursor.h
new file mode 100644
index 0000000..949cc65
--- /dev/null
+++ b/src/mongo-sync-cursor.h
@@ -0,0 +1,103 @@
+/* mongo-sync-cursor.h - libmongo-client cursor API on top of Sync
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/mongo-sync-cursor.h
+ * MongoDB cursor API public header.
+ *
+ * @addtogroup mongo_sync
+ * @{
+ */
+
+#ifndef LIBMONGO_SYNC_CURSOR_H
+#define LIBMONGO_SYNC_CURSOR_H 1
+
+#include <glib.h>
+#include <mongo-sync.h>
+
+G_BEGIN_DECLS
+
+/** @defgroup mongo_sync_cursor Mongo Sync Cursor API
+ *
+ * @addtogroup mongo_sync_cursor
+ * @{
+ */
+
+/** Opaque Mongo Cursor object. */
+typedef struct _mongo_sync_cursor mongo_sync_cursor;
+
+/** Create a new MongoDB Cursor.
+ *
+ * This function can be used to create a new cursor, with which one
+ * can conveniently iterate over using mongo_sync_cursor_next().
+ *
+ * The @a packet argument is supposed to be the output of - for
+ * example - mongo_sync_cmd_query().
+ *
+ * @param conn is the connection to associate with the cursor.
+ * @param ns is the namespace to use with the cursor.
+ * @param packet is a reply packet on which the cursor should be
+ * based. The packet should not be freed or touched by the application
+ * afterwards, it will be handled by the cursor functions.
+ *
+ * @returns A newly allocated cursor, or NULL on error.
+ */
+mongo_sync_cursor *mongo_sync_cursor_new (mongo_sync_connection *conn,
+ const gchar *ns,
+ mongo_packet *packet);
+
+/** Iterate a MongoDB cursor.
+ *
+ * Iterating the cursor will move its position to the next document in
+ * the result set, querying the database if so need be.
+ *
+ * Queries will be done in bulk, provided that the original query was
+ * done so as well.
+ *
+ * @param cursor is the cursor to advance.
+ *
+ * @returns TRUE if the cursor could be advanced, FALSE otherwise. If
+ * the cursor could not be advanced due to an error, then errno will
+ * be set appropriately.
+ */
+gboolean mongo_sync_cursor_next (mongo_sync_cursor *cursor);
+
+/** Retrieve the BSON document at the cursor's position.
+ *
+ * @param cursor is the cursor to retrieve data from.
+ *
+ * @returns A newly allocated BSON object, or NULL on failure. It is
+ * the responsibility of the caller to free the BSON object once it is
+ * no longer needed.
+ */
+bson *mongo_sync_cursor_get_data (mongo_sync_cursor *cursor);
+
+/** Free a MongoDB cursor.
+ *
+ * Freeing a MongoDB cursor involves destroying the active cursor the
+ * database is holding, and then freeing up the resources allocated
+ * for it.
+ *
+ * @param cursor is the cursor to destroy.
+ */
+void mongo_sync_cursor_free (mongo_sync_cursor *cursor);
+
+/** @} */
+
+/** @} */
+
+G_END_DECLS
+
+#endif
diff --git a/src/mongo-sync-pool.c b/src/mongo-sync-pool.c
new file mode 100644
index 0000000..52f5042
--- /dev/null
+++ b/src/mongo-sync-pool.c
@@ -0,0 +1,269 @@
+/* mongo-sync-pool.c - libmongo-client connection pool implementation
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/mongo-sync-pool.c
+ * MongoDB connection pool API implementation.
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <glib.h>
+#include <mongo.h>
+#include "libmongo-private.h"
+
+/** @internal A connection pool object.
+ *
+ * The counts mirror the lengths of the corresponding lists and are
+ * used for pool_id bookkeeping in pick/return.
+ */
+struct _mongo_sync_pool
+{
+  gint nmasters; /**< Number of master connections in the pool. */
+  gint nslaves; /**< Number of slave connections in the pool. */
+
+  GList *masters; /**< List of master connections in the pool. */
+  GList *slaves; /**< List of slave connections in the pool. */
+};
+
+/* Open a sync connection to @host:@port and extend it into a pool
+ * connection object, initialising only the pool-specific fields.
+ *
+ * NOTE(review): this relies on g_realloc() growing the
+ * mongo_sync_connection in place — i.e. on the sync connection being
+ * the leading member of mongo_sync_pool_connection; verify against
+ * libmongo-private.h.
+ */
+static mongo_sync_pool_connection *
+_mongo_sync_pool_connect (const gchar *host, gint port, gboolean slaveok)
+{
+  mongo_sync_connection *c;
+  mongo_sync_pool_connection *conn;
+
+  c = mongo_sync_connect (host, port, slaveok);
+  if (!c)
+    return NULL;
+  conn = g_realloc (c, sizeof (mongo_sync_pool_connection));
+  conn->pool_id = 0;
+  conn->in_use = FALSE;
+
+  return conn;
+}
+
+/* Build a pool of @nmasters master and up to @nslaves secondary
+ * connections (see mongo-sync-pool.h for the public contract).
+ *
+ * Fixes over the previous revision: return NULL (not FALSE) on
+ * connect failure; check the result of every pool connect; free the
+ * parsed secondary address (it was leaked); record the number of
+ * secondaries actually connected (was off by one, and could go
+ * negative).
+ */
+mongo_sync_pool *
+mongo_sync_pool_new (const gchar *host,
+                     gint port,
+                     gint nmasters, gint nslaves)
+{
+  mongo_sync_pool *pool;
+  mongo_sync_pool_connection *conn;
+  gint i, j = 0;
+
+  if (!host || port < 0)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+  if (nmasters < 0 || nslaves < 0)
+    {
+      errno = ERANGE;
+      return NULL;
+    }
+  if (nmasters + nslaves <= 0)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  /* Probe connection: verifies @host is a master, and supplies the
+     replica set host list used to locate secondaries. */
+  conn = _mongo_sync_pool_connect (host, port, FALSE);
+  if (!conn)
+    return NULL;
+
+  if (!mongo_sync_cmd_is_master ((mongo_sync_connection *)conn))
+    {
+      mongo_sync_disconnect ((mongo_sync_connection *)conn);
+      errno = EPROTO;
+      return NULL;
+    }
+
+  pool = g_new0 (mongo_sync_pool, 1);
+  pool->nmasters = nmasters;
+  pool->nslaves = nslaves;
+
+  for (i = 0; i < pool->nmasters; i++)
+    {
+      mongo_sync_pool_connection *c;
+
+      c = _mongo_sync_pool_connect (host, port, FALSE);
+      if (!c)
+        {
+          int e = errno;
+
+          mongo_sync_disconnect ((mongo_sync_connection *)conn);
+          mongo_sync_pool_free (pool);
+          errno = e;
+          return NULL;
+        }
+      c->pool_id = i;
+
+      pool->masters = g_list_append (pool->masters, c);
+    }
+
+  for (i = 0; i < pool->nslaves; i++)
+    {
+      mongo_sync_pool_connection *c;
+      gchar *shost = NULL;
+      gint sport = 27017;
+      GList *l;
+      gboolean found = FALSE;
+      gboolean need_restart = (j != 0);
+
+      /* Select the next secondary: the next host in the replica set
+         list that is not the primary we probed. */
+      l = g_list_nth (conn->super.rs.hosts, j);
+
+      do
+        {
+          j++;
+          if (l && mongo_util_parse_addr ((gchar *)l->data, &shost, &sport))
+            {
+              if (sport != port || strcmp (host, shost) != 0)
+                {
+                  found = TRUE;
+                  break;
+                }
+              /* This entry is the primary itself; drop the parsed
+                 address and keep scanning. */
+              g_free (shost);
+              shost = NULL;
+            }
+          l = g_list_next (l);
+          if (!l && need_restart)
+            {
+              need_restart = FALSE;
+              j = 0;
+              l = g_list_nth (conn->super.rs.hosts, j);
+            }
+        }
+      while (l);
+
+      if (!found)
+        {
+          /* Fewer usable secondaries than requested: record how many
+             were actually added so far. */
+          pool->nslaves = i;
+          break;
+        }
+
+      /* Connect to it. */
+      c = _mongo_sync_pool_connect (shost, sport, TRUE);
+      g_free (shost);
+      if (!c)
+        {
+          pool->nslaves = i;
+          break;
+        }
+      c->pool_id = pool->nmasters + i + 1;
+
+      pool->slaves = g_list_append (pool->slaves, c);
+    }
+
+  mongo_sync_disconnect ((mongo_sync_connection *)conn);
+  return pool;
+}
+
+void
+mongo_sync_pool_free (mongo_sync_pool *pool)
+{
+  GList *link;
+
+  if (!pool)
+    return;
+
+  /* Disconnect every pooled connection — masters first, then the
+   * slaves — releasing each list link as we go. */
+  for (link = pool->masters; link; link = g_list_delete_link (link, link))
+    mongo_sync_disconnect ((mongo_sync_connection *)link->data);
+
+  for (link = pool->slaves; link; link = g_list_delete_link (link, link))
+    mongo_sync_disconnect ((mongo_sync_connection *)link->data);
+
+  g_free (pool);
+}
+
+/* Scan @list for the first connection not in use; mark it used and
+ * return it, or NULL when every connection is busy. */
+static mongo_sync_pool_connection *
+_pool_pick_idle (GList *list)
+{
+  GList *l;
+
+  for (l = list; l; l = g_list_next (l))
+    {
+      mongo_sync_pool_connection *c = (mongo_sync_pool_connection *)l->data;
+
+      if (!c->in_use)
+        {
+          c->in_use = TRUE;
+          return c;
+        }
+    }
+
+  return NULL;
+}
+
+mongo_sync_pool_connection *
+mongo_sync_pool_pick (mongo_sync_pool *pool,
+                      gboolean want_master)
+{
+  mongo_sync_pool_connection *c;
+
+  if (!pool)
+    {
+      errno = ENOTCONN;
+      return NULL;
+    }
+
+  /* Prefer a secondary when the caller does not insist on a master;
+   * fall back to the master list either way. */
+  if (!want_master)
+    {
+      c = _pool_pick_idle (pool->slaves);
+      if (c)
+        return c;
+    }
+
+  c = _pool_pick_idle (pool->masters);
+  if (c)
+    return c;
+
+  errno = EAGAIN;
+  return NULL;
+}
+
+/* Return a picked connection to the pool, marking it idle again.
+ *
+ * Fixes over the previous revision: the result of g_list_nth_data()
+ * is checked (a bogus pool_id previously caused a NULL dereference),
+ * which also makes the ENOENT path reachable instead of dead code.
+ *
+ * @returns TRUE on success, FALSE with errno set otherwise.
+ */
+gboolean
+mongo_sync_pool_return (mongo_sync_pool *pool,
+                        mongo_sync_pool_connection *conn)
+{
+  mongo_sync_pool_connection *c;
+
+  if (!pool)
+    {
+      errno = ENOTCONN;
+      return FALSE;
+    }
+  if (!conn)
+    {
+      errno = EINVAL;
+      return FALSE;
+    }
+
+  if (conn->pool_id > pool->nmasters)
+    {
+      /* Slave connection: slave pool_ids start right after the
+         master range. */
+      if (conn->pool_id - pool->nmasters > pool->nslaves ||
+          pool->nslaves == 0)
+        {
+          errno = ERANGE;
+          return FALSE;
+        }
+
+      c = (mongo_sync_pool_connection *)g_list_nth_data
+        (pool->slaves, conn->pool_id - pool->nmasters - 1);
+    }
+  else
+    c = (mongo_sync_pool_connection *)g_list_nth_data (pool->masters,
+                                                       conn->pool_id);
+
+  if (!c)
+    {
+      /* The id did not map to a pooled connection. */
+      errno = ENOENT;
+      return FALSE;
+    }
+
+  c->in_use = FALSE;
+  return TRUE;
+}
diff --git a/src/mongo-sync-pool.h b/src/mongo-sync-pool.h
new file mode 100644
index 0000000..8750815
--- /dev/null
+++ b/src/mongo-sync-pool.h
@@ -0,0 +1,133 @@
+/* mongo-sync-pool.h - libmongo-client connection pool API
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/mongo-sync-pool.h
+ * MongoDB connection pool API public header.
+ *
+ * @addtogroup mongo_sync
+ * @{
+ */
+
+#ifndef LIBMONGO_POOL_H
+#define LIBMONGO_POOL_H 1
+
+#include <mongo-sync.h>
+#include <glib.h>
+
+G_BEGIN_DECLS
+
+/** @defgroup mongo_sync_pool_api Mongo Sync Pool API
+ *
+ * These commands implement connection pooling over the mongo_sync
+ * family of commands.
+ *
+ * Once a pool is set up, one can pick and return connections at one's
+ * leisure. Picking is done in a round-robin fashion (excluding
+ * connections that have been picked but not returned yet).
+ *
+ * @addtogroup mongo_sync_pool_api
+ * @{
+ */
+
+/** Opaque synchronous connection pool object.
+ *
+ * This represents a single connection within the pool.
+ */
+typedef struct _mongo_sync_pool_connection mongo_sync_pool_connection;
+
+/** Opaque synchronous pool object.
+ *
+ * This is the entire connection pool, with all its meta-data.
+ */
+typedef struct _mongo_sync_pool mongo_sync_pool;
+
+/** Create a new synchronous connection pool.
+ *
+ * Sets up a connection pool towards a given MongoDB server, and all
+ * its secondaries (if any).
+ *
+ * @param host is the address of the server.
+ * @param port is the port to connect to.
+ * @param nmasters is the number of connections to make towards the
+ * master.
+ * @param nslaves is the number of connections to make towards the
+ * secondaries.
+ *
+ * @note Either @a nmasters or @a nslaves can be zero, but not both at
+ * the same time.
+ *
+ * @note The @a host MUST be a master, otherwise the function will
+ * return an error.
+ *
+ * @returns A newly allocated mongo_sync_pool object, or NULL on
+ * error. It is the responsibility of the caller to close and free the
+ * pool when appropriate.
+ */
+mongo_sync_pool *mongo_sync_pool_new (const gchar *host,
+ gint port,
+ gint nmasters, gint nslaves);
+
+/** Close and free a synchronous connection pool.
+ *
+ * @param pool is the pool to shut down.
+ *
+ * @note The object will be freed, and shall not be used afterwards!
+ */
+void mongo_sync_pool_free (mongo_sync_pool *pool);
+
+/** Pick a connection from a synchronous connection pool.
+ *
+ * Based on given preferences, selects a free connection object from
+ * the pool, and returns it.
+ *
+ * @param pool is the pool to select from.
+ * @param want_master flags whether the caller wants a master connection,
+ * or secondaries are acceptable too.
+ *
+ * @note For write operations, always select a master!
+ *
+ * @returns A connection object from the pool.
+ *
+ * @note The returned object can be safely casted to
+ * mongo_sync_connection, and passed to any of the mongo_sync family
+ * of commands. Do note however, that one shall not close or otherwise
+ * free a connection object returned by this function.
+ */
+mongo_sync_pool_connection *mongo_sync_pool_pick (mongo_sync_pool *pool,
+ gboolean want_master);
+
+/** Return a connection to the synchronous connection pool.
+ *
+ * Once one is not using a connection anymore, it should be returned
+ * to the pool using this function.
+ *
+ * @param pool is the pool to return to.
+ * @param conn is the connection to return.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ *
+ * @note The returned connection should not be used afterwards.
+ */
+gboolean mongo_sync_pool_return (mongo_sync_pool *pool,
+ mongo_sync_pool_connection *conn);
+
+/** @} */
+
+/** @} */
+
+G_END_DECLS
+
+#endif
diff --git a/src/mongo-sync.c b/src/mongo-sync.c
new file mode 100644
index 0000000..cd37ec5
--- /dev/null
+++ b/src/mongo-sync.c
@@ -0,0 +1,2155 @@
+/* mongo-sync.c - libmongo-client synchronous wrapper API
+ * Copyright 2011, 2012, 2013, 2014 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/mongo-sync.c
+ * MongoDB synchronous wrapper API implementation.
+ */
+
+#include "config.h"
+#include "mongo.h"
+#include "libmongo-private.h"
+
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+/* Free every element of *@list with g_free(), release the list
+ * itself, and reset *@list to NULL. NULL list (or pointer) is a
+ * no-op. */
+static void
+_list_free_full (GList **list)
+{
+  GList *cur;
+
+  if (!list)
+    return;
+
+  for (cur = *list; cur; cur = g_list_delete_link (cur, cur))
+    g_free (cur->data);
+
+  *list = NULL;
+}
+
+/* Securely dispose of an authentication credential: overwrite it,
+ * munlock() its memory, free it, and NULL the pointer.
+ *
+ * The wipe goes through a volatile pointer so the compiler cannot
+ * eliminate the "dead" stores before g_free() (a plain memset here
+ * is a candidate for dead-store elimination).
+ */
+static void
+_mongo_auth_prop_destroy (gchar **prop)
+{
+  volatile gchar *p;
+  size_t l, i;
+
+  if (!prop || !*prop)
+    return;
+
+  l = strlen (*prop);
+  p = *prop;
+  for (i = 0; i < l; i++)
+    p[i] = 0;
+
+  munlock (*prop, l);
+  g_free (*prop);
+
+  *prop = NULL;
+}
+
+/* Release everything held by a replica_set descriptor and reset its
+ * members; the struct itself is left intact. (_list_free_full()
+ * already NULLs the list pointers it is given.) */
+static void
+_replica_set_free (replica_set *rs)
+{
+  _list_free_full (&rs->hosts);
+  _list_free_full (&rs->seeds);
+
+  g_free (rs->primary);
+  rs->primary = NULL;
+}
+
+/* Return a deep copy of @list: a new list whose elements are
+ * g_strdup()'d copies of the original string elements.
+ *
+ * Walks the links directly and builds the copy with prepend +
+ * reverse, making the copy O(n) instead of the previous O(n^2)
+ * g_list_length()/g_list_nth_data()/g_list_append() pattern.
+ */
+static GList *
+_list_copy_full (GList *list)
+{
+  GList *new_list = NULL;
+  GList *l;
+
+  for (l = list; l != NULL; l = g_list_next (l))
+    new_list = g_list_prepend (new_list,
+                               g_strdup ((const gchar *)l->data));
+
+  return g_list_reverse (new_list);
+}
+
+/* Move one credential string from *@src into *@dst: duplicate it,
+ * mlock() the copy, and securely destroy the original. No-op when
+ * *@src is NULL. */
+static void
+_auth_prop_move (gchar **dst, gchar **src)
+{
+  if (!*src)
+    return;
+
+  *dst = g_strdup (*src);
+  mlock (*dst, strlen (*dst));
+  _mongo_auth_prop_destroy (src);
+}
+
+/* Save the replica set topology and the authentication credentials
+ * of @conn into @cache, so a later connection can be rebuilt from
+ * them. Credentials are moved, not copied: they are wiped from the
+ * connection once cached. The three identical db/user/pw sequences
+ * are factored into _auth_prop_move(). */
+static void
+_recovery_cache_store (mongo_sync_conn_recovery_cache *cache,
+                       mongo_sync_connection *conn)
+{
+  mongo_sync_conn_recovery_cache_discard (cache);
+
+  cache->rs.seeds = _list_copy_full (conn->rs.seeds);
+  cache->rs.hosts = _list_copy_full (conn->rs.hosts);
+  cache->rs.primary = g_strdup (conn->rs.primary);
+
+  _auth_prop_move (&cache->auth.db, &conn->auth.db);
+  _auth_prop_move (&cache->auth.user, &conn->auth.user);
+  _auth_prop_move (&cache->auth.pw, &conn->auth.pw);
+}
+
+/* Replace *@dst with an mlock()ed copy of @src, or just clear it
+ * when @src is NULL; the previous value is securely destroyed. */
+static void
+_auth_prop_set (gchar **dst, const gchar *src)
+{
+  _mongo_auth_prop_destroy (dst);
+  if (src)
+    {
+      *dst = g_strdup (src);
+      mlock (*dst, strlen (*dst));
+    }
+}
+
+/* Populate @conn from a previously stored recovery @cache: replica
+ * set topology and credentials, then remember the cache on the
+ * connection. */
+static void
+_recovery_cache_load (mongo_sync_conn_recovery_cache *cache,
+                      mongo_sync_connection *conn)
+{
+  conn->rs.seeds = _list_copy_full (cache->rs.seeds);
+  conn->rs.hosts = _list_copy_full (cache->rs.hosts);
+  conn->rs.primary = g_strdup (cache->rs.primary);
+
+  _auth_prop_set (&conn->auth.db, cache->auth.db);
+  _auth_prop_set (&conn->auth.user, cache->auth.user);
+  _auth_prop_set (&conn->auth.pw, cache->auth.pw);
+
+  conn->recovery_cache = cache;
+}
+
+/* Reset the sync-level fields of a freshly (re)created connection to
+ * their defaults; the wire-level "super" part is left untouched. */
+static void
+_mongo_sync_conn_init (mongo_sync_connection *conn, gboolean slaveok)
+{
+  conn->slaveok = slaveok;
+  conn->safe_mode = FALSE;
+  conn->auto_reconnect = FALSE;
+  conn->max_insert_size = MONGO_SYNC_DEFAULT_MAX_INSERT_SIZE;
+
+  conn->last_error = NULL;
+  conn->recovery_cache = NULL;
+
+  conn->rs.seeds = NULL;
+  conn->rs.hosts = NULL;
+  conn->rs.primary = NULL;
+
+  conn->auth.db = NULL;
+  conn->auth.user = NULL;
+  conn->auth.pw = NULL;
+}
+
+/* Create a synchronous connection to @address:@port, initialising it
+ * either from @cache (when given) or as a single-seed connection. */
+static mongo_sync_connection *
+_recovery_cache_connect (mongo_sync_conn_recovery_cache *cache,
+                         const gchar *address, gint port,
+                         gboolean slaveok)
+{
+  mongo_connection *base;
+  mongo_sync_connection *conn;
+
+  base = mongo_connect (address, port);
+  if (!base)
+    return NULL;
+
+  /* Grow the wire-level connection into a sync-level one in place. */
+  conn = g_realloc (base, sizeof (mongo_sync_connection));
+  _mongo_sync_conn_init (conn, slaveok);
+
+  if (cache)
+    _recovery_cache_load (cache, conn);
+  else
+    conn->rs.seeds = g_list_append (NULL,
+                                    g_strdup_printf ("%s:%d",
+                                                     address, port));
+
+  return conn;
+}
+
+/* Connect to a single MongoDB server (see mongo-sync.h for the
+ * public contract). No recovery cache is attached; the target
+ * address becomes the connection's only seed. */
+mongo_sync_connection *
+mongo_sync_connect (const gchar *address, gint port,
+                    gboolean slaveok)
+{
+  return _recovery_cache_connect (NULL, address, port, slaveok);
+}
+
+/* Versioned-symbol alias kept for binary compatibility with
+ * libmongo-client 0.1.0 (bound to mongo_sync_connect@LMC_0.1.0 by a
+ * .symver directive). Simply forwards to mongo_sync_connect(). */
+mongo_sync_connection *
+mongo_sync_connect_0_1_0 (const gchar *host, gint port,
+                          gboolean slaveok)
+{
+  return mongo_sync_connect (host, port, slaveok);
+}
+
+#if VERSIONED_SYMBOLS
+__asm__(".symver mongo_sync_connect_0_1_0,mongo_sync_connect@LMC_0.1.0");
+#endif
+
+gboolean
+mongo_sync_conn_seed_add (mongo_sync_connection *conn,
+                          const gchar *host, gint port)
+{
+  gchar *seed;
+
+  /* Append a "host:port" seed used when reconnecting to the replica
+   * set. */
+  if (conn == NULL)
+    {
+      errno = ENOTCONN;
+      return FALSE;
+    }
+  if (host == NULL || port < 0)
+    {
+      errno = EINVAL;
+      return FALSE;
+    }
+
+  seed = g_strdup_printf ("%s:%d", host, port);
+  conn->rs.seeds = g_list_append (conn->rs.seeds, seed);
+
+  return TRUE;
+}
+
+/* Graft the live socket of @new onto @old, keeping @old as the
+ * caller-visible connection object.
+ *
+ * The replica set state of @new is discarded (deliberately not
+ * copied, to avoid reconnect loops), @old's primary and host list
+ * are cleared so they can be re-learned, and the @new shell itself
+ * is freed.
+ */
+static void
+_mongo_sync_connect_replace (mongo_sync_connection *old,
+                             mongo_sync_connection *new)
+{
+  if (!old || !new)
+    return;
+
+  g_free (old->rs.primary);
+  old->rs.primary = NULL;
+
+  /* Delete the host list. */
+  _list_free_full (&old->rs.hosts);
+
+  /* Free the replicaset struct in the new connection. These aren't
+     copied, in order to avoid infinite loops. */
+  _list_free_full (&new->rs.hosts);
+  _list_free_full (&new->rs.seeds);
+  g_free (new->rs.primary);
+
+  g_free (new->last_error);
+
+  /* Close the old socket unless the new connection reuses it. fd 0
+     is a valid descriptor, so test against -1 (as the rest of the
+     code does), not for truthiness. */
+  if (old->super.fd >= 0 && (old->super.fd != new->super.fd))
+    close (old->super.fd);
+
+  old->super.fd = new->super.fd;
+  old->super.request_id = -1;
+  old->slaveok = new->slaveok;
+  g_free (old->last_error);
+  old->last_error = NULL;
+
+  g_free (new);
+}
+
/* Verify the connection (ping, and mastership when force_master is
 * set) and, when it is unusable, reconnect in place: first to the
 * known primary, then to each cached replica-set host, then to each
 * seed.  On success returns 'conn' re-pointed at the new socket and
 * re-authenticated with any cached credentials; on failure returns
 * NULL with errno set (EHOSTUNREACH when nothing was reachable). */
mongo_sync_connection *
mongo_sync_reconnect (mongo_sync_connection *conn,
                      gboolean force_master)
{
  gboolean ping = FALSE;
  guint i;
  mongo_sync_connection *nc;
  gchar *host;
  gint port;

  if (!conn)
    {
      errno = ENOTCONN;
      return NULL;
    }

  ping = mongo_sync_cmd_ping (conn);

  if (ping)
    {
      /* Connection is alive; only keep going if we need a master and
         this node isn't one. */
      if (!force_master)
        return conn;
      if (force_master && mongo_sync_cmd_is_master (conn))
        return conn;

      /* Force refresh the host list. */
      mongo_sync_cmd_is_master (conn);
    }

  /* We either didn't ping, or we're not master, and have to
   * reconnect.
   *
   * First, check if we have a primary, and if we can connect there.
   */

  if (conn->rs.primary)
    {
      if (mongo_util_parse_addr (conn->rs.primary, &host, &port))
        {
          nc = mongo_sync_connect (host, port, conn->slaveok);

          g_free (host);
          if (nc)
            {
              int e;

              /* We can call ourselves here, since connect does not set
                 conn->rs, thus, we won't end up in an infinite loop. */
              nc = mongo_sync_reconnect (nc, force_master);
              e = errno;
              _mongo_sync_connect_replace (conn, nc);
              errno = e;
              /* Re-authenticate with cached credentials, if any. */
              if (conn->auth.db && conn->auth.user && conn->auth.pw)
                mongo_sync_cmd_authenticate (conn, conn->auth.db,
                                             conn->auth.user,
                                             conn->auth.pw);
              return conn;
            }
        }
    }

  /* No primary found, or we couldn't connect, try the rest of the
     hosts. */

  for (i = 0; i < g_list_length (conn->rs.hosts); i++)
    {
      gchar *addr = (gchar *)g_list_nth_data (conn->rs.hosts, i);
      int e;

      if (!mongo_util_parse_addr (addr, &host, &port))
        continue;

      nc = mongo_sync_connect (host, port, conn->slaveok);
      g_free (host);
      if (!nc)
        continue;

      nc = mongo_sync_reconnect (nc, force_master);
      e = errno;
      _mongo_sync_connect_replace (conn, nc);
      errno = e;

      if (conn->auth.db && conn->auth.user && conn->auth.pw)
        mongo_sync_cmd_authenticate (conn, conn->auth.db,
                                     conn->auth.user,
                                     conn->auth.pw);

      return conn;
    }

  /* And if that failed too, try the seeds. */

  for (i = 0; i < g_list_length (conn->rs.seeds); i++)
    {
      gchar *addr = (gchar *)g_list_nth_data (conn->rs.seeds, i);
      int e;

      if (!mongo_util_parse_addr (addr, &host, &port))
        continue;

      nc = mongo_sync_connect (host, port, conn->slaveok);

      g_free (host);

      if (!nc)
        continue;

      nc = mongo_sync_reconnect (nc, force_master);
      e = errno;
      _mongo_sync_connect_replace (conn, nc);
      errno = e;

      if (conn->auth.db && conn->auth.user && conn->auth.pw)
        mongo_sync_cmd_authenticate (conn, conn->auth.db,
                                     conn->auth.user,
                                     conn->auth.pw);

      return conn;
    }

  errno = EHOSTUNREACH;
  return NULL;
}
+
/* Tear down a synchronous connection: optionally persist replica-set
 * state into the attached recovery cache, wipe credentials and cached
 * replica-set data, then close the underlying connection. */
void
mongo_sync_disconnect (mongo_sync_connection *conn)
{
  if (!conn)
    return;

  /* NOTE(review): last_error is freed but not set to NULL before the
     cache store below — confirm _recovery_cache_store never reads it. */
  g_free (conn->last_error);

  if (conn->recovery_cache)
    {
      _recovery_cache_store (conn->recovery_cache, conn);
    }

  _mongo_auth_prop_destroy (&conn->auth.db);
  _mongo_auth_prop_destroy (&conn->auth.user);
  _mongo_auth_prop_destroy (&conn->auth.pw);

  _replica_set_free (&conn->rs);

  mongo_disconnect ((mongo_connection *)conn);
}
+
+gint32
+mongo_sync_conn_get_max_insert_size (mongo_sync_connection *conn)
+{
+ if (!conn)
+ {
+ errno = ENOTCONN;
+ return -1;
+ }
+ return conn->max_insert_size;
+}
+
+gboolean
+mongo_sync_conn_set_max_insert_size (mongo_sync_connection *conn,
+ gint32 max_size)
+{
+ if (!conn)
+ {
+ errno = ENOTCONN;
+ return FALSE;
+ }
+ if (max_size <= 0)
+ {
+ errno = ERANGE;
+ return FALSE;
+ }
+
+ errno = 0;
+ conn->max_insert_size = max_size;
+ return TRUE;
+}
+
+gboolean
+mongo_sync_conn_get_safe_mode (const mongo_sync_connection *conn)
+{
+ if (!conn)
+ {
+ errno = ENOTCONN;
+ return FALSE;
+ }
+
+ errno = 0;
+ return conn->safe_mode;
+}
+
+gboolean
+mongo_sync_conn_set_safe_mode (mongo_sync_connection *conn,
+ gboolean safe_mode)
+{
+ if (!conn)
+ {
+ errno = ENOTCONN;
+ return FALSE;
+ }
+
+ errno = 0;
+ conn->safe_mode = safe_mode;
+ return TRUE;
+}
+
+gboolean
+mongo_sync_conn_get_auto_reconnect (const mongo_sync_connection *conn)
+{
+ if (!conn)
+ {
+ errno = ENOTCONN;
+ return FALSE;
+ }
+
+ errno = 0;
+ return conn->auto_reconnect;
+}
+
+gboolean
+mongo_sync_conn_set_auto_reconnect (mongo_sync_connection *conn,
+ gboolean auto_reconnect)
+{
+ if (!conn)
+ {
+ errno = ENOTCONN;
+ return FALSE;
+ }
+
+ conn->auto_reconnect = auto_reconnect;
+ return TRUE;
+}
+
+gboolean
+mongo_sync_conn_get_slaveok (const mongo_sync_connection *conn)
+{
+ if (!conn)
+ {
+ errno = ENOTCONN;
+ return FALSE;
+ }
+
+ errno = 0;
+ return conn->slaveok;
+}
+
+gboolean
+mongo_sync_conn_set_slaveok (mongo_sync_connection *conn,
+ gboolean slaveok)
+{
+ if (!conn)
+ {
+ errno = ENOTCONN;
+ return FALSE;
+ }
+
+ errno = 0;
+ conn->slaveok = slaveok;
+ return TRUE;
+}
+
/* Query flag to OR in when the connection permits slave reads.
 * The macro argument is parenthesized so any expression is safe. */
#define _SLAVE_FLAG(c) (((c)->slaveok) ? MONGO_WIRE_FLAG_QUERY_SLAVE_OK : 0)
+
/* Make sure the connection is usable before a command is sent: verify
 * mastership when required (force_master, or slave reads disallowed),
 * otherwise just ping; transparently reconnect when the connection's
 * auto_reconnect flag allows it.  Returns FALSE with errno set on an
 * unusable connection. */
static inline gboolean
_mongo_cmd_ensure_conn (mongo_sync_connection *conn,
                        gboolean force_master)
{
  if (!conn)
    {
      errno = ENOTCONN;
      return FALSE;
    }

  if (force_master || !conn->slaveok)
    {
      errno = 0;
      if (!mongo_sync_cmd_is_master (conn))
        {
          /* EPROTO means the server answered but something was wrong
             with the reply — reconnecting won't help. */
          if (errno == EPROTO)
            return FALSE;
          if (!conn->auto_reconnect)
            {
              errno = ENOTCONN;
              return FALSE;
            }
          if (!mongo_sync_reconnect (conn, TRUE))
            return FALSE;
        }
      return TRUE;
    }

  errno = 0;
  if (!mongo_sync_cmd_ping (conn))
    {
      if (errno == EPROTO)
        return FALSE;
      if (!conn->auto_reconnect)
        {
          errno = ENOTCONN;
          return FALSE;
        }
      if (!mongo_sync_reconnect (conn, FALSE))
        {
          errno = ENOTCONN;
          return FALSE;
        }
    }
  errno = 0;
  return TRUE;
}
+
/* For read commands in safe mode on a non-slaveok connection, verify
 * we are talking to a master (reconnecting if allowed).  A slaveok or
 * non-safe-mode connection passes without any network traffic. */
static inline gboolean
_mongo_cmd_verify_slaveok (mongo_sync_connection *conn)
{
  if (!conn)
    {
      errno = ENOTCONN;
      return FALSE;
    }

  if (conn->slaveok || !conn->safe_mode)
    return TRUE;

  errno = 0;
  if (!mongo_sync_cmd_is_master (conn))
    {
      /* EPROTO: the server replied, but badly — don't retry. */
      if (errno == EPROTO)
        return FALSE;
      if (!conn->auto_reconnect)
        {
          errno = ENOTCONN;
          return FALSE;
        }
      if (!mongo_sync_reconnect (conn, TRUE))
        return FALSE;
    }
  return TRUE;
}
+
/* Send packet 'p' on the connection, optionally ensuring a master
 * first, and retrying at most once via mongo_sync_reconnect when
 * auto-reconnect is permitted.  Always consumes (frees) 'p', on both
 * success and failure. */
static inline gboolean
_mongo_sync_packet_send (mongo_sync_connection *conn,
                         mongo_packet *p,
                         gboolean force_master,
                         gboolean auto_reconnect)
{
  /* 'out' flips to TRUE after the first reconnect attempt, limiting
     the retry loop to a single reconnection. */
  gboolean out = FALSE;

  if (force_master)
    if (!_mongo_cmd_ensure_conn (conn, force_master))
      {
        mongo_wire_packet_free (p);
        return FALSE;
      }

  for (;;)
    {
      if (!mongo_packet_send ((mongo_connection *)conn, p))
        {
          int e = errno;

          if (!auto_reconnect || (conn && !conn->auto_reconnect))
            {
              mongo_wire_packet_free (p);
              errno = e;
              return FALSE;
            }

          if (out || !mongo_sync_reconnect (conn, force_master))
            {
              mongo_wire_packet_free (p);
              errno = e;
              return FALSE;
            }

          out = TRUE;
          continue;
        }
      break;
    }
  mongo_wire_packet_free (p);
  return TRUE;
}
+
/* Receive and validate the reply to request 'rid': the resp_to id
 * must match, none of the error bits in 'flags' may be set, and at
 * least one document must be present.  Returns the packet, or NULL
 * with errno set (EPROTO on protocol mismatch, ENOENT when no
 * documents were returned). */
static inline mongo_packet *
_mongo_sync_packet_recv (mongo_sync_connection *conn, gint32 rid, gint32 flags)
{
  mongo_packet *p;
  mongo_packet_header h;
  mongo_reply_packet_header rh;

  p = mongo_packet_recv ((mongo_connection *)conn);
  if (!p)
    return NULL;

  if (!mongo_wire_packet_get_header_raw (p, &h))
    {
      int e = errno;

      mongo_wire_packet_free (p);
      errno = e;
      return NULL;
    }

  /* The reply must correspond to the request we just sent. */
  if (h.resp_to != rid)
    {
      mongo_wire_packet_free (p);
      errno = EPROTO;
      return NULL;
    }

  if (!mongo_wire_reply_packet_get_header (p, &rh))
    {
      int e = errno;

      mongo_wire_packet_free (p);
      errno = e;
      return NULL;
    }

  /* Caller-specified failure flags (e.g. query failure, no cursor). */
  if (rh.flags & flags)
    {
      mongo_wire_packet_free (p);
      errno = EPROTO;
      return NULL;
    }

  if (rh.returned == 0)
    {
      mongo_wire_packet_free (p);
      errno = ENOENT;
      return NULL;
    }

  return p;
}
+
+static gboolean
+_mongo_sync_check_ok (bson *b)
+{
+ bson_cursor *c;
+ gdouble d;
+
+ c = bson_find (b, "ok");
+ if (!c)
+ {
+ errno = ENOENT;
+ return FALSE;
+ }
+
+ if (!bson_cursor_get_double (c, &d))
+ {
+ bson_cursor_free (c);
+ errno = EINVAL;
+ return FALSE;
+ }
+ bson_cursor_free (c);
+ errno = (d == 1) ? 0 : EPROTO;
+ return (d == 1);
+}
+
+static gboolean
+_mongo_sync_get_error (const bson *rep, gchar **error)
+{
+ bson_cursor *c;
+
+ if (!error)
+ return FALSE;
+
+ c = bson_find (rep, "err");
+ if (!c)
+ {
+ c = bson_find (rep, "errmsg");
+ if (!c)
+ {
+ errno = EPROTO;
+ return FALSE;
+ }
+ }
+ if (bson_cursor_type (c) == BSON_TYPE_NONE ||
+ bson_cursor_type (c) == BSON_TYPE_NULL)
+ {
+ *error = NULL;
+ bson_cursor_free (c);
+ return TRUE;
+ }
+ else if (bson_cursor_type (c) == BSON_TYPE_STRING)
+ {
+ const gchar *err;
+
+ bson_cursor_get_string (c, &err);
+ *error = g_strdup (err);
+ bson_cursor_free (c);
+ return TRUE;
+ }
+ errno = EPROTO;
+ return FALSE;
+}
+
/* Inspect the first document of reply 'p' for errors.  When check_ok
 * is set, require "ok" == 1; otherwise look for an "err"/"errmsg"
 * string.  On error the connection's last_error is refreshed, the
 * packet is freed and NULL returned; otherwise 'p' is handed back to
 * the caller unchanged. */
static mongo_packet *
_mongo_sync_packet_check_error (mongo_sync_connection *conn, mongo_packet *p,
                                gboolean check_ok)
{
  bson *b;
  gboolean error;

  if (!p)
    return NULL;

  if (!mongo_wire_reply_packet_get_nth_document (p, 1, &b))
    {
      mongo_wire_packet_free (p);
      errno = EPROTO;
      return NULL;
    }
  bson_finish (b);

  if (check_ok)
    {
      if (!_mongo_sync_check_ok (b))
        {
          int e = errno;

          /* Cache the server's error message on the connection. */
          g_free (conn->last_error);
          conn->last_error = NULL;
          _mongo_sync_get_error (b, &conn->last_error);
          bson_free (b);
          mongo_wire_packet_free (p);
          errno = e;
          return NULL;
        }
      bson_free (b);
      return p;
    }

  g_free (conn->last_error);
  conn->last_error = NULL;
  error = _mongo_sync_get_error (b, &conn->last_error);
  bson_free (b);

  if (error)
    {
      mongo_wire_packet_free (p);
      return NULL;
    }
  return p;
}
+
+static inline gboolean
+_mongo_sync_cmd_verify_result (mongo_sync_connection *conn,
+ const gchar *ns)
+{
+ gchar *error = NULL, *db, *tmp;
+ gboolean res;
+
+ if (!conn || !ns)
+ return FALSE;
+ if (!conn->safe_mode)
+ return TRUE;
+
+ tmp = g_strstr_len (ns, -1, ".");
+ if (tmp)
+ db = g_strndup (ns, tmp - ns);
+ else
+ db = g_strdup (ns);
+
+ res = mongo_sync_cmd_get_last_error (conn, db, &error);
+ g_free (db);
+ res = res && ((error) ? FALSE : TRUE);
+ g_free (error);
+
+ return res;
+}
+
+static void
+_set_last_error (mongo_sync_connection *conn, int err)
+{
+ g_free (conn->last_error);
+ conn->last_error = g_strdup(strerror(err));
+}
+
+gboolean
+mongo_sync_cmd_update (mongo_sync_connection *conn,
+ const gchar *ns,
+ gint32 flags, const bson *selector,
+ const bson *update)
+{
+ mongo_packet *p;
+ gint32 rid;
+
+ rid = mongo_connection_get_requestid ((mongo_connection *)conn) + 1;
+
+ p = mongo_wire_cmd_update (rid, ns, flags, selector, update);
+ if (!p)
+ return FALSE;
+
+ if (!_mongo_sync_packet_send (conn, p, TRUE, TRUE))
+ return FALSE;
+
+ return _mongo_sync_cmd_verify_result (conn, ns);
+}
+
/* Insert 'n' documents into 'ns', splitting the batch into chunks that
 * stay below the connection's max_insert_size.  Returns FALSE with
 * errno set on invalid input (EINVAL/ENOTCONN), an oversized single
 * document (EMSGSIZE), or a send/verify failure. */
gboolean
mongo_sync_cmd_insert_n (mongo_sync_connection *conn,
                         const gchar *ns, gint32 n,
                         const bson **docs)
{
  mongo_packet *p;
  gint32 rid;
  gint32 pos = 0, c, i = 0;
  gint32 size = 0;

  if (!conn)
    {
      errno = ENOTCONN;
      return FALSE;
    }

  if (!ns || !docs)
    {
      errno = EINVAL;
      return FALSE;
    }
  if (n <= 0)
    {
      errno = EINVAL;
      return FALSE;
    }

  /* Reject any single document that alone exceeds the insert limit. */
  for (i = 0; i < n; i++)
    {
      if (bson_size (docs[i]) >= conn->max_insert_size)
        {
          errno = EMSGSIZE;
          return FALSE;
        }
    }

  do
    {
      i = pos;
      c = 0;

      /* Greedily take documents until the chunk would exceed the
         limit, then back off by one unless the rest all fit. */
      while (i < n && size < conn->max_insert_size)
        {
          size += bson_size (docs[i++]);
          c++;
        }
      size = 0;
      if (i < n)
        c--;

      rid = mongo_connection_get_requestid ((mongo_connection *)conn) + 1;

      p = mongo_wire_cmd_insert_n (rid, ns, c, &docs[pos]);
      if (!p)
        return FALSE;

      if (!_mongo_sync_packet_send (conn, p, TRUE, TRUE))
        {
          _set_last_error (conn, errno);
          return FALSE;
        }

      /* In safe mode, check getLastError before the next chunk. */
      if (!_mongo_sync_cmd_verify_result (conn, ns))
        return FALSE;

      pos += c;
    } while (pos < n);

  return TRUE;
}
+
+gboolean
+mongo_sync_cmd_insert (mongo_sync_connection *conn,
+ const gchar *ns, ...)
+{
+ gboolean b;
+ bson **docs, *d;
+ gint32 n = 0;
+ va_list ap;
+
+ if (!conn)
+ {
+ errno = ENOTCONN;
+ return FALSE;
+ }
+
+ if (!ns)
+ {
+ errno = EINVAL;
+ return FALSE;
+ }
+
+ docs = (bson **)g_new0 (bson *, 1);
+
+ va_start (ap, ns);
+ while ((d = (bson *)va_arg (ap, gpointer)))
+ {
+ if (bson_size (d) < 0)
+ {
+ g_free (docs);
+ errno = EINVAL;
+ return FALSE;
+ }
+
+ docs = (bson **)g_renew (bson *, docs, n + 1);
+ docs[n++] = d;
+ }
+ va_end (ap);
+
+ b = mongo_sync_cmd_insert_n (conn, ns, n, (const bson **)docs);
+ g_free (docs);
+ return b;
+}
+
+mongo_packet *
+mongo_sync_cmd_query (mongo_sync_connection *conn,
+ const gchar *ns, gint32 flags,
+ gint32 skip, gint32 ret,
+ const bson *query, const bson *sel)
+{
+ mongo_packet *p;
+ gint32 rid;
+
+ if (!_mongo_cmd_verify_slaveok (conn))
+ return FALSE;
+
+ rid = mongo_connection_get_requestid ((mongo_connection *)conn) + 1;
+
+ p = mongo_wire_cmd_query (rid, ns, flags | _SLAVE_FLAG (conn),
+ skip, ret, query, sel);
+ if (!p)
+ return NULL;
+
+ if (!_mongo_sync_packet_send (conn, p,
+ !((conn && conn->slaveok) ||
+ (flags & MONGO_WIRE_FLAG_QUERY_SLAVE_OK)),
+ TRUE))
+ return NULL;
+
+ p = _mongo_sync_packet_recv (conn, rid, MONGO_REPLY_FLAG_QUERY_FAIL);
+ return _mongo_sync_packet_check_error (conn, p, FALSE);
+}
+
+mongo_packet *
+mongo_sync_cmd_get_more (mongo_sync_connection *conn,
+ const gchar *ns,
+ gint32 ret, gint64 cursor_id)
+{
+ mongo_packet *p;
+ gint32 rid;
+
+ if (!_mongo_cmd_verify_slaveok (conn))
+ return FALSE;
+
+ rid = mongo_connection_get_requestid ((mongo_connection *)conn) + 1;
+
+ p = mongo_wire_cmd_get_more (rid, ns, ret, cursor_id);
+ if (!p)
+ return NULL;
+
+ if (!_mongo_sync_packet_send (conn, p, FALSE, TRUE))
+ return FALSE;
+
+ p = _mongo_sync_packet_recv (conn, rid, MONGO_REPLY_FLAG_NO_CURSOR);
+ return _mongo_sync_packet_check_error (conn, p, FALSE);
+}
+
+gboolean
+mongo_sync_cmd_delete (mongo_sync_connection *conn, const gchar *ns,
+ gint32 flags, const bson *sel)
+{
+ mongo_packet *p;
+ gint32 rid;
+
+ rid = mongo_connection_get_requestid ((mongo_connection *)conn) + 1;
+
+ p = mongo_wire_cmd_delete (rid, ns, flags, sel);
+ if (!p)
+ return FALSE;
+
+ return _mongo_sync_packet_send (conn, p, TRUE, TRUE);
+}
+
+gboolean
+mongo_sync_cmd_kill_cursors (mongo_sync_connection *conn,
+ gint32 n, ...)
+{
+ mongo_packet *p;
+ gint32 rid;
+ va_list ap;
+
+ if (n <= 0)
+ {
+ errno = EINVAL;
+ return FALSE;
+ }
+
+ rid = mongo_connection_get_requestid ((mongo_connection *)conn) + 1;
+
+ va_start (ap, n);
+ p = mongo_wire_cmd_kill_cursors_va (rid, n, ap);
+ if (!p)
+ {
+ int e = errno;
+
+ va_end (ap);
+ errno = e;
+ return FALSE;
+ }
+ va_end (ap);
+
+ return _mongo_sync_packet_send (conn, p, FALSE, TRUE);
+}
+
/* Run a database command ('command') against 'db' via
 * mongo_wire_cmd_custom, optionally verifying the connection first
 * (check_conn) and optionally requiring a master (force_master).
 * Returns the reply packet after its "ok" field has been verified,
 * or NULL with errno set. */
static mongo_packet *
_mongo_sync_cmd_custom (mongo_sync_connection *conn,
                        const gchar *db,
                        const bson *command,
                        gboolean check_conn,
                        gboolean force_master)
{
  mongo_packet *p;
  gint32 rid;

  if (!conn)
    {
      errno = ENOTCONN;
      return NULL;
    }

  rid = mongo_connection_get_requestid ((mongo_connection *)conn) + 1;

  p = mongo_wire_cmd_custom (rid, db, _SLAVE_FLAG (conn), command);
  if (!p)
    return NULL;

  if (!_mongo_sync_packet_send (conn, p, force_master, check_conn))
    return NULL;

  p = _mongo_sync_packet_recv (conn, rid, MONGO_REPLY_FLAG_QUERY_FAIL);
  return _mongo_sync_packet_check_error (conn, p, TRUE);
}
+
+mongo_packet *
+mongo_sync_cmd_custom (mongo_sync_connection *conn,
+ const gchar *db,
+ const bson *command)
+{
+ return _mongo_sync_cmd_custom (conn, db, command, TRUE, FALSE);
+}
+
/* Run the "count" command on db.coll, optionally restricted by
 * 'query'.  Returns the count as a double, or -1 with errno set on
 * failure. */
gdouble
mongo_sync_cmd_count (mongo_sync_connection *conn,
                      const gchar *db, const gchar *coll,
                      const bson *query)
{
  mongo_packet *p;
  bson *cmd;
  bson_cursor *c;
  gdouble d;

  cmd = bson_new_sized (bson_size (query) + 32);
  bson_append_string (cmd, "count", coll, -1);
  if (query)
    bson_append_document (cmd, "query", query);
  bson_finish (cmd);

  p = _mongo_sync_cmd_custom (conn, db, cmd, TRUE, FALSE);
  if (!p)
    {
      int e = errno;

      bson_free (cmd);
      errno = e;
      return -1;
    }
  bson_free (cmd);

  /* 'cmd' is reused below to hold the reply document. */
  if (!mongo_wire_reply_packet_get_nth_document (p, 1, &cmd))
    {
      int e = errno;

      mongo_wire_packet_free (p);
      errno = e;
      return -1;
    }
  mongo_wire_packet_free (p);
  bson_finish (cmd);

  c = bson_find (cmd, "n");
  if (!c)
    {
      bson_free (cmd);
      errno = ENOENT;
      return -1;
    }
  if (!bson_cursor_get_double (c, &d))
    {
      bson_free (cmd);
      bson_cursor_free (c);
      errno = EINVAL;
      return -1;
    }
  bson_cursor_free (c);
  bson_free (cmd);

  return d;
}
+
+gboolean
+mongo_sync_cmd_create (mongo_sync_connection *conn,
+ const gchar *db, const gchar *coll,
+ gint flags, ...)
+{
+ mongo_packet *p;
+ bson *cmd;
+
+ if (!conn)
+ {
+ errno = ENOTCONN;
+ return FALSE;
+ }
+ if (!db || !coll)
+ {
+ errno = EINVAL;
+ return FALSE;
+ }
+
+ cmd = bson_new_sized (128);
+ bson_append_string (cmd, "create", coll, -1);
+ if (flags & MONGO_COLLECTION_AUTO_INDEX_ID)
+ bson_append_boolean (cmd, "autoIndexId", TRUE);
+ if (flags & MONGO_COLLECTION_CAPPED ||
+ flags & MONGO_COLLECTION_CAPPED_MAX ||
+ flags & MONGO_COLLECTION_SIZED)
+ {
+ va_list ap;
+ gint64 i;
+
+ if (flags & MONGO_COLLECTION_CAPPED ||
+ flags & MONGO_COLLECTION_CAPPED_MAX)
+ bson_append_boolean (cmd, "capped", TRUE);
+
+ va_start (ap, flags);
+ i = (gint64)va_arg (ap, gint64);
+ if (i <= 0)
+ {
+ bson_free (cmd);
+ errno = ERANGE;
+ return FALSE;
+ }
+ bson_append_int64 (cmd, "size", i);
+
+ if (flags & MONGO_COLLECTION_CAPPED_MAX)
+ {
+ i = (gint64)va_arg (ap, gint64);
+ if (i <= 0)
+ {
+ bson_free (cmd);
+ errno = ERANGE;
+ return FALSE;
+ }
+ bson_append_int64 (cmd, "max", i);
+ }
+ va_end (ap);
+ }
+ bson_finish (cmd);
+
+ p = _mongo_sync_cmd_custom (conn, db, cmd, TRUE, TRUE);
+ if (!p)
+ {
+ int e = errno;
+
+ bson_free (cmd);
+ errno = e;
+ return FALSE;
+ }
+ bson_free (cmd);
+ mongo_wire_packet_free (p);
+
+ return TRUE;
+}
+
/* Look up db.coll in db.system.namespaces.  Returns the matching
 * namespace document (caller frees with bson_free), or NULL with
 * errno set when the collection does not exist or an error occurred. */
bson *
mongo_sync_cmd_exists (mongo_sync_connection *conn,
                       const gchar *db, const gchar *coll)
{
  bson *cmd, *r;
  mongo_packet *p;
  gchar *ns, *sys;
  gint32 rid;

  if (!conn)
    {
      errno = ENOTCONN;
      return NULL;
    }
  if (!db || !coll)
    {
      errno = EINVAL;
      return NULL;
    }

  rid = mongo_connection_get_requestid ((mongo_connection *)conn) + 1;

  /* Query document: { name: "db.coll" }. */
  ns = g_strconcat (db, ".", coll, NULL);
  cmd = bson_new_sized (128);
  bson_append_string (cmd, "name", ns, -1);
  bson_finish (cmd);
  g_free (ns);

  sys = g_strconcat (db, ".system.namespaces", NULL);

  p = mongo_wire_cmd_query (rid, sys, _SLAVE_FLAG (conn), 0, 1, cmd, NULL);
  if (!p)
    {
      int e = errno;

      bson_free (cmd);
      g_free (sys);

      errno = e;
      return NULL;
    }
  g_free (sys);
  bson_free (cmd);

  if (!_mongo_sync_packet_send (conn, p, !conn->slaveok, TRUE))
    return NULL;

  p = _mongo_sync_packet_recv (conn, rid, MONGO_REPLY_FLAG_QUERY_FAIL);
  if (!p)
    return NULL;

  p = _mongo_sync_packet_check_error (conn, p, FALSE);
  if (!p)
    return NULL;

  if (!mongo_wire_reply_packet_get_nth_document (p, 1, &r))
    {
      int e = errno;

      mongo_wire_packet_free (p);
      errno = e;
      return NULL;
    }
  mongo_wire_packet_free (p);
  bson_finish (r);

  return r;
}
+
+gboolean
+mongo_sync_cmd_drop (mongo_sync_connection *conn,
+ const gchar *db, const gchar *coll)
+{
+ mongo_packet *p;
+ bson *cmd;
+
+ cmd = bson_new_sized (64);
+ bson_append_string (cmd, "drop", coll, -1);
+ bson_finish (cmd);
+
+ p = _mongo_sync_cmd_custom (conn, db, cmd, TRUE, TRUE);
+ if (!p)
+ {
+ int e = errno;
+
+ bson_free (cmd);
+ errno = e;
+ return FALSE;
+ }
+ bson_free (cmd);
+ mongo_wire_packet_free (p);
+
+ return TRUE;
+}
+
/* Run getLastError on 'db' and hand back the full reply document in
 * *error (caller frees with bson_free).  Returns FALSE with errno set
 * on failure, also caching the failure message on the connection. */
gboolean
mongo_sync_cmd_get_last_error_full (mongo_sync_connection *conn,
                                    const gchar *db, bson **error)
{
  mongo_packet *p;
  bson *cmd;

  if (!conn)
    {
      errno = ENOTCONN;
      return FALSE;
    }
  if (!error)
    {
      errno = EINVAL;
      return FALSE;
    }

  cmd = bson_new_sized (64);
  bson_append_int32 (cmd, "getlasterror", 1);
  bson_finish (cmd);

  /* check_conn is FALSE: probing the connection here would recurse
     through the safe-mode machinery. */
  p = _mongo_sync_cmd_custom (conn, db, cmd, FALSE, FALSE);
  if (!p)
    {
      int e = errno;

      bson_free (cmd);
      errno = e;
      _set_last_error (conn, e);
      return FALSE;
    }
  bson_free (cmd);

  if (!mongo_wire_reply_packet_get_nth_document (p, 1, error))
    {
      int e = errno;

      mongo_wire_packet_free (p);
      errno = e;
      _set_last_error (conn, e);
      return FALSE;
    }
  mongo_wire_packet_free (p);
  bson_finish (*error);

  return TRUE;
}
+
/* Run getLastError on 'db' and extract the error string into *error
 * (newly allocated; NULL when the server reported no error — in that
 * case a copy of the connection's cached last_error is returned
 * instead).  Returns FALSE with errno set on failure. */
gboolean
mongo_sync_cmd_get_last_error (mongo_sync_connection *conn,
                               const gchar *db, gchar **error)
{
  bson *err_bson;

  if (!error)
    {
      errno = EINVAL;
      return FALSE;
    }

  if (!mongo_sync_cmd_get_last_error_full (conn, db, &err_bson))
    return FALSE;

  if (!_mongo_sync_get_error (err_bson, error))
    {
      int e = errno;

      bson_free (err_bson);
      errno = e;
      _set_last_error (conn, e);
      return FALSE;
    }
  bson_free (err_bson);

  /* Keep the connection's cached last_error in sync with what we
     return to the caller. */
  if (*error == NULL)
    *error = g_strdup (conn->last_error);
  else
    {
      g_free (conn->last_error);
      conn->last_error = g_strdup(*error);
    }

  return TRUE;
}
+
+gboolean
+mongo_sync_cmd_reset_error (mongo_sync_connection *conn,
+ const gchar *db)
+{
+ mongo_packet *p;
+ bson *cmd;
+
+ if (conn)
+ {
+ g_free (conn->last_error);
+ conn->last_error = NULL;
+ }
+
+ cmd = bson_new_sized (32);
+ bson_append_int32 (cmd, "reseterror", 1);
+ bson_finish (cmd);
+
+ p = _mongo_sync_cmd_custom (conn, db, cmd, FALSE, FALSE);
+ if (!p)
+ {
+ int e = errno;
+
+ bson_free (cmd);
+ errno = e;
+ return FALSE;
+ }
+ bson_free (cmd);
+ mongo_wire_packet_free (p);
+ return TRUE;
+}
+
/* Run the "ismaster" command.  Returns TRUE when the connected node is
 * a master.  As a side effect, refreshes the connection's cached
 * replica-set view: the primary's address (when not master) and the
 * full host list including passive members. */
gboolean
mongo_sync_cmd_is_master (mongo_sync_connection *conn)
{
  bson *cmd, *res, *hosts;
  mongo_packet *p;
  bson_cursor *c;
  gboolean b;

  cmd = bson_new_sized (32);
  bson_append_int32 (cmd, "ismaster", 1);
  bson_finish (cmd);

  p = _mongo_sync_cmd_custom (conn, "system", cmd, FALSE, FALSE);
  if (!p)
    {
      int e = errno;

      bson_free (cmd);
      errno = e;
      return FALSE;
    }
  bson_free (cmd);

  if (!mongo_wire_reply_packet_get_nth_document (p, 1, &res))
    {
      int e = errno;

      mongo_wire_packet_free (p);
      errno = e;
      return FALSE;
    }
  mongo_wire_packet_free (p);
  bson_finish (res);

  /* NOTE(review): bson_find may return NULL here; this relies on
     bson_cursor_get_boolean/bson_cursor_free tolerating a NULL
     cursor — confirm against the bson API. */
  c = bson_find (res, "ismaster");
  if (!bson_cursor_get_boolean (c, &b))
    {
      bson_cursor_free (c);
      bson_free (res);
      errno = EPROTO;
      return FALSE;
    }
  bson_cursor_free (c);

  if (!b)
    {
      const gchar *s;

      /* We're not the master, so we should have a 'primary' key in
         the response. */
      c = bson_find (res, "primary");
      if (bson_cursor_get_string (c, &s))
        {
          g_free (conn->rs.primary);
          conn->rs.primary = g_strdup (s);
        }
      bson_cursor_free (c);
    }

  /* Find all the members of the set, and cache them. */
  c = bson_find (res, "hosts");
  if (!c)
    {
      bson_free (res);
      errno = 0;
      return b;
    }

  if (!bson_cursor_get_array (c, &hosts))
    {
      bson_cursor_free (c);
      bson_free (res);
      errno = 0;
      return b;
    }
  bson_cursor_free (c);
  bson_finish (hosts);

  /* Delete the old host list. */
  _list_free_full (&conn->rs.hosts);

  c = bson_cursor_new (hosts);
  while (bson_cursor_next (c))
    {
      const gchar *s;

      if (bson_cursor_get_string (c, &s))
        conn->rs.hosts = g_list_append (conn->rs.hosts, g_strdup (s));
    }
  bson_cursor_free (c);
  bson_free (hosts);

  /* Passive (non-electable) members also count as hosts to try. */
  c = bson_find (res, "passives");
  if (bson_cursor_get_array (c, &hosts))
    {
      bson_cursor_free (c);
      bson_finish (hosts);

      c = bson_cursor_new (hosts);
      while (bson_cursor_next (c))
        {
          const gchar *s;

          if (bson_cursor_get_string (c, &s))
            conn->rs.hosts = g_list_append (conn->rs.hosts, g_strdup (s));
        }
      bson_free (hosts);
    }
  bson_cursor_free (c);

  bson_free (res);
  errno = 0;
  return b;
}
+
+gboolean
+mongo_sync_cmd_ping (mongo_sync_connection *conn)
+{
+ bson *cmd;
+ mongo_packet *p;
+
+ cmd = bson_new_sized (32);
+ bson_append_int32 (cmd, "ping", 1);
+ bson_finish (cmd);
+
+ p = _mongo_sync_cmd_custom (conn, "system", cmd, FALSE, FALSE);
+ if (!p)
+ {
+ int e = errno;
+
+ bson_free (cmd);
+ errno = e;
+ return FALSE;
+ }
+ bson_free (cmd);
+ mongo_wire_packet_free (p);
+
+ errno = 0;
+ return TRUE;
+}
+
+static gchar *
+_pass_digest (const gchar *user, const gchar *pw)
+{
+ GChecksum *chk;
+ gchar *digest;
+
+ chk = g_checksum_new (G_CHECKSUM_MD5);
+ g_checksum_update (chk, (const guchar *)user, -1);
+ g_checksum_update (chk, (const guchar *)":mongo:", 7);
+ g_checksum_update (chk, (const guchar *)pw, -1);
+ digest = g_strdup (g_checksum_get_string (chk));
+ g_checksum_free (chk);
+
+ return digest;
+}
+
/* Create or update a user in db.system.users via an upsert: selector
 * { user: <user> }, update { $set: { pwd: <digest> } }, plus a
 * "roles" array when given.  Returns FALSE with errno set on
 * failure. */
gboolean
mongo_sync_cmd_user_add_with_roles (mongo_sync_connection *conn,
                                    const gchar *db,
                                    const gchar *user,
                                    const gchar *pw,
                                    const bson *roles)
{
  bson *s, *u;
  gchar *userns;
  gchar *hex_digest;

  if (!db || !user || !pw)
    {
      errno = EINVAL;
      return FALSE;
    }

  userns = g_strconcat (db, ".system.users", NULL);

  hex_digest = _pass_digest (user, pw);

  s = bson_build (BSON_TYPE_STRING, "user", user, -1,
                  BSON_TYPE_NONE);
  bson_finish (s);
  u = bson_build_full (BSON_TYPE_DOCUMENT, "$set", TRUE,
                       bson_build (BSON_TYPE_STRING, "pwd", hex_digest, -1,
                                   BSON_TYPE_NONE),
                       BSON_TYPE_NONE);
  /* NOTE(review): "roles" is appended at the top level of the update
     document, next to "$set" — confirm the server accepts mixing an
     update operator with a plain field here. */
  if (roles)
    bson_append_array (u, "roles", roles);
  bson_finish (u);
  g_free (hex_digest);

  if (!mongo_sync_cmd_update (conn, userns, MONGO_WIRE_FLAG_UPDATE_UPSERT,
                              s, u))
    {
      int e = errno;

      bson_free (s);
      bson_free (u);
      g_free (userns);
      errno = e;
      return FALSE;
    }
  bson_free (s);
  bson_free (u);
  g_free (userns);

  return TRUE;
}
+
+gboolean
+mongo_sync_cmd_user_add (mongo_sync_connection *conn,
+ const gchar *db,
+ const gchar *user,
+ const gchar *pw)
+{
+ return mongo_sync_cmd_user_add_with_roles (conn, db, user, pw, NULL);
+}
+
+gboolean
+mongo_sync_cmd_user_remove (mongo_sync_connection *conn,
+ const gchar *db,
+ const gchar *user)
+{
+ bson *s;
+ gchar *userns;
+
+ if (!db || !user)
+ {
+ errno = EINVAL;
+ return FALSE;
+ }
+
+ userns = g_strconcat (db, ".system.users", NULL);
+
+ s = bson_build (BSON_TYPE_STRING, "user", user, -1,
+ BSON_TYPE_NONE);
+ bson_finish (s);
+
+ if (!mongo_sync_cmd_delete (conn, userns, 0, s))
+ {
+ int e = errno;
+
+ bson_free (s);
+ g_free (userns);
+ errno = e;
+ return FALSE;
+ }
+ bson_free (s);
+ g_free (userns);
+
+ return TRUE;
+}
+
+gboolean
+mongo_sync_cmd_authenticate (mongo_sync_connection *conn,
+ const gchar *db,
+ const gchar *user,
+ const gchar *pw)
+{
+ bson *b;
+ mongo_packet *p;
+ const gchar *s;
+ gchar *nonce;
+ bson_cursor *c;
+
+ GChecksum *chk;
+ gchar *hex_digest;
+ const gchar *digest;
+ gchar *ndb, *nuser, *npw;
+
+ if (!db || !user || !pw)
+ {
+ errno = EINVAL;
+ return FALSE;
+ }
+
+ /* Obtain nonce */
+ b = bson_new_sized (32);
+ bson_append_int32 (b, "getnonce", 1);
+ bson_finish (b);
+
+ p = mongo_sync_cmd_custom (conn, db, b);
+ if (!p)
+ {
+ int e = errno;
+
+ bson_free (b);
+ errno = e;
+ return FALSE;
+ }
+ bson_free (b);
+
+ if (!mongo_wire_reply_packet_get_nth_document (p, 1, &b))
+ {
+ int e = errno;
+
+ mongo_wire_packet_free (p);
+ errno = e;
+ return FALSE;
+ }
+ mongo_wire_packet_free (p);
+ bson_finish (b);
+
+ c = bson_find (b, "nonce");
+ if (!c)
+ {
+ bson_free (b);
+ errno = EPROTO;
+ return FALSE;
+ }
+ if (!bson_cursor_get_string (c, &s))
+ {
+ bson_free (b);
+ errno = EPROTO;
+ return FALSE;
+ }
+ nonce = g_strdup (s);
+ bson_cursor_free (c);
+ bson_free (b);
+
+ /* Generate the password digest. */
+ hex_digest = _pass_digest (user, pw);
+
+ /* Generate the key */
+ chk = g_checksum_new (G_CHECKSUM_MD5);
+ g_checksum_update (chk, (const guchar *)nonce, -1);
+ g_checksum_update (chk, (const guchar *)user, -1);
+ g_checksum_update (chk, (const guchar *)hex_digest, -1);
+ g_free (hex_digest);
+
+ digest = g_checksum_get_string (chk);
+
+ /* Run the authenticate command. */
+ b = bson_build (BSON_TYPE_INT32, "authenticate", 1,
+ BSON_TYPE_STRING, "user", user, -1,
+ BSON_TYPE_STRING, "nonce", nonce, -1,
+ BSON_TYPE_STRING, "key", digest, -1,
+ BSON_TYPE_NONE);
+ bson_finish (b);
+ g_free (nonce);
+ g_checksum_free (chk);
+
+ p = mongo_sync_cmd_custom (conn, db, b);
+ if (!p)
+ {
+ int e = errno;
+
+ bson_free (b);
+ errno = e;
+ return FALSE;
+ }
+ bson_free (b);
+ mongo_wire_packet_free (p);
+
+ ndb = g_strdup (db);
+ _mongo_auth_prop_destroy (&conn->auth.db);
+ conn->auth.db = ndb;
+ mlock (conn->auth.db, strlen (ndb));
+
+ nuser = g_strdup (user);
+ _mongo_auth_prop_destroy (&conn->auth.user);
+ conn->auth.user = nuser;
+ mlock (conn->auth.user, strlen (nuser));
+
+ npw = g_strdup (pw);
+ _mongo_auth_prop_destroy (&conn->auth.pw);
+ conn->auth.pw = npw;
+ mlock (conn->auth.pw, strlen (npw));
+
+ return TRUE;
+}
+
/* Derive an index name from its key document by concatenating
 * "_<field>_<value>_" parts for each key (value omitted when 0).
 * Only boolean, int32, int64 and double values are supported; any
 * other type makes the function return NULL. */
static GString *
_mongo_index_gen_name (const bson *key)
{
  bson_cursor *c;
  GString *name;

  name = g_string_new ("_");
  c = bson_cursor_new (key);
  while (bson_cursor_next (c))
    {
      gint64 v = 0;

      g_string_append (name, bson_cursor_key (c));
      g_string_append_c (name, '_');

      /* Normalize the sort-direction value to a gint64. */
      switch (bson_cursor_type (c))
        {
        case BSON_TYPE_BOOLEAN:
          {
            gboolean vb;

            bson_cursor_get_boolean (c, &vb);
            v = vb;
            break;
          }
        case BSON_TYPE_INT32:
          {
            gint32 vi;

            bson_cursor_get_int32 (c, &vi);
            v = vi;
            break;
          }
        case BSON_TYPE_INT64:
          {
            gint64 vl;

            bson_cursor_get_int64 (c, &vl);
            v = vl;
            break;
          }
        case BSON_TYPE_DOUBLE:
          {
            gdouble vd;

            bson_cursor_get_double (c, &vd);
            v = (gint64)vd;
            break;
          }
        default:
          /* Unsupported key type: give up entirely. */
          bson_cursor_free (c);
          g_string_free (name, TRUE);
          return NULL;
        }
      if (v != 0)
        g_string_append_printf (name, "%" G_GINT64_FORMAT "_", v);
    }
  bson_cursor_free (c);

  return name;
}
+
/* Create an index on 'ns' ("db.collection") for the given key
 * document, with MONGO_INDEX_* option flags, by inserting an index
 * spec into db.system.indexes.  Returns FALSE with errno set on
 * failure (ENOTSUP when the key contains unsupported value types). */
gboolean
mongo_sync_cmd_index_create (mongo_sync_connection *conn,
                             const gchar *ns,
                             const bson *key,
                             gint options)
{
  GString *name;
  gchar *idxns, *t;
  bson *cmd;

  if (!conn)
    {
      errno = ENOTCONN;
      return FALSE;
    }
  if (!ns || !key)
    {
      errno = EINVAL;
      return FALSE;
    }
  /* The namespace must be of the "db.collection" form. */
  if (strchr (ns, '.') == NULL)
    {
      errno = EINVAL;
      return FALSE;
    }

  name = _mongo_index_gen_name (key);
  if (!name)
    {
      errno = ENOTSUP;
      return FALSE;
    }

  /* Build the index spec document. */
  cmd = bson_new_sized (bson_size (key) + name->len + 128);
  bson_append_document (cmd, "key", key);
  bson_append_string (cmd, "ns", ns, -1);
  bson_append_string (cmd, "name", name->str, name->len);
  if (options & MONGO_INDEX_UNIQUE)
    bson_append_boolean (cmd, "unique", TRUE);
  if (options & MONGO_INDEX_DROP_DUPS)
    bson_append_boolean (cmd, "dropDups", TRUE);
  if (options & MONGO_INDEX_BACKGROUND)
    bson_append_boolean (cmd, "background", TRUE);
  if (options & MONGO_INDEX_SPARSE)
    bson_append_boolean (cmd, "sparse", TRUE);
  bson_finish (cmd);
  g_string_free (name, TRUE);

  /* Insert into <db>.system.indexes. */
  t = g_strdup (ns);
  *(strchr (t, '.')) = '\0';
  idxns = g_strconcat (t, ".system.indexes", NULL);
  g_free (t);

  if (!mongo_sync_cmd_insert_n (conn, idxns, 1, (const bson **)&cmd))
    {
      int e = errno;

      bson_free (cmd);
      g_free (idxns);
      errno = e;
      return FALSE;
    }
  bson_free (cmd);
  g_free (idxns);

  return TRUE;
}
+
+static gboolean
+_mongo_sync_cmd_index_drop (mongo_sync_connection *conn,
+ const gchar *full_ns,
+ const gchar *index_name)
+{
+ bson *cmd;
+ gchar *db, *ns;
+ mongo_packet *p;
+
+ if (!conn)
+ {
+ errno = ENOTCONN;
+ return FALSE;
+ }
+ if (!full_ns || !index_name)
+ {
+ errno = EINVAL;
+ return FALSE;
+ }
+ ns = strchr (full_ns, '.');
+ if (ns == NULL)
+ {
+ errno = EINVAL;
+ return FALSE;
+ }
+ ns++;
+
+ cmd = bson_new_sized (256 + strlen (index_name));
+ bson_append_string (cmd, "deleteIndexes", ns, -1);
+ bson_append_string (cmd, "index", index_name, -1);
+ bson_finish (cmd);
+
+ db = g_strndup (full_ns, ns - full_ns - 1);
+ p = mongo_sync_cmd_custom (conn, db, cmd);
+ if (!p)
+ {
+ int e = errno;
+
+ bson_free (cmd);
+ g_free (db);
+ errno = e;
+ return FALSE;
+ }
+ mongo_wire_packet_free (p);
+ g_free (db);
+ bson_free (cmd);
+
+ return TRUE;
+}
+
+gboolean
+mongo_sync_cmd_index_drop (mongo_sync_connection *conn,
+ const gchar *ns,
+ const bson *key)
+{
+ GString *name;
+ gboolean b;
+
+ if (!key)
+ {
+ errno = EINVAL;
+ return FALSE;
+ }
+
+ name = _mongo_index_gen_name (key);
+
+ b = _mongo_sync_cmd_index_drop (conn, ns, name->str);
+ g_string_free (name, TRUE);
+ return b;
+}
+
+gboolean
+mongo_sync_cmd_index_drop_all (mongo_sync_connection *conn,
+ const gchar *ns)
+{
+ return _mongo_sync_cmd_index_drop (conn, ns, "*");
+}
+
+mongo_sync_conn_recovery_cache *
+mongo_sync_conn_recovery_cache_new (void)
+{
+ mongo_sync_conn_recovery_cache *cache;
+
+ cache = g_new0 (mongo_sync_conn_recovery_cache, 1);
+
+ return cache;
+}
+
+void
+mongo_sync_conn_recovery_cache_free (mongo_sync_conn_recovery_cache *cache)
+{
+ mongo_sync_conn_recovery_cache_discard(cache);
+
+ g_free(cache);
+}
+
+void
+mongo_sync_conn_recovery_cache_discard (mongo_sync_conn_recovery_cache *cache)
+{
+ _mongo_auth_prop_destroy (&cache->auth.db);
+ _mongo_auth_prop_destroy (&cache->auth.user);
+ _mongo_auth_prop_destroy (&cache->auth.pw);
+
+ _replica_set_free (&cache->rs);
+}
+
+gboolean
+mongo_sync_conn_recovery_cache_seed_add (mongo_sync_conn_recovery_cache *cache,
+ const gchar *host,
+ gint port)
+{
+ if (!host)
+ {
+ errno = EINVAL;
+ return FALSE;
+ }
+
+ cache->rs.seeds = g_list_append (cache->rs.seeds, g_strdup_printf ("%s:%d", host, port));
+
+ return TRUE;
+}
+
/* Try each "host:port" entry of 'address_list' in order, returning
 * the first successful cache-backed connection.  Unless slaveok is
 * set, the connection is upgraded to a master via
 * mongo_sync_reconnect (the cache is discarded first so stale state
 * is not reused).  Returns NULL when no entry could be connected. */
static mongo_sync_connection *
_recovery_cache_pick_connect_from_list (mongo_sync_conn_recovery_cache *cache,
                                        GList *address_list,
                                        gboolean slaveok)
{
  gint port;
  guint i;
  gchar *host;
  mongo_sync_connection *c = NULL;

  if (address_list)
    {
      for (i = 0; i < g_list_length (address_list); i++)
        {
          gchar *addr = (gchar *)g_list_nth_data (address_list, i);

          if (!mongo_util_parse_addr (addr, &host, &port))
            continue;

          c = _recovery_cache_connect (cache, host, port, slaveok);
          g_free (host);
          if (c)
            {
              if (slaveok)
                return c;
              mongo_sync_conn_recovery_cache_discard (c->recovery_cache);
              return mongo_sync_reconnect (c, FALSE);
            }
        }
    }

  return NULL;
}
+
+mongo_sync_connection *
+mongo_sync_connect_recovery_cache (mongo_sync_conn_recovery_cache *cache,
+ gboolean slaveok)
+{
+ mongo_sync_connection *c = NULL;
+ gchar *host;
+ gint port;
+
+ if (cache->rs.primary && mongo_util_parse_addr (cache->rs.primary, &host, &port))
+ {
+ if ( (c = _recovery_cache_connect (cache, host, port, slaveok)) )
+ {
+ g_free (host);
+ if (slaveok)
+ return c;
+ mongo_sync_conn_recovery_cache_discard (c->recovery_cache);
+ return mongo_sync_reconnect (c, FALSE);
+ }
+ }
+
+ c = _recovery_cache_pick_connect_from_list (cache, cache->rs.seeds, slaveok);
+
+ if (!c)
+ c = _recovery_cache_pick_connect_from_list (cache, cache->rs.hosts, slaveok);
+
+ return c;
+}
+
+const gchar *
+mongo_sync_conn_get_last_error (mongo_sync_connection *conn)
+{
+ return conn->last_error;
+}
diff --git a/src/mongo-sync.h b/src/mongo-sync.h
new file mode 100644
index 0000000..0ae813b
--- /dev/null
+++ b/src/mongo-sync.h
@@ -0,0 +1,640 @@
+/* mongo-sync.h - libmongo-client synchronous wrapper API
+ * Copyright 2011, 2012, 2013, 2014 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/mongo-sync.h
+ * MongoDB synchronous wrapper API public header.
+ */
+
+#ifndef LIBMONGO_SYNC_H
+#define LIBMONGO_SYNC_H 1
+
+#include <mongo-client.h>
+
+#include <glib.h>
+
+G_BEGIN_DECLS
+
+/** Default maximum size for a single bulk insert.
+ *
+ * Defaults to somewhat shy of 4Mb.
+ */
+#define MONGO_SYNC_DEFAULT_MAX_INSERT_SIZE 4 * 1000 * 1000
+
+/** @defgroup mongo_sync Mongo Sync API
+ *
+ * These commands provide wrappers for the most often used MongoDB
+ * commands. All of these will send the command, and receive any
+ * results, thus saving the caller from having to do that himself.
+ *
+ * However, these are only of use when blocking the application is not
+ * an issue. For asynchronous operation, one should still construct
+ * the packets himself, and send / receive when appropriate.
+ *
+ * @addtogroup mongo_sync
+ * @{
+ */
+
+/** Opaque synchronous connection object. */
+typedef struct _mongo_sync_connection mongo_sync_connection;
+
+/** synchronous connection recovery cache object */
+typedef struct _mongo_sync_conn_recovery_cache mongo_sync_conn_recovery_cache;
+
+/** Create a new connection recovery cache object.
+ *
+ * @return the newly created recovery cache object
+ */
+mongo_sync_conn_recovery_cache *mongo_sync_conn_recovery_cache_new (void);
+
+/** Free a connection recovery cache object.
+ *
+ * @param cache is the recovery cache object
+ */
+void mongo_sync_conn_recovery_cache_free (mongo_sync_conn_recovery_cache *cache);
+
+/** Discards a connection recovery cache object.
+ *
+ * @param cache is the recovery cache object
+ */
+void mongo_sync_conn_recovery_cache_discard (mongo_sync_conn_recovery_cache *cache);
+
+/** Add a seed to a connection recovery cache object.
+ *
+ * The seed list will be used for reconnects, prioritized before the
+ * automatically discovered host list.
+ *
+ * @param cache is the connection recovery cache to add a seed to.
+ * @param host is the seed host to add.
+ * @param port is the seed's port.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_conn_recovery_cache_seed_add (mongo_sync_conn_recovery_cache *cache,
+ const gchar *host, gint port);
+
+/** Synchronously connect to a MongoDB server using an external
+ * connection recovery cache object.
+ *
+ * Sets up a synchronous connection to a MongoDB server.
+ *
+ * @param cache is the externally managed connection recovery cache object.
+ * @param slaveok signals whether queries made against a slave are
+ * acceptable.
+ *
+ * @returns A newly allocated mongo_sync_connection object, or NULL on
+ * error. It is the responsibility of the caller to close and free the
+ * connection when appropriate.
+ */
+mongo_sync_connection *mongo_sync_connect_recovery_cache (mongo_sync_conn_recovery_cache *cache,
+ gboolean slaveok);
+
+/** Synchronously connect to a MongoDB server.
+ *
+ * Sets up a synchronous connection to a MongoDB server.
+ *
+ * @param address is the address of the server (IP or unix socket path).
+ * @param port is the port to connect to, or #MONGO_CONN_LOCAL if
+ * address is a unix socket.
+ * @param slaveok signals whether queries made against a slave are
+ * acceptable.
+ *
+ * @returns A newly allocated mongo_sync_connection object, or NULL on
+ * error. It is the responsibility of the caller to close and free the
+ * connection when appropriate.
+ */
+mongo_sync_connection *mongo_sync_connect (const gchar *address,
+ gint port,
+ gboolean slaveok);
+
+/** Add a seed to an existing MongoDB connection.
+ *
+ * The seed list will be used for reconnects, prioritized before the
+ * automatically discovered host list.
+ *
+ * @param conn is the connection to add a seed to.
+ * @param host is the seed host to add.
+ * @param port is the seed's port.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_conn_seed_add (mongo_sync_connection *conn,
+ const gchar *host, gint port);
+
+/** Attempt to connect to another member of a replica set.
+ *
+ * Given an existing connection, this function will try to connect to
+ * an available node (enforcing that it's a primary, if asked to) by
+ * trying all known hosts until it finds one available.
+ *
+ * @param conn is an existing MongoDB connection.
+ * @param force_master signals whether a primary node should be found.
+ *
+ * @returns A mongo_sync_collection object, or NULL if the reconnect fails
+ * for one reason or the other.
+ *
+ * @note The original connection object will be updated too!
+ */
+mongo_sync_connection *mongo_sync_reconnect (mongo_sync_connection *conn,
+ gboolean force_master);
+
+/** Close and free a synchronous MongoDB connection.
+ *
+ * @param conn is the connection to close.
+ *
+ * @note The object will be freed, and shall not be used afterwards!
+ */
+void mongo_sync_disconnect (mongo_sync_connection *conn);
+
+/** Retrieve the state of the SLAVE_OK flag from a sync connection.
+ *
+ * @param conn is the connection to check the flag on.
+ *
+ * @returns The state of the SLAVE_OK flag.
+ */
+gboolean mongo_sync_conn_get_slaveok (const mongo_sync_connection *conn);
+
+/** Set the SLAVE_OK flag on a sync connection.
+ *
+ * @param conn is the connection to set the flag on.
+ * @param slaveok is the state to set.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_conn_set_slaveok (mongo_sync_connection *conn,
+ gboolean slaveok);
+
+/** Retrieve the state of the safe mode flag from a sync connection.
+ *
+ * @param conn is the connection to check the flag on.
+ *
+ * @returns The state of the safe mode flag.
+ */
+gboolean mongo_sync_conn_get_safe_mode (const mongo_sync_connection *conn);
+
+/** Set the safe mode flag on a sync connection.
+ *
+ * Enabling safe mode will result in an additional getLastError() call
+ * after each insert or update, and extra checks performed on other
+ * commands as well.
+ *
+ * The upside is more guarantees that the commands succeed, at the
+ * expense of network traffic and speed.
+ *
+ * @param conn is the connection to set the flag on.
+ * @param safe_mode is the state to set it to.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_conn_set_safe_mode (mongo_sync_connection *conn,
+ gboolean safe_mode);
+
+/** Get the state of the auto-reconnect flag from a sync connection.
+ *
+ * @param conn is the connection to check the flag on.
+ *
+ * @returns The state of the auto-reconnect flag.
+ */
+gboolean mongo_sync_conn_get_auto_reconnect (const mongo_sync_connection *conn);
+
+/** Set the state of the auto-reconnect flag on a sync connection.
+ *
+ * When auto-reconnect is enabled, the library will automatically
+ * attempt to reconnect to a server behind the scenes, when it detects
+ * an error.
+ *
+ * If safe-mode is turned on as well, then auto-reconnect will only
+ * happen if the error is detected before a command is sent towards
+ * the database.
+ *
+ * @param conn is the connection to set auto-reconnect on.
+ * @param auto_reconnect is the state to set it to.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_conn_set_auto_reconnect (mongo_sync_connection *conn,
+ gboolean auto_reconnect);
+
+/** Get the maximum size of a bulk insert package.
+ *
+ * @param conn is the connection to get the maximum size from.
+ *
+ * @returns The maximum size, or -1 on failure.
+ */
+gint32 mongo_sync_conn_get_max_insert_size (mongo_sync_connection *conn);
+
+/** Set the maximum size of a bulk insert package.
+ *
+ * When inserting multiple documents at a time, the library can
+ * automatically split the pack up into smaller chunks. With this
+ * function, one can set the maximum size, past which, the request
+ * will be split into smaller chunks.
+ *
+ * @param conn is the connection to set the maximum size for.
+ * @param max_size is the maximum size, in bytes.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_conn_set_max_insert_size (mongo_sync_connection *conn,
+ gint32 max_size);
+
+/** Send an update command to MongoDB.
+ *
+ * Constructs and sends an update command to MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param ns is the namespace to work in.
+ * @param flags are the flags for the update command. See
+ * mongo_wire_cmd_update().
+ * @param selector is the BSON document that will act as the selector.
+ * @param update is the BSON document that contains the updated
+ * values.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_update (mongo_sync_connection *conn,
+ const gchar *ns,
+ gint32 flags, const bson *selector,
+ const bson *update);
+
+/** Send an insert command to MongoDB.
+ *
+ * Constructs and sends an insert command to MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param ns is the namespace to work in.
+ * @tparam docs are the documents to insert. One must close the list
+ * with a NULL value.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_insert (mongo_sync_connection *conn,
+ const gchar *ns, ...) G_GNUC_NULL_TERMINATED;
+
+
+/** Send an insert command to MongoDB.
+ *
+ * Constructs and sends an insert command to MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param ns is the namespace to work in.
+ * @param n is the number of documents to insert.
+ * @param docs is the array the documents to insert. There must be at
+ * least @a n documents in the array.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_insert_n (mongo_sync_connection *conn,
+ const gchar *ns, gint32 n,
+ const bson **docs);
+
+/** Send a query command to MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param ns is the namespace, the database and collection name
+ * concatenated, and separated with a single dot.
+ * @param flags are the query options. See mongo_wire_cmd_query().
+ * @param skip is the number of documents to skip.
+ * @param ret is the number of documents to return.
+ * @param query is the query BSON object.
+ * @param sel is the (optional) selector BSON object indicating the
+ * fields to return. Passing NULL will return all fields.
+ *
+ * @returns A newly allocated reply packet, or NULL on error. It is the
+ * responsibility of the caller to free the packet once it is not used
+ * anymore.
+ */
+mongo_packet *mongo_sync_cmd_query (mongo_sync_connection *conn,
+ const gchar *ns, gint32 flags,
+ gint32 skip, gint32 ret, const bson *query,
+ const bson *sel);
+
+/** Send a get more command to MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param ns is the namespace, the database and collection name
+ * concatenated, and separated with a single dot.
+ * @param ret is the number of documents to return.
+ * @param cursor_id is the ID of the cursor to use.
+ *
+ * @returns A newly allocated reply packet, or NULL on error. It is
+ * the responsibility of the caller to free the packet once it is not
+ * used anymore.
+ */
+mongo_packet *mongo_sync_cmd_get_more (mongo_sync_connection *conn,
+ const gchar *ns,
+ gint32 ret, gint64 cursor_id);
+
+/** Send a delete command to MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param ns is the namespace, the database and collection name
+ * concatenated, and separated with a single dot.
+ * @param flags are the delete options. See mongo_wire_cmd_delete().
+ * @param sel is the BSON object to use as a selector.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_delete (mongo_sync_connection *conn, const gchar *ns,
+ gint32 flags, const bson *sel);
+
+/** Send a kill_cursors command to MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param n is the number of cursors to kill.
+ * @tparam cursor_ids is the list of cursor ids to kill.
+ *
+ * @note One must supply exactly @a n number of cursor IDs.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_kill_cursors (mongo_sync_connection *conn,
+ gint32 n, ...);
+
+/** Send a custom command to MongoDB.
+ *
+ * Custom commands are queries run in the db.$cmd namespace. The
+ * commands themselves are queries, and as such, BSON objects.
+ *
+ * @param conn is the connection to work with.
+ * @param db is the database in which the command shall be run.
+ * @param command is the BSON object representing the command.
+ *
+ * @returns A newly allocated reply packet, or NULL on error. It is
+ * the responsibility of the caller to free the packet once it is not
+ * used anymore.
+ */
+mongo_packet *mongo_sync_cmd_custom (mongo_sync_connection *conn,
+ const gchar *db,
+ const bson *command);
+
+/** Send a count() command to MongoDB.
+ *
+ * The count command is an efficient way to count the available
+ * documents matching a selector.
+ *
+ * @param conn is the connection to work with.
+ * @param db is the name of the database.
+ * @param coll is the name of the collection.
+ * @param query is the optional selector (NULL will count all
+ * documents within the collection).
+ *
+ * @returns The number of matching documents, or -1 on error.
+ */
+gdouble mongo_sync_cmd_count (mongo_sync_connection *conn,
+ const gchar *db, const gchar *coll,
+ const bson *query);
+
+/** Flags that can be set during collection creation. */
+enum
+ {
+ /** Default options. */
+ MONGO_COLLECTION_DEFAULTS = 0,
+ /** The collection is capped. */
+ MONGO_COLLECTION_CAPPED = 1 << 0,
+    /** The collection is capped by element number as well. */
+ MONGO_COLLECTION_CAPPED_MAX = 1 << 1,
+ /** The collection's _id should be autoindexed. */
+ MONGO_COLLECTION_AUTO_INDEX_ID = 1 << 2,
+ /** The collection needs to be pre-allocated. */
+ MONGO_COLLECTION_SIZED = 1 << 3
+ };
+
+/** Create a new MongoDB collection.
+ *
+ * This command can be used to explicitly create a MongoDB collection,
+ * with various parameters pre-set.
+ *
+ * @param conn is the connection to work with.
+ * @param db is the name of the database.
+ * @param coll is the name of the collection to create.
+ * @param flags is a collection of flags for the collection. Any
+ * combination of MONGO_COLLECTION_DEFAULTS, MONGO_COLLECTION_CAPPED,
+ * MONGO_COLLECTION_CAPPED_MAX, MONGO_COLLECTION_SIZED and
+ * MONGO_COLLECTION_AUTO_INDEX_ID is acceptable.
+ *
+ * @tparam size @b MUST be a 64-bit integer, if
+ * MONGO_COLLECTION_CAPPED or MONGO_COLLECTION_SIZED is specified, and
+ * it must follow the @a flags parameter.
+ * @tparam max @b MUST be a 64-bit integer, if
+ * MONGO_COLLECTION_CAPPED_MAX is specified, and must follow @a size.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_create (mongo_sync_connection *conn,
+ const gchar *db, const gchar *coll,
+ gint flags, ...);
+
+/** Check whether a collection exists in MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param db is the database to search for the collection.
+ * @param coll is the collection to search for.
+ *
+ * @returns A newly allocated BSON object, with data about the
+ * collection on success, NULL otherwise. It is the responsibility of
+ * the caller to free the BSON object once it is no longer needed.
+ */
+bson *mongo_sync_cmd_exists (mongo_sync_connection *conn,
+ const gchar *db, const gchar *coll);
+
+/** Send a drop() command to MongoDB.
+ *
+ * With this command, one can easily drop a collection.
+ *
+ * @param conn is the connection to work with.
+ * @param db is the name of the database.
+ * @param coll is the name of the collection to drop.
+ *
+ * @returns TRUE if the collection was dropped, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_drop (mongo_sync_connection *conn,
+ const gchar *db, const gchar *coll);
+
+/** Get the last error from MongoDB.
+ *
+ * Retrieves the last error from MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param db is the name of the database.
+ * @param error is a pointer to a string variable that will hold the
+ * error message.
+ *
+ * @returns TRUE if the error was successfully retrieved, FALSE
+ * otherwise. The output variable @a error is only set if the function
+ * is returning TRUE.
+ */
+gboolean mongo_sync_cmd_get_last_error (mongo_sync_connection *conn,
+ const gchar *db, gchar **error);
+
+/** Get the last error from MongoDB.
+ *
+ * Retrieves the last error from MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param db is the name of the database.
+ * @param error is a pointer to a BSON variable that will hold the
+ * error message.
+ *
+ * @returns TRUE if the error was successfully retrieved, FALSE
+ * otherwise. The output variable @a error is only set if the function
+ * is returning TRUE.
+ */
+gboolean mongo_sync_cmd_get_last_error_full (mongo_sync_connection *conn,
+ const gchar *db, bson **error);
+
+/** Reset the last error variable in MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param db is the name of the database.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_reset_error (mongo_sync_connection *conn,
+ const gchar *db);
+
+/** Check whether the current node is the master.
+ *
+ * @param conn is the connection to work with.
+ *
+ * @returns TRUE if it is master, FALSE otherwise and on errors.
+ */
+gboolean mongo_sync_cmd_is_master (mongo_sync_connection *conn);
+
+/** Send a PING command to MongoDB.
+ *
+ * @param conn is the connection to work with.
+ *
+ * @returns TRUE if the connection is alive and kicking, FALSE
+ * otherwise.
+ */
+gboolean mongo_sync_cmd_ping (mongo_sync_connection *conn);
+
+/** Add a user to MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param db is the database to add the user to.
+ * @param user is the user to add.
+ * @param pw is the password.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_user_add (mongo_sync_connection *conn,
+ const gchar *db,
+ const gchar *user,
+ const gchar *pw);
+
+/** Add a user to MongoDB, with roles.
+ *
+ * @param conn is the connection to work with.
+ * @param db is the database to add the user to.
+ * @param user is the user to add.
+ * @param pw is the password.
+ * @param roles is a BSON array containing the roles for the user.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_user_add_with_roles (mongo_sync_connection *conn,
+ const gchar *db,
+ const gchar *user,
+ const gchar *pw,
+ const bson *roles);
+
+/** Remove a user from MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param db is the database to remove the user from.
+ * @param user is the username to remove.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_user_remove (mongo_sync_connection *conn,
+ const gchar *db,
+ const gchar *user);
+
+/** Authenticate a user with MongoDB.
+ *
+ * @param conn is the connection to work with.
+ * @param db is the database to authenticate against.
+ * @param user is the username.
+ * @param pw is the password.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_authenticate (mongo_sync_connection *conn,
+ const gchar *db,
+ const gchar *user,
+ const gchar *pw);
+
+/** Flags that can be set at index creation. */
+enum
+ {
+ MONGO_INDEX_UNIQUE = 0x01, /**< Create a unique index. */
+ MONGO_INDEX_DROP_DUPS = 0x02, /**< Drop duplicate entries when
+ creating the indexes. */
+ MONGO_INDEX_BACKGROUND = 0x04, /**< Create indexes in the
+ background. */
+ MONGO_INDEX_SPARSE = 0x08 /**< Create sparse indexes. */
+ };
+
+/** Create an index.
+ *
+ * @param conn is the connection to work with.
+ * @param ns is the namespace to create indexes for.
+ * @param key is the key pattern to base indexes on.
+ * @param options are the index options.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_index_create (mongo_sync_connection *conn,
+ const gchar *ns,
+ const bson *key,
+ gint options);
+
+/** Drop an index.
+ *
+ * @param conn is the connection to work with.
+ * @param ns is the namespace to drop the index from.
+ * @param key is the index pattern to drop.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_index_drop (mongo_sync_connection *conn,
+ const gchar *ns,
+ const bson *key);
+
+/** Drop all indexes from a namespace.
+ *
+ * @param conn is the connection to work with.
+ * @param ns is the namespace whose indexes to drop.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_cmd_index_drop_all (mongo_sync_connection *conn,
+ const gchar *ns);
+
+/** Get the last error message on a connection
+ *
+ * @param conn is the connection
+ *
+ * @returns pointer to the error message, if exists, NULL otherwise
+ */
+const gchar *mongo_sync_conn_get_last_error (mongo_sync_connection *conn);
+
+/** @} */
+
+G_END_DECLS
+
+#endif
diff --git a/src/mongo-utils.c b/src/mongo-utils.c
new file mode 100644
index 0000000..6676aa9
--- /dev/null
+++ b/src/mongo-utils.c
@@ -0,0 +1,197 @@
+/* mongo-utils.c - libmongo-client utility functions
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/mongo-utils.c
+ * Implementation for various libmongo-client helper functions.
+ */
+
+#include <glib.h>
+#include <glib/gprintf.h>
+
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <time.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include "mongo-client.h"
+
+static guint32 machine_id = 0;
+static gint16 pid = 0;
+
+void
+mongo_util_oid_init (gint32 mid)
+{
+  /* Initialise the static machine_id and pid components used by
+   * mongo_util_oid_new*(). Must be called before generating OIDs, and
+   * again whenever the process PID may have changed (e.g. after fork).
+   *
+   * @param mid is the machine id to use; 0 means generate a random one. */
+  pid_t p = getpid ();
+
+  if (mid == 0)
+    {
+      srand (time (NULL));
+      machine_id = rand ();
+    }
+  else
+    machine_id = mid;
+
+  /*
+   * If our pid has more than 16 bits, let half the bits modulate the
+   * machine_id.
+   */
+  if (sizeof (pid_t) > 2)
+    {
+      /* Fix: fold in the high bits of the freshly fetched pid (p). The
+       * original XOR-ed with the stale 16-bit `pid` global, whose
+       * shifted value does not reflect the current process id at all
+       * (it is 0 on first use), making the modulation a no-op and
+       * contradicting the comment above. */
+      machine_id ^= p >> 16;
+    }
+  pid = (gint16)p;
+}
+
+guint8 *
+mongo_util_oid_new_with_time (gint32 ts, gint32 seq)
+{
+  guint8 *oid;
+  /* Timestamp and sequence number are stored big-endian in the OID. */
+  gint32 t = GINT32_TO_BE (ts);
+  gint32 tmp = GINT32_TO_BE (seq);
+
+  /* Fails (returns NULL) unless mongo_util_oid_init() has been called
+   * first to set up machine_id and pid. */
+  if (machine_id == 0 || pid == 0)
+    return NULL;
+
+  /* Layout: bytes 0-3 time (BE), 4-6 machine id, 7-8 pid, 9-11 the low
+   * three bytes of the BE sequence number — only 24 bits of seq are
+   * kept. */
+  oid = (guint8 *)g_new0 (guint8, 12);
+
+  /* Sequence number, last 3 bytes
+   * For simplicity's sake, we put this in first, and overwrite the
+   * first byte later.
+   */
+  memcpy (oid + 4 + 2 + 2, &tmp, 4);
+  /* First four bytes: the time, BE byte order */
+  memcpy (oid, &t, 4);
+  /* Machine ID, byte order doesn't matter, 3 bytes */
+  memcpy (oid + 4, &machine_id, 3);
+  /* PID, byte order doesn't matter, 2 bytes
+   * (this write to bytes 7-8 is what overwrites the first byte of the
+   * 4-byte sequence copied above). */
+  memcpy (oid + 4 + 3, &pid, 2);
+
+  return oid;
+}
+
+guint8 *
+mongo_util_oid_new (gint32 seq)
+{
+  /* Convenience wrapper: generate an ObjectID stamped with the current
+   * time. time(NULL) is truncated to gint32, as OIDs carry a 32-bit
+   * timestamp. */
+  return mongo_util_oid_new_with_time (time (NULL), seq);
+}
+
+gchar *
+mongo_util_oid_as_string (const guint8 *oid)
+{
+  /* Render a 12-byte binary ObjectID as a newly allocated lowercase
+   * hex string; returns NULL when oid is NULL. Caller frees. */
+  gchar *str;
+  gint j;
+
+  if (!oid)
+    return NULL;
+
+  /* 12 binary bytes -> 24 hex digits + terminating NUL = 25 chars.
+   * Fix: the original allocated 26 bytes and wrote the terminator at
+   * index 25, leaving index 24 terminated only as a side effect of the
+   * final g_sprintf() call; terminate explicitly at index 24 instead. */
+  str = g_new (gchar, 25);
+  for (j = 0; j < 12; j++)
+    g_sprintf (&str[j * 2], "%02x", oid[j]);
+  str[24] = 0;
+  return str;
+}
+
+gboolean
+mongo_util_parse_addr (const gchar *addr, gchar **host, gint *port)
+{
+  /* Split "host:port" (or "[ipv6]:port") into a newly allocated host
+   * string and an integer port. On success returns TRUE; a missing
+   * port yields *port == -1. On failure returns FALSE with *host set
+   * to NULL, *port to -1, and errno to EINVAL/ERANGE.
+   *
+   * NOTE(review): uses LONG_MIN/LONG_MAX/INT_MAX but <limits.h> is not
+   * included directly — presumably it arrives via <glib.h>; confirm. */
+  gchar *port_s, *ep;
+  glong p;
+
+  if (!addr || !host || !port)
+    {
+      if (host)
+        *host = NULL;
+      if (port)
+        *port = -1;
+      errno = EINVAL;
+      return FALSE;
+    }
+
+  /* Check for IPv6 literal */
+  if (addr[0] == '[')
+    {
+      /* Host is everything between [] */
+      port_s = strchr (addr + 1, ']');
+      if (!port_s || port_s - addr == 1)
+        {
+          *host = NULL;
+          *port = -1;
+          errno = EINVAL;
+          return FALSE;
+        }
+      *host = g_strndup (addr + 1, port_s - addr - 1);
+
+      /* Skip the ']' and the following ':'.
+       * NOTE(review): the character after ']' is not verified to be
+       * ':'; "[::1]x80" would be accepted here and only rejected later
+       * by the trailing-garbage check on *ep, if at all — confirm. */
+      port_s += 2;
+      if (port_s - addr >= (glong)strlen (addr))
+        {
+          /* "[host]" with no port part: succeed with port unset. */
+          *port = -1;
+          return TRUE;
+        }
+    }
+  else
+    {
+      /* Dealing with something that's not an IPv6 literal */
+
+      /* Split up to host:port; cut at the LAST ':' so host parts
+       * containing ':' lean toward the port being the final field. */
+      port_s = g_strrstr (addr, ":");
+      if (!port_s)
+        {
+          /* No port at all: the whole string is the host. */
+          *host = g_strdup (addr);
+          *port = -1;
+          return TRUE;
+        }
+      if (port_s == addr)
+        {
+          /* ":port" with an empty host is rejected. */
+          *host = NULL;
+          *port = -1;
+          errno = EINVAL;
+          return FALSE;
+        }
+      port_s++;
+      *host = g_strndup (addr, port_s - addr - 1);
+    }
+
+  p = strtol (port_s, &ep, 10);
+  /* NOTE(review): LONG_MIN/LONG_MAX are treated as overflow markers
+   * without checking errno, so those exact legitimate values would be
+   * rejected too — harmless for ports, but not a general strtol idiom. */
+  if (p == LONG_MIN || p == LONG_MAX)
+    {
+      g_free (*host);
+      *host = NULL;
+      *port = -1;
+      errno = ERANGE;
+      return FALSE;
+    }
+  /* MONGO_CONN_LOCAL (a negative sentinel for unix sockets) is allowed
+   * through; anything else must fit a non-negative int. */
+  if ((p != MONGO_CONN_LOCAL) && (p < 0 || p > INT_MAX))
+    {
+      g_free (*host);
+      *host = NULL;
+      *port = -1;
+      errno = ERANGE;
+      return FALSE;
+    }
+  *port = (gint)p;
+
+  /* Reject trailing garbage after the numeric port. */
+  if (ep && *ep)
+    {
+      g_free (*host);
+      *host = NULL;
+      *port = -1;
+      errno = EINVAL;
+      return FALSE;
+    }
+  return TRUE;
+}
diff --git a/src/mongo-utils.h b/src/mongo-utils.h
new file mode 100644
index 0000000..3c3b5df
--- /dev/null
+++ b/src/mongo-utils.h
@@ -0,0 +1,121 @@
+/* mongo-utils.h - libmongo-client utility functions
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/mongo-utils.h
+ * Public header for various libmongo-client helper functions.
+ */
+
+#ifndef LIBMONGO_CLIENT_UTILS_H
+#define LIBMONGO_CLIENT_UTILS_H 1
+
+#include <glib.h>
+
+G_BEGIN_DECLS
+
+/** @defgroup mongo_util Mongo Utils
+ *
+ * Various utility functions related to MongoDB.
+ *
+ * @addtogroup mongo_util
+ * @{
+ */
+
+/** Initialize the static ObjectID components.
+ *
+ * @param machine_id is the machine id to use, or zero to generate one
+ * automatically.
+ *
+ * This function needs to be called once, before any OIDs are
+ * generated. It is also a good idea to call it whenever the calling
+ * program's PID might change.
+ */
+void mongo_util_oid_init (gint32 machine_id);
+
+/** Generate a new ObjectID.
+ *
+ * Based on the current time, the pre-determined pid and machine ID
+ * and a supplied sequence number, generate a new ObjectID.
+ *
+ * The machine id and the PID are updated whenever
+ * mongo_util_oid_init() is called.
+ *
+ * @param seq is the sequence number to use.
+ *
+ * @note The ObjectID has space for only 24 bits of sequence bytes, so
+ * it should be noted that while @a seq is 32 bits wide, only 24 of
+ * that will be used.
+ *
+ * @returns A newly allocated ObjectID or NULL on error. Freeing it is
+ * the responsibility of the caller.
+ */
+guint8 *mongo_util_oid_new (gint32 seq);
+
+/** Generate a new ObjectID, with a predefined timestamp.
+ *
+ * Based on the supplied time and sequence number, and the
+ * pre-determined pid and machine ID, generate a new ObjectID.
+ *
+ * The machine id and the PID are updated whenever
+ * mongo_util_oid_init() is called.
+ *
+ * @param time is the timestamp to use.
+ * @param seq is the sequence number to use.
+ *
+ *
+ * @note The ObjectID has space for only 24 bits of sequence bytes, so
+ * it should be noted that while @a seq is 32 bits wide, only 24 of
+ * that will be used.
+ *
+ * @returns A newly allocated ObjectID or NULL on error. Freeing it is
+ * the responsibility of the caller.
+ */
+guint8 *mongo_util_oid_new_with_time (gint32 time, gint32 seq);
+
+/** Convert an ObjectID to its string representation.
+ *
+ * Turns a binary ObjectID into a hexadecimal string.
+ *
+ * @param oid is the binary ObjectID.
+ *
+ * @returns A newly allocated string representation of the ObjectID,
+ * or NULL on error. It is the responsibility of the caller to free it
+ * once it is no longer needed.
+ */
+gchar *mongo_util_oid_as_string (const guint8 *oid);
+
+/** Parse a HOST:IP pair.
+ *
+ * Given a HOST:IP pair, split it up into a host and a port. IPv6
+ * addresses supported, the function cuts at the last ":".
+ *
+ * @param addr is the address to split.
+ * @param host is a pointer to a string where the host part will be
+ * stored.
+ * @param port is a pointer to an integer, where the port part will be
+ * stored.
+ *
+ * @returns TRUE on success, FALSE otherwise. The @a host parameter
+ * will contain a newly allocated string on success. On failure, host
+ * will be set to NULL, and port to -1.
+ */
+gboolean mongo_util_parse_addr (const gchar *addr, gchar **host,
+ gint *port);
+
+/** @} */
+
+G_END_DECLS
+
+#endif
diff --git a/src/mongo-wire.c b/src/mongo-wire.c
new file mode 100644
index 0000000..cf140a5
--- /dev/null
+++ b/src/mongo-wire.c
@@ -0,0 +1,645 @@
+/* mongo-wire.c - libmongo-client's MongoDB wire protocol implementation.
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/mongo-wire.c
+ * Implementation of the MongoDB Wire Protocol.
+ */
+
+#include <glib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+
+#include "bson.h"
+#include "mongo-wire.h"
+#include "libmongo-private.h"
+
+/** @internal Constant zero value. */
+static const gint32 zero = 0;
+
+/** @internal A MongoDB command, as it appears on the wire.
+ *
+ * For the sake of clarity, and sanity of the library, the header and
+ * data parts are stored separately, and as such, will need to be sent
+ * separately as well.
+ */
+struct _mongo_packet
+{
+  mongo_packet_header header; /**< The packet header. Fields are kept
+                                 in little-endian (on-wire) byte
+                                 order. */
+  guint8 *data; /**< The actual data of the packet. */
+  gint32 data_size; /**< Size of the data payload, in host byte
+                       order. */
+};
+
+/** @internal Mongo command opcodes.
+ *
+ * The numeric values are fixed by the MongoDB wire protocol and must
+ * not be changed.
+ */
+typedef enum
+  {
+    OP_REPLY = 1, /**< Message is a reply. Only sent by the server. */
+    OP_MSG = 1000, /**< Message is a generic message. */
+    OP_UPDATE = 2001, /**< Message is an update command. */
+    OP_INSERT = 2002, /**< Message is an insert command. */
+    OP_RESERVED = 2003, /**< Reserved and unused. */
+    OP_QUERY = 2004, /**< Message is a query command. */
+    OP_GET_MORE = 2005, /**< Message is a get more command. */
+    OP_DELETE = 2006, /**< Message is a delete command. */
+    OP_KILL_CURSORS = 2007 /**< Message is a kill cursors command. */
+  } mongo_wire_opcode;
+
+mongo_packet *
+mongo_wire_packet_new (void)
+{
+  mongo_packet *packet = g_new0 (mongo_packet, 1);
+
+  /* Even an empty packet accounts for its own header in the length
+   * field, which is stored little-endian, as on the wire. */
+  packet->header.length = GINT32_TO_LE (sizeof (mongo_packet_header));
+
+  return packet;
+}
+
+gboolean
+mongo_wire_packet_get_header (const mongo_packet *p,
+                              mongo_packet_header *header)
+{
+  if (p == NULL || header == NULL)
+    {
+      errno = EINVAL;
+      return FALSE;
+    }
+
+  /* Copy the stored header, then convert every field from the
+   * on-wire little-endian layout to host byte order in place. */
+  *header = p->header;
+  header->length = GINT32_FROM_LE (header->length);
+  header->id = GINT32_FROM_LE (header->id);
+  header->resp_to = GINT32_FROM_LE (header->resp_to);
+  header->opcode = GINT32_FROM_LE (header->opcode);
+
+  return TRUE;
+}
+
+gboolean
+mongo_wire_packet_get_header_raw (const mongo_packet *p,
+                                  mongo_packet_header *header)
+{
+  if (p == NULL || header == NULL)
+    {
+      errno = EINVAL;
+      return FALSE;
+    }
+
+  /* Raw variant: hand out the header exactly as stored, without any
+   * byte-order conversion. */
+  *header = p->header;
+
+  return TRUE;
+}
+
+gboolean
+mongo_wire_packet_set_header (mongo_packet *p,
+                              const mongo_packet_header *header)
+{
+  if (!p || !header)
+    {
+      errno = EINVAL;
+      return FALSE;
+    }
+  /* The caller supplies the header in host byte order (this function
+   * is the counterpart of mongo_wire_packet_get_header(), and the
+   * fields are converted with GINT32_TO_LE below). The length must
+   * therefore be compared directly: applying GINT32_FROM_LE to a
+   * host-order value would byte-swap it on big-endian machines and
+   * defeat the sanity check. */
+  if (header->length < (gint32)sizeof (mongo_packet_header))
+    {
+      errno = ERANGE;
+      return FALSE;
+    }
+
+  /* Store the header in little-endian byte order, as on the wire. */
+  p->header.length = GINT32_TO_LE (header->length);
+  p->header.id = GINT32_TO_LE (header->id);
+  p->header.resp_to = GINT32_TO_LE (header->resp_to);
+  p->header.opcode = GINT32_TO_LE (header->opcode);
+
+  /* data_size bookkeeping stays in host byte order. */
+  p->data_size = header->length - sizeof (mongo_packet_header);
+
+  return TRUE;
+}
+
+gboolean
+mongo_wire_packet_set_header_raw (mongo_packet *p,
+                                  const mongo_packet_header *header)
+{
+  if (p == NULL || header == NULL)
+    {
+      errno = EINVAL;
+      return FALSE;
+    }
+
+  /* Raw variant: copy the header verbatim, with no byte-order
+   * conversion and no length sanity check. */
+  p->header = *header;
+  p->data_size = header->length - sizeof (mongo_packet_header);
+
+  return TRUE;
+}
+
+gint32
+mongo_wire_packet_get_data (const mongo_packet *p, const guint8 **data)
+{
+  /* Reject NULL arguments as well as packets without a payload. */
+  if (p == NULL || data == NULL || p->data == NULL)
+    {
+      errno = EINVAL;
+      return -1;
+    }
+
+  /* Hand out a read-only view of the internal buffer. */
+  *data = p->data;
+  return p->data_size;
+}
+
+gboolean
+mongo_wire_packet_set_data (mongo_packet *p, const guint8 *data, gint32 size)
+{
+  if (p == NULL || data == NULL || size <= 0)
+    {
+      errno = EINVAL;
+      return FALSE;
+    }
+
+  /* Replace any previous payload with a private copy of the new one;
+   * g_free() ignores NULL, so no separate check is needed. */
+  g_free (p->data);
+  p->data = g_malloc (size);
+  memcpy (p->data, data, size);
+
+  /* Keep the on-wire header length in sync with the new payload. */
+  p->data_size = size;
+  p->header.length =
+    GINT32_TO_LE (p->data_size + sizeof (mongo_packet_header));
+
+  return TRUE;
+}
+
+void
+mongo_wire_packet_free (mongo_packet *p)
+{
+  if (p == NULL)
+    {
+      errno = EINVAL;
+      return;
+    }
+
+  /* g_free() is a no-op on NULL, so the payload needs no guard. */
+  g_free (p->data);
+  g_free (p);
+}
+
+mongo_packet *
+mongo_wire_cmd_update (gint32 id, const gchar *ns, gint32 flags,
+                       const bson *selector, const bson *update)
+{
+  mongo_packet *p;
+  gint32 t_flags = GINT32_TO_LE (flags);
+  gint nslen;
+
+  if (!ns || !selector || !update)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  /* A negative bson_size() marks an invalid/unfinished BSON object;
+   * refuse to serialise those. */
+  if (bson_size (selector) < 0 ||
+      bson_size (update) < 0)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  p = (mongo_packet *)g_new0 (mongo_packet, 1);
+  p->header.id = GINT32_TO_LE (id);
+  p->header.opcode = GINT32_TO_LE (OP_UPDATE);
+
+  /* Wire layout of OP_UPDATE:
+   *   int32 ZERO | cstring ns | int32 flags
+   *   | selector BSON | update BSON
+   * nslen includes the terminating NUL of the namespace string. */
+  nslen = strlen (ns) + 1;
+  p->data_size = bson_size (selector) + bson_size (update) +
+    sizeof (gint32) * 2 + nslen;
+
+  p->data = g_malloc (p->data_size);
+
+  memcpy (p->data, (void *)&zero, sizeof (gint32));
+  memcpy (p->data + sizeof (gint32), (void *)ns, nslen);
+  memcpy (p->data + sizeof (gint32) + nslen, (void *)&t_flags,
+          sizeof (gint32));
+  memcpy (p->data + sizeof (gint32) * 2 + nslen,
+          bson_data (selector), bson_size (selector));
+  memcpy (p->data + sizeof (gint32) * 2 + nslen + bson_size (selector),
+          bson_data (update), bson_size (update));
+
+  /* Full packet length = header + payload, stored little-endian. */
+  p->header.length = GINT32_TO_LE (sizeof (p->header) + p->data_size);
+
+  return p;
+}
+
+mongo_packet *
+mongo_wire_cmd_insert_n (gint32 id, const gchar *ns, gint32 n,
+                         const bson **docs)
+{
+  mongo_packet *packet;
+  gint32 offset, total_size = 0;
+  gint32 i;
+
+  if (ns == NULL || docs == NULL)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+  if (n <= 0)
+    {
+      errno = ERANGE;
+      return NULL;
+    }
+
+  /* Validate every document up front, and sum up their sizes. */
+  for (i = 0; i < n; i++)
+    {
+      if (bson_size (docs[i]) <= 0)
+        {
+          errno = EINVAL;
+          return NULL;
+        }
+      total_size += bson_size (docs[i]);
+    }
+
+  packet = g_new0 (mongo_packet, 1);
+  packet->header.id = GINT32_TO_LE (id);
+  packet->header.opcode = GINT32_TO_LE (OP_INSERT);
+
+  /* Wire layout of OP_INSERT: int32 ZERO | cstring ns | documents. */
+  offset = sizeof (gint32) + strlen (ns) + 1;
+  packet->data_size = offset + total_size;
+  packet->data = (guint8 *)g_malloc (packet->data_size);
+
+  memcpy (packet->data, &zero, sizeof (gint32));
+  memcpy (packet->data + sizeof (gint32), ns, strlen (ns) + 1);
+
+  /* Append the documents one after another. */
+  for (i = 0; i < n; i++)
+    {
+      memcpy (packet->data + offset, bson_data (docs[i]),
+              bson_size (docs[i]));
+      offset += bson_size (docs[i]);
+    }
+
+  packet->header.length = GINT32_TO_LE (sizeof (packet->header)
+                                        + packet->data_size);
+
+  return packet;
+}
+
+mongo_packet *
+mongo_wire_cmd_insert (gint32 id, const gchar *ns, ...)
+{
+  mongo_packet *p;
+  bson **docs, *d;
+  gint32 n = 0;
+  va_list ap;
+
+  if (!ns)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  docs = (bson **)g_new0 (bson *, 1);
+
+  /* Collect the NULL-terminated vararg list of documents into a
+   * dynamically grown array, validating each one. */
+  va_start (ap, ns);
+  while ((d = (bson *)va_arg (ap, gpointer)))
+    {
+      if (bson_size (d) < 0)
+        {
+          /* Every va_start() must be paired with a va_end() on each
+           * exit path (C99 7.15.1); the original code skipped it on
+           * this error return. */
+          va_end (ap);
+          g_free (docs);
+          errno = EINVAL;
+          return NULL;
+        }
+
+      docs = (bson **)g_renew (bson *, docs, n + 1);
+      docs[n++] = d;
+    }
+  va_end (ap);
+
+  /* Delegate the actual packet construction to the array variant. */
+  p = mongo_wire_cmd_insert_n (id, ns, n, (const bson **)docs);
+  g_free (docs);
+  return p;
+}
+
+mongo_packet *
+mongo_wire_cmd_query (gint32 id, const gchar *ns, gint32 flags,
+                      gint32 skip, gint32 ret, const bson *query,
+                      const bson *sel)
+{
+  mongo_packet *p;
+  gint32 tmp, nslen;
+
+  if (!ns || !query)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  /* The selector is optional, but when present it must be a valid
+   * (finished) BSON object, just like the query. */
+  if (bson_size (query) < 0 || (sel && bson_size (sel) < 0))
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  p = (mongo_packet *)g_new0 (mongo_packet, 1);
+  p->header.id = GINT32_TO_LE (id);
+  p->header.opcode = GINT32_TO_LE (OP_QUERY);
+
+  /* Wire layout of OP_QUERY:
+   *   int32 flags | cstring ns | int32 skip | int32 ret
+   *   | query BSON | optional selector BSON */
+  nslen = strlen (ns) + 1;
+  p->data_size =
+    sizeof (gint32) + nslen + sizeof (gint32) * 2 + bson_size (query);
+
+  if (sel)
+    p->data_size += bson_size (sel);
+  p->data = g_malloc (p->data_size);
+
+  /* tmp is reused to little-endian convert each integer field before
+   * it is copied into the payload. */
+  tmp = GINT32_TO_LE (flags);
+  memcpy (p->data, (void *)&tmp, sizeof (gint32));
+  memcpy (p->data + sizeof (gint32), (void *)ns, nslen);
+  tmp = GINT32_TO_LE (skip);
+  memcpy (p->data + sizeof (gint32) + nslen, (void *)&tmp, sizeof (gint32));
+  tmp = GINT32_TO_LE (ret);
+  memcpy (p->data + sizeof (gint32) * 2 + nslen,
+          (void *)&tmp, sizeof (gint32));
+  memcpy (p->data + sizeof (gint32) * 3 + nslen, bson_data (query),
+          bson_size (query));
+
+  if (sel)
+    memcpy (p->data + sizeof (gint32) * 3 + nslen + bson_size (query),
+            bson_data (sel), bson_size (sel));
+
+  /* Full packet length = header + payload, stored little-endian. */
+  p->header.length = GINT32_TO_LE (sizeof (p->header) + p->data_size);
+
+  return p;
+}
+
+mongo_packet *
+mongo_wire_cmd_get_more (gint32 id, const gchar *ns,
+                         gint32 ret, gint64 cursor_id)
+{
+  mongo_packet *packet;
+  gint32 ret_le, nslen, pos;
+  gint64 cursor_le;
+
+  if (ns == NULL)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  packet = g_new0 (mongo_packet, 1);
+  packet->header.id = GINT32_TO_LE (id);
+  packet->header.opcode = GINT32_TO_LE (OP_GET_MORE);
+
+  ret_le = GINT32_TO_LE (ret);
+  cursor_le = GINT64_TO_LE (cursor_id);
+
+  /* Wire layout of OP_GET_MORE:
+   *   int32 ZERO | cstring ns | int32 ret | int64 cursor_id */
+  nslen = strlen (ns) + 1;
+  packet->data_size = sizeof (gint32) * 2 + nslen + sizeof (gint64);
+  packet->data = g_malloc (packet->data_size);
+
+  pos = 0;
+  memcpy (packet->data + pos, &zero, sizeof (gint32));
+  pos += sizeof (gint32);
+  memcpy (packet->data + pos, ns, nslen);
+  pos += nslen;
+  memcpy (packet->data + pos, &ret_le, sizeof (gint32));
+  pos += sizeof (gint32);
+  memcpy (packet->data + pos, &cursor_le, sizeof (gint64));
+
+  packet->header.length = GINT32_TO_LE (sizeof (packet->header)
+                                        + packet->data_size);
+
+  return packet;
+}
+
+mongo_packet *
+mongo_wire_cmd_delete (gint32 id, const gchar *ns,
+                       gint32 flags, const bson *sel)
+{
+  mongo_packet *packet;
+  gint32 flags_le, nslen, pos;
+
+  /* Short-circuiting keeps bson_size() from ever seeing a NULL. */
+  if (ns == NULL || sel == NULL || bson_size (sel) < 0)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  packet = g_new0 (mongo_packet, 1);
+  packet->header.id = GINT32_TO_LE (id);
+  packet->header.opcode = GINT32_TO_LE (OP_DELETE);
+
+  /* Wire layout of OP_DELETE:
+   *   int32 ZERO | cstring ns | int32 flags | selector BSON */
+  nslen = strlen (ns) + 1;
+  packet->data_size = sizeof (gint32) * 2 + nslen + bson_size (sel);
+  packet->data = g_malloc (packet->data_size);
+
+  flags_le = GINT32_TO_LE (flags);
+
+  pos = 0;
+  memcpy (packet->data + pos, &zero, sizeof (gint32));
+  pos += sizeof (gint32);
+  memcpy (packet->data + pos, ns, nslen);
+  pos += nslen;
+  memcpy (packet->data + pos, &flags_le, sizeof (gint32));
+  pos += sizeof (gint32);
+  memcpy (packet->data + pos, bson_data (sel), bson_size (sel));
+
+  packet->header.length = GINT32_TO_LE (sizeof (packet->header)
+                                        + packet->data_size);
+
+  return packet;
+}
+
+mongo_packet *
+mongo_wire_cmd_kill_cursors_va (gint32 id, gint32 n, va_list ap)
+{
+  mongo_packet *p;
+  gint32 i, t_n, pos;
+  gint64 t_cid;
+
+  /* Guard against a non-positive cursor count. The public wrapper
+   * checks this too, but this worker is callable directly from other
+   * modules, and without the check it would allocate a bogus buffer
+   * size and read garbage varargs. */
+  if (n <= 0)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  p = (mongo_packet *)g_new0 (mongo_packet, 1);
+  p->header.id = GINT32_TO_LE (id);
+  p->header.opcode = GINT32_TO_LE (OP_KILL_CURSORS);
+
+  /* Wire layout of OP_KILL_CURSORS:
+   *   int32 ZERO | int32 n | int64 cursor_ids[n] */
+  p->data_size = sizeof (gint32) + sizeof (gint32) + sizeof (gint64) * n;
+  p->data = g_malloc (p->data_size);
+
+  t_n = GINT32_TO_LE (n);
+  pos = sizeof (gint32) * 2;
+  memcpy (p->data, (void *)&zero, sizeof (gint32));
+  memcpy (p->data + sizeof (gint32), (void *)&t_n, sizeof (gint32));
+
+  /* Append each cursor ID in little-endian byte order. */
+  for (i = 1; i <= n; i++)
+    {
+      t_cid = va_arg (ap, gint64);
+      t_cid = GINT64_TO_LE (t_cid);
+
+      memcpy (p->data + pos, (void *)&t_cid, sizeof (gint64));
+      pos += sizeof (gint64);
+    }
+
+  p->header.length = GINT32_TO_LE (sizeof (p->header) + p->data_size);
+
+  return p;
+}
+
+mongo_packet *
+mongo_wire_cmd_kill_cursors (gint32 id, gint32 n, ...)
+{
+  mongo_packet *packet;
+  va_list args;
+
+  if (n <= 0)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  /* The heavy lifting is done by the va_list-based worker. */
+  va_start (args, n);
+  packet = mongo_wire_cmd_kill_cursors_va (id, n, args);
+  va_end (args);
+
+  return packet;
+}
+
+mongo_packet *
+mongo_wire_cmd_custom (gint32 id, const gchar *db, gint32 flags,
+                       const bson *command)
+{
+  mongo_packet *packet;
+  gchar *ns;
+  bson *no_fields;
+
+  if (db == NULL || command == NULL)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+  if (bson_size (command) < 0)
+    {
+      errno = EINVAL;
+      return NULL;
+    }
+
+  /* Commands are queries against the special <db>.$cmd namespace,
+   * returning a single document, with an empty field selector. */
+  ns = g_strconcat (db, ".$cmd", NULL);
+
+  no_fields = bson_new ();
+  bson_finish (no_fields);
+
+  packet = mongo_wire_cmd_query (id, ns, flags, 0, 1, command, no_fields);
+
+  g_free (ns);
+  bson_free (no_fields);
+
+  return packet;
+}
+
+gboolean
+mongo_wire_reply_packet_get_header (const mongo_packet *p,
+                                    mongo_reply_packet_header *hdr)
+{
+  mongo_reply_packet_header h;
+  const guint8 *data;
+
+  if (!p || !hdr)
+    {
+      errno = EINVAL;
+      return FALSE;
+    }
+
+  /* The opcode is stored in little-endian byte order (see the
+   * mongo_wire_packet_set_header() family), so convert it before
+   * comparing with the host-order OP_REPLY constant; comparing the
+   * raw field would misdetect replies on big-endian hosts. */
+  if (GINT32_FROM_LE (p->header.opcode) != OP_REPLY)
+    {
+      errno = EPROTO;
+      return FALSE;
+    }
+
+  if (mongo_wire_packet_get_data (p, &data) == -1)
+    return FALSE;
+
+  memcpy (&h, data, sizeof (mongo_reply_packet_header));
+
+  /* Convert each reply header field to host byte order. */
+  hdr->flags = GINT32_FROM_LE (h.flags);
+  hdr->cursor_id = GINT64_FROM_LE (h.cursor_id);
+  hdr->start = GINT32_FROM_LE (h.start);
+  hdr->returned = GINT32_FROM_LE (h.returned);
+
+  return TRUE;
+}
+
+gboolean
+mongo_wire_reply_packet_get_data (const mongo_packet *p,
+                                  const guint8 **data)
+{
+  const guint8 *d;
+
+  if (!p || !data)
+    {
+      errno = EINVAL;
+      return FALSE;
+    }
+
+  /* The stored opcode is little-endian; convert it before comparing
+   * with the host-order OP_REPLY constant, otherwise the check would
+   * fail on big-endian hosts. */
+  if (GINT32_FROM_LE (p->header.opcode) != OP_REPLY)
+    {
+      errno = EPROTO;
+      return FALSE;
+    }
+
+  if (mongo_wire_packet_get_data (p, &d) == -1)
+    return FALSE;
+
+  /* The documents start right after the fixed-size reply header. */
+  *data = d + sizeof (mongo_reply_packet_header);
+  return TRUE;
+}
+
+gboolean
+mongo_wire_reply_packet_get_nth_document (const mongo_packet *p,
+                                          gint32 n,
+                                          bson **doc)
+{
+  const guint8 *d;
+  mongo_reply_packet_header h;
+  gint32 i;
+  gint32 pos = 0;
+
+  if (!p || !doc || n <= 0)
+    {
+      errno = EINVAL;
+      return FALSE;
+    }
+
+  /* Convert the little-endian stored opcode to host byte order before
+   * comparison; the raw comparison would break on big-endian hosts. */
+  if (GINT32_FROM_LE (p->header.opcode) != OP_REPLY)
+    {
+      errno = EPROTO;
+      return FALSE;
+    }
+
+  if (!mongo_wire_reply_packet_get_header (p, &h))
+    return FALSE;
+
+  /* @a n is 1-based, and must not exceed the number of returned
+   * documents. */
+  if (h.returned < n)
+    {
+      errno = ERANGE;
+      return FALSE;
+    }
+
+  if (!mongo_wire_reply_packet_get_data (p, &d))
+    return FALSE;
+
+  /* Skip over the first n-1 documents. */
+  for (i = 1; i < n; i++)
+    pos += bson_stream_doc_size (d, pos);
+
+  /* NOTE(review): the "- 1" presumably compensates for a byte that
+   * bson_new_from_data() accounts for itself — confirm against the
+   * bson API before changing. */
+  *doc = bson_new_from_data (d + pos, bson_stream_doc_size (d, pos) - 1);
+  return TRUE;
+}
diff --git a/src/mongo-wire.h b/src/mongo-wire.h
new file mode 100644
index 0000000..081a3e2
--- /dev/null
+++ b/src/mongo-wire.h
@@ -0,0 +1,433 @@
+/* mongo-wire.h - libmongo-client's MongoDB wire protocol implementation.
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/mongo-wire.h
+ * MongoDB Wire Protocol API public header.
+ */
+
+#ifndef LIBMONGO_CLIENT_MONGO_WIRE_H
+#define LIBMONGO_CLIENT_MONGO_WIRE_H 1
+
+#include <glib.h>
+
+#include <bson.h>
+
+G_BEGIN_DECLS
+
+/** @defgroup mongo_wire Mongo Wire Protocol
+ *
+ * The structures and functions within this module implement the
+ * MongoDB wire protocol: functions to assemble various commands into
+ * binary blobs that can be sent over the wire.
+ *
+ * @see mongo_client
+ *
+ * @addtogroup mongo_wire
+ * @{
+ */
+
+/** @defgroup mongo_wire_packet Packets
+ *
+ * @addtogroup mongo_wire_packet
+ * @{
+ */
+
+/** Mongo packet header.
+ *
+ * Every mongo packet has a header like this. Normally, one does not
+ * need to touch it, though.
+ */
+typedef struct
+{
+  gint32 length; /**< Full length of the packet, including the
+                    header. */
+  gint32 id; /**< Sequence ID, used when MongoDB responds to a
+                command. */
+  gint32 resp_to; /**< ID the response is an answer to. Only sent by
+                     the MongoDB server, never set on client-side. */
+  gint32 opcode; /**< The opcode of the command. @see
+                    mongo_wire_opcode. */
+} mongo_packet_header;
+
+/** An opaque Mongo Packet on the wire.
+ *
+ * This structure contains the binary data that can be written
+ * straight to the wire.
+ */
+typedef struct _mongo_packet mongo_packet;
+
+/** Create an empty packet.
+ *
+ * Creates an empty packet to be filled in later with
+ * mongo_wire_packet_set_header() and mongo_packet_set_data().
+ *
+ * @returns A newly allocated packet, or NULL on error.
+ */
+mongo_packet *mongo_wire_packet_new (void);
+
+/** Get the header data of a packet.
+ *
+ * Retrieve the mongo packet's header data.
+ *
+ * @param p is the packet which header we seek.
+ * @param header is a pointer to a variable which will hold the data.
+ *
+ * @note Allocating the @a header is the responsibility of the caller.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_wire_packet_get_header (const mongo_packet *p,
+ mongo_packet_header *header);
+
+/** Set the header data of a packet.
+ *
+ * Override the mongo packet's header data.
+ *
+ * @note No sanity checks are done, use this function with great care.
+ *
+ * @param p is the packet whose header we want to override.
+ * @param header is the header structure to use.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_wire_packet_set_header (mongo_packet *p,
+ const mongo_packet_header *header);
+
+/** Get the data part of a packet.
+ *
+ * Retrieve the raw binary blob of the mongo packet's data.
+ *
+ * @param p is the packet which header we seek.
+ * @param data is a pointer to a variable which will hold the data.
+ *
+ * @note The @a data parameter will point to an internal structure,
+ * which shall not be freed or written to.
+ *
+ * @returns The size of the data, or -1 on error.
+ */
+gint32 mongo_wire_packet_get_data (const mongo_packet *p, const guint8 **data);
+
+/** Set the data part of a packet.
+ *
+ * Overrides the data part of a packet, adjusting the packet length in
+ * the header too.
+ *
+ * @note No sanity checks are performed on the data, it is the
+ * caller's responsibility to supply valid information.
+ *
+ * @param p is the packet whose data is to be set.
+ * @param data is the data to set.
+ * @param size is the size of the data.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_wire_packet_set_data (mongo_packet *p, const guint8 *data,
+ gint32 size);
+
+/** Free up a mongo packet.
+ *
+ * @param p is the packet to free.
+ *
+ * @note The packet shall not be used afterwards.
+ */
+void mongo_wire_packet_free (mongo_packet *p);
+
+/** @} */
+
+/** @defgroup mongo_wire_reply Reply handling
+ *
+ * @addtogroup mongo_wire_reply
+ * @{
+ */
+
+/** Flags the server can set in replies. */
+enum
+  {
+    /** Set when get_more is called but the cursor id is invalid. */
+    MONGO_REPLY_FLAG_NO_CURSOR = 0x1,
+    /** Set when the query failed. */
+    MONGO_REPLY_FLAG_QUERY_FAIL = 0x2,
+    /** Set when the server supports the AwaitData query option.
+     * If not set, the client should sleep a little between get_more
+     * calls on a tailable cursor. On Mongo >= 1.6, this flag is
+     * always set.
+     */
+    MONGO_REPLY_FLAG_AWAITCAPABLE = 0x8
+  };
+
+/** Mongo reply packet header.
+ */
+#pragma pack(1)
+typedef struct
+{
+ gint32 flags; /**< Response flags. */
+ gint64 cursor_id; /**< Cursor ID, in case the client needs to do
+ get_more requests. */
+ gint32 start; /**< Starting position of the reply within the
+ cursor. */
+ gint32 returned; /**< Number of documents returned in the reply. */
+} mongo_reply_packet_header;
+#pragma pack()
+
+/** Get the header of a reply packet.
+ *
+ * @param p is the packet to retrieve the reply header from.
+ * @param hdr is a pointer to a variable where the reply header will
+ * be stored.
+ *
+ * @note It is the responsibility of the caller to allocate space for
+ * the header.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_wire_reply_packet_get_header (const mongo_packet *p,
+ mongo_reply_packet_header *hdr);
+
+/** Get the full data part of a reply packet.
+ *
+ * The result will include the full, unparsed data part of the reply.
+ *
+ * @param p is the packet to retrieve the data from.
+ * @param data is a pointer to a variable where the reply's data can be
+ * stored.
+ *
+ * @note The @a data variable will point to an internal structure,
+ * which must not be freed or modified.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_wire_reply_packet_get_data (const mongo_packet *p,
+ const guint8 **data);
+
+/** Get the Nth document from a reply packet.
+ *
+ * @param p is the packet to retrieve a document from.
+ * @param n is the number of the document to retrieve.
+ * @param doc is a pointer to a variable to hold the BSON document.
+ *
+ * @note The @a doc variable will be a newly allocated object, it is
+ * the responsibility of the caller to free it once it is not needed
+ * anymore.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_wire_reply_packet_get_nth_document (const mongo_packet *p,
+ gint32 n,
+ bson **doc);
+
+/** @}*/
+
+/** @defgroup mongo_wire_cmd Commands
+ *
+ * Each command has an @a id parameter, which can be used to track
+ * replies to various commands. It is the responsibility of the caller
+ * to keep track of IDs.
+ *
+ * @addtogroup mongo_wire_cmd
+ * @{
+ */
+
+/** Flags available for the update command.
+ * @see mongo_wire_cmd_update().
+ */
+enum
+ {
+ /** When set, inserts if no matching document was found. */
+ MONGO_WIRE_FLAG_UPDATE_UPSERT = 0x1,
+ /** When set, all matching documents will be updated, not just
+ the first. */
+ MONGO_WIRE_FLAG_UPDATE_MULTI = 0x2
+ };
+
+/** Construct an update command.
+ *
+ * @param id is the sequence id.
+ * @param ns is the namespace, the database and collection name
+ * concatenated, and separated with a single dot.
+ * @param flags are the flags for the update command. Available flags
+ * are #MONGO_WIRE_FLAG_UPDATE_UPSERT and
+ * #MONGO_WIRE_FLAG_UPDATE_MULTI.
+ * @param selector is the BSON document that will act as the selector.
+ * @param update is the BSON document that contains the updated values.
+ *
+ * @returns A newly allocated packet, or NULL on error. It is the
+ * responsibility of the caller to free the packet once it is not used
+ * anymore.
+ */
+mongo_packet *mongo_wire_cmd_update (gint32 id, const gchar *ns,
+ gint32 flags, const bson *selector,
+ const bson *update);
+
+/** Construct an insert command.
+ *
+ * @param id is the sequence id.
+ * @param ns is the namespace, the database and collection name
+ * concatenated, and separated with a single dot.
+ * @param docs are the BSON documents to insert. One must close the
+ * list with a NULL value.
+ *
+ * @returns A newly allocated packet, or NULL on error. It is the
+ * responsibility of the caller to free the packet once it is not used
+ * anymore.
+ */
+mongo_packet *mongo_wire_cmd_insert (gint32 id, const gchar *ns, ...)
+ G_GNUC_NULL_TERMINATED;
+
+/** Construct an insert command with N documents.
+ *
+ * @param id is the sequence id.
+ * @param ns is the namespace, the database and collection name
+ * concatenated, and separated with a single dot.
+ * @param n is the number of documents to insert.
+ * @param docs is the array containing the bson documents to insert.
+ *
+ * @returns A newly allocated packet, or NULL on error. It is the
+ * responsibility of the caller to free the packet once it is not used
+ * anymore.
+ */
+mongo_packet *mongo_wire_cmd_insert_n (gint32 id, const gchar *ns, gint32 n,
+ const bson **docs);
+
+/** Flags available for the query command.
+ * @see mongo_wire_cmd_query().
+ */
+enum
+ {
+ /** Set the TailableCursor flag on the query. */
+ MONGO_WIRE_FLAG_QUERY_TAILABLE_CURSOR = 1 << 1,
+ /** Allow queries made against a replica slave. */
+ MONGO_WIRE_FLAG_QUERY_SLAVE_OK = 1 << 2,
+ /** Disable cursor timeout. */
+ MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT = 1 << 4,
+ /** Block if at the end of the data block, awaiting data.
+ * Use only with #MONGO_WIRE_FLAG_QUERY_TAILABLE_CURSOR!
+ */
+ MONGO_WIRE_FLAG_QUERY_AWAIT_DATA = 1 << 5,
+ /** Stream the data down full blast in multiple packages.
+ * When set, the client is not allowed not to read all the data,
+ * unless it closes connection.
+ */
+ MONGO_WIRE_FLAG_QUERY_EXHAUST = 1 << 6,
+ /** Allow partial results in a sharded environment.
+ * In case one or more required shards are down, with this flag
+ * set, partial results will be returned instead of failing.
+ */
+ MONGO_WIRE_FLAG_QUERY_PARTIAL_RESULTS = 1 << 7
+ };
+
+/** Construct a query command.
+ *
+ * @param id is the sequence id.
+ * @param ns is the namespace, the database and collection name
+ * concatenated, and separated with a single dot.
+ * @param flags are the query options. Available flags are:
+ * #MONGO_WIRE_FLAG_QUERY_TAILABLE_CURSOR,
+ * #MONGO_WIRE_FLAG_QUERY_SLAVE_OK,
+ * #MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT,
+ * #MONGO_WIRE_FLAG_QUERY_AWAIT_DATA, #MONGO_WIRE_FLAG_QUERY_EXHAUST.
+ * @param skip is the number of documents to skip.
+ * @param ret is the number of documents to return.
+ * @param query is the query BSON object.
+ * @param sel is the (optional) selector BSON object indicating the
+ * fields to return. Passing NULL will return all fields.
+ *
+ * @returns A newly allocated packet, or NULL on error. It is the
+ * responsibility of the caller to free the packet once it is not used
+ * anymore.
+ */
+mongo_packet *mongo_wire_cmd_query (gint32 id, const gchar *ns, gint32 flags,
+ gint32 skip, gint32 ret, const bson *query,
+ const bson *sel);
+
+/** Construct a get more command.
+ *
+ * @param id is the sequence id.
+ * @param ns is the namespace, the database and collection name
+ * concatenated, and separated with a single dot.
+ * @param ret is the number of documents to return.
+ * @param cursor_id is the ID of the cursor to use.
+ *
+ * @returns A newly allocated packet, or NULL on error. It is the
+ * responsibility of the caller to free the packet once it is not used
+ * anymore.
+ */
+mongo_packet *mongo_wire_cmd_get_more (gint32 id, const gchar *ns,
+ gint32 ret, gint64 cursor_id);
+
+/** Flags available for the delete command.
+ */
+enum
+ {
+ /** Only remove the first match. */
+ MONGO_WIRE_FLAG_DELETE_SINGLE = 0x1
+ };
+
+/** Construct a delete command.
+ *
+ * @param id is the sequence id.
+ * @param ns is the namespace, the database and collection name
+ * concatenated, and separated with a single dot.
+ * @param flags are the delete options. The only available flag is
+ * MONGO_WIRE_FLAG_DELETE_SINGLE.
+ * @param sel is the BSON object to use as a selector.
+ *
+ * @returns A newly allocated packet, or NULL on error. It is the
+ * responsibility of the caller to free the packet once it is not used
+ * anymore.
+ */
+mongo_packet *mongo_wire_cmd_delete (gint32 id, const gchar *ns,
+ gint32 flags, const bson *sel);
+
+/** Construct a kill cursors command.
+ *
+ * @param id is the sequence id.
+ * @param n is the number of cursors to delete.
+ * @param cursor_ids are the ids of the cursors to delete.
+ *
+ * @note One must supply exactly @a n number of cursor IDs.
+ *
+ * @returns A newly allocated packet, or NULL on error. It is the
+ * responsibility of the caller to free the packet once it is not used
+ * anymore.
+ */
+mongo_packet *mongo_wire_cmd_kill_cursors (gint32 id, gint32 n, ...);
+
+/** Construct a custom command.
+ *
+ * Custom commands are queries run in the db.$cmd namespace. The
+ * commands themselves are queries, and as such, BSON objects.
+ *
+ * @param id is the sequence id.
+ * @param db is the database in which the command shall be run.
+ * @param flags are the query flags. See mongo_wire_cmd_query() for a
+ * list.
+ * @param command is the BSON object representing the command.
+ *
+ * @returns A newly allocated packet, or NULL on error. It is the
+ * responsibility of the caller to free the packet once it is not used
+ * anymore.
+ */
+mongo_packet *mongo_wire_cmd_custom (gint32 id, const gchar *db,
+ gint32 flags,
+ const bson *command);
+
+/** @} */
+
+/** @} */
+
+G_END_DECLS
+
+#endif
diff --git a/src/mongo.h b/src/mongo.h
new file mode 100644
index 0000000..49f0187
--- /dev/null
+++ b/src/mongo.h
@@ -0,0 +1,49 @@
+/* mongo.h - libmongo-client general header
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/mongo.h
+ * libmongo-client meta-header.
+ *
+ * This header includes all the rest, it is advised for applications
+ * to include this header, and this header only.
+ */
+
+#include <bson.h>
+#include <mongo-wire.h>
+#include <mongo-client.h>
+#include <mongo-utils.h>
+#include <mongo-sync.h>
+#include <mongo-sync-cursor.h>
+#include <mongo-sync-pool.h>
+#include <sync-gridfs.h>
+#include <sync-gridfs-chunk.h>
+#include <sync-gridfs-stream.h>
+
+/** @mainpage libmongo-client
+ *
+ * @section Introduction
+ *
+ * libmongo-client is an alternative MongoDB driver for the C
+ * language, with clarity, correctness and completeness in mind.
+ *
+ * Contents:
+ * @htmlonly
+ * <ul>
+ * <li><a href="modules.html"><b>API Documentation</b></a></li>
+ * <li><a href="tutorial.html"><b>Tutorial</b></a></li>
+ * </ul>
+ * @endhtmlonly
+ */
diff --git a/src/sync-gridfs-chunk.c b/src/sync-gridfs-chunk.c
new file mode 100644
index 0000000..9bcc62e
--- /dev/null
+++ b/src/sync-gridfs-chunk.c
@@ -0,0 +1,329 @@
+/* sync-gridfs-chunk.c - libmongo-client GridFS chunk access implementation
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/sync-gridfs-chunk.c
+ * MongoDB GridFS chunk access implementation.
+ */
+
+#include "sync-gridfs-chunk.h"
+#include "libmongo-private.h"
+
+#include <unistd.h>
+#include <errno.h>
+
+void
+mongo_sync_gridfs_chunked_file_free (mongo_sync_gridfs_chunked_file *gfile)
+{
+ if (!gfile)
+ {
+ errno = ENOTCONN;
+ return;
+ }
+ bson_free (gfile->meta.metadata);
+ g_free (gfile);
+
+ errno = 0;
+}
+
/* Find a single file's metadata document on GridFS and wrap it in a
 * chunked-file object.
 *
 * The required GridFS fields (_id, length, chunkSize, uploadDate,
 * md5) are all validated; a document missing any of them is rejected
 * with errno = EPROTO.
 */
mongo_sync_gridfs_chunked_file *
mongo_sync_gridfs_chunked_find (mongo_sync_gridfs *gfs, const bson *query)
{
  mongo_sync_gridfs_chunked_file *f;
  mongo_packet *p;
  bson_cursor *c;

  if (!gfs)
    {
      errno = ENOTCONN;
      return NULL;
    }
  if (!query)
    {
      errno = EINVAL;
      return NULL;
    }

  /* Ask for at most one matching document from the files collection. */
  p = mongo_sync_cmd_query (gfs->conn, gfs->ns.files, 0, 0, 1, query, NULL);
  if (!p)
    return NULL;

  f = g_new0 (mongo_sync_gridfs_chunked_file, 1);
  f->gfs = gfs;
  f->meta.type = LMC_GRIDFS_FILE_CHUNKED;

  /* Take ownership of the returned metadata document; it is kept for
   * the whole lifetime of the file object (freed by
   * mongo_sync_gridfs_chunked_file_free). */
  mongo_wire_reply_packet_get_nth_document (p, 1, &f->meta.metadata);
  bson_finish (f->meta.metadata);
  mongo_wire_packet_free (p);

  c = bson_find (f->meta.metadata, "_id");
  if (!bson_cursor_get_oid (c, &f->meta.oid))
    {
      mongo_sync_gridfs_chunked_file_free (f);
      bson_cursor_free (c);
      errno = EPROTO;
      return NULL;
    }

  bson_cursor_find (c, "length");
  bson_cursor_get_int64 (c, &f->meta.length);

  /* Some writers store "length" as a 32-bit integer; fall back to
   * that representation when the 64-bit read yielded nothing. */
  if (f->meta.length == 0)
    {
      gint32 i = 0;

      bson_cursor_get_int32 (c, &i);
      f->meta.length = i;
    }

  bson_cursor_find (c, "chunkSize");
  bson_cursor_get_int32 (c, &f->meta.chunk_size);

  /* A zero length or chunk size means the metadata is unusable. */
  if (f->meta.length == 0 || f->meta.chunk_size == 0)
    {
      bson_cursor_free (c);
      mongo_sync_gridfs_chunked_file_free (f);
      errno = EPROTO;
      return NULL;
    }

  bson_cursor_find (c, "uploadDate");
  if (!bson_cursor_get_utc_datetime (c, &f->meta.date))
    {
      mongo_sync_gridfs_chunked_file_free (f);
      bson_cursor_free (c);
      errno = EPROTO;
      return NULL;
    }

  bson_cursor_find (c, "md5");
  if (!bson_cursor_get_string (c, &f->meta.md5))
    {
      mongo_sync_gridfs_chunked_file_free (f);
      bson_cursor_free (c);
      errno = EPROTO;
      return NULL;
    }
  bson_cursor_free (c);

  return f;
}
+
/* Create a cursor over the chunks of a GridFS file.
 *
 * Chunks are matched by the file's ObjectId ("files_id") and returned
 * in ascending sequence-number ("n") order via a $query/$orderby
 * wrapper document.
 */
mongo_sync_cursor *
mongo_sync_gridfs_chunked_file_cursor_new (mongo_sync_gridfs_chunked_file *gfile,
                                           gint start, gint num)
{
  bson *q;
  mongo_sync_cursor *cursor;
  mongo_packet *p;

  if (!gfile)
    {
      errno = ENOTCONN;
      return NULL;
    }
  if (start < 0 || num < 0)
    {
      errno = EINVAL;
      return NULL;
    }

  /* { $query: { files_id: <oid> }, $orderby: { n: 1 } } */
  q = bson_build_full (BSON_TYPE_DOCUMENT, "$query", TRUE,
                       bson_build (BSON_TYPE_OID, "files_id", gfile->meta.oid, BSON_TYPE_NONE),
                       BSON_TYPE_DOCUMENT, "$orderby", TRUE,
                       bson_build (BSON_TYPE_INT32, "n", 1, BSON_TYPE_NONE),
                       BSON_TYPE_NONE);
  bson_finish (q);

  p = mongo_sync_cmd_query (gfile->gfs->conn, gfile->gfs->ns.chunks, 0,
                            start, num, q, NULL);
  /* NOTE(review): p is not checked for NULL here; this relies on
   * mongo_sync_cursor_new() being NULL-safe and returning NULL in
   * that case -- confirm against its implementation. */
  cursor = mongo_sync_cursor_new (gfile->gfs->conn,
                                  gfile->gfs->ns.chunks, p);
  bson_free (q);

  return cursor;
}
+
+guint8 *
+mongo_sync_gridfs_chunked_file_cursor_get_chunk (mongo_sync_cursor *cursor,
+ gint32 *size)
+{
+ bson *b;
+ bson_cursor *c;
+ const guint8 *d;
+ guint8 *data;
+ gint32 s;
+ bson_binary_subtype sub = BSON_BINARY_SUBTYPE_USER_DEFINED;
+ gboolean r;
+
+ if (!cursor)
+ {
+ errno = ENOTCONN;
+ return NULL;
+ }
+
+ b = mongo_sync_cursor_get_data (cursor);
+ c = bson_find (b, "data");
+ r = bson_cursor_get_binary (c, &sub, &d, &s);
+ if (!r || (sub != BSON_BINARY_SUBTYPE_GENERIC &&
+ sub != BSON_BINARY_SUBTYPE_BINARY))
+ {
+ bson_cursor_free (c);
+ errno = EPROTO;
+ return NULL;
+ }
+ bson_cursor_free (c);
+
+ if (sub == BSON_BINARY_SUBTYPE_BINARY)
+ {
+ s -= 4;
+ data = g_malloc (s);
+ memcpy (data, d + 4, s);
+ }
+ else
+ {
+ data = g_malloc (s);
+ memcpy (data, d, s);
+ }
+
+ if (size)
+ *size = s;
+
+ bson_free (b);
+ return data;
+}
+
+mongo_sync_gridfs_chunked_file *
+mongo_sync_gridfs_chunked_file_new_from_buffer (mongo_sync_gridfs *gfs,
+ const bson *metadata,
+ const guint8 *data,
+ gint64 size)
+{
+ mongo_sync_gridfs_chunked_file *gfile;
+ bson *meta;
+ bson_cursor *c;
+ guint8 *oid;
+ gint64 pos = 0, chunk_n = 0, upload_date;
+ GTimeVal tv;
+ GChecksum *chk;
+
+ if (!gfs)
+ {
+ errno = ENOTCONN;
+ return NULL;
+ }
+ if (!data || size <= 0)
+ {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ oid = mongo_util_oid_new
+ (mongo_connection_get_requestid ((mongo_connection *)gfs->conn));
+ if (!oid)
+ {
+ errno = EFAULT;
+ return NULL;
+ }
+
+ chk = g_checksum_new (G_CHECKSUM_MD5);
+
+ /* Insert chunks first */
+ while (pos < size)
+ {
+ bson *chunk;
+ gint32 csize = gfs->chunk_size;
+
+ if (size - pos < csize)
+ csize = size - pos;
+
+ chunk = bson_new_sized (gfs->chunk_size + 128);
+ bson_append_oid (chunk, "files_id", oid);
+ bson_append_int64 (chunk, "n", (gint64)chunk_n);
+ bson_append_binary (chunk, "data", BSON_BINARY_SUBTYPE_GENERIC,
+ data + pos, csize);
+ bson_finish (chunk);
+
+ g_checksum_update (chk, data + pos, csize);
+
+ if (!mongo_sync_cmd_insert (gfs->conn, gfs->ns.chunks, chunk, NULL))
+ {
+ int e = errno;
+
+ bson_free (chunk);
+ g_free (oid);
+ errno = e;
+ return NULL;
+ }
+ bson_free (chunk);
+
+ pos += csize;
+ chunk_n++;
+ }
+
+ /* Insert metadata */
+ if (metadata)
+ meta = bson_new_from_data (bson_data (metadata),
+ bson_size (metadata) - 1);
+ else
+ meta = bson_new_sized (128);
+
+ g_get_current_time (&tv);
+ upload_date = (((gint64) tv.tv_sec) * 1000) + (gint64)(tv.tv_usec / 1000);
+
+ bson_append_int64 (meta, "length", size);
+ bson_append_int32 (meta, "chunkSize", gfs->chunk_size);
+ bson_append_utc_datetime (meta, "uploadDate", upload_date);
+ bson_append_string (meta, "md5", g_checksum_get_string (chk), -1);
+ bson_append_oid (meta, "_id", oid);
+ bson_finish (meta);
+
+ g_checksum_free (chk);
+
+ if (!mongo_sync_cmd_insert (gfs->conn, gfs->ns.files, meta, NULL))
+ {
+ int e = errno;
+
+ bson_free (meta);
+ g_free (oid);
+ errno = e;
+ return NULL;
+ }
+
+ /* Return the resulting gfile.
+ * No need to check cursor errors here, as we constructed the BSON
+ * just above, and all the fields exist and have the appropriate
+ * types.
+ */
+ gfile = g_new0 (mongo_sync_gridfs_chunked_file, 1);
+ gfile->gfs = gfs;
+
+ gfile->meta.metadata = meta;
+ gfile->meta.length = size;
+ gfile->meta.chunk_size = gfs->chunk_size;
+ gfile->meta.date = 0;
+ gfile->meta.type = LMC_GRIDFS_FILE_CHUNKED;
+
+ c = bson_find (meta, "_id");
+ bson_cursor_get_oid (c, &gfile->meta.oid);
+
+ bson_cursor_find (c, "md5");
+ bson_cursor_get_string (c, &gfile->meta.md5);
+ bson_cursor_free (c);
+
+ g_free (oid);
+
+ return gfile;
+}
diff --git a/src/sync-gridfs-chunk.h b/src/sync-gridfs-chunk.h
new file mode 100644
index 0000000..e567328
--- /dev/null
+++ b/src/sync-gridfs-chunk.h
@@ -0,0 +1,134 @@
+/* sync-gridfs-chunk.h - libmongo-client GridFS chunk API
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/sync-gridfs-chunk.h
+ * MongoDB GridFS Chunk API.
+ *
+ * @addtogroup mongo_sync_gridfs_api
+ * @{
+ */
+
+#ifndef LIBMONGO_SYNC_GRIDFS_CHUNK_H
+#define LIBMONGO_SYNC_GRIDFS_CHUNK_H 1
+
+#include <sync-gridfs.h>
+#include <glib.h>
+
+G_BEGIN_DECLS
+
+/** @defgroup mongo_sync_gridfs_chunk_api Mongo GridFS Chunk API
+ *
+ * This submodule provides chunk-based access to GridFS
+ * files. Chunk-based access has the advantage of being reasonably
+ * lightweight and fast, and the disadvantage of making it harder to
+ * do arbitrary reads or multi-part writes.
+ *
+ * It's best used when the whole file needs to be retrieved, or when
+ * uploading files that either fit in a buffer, or can be mmapped.
+ *
+ * @addtogroup mongo_sync_gridfs_chunk_api
+ * @{
+ */
+
+/** Opaque GridFS chunked file object. */
+typedef struct _mongo_sync_gridfs_chunked_file mongo_sync_gridfs_chunked_file;
+
+/** Find a file on GridFS.
+ *
+ * Finds a file on GridFS, based on a custom query.
+ *
+ * @param gfs is the GridFS to find the file in.
+ * @param query is the custom query based on which the file shall be
+ * sought.
+ *
+ * @returns A newly allocated chunked file object, or NULL on
+ * error. It is the responsibility of the caller to free the returned
+ * object once it is no longer needed.
+ */
+mongo_sync_gridfs_chunked_file *mongo_sync_gridfs_chunked_find (mongo_sync_gridfs *gfs,
+ const bson *query);
+
+/** Upload a file to GridFS from a buffer.
+ *
+ * Create a new file on GridFS from a buffer, using custom meta-data.
+ *
+ * @param gfs is the GridFS to create the file on.
+ * @param metadata is the (optional) file metadata.
+ * @param data is the data to store on GridFS.
+ * @param size is the size of the data.
+ *
+ * @returns A newly allocated file object, or NULL on error. It is the
+ * responsibility of the caller to free the returned object once it is
+ * no longer needed.
+ *
+ * @note The metadata MUST NOT contain any of the required GridFS
+ * metadata fields (_id, length, chunkSize, uploadDate, md5),
+ * otherwise a conflict will occur, against which the function does
+ * not guard by design.
+ */
+mongo_sync_gridfs_chunked_file *mongo_sync_gridfs_chunked_file_new_from_buffer (mongo_sync_gridfs *gfs,
+ const bson *metadata,
+ const guint8 *data,
+ gint64 size);
+/** Free a GridFS chunked file object.
+ *
+ * @param gfile is the file object to free.
+ */
+void mongo_sync_gridfs_chunked_file_free (mongo_sync_gridfs_chunked_file *gfile);
+
+/* Data access */
+
+/** Create a cursor for a GridFS chunked file.
+ *
+ * The cursor can be used (via
+ * mongo_sync_gridfs_file_cursor_get_chunk()) to retrieve a GridFS
+ * file chunk by chunk.
+ *
+ * @param gfile is the GridFS chunked file to work with.
+ * @param start is the starting chunk.
+ * @param num is the total number of chunks to make a cursor for.
+ *
+ * @returns A newly allocated cursor object, or NULL on error. It is
+ * the responsibility of the caller to free the cursor once it is no
+ * longer needed.
+ */
+mongo_sync_cursor *mongo_sync_gridfs_chunked_file_cursor_new (mongo_sync_gridfs_chunked_file *gfile,
+ gint start, gint num);
+
+/** Get the data of a GridFS file chunk, via a cursor.
+ *
+ * Once we have a cursor, it can be iterated over with
+ * mongo_sync_cursor_next(), and its data can be conveniently accessed
+ * with this function.
+ *
+ * @param cursor is the cursor object to work with.
+ * @param size is a pointer to a variable where the chunk's actual
+ * size can be stored.
+ *
+ * @returns A pointer to newly allocated memory that holds the current
+ * chunk's data, or NULL on error. It is the responsibility of the
+ * caller to free this once it is no longer needed.
+ */
+guint8 *mongo_sync_gridfs_chunked_file_cursor_get_chunk (mongo_sync_cursor *cursor,
+ gint32 *size);
+
+/** @} */
+
+G_END_DECLS
+
+/** @} */
+
+#endif
diff --git a/src/sync-gridfs-stream.c b/src/sync-gridfs-stream.c
new file mode 100644
index 0000000..c9b11ed
--- /dev/null
+++ b/src/sync-gridfs-stream.c
@@ -0,0 +1,507 @@
+/* sync-gridfs-stream.c - libmongo-client GridFS streaming implementation
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/sync-gridfs-stream.c
+ * MongoDB GridFS Streaming API implementation.
+ */
+
+#include "sync-gridfs-stream.h"
+#include "libmongo-private.h"
+
+#include <unistd.h>
+#include <errno.h>
+
/* Locate a file's metadata by query and open a read-only stream on it.
 *
 * The _id, length and chunkSize fields are required; a metadata
 * document missing any of them is rejected with errno = EPROTO.
 */
mongo_sync_gridfs_stream *
mongo_sync_gridfs_stream_find (mongo_sync_gridfs *gfs,
                               const bson *query)
{
  mongo_sync_gridfs_stream *stream;
  bson *meta = NULL;
  bson_cursor *c;
  mongo_packet *p;
  const guint8 *oid;

  if (!gfs)
    {
      errno = ENOTCONN;
      return NULL;
    }
  if (!query)
    {
      errno = EINVAL;
      return NULL;
    }

  /* Fetch at most one matching metadata document. */
  p = mongo_sync_cmd_query (gfs->conn, gfs->ns.files, 0, 0, 1, query, NULL);
  if (!p)
    return NULL;

  stream = g_new0 (mongo_sync_gridfs_stream, 1);
  stream->gfs = gfs;
  stream->file.type = LMC_GRIDFS_FILE_STREAM_READER;

  mongo_wire_reply_packet_get_nth_document (p, 1, &meta);
  bson_finish (meta);
  mongo_wire_packet_free (p);

  c = bson_find (meta, "_id");
  if (!bson_cursor_get_oid (c, &oid))
    {
      bson_cursor_free (c);
      bson_free (meta);
      g_free (stream);

      errno = EPROTO;
      return NULL;
    }
  /* The ObjectId points into the metadata document, which is freed
   * below; the stream keeps its own 12-byte copy. */
  stream->file.id = g_malloc (12);
  memcpy (stream->file.id, oid, 12);

  bson_cursor_find (c, "length");
  bson_cursor_get_int64 (c, &stream->file.length);
  /* Fall back to a 32-bit "length" if the 64-bit read found nothing. */
  if (stream->file.length == 0)
    {
      gint32 i = 0;

      bson_cursor_get_int32 (c, &i);
      stream->file.length = i;
    }

  bson_cursor_find (c, "chunkSize");
  bson_cursor_get_int32 (c, &stream->file.chunk_size);

  bson_cursor_free (c);
  bson_free (meta);

  /* Zero length or chunk size means the metadata is unusable. */
  if (stream->file.length == 0 ||
      stream->file.chunk_size == 0)
    {
      g_free (stream->file.id);
      g_free (stream);

      errno = EPROTO;
      return NULL;
    }

  return stream;
}
+
+mongo_sync_gridfs_stream *
+mongo_sync_gridfs_stream_new (mongo_sync_gridfs *gfs,
+ const bson *metadata)
+{
+ mongo_sync_gridfs_stream *stream;
+ bson_cursor *c;
+
+ if (!gfs)
+ {
+ errno = ENOTCONN;
+ return NULL;
+ }
+
+ stream = g_new0 (mongo_sync_gridfs_stream, 1);
+ stream->file.type = LMC_GRIDFS_FILE_STREAM_WRITER;
+ stream->gfs = gfs;
+
+ stream->file.chunk_size = gfs->chunk_size;
+
+ stream->writer.metadata = bson_new_from_data (bson_data (metadata),
+ bson_size (metadata) - 1);
+
+ c = bson_find (metadata, "_id");
+ if (!c)
+ {
+ stream->file.id = mongo_util_oid_new
+ (mongo_connection_get_requestid ((mongo_connection *)gfs->conn));
+ if (!stream->file.id)
+ {
+ bson_free (stream->writer.metadata);
+ g_free (stream);
+
+ errno = EFAULT;
+ return NULL;
+ }
+ bson_append_oid (stream->writer.metadata, "_id", stream->file.id);
+ }
+ else
+ {
+ const guint8 *oid;
+
+ if (!bson_cursor_get_oid (c, &oid))
+ {
+ bson_cursor_free (c);
+ bson_free (stream->writer.metadata);
+ g_free (stream);
+
+ errno = EPROTO;
+ return NULL;
+ }
+
+ stream->file.id = g_malloc (12);
+ memcpy (stream->file.id, oid, 12);
+ }
+ bson_cursor_free (c);
+ bson_finish (stream->writer.metadata);
+
+ stream->writer.buffer = g_malloc (stream->file.chunk_size);
+ stream->writer.checksum = g_checksum_new (G_CHECKSUM_MD5);
+
+ return stream;
+}
+
+static inline gboolean
+_stream_seek_chunk (mongo_sync_gridfs_stream *stream,
+ gint64 chunk)
+{
+ bson *b;
+ mongo_packet *p;
+ bson_cursor *c;
+ bson_binary_subtype subt = BSON_BINARY_SUBTYPE_USER_DEFINED;
+ gboolean r;
+
+ b = bson_new_sized (32);
+ bson_append_oid (b, "files_id", stream->file.id);
+ bson_append_int64 (b, "n", chunk);
+ bson_finish (b);
+
+ p = mongo_sync_cmd_query (stream->gfs->conn,
+ stream->gfs->ns.chunks, 0,
+ 0, 1, b, NULL);
+ bson_free (b);
+
+ bson_free (stream->reader.bson);
+ stream->reader.bson = NULL;
+ stream->reader.chunk.data = NULL;
+
+ mongo_wire_reply_packet_get_nth_document (p, 1, &stream->reader.bson);
+ mongo_wire_packet_free (p);
+ bson_finish (stream->reader.bson);
+
+ c = bson_find (stream->reader.bson, "data");
+ r = bson_cursor_get_binary (c, &subt, &stream->reader.chunk.data,
+ &stream->reader.chunk.size);
+ if (!r || (subt != BSON_BINARY_SUBTYPE_GENERIC &&
+ subt != BSON_BINARY_SUBTYPE_BINARY))
+ {
+ bson_cursor_free (c);
+ bson_free (stream->reader.bson);
+ stream->reader.bson = NULL;
+ stream->reader.chunk.data = NULL;
+
+ errno = EPROTO;
+ return FALSE;
+ }
+ bson_cursor_free (c);
+
+ if (subt == BSON_BINARY_SUBTYPE_BINARY)
+ {
+ stream->reader.chunk.start_offset = 4;
+ stream->reader.chunk.size -= 4;
+ }
+ stream->reader.chunk.offset = 0;
+
+ return TRUE;
+}
+
/* Read up to @size bytes from a read-only GridFS stream into @buffer.
 *
 * Chunks are fetched lazily: the first read pulls chunk 0, and
 * subsequent chunks are loaded as the read position crosses chunk
 * boundaries.
 *
 * Returns the number of bytes copied, or -1 on error (errno set).
 */
gint64
mongo_sync_gridfs_stream_read (mongo_sync_gridfs_stream *stream,
                               guint8 *buffer,
                               gint64 size)
{
  gint64 pos = 0;

  if (!stream)
    {
      errno = ENOENT;
      return -1;
    }
  if (stream->file.type != LMC_GRIDFS_FILE_STREAM_READER)
    {
      errno = EOPNOTSUPP;
      return -1;
    }
  if (!buffer || size <= 0)
    {
      errno = EINVAL;
      return -1;
    }

  /* Lazily load the first chunk on the first read. */
  if (!stream->reader.chunk.data)
    {
      if (!_stream_seek_chunk (stream, 0))
        return -1;
    }

  while (pos < size && stream->file.offset +
         stream->reader.chunk.start_offset < stream->file.length)
    {
      /* Bytes still available in the currently loaded chunk. */
      gint32 csize = stream->reader.chunk.size - stream->reader.chunk.offset;

      if (size - pos < csize)
        csize = size - pos;

      memcpy (buffer + pos,
              stream->reader.chunk.data +
              stream->reader.chunk.start_offset +
              stream->reader.chunk.offset, csize);

      stream->reader.chunk.offset += csize;
      stream->file.offset += csize;
      pos += csize;

      /* Advance to the next chunk when this one is exhausted and
       * there is more of the file left to read. */
      if (stream->reader.chunk.offset + stream->reader.chunk.start_offset >=
          stream->reader.chunk.size &&
          stream->file.offset + stream->reader.chunk.start_offset <
          stream->file.length)
        {
          stream->file.current_chunk++;
          if (!_stream_seek_chunk (stream, stream->file.current_chunk))
            return -1;
        }
    }

  return pos;
}
+
+static gboolean
+_stream_chunk_write (mongo_sync_gridfs *gfs,
+ const guint8 *oid, gint64 n,
+ const guint8 *buffer, gint32 size)
+{
+ bson *chunk;
+
+ chunk = bson_new_sized (size + 128);
+ bson_append_oid (chunk, "files_id", oid);
+ bson_append_int64 (chunk, "n", n);
+ bson_append_binary (chunk, "data", BSON_BINARY_SUBTYPE_GENERIC,
+ buffer, size);
+ bson_finish (chunk);
+
+ if (!mongo_sync_cmd_insert (gfs->conn, gfs->ns.chunks, chunk, NULL))
+ {
+ int e = errno;
+
+ bson_free (chunk);
+ errno = e;
+ return FALSE;
+ }
+ bson_free (chunk);
+
+ return TRUE;
+}
+
/* Write @size bytes from @buffer to a write-only GridFS stream.
 *
 * Data is staged in an internal chunk-sized buffer; whenever the
 * buffer fills up, it is flushed to the chunks collection and folded
 * into the running MD5 checksum. Returns TRUE on success, FALSE on
 * error (errno set).
 */
gboolean
mongo_sync_gridfs_stream_write (mongo_sync_gridfs_stream *stream,
                                const guint8 *buffer,
                                gint64 size)
{
  gint64 pos = 0;

  if (!stream)
    {
      errno = ENOENT;
      return FALSE;
    }
  if (stream->file.type != LMC_GRIDFS_FILE_STREAM_WRITER)
    {
      errno = EOPNOTSUPP;
      return FALSE;
    }
  if (!buffer || size <= 0)
    {
      errno = EINVAL;
      return FALSE;
    }

  while (pos < size)
    {
      /* Room left in the staging buffer this iteration. */
      gint32 csize = stream->file.chunk_size - stream->writer.buffer_offset;

      if (size - pos < csize)
        csize = size - pos;

      memcpy (stream->writer.buffer + stream->writer.buffer_offset,
              buffer + pos, csize);
      stream->writer.buffer_offset += csize;
      stream->file.offset += csize;
      stream->file.length += csize;
      pos += csize;

      /* Flush a full chunk to the server. */
      if (stream->writer.buffer_offset == stream->file.chunk_size)
        {
          if (!_stream_chunk_write (stream->gfs,
                                    stream->file.id,
                                    stream->file.current_chunk,
                                    stream->writer.buffer,
                                    stream->file.chunk_size))
            return FALSE;
          g_checksum_update (stream->writer.checksum, stream->writer.buffer,
                             stream->file.chunk_size);

          stream->writer.buffer_offset = 0;
          stream->file.current_chunk++;
        }
    }

  return TRUE;
}
+
/* Reposition a read-only GridFS stream.
 *
 * @whence follows lseek(2) semantics: SEEK_SET (absolute), SEEK_CUR
 * (relative to the current offset), or SEEK_END (relative to the end
 * of the file; @pos must be <= 0). Out-of-range targets fail with
 * ERANGE, an unknown @whence with EINVAL.
 */
gboolean
mongo_sync_gridfs_stream_seek (mongo_sync_gridfs_stream *stream,
                               gint64 pos,
                               gint whence)
{
  gint64 real_pos = 0;
  gint64 chunk;
  gint32 offs;

  if (!stream)
    {
      errno = ENOENT;
      return FALSE;
    }
  if (stream->file.type != LMC_GRIDFS_FILE_STREAM_READER)
    {
      errno = EOPNOTSUPP;
      return FALSE;
    }

  switch (whence)
    {
    case SEEK_SET:
      /* Seeking to the current position is a no-op. */
      if (pos == stream->file.offset)
        return TRUE;
      if (pos < 0 || pos > stream->file.length)
        {
          errno = ERANGE;
          return FALSE;
        }
      real_pos = pos;
      break;
    case SEEK_CUR:
      if (pos + stream->file.offset < 0 ||
          pos + stream->file.offset > stream->file.length)
        {
          errno = ERANGE;
          return FALSE;
        }
      if (pos == 0)
        return TRUE;
      real_pos = pos + stream->file.offset;
      break;
    case SEEK_END:
      if (pos > 0 || pos + stream->file.length < 0)
        {
          errno = ERANGE;
          return FALSE;
        }
      real_pos = pos + stream->file.length;
      break;
    default:
      errno = EINVAL;
      return FALSE;
    }

  /* Translate the absolute position into a chunk index plus an
   * in-chunk offset, and load that chunk. */
  chunk = real_pos / stream->file.chunk_size;
  offs = real_pos % stream->file.chunk_size;

  if (!_stream_seek_chunk (stream, chunk))
    return FALSE;

  stream->reader.chunk.offset = offs;
  stream->file.current_chunk = chunk;
  stream->file.offset = real_pos;

  return TRUE;
}
+
+gboolean
+mongo_sync_gridfs_stream_close (mongo_sync_gridfs_stream *stream)
+{
+ if (!stream)
+ {
+ errno = ENOENT;
+ return FALSE;
+ }
+
+ if (stream->file.type != LMC_GRIDFS_FILE_STREAM_READER &&
+ stream->file.type != LMC_GRIDFS_FILE_STREAM_WRITER)
+ {
+ errno = EINVAL;
+ return FALSE;
+ }
+
+ if (stream->file.type == LMC_GRIDFS_FILE_STREAM_WRITER)
+ {
+ bson *meta;
+ gint64 upload_date;
+ GTimeVal tv;
+ gboolean closed = FALSE;
+
+ if (stream->writer.buffer_offset > 0)
+ {
+ closed = _stream_chunk_write (stream->gfs,
+ stream->file.id,
+ stream->file.current_chunk,
+ stream->writer.buffer,
+ stream->writer.buffer_offset);
+
+ if (closed)
+ g_checksum_update (stream->writer.checksum,
+ stream->writer.buffer,
+ stream->writer.buffer_offset);
+ }
+
+ if (closed)
+ {
+ g_get_current_time (&tv);
+ upload_date = (((gint64) tv.tv_sec) * 1000) +
+ (gint64)(tv.tv_usec / 1000);
+
+ /* _id is guaranteed by _stream_new() */
+ meta = bson_new_from_data (bson_data (stream->writer.metadata),
+ bson_size (stream->writer.metadata) - 1);
+ bson_append_int64 (meta, "length", stream->file.length);
+ bson_append_int32 (meta, "chunkSize", stream->file.chunk_size);
+ bson_append_utc_datetime (meta, "uploadDate", upload_date);
+ if (stream->file.length)
+ bson_append_string (meta, "md5",
+ g_checksum_get_string (stream->writer.checksum), -1);
+ bson_finish (meta);
+
+ if (!mongo_sync_cmd_insert (stream->gfs->conn,
+ stream->gfs->ns.files, meta, NULL))
+ {
+ int e = errno;
+
+ bson_free (meta);
+ errno = e;
+ return FALSE;
+ }
+ bson_free (meta);
+ }
+
+ bson_free (stream->writer.metadata);
+ g_checksum_free (stream->writer.checksum);
+ g_free (stream->writer.buffer);
+ }
+ else
+ bson_free (stream->reader.bson);
+
+ g_free (stream->file.id);
+ g_free (stream);
+ return TRUE;
+}
diff --git a/src/sync-gridfs-stream.h b/src/sync-gridfs-stream.h
new file mode 100644
index 0000000..017f2ea
--- /dev/null
+++ b/src/sync-gridfs-stream.h
@@ -0,0 +1,141 @@
+/* sync-gridfs-stream.h - libmongo-client GridFS streaming API
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/sync-gridfs-stream.h
+ * MongoDB GridFS Streaming API.
+ *
+ * @addtogroup mongo_sync_gridfs_api
+ * @{
+ */
+
+#ifndef LIBMONGO_SYNC_GRIDFS_STREAM_H
+#define LIBMONGO_SYNC_GRIDFS_STREAM_H 1
+
+#include <sync-gridfs.h>
+#include <glib.h>
+
+G_BEGIN_DECLS
+
+/** @defgroup mongo_sync_gridfs_stream_api Mongo GridFS Streaming API
+ *
+ * This submodule provides stream-based access to GridFS files. Stream
+ * based access has the advantage of allowing arbitrary reads and
+ * multi-part writes, at the cost of slightly higher memory usage and
+ * lower performance speed.
+ *
+ * It's best used when one needs only part of a file (and not
+ * necessarily a full chunk, or the parts cross chunk boundaries), or
+ * when uploading a file from a source that cannot be fully stored in
+ * a memory buffer, and cannot be mmapped. Such as a network
+ * connection.
+ *
+ * @addtogroup mongo_sync_gridfs_stream_api
+ * @{
+ */
+
+/** Opaque GridFS file stream object type. */
+typedef struct _mongo_sync_gridfs_stream mongo_sync_gridfs_stream;
+
+/** Create a stream reader by finding the file matching a query.
+ *
+ * @param gfs is the GridFS to search on.
+ * @param query is the query based on which the file should be
+ * searched.
+ *
+ * @returns A newly allocated read-only stream object, or NULL on
+ * error.
+ *
+ * @note It is the responsibility of the caller to free the stream once
+ * it is no longer needed.
+ */
+mongo_sync_gridfs_stream *mongo_sync_gridfs_stream_find (mongo_sync_gridfs *gfs,
+ const bson *query);
+
+/** Create a new GridFS stream writer.
+ *
+ * @param gfs is the GridFS to create a file on.
+ * @param metadata is the optional extra file metadata to use.
+ *
+ * @returns A newly allocated write-only stream object, or NULL on
+ * error.
+ *
+ * @note It is the responsibility of the caller to free the stream once
+ * it is no longer needed.
+ */
+mongo_sync_gridfs_stream *mongo_sync_gridfs_stream_new (mongo_sync_gridfs *gfs,
+ const bson *metadata);
+
+/** Read an arbitrary number of bytes from a GridFS stream.
+ *
+ * @param stream is the read-only stream to read from.
+ * @param buffer is the buffer to store the read data in.
+ * @param size is the maximum number of bytes to read.
+ *
+ * @returns The number of bytes read, or -1 on error.
+ *
+ * @note The @a buffer parameter must have enough space allocated to
+ * hold at most @a size bytes.
+ */
+gint64 mongo_sync_gridfs_stream_read (mongo_sync_gridfs_stream *stream,
+ guint8 *buffer,
+ gint64 size);
+
+/** Write an arbitrary number of bytes to a GridFS stream.
+ *
+ * @param stream is the write-only stream to write to.
+ * @param buffer is the data to write.
+ * @param size is the amount of data to write.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_gridfs_stream_write (mongo_sync_gridfs_stream *stream,
+ const guint8 *buffer,
+ gint64 size);
+
+/** Seek to an arbitrary position in a GridFS stream.
+ *
+ * @param stream is the read-only stream to seek in.
+ * @param pos is the position to seek to.
+ * @param whence is used to determine how to seek. Possible values are
+ * @b SEEK_SET which means seek to the given position, @b SEEK_CUR
+ * meaning seek to the current position plus @a pos and @b SEEK_END
+ * which will seek from the end of the file.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_gridfs_stream_seek (mongo_sync_gridfs_stream *stream,
+ gint64 pos,
+ gint whence);
+
+/** Close a GridFS stream.
+ *
+ * Closes the GridFS stream, by writing out the buffered data, and the
+ * metadata if it's a write stream, and freeing up all resources in
+ * all cases.
+ *
+ * @param stream is the GridFS stream to close and free.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_gridfs_stream_close (mongo_sync_gridfs_stream *stream);
+
+/** @} */
+
+G_END_DECLS
+
+/** @} */
+
+#endif
diff --git a/src/sync-gridfs.c b/src/sync-gridfs.c
new file mode 100644
index 0000000..7d1af24
--- /dev/null
+++ b/src/sync-gridfs.c
@@ -0,0 +1,345 @@
+/* sync-gridfs.c - libmongo-client GridFS implementation
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/sync-gridfs.c
+ * MongoDB GridFS implementation.
+ */
+
+#include "sync-gridfs.h"
+#include "libmongo-private.h"
+
+#include <errno.h>
+
+mongo_sync_gridfs *
+mongo_sync_gridfs_new (mongo_sync_connection *conn,
+ const gchar *ns_prefix)
+{
+ mongo_sync_gridfs *gfs;
+ bson *index;
+ gchar *db;
+
+ if (!conn)
+ {
+ errno = ENOTCONN;
+ return NULL;
+ }
+ if (!ns_prefix)
+ {
+ errno = EINVAL;
+ return NULL;
+ }
+ db = strchr (ns_prefix, '.');
+ if (!db)
+ {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ gfs = g_new (mongo_sync_gridfs, 1);
+ gfs->conn = conn;
+
+ gfs->ns.prefix = g_strdup (ns_prefix);
+ gfs->ns.files = g_strconcat (gfs->ns.prefix, ".files", NULL);
+ gfs->ns.chunks = g_strconcat (gfs->ns.prefix, ".chunks", NULL);
+ gfs->ns.db = g_strndup (ns_prefix, db - ns_prefix);
+
+ gfs->chunk_size = 256 * 1024;
+
+ index = bson_new_sized (256);
+ bson_append_int32 (index, "files_id", 1);
+ bson_append_int32 (index, "n", 1);
+ bson_finish (index);
+
+ if (!mongo_sync_cmd_index_create (conn, gfs->ns.chunks, index,
+ MONGO_INDEX_UNIQUE))
+ {
+ bson_free (index);
+ mongo_sync_gridfs_free (gfs, FALSE);
+
+ errno = EPROTO;
+ return NULL;
+ }
+ bson_free (index);
+
+ return gfs;
+}
+
+void
+mongo_sync_gridfs_free (mongo_sync_gridfs *gfs, gboolean disconnect)
+{
+ if (!gfs)
+ {
+ errno = ENOTCONN;
+ return;
+ }
+
+ g_free (gfs->ns.prefix);
+ g_free (gfs->ns.files);
+ g_free (gfs->ns.chunks);
+ g_free (gfs->ns.db);
+
+ if (disconnect)
+ mongo_sync_disconnect (gfs->conn);
+
+ g_free (gfs);
+ errno = 0;
+}
+
+gint32
+mongo_sync_gridfs_get_chunk_size (mongo_sync_gridfs *gfs)
+{
+ if (!gfs)
+ {
+ errno = ENOTCONN;
+ return -1;
+ }
+ return gfs->chunk_size;
+}
+
+gboolean
+mongo_sync_gridfs_set_chunk_size (mongo_sync_gridfs *gfs,
+ gint32 chunk_size)
+{
+ if (!gfs)
+ {
+ errno = ENOTCONN;
+ return FALSE;
+ }
+ if (chunk_size < 1)
+ {
+ errno = EINVAL;
+ return FALSE;
+ }
+
+ gfs->chunk_size = chunk_size;
+ return TRUE;
+}
+
+mongo_sync_cursor *
+mongo_sync_gridfs_list (mongo_sync_gridfs *gfs,
+ const bson *query)
+{
+ mongo_sync_cursor *cursor;
+ bson *q = NULL;
+
+ if (!gfs)
+ {
+ errno = ENOTCONN;
+ return NULL;
+ }
+
+ if (!query)
+ {
+ q = bson_new ();
+ bson_finish (q);
+ }
+
+ cursor = mongo_sync_cursor_new
+ (gfs->conn, gfs->ns.files,
+ mongo_sync_cmd_query (gfs->conn, gfs->ns.files, 0, 0, 0,
+ (q) ? q : query, NULL));
+ if (!cursor)
+ {
+ int e = errno;
+
+ bson_free (q);
+ errno = e;
+ return NULL;
+ }
+ bson_free (q);
+ return cursor;
+}
+
+const guint8 *
+mongo_sync_gridfs_file_get_id (gpointer gfile)
+{
+ mongo_sync_gridfs_chunked_file *c = (mongo_sync_gridfs_chunked_file *)gfile;
+ mongo_sync_gridfs_stream *s = (mongo_sync_gridfs_stream *)gfile;
+
+ if (!gfile)
+ {
+ errno = ENOTCONN;
+ return NULL;
+ }
+ if (c->meta.type == LMC_GRIDFS_FILE_CHUNKED)
+ return c->meta.oid;
+ else
+ return s->file.id;
+}
+
+gint64
+mongo_sync_gridfs_file_get_length (gpointer gfile)
+{
+ mongo_sync_gridfs_file_common *f = (mongo_sync_gridfs_file_common *)gfile;
+
+ if (!gfile)
+ {
+ errno = ENOTCONN;
+ return -1;
+ }
+ return f->length;
+}
+
+gint32
+mongo_sync_gridfs_file_get_chunk_size (gpointer gfile)
+{
+ mongo_sync_gridfs_file_common *f = (mongo_sync_gridfs_file_common *)gfile;
+
+ if (!gfile)
+ {
+ errno = ENOTCONN;
+ return -1;
+ }
+ return f->chunk_size;
+}
+
+const gchar *
+mongo_sync_gridfs_file_get_md5 (gpointer gfile)
+{
+ mongo_sync_gridfs_chunked_file *f = (mongo_sync_gridfs_chunked_file *)gfile;
+
+ if (!gfile)
+ {
+ errno = ENOTCONN;
+ return NULL;
+ }
+ if (f->meta.type != LMC_GRIDFS_FILE_CHUNKED)
+ {
+ errno = EOPNOTSUPP;
+ return NULL;
+ }
+
+ return f->meta.md5;
+}
+
+gint64
+mongo_sync_gridfs_file_get_date (gpointer gfile)
+{
+ mongo_sync_gridfs_chunked_file *f = (mongo_sync_gridfs_chunked_file *)gfile;
+
+ if (!gfile)
+ {
+ errno = ENOTCONN;
+ return -1;
+ }
+ if (f->meta.type != LMC_GRIDFS_FILE_CHUNKED)
+ {
+ errno = EOPNOTSUPP;
+ return -1;
+ }
+
+ return f->meta.date;
+}
+
+const bson *
+mongo_sync_gridfs_file_get_metadata (gpointer gfile)
+{
+ mongo_sync_gridfs_chunked_file *f = (mongo_sync_gridfs_chunked_file *)gfile;
+
+ if (!gfile)
+ {
+ errno = ENOTCONN;
+ return NULL;
+ }
+ if (f->meta.type != LMC_GRIDFS_FILE_CHUNKED)
+ {
+ errno = EOPNOTSUPP;
+ return NULL;
+ }
+
+ return f->meta.metadata;
+}
+
+gint64
+mongo_sync_gridfs_file_get_chunks (gpointer gfile)
+{
+ mongo_sync_gridfs_file_common *f = (mongo_sync_gridfs_file_common *)gfile;
+ double chunk_count;
+
+ if (!gfile)
+ {
+ errno = ENOTCONN;
+ return -1;
+ }
+
+ chunk_count = (double)f->length / (double)f->chunk_size;
+ return (chunk_count - (gint64)chunk_count > 0) ?
+ (gint64)(chunk_count + 1) : (gint64)(chunk_count);
+}
+
/* Remove every GridFS file matching @query, along with its chunks.
 *
 * Iterates the files collection; for each match, the metadata
 * document is deleted by _id and then the chunks by files_id. A
 * failed chunk deletion is deliberately non-fatal, since a file may
 * have no chunks at all.
 */
gboolean
mongo_sync_gridfs_remove (mongo_sync_gridfs *gfs,
                          const bson *query)
{
  mongo_sync_cursor *fc;

  fc = mongo_sync_gridfs_list (gfs, query);
  if (!fc)
    {
      /* Preserve ENOTCONN from _list(); map everything else to
       * ENOENT ("nothing matched"). */
      if (errno != ENOTCONN)
        errno = ENOENT;
      return FALSE;
    }

  while (mongo_sync_cursor_next (fc))
    {
      bson *meta = mongo_sync_cursor_get_data (fc), *q;
      bson_cursor *c;
      const guint8 *ooid;
      guint8 oid[12];

      c = bson_find (meta, "_id");
      if (!bson_cursor_get_oid (c, &ooid))
        {
          bson_free (meta);
          bson_cursor_free (c);
          mongo_sync_cursor_free (fc);

          errno = EPROTO;
          return FALSE;
        }
      bson_cursor_free (c);
      /* ooid points into meta; copy it out before meta is freed. */
      memcpy (oid, ooid, 12);
      bson_free (meta);

      /* Delete metadata */
      q = bson_build (BSON_TYPE_OID, "_id", oid,
                      BSON_TYPE_NONE);
      bson_finish (q);

      if (!mongo_sync_cmd_delete (gfs->conn, gfs->ns.files, 0, q))
        {
          bson_free (q);
          mongo_sync_cursor_free (fc);
          return FALSE;
        }
      bson_free (q);

      /* Delete chunks */
      q = bson_build (BSON_TYPE_OID, "files_id", oid,
                      BSON_TYPE_NONE);
      bson_finish (q);

      /* Chunks may or may not exist, an error in this case is
         non-fatal. */
      mongo_sync_cmd_delete (gfs->conn, gfs->ns.chunks, 0, q);
      bson_free (q);
    }

  mongo_sync_cursor_free (fc);

  return TRUE;
}
diff --git a/src/sync-gridfs.h b/src/sync-gridfs.h
new file mode 100644
index 0000000..5d9ae1c
--- /dev/null
+++ b/src/sync-gridfs.h
@@ -0,0 +1,193 @@
+/* sync-gridfs.h - libmongo-client GridFS API
+ * Copyright 2011, 2012 Gergely Nagy <algernon@balabit.hu>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file src/sync-gridfs.h
+ * MongoDB GridFS API.
+ *
+ * @addtogroup mongo_sync
+ * @{
+ */
+
+#ifndef LIBMONGO_SYNC_GRIDFS_H
+#define LIBMONGO_SYNC_GRIDFS_H 1
+
+#include <mongo-sync.h>
+#include <mongo-sync-cursor.h>
+#include <glib.h>
+
+G_BEGIN_DECLS
+
+/** @defgroup mongo_sync_gridfs_api Mongo GridFS API
+ *
+ * The GridFS API - and related modules, like @ref
+ * mongo_sync_gridfs_chunk_api and @ref mongo_sync_gridfs_stream_api -
+ * provide a conveneint way to work with GridFS, and files stored on
+ * it.
+ *
+ * This module implements the GridFS support functions, which allow
+ * one to connect to or create new GridFS instances, list or remove
+ * files, or retrieve metadata about files opened by one of the
+ * sub-modules.
+ *
+ * @addtogroup mongo_sync_gridfs_api
+ * @{
+ */
+
+/** Opaque GridFS object. */
+typedef struct _mongo_sync_gridfs mongo_sync_gridfs;
+
+/** Create a new GridFS object.
+ *
+ * @param conn is the MongoDB connection to base the filesystem object
+ * on.
+ * @param ns_prefix is the prefix the GridFS collections should be
+ * under.
+ *
+ * @returns A newly allocated GridFS object, or NULL on error.
+ */
+mongo_sync_gridfs *mongo_sync_gridfs_new (mongo_sync_connection *conn,
+ const gchar *ns_prefix);
+
+/** Close and free a GridFS object.
+ *
+ * @param gfs is the GridFS object to free up.
+ * @param disconnect signals whether to free the underlying connection
+ * as well.
+ */
+void mongo_sync_gridfs_free (mongo_sync_gridfs *gfs, gboolean disconnect);
+
+/** Get the default chunk size of a GridFS object.
+ *
+ * @param gfs is the GridFS object to get the default chunk size of.
+ *
+ * @returns The chunk size in bytes, or -1 on error.
+ */
+gint32 mongo_sync_gridfs_get_chunk_size (mongo_sync_gridfs *gfs);
+
+/** Set the default chunk size of a GridFS object.
+ *
+ * @param gfs is the GridFS object to set the default chunk size of.
+ * @param chunk_size is the desired default chunk size.
+ *
+ * @returns TRUE on success, FALSE otherwise.
+ */
+gboolean mongo_sync_gridfs_set_chunk_size (mongo_sync_gridfs *gfs,
+ gint32 chunk_size);
+
+/** List GridFS files matching a query.
+ *
+ * Finds all files on a GridFS, based on a custom query.
+ *
+ * @param gfs is the GridFS to list files from.
+ * @param query is the custom query based on which files shall be
+ * sought. Passing a NULL query will find all files, without
+ * restriction.
+ *
+ * @returns A newly allocated cursor object, or NULL on error. It is
+ * the responsibility of the caller to free the returned cursor once
+ * it is no longer needed.
+ */
+mongo_sync_cursor *mongo_sync_gridfs_list (mongo_sync_gridfs *gfs,
+ const bson *query);
+
+/** Delete files matching a query from GridFS.
+ *
+ * Finds all files on a GridFS, based on a custom query, and removes
+ * them.
+ *
+ * @param gfs is the GridFS to delete files from.
+ * @param query is the custom query based on which files shall be
+ * sought. Passing a NULL query will find all files, without
+ * restriction.
+ *
+ * @returns TRUE if all files were deleted successfully, FALSE
+ * otherwise.
+ */
+gboolean mongo_sync_gridfs_remove (mongo_sync_gridfs *gfs,
+ const bson *query);
+
+/* Metadata */
+
+/** Get the file ID of a GridFS file.
+ *
+ * @param gfile is the GridFS file to work with.
+ *
+ * @returns The ObjectID of the file, or NULL on error. The returned
+ * pointer points to an internal area, and should not be modified or
+ * freed, and is only valid as long as the file object is valid.
+ */
+const guint8 *mongo_sync_gridfs_file_get_id (gpointer gfile);
+
+/** Get the length of a GridFS file.
+ *
+ * @param gfile is the GridFS file to work with.
+ *
+ * @returns The length of the file, or -1 on error.
+ */
+gint64 mongo_sync_gridfs_file_get_length (gpointer gfile);
+
+/** Get the chunk size of a GridFS file.
+ *
+ * @param gfile is the GridFS file to work with.
+ *
+ * @returns The maximum size of the chunks of the file, or -1 on error.
+ */
+gint32 mongo_sync_gridfs_file_get_chunk_size (gpointer gfile);
+
+/** Get the MD5 digest of a GridFS file.
+ *
+ * @param gfile is the GridFS file to work with.
+ *
+ * @returns The MD5 digest of the file, or NULL on error. The returned
+ * pointer points to an internal area, and should not be modified or
+ * freed, and is only valid as long as the file object is valid.
+ */
+const gchar *mongo_sync_gridfs_file_get_md5 (gpointer gfile);
+
+/** Get the upload date of a GridFS file.
+ *
+ * @param gfile is the GridFS file to work with.
+ *
+ * @returns The upload date of the file, or -1 on error.
+ */
+gint64 mongo_sync_gridfs_file_get_date (gpointer gfile);
+
+/** Get the full metadata of a GridFS file
+ *
+ * @param gfile is the GridFS file to work with.
+ *
+ * @returns A BSON object containing the full metadata, or NULL on
+ * error. The returned pointer points to an internal area, and should
+ * not be modified or freed, and is only valid as long as the file
+ * object is valid.
+ */
+const bson *mongo_sync_gridfs_file_get_metadata (gpointer gfile);
+
+/** Get the number of chunks in a GridFS file.
+ *
+ * @param gfile is the GridFS file to work with.
+ *
+ * @returns The number of chunks in the GridFS file, or -1 on error.
+ */
+gint64 mongo_sync_gridfs_file_get_chunks (gpointer gfile);
+
+/** @} */
+
+G_END_DECLS
+
+/** @} */
+
+#endif
diff --git a/tests/Makefile.am b/tests/Makefile.am
new file mode 100644
index 0000000..b6328e0
--- /dev/null
+++ b/tests/Makefile.am
@@ -0,0 +1,241 @@
+SUBDIRS = libtap
+
+bson_unit_tests = \
+ unit/bson/bson_new \
+ unit/bson/bson_empty \
+ unit/bson/bson_validate_key \
+ \
+ unit/bson/bson_append_string \
+ unit/bson/bson_append_double \
+ unit/bson/bson_append_boolean \
+ unit/bson/bson_append_utc_datetime \
+ unit/bson/bson_append_null \
+ unit/bson/bson_append_int32 \
+ unit/bson/bson_append_int64 \
+ unit/bson/bson_append_regexp \
+ unit/bson/bson_append_binary \
+ unit/bson/bson_append_js_code \
+ unit/bson/bson_append_symbol \
+ unit/bson/bson_append_js_code_w_scope \
+ unit/bson/bson_append_timestamp \
+ unit/bson/bson_append_oid \
+ unit/bson/bson_append_document \
+ unit/bson/bson_append_array \
+ \
+ unit/bson/bson_reset \
+ unit/bson/bson_new_from_data \
+ \
+ unit/bson/bson_build \
+ unit/bson/bson_build_full \
+ \
+ unit/bson/bson_type_as_string \
+ \
+ unit/bson/bson_cursor_new \
+ unit/bson/bson_find \
+ unit/bson/bson_cursor_next \
+ unit/bson/bson_cursor_find_next \
+ unit/bson/bson_cursor_find \
+ unit/bson/bson_cursor_type \
+ unit/bson/bson_cursor_type_as_string \
+ unit/bson/bson_cursor_key \
+ \
+ unit/bson/bson_cursor_get_string \
+ unit/bson/bson_cursor_get_double \
+ unit/bson/bson_cursor_get_document \
+ unit/bson/bson_cursor_get_array \
+ unit/bson/bson_cursor_get_binary \
+ unit/bson/bson_cursor_get_oid \
+ unit/bson/bson_cursor_get_boolean \
+ unit/bson/bson_cursor_get_utc_datetime \
+ unit/bson/bson_cursor_get_regex \
+ unit/bson/bson_cursor_get_javascript \
+ unit/bson/bson_cursor_get_symbol \
+ unit/bson/bson_cursor_get_javascript_w_scope \
+ unit/bson/bson_cursor_get_int32 \
+ unit/bson/bson_cursor_get_timestamp \
+ unit/bson/bson_cursor_get_int64
+
+bson_func_tests = \
+ func/bson/huge_doc \
+ func/bson/f_weird_types
+
+bson_perf_tests = \
+ perf/bson/p_bson_find
+
+mongo_utils_unit_tests = \
+ unit/mongo/utils/oid_init \
+ unit/mongo/utils/oid_new \
+ unit/mongo/utils/oid_new_with_time \
+ unit/mongo/utils/oid_as_string \
+ unit/mongo/utils/parse_addr
+
+mongo_wire_unit_tests = \
+ unit/mongo/wire/packet_new \
+ unit/mongo/wire/packet_get_set_header \
+ unit/mongo/wire/packet_get_set_header_raw \
+ unit/mongo/wire/packet_get_set_data \
+ \
+ unit/mongo/wire/reply_packet_get_header \
+ unit/mongo/wire/reply_packet_get_data \
+ unit/mongo/wire/reply_packet_get_nth_document \
+ \
+ unit/mongo/wire/cmd_update \
+ unit/mongo/wire/cmd_insert \
+ unit/mongo/wire/cmd_insert_n \
+ unit/mongo/wire/cmd_query \
+ unit/mongo/wire/cmd_get_more \
+ unit/mongo/wire/cmd_delete \
+ unit/mongo/wire/cmd_kill_cursors \
+ unit/mongo/wire/cmd_custom
+
+mongo_client_unit_tests = \
+ unit/mongo/client/connect \
+ unit/mongo/client/disconnect \
+ unit/mongo/client/packet_send \
+ unit/mongo/client/packet_recv \
+ unit/mongo/client/connection_set_timeout \
+ unit/mongo/client/connection_get_requestid
+
+mongo_client_func_tests = \
+ func/mongo/client/f_client_big_packet
+
+mongo_sync_unit_tests = \
+ unit/mongo/sync/sync_connect \
+ unit/mongo/sync/sync_connect_cache \
+ unit/mongo/sync/sync_conn_seed_add \
+ unit/mongo/sync/sync_conn_seed_add_cache \
+ unit/mongo/sync/sync_reconnect \
+ unit/mongo/sync/sync_disconnect \
+ unit/mongo/sync/sync_get_set_auto_reconnect \
+ unit/mongo/sync/sync_get_set_safe_mode \
+ unit/mongo/sync/sync_get_set_slaveok \
+ unit/mongo/sync/sync_get_set_max_insert_size \
+ unit/mongo/sync/sync_cmd_update \
+ unit/mongo/sync/sync_cmd_insert \
+ unit/mongo/sync/sync_cmd_insert_n \
+ unit/mongo/sync/sync_cmd_query \
+ unit/mongo/sync/sync_cmd_get_more \
+ unit/mongo/sync/sync_cmd_delete \
+ unit/mongo/sync/sync_cmd_kill_cursors \
+ unit/mongo/sync/sync_cmd_custom \
+ unit/mongo/sync/sync_cmd_count \
+ unit/mongo/sync/sync_cmd_create \
+ unit/mongo/sync/sync_cmd_exists \
+ unit/mongo/sync/sync_cmd_drop \
+ unit/mongo/sync/sync_cmd_get_last_error \
+ unit/mongo/sync/sync_cmd_get_last_error_full \
+ unit/mongo/sync/sync_cmd_reset_error \
+ unit/mongo/sync/sync_cmd_is_master \
+ unit/mongo/sync/sync_cmd_ping \
+ unit/mongo/sync/sync_cmd_user_add \
+ unit/mongo/sync/sync_cmd_user_add_with_roles \
+ unit/mongo/sync/sync_cmd_user_remove \
+ unit/mongo/sync/sync_cmd_authenticate \
+ unit/mongo/sync/sync_cmd_authenticate_cache \
+ unit/mongo/sync/sync_cmd_index_create \
+ unit/mongo/sync/sync_cmd_index_drop \
+ unit/mongo/sync/sync_cmd_index_drop_all \
+ unit/mongo/sync/sync_connect_from_cache_enforce_primary
+
+mongo_sync_func_tests = \
+ func/mongo/sync/f_sync_max_insert_size \
+ func/mongo/sync/f_sync_conn_seed_add \
+ func/mongo/sync/f_sync_safe_mode \
+ func/mongo/sync/f_sync_safe_mode_cache \
+ func/mongo/sync/f_sync_auto_reconnect \
+ func/mongo/sync/f_sync_auto_reconnect_cache \
+ func/mongo/sync/f_sync_oidtest \
+ func/mongo/sync/f_sync_auto_reauth \
+ func/mongo/sync/f_sync_invalid_getlasterror \
+ func/mongo/sync/f_sync_write_error
+
+mongo_sync_cursor_unit_tests = \
+ unit/mongo/sync-cursor/sync_cursor_new \
+ unit/mongo/sync-cursor/sync_cursor_next \
+ unit/mongo/sync-cursor/sync_cursor_get_data \
+ unit/mongo/sync-cursor/sync_cursor_free
+
+mongo_sync_cursor_func_tests = \
+ func/mongo/sync-cursor/f_sync_cursor_iterate \
+ func/mongo/sync-cursor/f_sync_cursor_tailable
+
+mongo_sync_pool_unit_tests = \
+ unit/mongo/sync-pool/sync_pool_new \
+ unit/mongo/sync-pool/sync_pool_free \
+ unit/mongo/sync-pool/sync_pool_pick \
+ unit/mongo/sync-pool/sync_pool_return
+
+mongo_sync_pool_func_tests = \
+ func/mongo/sync-pool/f_sync_pool
+
+mongo_sync_gridfs_unit_tests = \
+ unit/mongo/sync-gridfs/sync_gridfs_new \
+ unit/mongo/sync-gridfs/sync_gridfs_free \
+ unit/mongo/sync-gridfs/sync_gridfs_get_set_chunk_size \
+ unit/mongo/sync-gridfs/sync_gridfs_list \
+ unit/mongo/sync-gridfs/sync_gridfs_remove \
+ unit/mongo/sync-gridfs/sync_gridfs_file_get_metadata
+
+mongo_sync_gridfs_chunk_unit_tests = \
+ unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_find \
+ unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_new_from_buffer \
+ unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_free \
+ unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_cursor_new \
+ unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_cursor_get_chunk
+
+mongo_sync_gridfs_chunk_func_tests = \
+ func/mongo/sync-gridfs-chunk/f_sync_gridfs_chunk
+
+mongo_sync_gridfs_stream_unit_tests = \
+ unit/mongo/sync-gridfs-stream/sync_gridfs_stream_find \
+ unit/mongo/sync-gridfs-stream/sync_gridfs_stream_new \
+ unit/mongo/sync-gridfs-stream/sync_gridfs_stream_read \
+ unit/mongo/sync-gridfs-stream/sync_gridfs_stream_write \
+ unit/mongo/sync-gridfs-stream/sync_gridfs_stream_seek \
+ unit/mongo/sync-gridfs-stream/sync_gridfs_stream_close
+
+mongo_sync_gridfs_stream_func_tests = \
+ func/mongo/sync-gridfs-stream/f_sync_gridfs_stream
+
+UNIT_TESTS = ${bson_unit_tests} ${mongo_utils_unit_tests} \
+ ${mongo_wire_unit_tests} ${mongo_client_unit_tests} \
+ ${mongo_sync_unit_tests} ${mongo_sync_cursor_unit_tests} \
+ ${mongo_sync_pool_unit_tests} ${mongo_sync_gridfs_unit_tests} \
+ ${mongo_sync_gridfs_chunk_unit_tests} \
+ ${mongo_sync_gridfs_stream_unit_tests}
+FUNC_TESTS = ${bson_func_tests} ${mongo_sync_func_tests} \
+ ${mongo_client_func_tests} \
+ ${mongo_sync_cursor_func_tests} ${mongo_sync_pool_func_tests} \
+ ${mongo_sync_gridfs_func_tests} \
+ ${mongo_sync_gridfs_chunk_func_tests} \
+ ${mongo_sync_gridfs_stream_func_tests}
+PERF_TESTS = ${bson_perf_tests}
+TESTCASES = ${UNIT_TESTS} ${FUNC_TESTS} ${PERF_TESTS}
+
+check_PROGRAMS = ${TESTCASES} test_cleanup
+
+AM_CFLAGS = -I$(top_srcdir)/src/ -I${top_srcdir}/tests/libtap/ @GLIB_CFLAGS@
+AM_LDFLAGS = -no-install
+LDADD = $(top_builddir)/src/libmongo-client.la ${top_builddir}/tests/libtap/libtap.la @GLIB_LIBS@
+
+EXTRA_DIST = README \
+ runall \
+ coverage.sh \
+ tools/coverage-report-entry.pl tools/coverage-report.pl \
+ tools/coverage-report.xsl
+
+PROVE = prove -e "${PROVE_ENV}" ${PROVE_OPTIONS}
+
+check-%: BASE=$(subst -,_,$(subst check-,,$@))
+check-%: TESTCASES=$(value $(BASE)_unit_tests) $(value $(BASE)_func_tests) $(value $(BASE)_tests)
+check-%: check-recursive test_cleanup ${TESTCASES}
+ $(AM_V_at) ${builddir}/test_cleanup
+ $(AM_V_GEN) srcdir=${srcdir} ${PROVE} ${TESTCASES}
+ $(AM_V_at) ${builddir}/test_cleanup
+
+check: check-recursive test_cleanup ${TESTCASES}
+ $(AM_V_at) ${builddir}/test_cleanup
+ $(AM_V_GEN) srcdir=${srcdir} ${PROVE} ${TESTCASES}
+ $(AM_V_at) ${builddir}/test_cleanup
+
+.PHONY: check
diff --git a/tests/README b/tests/README
new file mode 100644
index 0000000..f8a2c08
--- /dev/null
+++ b/tests/README
@@ -0,0 +1,28 @@
+About the test suite -*- org -*-
+====================
+
+The test suite has two parts: the basic tests, which do not require
+anything outside of this library, and networked tests, which require a
+certain network setup if one wants to run them all.
+
+The basic tests are run as part of `make check', while to run the
+network tests, one must do a few other things, after which the
+networked tests will be run as well:
+
+* Set up a mongodb server, and set up variables for the test suite
+
+One must set the `TEST_PRIMARY' variable to the "IP:PORT" of the
+mongodb server.
+
+For example, assuming a bourne shell:
+
+ $ TEST_PRIMARY="127.0.0.1:27017"; export TEST_PRIMARY
+
+* To test replica sets, point the test suite to a secondary node
+
+First of all, one will need to set up a Replica Set (see the mongodb
+documentation for examples and a tutorial), and point the test suite
+to a *secondary* node by setting the `TEST_SECONDARY' environment
+variable:
+
+ $ TEST_SECONDARY="127.0.0.1:27018"; export TEST_SECONDARY
diff --git a/tests/coverage.sh b/tests/coverage.sh
new file mode 100755
index 0000000..f3a32f4
--- /dev/null
+++ b/tests/coverage.sh
@@ -0,0 +1,43 @@
+#! /bin/sh
+# Generate a gcov-based coverage report for the library sources.
+# Expects SOURCES, srcdir, builddir and top_srcdir in the environment
+# (supplied by the Makefile); all output lands in ./coverage/.
+
+install -d coverage
+rm -f coverage/report.txt
+
+# Run gcov on every compiled source that actually has coverage data
+# (a libmongo_client_la-*.gcno file next to its object).
+for src in ${SOURCES}; do
+    case "$src" in
+        *.c)
+            obj=`echo $src | sed 's|\.c|.o|'`
+            gc=`echo $src | sed 's|\.c|.gcno|'`
+            if test -f "${builddir}/.libs/libmongo_client_la-$obj"; then
+                objdir=${builddir}/.libs
+            else
+                objdir=${builddir}
+            fi
+            if ! test -f "${objdir}/libmongo_client_la-${gc}"; then
+                continue
+            fi
+            gcov -b -f ${srcdir}/$src -o $objdir/libmongo_client_la-$obj >coverage/$src.cov
+            ;;
+    esac
+done
+
+# Build the XML index from the per-file .cov outputs, then render it
+# to HTML.
+perl ${top_srcdir}/tests/tools/coverage-report.pl coverage/*.cov >coverage/index.xml
+xsltproc ${top_srcdir}/tests/tools/coverage-report.xsl coverage/index.xml >coverage/index.html
+
+# Render each annotated source to HTML and collect the per-file
+# "Lines executed" summaries into the plain-text report.
+for src in ${SOURCES}; do
+    case "$src" in
+        *.c)
+            if ! test -f "${src}.gcov"; then
+                continue
+            fi
+
+            perl ${top_srcdir}/tests/tools/coverage-report-entry.pl ${src}.gcov > coverage/${src}.gcov.html
+            grep -A4 -m 1 "File '${srcdir}/$src'" coverage/$src.cov | grep -v "^--" >>coverage/report.txt
+            echo >>coverage/report.txt
+            ;;
+    esac
+done
+
+# Average the per-file percentages and sum the line counts with bc to
+# produce the overall figure.
+coverage=`(echo "scale=2"; echo -n "("; echo -n $(grep "Lines executed" coverage/report.txt | cut -d: -f2 | cut -d "%" -f 1) | sed -e "s, , + ,g"; echo ") / " $(grep -c "Lines executed" coverage/report.txt)) | bc -q`
+lines=`(echo -n "("; echo -n $(grep "Lines executed" coverage/report.txt | cut -d% -f2- | cut -d " " -f3-) | sed -e "s, , + ,g"; echo ")") | bc -q`
+echo "Overall coverage: $coverage% of $lines source lines" >>coverage/report.txt
diff --git a/tests/func/bson/f_weird_types.c b/tests/func/bson/f_weird_types.c
new file mode 100644
index 0000000..100db8c
--- /dev/null
+++ b/tests/func/bson/f_weird_types.c
@@ -0,0 +1,71 @@
+#include "bson.h"
+#include "tap.h"
+#include "test.h"
+
+#include "libmongo-private.h"
+
+#include <string.h>
+
+/* Verify that the BSON cursor machinery copes with element types it
+ * does not implement: a known-but-unsupported type (DBPointer) must
+ * be skipped over transparently, while an invalid type byte
+ * (BSON_TYPE_NONE) must make lookups and iteration bail out. */
+static void
+test_func_weird_types (void)
+{
+  bson *b;
+  bson_cursor *c;
+  guint8 type = BSON_TYPE_DBPOINTER;
+  gint32 slen;
+
+  b = bson_new ();
+  bson_append_int32 (b, "int32", 42);
+
+  /* Append weird stuff */
+  /* Hand-craft a DBPointer element directly into the byte array:
+     type byte, NUL-terminated key, int32 string length, the string
+     itself, then a 12-byte ObjectId. */
+  b->data = g_byte_array_append (b->data, (const guint8 *)&type, sizeof (type));
+  b->data = g_byte_array_append (b->data, (const guint8 *)"dbpointer",
+                                 strlen ("dbpointer") + 1);
+  slen = GINT32_TO_LE (strlen ("refname") + 1);
+  b->data = g_byte_array_append (b->data, (const guint8 *)&slen, sizeof (gint32));
+  b->data = g_byte_array_append (b->data, (const guint8 *)"refname",
+                                 strlen ("refname") + 1);
+  b->data = g_byte_array_append (b->data, (const guint8 *)"0123456789ABCDEF",
+                                 12);
+
+  bson_append_boolean (b, "Here be dragons?", TRUE);
+  bson_finish (b);
+
+  c = bson_find (b, "Here be dragons?");
+  ok (c != NULL,
+      "bson_find() can find elements past unsupported BSON types");
+  bson_cursor_free (c);
+  bson_free (b);
+
+  /* Now do it again, but append a type we can't iterate over */
+  b = bson_new ();
+  bson_append_int32 (b, "int32", 42);
+
+  /* Append BSON_TYPE_NONE */
+  type = BSON_TYPE_NONE;
+  b->data = g_byte_array_append (b->data, (const guint8 *)&type, sizeof (type));
+  b->data = g_byte_array_append (b->data, (const guint8 *)"dbpointer",
+                                 strlen ("dbpointer") + 1);
+  b->data = g_byte_array_append (b->data, (const guint8 *)"0123456789ABCDEF",
+                                 12);
+
+  bson_append_boolean (b, "Here be dragons?", TRUE);
+  bson_finish (b);
+
+  c = bson_find (b, "Here be dragons?");
+  ok (c == NULL,
+      "bson_find() should bail out when encountering an invalid element.");
+  bson_cursor_free (c);
+
+  c = bson_cursor_new (b);
+  bson_cursor_next (c); /* This will find the first element, and
+                           position us there. */
+  bson_cursor_next (c); /* This positions after the first element. */
+  ok (bson_cursor_next (c) == FALSE,
+      "bson_cursor_next() should bail out when encountering an invalid element.");
+  bson_cursor_free (c);
+
+  bson_free (b);
+}
+
+RUN_TEST (3, func_weird_types);
diff --git a/tests/func/bson/huge_doc.c b/tests/func/bson/huge_doc.c
new file mode 100644
index 0000000..d5daafe
--- /dev/null
+++ b/tests/func/bson/huge_doc.c
@@ -0,0 +1,51 @@
+#include "bson.h"
+#include "tap.h"
+#include "test.h"
+
+#ifndef HUGE_DOC_SIZE
+#define HUGE_DOC_SIZE (1024 * 1024)
+#endif
+
+#include <string.h>
+
+/* Build a ~1MiB BSON document, embed it in another document, and
+ * verify that the embedded copy round-trips with its exact original
+ * size. */
+static void
+test_bson_huge_doc (void)
+{
+  bson *b, *s;
+  bson_cursor *c;
+  gchar *buffer;
+  gint32 ds1; /* size of the inner document, for later comparison */
+
+  /* A HUGE_DOC_SIZE string of 'a's, NUL-terminated. */
+  buffer = (gchar *)g_malloc (HUGE_DOC_SIZE);
+  memset (buffer, 'a', HUGE_DOC_SIZE);
+  buffer[HUGE_DOC_SIZE - 1] = '\0';
+
+  b = bson_new ();
+  bson_append_int32 (b, "preamble", 1);
+  bson_append_string (b, "huge", buffer, -1);
+  bson_append_int32 (b, "post", 1234);
+  bson_finish (b);
+  ds1 = bson_size (b);
+
+  g_free (buffer);
+
+  s = bson_new ();
+  bson_append_document (s, "hugedoc", b);
+  bson_finish (s);
+  bson_free (b);
+
+  cmp_ok (bson_size (s), ">", ds1,
+          "Document embedding another huge one, has bigger size");
+
+  /* bson_cursor_get_document() hands back a fresh bson in b, freed
+     below. */
+  c = bson_find (s, "hugedoc");
+  bson_cursor_get_document (c, &b);
+
+  cmp_ok (bson_size (b), "==", ds1,
+          "The embedded document has the correct, huge size");
+
+  bson_cursor_free (c);
+  bson_free (s);
+  bson_free (b);
+}
+
+RUN_TEST (2, bson_huge_doc);
diff --git a/tests/func/mongo/client/f_client_big_packet.c b/tests/func/mongo/client/f_client_big_packet.c
new file mode 100644
index 0000000..38176ff
--- /dev/null
+++ b/tests/func/mongo/client/f_client_big_packet.c
@@ -0,0 +1,57 @@
+#include "test.h"
+#include "mongo.h"
+
+/* Parenthesized so the macro expands safely inside any expression. */
+#define BIG_PACKET_SIZE (2 * 1024 * 1024)
+
+/* Verify that a multi-megabyte document can be inserted and then
+ * received back intact over a low-level mongo_connection. */
+void
+test_func_client_big_packet (void)
+{
+  mongo_connection *conn;
+  mongo_packet *p;
+
+  guint8 *data;
+  bson *b;
+  gint32 exp_size;
+
+  conn = mongo_connect (config.primary_host, config.primary_port);
+
+  b = bson_new_sized (BIG_PACKET_SIZE + 1024);
+  data = g_malloc (BIG_PACKET_SIZE);
+  memset (data, 'z', BIG_PACKET_SIZE);
+  bson_append_boolean (b, "big_packet_size", TRUE);
+  bson_append_binary (b, "bighead", BSON_BINARY_SUBTYPE_GENERIC,
+                      data, BIG_PACKET_SIZE);
+  bson_finish (b);
+  exp_size = bson_size (b);
+  /* The BSON object holds its own copy of the payload by now; the
+     scratch buffer was previously leaked. */
+  g_free (data);
+
+  p = mongo_wire_cmd_insert (1, config.ns, b, NULL);
+  mongo_packet_send (conn, p);
+  bson_free (b);
+  mongo_wire_packet_free (p);
+
+  b = bson_new ();
+  bson_append_boolean (b, "big_packet_size", TRUE);
+  bson_finish (b);
+
+  p = mongo_wire_cmd_query (2, config.ns, 0, 0, 1, b, NULL);
+  mongo_packet_send (conn, p);
+  mongo_wire_packet_free (p);
+  bson_free (b);
+
+  p = mongo_packet_recv (conn);
+  ok (p != NULL,
+      "mongo_packet_recv() works with a huge packet");
+
+  mongo_wire_reply_packet_get_nth_document (p, 1, &b);
+  bson_finish (b);
+  mongo_wire_packet_free (p);
+
+  cmp_ok (exp_size + 17, "==", bson_size (b), /* +17: _id + value */
+          "Huge packet receiving works, and returns a same sized packet");
+
+  bson_free (b);
+
+  mongo_disconnect (conn);
+}
+
+RUN_NET_TEST (2, func_client_big_packet);
diff --git a/tests/func/mongo/sync-cursor/f_sync_cursor_iterate.c b/tests/func/mongo/sync-cursor/f_sync_cursor_iterate.c
new file mode 100644
index 0000000..56ccb77
--- /dev/null
+++ b/tests/func/mongo/sync-cursor/f_sync_cursor_iterate.c
@@ -0,0 +1,88 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <string.h>
+
+/* Insert ten consecutively numbered documents, then iterate over them
+ * with a sync cursor (batch size 3, forcing get_more round-trips) and
+ * verify that every document comes back exactly once, in order. */
+void
+test_func_mongo_sync_cursor_iterate (void)
+{
+  mongo_sync_connection *conn;
+  bson *query, *result;
+  mongo_sync_cursor *sc;
+  bson_cursor *c;
+  gint i;
+  gint32 first_i32 = -1, last_i32 = -1, current_i32 = -1;
+  /* early_break: get_data() failed mid-iteration;
+     continous: i32 values arrived strictly consecutively. */
+  gboolean early_break = FALSE, continous = TRUE;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+
+  for (i = 0; i < 10; i++)
+    {
+      bson *data = bson_new ();
+      bson_append_boolean (data, "f_sync_cursor_iterate", TRUE);
+      bson_append_int32 (data, "i32", 42 * 100 + i);
+      bson_finish (data);
+
+      mongo_sync_cmd_insert (conn, config.ns, data, NULL);
+      bson_free (data);
+    }
+
+  query = bson_new ();
+  bson_append_boolean (query, "f_sync_cursor_iterate", TRUE);
+  bson_finish (query);
+
+  sc = mongo_sync_cursor_new (conn, config.ns,
+                              mongo_sync_cmd_query (conn, config.ns, 0, 0, 3,
+                                                    query, NULL));
+  bson_free (query);
+
+  ok (sc != NULL,
+      "mongo_sync_cursor_new() works");
+
+  /* Before the first _next() the cursor has no current document. */
+  result = mongo_sync_cursor_get_data (sc);
+  ok (result == NULL,
+      "mongo_sync_cursor_get_data() should fail without _cursor_next()");
+
+  i = 0;
+  while (mongo_sync_cursor_next (sc) && i < 10)
+    {
+      result = mongo_sync_cursor_get_data (sc);
+
+      if (!result)
+        {
+          early_break = TRUE;
+          break;
+        }
+      i++;
+      c = bson_find (result, "i32");
+      bson_cursor_get_int32 (c, &current_i32);
+      bson_cursor_free (c);
+      bson_free (result);
+
+      /* Seed the sequence check from the first document seen. */
+      if (first_i32 == -1)
+        {
+          first_i32 = current_i32;
+          last_i32 = first_i32 - 1;
+        }
+
+      if (current_i32 != last_i32 + 1)
+        continous = FALSE;
+      last_i32 = current_i32;
+    }
+
+  ok (early_break == FALSE,
+      "mongo_sync_cursor_next() can iterate over the whole stuff");
+  ok (continous == TRUE,
+      "mongo_sync_cursor_next() iterates over all elements");
+
+  cmp_ok (first_i32, "!=", last_i32,
+          "Iteration returns different elements, as expected");
+  cmp_ok (i, ">=", 10,
+          "Iteration really does return all documents");
+
+  mongo_sync_cursor_free (sc);
+  mongo_sync_disconnect (conn);
+}
+
+RUN_NET_TEST (6, func_mongo_sync_cursor_iterate);
diff --git a/tests/func/mongo/sync-cursor/f_sync_cursor_tailable.c b/tests/func/mongo/sync-cursor/f_sync_cursor_tailable.c
new file mode 100644
index 0000000..c200ed8
--- /dev/null
+++ b/tests/func/mongo/sync-cursor/f_sync_cursor_tailable.c
@@ -0,0 +1,115 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <string.h>
+
+/* Exercise tailable cursors: they must be refused on non-capped
+ * collections, and on a capped collection a tailable cursor must see
+ * documents inserted after it was exhausted, while a plain cursor
+ * must not. */
+void
+test_func_mongo_sync_cursor_tailable (void)
+{
+  mongo_sync_connection *conn;
+  bson *query, *data;
+  mongo_sync_cursor *sc, *tc;
+  mongo_packet *p;
+  gint i;
+  gchar *capped_ns, *capped_coll;
+
+  bson_cursor *c;
+  gboolean tailed = FALSE;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+
+  query = bson_new ();
+  bson_finish (query);
+
+  p = mongo_sync_cmd_query (conn, config.ns,
+                            MONGO_WIRE_FLAG_QUERY_TAILABLE_CURSOR |
+                            MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT,
+                            0, 3, query, NULL);
+  /* The query object stays ours; it was previously leaked when the
+     variable got reassigned below. */
+  bson_free (query);
+  ok (p == NULL,
+      "Tailable cursors should not work on non-capped collections");
+
+  capped_coll = g_strconcat (config.coll, ".capped", NULL);
+  capped_ns = g_strconcat (config.ns, ".capped", NULL);
+
+  /* Create a small capped collection to tail. */
+  query = bson_build (BSON_TYPE_STRING, "create", capped_coll, -1,
+                      BSON_TYPE_BOOLEAN, "capped", TRUE,
+                      BSON_TYPE_INT32, "size", 64 * 1024 * 10,
+                      BSON_TYPE_NONE);
+  bson_finish (query);
+
+  mongo_sync_cmd_drop (conn, config.db, capped_coll);
+  p = mongo_sync_cmd_custom (conn, config.db, query);
+  bson_free (query);
+
+  ok (p != NULL,
+      "Creating a capped collection works");
+  mongo_wire_packet_free (p);
+
+  for (i = 0; i < 10; i++)
+    {
+      data = bson_new ();
+      bson_append_boolean (data, "f_sync_cursor_tailable", TRUE);
+      bson_append_int32 (data, "i32", 42 * 1000 + i);
+      bson_finish (data);
+
+      mongo_sync_cmd_insert (conn, capped_ns, data, NULL);
+      bson_free (data);
+    }
+
+  query = bson_new ();
+  bson_append_boolean (query, "f_sync_cursor_tailable", TRUE);
+  bson_finish (query);
+
+  /* One tailable and one plain cursor over the same documents. */
+  tc = mongo_sync_cursor_new (conn, capped_ns,
+                              mongo_sync_cmd_query (conn, capped_ns,
+                                                    MONGO_WIRE_FLAG_QUERY_TAILABLE_CURSOR |
+                                                    MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT,
+                                                    0, 3, query, NULL));
+
+  sc = mongo_sync_cursor_new (conn, capped_ns,
+                              mongo_sync_cmd_query (conn, capped_ns,
+                                                    0,
+                                                    0, 3, query, NULL));
+
+  bson_free (query);
+
+  /* Exhaust both queries */
+  for (i = 0; i < 10; i++)
+    {
+      mongo_sync_cursor_next (tc);
+      mongo_sync_cursor_next (sc);
+    }
+
+  data = bson_new ();
+  bson_append_boolean (data, "f_sync_cursor_tailable", TRUE);
+  bson_append_boolean (data, "tailed", TRUE);
+  bson_finish (data);
+
+  mongo_sync_cmd_insert (conn, capped_ns, data, NULL);
+  bson_free (data);
+
+  ok (mongo_sync_cursor_next (tc) == TRUE,
+      "mongo_sync_cursor_next() works after a tailable cursor got new data");
+  ok (mongo_sync_cursor_next (sc) == FALSE,
+      "mongo_sync_cursor_next() fails on a non-tailable cursor");
+
+  data = mongo_sync_cursor_get_data (tc);
+  ok (data != NULL,
+      "mongo_sync_cursor_get_data() works on a tailable cursor");
+  c = bson_find (data, "tailed");
+  bson_cursor_get_boolean (c, &tailed);
+  ok (tailed == TRUE,
+      "We got the appropriate data back!");
+  bson_cursor_free (c);
+  /* get_data() returns a copy the caller owns (the iterate test frees
+     its equivalent); it was previously leaked here. */
+  bson_free (data);
+
+  mongo_sync_cursor_free (sc);
+  mongo_sync_cursor_free (tc);
+
+  mongo_sync_cmd_drop (conn, config.db, capped_coll);
+  g_free (capped_ns);
+  g_free (capped_coll);
+  mongo_sync_disconnect (conn);
+}
+
+RUN_NET_TEST (6, func_mongo_sync_cursor_tailable);
diff --git a/tests/func/mongo/sync-gridfs-chunk/f_sync_gridfs_chunk.c b/tests/func/mongo/sync-gridfs-chunk/f_sync_gridfs_chunk.c
new file mode 100644
index 0000000..cac6e28
--- /dev/null
+++ b/tests/func/mongo/sync-gridfs-chunk/f_sync_gridfs_chunk.c
@@ -0,0 +1,499 @@
+#include "test.h"
+#include "mongo.h"
+
+/* Size of the test payload (1 MiB plus a remainder, so it is not a
+ * whole number of chunks).  Parenthesized so the macro expands safely
+ * inside larger expressions. */
+#define FILE_SIZE (1024 * 1024 + 12345)
+
+static guint8 noname_oid[12];
+static guint8 named_oid[12];
+static guint8 binsub_oid[12];
+
+/* Upload two FILE_SIZE buffers filled with 'x' through the chunked
+ * GridFS API: one with explicit metadata (a filename and a pre-made
+ * ObjectID), one with no metadata at all.  The IDs of both files are
+ * stashed in the file-scope named_oid / noname_oid arrays so the later
+ * get/list/remove tests can find them again. */
+void
+test_func_sync_gridfs_put (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_chunked_file *gfile;
+  bson *meta;
+  guint8 *data, *oid;
+  gchar *oid_s;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+  oid = mongo_util_oid_new (1);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test", -1,
+                     BSON_TYPE_OID, "_id", oid,
+                     BSON_TYPE_NONE);
+  g_free (oid); /* no longer needed once built into meta */
+  bson_finish (meta);
+
+  data = g_malloc (FILE_SIZE);
+  memset (data, 'x', FILE_SIZE);
+
+  gfile = mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, meta,
+                                                          data, FILE_SIZE);
+  ok (gfile != NULL,
+      "GridFS file upload (with metadata) works!");
+  memcpy (named_oid, mongo_sync_gridfs_file_get_id (gfile), 12);
+  oid_s = mongo_util_oid_as_string (named_oid);
+  note ("Named file ID : %s\n", oid_s);
+  g_free (oid_s);
+  mongo_sync_gridfs_chunked_file_free (gfile);
+
+  /* Same payload again, this time letting the library invent the
+   * metadata (and the ObjectID) itself. */
+  gfile = mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, NULL,
+                                                          data, FILE_SIZE);
+  ok (gfile != NULL,
+      "GridFS file upload (w/o metadata) works!");
+  memcpy (noname_oid, mongo_sync_gridfs_file_get_id (gfile), 12);
+  oid_s = mongo_util_oid_as_string (noname_oid);
+  note ("Noname file ID: %s\n", oid_s);
+  g_free (oid_s);
+  mongo_sync_gridfs_chunked_file_free (gfile);
+
+  g_free (data);
+  bson_free (meta);
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Seed the <prefix>.files collection with deliberately malformed
+ * metadata documents, each tagged with a "my-id" marker string so the
+ * get-invalid tests can query for them later.  The documents are
+ * inserted with the raw insert command, bypassing any validation the
+ * GridFS API itself would perform. */
+void
+test_func_sync_gridfs_put_invalid (void)
+{
+  mongo_sync_connection *conn;
+  bson *meta;
+  gchar *ns;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  ns = g_strconcat (config.gfs_prefix, ".files", NULL);
+
+  /* Insert metadata without any of the required fields but ID. */
+  meta = bson_build (BSON_TYPE_STRING, "my-id", "id-only", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  /* Insert metadata with an ID that's not an ObjectID. */
+  meta = bson_build (BSON_TYPE_STRING, "_id", "I'm a teapot", -1,
+                     BSON_TYPE_STRING, "my-id", "string-id", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  /* Insert metadata with invalid length type. */
+  meta = bson_build (BSON_TYPE_DOUBLE, "length", 1.0,
+                     BSON_TYPE_STRING, "my-id", "invalid-length", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  /* Insert metadata with invalid chunkSize type. */
+  meta = bson_build (BSON_TYPE_INT32, "length", 10,
+                     BSON_TYPE_DOUBLE, "chunkSize", 12.5,
+                     BSON_TYPE_STRING, "my-id", "invalid-chunkSize", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  /* Insert metadata with invalid uploadDate type. */
+  meta = bson_build (BSON_TYPE_INT32, "length", 10,
+                     BSON_TYPE_INT32, "chunkSize", 12,
+                     BSON_TYPE_STRING, "my-id", "invalid-date", -1,
+                     BSON_TYPE_INT32, "uploadDate", 1234,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  /* Insert metadata with invalid md5 type. */
+  meta = bson_build (BSON_TYPE_INT32, "length", 32,
+                     BSON_TYPE_INT32, "chunkSize", 12,
+                     BSON_TYPE_UTC_DATETIME, "uploadDate", (gint64)1234,
+                     BSON_TYPE_INT32, "md5", 0,
+                     BSON_TYPE_STRING, "my-id", "invalid-md5", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  /* Insert a valid metadata, without chunks. */
+  meta = bson_build (BSON_TYPE_INT32, "length", 32,
+                     BSON_TYPE_INT32, "chunkSize", 12,
+                     BSON_TYPE_UTC_DATETIME, "uploadDate", (gint64)1234,
+                     BSON_TYPE_STRING, "md5", "deadbeef", -1,
+                     BSON_TYPE_STRING, "my-id", "no-chunks", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  g_free (ns);
+  mongo_sync_disconnect (conn);
+}
+
+/* Fetch one GridFS file matching @query and verify its attributes:
+ * its _id equals @oid, its length is FILE_SIZE, its chunk size matches
+ * the GridFS default, its metadata is retrievable, and the number of
+ * chunks equals what the file header reports.  When @validate_md5 is
+ * TRUE, additionally check that the chunk sizes sum to the file length
+ * (the binary-subtype caller passes FALSE, since its hand-crafted chunk
+ * carries 4 extra prefix bytes).
+ * NOTE(review): one fewer ok() runs when @validate_md5 is FALSE; the
+ * test plan total must account for this. */
+void
+validate_file (mongo_sync_gridfs *gfs, const bson *query, guint8 *oid,
+               gboolean validate_md5)
+{
+  mongo_sync_gridfs_chunked_file *f;
+  mongo_sync_cursor *cursor;
+  gint64 n = 0, tsize = 0;
+  const bson *meta;
+  gchar *oid_s;
+
+  f = mongo_sync_gridfs_chunked_find (gfs, query);
+
+  ok (f != NULL,
+      "File not found");
+  ok (memcmp (mongo_sync_gridfs_file_get_id (f), oid, 12) == 0,
+      "File _id matches");
+  cmp_ok (mongo_sync_gridfs_file_get_length (f), "==", FILE_SIZE,
+          "File length matches");
+  cmp_ok (mongo_sync_gridfs_file_get_chunk_size (f), "==",
+          mongo_sync_gridfs_get_chunk_size (gfs),
+          "File chunk size matches");
+
+  oid_s = mongo_util_oid_as_string (mongo_sync_gridfs_file_get_id (f));
+  note ("File info:\n\tid = %s; length = %" G_GINT64_FORMAT "; "
+        "chunk_size = %d; date = %" G_GINT64_FORMAT "; "
+        "md5 = %s; n = %" G_GINT64_FORMAT "\n",
+
+        oid_s,
+        mongo_sync_gridfs_file_get_length (f),
+        mongo_sync_gridfs_file_get_chunk_size (f),
+        mongo_sync_gridfs_file_get_date (f),
+        mongo_sync_gridfs_file_get_md5 (f),
+        mongo_sync_gridfs_file_get_chunks (f));
+  g_free (oid_s);
+  meta = mongo_sync_gridfs_file_get_metadata (f);
+  ok (meta != NULL,
+      "mongo_sync_gridfs_file_get_metadata() works");
+
+  /* Walk every chunk, counting them and summing their payload sizes. */
+  cursor = mongo_sync_gridfs_chunked_file_cursor_new (f, 0, 0);
+  while (mongo_sync_cursor_next (cursor))
+    {
+      gint32 size;
+      guint8 *data;
+
+      data = mongo_sync_gridfs_chunked_file_cursor_get_chunk (cursor, &size);
+      g_free (data);
+
+      tsize += size;
+      n++;
+    }
+  mongo_sync_cursor_free (cursor);
+
+  if (validate_md5)
+    cmp_ok (mongo_sync_gridfs_file_get_length (f), "==", tsize,
+            "File size matches the sum of its chunks");
+  cmp_ok (mongo_sync_gridfs_file_get_chunks (f), "==", n,
+          "Number of chunks matches the expected number");
+
+  mongo_sync_gridfs_chunked_file_free (f);
+}
+
+/* Look up both files uploaded by the put test -- one by filename, one
+ * by ObjectID -- and run the full validate_file() check suite on each. */
+void
+test_func_sync_gridfs_get (void)
+{
+  mongo_sync_gridfs *gfs;
+  bson *q;
+
+  gfs = mongo_sync_gridfs_new
+    (mongo_sync_connect (config.primary_host, config.primary_port, TRUE),
+     config.gfs_prefix);
+
+  /* The named file can be found via its "filename" attribute. */
+  q = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test", -1,
+                  BSON_TYPE_NONE);
+  bson_finish (q);
+  validate_file (gfs, q, named_oid, TRUE);
+  bson_free (q);
+
+  /* The anonymous file is only reachable through its _id. */
+  q = bson_build (BSON_TYPE_OID, "_id", noname_oid,
+                  BSON_TYPE_NONE);
+  bson_finish (q);
+  validate_file (gfs, q, noname_oid, TRUE);
+  bson_free (q);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Helper: assert that a chunked find for the file tagged with the given
+ * "my-id" marker fails, reporting the result under the supplied TAP
+ * message. */
+void
+test_get_invalid (mongo_sync_gridfs *gfs, gchar *name, gchar *msg)
+{
+  bson *q = bson_build (BSON_TYPE_STRING, "my-id", name, -1,
+                        BSON_TYPE_NONE);
+
+  bson_finish (q);
+  ok (mongo_sync_gridfs_chunked_find (gfs, q) == NULL, msg);
+  bson_free (q);
+}
+
+/* Verify that mongo_sync_gridfs_chunked_find() rejects every broken
+ * metadata document inserted by test_func_sync_gridfs_put_invalid(),
+ * and that chunk extraction fails gracefully on a document that is not
+ * a GridFS chunk. */
+void
+test_func_sync_gridfs_get_invalid (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_cursor *cursor;
+  bson *query;
+  gchar *ns;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+  /* Fix: the TAP messages below said "chunked__find" (double
+   * underscore); corrected to the real function name. */
+  test_get_invalid (gfs, "unknown",
+                    "mongo_sync_gridfs_chunked_find() should fail when no file "
+                    "is found");
+  test_get_invalid (gfs, "id-only",
+                    "mongo_sync_gridfs_chunked_find() should fail if the metadata "
+                    "is incomplete");
+  test_get_invalid (gfs, "string-id",
+                    "mongo_sync_gridfs_chunked_find() should fail if the _id is "
+                    "not an ObjectID");
+  test_get_invalid (gfs, "invalid-length",
+                    "mongo_sync_gridfs_chunked_find() should fail if length is "
+                    "of inappropriate type");
+  test_get_invalid (gfs, "invalid-chunkSize",
+                    "mongo_sync_gridfs_chunked_find() should fail if chunkSize is "
+                    "of inappropriate type");
+  test_get_invalid (gfs, "invalid-date",
+                    "mongo_sync_gridfs_chunked_find() should fail if uploadDate is "
+                    "of inappropriate type");
+  test_get_invalid (gfs, "invalid-md5",
+                    "mongo_sync_gridfs_chunked_find() should fail if md5 is of "
+                    "inappropriate type");
+
+  /* Pull back a .files document that is not a chunk, and make sure the
+   * chunk accessor refuses it. */
+  ns = g_strconcat (config.gfs_prefix, ".files", NULL);
+  query = bson_build (BSON_TYPE_STRING, "my-id", "id-only", -1,
+                      BSON_TYPE_NONE);
+  bson_finish (query);
+
+  cursor = mongo_sync_cursor_new (conn, ns,
+                                  mongo_sync_cmd_query (conn, ns, 0, 0, 0,
+                                                        query, NULL));
+  bson_free (query);
+  mongo_sync_cursor_next (cursor);
+  ok (mongo_sync_gridfs_chunked_file_cursor_get_chunk (cursor, NULL) == NULL,
+      "mongo_sync_gridfs_chunked_file_cursor_get_chunk() should fail with "
+      "invalid data");
+
+  /* Fix: the cursor and the namespace string were previously leaked. */
+  mongo_sync_cursor_free (cursor);
+  g_free (ns);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Exercise mongo_sync_gridfs_list(): a query matching nothing must
+ * return NULL, a query by _id must find the named file, and a NULL
+ * query must enumerate (at least) both files uploaded earlier. */
+void
+test_func_sync_gridfs_list (void)
+{
+  mongo_sync_gridfs *gfs;
+  bson *query, *data;
+  mongo_sync_cursor *cursor;
+  bson_cursor *c;
+  const gchar *str;
+  gboolean found_named = FALSE, found_noname = FALSE;
+  const guint8 *oid;
+
+  gfs = mongo_sync_gridfs_new
+    (mongo_sync_connect (config.primary_host, config.primary_port, TRUE),
+     config.gfs_prefix);
+
+  /* Test list with an invalid query */
+  query = bson_build (BSON_TYPE_STRING, "no-such-field",
+                      "You're not seeing this field.", -1,
+                      BSON_TYPE_NONE);
+  bson_finish (query);
+
+  cursor = mongo_sync_gridfs_list (gfs, query);
+  ok (cursor == NULL,
+      "mongo_sync_gridfs_list() should fail if there query "
+      "does not match anything");
+  bson_free (query);
+
+  /* Test list with a query */
+  query = bson_build (BSON_TYPE_OID, "_id", named_oid,
+                      BSON_TYPE_NONE);
+  bson_finish (query);
+
+  cursor = mongo_sync_gridfs_list (gfs, query);
+  ok (cursor != NULL,
+      "mongo_sync_gridfs_list() correctly finds files by query");
+
+  mongo_sync_cursor_next (cursor);
+  data = mongo_sync_cursor_get_data (cursor);
+  c = bson_find (data, "filename");
+  bson_cursor_get_string (c, &str);
+  bson_cursor_free (c);
+
+  is (str, "libmongo-test",
+      "The listed file is named correctly");
+  bson_free (data);
+  mongo_sync_cursor_free (cursor);
+
+  bson_free (query);
+
+  /* Test list without a query: walk every file, looking for the two
+   * ObjectIDs recorded by the put test. */
+  cursor = mongo_sync_gridfs_list (gfs, NULL);
+  while (mongo_sync_cursor_next (cursor))
+    {
+      data = mongo_sync_cursor_get_data (cursor);
+
+      c = bson_find (data, "_id");
+      bson_cursor_get_oid (c, (const guint8 **)&oid);
+      bson_cursor_free (c);
+
+      if (memcmp (oid, named_oid, 12) == 0)
+        found_named = TRUE;
+      if (memcmp (oid, noname_oid, 12) == 0)
+        found_noname = TRUE;
+
+      bson_free (data);
+    }
+  mongo_sync_cursor_free (cursor);
+
+  ok (found_named == TRUE && found_noname == TRUE,
+      "mongo_sync_gridfs_list() finds both uploaded files without a query");
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Exercise mongo_sync_gridfs_remove() with a non-matching query, a
+ * file whose _id is not an ObjectId, and finally a valid removal by
+ * _id.  (The "fync" in the function name is a historic typo; it is
+ * kept because the test driver calls it by this name.) */
+void
+test_fync_sync_gridfs_remove (void)
+{
+  mongo_sync_gridfs *gfs;
+  bson *query;
+
+  gfs = mongo_sync_gridfs_new
+    (mongo_sync_connect (config.primary_host, config.primary_port, TRUE),
+     config.gfs_prefix);
+
+  /* Test with a non-matching query */
+  query = bson_build (BSON_TYPE_STRING, "no-such-field",
+                      "You're not seeing this field.", -1,
+                      BSON_TYPE_NONE);
+  bson_finish (query);
+
+  ok (mongo_sync_gridfs_remove (gfs, query) == FALSE,
+      "mongo_sync_gridfs_remove() should fail if there's nothing to delete.");
+  bson_free (query);
+
+  /* Test with a non-string id */
+  query = bson_build (BSON_TYPE_STRING, "my-id", "string-id", -1,
+                      BSON_TYPE_NONE);
+  bson_finish (query);
+
+  ok (mongo_sync_gridfs_remove (gfs, query) == FALSE,
+      "mongo_sync_gridfs_remove() should fail if the file id is not "
+      "an ObjectId");
+  bson_free (query);
+
+  /* Test with a working query */
+  query = bson_build (BSON_TYPE_OID, "_id", named_oid,
+                      BSON_TYPE_NONE);
+  bson_finish (query);
+
+  ok (mongo_sync_gridfs_remove (gfs, query) == TRUE,
+      "mongo_sync_gridfs_remove() works");
+  bson_free (query); /* fix: was bson_finish(), leaking the document */
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Upload a file, then rewrite its chunk data directly in
+ * <prefix>.chunks: the replacement is a BSON binary of the old-style
+ * BINARY subtype, i.e. a 4-byte little-endian length prefix followed
+ * by the payload.  The matching get test then verifies that such
+ * chunks are read back correctly. */
+void
+test_func_sync_gridfs_put_binary_subtype (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_chunked_file *gfile;
+  bson *meta, *query, *update;
+  guint8 *data;
+  gchar *chunk_ns;
+  guint32 size = GINT32_TO_LE(FILE_SIZE); /* little-endian length prefix */
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "binsub-libmongo-test", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  /* Buffer layout: [4-byte LE size][FILE_SIZE bytes of 'x'].  Only the
+   * payload part is uploaded through the GridFS API. */
+  data = g_malloc (FILE_SIZE + 4);
+  memcpy (data, &size, 4);
+  memset (data + 4, 'x', FILE_SIZE);
+
+  gfile = mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, meta,
+                                                          data + 4, FILE_SIZE);
+  memcpy (binsub_oid, mongo_sync_gridfs_file_get_id (gfile), 12);
+
+  query = bson_build (BSON_TYPE_OID, "files_id",
+                      mongo_sync_gridfs_file_get_id (gfile),
+                      BSON_TYPE_NONE);
+  bson_finish (query);
+
+  mongo_sync_gridfs_chunked_file_free (gfile);
+  bson_free (meta);
+
+  /* $set the chunk's data field to the full prefixed buffer. */
+  update = bson_build_full (BSON_TYPE_DOCUMENT, "$set", TRUE,
+                            bson_build (BSON_TYPE_BINARY, "data",
+                                        BSON_BINARY_SUBTYPE_BINARY,
+                                        data, FILE_SIZE + 4,
+                                        BSON_TYPE_NONE),
+                            BSON_TYPE_NONE);
+  bson_finish (update);
+  g_free (data);
+
+  chunk_ns = g_strconcat (config.gfs_prefix, ".chunks", NULL);
+  mongo_sync_cmd_update (conn, chunk_ns, MONGO_WIRE_FLAG_UPDATE_UPSERT,
+                         query, update);
+
+  bson_free (query);
+  bson_free (update);
+  g_free (chunk_ns);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Retrieve the binary-subtype file by name and validate it; md5/size
+ * summing is skipped, since its chunk carries 4 extra prefix bytes. */
+void
+test_func_sync_gridfs_get_binary_subtype (void)
+{
+  mongo_sync_gridfs *gfs;
+  bson *q;
+
+  gfs = mongo_sync_gridfs_new
+    (mongo_sync_connect (config.primary_host, config.primary_port, TRUE),
+     config.gfs_prefix);
+
+  q = bson_build (BSON_TYPE_STRING, "filename", "binsub-libmongo-test", -1,
+                  BSON_TYPE_NONE);
+  bson_finish (q);
+  validate_file (gfs, q, binsub_oid, FALSE);
+  bson_free (q);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Test driver for the chunked GridFS API.  Order matters: the get,
+ * list and remove tests consume the files (and the file-scope OIDs)
+ * created by the put tests.
+ * NOTE(review): the sleep() presumably lets the server settle between
+ * the plain and binary-subtype rounds -- TODO confirm why it is
+ * needed. */
+void
+test_func_sync_gridfs_chunk (void)
+{
+  /* ObjectID generation must be initialized before any upload. */
+  mongo_util_oid_init (0);
+
+  test_func_sync_gridfs_put ();
+  test_func_sync_gridfs_get ();
+  test_func_sync_gridfs_list ();
+
+  sleep (2);
+
+  test_func_sync_gridfs_put_binary_subtype ();
+  test_func_sync_gridfs_get_binary_subtype ();
+
+  test_func_sync_gridfs_put_invalid ();
+  test_func_sync_gridfs_get_invalid ();
+
+  test_fync_sync_gridfs_remove ();
+}
+
+RUN_NET_TEST (37, func_sync_gridfs_chunk);
diff --git a/tests/func/mongo/sync-gridfs-stream/f_sync_gridfs_stream.c b/tests/func/mongo/sync-gridfs-stream/f_sync_gridfs_stream.c
new file mode 100644
index 0000000..a2c3690
--- /dev/null
+++ b/tests/func/mongo/sync-gridfs-stream/f_sync_gridfs_stream.c
@@ -0,0 +1,501 @@
+#include "test.h"
+#include "mongo.h"
+#include "compat.h"
+
+/* Test payload and write-slice sizes; parenthesized so the macros
+ * expand safely inside larger expressions. */
+#define FILE_SIZE (1024 * 1024 + 12345)
+#define BUFFER_SIZE (64 * 1024)
+
+gchar *write_md5 = NULL;
+static gint seq = 1;
+
+/* Creating a write stream before mongo_util_oid_init() has been called
+ * must fail, since no ObjectID can be generated for the new file. */
+void
+test_func_sync_gridfs_stream_without_oid_init (void)
+{
+  mongo_sync_gridfs *gfs;
+
+  gfs = mongo_sync_gridfs_new
+    (mongo_sync_connect (config.primary_host, config.primary_port, FALSE),
+     config.gfs_prefix);
+
+  ok (mongo_sync_gridfs_stream_new (gfs, NULL) == NULL,
+      "mongo_sync_gridfs_stream_new() fails without mongo_util_oid_init()");
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Stream FILE_SIZE bytes to GridFS in BUFFER_SIZE slices, each slice
+ * filled with a different byte value, and record the payload's MD5 in
+ * the write_md5 global so the read test can verify the round-trip. */
+void
+test_func_sync_gridfs_stream_write (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  bson *meta;
+  guint8 *data, *oid;
+  gint pos = 0;
+  gint filler = 0;
+  gboolean write_ok = TRUE;
+  GChecksum *chk;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+  oid = mongo_util_oid_new (seq++);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test-stream", -1,
+                     BSON_TYPE_OID, "_id", oid,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+  g_free (oid);
+
+  stream = mongo_sync_gridfs_stream_new (gfs, meta);
+  ok (stream != NULL,
+      "mongo_sync_gridfs_stream_new() works");
+  bson_free (meta);
+
+  data = g_malloc (BUFFER_SIZE);
+
+  chk = g_checksum_new (G_CHECKSUM_MD5);
+
+  while (pos < FILE_SIZE)
+    {
+      gint csize = BUFFER_SIZE;
+
+      /* The final slice is shorter: FILE_SIZE is not a multiple of
+       * BUFFER_SIZE. */
+      if (csize + pos > FILE_SIZE)
+        csize = FILE_SIZE - pos;
+
+      memset (data, filler++, BUFFER_SIZE);
+
+      /* Only the csize bytes actually written count toward the MD5. */
+      g_checksum_update (chk, data, csize);
+
+      write_ok &= mongo_sync_gridfs_stream_write (stream, data, csize);
+      pos += csize;
+    }
+  ok (write_ok == TRUE,
+      "All stream_write()s succeeded");
+
+  write_md5 = g_strdup (g_checksum_get_string (chk));
+  g_checksum_free (chk);
+
+  note ("File MD5: %s\n", write_md5);
+
+  g_free (data);
+  ok (mongo_sync_gridfs_stream_close (stream) == TRUE,
+      "mongo_sync_gridfs_stream_close() works");
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Stream-write a BUFFER_SIZE file, then rewrite its chunk directly in
+ * <prefix>.chunks as a BSON binary of the old-style BINARY subtype
+ * (4-byte little-endian length prefix + payload), for the matching
+ * binary-subtype read test. */
+void
+test_func_sync_gridfs_stream_write_binary_subtype (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  bson *meta, *update;
+  guint8 *data, *oid;
+  gboolean write_ok = TRUE;
+  guint32 size = GINT32_TO_LE(BUFFER_SIZE); /* little-endian length prefix */
+  gchar *ns;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+  /* The oid is kept alive until the chunk update below is issued. */
+  oid = mongo_util_oid_new (seq++);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test-stream-bintype", -1,
+                     BSON_TYPE_OID, "_id", oid,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+
+  stream = mongo_sync_gridfs_stream_new (gfs, meta);
+  ok (stream != NULL,
+      "mongo_sync_gridfs_stream_new() works");
+  bson_free (meta);
+
+  /* Buffer layout: [4-byte LE size][BUFFER_SIZE bytes of 'x']; only
+   * the payload part goes through the stream API. */
+  data = g_malloc (BUFFER_SIZE + 4);
+  memcpy (data, &size, 4);
+  memset (data + 4, 'x', BUFFER_SIZE);
+  write_ok = mongo_sync_gridfs_stream_write (stream, data + 4, BUFFER_SIZE);
+  ok (write_ok == TRUE,
+      "All stream_write()s succeeded");
+
+  ok (mongo_sync_gridfs_stream_close (stream) == TRUE,
+      "mongo_sync_gridfs_stream_close() works");
+
+  /* Replace the uploaded chunk's data with the prefixed buffer. */
+  meta = bson_build (BSON_TYPE_OID, "files_id", oid,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  update = bson_build_full (BSON_TYPE_DOCUMENT, "$set", TRUE,
+                            bson_build (BSON_TYPE_BINARY, "data",
+                                        BSON_BINARY_SUBTYPE_BINARY,
+                                        data, BUFFER_SIZE + 4,
+                                        BSON_TYPE_NONE),
+                            BSON_TYPE_NONE);
+  bson_finish (update);
+  g_free (data);
+
+  ns = g_strconcat (config.gfs_prefix, ".chunks", NULL);
+  mongo_sync_cmd_update (conn, ns, MONGO_WIRE_FLAG_UPDATE_UPSERT,
+                         meta, update);
+  bson_free (meta);
+  bson_free (update);
+  g_free (ns);
+  g_free (oid);
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Verify the stream API rejects a custom non-OID _id, and seed the
+ * <prefix>.files collection with malformed metadata documents (tagged
+ * with "my-id" markers) for the stream read-invalid tests. */
+void
+test_func_sync_gridfs_stream_write_invalid (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  bson *meta;
+  gchar *ns;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+  ns = g_strconcat (config.gfs_prefix, ".files", NULL);
+
+  /* Try to write a file with a custom, non-OID _id */
+  meta = bson_build (BSON_TYPE_STRING, "filename", "lmc-invalid-id", -1,
+                     BSON_TYPE_STRING, "_id", "Short and stout", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_new (gfs, meta);
+  ok (stream == NULL,
+      "mongo_sync_gridfs_stream_new() should fail if meta has an invalid _id");
+  bson_free (meta);
+
+  /* Write a file with a non-OID _id, bypassing the GridFS API. */
+  meta = bson_build (BSON_TYPE_STRING, "_id", "Short and stout", -1,
+                     BSON_TYPE_STRING, "my-id", "stream:string-id", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  /* Insert metadata with invalid length type. */
+  meta = bson_build (BSON_TYPE_DOUBLE, "length", 1.0,
+                     BSON_TYPE_STRING, "my-id", "stream:invalid-length", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  /* Insert metadata with invalid chunkSize type. */
+  meta = bson_build (BSON_TYPE_INT32, "length", 10,
+                     BSON_TYPE_DOUBLE, "chunkSize", 12.5,
+                     BSON_TYPE_STRING, "my-id", "stream:invalid-chunkSize", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  /* Insert a valid metadata, without chunks. */
+  meta = bson_build (BSON_TYPE_INT32, "length", 32,
+                     BSON_TYPE_INT32, "chunkSize", 12,
+                     BSON_TYPE_UTC_DATETIME, "uploadDate", (gint64)1234,
+                     BSON_TYPE_STRING, "md5", "deadbeef", -1,
+                     BSON_TYPE_STRING, "my-id", "stream:no-chunks", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  g_free (ns); /* fix: the namespace string was previously leaked */
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Read back the streamed file in 12345-byte slices, verifying both the
+ * total length and that the MD5 matches write_md5 recorded on upload. */
+void
+test_func_sync_gridfs_stream_read (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  guint8 data[12345];
+  gint64 pos = 0;
+  bson *meta;
+
+  GChecksum *chk;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test-stream", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  ok (stream != NULL,
+      "mongo_sync_gridfs_stream_find() works");
+  bson_free (meta);
+
+  chk = g_checksum_new (G_CHECKSUM_MD5);
+
+  while (pos < FILE_SIZE)
+    {
+      gint64 r;
+
+      r = mongo_sync_gridfs_stream_read (stream, data, sizeof (data));
+      if (r == -1)
+        break; /* read error: bail out; the length check below fails */
+
+      g_checksum_update (chk, data, r);
+      pos += r;
+    }
+
+  cmp_ok (pos, "==", FILE_SIZE,
+          "mongo_sync_gridfs_stream_read() works");
+  is (g_checksum_get_string (chk), write_md5,
+      "md5sums match");
+
+  g_checksum_free (chk);
+  ok (mongo_sync_gridfs_stream_close (stream) == TRUE,
+      "mongo_sync_gridfs_stream_close() works");
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Read back the binary-subtype file written earlier, and verify that a
+ * full BUFFER_SIZE payload comes out (i.e. the 4-byte length prefix of
+ * the old BINARY subtype is stripped by the stream reader). */
+void
+test_func_sync_gridfs_stream_read_binary_subtype (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  guint8 *data;
+  gint64 r;
+  bson *meta;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test-stream-bintype", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  ok (stream != NULL,
+      "mongo_sync_gridfs_stream_find() works");
+  bson_free (meta);
+
+  data = g_malloc (BUFFER_SIZE);
+  r = mongo_sync_gridfs_stream_read (stream, data, BUFFER_SIZE);
+  cmp_ok (r, "==", BUFFER_SIZE,
+          "mongo_sync_gridfs_stream_read() works");
+  g_free (data); /* fix: the read buffer was previously leaked */
+
+  ok (mongo_sync_gridfs_stream_close (stream) == TRUE,
+      "mongo_sync_gridfs_stream_close() works");
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Check the generic file accessors against a stream handle: the _id is
+ * available, while md5, upload date and metadata are reported as
+ * absent on streams (per the expected failures below).
+ * NOTE(review): the stream is passed directly to the
+ * mongo_sync_gridfs_file_get_*() accessors -- presumably they accept
+ * both file and stream handles; confirm against the API. */
+void
+test_func_sync_gridfs_stream_meta (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  bson *meta;
+  const guint8 *id;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test-stream", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  bson_free (meta);
+
+  id = mongo_sync_gridfs_file_get_id (stream);
+  ok (id != NULL,
+      "mongo_sync_gridfs_file_get_id() works on streams");
+
+  ok (mongo_sync_gridfs_file_get_md5 (stream) == NULL,
+      "mongo_sync_gridfs_file_get_md5() fails on streams");
+  ok (mongo_sync_gridfs_file_get_date (stream) == -1,
+      "mongo_sync_gridfs_file_get_date() fails on streams");
+  ok (mongo_sync_gridfs_file_get_metadata (stream) == NULL,
+      "mongo_sync_gridfs_file_get_metadata() fails on streams");
+
+  mongo_sync_gridfs_stream_close (stream);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* mongo_sync_gridfs_stream_find() must reject the files whose metadata
+ * was deliberately broken by the write-invalid test (non-OID _id, bad
+ * length / chunkSize types); finding a chunk-less file succeeds but
+ * reading from it must fail. */
+void
+test_func_sync_gridfs_stream_read_invalid (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  guint8 data[1245];
+  gint64 r;
+  bson *meta;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+  /* ---- */
+  meta = bson_build (BSON_TYPE_STRING, "my-id", "stream:string-id", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  ok (stream == NULL,
+      "mongo_sync_gridfs_stream_find() should fail if _id is non-OID");
+  bson_free (meta);
+
+  /* ---- */
+  meta = bson_build (BSON_TYPE_STRING, "my-id", "stream:invalid-length", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  ok (stream == NULL,
+      "mongo_sync_gridfs_stream_find() should fail with invalid metadata");
+  bson_free (meta);
+
+  /* ---- */
+  meta = bson_build (BSON_TYPE_STRING, "my-id", "stream:invalid-chunkSize", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  ok (stream == NULL,
+      "mongo_sync_gridfs_stream_find() should fail with invalid metadata");
+  bson_free (meta);
+
+  /* no-chunk test */
+  meta = bson_build (BSON_TYPE_STRING, "my-id", "stream:no-chunks", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  ok (stream != NULL,
+      "mongo_sync_gridfs_stream_find() works [stream:no-chunks]");
+  bson_free (meta);
+
+  r = mongo_sync_gridfs_stream_read (stream, data, sizeof (data));
+  cmp_ok (r, "==", -1,
+          "Reading from a chunk-less file should fail");
+
+  mongo_sync_gridfs_stream_close (stream);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Exercise mongo_sync_gridfs_stream_seek() with all three whence modes
+ * (SEEK_END, SEEK_SET, SEEK_CUR), peeking at the stream's internal
+ * file.offset to verify the position, and finally re-read the first
+ * 300 KiB to prove the rewind actually took effect. */
+void
+test_func_sync_gridfs_stream_seek (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  bson *meta;
+  guint8 *chunk1, *chunk2, *chunk3;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test-stream", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  bson_free (meta);
+
+  chunk1 = g_malloc (300 * 1024);
+  chunk2 = g_malloc (300 * 1024);
+  chunk3 = g_malloc (300 * 1024);
+
+  /* The write test filled each 64 KiB slice with a different byte, so
+   * two consecutive 300 KiB reads must differ. */
+  cmp_ok (mongo_sync_gridfs_stream_read (stream, chunk1, 300 * 1024), "==",
+          300 * 1024,
+          "reading the first chunk works");
+  cmp_ok (mongo_sync_gridfs_stream_read (stream, chunk2, 300 * 1024), "==",
+          300 * 1024,
+          "reading the second chunk works");
+  ok (memcmp (chunk1, chunk2, 300 * 1024) != 0,
+      "The two chunks differ, as they should");
+
+  ok (mongo_sync_gridfs_stream_seek (stream, 0, SEEK_END) == TRUE,
+      "mongo_sync_gridfs_stream_seek() works, with SEEK_END");
+  cmp_ok (stream->file.offset, "==", stream->file.length,
+          "mongo_sync_gridfs_stream_seek() can seek to the end");
+
+  ok (mongo_sync_gridfs_stream_seek (stream, 1, SEEK_SET) == TRUE,
+      "mongo_sync_gridfs_stream_seek() works, with SEEK_SET");
+  cmp_ok (stream->file.offset, "==", 1,
+          "mongo_sync_gridfs_stream_seek()'s SEEK_SET works");
+  ok (mongo_sync_gridfs_stream_seek (stream, 1, SEEK_SET) == TRUE,
+      "mongo_sync_gridfs_stream_seek() works, with SEEK_SET");
+
+  ok (mongo_sync_gridfs_stream_seek (stream, -1, SEEK_CUR) == TRUE,
+      "mongo_sync_gridfs_stream_seek() works, with SEEK_CUR");
+  cmp_ok (stream->file.offset, "==", 0,
+          "mongo_sync_gridfs_stream_seek()'s SEEK_CUR works");
+  ok (mongo_sync_gridfs_stream_seek (stream, 0, SEEK_CUR) == TRUE,
+      "mongo_sync_gridfs_stream_seek() works, with SEEK_CUR");
+
+  /* After seeking back to offset 0, the data must match the very first
+   * read. */
+  cmp_ok (mongo_sync_gridfs_stream_read (stream, chunk3, 300 * 1024), "==",
+          300 * 1024,
+          "reading after seeking works");
+
+  ok (memcmp (chunk1, chunk3, 300 * 1024) == 0,
+      "After seeking, we're at the beginning");
+
+  mongo_sync_gridfs_stream_close (stream);
+  g_free (chunk3);
+  g_free (chunk2);
+  g_free (chunk1);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Seeking inside the chunk-less file created by the write-invalid test
+ * must fail: there is no data to position into. */
+void
+test_func_sync_gridfs_stream_seek_invalid (void)
+{
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  bson *q;
+
+  gfs = mongo_sync_gridfs_new
+    (mongo_sync_connect (config.primary_host, config.primary_port, FALSE),
+     config.gfs_prefix);
+
+  q = bson_build (BSON_TYPE_STRING, "my-id", "stream:no-chunks", -1,
+                  BSON_TYPE_NONE);
+  bson_finish (q);
+  stream = mongo_sync_gridfs_stream_find (gfs, q);
+  bson_free (q);
+
+  ok (mongo_sync_gridfs_stream_seek (stream, 1, SEEK_SET) == FALSE,
+      "mongo_sync_gridfs_stream_seek() should fail with no chunks");
+
+  mongo_sync_gridfs_stream_close (stream);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Test driver for the GridFS stream API.  Order matters: the
+ * no-oid-init check must run before mongo_util_oid_init(), and the
+ * write tests must precede the read/seek/meta tests, which consume the
+ * files (and the write_md5 checksum) created by them. */
+void
+test_func_sync_gridfs_stream (void)
+{
+  test_func_sync_gridfs_stream_without_oid_init ();
+
+  mongo_util_oid_init (0);
+
+  test_func_sync_gridfs_stream_write ();
+  test_func_sync_gridfs_stream_write_binary_subtype ();
+  test_func_sync_gridfs_stream_write_invalid ();
+  test_func_sync_gridfs_stream_read ();
+  test_func_sync_gridfs_stream_read_binary_subtype ();
+  test_func_sync_gridfs_stream_read_invalid ();
+  test_func_sync_gridfs_stream_seek ();
+  test_func_sync_gridfs_stream_seek_invalid ();
+  test_func_sync_gridfs_stream_meta ();
+
+  /* Release the MD5 recorded by the write test. */
+  g_free (write_md5);
+}
+
+RUN_NET_TEST (38, func_sync_gridfs_stream);
diff --git a/tests/func/mongo/sync-pool/f_sync_pool.c b/tests/func/mongo/sync-pool/f_sync_pool.c
new file mode 100644
index 0000000..28a2497
--- /dev/null
+++ b/tests/func/mongo/sync-pool/f_sync_pool.c
@@ -0,0 +1,169 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+
+#include "libmongo-private.h"
+
+/* Pool behaviour against a replica pair: connecting a pool straight to
+ * a secondary must fail; a mixed pool (1 master + 10 slave slots) must
+ * hand out masters and slaves separately, fall back to the master when
+ * slaves run out, and reject returned connections with an out-of-range
+ * pool_id.  The whole block is skipped when no secondary server is
+ * configured. */
+void
+test_func_mongo_sync_pool_secondary (void)
+{
+  mongo_sync_pool *pool;
+  /* conn[] is sized 11 so the NULL returned by the terminating pick
+   * fits after the 10 successful ones. */
+  mongo_sync_pool_connection *conn[11], *m, *s1, *s2, *t;
+  gint i = 0;
+  gboolean ret = TRUE;
+
+  skip (!config.secondary_host, 14,
+        "Secondary server not configured");
+
+  ok (mongo_sync_pool_new (config.secondary_host,
+                           config.secondary_port, 1, 10) == NULL,
+      "mongo_sync_pool_new() should fail when connecting to a secondary");
+
+  pool = mongo_sync_pool_new (config.primary_host,
+                              config.primary_port, 1, 10);
+  ok (pool != NULL,
+      "mongo_sync_pool_new() works with slaves too");
+
+  m = mongo_sync_pool_pick (pool, TRUE);
+  ok (m != NULL,
+      "mongo_sync_pool_pick() can pick a master from a mixed pool");
+  ok (mongo_sync_pool_pick (pool, TRUE) == NULL,
+      "mongo_sync_pool_pick() should fail if there are no more masters, and "
+      "a master was requested");
+
+  /* Drain all slave slots. */
+  while ((conn[i] = mongo_sync_pool_pick (pool, FALSE)) != NULL)
+    i++;
+  cmp_ok (i, "==", 10,
+          "Successfully connect to secondaries on 10 sockets");
+  ok (mongo_sync_pool_pick (pool, FALSE) == NULL,
+      "mongo_sync_pool_pick() should fail if there are no free connections");
+
+  ok (mongo_sync_pool_return (pool, m) == TRUE,
+      "Returning the master to the pool works");
+
+  m = mongo_sync_pool_pick (pool, FALSE);
+  ok (m != NULL,
+      "mongo_sync_pool_pick() will return a master, if no more slaves are "
+      "available");
+
+  for (i = 0; i < 10; i++)
+    ret = ret && mongo_sync_pool_return (pool, conn[i]);
+
+  ok (ret == TRUE,
+      "mongo_sync_pool_return() works when returning slaves");
+
+  mongo_sync_pool_return (pool, m);
+
+  /* Corrupt the connection's pool_id to force the ERANGE path. */
+  t = mongo_sync_pool_pick (pool, FALSE);
+  t->pool_id = 4242;
+
+  errno = 0;
+  ret = mongo_sync_pool_return (pool, t);
+  ok (ret == FALSE && errno == ERANGE,
+      "mongo_sync_pool_return() should fail if the connection ID is "
+      "out of range");
+
+  /* Test whether masters and slaves are different. */
+  m = mongo_sync_pool_pick (pool, TRUE);
+  s1 = mongo_sync_pool_pick (pool, FALSE);
+  s2 = mongo_sync_pool_pick (pool, FALSE);
+
+  ok (m != s1 && m != s2,
+      "Picked master and slaves are different");
+
+  ok (mongo_sync_cmd_is_master ((mongo_sync_connection *)m) == TRUE,
+      "Picked master is, indeed, a master");
+  ok (mongo_sync_cmd_is_master ((mongo_sync_connection *)s1) == FALSE,
+      "Picked secondary is a secondary");
+  ok (mongo_sync_cmd_is_master ((mongo_sync_connection *)s2) == FALSE,
+      "Picked secondary is a secondary");
+
+  mongo_sync_pool_free (pool);
+
+  endskip;
+}
+
+/* Basic connection-pool coverage: creation fails for an invalid host,
+ * a 10-master pool hands out exactly 10 connections, returned
+ * connections can be re-picked, and commands work across distinct
+ * picked connections.  Finishes by running the secondary-pool tests. */
+void
+test_func_mongo_sync_pool (void)
+{
+  mongo_sync_pool *pool;
+  mongo_sync_pool_connection *conn[11], *t;
+  gint c = 0;
+  gboolean ret = TRUE;
+  bson *b;
+  mongo_packet *p;
+
+  /*
+   * First we test that connecting to an invalid host fails.
+   */
+  pool = mongo_sync_pool_new ("invalid.example.com",
+                              config.primary_port, 10, 10);
+  ok (pool == NULL,
+      "mongo_sync_pool_new() should fail with an invalid host");
+
+  /*
+   * Next, we test whether the basics work, like connecting, picking
+   * & returning.
+   */
+
+  pool = mongo_sync_pool_new (config.primary_host,
+                              config.primary_port,
+                              10, 0);
+
+  ok (pool != NULL,
+      "mongo_sync_pool_new() works");
+
+  while ((conn[c] = mongo_sync_pool_pick (pool, TRUE)) != NULL)
+    c++;
+  cmp_ok (c, "==", 10,
+          "Successfully connect to the master on 10 sockets");
+
+  /* NOTE(review): pick is expected to set errno = EAGAIN when the pool
+   * is exhausted; errno is not reset beforehand here. */
+  t = mongo_sync_pool_pick (pool, TRUE);
+  ok (t == NULL && errno == EAGAIN,
+      "Connected to the master only on 10 sockets");
+
+  for (c = 0; c < 10; c++)
+    ret = ret && mongo_sync_pool_return (pool, conn[c]);
+  ok (ret == TRUE,
+      "mongo_sync_pool_return() works");
+
+  t = mongo_sync_pool_pick (pool, TRUE);
+  ok (t != NULL,
+      "mongo_sync_pool_pick() works after returning connections");
+  mongo_sync_pool_return (pool, t);
+
+  /*
+   * Then we test whether we can perform commands on random
+   * connections.
+   */
+  conn[0] = mongo_sync_pool_pick (pool, TRUE);
+  conn[1] = mongo_sync_pool_pick (pool, TRUE);
+
+  ok (conn[0] != conn[1],
+      "Two picked connections are not the same");
+
+  b = bson_build (BSON_TYPE_STRING, "test-name", __FILE__, -1,
+                  BSON_TYPE_INT32, "i32", 1984,
+                  BSON_TYPE_NONE);
+  bson_finish (b);
+
+  ok (mongo_sync_cmd_insert ((mongo_sync_connection *)conn[0],
+                             config.ns, b, NULL) == TRUE,
+      "mongo_sync_cmd_insert() works on a picked connection");
+
+  p = mongo_sync_cmd_query ((mongo_sync_connection *)conn[1],
+                            config.ns, 0, 0, 1, b, NULL);
+  ok (p != NULL,
+      "mongo_sync_cmd_query() works on a different picked connection");
+  mongo_wire_packet_free (p);
+  bson_free (b); /* fix: the test document was previously leaked */
+
+  mongo_sync_pool_free (pool);
+
+  /*
+   * Test pools with a secondary aswell.
+   */
+  test_func_mongo_sync_pool_secondary ();
+}
+
+RUN_NET_TEST (23, func_mongo_sync_pool);
diff --git a/tests/func/mongo/sync/f_sync_auto_reauth.c b/tests/func/mongo/sync/f_sync_auto_reauth.c
new file mode 100644
index 0000000..477dd25
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_auto_reauth.c
@@ -0,0 +1,58 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <sys/socket.h>
+
+#include "libmongo-private.h"
+
+
+/*
+ * This test requires that the "lmcUser" user (password "lmcPass") has
+ * RW access to the test db. It must be set up prior to running this
+ * test.
+ */
+void
+test_func_mongo_sync_auto_reauth (void)
+{
+ mongo_sync_connection *conn;
+ bson *b;
+
+ /* Marker document used for every insert in this test. */
+ b = bson_new ();
+ bson_append_int32 (b, "f_sync_auto_reauth", 1);
+ bson_finish (b);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE);
+
+ /* Safe mode makes inserts report failure, so auth errors are visible. */
+ mongo_sync_conn_set_safe_mode (conn, TRUE);
+
+ /* If an unauthenticated insert succeeds, auth is not enabled: skip. */
+ skip (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE, 3,
+ "Authentication not configured.");
+
+ skip (mongo_sync_cmd_authenticate (conn, config.db, "lmcUser", "lmcPass")== FALSE, 3,
+ "Authentication environment not set up for testing.");
+
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE,
+ "Inserting works after authentication.");
+
+ /* Break the underlying socket to simulate a dropped connection. */
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (1);
+
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == FALSE,
+ "Inserting fails with auto-reconnect turned off, and a broken "
+ "connection");
+
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+ /* The reconnect path must also re-run authentication automatically. */
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE,
+ "Inserting works with auto-reconnect turned on, and auto-auth, "
+ "and a broken connection.");
+
+ endskip;
+ endskip;
+
+ /* NOTE(review): b is never bson_free()d before returning. */
+ mongo_sync_disconnect (conn);
+}
+
+RUN_NET_TEST (3, func_mongo_sync_auto_reauth);
diff --git a/tests/func/mongo/sync/f_sync_auto_reconnect.c b/tests/func/mongo/sync/f_sync_auto_reconnect.c
new file mode 100644
index 0000000..45ec28d
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_auto_reconnect.c
@@ -0,0 +1,61 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <sys/socket.h>
+
+#include "libmongo-private.h"
+
+/* Verify that auto-reconnect can be toggled and takes effect for both
+ inserts and queries across a deliberately broken socket. */
+void
+test_func_mongo_sync_auto_reconnect (void)
+{
+ mongo_sync_connection *conn;
+ bson *b;
+ mongo_packet *p;
+
+ b = bson_new ();
+ bson_append_int32 (b, "f_sync_auto_reconnect", 1);
+ bson_finish (b);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE);
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE);
+
+ /* Break the socket; the library must notice on the next operation. */
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (1);
+
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == FALSE,
+ "Inserting fails with auto-reconnect turned off, and a broken "
+ "connection");
+
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE,
+ "Inserting works with auto-reconnect turned on, and a broken "
+ "connection");
+
+ /* Turning the flag back off must disable the recovery behaviour. */
+ mongo_sync_conn_set_auto_reconnect (conn, FALSE);
+
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (1);
+
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == FALSE,
+ "Turning off auto-reconnect works");
+
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (1);
+
+ p = mongo_sync_cmd_query (conn, config.ns, 0, 0, 1, b, NULL);
+ ok (p == NULL,
+ "Query fails with auto-reconnect turned off");
+
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+ p = mongo_sync_cmd_query (conn, config.ns, 0, 0, 1, b, NULL);
+ ok (p != NULL,
+ "Query does reconnect with auto-reconnect turned on");
+ mongo_wire_packet_free (p);
+
+ /* NOTE(review): b is never bson_free()d before returning. */
+ mongo_sync_disconnect (conn);
+}
+
+RUN_NET_TEST (6, func_mongo_sync_auto_reconnect);
diff --git a/tests/func/mongo/sync/f_sync_auto_reconnect_cache.c b/tests/func/mongo/sync/f_sync_auto_reconnect_cache.c
new file mode 100644
index 0000000..d69ea5d
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_auto_reconnect_cache.c
@@ -0,0 +1,107 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <sys/socket.h>
+
+#include "libmongo-private.h"
+
+/* Same auto-reconnect scenario as f_sync_auto_reconnect, but driven
+ through a connection recovery cache, also checking last-error state
+ and that the cache is (re)populated on disconnect. */
+void
+test_func_mongo_sync_auto_reconnect_cache (void)
+{
+ mongo_sync_conn_recovery_cache *cache;
+ mongo_sync_connection *conn;
+ bson *b;
+ mongo_packet *p;
+ gchar *primary_addr;
+ const gchar *error_msg;
+
+ /* NOTE(review): primary_addr is built and freed but never otherwise
+ used in this test; it looks like leftover scaffolding. */
+ primary_addr = g_strdup_printf ("%s:%d", config.primary_host, config.primary_port);
+
+ b = bson_new ();
+ bson_append_int32 (b, "f_sync_auto_reconnect", 1);
+ bson_finish (b);
+
+ cache = mongo_sync_conn_recovery_cache_new ();
+
+ mongo_sync_conn_recovery_cache_seed_add (cache,
+ config.primary_host,
+ config.primary_port);
+
+ conn = mongo_sync_connect_recovery_cache (cache,
+ TRUE);
+
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE);
+
+ /* Break the socket to force the failure / recovery paths below. */
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (1);
+
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == FALSE,
+ "Inserting fails with auto-reconnect turned off, and a broken "
+ "connection");
+
+ error_msg = mongo_sync_conn_get_last_error (conn);
+
+ ok (error_msg != NULL, "We have an error msg when insert fails.");
+
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE,
+ "Inserting works with auto-reconnect turned on, and a broken "
+ "connection");
+
+ error_msg = mongo_sync_conn_get_last_error (conn);
+
+ ok (error_msg == NULL,
+ "After a succesful insert we shouldn't have an error msg.");
+
+ mongo_sync_conn_set_auto_reconnect (conn, FALSE);
+
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (1);
+
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == FALSE,
+ "Turning off auto-reconnect works");
+
+ /* The remaining checks need a replica set with a secondary. */
+ skip (!config.secondary_host, 7,
+ "Secondary host not set up");
+
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (1);
+
+ p = mongo_sync_cmd_query (conn, config.ns, 0, 0, 1, b, NULL);
+ ok (p == NULL,
+ "Query fails with auto-reconnect turned off");
+
+ error_msg = mongo_sync_conn_get_last_error(conn);
+ ok (error_msg != NULL, "We have an error msg after a failure query.");
+
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+ p = mongo_sync_cmd_query (conn, config.ns, 0, 0, 1, b, NULL);
+ ok (p != NULL,
+ "Query does reconnect with auto-reconnect turned on");
+
+ ok (mongo_sync_conn_get_last_error(conn) == NULL,
+ "We shouldn't have any error messages after a successful operation.");
+
+ mongo_wire_packet_free (p);
+
+ /* is_master discovery should fill in the replica-set host list. */
+ mongo_sync_cmd_is_master (conn);
+
+ ok (conn->rs.hosts != NULL,
+ "We have hosts in the connection's replica set.");
+
+ ok (cache->rs.hosts == NULL, "Cache is empty.");
+
+ mongo_sync_disconnect (conn);
+
+ /* Disconnect is expected to save the discovered hosts into the cache. */
+ ok (cache->rs.hosts != NULL, "Cache is filled by disconnect()");
+
+ mongo_sync_conn_recovery_cache_free (cache);
+
+ endskip;
+
+ g_free (primary_addr);
+}
+
+RUN_NET_TEST (13, func_mongo_sync_auto_reconnect_cache);
diff --git a/tests/func/mongo/sync/f_sync_conn_seed_add.c b/tests/func/mongo/sync/f_sync_conn_seed_add.c
new file mode 100644
index 0000000..03bcdd2
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_conn_seed_add.c
@@ -0,0 +1,58 @@
+#include "test.h"
+#include <mongo.h>
+
+#include "libmongo-private.h"
+
+/* Verify that mongo_sync_reconnect() fails when a connection has neither
+ discovered hosts nor seeds, and succeeds once a seed is added with
+ mongo_sync_conn_seed_add(). */
+void
+test_func_mongo_sync_conn_seed_add (void)
+{
+ mongo_sync_connection *conn;
+ GList *l;
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ FALSE);
+ /* Kill the fd so reconnect cannot just keep using the live socket. */
+ close (conn->super.fd);
+
+ /* Strip the discovered host list... */
+ l = conn->rs.hosts;
+ while (l)
+ {
+ g_free (l->data);
+ l = g_list_delete_link (l, l);
+ }
+ conn->rs.hosts = NULL;
+
+ /* ...and the seed list, leaving no way to recover. */
+ l = conn->rs.seeds;
+ while (l)
+ {
+ g_free (l->data);
+ l = g_list_delete_link (l, l);
+ }
+ conn->rs.seeds = NULL;
+
+ conn = mongo_sync_reconnect (conn, TRUE);
+ ok (conn == NULL,
+ "mongo_sync_reconnect() fails without seeds or discovery");
+
+ /* Fresh connection; this time clear only the hosts, then seed it. */
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ FALSE);
+ close (conn->super.fd);
+ l = conn->rs.hosts;
+ while (l)
+ {
+ g_free (l->data);
+ l = g_list_delete_link (l, l);
+ }
+ conn->rs.hosts = NULL;
+
+ ok (mongo_sync_conn_seed_add (conn, config.primary_host,
+ config.primary_port),
+ "mongo_sync_conn_seed_add() works");
+
+ conn = mongo_sync_reconnect (conn, TRUE);
+ ok (conn != NULL,
+ "mongo_sync_reconnect() works when properly seeded");
+
+ mongo_sync_disconnect (conn);
+}
+
+RUN_NET_TEST (3, func_mongo_sync_conn_seed_add);
diff --git a/tests/func/mongo/sync/f_sync_invalid_getlasterror.c b/tests/func/mongo/sync/f_sync_invalid_getlasterror.c
new file mode 100644
index 0000000..6af227b
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_invalid_getlasterror.c
@@ -0,0 +1,27 @@
+#include "test.h"
+#include <mongo.h>
+#include <errno.h>
+
+/* getLastError() against an over-long (invalid) database name must fail
+ and must leave the caller-supplied error pointer untouched. */
+void
+test_func_mongo_sync_invalid_getlasterror (void)
+{
+ mongo_sync_connection *conn;
+ gchar *error = NULL;
+ gboolean res;
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE);
+
+ /* The DB name is deliberately longer than any valid database name. */
+ res = mongo_sync_cmd_get_last_error
+ (conn, "1234567890123456789012345678901234567890123456789012345678901234567890",
+ &error);
+
+ ok (res == FALSE,
+ "Trying to get the last error from an invalid DB results in an error.");
+ ok (error == NULL,
+ "When getLastError() fails, error remains NULL");
+
+ mongo_sync_disconnect (conn);
+}
+
+RUN_NET_TEST (2, func_mongo_sync_invalid_getlasterror);
diff --git a/tests/func/mongo/sync/f_sync_max_insert_size.c b/tests/func/mongo/sync/f_sync_max_insert_size.c
new file mode 100644
index 0000000..9ea5854
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_max_insert_size.c
@@ -0,0 +1,69 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+
+#include "libmongo-private.h"
+
+/* Verify the max_insert_size limit: bulk inserts are split into chunks
+ that fit, but any single document exceeding the limit makes the whole
+ call fail with EMSGSIZE. Covers both cmd_insert_n() and cmd_insert(). */
+void
+test_func_mongo_sync_max_insert_size (void)
+{
+ mongo_sync_connection *conn;
+ const bson *docs[10];
+ bson *b1, *b2, *b3;
+
+ b1 = bson_new ();
+ bson_append_string (b1, "func_mongo_sync_max_insert_size", "works", -1);
+
+ bson_finish (b1);
+ b2 = bson_new ();
+ bson_append_int32 (b2, "int32", 1984);
+ bson_finish (b2);
+ /* b3 is an empty document - the smallest possible BSON. */
+ b3 = bson_new ();
+ bson_finish (b3);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ FALSE);
+
+ /*
+ * cmd_insert_n()
+ */
+ /* Limit fits b1+b3 but not all three docs at once: forces chunking. */
+ mongo_sync_conn_set_max_insert_size (conn, bson_size (b1) +
+ bson_size (b3) + 1);
+
+ docs[0] = b1;
+ docs[1] = b2;
+ docs[2] = b3;
+
+ ok (mongo_sync_cmd_insert_n (conn, config.ns, 3, docs) == TRUE,
+ "mongo_sync_cmd_insert_n() works with a small max_insert_size");
+
+ /* A 1-byte limit is smaller than any document: must fail outright. */
+ mongo_sync_conn_set_max_insert_size (conn, 1);
+ errno = 0;
+ ok (mongo_sync_cmd_insert_n (conn, config.ns, 3, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() should fail if any one document is too big");
+ cmp_ok (errno, "==", EMSGSIZE,
+ "errno is set to EMSGSIZE");
+
+ /*
+ * cmd_insert()
+ */
+ mongo_sync_conn_set_max_insert_size (conn, bson_size (b1) +
+ bson_size (b3) + 1);
+ ok (mongo_sync_cmd_insert (conn, config.ns, b1, b2, b3, NULL) == TRUE,
+ "mongo_sync_cmd_insert() works with a small max_insert_size");
+
+ mongo_sync_conn_set_max_insert_size (conn, 1);
+ errno = 0;
+ ok (mongo_sync_cmd_insert (conn, config.ns, b1, b2, b3, NULL) == FALSE,
+ "mongo_sync_cmd_insert() should fail if any one document is too big");
+ cmp_ok (errno, "==", EMSGSIZE,
+ "errno is set to EMSGSIZE");
+
+ mongo_sync_disconnect (conn);
+ bson_free (b1);
+ bson_free (b2);
+ bson_free (b3);
+}
+
+RUN_NET_TEST (6, func_mongo_sync_max_insert_size);
diff --git a/tests/func/mongo/sync/f_sync_oidtest.c b/tests/func/mongo/sync/f_sync_oidtest.c
new file mode 100644
index 0000000..2a64692
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_oidtest.c
@@ -0,0 +1,44 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <string.h>
+
+/* Round-trip a locally generated ObjectId through the server's
+ driverOIDTest command and check it comes back unchanged. */
+void
+test_func_mongo_sync_oidtest (void)
+{
+ mongo_sync_connection *conn;
+ bson *boid, *reply = NULL;
+ bson_cursor *c;
+ mongo_packet *p;
+ guint8 *oid;
+ const guint8 *noid;
+
+ mongo_util_oid_init (0);
+
+ /* NOTE(review): oid is heap-allocated by mongo_util_oid_new() and
+ never freed in this test. */
+ oid = mongo_util_oid_new (1);
+ boid = bson_new ();
+ bson_append_oid (boid, "driverOIDTest", oid);
+ bson_finish (boid);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ FALSE);
+
+ p = mongo_sync_cmd_custom (conn, config.db, boid);
+ ok (p != NULL,
+ "driverOIDTest(OID) custom command works");
+ mongo_wire_reply_packet_get_nth_document (p, 1, &reply);
+ bson_finish (reply);
+
+ /* The server echoes the OID back under the "oid" key. */
+ c = bson_find (reply, "oid");
+ bson_cursor_get_oid (c, &noid);
+ ok (memcmp (oid, noid, 12) == 0,
+ "driverOIDTest(OID) returns the same OID");
+ bson_cursor_free (c);
+
+ mongo_sync_disconnect (conn);
+ mongo_wire_packet_free (p);
+ bson_free (boid);
+ bson_free (reply);
+}
+
+RUN_NET_TEST (2, func_mongo_sync_oidtest);
diff --git a/tests/func/mongo/sync/f_sync_safe_mode.c b/tests/func/mongo/sync/f_sync_safe_mode.c
new file mode 100644
index 0000000..e312c2f
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_safe_mode.c
@@ -0,0 +1,112 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <string.h>
+
+#include "libmongo-private.h"
+
+/* Safe mode basics: with safe mode on, broken documents and bogus
+ commands must be reported as failures; with it off, errors pass
+ silently (inserts) or are only visible via getLastError(). */
+void
+test_func_mongo_sync_safe_mode_basics (void)
+{
+ mongo_sync_connection *conn;
+ const bson *docs[10];
+ bson *b1, *b2, *b3, *b4, *cmd;
+ mongo_packet *p;
+ gchar *error;
+
+ mongo_util_oid_init (0);
+
+ b1 = bson_new ();
+ bson_append_string (b1, "func_mongo_sync_safe_mode", "works", -1);
+ bson_finish (b1);
+
+ b2 = bson_new ();
+ bson_append_int32 (b2, "int32", 1984);
+ bson_finish (b2);
+
+ /* b3/b4 exercise every BSON type, including ones the server rejects. */
+ b3 = test_bson_generate_full ();
+ b4 = test_bson_generate_full ();
+
+ docs[0] = b1;
+ docs[1] = b2;
+ docs[2] = b3;
+ docs[3] = b4;
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ FALSE);
+
+ /* Test inserts */
+ mongo_sync_conn_set_safe_mode (conn, FALSE);
+ ok (mongo_sync_cmd_insert_n (conn, config.ns, 4, docs) == TRUE,
+ "mongo_sync_cmd_insert_n() should not fail with safe mode off");
+
+ mongo_sync_conn_set_safe_mode (conn, TRUE);
+ ok (mongo_sync_cmd_insert_n (conn, config.ns, 4, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() should fail with safe mode on");
+
+ /* Test a custom command */
+ cmd = bson_new ();
+ bson_append_int32 (cmd, "bogusCommand", 1);
+ bson_finish (cmd);
+
+ mongo_sync_cmd_reset_error (conn, config.db);
+ mongo_sync_conn_set_safe_mode (conn, FALSE);
+ p = mongo_sync_cmd_custom (conn, config.db, cmd);
+ mongo_sync_cmd_get_last_error (conn, config.db, &error);
+ ok (p == NULL && strcmp (error, "no such cmd: bogusCommand") == 0,
+ "mongo_sync_cmd_custom() with a bogus command fails with safe-mode off");
+ bson_free (cmd);
+
+ /* NOTE(review): error from get_last_error() is not g_free()d here,
+ unlike in the _cache variant of this test. */
+ cmd = bson_new ();
+ bson_append_int32 (cmd, "bogusCommand2", 1);
+ bson_finish (cmd);
+ mongo_sync_cmd_reset_error (conn, config.db);
+ mongo_sync_conn_set_safe_mode (conn, TRUE);
+ p = mongo_sync_cmd_custom (conn, config.db, cmd);
+ mongo_sync_cmd_get_last_error (conn, config.db, &error);
+ ok (p == NULL && strcmp (error, "no such cmd: bogusCommand2") == 0,
+ "mongo_sync_cmd_custom() with a bogus command fails with safe-mode on");
+ bson_free (cmd);
+
+ mongo_sync_disconnect (conn);
+ bson_free (b1);
+ bson_free (b2);
+ bson_free (b3);
+ bson_free (b4);
+}
+
+#define INVALID_NS "1234567890123456789012345678901234567890123456789012345678901234567890.test"
+
+/* With safe mode on, inserting into a namespace whose DB part is too
+ long (INVALID_NS) must be reported as a failure. */
+void
+test_func_mongo_sync_safe_mode_invalid_db (void)
+{
+ mongo_sync_connection *conn;
+ bson *b;
+ const bson *docs[1];
+
+ b = bson_new ();
+ bson_append_int32 (b, "int32", 1984);
+ bson_finish (b);
+
+ docs[0] = b;
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE);
+ mongo_sync_conn_set_safe_mode (conn, TRUE);
+
+ ok (mongo_sync_cmd_insert_n (conn, INVALID_NS, 1, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() should fail with safe mode on and an invalid NS");
+
+ mongo_sync_disconnect (conn);
+ bson_free (b);
+}
+
+/* Entry point: runs both safe-mode sub-tests (5 checks total). */
+void
+test_func_mongo_sync_safe_mode (void)
+{
+ test_func_mongo_sync_safe_mode_basics ();
+ test_func_mongo_sync_safe_mode_invalid_db ();
+}
+
+RUN_NET_TEST (5, func_mongo_sync_safe_mode);
diff --git a/tests/func/mongo/sync/f_sync_safe_mode_cache.c b/tests/func/mongo/sync/f_sync_safe_mode_cache.c
new file mode 100644
index 0000000..082617f
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_safe_mode_cache.c
@@ -0,0 +1,131 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <string.h>
+
+#include "libmongo-private.h"
+
+/* Same checks as test_func_mongo_sync_safe_mode_basics, but the
+ connection is established through a recovery cache. */
+void
+test_func_mongo_sync_safe_mode_basics_cache (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_conn_recovery_cache *cache;
+
+ const bson *docs[10];
+ bson *b1, *b2, *b3, *b4, *cmd;
+ mongo_packet *p;
+ gchar *error;
+
+ mongo_util_oid_init (0);
+
+ b1 = bson_new ();
+ bson_append_string (b1, "func_mongo_sync_safe_mode", "works", -1);
+ bson_finish (b1);
+
+ b2 = bson_new ();
+ bson_append_int32 (b2, "int32", 1984);
+ bson_finish (b2);
+
+ /* b3/b4 exercise every BSON type, including ones the server rejects. */
+ b3 = test_bson_generate_full ();
+ b4 = test_bson_generate_full ();
+
+ docs[0] = b1;
+ docs[1] = b2;
+ docs[2] = b3;
+ docs[3] = b4;
+
+ cache = mongo_sync_conn_recovery_cache_new ();
+
+ mongo_sync_conn_recovery_cache_seed_add (cache,
+ config.primary_host,
+ config.primary_port);
+
+ conn = mongo_sync_connect_recovery_cache (cache, FALSE);
+
+ /* Test inserts */
+ mongo_sync_conn_set_safe_mode (conn, FALSE);
+ ok (mongo_sync_cmd_insert_n (conn, config.ns, 4, docs) == TRUE,
+ "mongo_sync_cmd_insert_n() should not fail with safe mode off");
+
+ mongo_sync_conn_set_safe_mode (conn, TRUE);
+ ok (mongo_sync_cmd_insert_n (conn, config.ns, 4, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() should fail with safe mode on");
+
+ /* Test a custom command */
+ cmd = bson_new ();
+ bson_append_int32 (cmd, "bogusCommand", 1);
+ bson_finish (cmd);
+
+ mongo_sync_cmd_reset_error (conn, config.db);
+ mongo_sync_conn_set_safe_mode (conn, FALSE);
+ p = mongo_sync_cmd_custom (conn, config.db, cmd);
+ mongo_sync_cmd_get_last_error (conn, config.db, &error);
+ ok (p == NULL && strcmp (error, "no such cmd: bogusCommand") == 0,
+ "mongo_sync_cmd_custom() with a bogus command fails with safe-mode off");
+ bson_free (cmd);
+ g_free (error);
+
+ cmd = bson_new ();
+ bson_append_int32 (cmd, "bogusCommand2", 1);
+ bson_finish (cmd);
+ mongo_sync_cmd_reset_error (conn, config.db);
+ mongo_sync_conn_set_safe_mode (conn, TRUE);
+ p = mongo_sync_cmd_custom (conn, config.db, cmd);
+ mongo_sync_cmd_get_last_error (conn, config.db, &error);
+ ok (p == NULL && strcmp (error, "no such cmd: bogusCommand2") == 0,
+ "mongo_sync_cmd_custom() with a bogus command fails with safe-mode on");
+ bson_free (cmd);
+ g_free (error);
+
+ mongo_sync_disconnect (conn);
+ mongo_sync_conn_recovery_cache_free (cache);
+
+ bson_free (b1);
+ bson_free (b2);
+ bson_free (b3);
+ bson_free (b4);
+}
+
+#define INVALID_NS "1234567890123456789012345678901234567890123456789012345678901234567890.test"
+
+/* Cache-based variant of the invalid-namespace safe-mode check. */
+void
+test_func_mongo_sync_safe_mode_invalid_db_cache (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_conn_recovery_cache *cache;
+ bson *b;
+ const bson *docs[1];
+
+ b = bson_new ();
+ bson_append_int32 (b, "int32", 1984);
+ bson_finish (b);
+
+ docs[0] = b;
+
+ cache = mongo_sync_conn_recovery_cache_new ();
+
+ mongo_sync_conn_recovery_cache_seed_add (cache,
+ config.primary_host,
+ config.primary_port);
+
+ conn = mongo_sync_connect_recovery_cache (cache, TRUE);
+
+ mongo_sync_conn_set_safe_mode (conn, TRUE);
+
+ ok (mongo_sync_cmd_insert_n (conn, INVALID_NS, 1, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() should fail with safe mode on and an invalid NS");
+
+ mongo_sync_disconnect (conn);
+ mongo_sync_conn_recovery_cache_free (cache);
+ bson_free (b);
+}
+
+/* Entry point: runs both cache-based safe-mode sub-tests (5 checks). */
+void
+test_func_mongo_sync_safe_mode_cache (void)
+{
+ test_func_mongo_sync_safe_mode_basics_cache ();
+ test_func_mongo_sync_safe_mode_invalid_db_cache ();
+}
+
+RUN_NET_TEST (5, func_mongo_sync_safe_mode_cache);
diff --git a/tests/func/mongo/sync/f_sync_write_error.c b/tests/func/mongo/sync/f_sync_write_error.c
new file mode 100644
index 0000000..b6d4750
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_write_error.c
@@ -0,0 +1,52 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <sys/socket.h>
+
+#include "libmongo-private.h"
+
+#define INVALID_NS "test.$Uncle$.Dagobert$"
+
+/* Write-error reporting: with safe mode off, even writes to an invalid
+ namespace "succeed" and leave no last-error; with safe mode on, the
+ failure is reported and a last-error message becomes available. */
+void
+test_func_mongo_sync_write_error (void)
+{
+ mongo_sync_connection *conn;
+ bson *b;
+ const gchar *error_msg;
+
+ b = bson_new ();
+ bson_append_int32 (b, "f_sync_write_error", 1);
+ bson_finish (b);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE);
+
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE,
+ "Inserting works with correct namespace when safe mode is off");
+
+ /* Fire-and-forget write: the server error is never seen client-side. */
+ ok (mongo_sync_cmd_insert (conn, INVALID_NS, b, NULL) == TRUE,
+ "Inserting works with invalid namespace when safe mode is off");
+
+ error_msg = mongo_sync_conn_get_last_error (conn);
+ ok (error_msg == NULL,
+ "When safe mode is off, there is no error msg, even if ns is invalid.");
+
+ ok (mongo_sync_conn_set_safe_mode (conn, TRUE) == TRUE,
+ "Setting safe mode works.");
+
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE,
+ "Inserting works with correct namespace when safe mode is on");
+
+ ok (mongo_sync_cmd_insert (conn, INVALID_NS, b, NULL) == FALSE,
+ "Inserting fails with invalid namespace when safe mode is on");
+
+ error_msg = mongo_sync_conn_get_last_error (conn);
+
+ ok (error_msg != NULL,
+ "Inserting failed in safe mode, so we should have an error msg");
+
+ /* NOTE(review): b is never bson_free()d before returning. */
+ mongo_sync_disconnect (conn);
+}
+
+RUN_NET_TEST (7, func_mongo_sync_write_error);
diff --git a/tests/libtap/Makefile.am b/tests/libtap/Makefile.am
new file mode 100644
index 0000000..271ade2
--- /dev/null
+++ b/tests/libtap/Makefile.am
@@ -0,0 +1,4 @@
+check_LTLIBRARIES = libtap.la
+libtap_la_SOURCES = tap.c tap.h test.h test.c
+libtap_la_CFLAGS = -I$(top_srcdir)/src/ @GLIB_CFLAGS@
+libtap_la_LIBADD = $(top_builddir)/src/libmongo-client.la @GLIB_LIBS@
diff --git a/tests/libtap/tap.c b/tests/libtap/tap.c
new file mode 100644
index 0000000..e73ae4a
--- /dev/null
+++ b/tests/libtap/tap.c
@@ -0,0 +1,298 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <glib.h>
+#include "tap.h"
+
+static int expected_tests = NO_PLAN;
+static int failed_tests;
+static int current_test;
+static char *todo_mesg;
+
+/* Declare the TAP plan. With NO_PLAN the "1..N" line is deferred
+ until exit_status() prints it from the final test count. */
+void
+plan (int tests) {
+ expected_tests = tests;
+ if (tests != NO_PLAN)
+ printf("1..%d\n", tests);
+}
+
+/* printf-style strdup into a freshly malloc()ed buffer.
+ Fixes: the original wrote with unbounded vsprintf() and never checked
+ malloc()'s result; use the already-measured size with g_vsnprintf()
+ (same formatter used for sizing, so the count is consistent) and bail
+ out on allocation failure instead of dereferencing NULL. */
+static char *
+vstrdupf (const char *fmt, va_list args) {
+ char *str;
+ int size;
+ va_list args2;
+ va_copy(args2, args);
+ if (!fmt)
+ fmt = "";
+ size = g_vsnprintf(NULL, 0, fmt, args2) + 2;
+ str = malloc(size);
+ if (!str) {
+ perror("malloc error");
+ exit(255);
+ }
+ g_vsnprintf(str, size, fmt, args);
+ va_end(args2);
+ return str;
+}
+
+/* Core TAP assertion: prints "ok"/"not ok", the test number, an optional
+ name, and a TODO marker; emits failure diagnostics and counts real
+ (non-TODO) failures. Returns the test result unchanged. */
+int
+vok_at_loc (const char *file, int line, int test, const char *fmt,
+ va_list args)
+{
+ char *name = vstrdupf(fmt, args);
+ printf("%sok %d", test ? "" : "not ", ++current_test);
+ if (*name)
+ printf(" - %s", name);
+ if (todo_mesg) {
+ printf(" # TODO");
+ if (*todo_mesg)
+ printf(" %s", todo_mesg);
+ }
+ printf("\n");
+ if (!test) {
+ if (*name)
+ diag(" Failed%s test '%s'\n at %s line %d.",
+ todo_mesg ? " (TODO)" : "", name, file, line);
+ else
+ diag(" Failed%s test at %s line %d.",
+ todo_mesg ? " (TODO)" : "", file, line);
+ /* TODO-marked failures are expected and don't fail the run. */
+ if (!todo_mesg)
+ failed_tests++;
+ }
+ free(name);
+ return test;
+}
+
+/* Varargs front-end for vok_at_loc(); backs the ok() macro. */
+int
+ok_at_loc (const char *file, int line, int test, const char *fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ vok_at_loc(file, line, test, fmt, args);
+ va_end(args);
+ return test;
+}
+
+/* NULL-safe strcmp: NULL sorts before any string; two NULLs are equal. */
+static int
+mystrcmp (const char *a, const char *b) {
+ return a == b ? 0 : !a ? -1 : !b ? 1 : strcmp(a, b);
+}
+
+#define eq(a, b) (!mystrcmp(a, b))
+#define ne(a, b) (mystrcmp(a, b))
+
+/* String-equality assertion backing is(): passes when got == expected
+ (NULL-safe), printing both values on failure. */
+int
+is_at_loc (const char *file, int line, const char *got, const char *expected,
+ const char *fmt, ...)
+{
+ int test = eq(got, expected);
+ va_list args;
+ va_start(args, fmt);
+ vok_at_loc(file, line, test, fmt, args);
+ va_end(args);
+ if (!test) {
+ diag(" got: '%s'", got);
+ diag(" expected: '%s'", expected);
+ }
+ return test;
+}
+
+/* String-inequality assertion backing isnt(): the mirror of is_at_loc. */
+int
+isnt_at_loc (const char *file, int line, const char *got, const char *expected,
+ const char *fmt, ...)
+{
+ int test = ne(got, expected);
+ va_list args;
+ va_start(args, fmt);
+ vok_at_loc(file, line, test, fmt, args);
+ va_end(args);
+ if (!test) {
+ diag(" got: '%s'", got);
+ diag(" expected: anything else");
+ }
+ return test;
+}
+
+/* Integer-comparison assertion backing cmp_ok(): interprets op as one of
+ the listed C operators applied to a and b; unknown operators fall
+ through to diag() (which returns 0, i.e. a failing test).
+ NOTE(review): "/" and "%" with b == 0 would divide by zero here. */
+int
+cmp_ok_at_loc (const char *file, int line, int a, const char *op, int b,
+ const char *fmt, ...)
+{
+ int test = eq(op, "||") ? a || b
+ : eq(op, "&&") ? a && b
+ : eq(op, "|") ? a | b
+ : eq(op, "^") ? a ^ b
+ : eq(op, "&") ? a & b
+ : eq(op, "==") ? a == b
+ : eq(op, "!=") ? a != b
+ : eq(op, "<") ? a < b
+ : eq(op, ">") ? a > b
+ : eq(op, "<=") ? a <= b
+ : eq(op, ">=") ? a >= b
+ : eq(op, "<<") ? a << b
+ : eq(op, ">>") ? a >> b
+ : eq(op, "+") ? a + b
+ : eq(op, "-") ? a - b
+ : eq(op, "*") ? a * b
+ : eq(op, "/") ? a / b
+ : eq(op, "%") ? a % b
+ : diag("unrecognized operator '%s'", op);
+ va_list args;
+ va_start(args, fmt);
+ vok_at_loc(file, line, test, fmt, args);
+ va_end(args);
+ if (!test) {
+ diag(" %d", a);
+ diag(" %s", op);
+ diag(" %d", b);
+ }
+ return test;
+}
+
+/* Format a message and write it to fh, prefixing every line with "# "
+ as TAP requires for diagnostics; splits on embedded newlines. */
+static void
+vdiag_to_fh (FILE *fh, const char *fmt, va_list args) {
+ char *mesg, *line;
+ int i;
+ if (!fmt)
+ return;
+ mesg = vstrdupf(fmt, args);
+ line = mesg;
+ for (i = 0; *line; i++) {
+ char c = mesg[i];
+ if (!c || c == '\n') {
+ /* Temporarily terminate the current line, print it, restore. */
+ mesg[i] = '\0';
+ fprintf(fh, "# %s\n", line);
+ if (!c) break;
+ mesg[i] = c;
+ line = &mesg[i+1];
+ }
+ }
+ free(mesg);
+ return;
+}
+
+/* Diagnostic to stderr ("# ..."); always returns 0 so it can double as
+ a failing value in expressions (see cmp_ok_at_loc). */
+int
+diag (const char *fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ vdiag_to_fh(stderr, fmt, args);
+ va_end(args);
+ return 0;
+}
+
+/* Like diag(), but to stdout so the message stays in the TAP stream. */
+int
+note (const char *fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ vdiag_to_fh(stdout, fmt, args);
+ va_end(args);
+ return 0;
+}
+
+/* Compute the process exit code per TAP conventions: 0 on success,
+ 255 for a plan/run count mismatch, otherwise (a number derived from)
+ the failure count. Also prints the deferred plan under NO_PLAN. */
+int
+exit_status () {
+ int retval = 0;
+ if (expected_tests == NO_PLAN) {
+ printf("1..%d\n", current_test);
+ }
+ else if (current_test != expected_tests) {
+ diag("Looks like you planned %d test%s but ran %d.",
+ expected_tests, expected_tests > 1 ? "s" : "", current_test);
+ retval = 255;
+ }
+ if (failed_tests) {
+ diag("Looks like you failed %d test%s of %d run.",
+ failed_tests, failed_tests > 1 ? "s" : "", current_test);
+ if (expected_tests == NO_PLAN)
+ retval = failed_tests;
+ else
+ retval = expected_tests - current_test + failed_tests;
+ }
+ return retval;
+}
+
+/* Emit n skipped tests: each prints "ok N " followed by a "# skip why"
+ note on the same output line. Backs the skip() macro. */
+void
+skippy (int n, const char *fmt, ...) {
+ char *why;
+ va_list args;
+ va_start(args, fmt);
+ why = vstrdupf(fmt, args);
+ va_end(args);
+ while (n --> 0) {
+ printf("ok %d ", ++current_test);
+ note("skip %s\n", why);
+ }
+ free(why);
+}
+
+/* Enter a TODO block: stash the reason so vok_at_loc() marks subsequent
+ tests "# TODO" and doesn't count their failures. Backs todo(). */
+void
+ctodo (int ignore __attribute__((unused)), const char *fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ todo_mesg = vstrdupf(fmt, args);
+ va_end(args);
+}
+
+/* Leave the TODO block started by ctodo(). Backs endtodo. */
+void
+cendtodo () {
+ free(todo_mesg);
+ todo_mesg = NULL;
+}
+
+#ifndef _WIN32
+#include <sys/mman.h>
+#include <regex.h>
+
+#ifdef __APPLE__
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#ifndef MAP_ANON
+#define MAP_ANON MAP_ANONYMOUS
+#endif
+
+/* Create a shared-memory int to keep track of whether a piece of executed
+code dies; to be used in the dies_ok and lives_ok macros. */
+int
+tap_test_died (int status) {
+ static int *test_died = NULL;
+ int prev;
+ if (!test_died) {
+ /* Anonymous shared mapping so parent and forked child see the
+ same flag. NOTE(review): mmap() result is not checked against
+ MAP_FAILED before the write below. */
+ test_died = (int *)mmap(0, sizeof (int), PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANON, -1, 0);
+ *test_died = 0;
+ }
+ /* Swap in the new status and report the previous one. */
+ prev = *test_died;
+ *test_died = status;
+ return prev;
+}
+
+/* Regex assertion backing like()/unlike(): compiles expected as an
+ extended regex and tests got against it; for_match selects whether a
+ match or a non-match passes. A regex that fails to compile aborts the
+ whole test program with status 255. */
+int
+like_at_loc (int for_match, const char *file, int line, const char *got,
+ const char *expected, const char *fmt, ...)
+{
+ int test;
+ regex_t re;
+ int err = regcomp(&re, expected, REG_EXTENDED);
+ if (err) {
+ char errbuf[256];
+ regerror(err, &re, errbuf, sizeof errbuf);
+ fprintf(stderr, "Unable to compile regex '%s': %s at %s line %d\n",
+ expected, errbuf, file, line);
+ exit(255);
+ }
+ err = regexec(&re, got, 0, NULL, 0);
+ regfree(&re);
+ test = for_match ? !err : err;
+ va_list args;
+ va_start(args, fmt);
+ vok_at_loc(file, line, test, fmt, args);
+ va_end(args);
+ if (!test) {
+ if (for_match) {
+ diag(" '%s'", got);
+ diag(" doesn't match: '%s'", expected);
+ }
+ else {
+ diag(" '%s'", got);
+ diag(" matches: '%s'", expected);
+ }
+ }
+ return test;
+}
diff --git a/tests/libtap/tap.h b/tests/libtap/tap.h
new file mode 100644
index 0000000..3e841bc
--- /dev/null
+++ b/tests/libtap/tap.h
@@ -0,0 +1,85 @@
+#ifndef __TAP_H__
+#define __TAP_H__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+/* Public TAP macros; each appends a NULL sentinel so the *_at_loc
+ varargs implementations can detect the end of the argument list. */
+#define NO_PLAN -1
+#define ok(...) ok_at_loc(__FILE__, __LINE__, __VA_ARGS__, NULL)
+#define pass(...) ok(1, ## __VA_ARGS__)
+#define fail(...) ok(0, ## __VA_ARGS__)
+#define is(...) is_at_loc(__FILE__, __LINE__, __VA_ARGS__, NULL)
+#define isnt(...) isnt_at_loc(__FILE__, __LINE__, __VA_ARGS__, NULL)
+#define cmp_ok(...) cmp_ok_at_loc(__FILE__, __LINE__, __VA_ARGS__, NULL)
+
+int vok_at_loc (const char *file, int line, int test, const char *fmt,
+ va_list args);
+void plan (int tests);
+int ok_at_loc (const char *file, int line, int test, const char *fmt,
+ ...);
+int diag (const char *fmt, ...);
+int note (const char *fmt, ...);
+int exit_status (void);
+void skippy (int n, const char *fmt, ...);
+void ctodo (int ignore, const char *fmt, ...);
+void cendtodo (void);
+int is_at_loc (const char *file, int line, const char *got,
+ const char *expected, const char *fmt, ...);
+int isnt_at_loc (const char *file, int line, const char *got,
+ const char *expected, const char *fmt, ...);
+int cmp_ok_at_loc (const char *file, int line, int a, const char *op,
+ int b, const char *fmt, ...);
+
+/* POSIX regex is unavailable on MSWin32; like()/unlike() degrade to a
+ single skipped test there. */
+#ifdef _WIN32
+#define like(...) skippy(1, "like is not implemented on MSWin32")
+#define unlike(...) like()
+#else
+#define like(...) like_at_loc(1, __FILE__, __LINE__, __VA_ARGS__, NULL)
+#define unlike(...) like_at_loc(0, __FILE__, __LINE__, __VA_ARGS__, NULL)
+int like_at_loc (int for_match, const char *file, int line,
+ const char *got, const char *expected,
+ const char *fmt, ...);
+#endif
+
+/* skip()/endskip deliberately form an unbalanced do{...}while(0) pair:
+ every skip() MUST be closed by a matching endskip. */
+#define skip(test, ...) do {if (test) {skippy(__VA_ARGS__, NULL); break;}
+#define endskip } while (0)
+
+#define todo(...) ctodo(0, ## __VA_ARGS__, NULL)
+#define endtodo cendtodo()
+
+#define dies_ok(code, ...) dies_ok_common(code, 1, ## __VA_ARGS__)
+#define lives_ok(code, ...) dies_ok_common(code, 0, ## __VA_ARGS__)
+
+/* Death tests fork(): the child runs `code` with stdout/stderr closed
+ and clears a shared-memory flag on survival; the parent reads the
+ flag to decide whether `code` died. Unsupported on MSWin32. */
+#ifdef _WIN32
+#define dies_ok_common(...) \
+ skippy(1, "Death detection is not supported on MSWin32")
+#else
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+int tap_test_died (int status);
+#define dies_ok_common(code, for_death, ...) \
+ do { \
+ tap_test_died(1); \
+ int cpid = fork(); \
+ switch (cpid) { \
+ case -1: \
+ perror("fork error"); \
+ exit(EXIT_FAILURE); \
+ case 0: /* child */ \
+ close(1); close(2); \
+ code \
+ tap_test_died(0); \
+ exit(EXIT_SUCCESS); \
+ } \
+ if (waitpid(cpid, NULL, 0) < 0) { \
+ perror("waitpid error"); \
+ exit(EXIT_FAILURE); \
+ } \
+ int it_died = tap_test_died(0); \
+ if (!it_died) {code} \
+ ok(for_death ? it_died : !it_died, ## __VA_ARGS__); \
+ } while (0)
+#endif
+#endif
diff --git a/tests/libtap/test.c b/tests/libtap/test.c
new file mode 100644
index 0000000..979807f
--- /dev/null
+++ b/tests/libtap/test.c
@@ -0,0 +1,183 @@
+#include "test.h"
+#include "bson.h"
+#include "mongo-utils.h"
+
+#include <glib.h>
+#include <string.h>
+
+#ifndef HAVE_MSG_NOSIGNAL
+#include <signal.h>
+#endif
+
+
+func_config_t config;
+
+bson *
+test_bson_generate_full (void) /* Build a finished BSON doc exercising every element type; caller frees. */
+{
+ bson *b, *d, *a, *scope;
+ guint8 oid[] = "1234567890ab"; /* 12 ASCII bytes reused as a fake ObjectId */
+
+ a = bson_new (); /* array element: ["0" => 32, "1" => -42] */
+ bson_append_int32 (a, "0", 32);
+ bson_append_int64 (a, "1", (gint64)-42);
+ bson_finish (a);
+
+ d = bson_new (); /* embedded sub-document */
+ bson_append_string (d, "name", "sub-document", -1);
+ bson_append_int32 (d, "answer", 42);
+ bson_finish (d);
+
+ scope = bson_new (); /* scope object for the javascript-with-scope element below */
+ bson_append_string (scope, "v", "hello world", -1);
+ bson_finish (scope);
+
+ b = bson_new ();
+ bson_append_double (b, "double", 3.14);
+ bson_append_string (b, "str", "hello world", -1);
+ bson_append_document (b, "doc", d);
+ bson_append_array (b, "array", a);
+ bson_append_binary (b, "binary0", BSON_BINARY_SUBTYPE_GENERIC,
+ (guint8 *)"foo\0bar", 7); /* embedded NUL is intentional */
+ bson_append_oid (b, "_id", oid);
+ bson_append_boolean (b, "TRUE", FALSE); /* key name vs. value mismatch is deliberate test data */
+ bson_append_utc_datetime (b, "date", 1294860709000);
+ bson_append_timestamp (b, "ts", 1294860709000);
+ bson_append_null (b, "null");
+ bson_append_regex (b, "foobar", "s/foo.*bar/", "i");
+ bson_append_javascript (b, "alert", "alert (\"hello world!\");", -1);
+ bson_append_symbol (b, "sex", "Marilyn Monroe", -1);
+ bson_append_javascript_w_scope (b, "print", "alert (v);", -1, scope);
+ bson_append_int32 (b, "int32", 32);
+ bson_append_int64 (b, "int64", (gint64)-42);
+ bson_finish (b);
+
+ bson_free (d); /* sub-objects were copied into b; safe to free here */
+ bson_free (a);
+ bson_free (scope);
+
+ return b;
+}
+
+mongo_packet *
+test_mongo_wire_generate_reply (gboolean valid, gint32 nreturn,
+ gboolean with_docs) /* Craft a fake server reply packet for wire-level tests. */
+{
+ mongo_reply_packet_header rh;
+ mongo_packet_header h;
+ mongo_packet *p;
+ guint8 *data;
+ gint data_size = sizeof (mongo_reply_packet_header);
+ bson *b1 = NULL, *b2 = NULL;
+
+ p = mongo_wire_packet_new ();
+
+ h.opcode = (valid) ? GINT32_TO_LE (1) : GINT32_TO_LE (42); /* 1 = reply opcode; 42 is deliberately bogus */
+ h.id = GINT32_TO_LE (1984);
+ h.resp_to = GINT32_TO_LE (42);
+ if (with_docs)
+ {
+ b1 = test_bson_generate_full (); /* two identical full documents as payload */
+ b2 = test_bson_generate_full ();
+ data_size += bson_size (b1) + bson_size (b2);
+ }
+ h.length = GINT32_TO_LE (sizeof (mongo_packet_header) + data_size);
+
+ mongo_wire_packet_set_header (p, &h);
+
+ data = g_try_malloc (data_size); /* NOTE(review): result unchecked; an OOM would crash the test */
+
+ rh.flags = 0;
+ rh.cursor_id = GINT64_TO_LE ((gint64)12345);
+ rh.start = 0;
+ rh.returned = GINT32_TO_LE (nreturn); /* may deliberately disagree with the docs attached */
+
+ memcpy (data, &rh, sizeof (mongo_reply_packet_header));
+ if (with_docs)
+ {
+ memcpy (data + sizeof (mongo_reply_packet_header),
+ bson_data (b1), bson_size (b1));
+ memcpy (data + sizeof (mongo_reply_packet_header) + bson_size (b1),
+ bson_data (b2), bson_size (b2));
+ }
+
+ mongo_wire_packet_set_data (p, data, data_size); /* buffer freed just below, so set_data must copy it */
+ g_free (data);
+ bson_free (b1); /* bson_free(NULL) tolerated when with_docs is FALSE */
+ bson_free (b2);
+
+ return p;
+}
+
<h lines="17970-17986">
+mongo_sync_connection *
+test_make_fake_sync_conn (gint fd, gboolean slaveok) /* Allocate a sync connection around an arbitrary fd; no network I/O. */
+{
+ mongo_sync_connection *c;
+
+ c = g_try_new0 (mongo_sync_connection, 1); /* zero-filled, so unset fields stay NULL/0 */
+ if (!c)
+ return NULL; /* out of memory */
+
+ c->super.fd = fd; /* stored verbatim; callers may pass a bogus fd on purpose */
+ c->slaveok = slaveok;
+ c->safe_mode = FALSE;
+ c->auto_reconnect = FALSE;
+ c->max_insert_size = MONGO_SYNC_DEFAULT_MAX_INSERT_SIZE;
+
+ return c;
+}
</h>
+
+gboolean
+test_env_setup (void) /* Load TEST_* env vars into the global config; TRUE iff a primary is configured. */
+{
+ config.primary_host = config.secondary_host = NULL;
+ config.primary_port = config.secondary_port = 27017; /* default MongoDB port */
+ config.db = g_strdup ("test");
+ config.coll = g_strdup ("libmongo");
+
+ if (getenv ("TEST_DB"))
+ {
+ g_free (config.db);
+ config.db = g_strdup (getenv ("TEST_DB"));
+ }
+ if (getenv ("TEST_COLLECTION"))
+ {
+ g_free (config.coll);
+ config.coll = g_strdup (getenv ("TEST_COLLECTION"));
+ }
+ config.ns = g_strconcat (config.db, ".", config.coll, NULL); /* "db.collection" namespace */
+
+ config.gfs_prefix = g_strconcat (config.ns, ".", "grid", NULL); /* GridFS collection prefix */
+
+ if (!getenv ("TEST_PRIMARY") || strlen (getenv ("TEST_PRIMARY")) == 0)
+ return FALSE; /* strings above stay allocated; test_env_free() reclaims them */
+
+ if (!mongo_util_parse_addr (getenv ("TEST_PRIMARY"), &config.primary_host,
+ &config.primary_port))
+ return FALSE;
+
+ if (getenv ("TEST_SECONDARY") && strlen (getenv ("TEST_SECONDARY")) > 0)
+ mongo_util_parse_addr (getenv ("TEST_SECONDARY"), &config.secondary_host,
+ &config.secondary_port); /* secondary is optional; parse failure ignored */
+
+ return TRUE;
+}
+
+void
+test_env_free (void) /* Release every string allocated by test_env_setup(); g_free(NULL) is a no-op. */
+{
+ g_free (config.primary_host);
+ g_free (config.secondary_host);
+ g_free (config.db);
+ g_free (config.coll);
+ g_free (config.ns);
+ g_free (config.gfs_prefix);
+}
+
+void
+test_main_setup (void) /* Per-binary setup: ignore SIGPIPE where MSG_NOSIGNAL is unavailable. */
+{
+ #ifndef HAVE_MSG_NOSIGNAL
+ signal(SIGPIPE, SIG_IGN); /* keep writes to closed sockets from killing the test */
+ #endif
+}
diff --git a/tests/libtap/test.h b/tests/libtap/test.h
new file mode 100644
index 0000000..1c442f5
--- /dev/null
+++ b/tests/libtap/test.h
@@ -0,0 +1,84 @@
+#ifndef LIBMONGO_CLIENT_TEST_H
+#define LIBMONGO_CLIENT_TEST_H 1
+
+#include "tap.h"
+#include "bson.h"
+#include "mongo-wire.h"
+#include "mongo-sync.h"
+#include "libmongo-private.h"
+
+#include <dlfcn.h>
+
+typedef struct
+{
+ gchar *primary_host; /* from TEST_PRIMARY; NULL until test_env_setup() parses it */
+ gint primary_port; /* defaults to 27017 */
+
+ gchar *secondary_host; /* from TEST_SECONDARY; optional, may stay NULL */
+ gint secondary_port;
+
+ gchar *db; /* TEST_DB, default "test" */
+ gchar *coll; /* TEST_COLLECTION, default "libmongo" */
+ gchar *ns; /* "db.coll" namespace */
+
+ gchar *gfs_prefix; /* "db.coll.grid" GridFS prefix */
+} func_config_t;
+
+extern func_config_t config;
+
+#define begin_network_tests(n) \
+ do \
+ { \
+ skip(!test_env_setup (), n, "Environment not set up for network tests")
+
+#define end_network_tests() \
+ endskip; \
+ test_env_free(); \
+ } while (0)
+
+#define RUN_TEST(n, t) \
+ int \
+ main (void) \
+ { \
+ test_main_setup(); \
+ plan (n); \
+ test_##t (); \
+ return 0; \
+ }
+
+gboolean test_env_setup (void);
+void test_env_free (void);
+void test_main_setup (void);
+
+#define RUN_NET_TEST(n, t) \
+ int \
+ main (void) \
+ { \
+ test_main_setup(); \
+ if (!test_env_setup ()) \
+ printf ("1..0 # skip, Environment not set up for network tests\n"); \
+ else \
+ { \
+ plan (n); \
+ test_##t (); \
+ } \
+ test_env_free (); \
+ return 0; \
+ }
+
+bson *test_bson_generate_full (void);
+mongo_packet *test_mongo_wire_generate_reply (gboolean valid,
+ gint32 nreturn,
+ gboolean with_docs);
+mongo_sync_connection *test_make_fake_sync_conn (gint fd,
+ gboolean slaveok);
+
+#define SAVE_OLD_FUNC(n) \
+ static void *(*func_##n)(); \
+ if (!func_##n) \
+ func_##n = (void *(*)())dlsym (RTLD_NEXT, #n);
+
+#define CALL_OLD_FUNC(n, ...) \
+ func_##n (__VA_ARGS__)
+
+#endif
diff --git a/tests/perf/bson/p_bson_find.c b/tests/perf/bson/p_bson_find.c
new file mode 100644
index 0000000..4e62132
--- /dev/null
+++ b/tests/perf/bson/p_bson_find.c
@@ -0,0 +1,43 @@
+#include "tap.h"
+#include "test.h"
+
+#include <mongo.h>
+
+#define MAX_KEYS 10000
+
+void
+test_p_bson_find (void) /* Perf smoke test: bson_find() must locate all MAX_KEYS keys. */
+{
+ bson *b;
+ bson_cursor *c;
+ gint i;
+ gchar **keys;
+ gboolean ret = TRUE;
+
+ keys = g_new(gchar *, MAX_KEYS);
+
+ b = bson_new ();
+ for (i = 0; i < MAX_KEYS; i++)
+ {
+ keys[i] = g_strdup_printf ("tmp_key_%d", i);
+ bson_append_int32 (b, keys[i], i);
+ }
+ bson_finish (b);
+
+ for (i = 1; i <= MAX_KEYS; i++) /* 1-based so keys[i - 1] walks every key exactly once */
+ {
+ c = bson_find (b, keys[i - 1]);
+ if (!c)
+ ret = FALSE; /* remember failure but keep freeing everything */
+ bson_cursor_free (c);
+ g_free (keys[i - 1]);
+ }
+
+ bson_free (b);
+ g_free (keys);
+
+ ok (ret == TRUE,
+ "bson_find() performance test ok");
+}
+
+RUN_TEST (1, p_bson_find);
diff --git a/tests/runall b/tests/runall
new file mode 100755
index 0000000..e25f8ef
--- /dev/null
+++ b/tests/runall
@@ -0,0 +1,17 @@
+#! /bin/sh
+set -e
+
+fail_counter=0
+
+trap '[ $? -eq 0 ] && rm -f current-test.out current-test.err' 0
+
+for test in "$@"; do # quote: test paths may contain whitespace
+ "${test}" 2>current-test.err >current-test.out
+ if grep "not ok" current-test.out >/dev/null 2>/dev/null; then
+ echo "${test} failed:" >&2
+ cat current-test.err >&2
+ fail_counter=`expr ${fail_counter} + 1`
+ fi
+done
+
+exit ${fail_counter}
diff --git a/tests/test_cleanup.c b/tests/test_cleanup.c
new file mode 100644
index 0000000..bef203a
--- /dev/null
+++ b/tests/test_cleanup.c
@@ -0,0 +1,31 @@
+#include "test.h"
+#include <mongo.h>
+
+int
+main (void) /* One-shot cleanup: drop the test collections (plain, GridFS, capped). */
+{
+ mongo_sync_connection *conn;
+ gchar *c;
+
+ if (!test_env_setup ())
+ return 0; /* no test environment configured; nothing to clean */
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE); /* NOTE(review): may be NULL; drops are then passed NULL -- confirm they tolerate it */
+ mongo_sync_cmd_drop (conn, config.db, config.coll);
+
+ c = g_strconcat (config.coll, ".grid.files", NULL); /* GridFS metadata collection */
+ mongo_sync_cmd_drop (conn, config.db, c);
+ g_free (c);
+
+ c = g_strconcat (config.coll, ".grid.chunks", NULL); /* GridFS chunk collection */
+ mongo_sync_cmd_drop (conn, config.db, c);
+ g_free (c);
+
+ c = g_strconcat (config.coll, ".capped", NULL);
+ mongo_sync_cmd_drop (conn, config.db, c);
+ g_free (c);
+
+ test_env_free ();
+
+ return 0; /* NOTE(review): conn never disconnected/freed; acceptable for a one-shot process */
+}
diff --git a/tests/tools/coverage-report-entry.pl b/tests/tools/coverage-report-entry.pl
new file mode 100644
index 0000000..53ec17b
--- /dev/null
+++ b/tests/tools/coverage-report-entry.pl
@@ -0,0 +1,70 @@
+#!/usr/bin/perl
+#
+# Copyright (C) 2006 Daniel Berrange
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+
+print <<EOF;
+<html>
+<head>
+<title>Coverage report for $ARGV[0]</title>
+<style type="text/css">
+ span.perfect {
+ background: rgb(0,255,0);
+ }
+ span.terrible {
+ background: rgb(255,0,0);
+ }
+</style>
+</head>
+<body>
+<h1>Coverage report for $ARGV[0]</h1>
+
+<pre>
+EOF
+
+
+while (<>) {
+ s/&/&amp;/g;
+ s/</&lt;/g;
+ s/>/&gt;/g;
+
+ if (/^\s*function (\S+) called (\d+) returned \d+% blocks executed \d+%/) {
+ my $class = $2 > 0 ? "perfect" : "terrible";
+ $_ = "<span class=\"$class\" id=\"" . $1 . "\">$_</span>";
+ } elsif (/^\s*branch\s+\d+\s+taken\s+(\d+)%\s+.*$/) {
+ my $class = $1 > 0 ? "perfect" : "terrible";
+ $_ = "<span class=\"$class\">$_</span>";
+ } elsif (/^\s*branch\s+\d+\s+never executed.*$/) {
+ my $class = "terrible";
+ $_ = "<span class=\"$class\">$_</span>";
+ } elsif (/^\s*call\s+\d+\s+never executed.*$/) {
+ my $class = "terrible";
+ $_ = "<span class=\"$class\">$_</span>";
+ } elsif (/^\s*call\s+\d+\s+returned\s+(\d+)%.*$/) {
+ my $class = $1 > 0 ? "perfect" : "terrible";
+ $_ = "<span class=\"$class\">$_</span>";
+ }
+
+
+ print;
+}
+
+print <<EOF;
+</pre>
+</body>
+</html>
+EOF
diff --git a/tests/tools/coverage-report.pl b/tests/tools/coverage-report.pl
new file mode 100644
index 0000000..6ad7abd
--- /dev/null
+++ b/tests/tools/coverage-report.pl
@@ -0,0 +1,125 @@
+#!/usr/bin/perl
+#
+# Copyright (C) 2006 Daniel Berrange
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+use warnings;
+use strict;
+
+my %coverage = ( functions => {}, files => {} );
+
+my %filemap;
+
+my $type;
+my $name;
+
+my @functions;
+
+while (<>) {
+ if (/^Function '(.*)'\s*$/) {
+ $type = "function";
+ $name = $1;
+ $coverage{$type}->{$name} = {};
+ push @functions, $name;
+ } elsif (/^File '(.*?)'\s*$/) {
+ $type = "file";
+ $name = $1;
+ $coverage{$type}->{$name} = {};
+
+ foreach my $func (@functions) {
+ $coverage{"function"}->{$func}->{file} = $name;
+ }
+ @functions = ();
+ } elsif (/^Lines executed:(.*)%\s*of\s*(\d+)\s*$/) {
+ $coverage{$type}->{$name}->{lines} = $2;
+ $coverage{$type}->{$name}->{linesCoverage} = $1;
+ } elsif (/^Branches executed:(.*)%\s*of\s*(\d+)\s*$/) {
+ $coverage{$type}->{$name}->{branches} = $2;
+ $coverage{$type}->{$name}->{branchesCoverage} = $1;
+ } elsif (/^Taken at least once:(.*)%\s*of\s*(\d+)\s*$/) {
+ $coverage{$type}->{$name}->{conds} = $2;
+ $coverage{$type}->{$name}->{condsCoverage} = $1;
+ } elsif (/^Calls executed:(.*)%\s*of\s*(\d+)\s*$/) {
+ $coverage{$type}->{$name}->{calls} = $2;
+ $coverage{$type}->{$name}->{callsCoverage} = $1;
+ } elsif (/^No branches$/) {
+ $coverage{$type}->{$name}->{branches} = 0;
+ $coverage{$type}->{$name}->{branchesCoverage} = "100.00";
+ $coverage{$type}->{$name}->{conds} = 0;
+ $coverage{$type}->{$name}->{condsCoverage} = "100.00";
+ } elsif (/^No calls$/) {
+ $coverage{$type}->{$name}->{calls} = 0;
+ $coverage{$type}->{$name}->{callsCoverage} = "100.00";
+ } elsif (/^\s*(.*):creating '(.*)'\s*$/) {
+ $filemap{$1} = $2;
+ } elsif (/^\s*$/) {
+ # nada
+ } else {
+ warn "Shit [$_]\n";
+ }
+}
+
+my %summary;
+foreach my $type ("function", "file") {
+ $summary{$type} = {};
+ foreach my $m ("lines", "branches", "conds", "calls") {
+ my $totalGot = 0;
+ my $totalMiss = 0;
+ my $count = 0;
+ foreach my $func (keys %{$coverage{$type}}) { # was hard-coded to 'function': file summary duplicated function totals
+ $count++;
+ my $got = $coverage{$type}->{$func}->{$m};
+ $totalGot += $got;
+ my $miss = $got * $coverage{$type}->{$func}->{$m ."Coverage"} / 100;
+ $totalMiss += $miss;
+ }
+ $summary{$type}->{$m} = sprintf("%d", $totalGot);
+ $summary{$type}->{$m . "Coverage"} = sprintf("%.2f", $totalMiss / $totalGot * 100);
+ }
+}
+
+
+
+print "<coverage>\n";
+
+foreach my $type ("function", "file") {
+ printf "<%ss>\n", $type;
+ foreach my $name (sort { $a cmp $b } keys %{$coverage{$type}}) {
+ my $rec = $coverage{$type}->{$name};
+ printf " <entry name=\"%s\" details=\"%s\">\n", $name, ($type eq "file" ? $filemap{$name} : $filemap{$rec->{file}});
+ printf " <lines count=\"%s\" coverage=\"%s\"/>\n", $rec->{lines}, $rec->{linesCoverage};
+ if (exists $rec->{branches}) {
+ printf " <branches count=\"%s\" coverage=\"%s\"/>\n", $rec->{branches}, $rec->{branchesCoverage};
+ }
+ if (exists $rec->{conds}) {
+ printf " <conditions count=\"%s\" coverage=\"%s\"/>\n", $rec->{conds}, $rec->{condsCoverage};
+ }
+ if (exists $rec->{calls}) {
+ printf " <calls count=\"%s\" coverage=\"%s\"/>\n", $rec->{calls}, $rec->{callsCoverage};
+ }
+ print " </entry>\n";
+ }
+
+ printf " <summary>\n";
+ printf " <lines count=\"%s\" coverage=\"%s\"/>\n", $summary{$type}->{lines}, $summary{$type}->{linesCoverage};
+ printf " <branches count=\"%s\" coverage=\"%s\"/>\n", $summary{$type}->{branches}, $summary{$type}->{branchesCoverage};
+ printf " <conditions count=\"%s\" coverage=\"%s\"/>\n", $summary{$type}->{conds}, $summary{$type}->{condsCoverage};
+ printf " <calls count=\"%s\" coverage=\"%s\"/>\n", $summary{$type}->{calls}, $summary{$type}->{callsCoverage};
+ printf " </summary>\n";
+ printf "</%ss>\n", $type;
+}
+
+print "</coverage>\n";
diff --git a/tests/tools/coverage-report.xsl b/tests/tools/coverage-report.xsl
new file mode 100644
index 0000000..ca3f57f
--- /dev/null
+++ b/tests/tools/coverage-report.xsl
@@ -0,0 +1,235 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+#
+# Copyright (C) 2006 Daniel Berrange
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+-->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+ <xsl:output method="html"/>
+
+ <xsl:template match="coverage">
+ <html>
+ <head>
+ <title>Coverage report</title>
+ <style type="text/css">
+ tbody tr.odd td.label {
+ border-top: 1px solid rgb(128,128,128);
+ border-bottom: 1px solid rgb(128,128,128);
+ }
+ tbody tr.odd td.label {
+ background: rgb(200,200,200);
+ }
+
+ thead, tfoot {
+ background: rgb(60,60,60);
+ color: white;
+ font-weight: bold;
+ }
+
+ tr td.perfect {
+ background: rgb(0,255,0);
+ color: black;
+ }
+ tr td.excellant {
+ background: rgb(140,255,140);
+ color: black;
+ }
+ tr td.good {
+ background: rgb(160,255,0);
+ color: black;
+ }
+ tr td.poor {
+ background: rgb(255,160,0);
+ color: black;
+ }
+ tr td.bad {
+ background: rgb(255,140,140);
+ color: black;
+ }
+ tr td.terrible {
+ background: rgb(255,0,0);
+ color: black;
+ }
+ </style>
+ </head>
+ <body>
+ <h1>Coverage report</h1>
+ <xsl:apply-templates/>
+ </body>
+ </html>
+ </xsl:template>
+
+ <xsl:template match="functions">
+ <h2>Function coverage</h2>
+ <xsl:call-template name="content">
+ <xsl:with-param name="type" select="'function'"/>
+ </xsl:call-template>
+ </xsl:template>
+
+
+ <xsl:template match="files">
+ <h2>File coverage</h2>
+ <xsl:call-template name="content">
+ <xsl:with-param name="type" select="'file'"/>
+ </xsl:call-template>
+ </xsl:template>
+
+ <xsl:template name="content">
+ <xsl:param name="type"/>
+ <table>
+ <thead>
+ <tr>
+ <th>Name</th>
+ <th>Lines</th>
+ <th>Branches</th>
+ <th>Conditions</th>
+ <th>Calls</th>
+ </tr>
+ </thead>
+ <tbody>
+ <xsl:for-each select="entry">
+ <xsl:call-template name="entry">
+ <xsl:with-param name="type" select="$type"/>
+ <xsl:with-param name="class">
+ <xsl:choose>
+ <xsl:when test="position() mod 2">
+ <xsl:text>odd</xsl:text>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:text>even</xsl:text>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:with-param>
+ </xsl:call-template>
+ </xsl:for-each>
+ </tbody>
+ <tfoot>
+ <xsl:for-each select="summary">
+ <xsl:call-template name="entry">
+ <xsl:with-param name="type" select="'summary'"/>
+ <xsl:with-param name="class">
+ <xsl:choose>
+ <xsl:when test="position() mod 2">
+ <xsl:text>odd</xsl:text>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:text>even</xsl:text>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:with-param>
+ </xsl:call-template>
+ </xsl:for-each>
+ </tfoot>
+ </table>
+ </xsl:template>
+
+ <xsl:template name="entry">
+ <xsl:param name="type"/>
+ <xsl:param name="class"/>
+ <tr class="{$class}">
+ <xsl:choose>
+ <xsl:when test="$type = 'function'">
+ <td class="label"><a href="{@details}.html#{@name}"><xsl:value-of select="@name"/></a></td>
+ </xsl:when>
+ <xsl:when test="$type = 'file'">
+ <td class="label"><a href="{@details}.html"><xsl:value-of select="@name"/></a></td>
+ </xsl:when>
+ <xsl:otherwise>
+ <td class="label">Summary</td>
+ </xsl:otherwise>
+ </xsl:choose>
+
+ <xsl:if test="count(lines)">
+ <xsl:apply-templates select="lines"/>
+ </xsl:if>
+ <xsl:if test="not(count(lines))">
+ <xsl:call-template name="missing"/>
+ </xsl:if>
+
+ <xsl:if test="count(branches)">
+ <xsl:apply-templates select="branches"/>
+ </xsl:if>
+ <xsl:if test="not(count(branches))">
+ <xsl:call-template name="missing"/>
+ </xsl:if>
+
+ <xsl:if test="count(conditions)">
+ <xsl:apply-templates select="conditions"/>
+ </xsl:if>
+ <xsl:if test="not(count(conditions))">
+ <xsl:call-template name="missing"/>
+ </xsl:if>
+
+ <xsl:if test="count(calls)">
+ <xsl:apply-templates select="calls"/>
+ </xsl:if>
+ <xsl:if test="not(count(calls))">
+ <xsl:call-template name="missing"/>
+ </xsl:if>
+
+ </tr>
+ </xsl:template>
+
+ <xsl:template match="lines">
+ <xsl:call-template name="row"/>
+ </xsl:template>
+
+ <xsl:template match="branches">
+ <xsl:call-template name="row"/>
+ </xsl:template>
+
+ <xsl:template match="conditions">
+ <xsl:call-template name="row"/>
+ </xsl:template>
+
+ <xsl:template match="calls">
+ <xsl:call-template name="row"/>
+ </xsl:template>
+
+ <xsl:template name="missing">
+ <td></td>
+ </xsl:template>
+
+ <xsl:template name="row">
+ <xsl:variable name="quality">
+ <xsl:choose>
+ <xsl:when test="@coverage = 100">
+ <xsl:text>perfect</xsl:text>
+ </xsl:when>
+ <xsl:when test="@coverage >= 80.0">
+ <xsl:text>excellant</xsl:text>
+ </xsl:when>
+ <xsl:when test="@coverage >= 60.0">
+ <xsl:text>good</xsl:text>
+ </xsl:when>
+ <xsl:when test="@coverage >= 40.0">
+ <xsl:text>poor</xsl:text>
+ </xsl:when>
+ <xsl:when test="@coverage >= 20.0">
+ <xsl:text>bad</xsl:text>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:text>terrible</xsl:text>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:variable>
+
+ <td class="{$quality}"><xsl:value-of select="@coverage"/>% of <xsl:value-of select="@count"/></td>
+ </xsl:template>
+
+</xsl:stylesheet>
diff --git a/tests/unit/bson/bson_append_array.c b/tests/unit/bson/bson_append_array.c
new file mode 100644
index 0000000..040f0ed
--- /dev/null
+++ b/tests/unit/bson/bson_append_array.c
@@ -0,0 +1,65 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_array (void)
+{
+ bson *b, *e1, *e2;
+
+ e1 = bson_new ();
+ bson_append_int32 (e1, "0", 1984);
+ bson_append_string (e1, "1", "hello world", -1);
+ bson_finish (e1);
+
+ e2 = bson_new ();
+ bson_append_string (e2, "0", "bar", -1);
+ ok (bson_append_array (e2, "1", e1),
+ "bson_append_array() works");
+ bson_finish (e2);
+ bson_free (e1);
+
+ b = bson_new ();
+ ok (bson_append_array (b, "0", e2),
+ "bson_append_array() works still");
+ bson_finish (b);
+ bson_free (e2);
+
+ cmp_ok (bson_size (b), "==", 58, "BSON array element size check");
+ ok (memcmp (bson_data (b),
+ "\072\000\000\000\004\060\000\062\000\000\000\002\060\000\004"
+ "\000\000\000\142\141\162\000\004\061\000\037\000\000\000\020"
+ "\060\000\300\007\000\000\002\061\000\014\000\000\000\150\145"
+ "\154\154\157\040\167\157\162\154\144\000\000\000\000",
+ bson_size (b)) == 0,
+ "BSON array element contents check");
+
+ bson_free (b);
+
+ e1 = bson_new ();
+ bson_append_int32 (e1, "0", 1984);
+ b = bson_new ();
+
+ ok (bson_append_array (b, "array", e1) == FALSE,
+ "bson_append_array() with an unfinished array should fail");
+ bson_finish (e1);
+ ok (bson_append_array (b, NULL, e1) == FALSE,
+ "bson_append_array() with a NULL name should fail");
+ ok (bson_append_array (b, "foo", NULL) == FALSE,
+ "bson_append_array() with a NULL array should fail");
+ ok (bson_append_array (NULL, "foo", e1) == FALSE,
+ "bson_append_array() with a NULL BSON should fail");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_array (b, "array", e1) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (e1);
+ bson_free (b);
+}
+
+RUN_TEST (10, bson_array);
diff --git a/tests/unit/bson/bson_append_binary.c b/tests/unit/bson/bson_append_binary.c
new file mode 100644
index 0000000..06ea362
--- /dev/null
+++ b/tests/unit/bson/bson_append_binary.c
@@ -0,0 +1,56 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_binary (void)
+{
+ bson *b;
+
+ b = bson_new ();
+ ok (bson_append_binary (b, "binary0", BSON_BINARY_SUBTYPE_GENERIC,
+ (guint8 *)"foo\0bar", 7),
+ "bson_append_binary(), type 0 works");
+ ok (bson_append_binary (b, "binary2", BSON_BINARY_SUBTYPE_BINARY,
+ (guint8 *)"\0\0\0\7foo\0bar", 11),
+ "bson_append_binary(), type 2 works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 51, "BSON binary element size check");
+ ok (memcmp (bson_data (b),
+ "\063\000\000\000\005\142\151\156\141\162\171\060\000\007\000"
+ "\000\000\000\146\157\157\000\142\141\162\005\142\151\156\141"
+ "\162\171\062\000\013\000\000\000\002\000\000\000\007\146\157"
+ "\157\000\142\141\162\000",
+ bson_size (b)) == 0,
+ "BSON binary element contents check");
+
+ bson_free (b);
+
+ b = bson_new ();
+ ok (bson_append_binary (b, NULL, BSON_BINARY_SUBTYPE_GENERIC,
+ (guint8 *)"foo\0bar", 7) == FALSE,
+ "bson_append_binary() without a key name should fail");
+ ok (bson_append_binary (b, "binary1", BSON_BINARY_SUBTYPE_GENERIC,
+ NULL, 10) == FALSE,
+ "bson_append_binary () without binary data should fail");
+ ok (bson_append_binary (b, "binary3", BSON_BINARY_SUBTYPE_GENERIC,
+ (guint8 *)"foo\0bar", -1) == FALSE,
+ "bson_append_binary () with an invalid length should fail");
+ ok (bson_append_binary (NULL, "binary1", BSON_BINARY_SUBTYPE_GENERIC,
+ (guint8 *)"foo\0bar", 7) == FALSE,
+ "bson_append_binary () without a BSON object should fail");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_binary (b, "binary", BSON_BINARY_SUBTYPE_GENERIC,
+ (guint8 *)"foo\0bar", 7) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (10, bson_binary);
diff --git a/tests/unit/bson/bson_append_boolean.c b/tests/unit/bson/bson_append_boolean.c
new file mode 100644
index 0000000..03df0a5
--- /dev/null
+++ b/tests/unit/bson/bson_append_boolean.c
@@ -0,0 +1,43 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_boolean (void)
+{
+ bson *b;
+
+ b = bson_new ();
+ ok (bson_append_boolean (b, "FALSE", FALSE),
+ "bson_append_boolean() works");
+ ok (bson_append_boolean (b, "TRUE", TRUE),
+ "bson_append_boolean() works still");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 20, "BSON boolean element size check");
+ ok (memcmp (bson_data (b),
+ "\024\000\000\000\010\106\101\114\123\105\000\000\010\124\122"
+ "\125\105\000\001\000",
+ bson_size (b)) == 0,
+ "BSON boolean element contents check");
+
+ bson_free (b);
+
+ b = bson_new ();
+ ok (bson_append_boolean (b, NULL, TRUE) == FALSE,
+ "bson_append_boolean() with a NULL key should fail");
+ ok (bson_append_boolean (NULL, "TRUE", TRUE) == FALSE,
+ "bson_append_boolean() without a BSON object should fail");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_boolean (b, "b", TRUE) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (8, bson_boolean);
diff --git a/tests/unit/bson/bson_append_document.c b/tests/unit/bson/bson_append_document.c
new file mode 100644
index 0000000..04d077c
--- /dev/null
+++ b/tests/unit/bson/bson_append_document.c
@@ -0,0 +1,67 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_document (void)
+{
+ bson *b, *e1, *e2;
+
+ e1 = bson_new ();
+ bson_append_int32 (e1, "i32", 1984);
+ bson_append_string (e1, "str", "hello world", -1);
+ bson_finish (e1);
+
+ e2 = bson_new ();
+ bson_append_string (e2, "foo", "bar", -1);
+ ok (bson_append_document (e2, "subd", e1),
+ "bson_append_document() works");
+ bson_finish (e2);
+ bson_free (e1);
+
+ b = bson_new ();
+ ok (bson_append_document (b, "doc", e2),
+ "bson_append_document() works still");
+ bson_finish (b);
+ bson_free (e2);
+
+ cmp_ok (bson_size (b), "==", 69, "BSON document element size check");
+ ok (memcmp (bson_data (b),
+ "\105\000\000\000\003\144\157\143\000\073\000\000\000\002\146"
+ "\157\157\000\004\000\000\000\142\141\162\000\003\163\165\142"
+ "\144\000\043\000\000\000\020\151\063\062\000\300\007\000\000"
+ "\002\163\164\162\000\014\000\000\000\150\145\154\154\157\040"
+ "\167\157\162\154\144\000\000\000\000",
+ bson_size (b)) == 0,
+ "BSON document element contents check");
+
+ bson_free (b);
+
+ e1 = bson_new ();
+ bson_append_int32 (e1, "foo", 42);
+ b = bson_new ();
+
+ ok (bson_append_document (b, "doc", e1) == FALSE,
+ "bson_append_document() with an unfinished document should fail");
+ bson_finish (e1);
+ ok (bson_append_document (b, NULL, e1) == FALSE,
+ "bson_append_document() with a NULL key should fail");
+ ok (bson_append_document (b, "doc", NULL) == FALSE,
+ "bson_append_document() with a NULL document should fail");
+ ok (bson_append_document (NULL, "doc", e1) == FALSE,
+ "bson_append_document() without a BSON object should fail");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_document (b, "doc", e1) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (e1);
+ bson_free (b);
+}
+
+RUN_TEST (10, bson_document);
diff --git a/tests/unit/bson/bson_append_double.c b/tests/unit/bson/bson_append_double.c
new file mode 100644
index 0000000..de62c15
--- /dev/null
+++ b/tests/unit/bson/bson_append_double.c
@@ -0,0 +1,41 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_double (void)
+{
+ bson *b;
+ double d = 3.14;
+
+ b = bson_new ();
+ ok (bson_append_double (b, "double", d), "bson_append_double() works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 21, "BSON double element size check");
+ ok (memcmp (bson_data (b),
+ "\025\000\000\000\001\144\157\165\142\154\145\000\037\205\353"
+ "\121\270\036\011\100\000",
+ bson_size (b)) == 0,
+ "BSON double element contents check");
+
+ bson_free (b);
+
+ b = bson_new ();
+ ok (bson_append_double (b, NULL, d) == FALSE,
+ "bson_append_double() with a NULL key should fail");
+ ok (bson_append_double (NULL, "double", d) == FALSE,
+ "bson_append_double() without a BSON object should fail");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_double (b, "d", d) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_double);
diff --git a/tests/unit/bson/bson_append_int32.c b/tests/unit/bson/bson_append_int32.c
new file mode 100644
index 0000000..e2a2867
--- /dev/null
+++ b/tests/unit/bson/bson_append_int32.c
@@ -0,0 +1,40 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_int32 (void)
+{
+ bson *b;
+ gint32 i = 1984;
+
+ b = bson_new ();
+ ok (bson_append_int32 (b, "i32", i), "bson_append_int32() works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 14, "BSON int32 element size check");
+ ok (memcmp (bson_data (b),
+ "\016\000\000\000\020\151\063\062\000\300\007\000\000\000",
+ bson_size (b)) == 0,
+ "BSON int32 element contents check");
+
+ bson_free (b);
+
+ b = bson_new ();
+ ok (bson_append_int32 (b, NULL, i) == FALSE,
+ "bson_append_int32() with a NULL key should fail");
+ ok (bson_append_int32 (NULL, "i32", i) == FALSE,
+ "bson_append_int32() without a BSON object should fail");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_int32 (b, "i32", i) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_int32);
diff --git a/tests/unit/bson/bson_append_int64.c b/tests/unit/bson/bson_append_int64.c
new file mode 100644
index 0000000..22d9691
--- /dev/null
+++ b/tests/unit/bson/bson_append_int64.c
@@ -0,0 +1,41 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_int64 (void)
+{
+ bson *b;
+ gint64 l = 9876543210;
+
+ b = bson_new ();
+ ok (bson_append_int64 (b, "i64", l), "bson_append_int64() works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 18, "BSON int64 element size check");
+ ok (memcmp (bson_data (b),
+ "\022\000\000\000\022\151\066\064\000\352\026\260\114\002\000"
+ "\000\000\000",
+ bson_size (b)) == 0,
+ "BSON int64 element contents check");
+
+ bson_free (b);
+
+ b = bson_new ();
+ ok (bson_append_int64 (b, NULL, l) == FALSE,
+ "bson_append_int64() with a NULL key should fail");
+ ok (bson_append_int64 (NULL, "i64", l) == FALSE,
+ "bson_append_int64() without a BSON object should fail");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_int64 (b, "i64", l) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_int64);
diff --git a/tests/unit/bson/bson_append_js_code.c b/tests/unit/bson/bson_append_js_code.c
new file mode 100644
index 0000000..4b250b0
--- /dev/null
+++ b/tests/unit/bson/bson_append_js_code.c
@@ -0,0 +1,66 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_js_code (void)
+{
+ bson *b;
+
+ /* Test #1: A single JS element, with default size. */
+ b = bson_new ();
+ ok (bson_append_javascript (b, "hello",
+ "function () { print (\"hello world!\"); }", -1),
+ "bson_append_javascript() works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 56, "BSON javascript element size check");
+ ok (memcmp (bson_data (b),
+ "\070\000\000\000\015\150\145\154\154\157\000\050\000\000\000"
+ "\146\165\156\143\164\151\157\156\040\050\051\040\173\040\160"
+ "\162\151\156\164\040\050\042\150\145\154\154\157\040\167\157"
+ "\162\154\144\041\042\051\073\040\175\000\000",
+ bson_size (b)) == 0,
+ "BSON javascript element contents check");
+ bson_free (b);
+
+ /* Test #2: A single javascript element, with explicit length. */
+ b = bson_new ();
+ ok (bson_append_javascript (b, "hello",
+ "print (\"hello world!\"); garbage is gone.",
+ strlen ("print (\"hello world!\");")),
+ "bson_append_javascript() with explicit length works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 40, "BSON javascript element size check, #2");
+ ok (memcmp (bson_data (b),
+ "\050\000\000\000\015\150\145\154\154\157\000\030\000\000\000"
+ "\160\162\151\156\164\040\050\042\150\145\154\154\157\040\167"
+ "\157\162\154\144\041\042\051\073\000\000",
+ bson_size (b)) == 0,
+ "BSON javascript element contents check, #2");
+ bson_free (b);
+
+ /* Test #3: Negative test, passing an invalid arguments. */
+ b = bson_new ();
+ ok (bson_append_javascript (b, "hello", "print();", -42) == FALSE,
+ "bson_append_javascript() with an invalid length should fail");
+ ok (bson_append_javascript (b, NULL, "print();", -1) == FALSE,
+ "bson_append_javascript() should fail without a key name");
+ ok (bson_append_javascript (b, "hello", NULL, -1) == FALSE,
+ "bson_append_javascript() should fail without javascript code");
+ ok (bson_append_javascript (NULL, "hello", "print();", -1) == FALSE,
+ "bson_append_javascript() should fail without a BSON object");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_javascript (b, "js", "print();", -1) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (12, bson_js_code);
diff --git a/tests/unit/bson/bson_append_js_code_w_scope.c b/tests/unit/bson/bson_append_js_code_w_scope.c
new file mode 100644
index 0000000..09297f3
--- /dev/null
+++ b/tests/unit/bson/bson_append_js_code_w_scope.c
@@ -0,0 +1,79 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_js_code_w_scope (void)
+{
+ bson *b, *scope;
+
+ scope = bson_new ();
+ bson_append_string (scope, "foo", "bar", -1);
+ bson_finish (scope);
+
+ /* Test #1: A single JS element, with default size. */
+ b = bson_new ();
+ ok (bson_append_javascript_w_scope (b, "f",
+ "alert ('hello');", -1,
+ scope),
+ "bson_append_javascript_w_scope() works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 51, "BSON javascript w/ element size check");
+ ok (memcmp (bson_data (b),
+ "\063\000\000\000\017\146\000\053\000\000\000\021\000\000\000"
+ "\141\154\145\162\164\040\050\047\150\145\154\154\157\047\051"
+ "\073\000\022\000\000\000\002\146\157\157\000\004\000\000\000"
+ "\142\141\162\000\000\000",
+ bson_size (b)) == 0,
+ "BSON javascript w/ scope element contents check");
+ bson_free (b);
+
+ /* Test #2: A single javascript element, with explicit length. */
+ b = bson_new ();
+ ok (bson_append_javascript_w_scope (b, "f",
+ "alert ('hello'); garbage",
+ strlen ("alert ('hello');"),
+ scope),
+ "bson_append_javascript_w_scope() with explicit length works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 51, "BSON javascript w/ element size check");
+ ok (memcmp (bson_data (b),
+ "\063\000\000\000\017\146\000\053\000\000\000\021\000\000\000"
+ "\141\154\145\162\164\040\050\047\150\145\154\154\157\047\051"
+ "\073\000\022\000\000\000\002\146\157\157\000\004\000\000\000"
+ "\142\141\162\000\000\000",
+ bson_size (b)) == 0,
+ "BSON javascript w/ scope element contents check");
+ bson_free (b);
+
+ /* Test #3: Negative test, passing an invalid arguments. */
+ b = bson_new ();
+
+ ok (bson_append_javascript_w_scope (b, "hello", "print();",
+ -42, scope) == FALSE,
+ "bson_append_javascript_w_scope() with an invalid length should fail");
+ ok (bson_append_javascript_w_scope (b, NULL, "print();", -1, scope) == FALSE,
+ "bson_append_javascript_w_scope() should fail without a key name");
+ ok (bson_append_javascript_w_scope (b, "hello", NULL, -1, scope) == FALSE,
+ "bson_append_javascript_w_scope() should fail without javascript code");
+ ok (bson_append_javascript_w_scope (NULL, "hello", "print();",
+ -1, scope) == FALSE,
+ "bson_append_javascript_w_scope() should fail without a BSON object");
+ ok (bson_append_javascript_w_scope (b, "hello", "print();",
+ -1, NULL) == FALSE,
+ "bson_append_javascript_w_scope() should fail without a scope object");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_javascript_w_scope (b, "js", "print();", -1, scope) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (13, bson_js_code_w_scope);
diff --git a/tests/unit/bson/bson_append_null.c b/tests/unit/bson/bson_append_null.c
new file mode 100644
index 0000000..294ea50
--- /dev/null
+++ b/tests/unit/bson/bson_append_null.c
@@ -0,0 +1,40 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_null (void)
+{
+ bson *b;
+
+ b = bson_new ();
+ ok (bson_append_null (b, "null"),
+ "bson_append_null() works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 11, "BSON NULL element size check");
+ ok (memcmp (bson_data (b),
+ "\013\000\000\000\012\156\165\154\154\000\000",
+ bson_size (b)) == 0,
+ "BSON NULL element contents check");
+
+ bson_free (b);
+
+ b = bson_new ();
+ ok (bson_append_null (b, NULL) == FALSE,
+ "bson_append_null() should fail without a key name");
+ ok (bson_append_null (NULL, "null") == FALSE,
+ "bson_append_null() should fail without a BSON object");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_null (b, "null") == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_null);
diff --git a/tests/unit/bson/bson_append_oid.c b/tests/unit/bson/bson_append_oid.c
new file mode 100644
index 0000000..2db75a3
--- /dev/null
+++ b/tests/unit/bson/bson_append_oid.c
@@ -0,0 +1,43 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_oid (void)
+{
+ bson *b;
+ guint8 oid[] = "1234567890ab";
+
+ b = bson_new ();
+ ok (bson_append_oid (b, "_id", oid), "bson_append_oid() works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 22, "BSON OID element size check");
+ ok (memcmp (bson_data (b),
+ "\026\000\000\000\007\137\151\144\000\061\062\063\064\065\066"
+ "\067\070\071\060\141\142\000",
+ bson_size (b)) == 0,
+ "BSON OID element contents check");
+
+ bson_free (b);
+
+ b = bson_new ();
+ ok (bson_append_oid (b, "_id", NULL) == FALSE,
+ "bson_append_oid() should fail without an OID");
+ ok (bson_append_oid (b, NULL, oid) == FALSE,
+ "bson_append_oid() should fail without a key name");
+ ok (bson_append_oid (NULL, "_id", oid) == FALSE,
+ "bson_append_oid() should fail without a BSON object");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_oid (b, "_id", oid) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (8, bson_oid);
diff --git a/tests/unit/bson/bson_append_regexp.c b/tests/unit/bson/bson_append_regexp.c
new file mode 100644
index 0000000..172cd2a
--- /dev/null
+++ b/tests/unit/bson/bson_append_regexp.c
@@ -0,0 +1,45 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_regex (void)
+{
+ bson *b;
+
+ b = bson_new ();
+ ok (bson_append_regex (b, "regex", "foo.*bar", "i"),
+ "bson_append_regex() works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 23, "BSON regex element size check");
+ ok (memcmp (bson_data (b),
+ "\027\000\000\000\013\162\145\147\145\170\000\146\157\157\056"
+ "\052\142\141\162\000\151\000\000",
+ bson_size (b)) == 0,
+ "BSON regex element contents check");
+
+ bson_free (b);
+
+ b = bson_new ();
+ ok (bson_append_regex (b, "regex", "foo.*bar", NULL) == FALSE,
+ "bson_append_regex() without options should fail");
+ ok (bson_append_regex (b, "regex", NULL, "i") == FALSE,
+ "bson_append_regex() without a regex should fail");
+ ok (bson_append_regex (b, NULL, "foo.*bar", "i") == FALSE,
+ "bson_append_regex() should fail without a key name");
+ ok (bson_append_regex (NULL, "regex", "foo.*bar", "i") == FALSE,
+ "bson_append_regex() should fail without a BSON object");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_regex (b, "regex", "foo.*bar", "i") == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (9, bson_regex);
diff --git a/tests/unit/bson/bson_append_string.c b/tests/unit/bson/bson_append_string.c
new file mode 100644
index 0000000..e6a73a7
--- /dev/null
+++ b/tests/unit/bson/bson_append_string.c
@@ -0,0 +1,61 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_string (void)
+{
+ bson *b;
+
+ /* Test #1: A single string element, with default size. */
+ b = bson_new ();
+ ok (bson_append_string (b, "hello", "world", -1),
+ "bson_append_string() works");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 22, "BSON string element size check");
+ ok (memcmp (bson_data (b),
+ "\026\000\000\000\002\150\145\154\154\157\000\006\000\000\000"
+ "\167\157\162\154\144\000\000",
+ bson_size (b)) == 0,
+ "BSON string element contents check");
+ bson_free (b);
+
+ /* Test #2: A single string element, with explicit length. */
+ b = bson_new ();
+ ok (bson_append_string (b, "goodbye",
+ "cruel world, this garbage is gone.",
+ strlen ("cruel world")),
+ "bson_append_string() with explicit length works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 30, "BSON string element size check, #2");
+ ok (memcmp (bson_data (b),
+ "\036\000\000\000\002\147\157\157\144\142\171\145\000\014\000"
+ "\000\000\143\162\165\145\154\040\167\157\162\154\144\000\000",
+ bson_size (b)) == 0,
+ "BSON string element contents check, #2");
+ bson_free (b);
+
+ /* Test #3: Negative test, passing invalid arguments. */
+ b = bson_new ();
+ ok (bson_append_string (b, "hello", "world", -42) == FALSE,
+ "bson_append_string() should fail with invalid length");
+ ok (bson_append_string (b, "hello", NULL, -1) == FALSE,
+ "bson_append_string() should fail without a string");
+ ok (bson_append_string (b, NULL, "world", -1) == FALSE,
+ "bson_append_string() should fail without a key name");
+ ok (bson_append_string (NULL, "hello", "world", -1) == FALSE,
+ "bson_append_string() should fail without a BSON object");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_string (b, "hello", "world", -1) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (12, bson_string);
diff --git a/tests/unit/bson/bson_append_symbol.c b/tests/unit/bson/bson_append_symbol.c
new file mode 100644
index 0000000..6c16301
--- /dev/null
+++ b/tests/unit/bson/bson_append_symbol.c
@@ -0,0 +1,61 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_symbol (void)
+{
+ bson *b;
+
+ /* Test #1: A single symbol element, with default size. */
+ b = bson_new ();
+ ok (bson_append_symbol (b, "hello", "world", -1),
+ "bson_append_symbol() works");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 22, "BSON symbol element size check");
+ ok (memcmp (bson_data (b),
+ "\026\000\000\000\016\150\145\154\154\157\000\006\000\000\000"
+ "\167\157\162\154\144\000\000",
+ bson_size (b)) == 0,
+ "BSON symbol element contents check");
+ bson_free (b);
+
+ /* Test #2: A single symbol element, with explicit length. */
+ b = bson_new ();
+ ok (bson_append_symbol (b, "goodbye",
+ "cruel world, this garbage is gone.",
+ strlen ("cruel world")),
+ "bson_append_symbol() with explicit length works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 30, "BSON symbol element size check, #2");
+ ok (memcmp (bson_data (b),
+ "\036\000\000\000\016\147\157\157\144\142\171\145\000\014\000"
+ "\000\000\143\162\165\145\154\040\167\157\162\154\144\000\000",
+ bson_size (b)) == 0,
+ "BSON symbol element contents check, #2");
+ bson_free (b);
+
+ /* Test #3: Negative test, passing invalid arguments. */
+ b = bson_new ();
+ ok (bson_append_symbol (b, "hello", "world", -42) == FALSE,
+ "bson_append_symbol() should fail with invalid length");
+ ok (bson_append_symbol (b, "hello", NULL, -1) == FALSE,
+ "bson_append_symbol() should fail without a string");
+ ok (bson_append_symbol (b, NULL, "world", -1) == FALSE,
+ "bson_append_symbol() should fail without a key name");
+ ok (bson_append_symbol (NULL, "hello", "world", -1) == FALSE,
+ "bson_append_symbol() should fail without a BSON object");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_symbol (b, "hello", "world", -1) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (12, bson_symbol);
diff --git a/tests/unit/bson/bson_append_timestamp.c b/tests/unit/bson/bson_append_timestamp.c
new file mode 100644
index 0000000..4864ef4
--- /dev/null
+++ b/tests/unit/bson/bson_append_timestamp.c
@@ -0,0 +1,41 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_timestamp (void)
+{
+ bson *b;
+ gint64 l = 9876543210;
+
+ b = bson_new ();
+ ok (bson_append_timestamp (b, "ts", l), "bson_append_timestamp() works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 17, "BSON timestamp element size check");
+ ok (memcmp (bson_data (b),
+ "\021\000\000\000\021\164\163\000\352\026\260\114\002\000\000"
+ "\000\000",
+ bson_size (b)) == 0,
+ "BSON timestamp element contents check");
+
+ bson_free (b);
+
+ b = bson_new ();
+ ok (bson_append_timestamp (b, NULL, l) == FALSE,
+ "bson_append_timestamp() with a NULL key should fail");
+ ok (bson_append_timestamp (NULL, "ts", l) == FALSE,
+ "bson_append_timestamp() without a BSON object should fail");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_timestamp (b, "ts", l) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_timestamp);
diff --git a/tests/unit/bson/bson_append_utc_datetime.c b/tests/unit/bson/bson_append_utc_datetime.c
new file mode 100644
index 0000000..b2e38fb
--- /dev/null
+++ b/tests/unit/bson/bson_append_utc_datetime.c
@@ -0,0 +1,41 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_utc_datetime (void)
+{
+ bson *b;
+
+ b = bson_new ();
+ ok (bson_append_utc_datetime (b, "date", 1294860709000),
+ "bson_append_utc_datetime() works");
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 19, "BSON UTC datetime element size check");
+ ok (memcmp (bson_data (b),
+ "\023\000\000\000\011\144\141\164\145\000\210\154\266\173\055"
+ "\001\000\000\000",
+ bson_size (b)) == 0,
+ "BSON UTC datetime element contents check");
+
+ bson_free (b);
+
+ b = bson_new ();
+ ok (bson_append_utc_datetime (b, NULL, 1294860709000) == FALSE,
+ "bson_append_utc_datetime() with a NULL key should fail");
+ ok (bson_append_utc_datetime (NULL, "date", 1294860709000) == FALSE,
+ "bson_append_utc_datetime() without a BSON object should fail");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "BSON object should be empty");
+
+ ok (bson_append_utc_datetime (b, "date", 1294860709000) == FALSE,
+ "Appending to a finished element should fail");
+
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_utc_datetime);
diff --git a/tests/unit/bson/bson_build.c b/tests/unit/bson/bson_build.c
new file mode 100644
index 0000000..29693db
--- /dev/null
+++ b/tests/unit/bson/bson_build.c
@@ -0,0 +1,70 @@
+#include "bson.h"
+#include "tap.h"
+#include "test.h"
+
+#include <string.h>
+#include <glib.h>
+
+void
+test_bson_build (void)
+{
+ bson *b, *o, *d, *a, *scope;
+ guint8 oid[] = "1234567890ab";
+
+ a = bson_build (BSON_TYPE_INT32, "0", 32,
+ BSON_TYPE_INT64, "1", (gint64)-42,
+ BSON_TYPE_NONE);
+ bson_finish (a);
+ d = bson_build (BSON_TYPE_STRING, "name", "sub-document", -1,
+ BSON_TYPE_INT32, "answer", 42,
+ BSON_TYPE_NONE);
+ bson_finish (d);
+
+ scope = bson_build (BSON_TYPE_STRING, "v", "hello world", -1,
+ BSON_TYPE_NONE);
+ bson_finish (scope);
+
+ b = bson_build (BSON_TYPE_DOUBLE, "double", 3.14,
+ BSON_TYPE_STRING, "str", "hello world", -1,
+ BSON_TYPE_DOCUMENT, "doc", d,
+ BSON_TYPE_ARRAY, "array", a,
+ BSON_TYPE_BINARY, "binary0", BSON_BINARY_SUBTYPE_GENERIC,
+ (guint8 *)"foo\0bar", 7,
+ BSON_TYPE_OID, "_id", oid,
+ BSON_TYPE_BOOLEAN, "TRUE", FALSE,
+ BSON_TYPE_UTC_DATETIME, "date", 1294860709000,
+ BSON_TYPE_TIMESTAMP, "ts", 1294860709000,
+ BSON_TYPE_NULL, "null",
+ BSON_TYPE_REGEXP, "foobar", "s/foo.*bar/", "i",
+ BSON_TYPE_JS_CODE, "alert", "alert (\"hello world!\");", -1,
+ BSON_TYPE_SYMBOL, "sex", "Marilyn Monroe", -1,
+ BSON_TYPE_JS_CODE_W_SCOPE, "print", "alert (v);", -1, scope,
+ BSON_TYPE_INT32, "int32", 32,
+ BSON_TYPE_INT64, "int64", (gint64)-42,
+ BSON_TYPE_NONE);
+ bson_finish (b);
+ bson_free (d);
+ bson_free (a);
+ bson_free (scope);
+
+ o = test_bson_generate_full ();
+
+ cmp_ok (bson_size (b), "==", bson_size (o),
+ "bson_build() and hand crafted BSON object sizes match");
+ ok (memcmp (bson_data (b), bson_data (o), bson_size (b)) == 0,
+ "bson_build() and hand crafted BSON objects match");
+
+ bson_free (b);
+ bson_free (o);
+
+ b = bson_build (BSON_TYPE_UNDEFINED, BSON_TYPE_NONE);
+ ok (b == NULL,
+ "bson_build() should fail with an unsupported element type");
+ b = bson_build (BSON_TYPE_STRING, "str", "hello", -1,
+ BSON_TYPE_UNDEFINED,
+ BSON_TYPE_NONE);
+ ok (b == NULL,
+ "bson_build() should fail with an unsupported element type");
+}
+
+RUN_TEST (4, bson_build);
diff --git a/tests/unit/bson/bson_build_full.c b/tests/unit/bson/bson_build_full.c
new file mode 100644
index 0000000..08f2e45
--- /dev/null
+++ b/tests/unit/bson/bson_build_full.c
@@ -0,0 +1,71 @@
+#include "bson.h"
+#include "tap.h"
+#include "test.h"
+
+#include <string.h>
+#include <glib.h>
+
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+void
+test_bson_build_full (void)
+{
+ bson *b, *o;
+
+ b = bson_build_full (BSON_TYPE_DOUBLE, "double", FALSE, 3.14,
+ BSON_TYPE_STRING, "str", FALSE, "hello world", -1,
+ BSON_TYPE_DOCUMENT, "doc", TRUE,
+ bson_build (BSON_TYPE_STRING, "name", "sub-document", -1,
+ BSON_TYPE_INT32, "answer", 42,
+ BSON_TYPE_NONE),
+ BSON_TYPE_ARRAY, "array", TRUE,
+ bson_build (BSON_TYPE_INT32, "0", 32,
+ BSON_TYPE_INT64, "1", (gint64)-42,
+ BSON_TYPE_NONE),
+ BSON_TYPE_BINARY, "binary0", FALSE, BSON_BINARY_SUBTYPE_GENERIC,
+ "foo\0bar", 7,
+ BSON_TYPE_OID, "_id", FALSE, "1234567890ab",
+ BSON_TYPE_BOOLEAN, "TRUE", FALSE, FALSE,
+ BSON_TYPE_UTC_DATETIME, "date", FALSE, 1294860709000,
+ BSON_TYPE_TIMESTAMP, "ts", FALSE, 1294860709000,
+ BSON_TYPE_NULL, "null", FALSE,
+ BSON_TYPE_REGEXP, "foobar", FALSE, "s/foo.*bar/", "i",
+ BSON_TYPE_JS_CODE, "alert", FALSE, "alert (\"hello world!\");", -1,
+ BSON_TYPE_SYMBOL, "sex", FALSE, "Marilyn Monroe", -1,
+ BSON_TYPE_JS_CODE_W_SCOPE, "print", TRUE, "alert (v);", -1,
+ bson_build (BSON_TYPE_STRING, "v", "hello world", -1,
+ BSON_TYPE_NONE),
+ BSON_TYPE_INT32, "int32", FALSE, 32,
+ BSON_TYPE_INT64, "int64", FALSE, (gint64)-42,
+ BSON_TYPE_NONE);
+ bson_finish (b);
+
+ o = test_bson_generate_full ();
+
+ cmp_ok (bson_size (b), "==", bson_size (o),
+ "bson_build_full() and hand crafted BSON object sizes match");
+
+ ok (memcmp (bson_data (b), bson_data (o), bson_size (b)) == 0,
+ "bson_build_full() and hand crafted BSON objects match");
+
+ bson_free (b);
+ bson_free (o);
+
+ b = bson_build_full (BSON_TYPE_UNDEFINED, "undef", FALSE,
+ BSON_TYPE_NONE);
+ ok (b == NULL,
+ "bson_build_full() should fail with an unsupported element type");
+ b = bson_build_full (BSON_TYPE_STRING, "str", FALSE, "hello", -1,
+ BSON_TYPE_UNDEFINED, "undef", FALSE,
+ BSON_TYPE_NONE);
+ ok (b == NULL,
+ "bson_build_full() should fail with an unsupported element type");
+
+}
+
+RUN_TEST (4, bson_build_full);
diff --git a/tests/unit/bson/bson_cursor_find.c b/tests/unit/bson/bson_cursor_find.c
new file mode 100644
index 0000000..db1afd5
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_find.c
@@ -0,0 +1,39 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_find (void)
+{
+ bson *b;
+ bson_cursor *c;
+
+ b = test_bson_generate_full ();
+ c = bson_find (b, "TRUE");
+
+ ok (bson_cursor_find (c, NULL) == FALSE,
+ "bson_cursor_find() should fail with a NULL key");
+ ok (bson_cursor_find (NULL, "int32") == FALSE,
+ "bson_cursor_find() should fail with a NULL cursor");
+
+ ok (bson_cursor_find (c, "sex") == TRUE,
+ "bson_cursor_find() works");
+
+ ok (bson_cursor_find (c, "str") == TRUE,
+ "bson_cursor_find() should wrap over if neccessary");
+
+ ok (bson_cursor_find (c, "-invalid-key-") == FALSE,
+ "bson_cursor_find() should fail when the key is not found");
+
+ ok (bson_cursor_find (c, "int64") == TRUE,
+ "bson_cursor_find() works, even after a previous failure");
+
+ ok (bson_cursor_find (c, "int6") == FALSE,
+ "bson_cursor_find() does not match prefixes");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_cursor_find);
diff --git a/tests/unit/bson/bson_cursor_find_next.c b/tests/unit/bson/bson_cursor_find_next.c
new file mode 100644
index 0000000..96d7f0e
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_find_next.c
@@ -0,0 +1,33 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_find_next (void)
+{
+ bson *b;
+ bson_cursor *c;
+
+ b = test_bson_generate_full ();
+ c = bson_find (b, "TRUE");
+
+ ok (bson_cursor_find_next (c, NULL) == FALSE,
+ "bson_cursor_find_next() should fail with a NULL key");
+ ok (bson_cursor_find_next (NULL, "int32") == FALSE,
+ "bson_cursor_find_next() should fail with a NULL cursor");
+
+ ok (bson_cursor_find_next (c, "sex") == TRUE,
+ "bson_cursor_find_next() works");
+
+ ok (bson_cursor_find_next (c, "str") == FALSE,
+ "bson_cursor_find_next() should fail when the key is not found");
+
+ ok (bson_cursor_find_next (c, "int64") == TRUE,
+ "bson_cursor_find_next() works, even after a previous failure");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (5, bson_cursor_find_next);
diff --git a/tests/unit/bson/bson_cursor_get_array.c b/tests/unit/bson/bson_cursor_get_array.c
new file mode 100644
index 0000000..becdc5d
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_array.c
@@ -0,0 +1,44 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_array (void)
+{
+ bson *b, *a = NULL;
+ bson_cursor *c;
+
+ ok (bson_cursor_get_array (NULL, &a) == FALSE,
+ "bson_cursor_get_array() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_array (c, NULL) == FALSE,
+ "bson_cursor_get_array() with a NULL destination fails");
+ ok (bson_cursor_get_array (c, &a) == FALSE,
+ "bson_cursor_get_array() at the initial position fails");
+ ok (a == NULL,
+ "destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "array");
+ ok (bson_cursor_get_array (c, &a),
+ "bson_cursor_get_array() works");
+ cmp_ok (bson_size (a), ">", 0,
+ "the returned document is finished");
+ bson_free (a);
+
+ bson_cursor_next (c);
+
+ ok (bson_cursor_get_array (c, &a) == FALSE,
+ "bson_cursor_get_array() fails if the cursor points to "
+ "non-array data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_cursor_get_array);
diff --git a/tests/unit/bson/bson_cursor_get_binary.c b/tests/unit/bson/bson_cursor_get_binary.c
new file mode 100644
index 0000000..3ec0dc3
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_binary.c
@@ -0,0 +1,60 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_binary (void)
+{
+ bson *b;
+ bson_cursor *c;
+ const guint8 *d = (guint8 *)"deadbeef";
+ bson_binary_subtype t = 0xff;
+ gint32 s = -1;
+
+ ok (bson_cursor_get_binary (NULL, &t, &d, &s) == FALSE,
+ "bson_cursor_get_binary() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_binary (c, NULL, NULL, NULL) == FALSE,
+ "bson_cursor_get_binary() with NULL destinations fails");
+ ok (bson_cursor_get_binary (c, NULL, &d, &s) == FALSE,
+ "bson_cursor_get_binary() with a NULL subtype destination fails");
+ ok (bson_cursor_get_binary (c, &t, NULL, &s) == FALSE,
+ "bson_cursor_get_binary() with a NULL binary destination fails");
+ ok (bson_cursor_get_binary (c, &t, &d, NULL) == FALSE,
+ "bson_cursor_get_binary() with a NULL size destination fails");
+ ok (bson_cursor_get_binary (c, &t, &d, &s) == FALSE,
+ "bson_cursor_get_binary() at the initial position fails");
+ ok (memcmp (d, "deadbeef", sizeof ("deadbeef")) == 0,
+ "binary destination remains unchanged after failed cursor operations");
+ cmp_ok (t, "==", 0xff,
+ "subtype destination remains unchanged after failed cursor "
+ "operations");
+ cmp_ok (s, "==", -1,
+ "size destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "binary0");
+ ok (bson_cursor_get_binary (c, &t, &d, &s),
+ "bson_cursor_get_binary() works");
+ cmp_ok (s, "==", 7,
+ "bson_cursor_get_binary() returns the correct result");
+ ok (memcmp (d, "foo\0bar", s) == 0,
+ "bson_cursor_get_binary() returns the correct result");
+ cmp_ok (t, "==", BSON_BINARY_SUBTYPE_GENERIC,
+ "bson_cursor_get_binary() returns the correct result");
+
+ bson_cursor_next (c);
+ ok (bson_cursor_get_binary (c, &t, &d, &s) == FALSE,
+ "bson_cursor_get_binary() should fail when the cursor points to "
+ "non-binary data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (14, bson_cursor_get_binary);
diff --git a/tests/unit/bson/bson_cursor_get_boolean.c b/tests/unit/bson/bson_cursor_get_boolean.c
new file mode 100644
index 0000000..079d2b2
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_boolean.c
@@ -0,0 +1,43 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_boolean (void)
+{
+ bson *b;
+ bson_cursor *c;
+ gboolean d = TRUE;
+
+ ok (bson_cursor_get_boolean (NULL, &d) == FALSE,
+ "bson_cursor_get_boolean() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_boolean (c, NULL) == FALSE,
+ "bson_cursor_get_boolean() with a NULL destination fails");
+ ok (bson_cursor_get_boolean (c, &d) == FALSE,
+ "bson_cursor_get_boolean() at the initial position fails");
+ cmp_ok (d, "==", TRUE,
+ "destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "TRUE");
+ ok (bson_cursor_get_boolean (c, &d),
+ "bson_cursor_get_boolean() works");
+ cmp_ok (d, "==", FALSE,
+ "bson_cursor_get_boolean() returns the correct result");
+
+ bson_cursor_next (c);
+ ok (bson_cursor_get_boolean (c, &d) == FALSE,
+ "bson_cursor_get_boolean() should fail when the cursor points to "
+ "non-double data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_cursor_get_boolean);
diff --git a/tests/unit/bson/bson_cursor_get_document.c b/tests/unit/bson/bson_cursor_get_document.c
new file mode 100644
index 0000000..107ecf5
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_document.c
@@ -0,0 +1,43 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_document (void)
+{
+ bson *b, *d = NULL;
+ bson_cursor *c;
+
+ ok (bson_cursor_get_document (NULL, &d) == FALSE,
+ "bson_cursor_get_document() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_document (c, NULL) == FALSE,
+ "bson_cursor_get_document() with a NULL destination fails");
+ ok (bson_cursor_get_document (c, &d) == FALSE,
+ "bson_cursor_get_document() at the initial position fails");
+ ok (d == NULL,
+ "destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "doc");
+ ok (bson_cursor_get_document (c, &d),
+ "bson_cursor_get_document() works");
+ cmp_ok (bson_size (d), ">", 0,
+ "the returned document is finished");
+ bson_free (d);
+
+ bson_cursor_next (c);
+ ok (bson_cursor_get_document (c, &d) == FALSE,
+ "bson_cursor_get_document() fails if the cursor points to "
+ "non-document data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_cursor_get_document);
diff --git a/tests/unit/bson/bson_cursor_get_double.c b/tests/unit/bson/bson_cursor_get_double.c
new file mode 100644
index 0000000..7b5cdff
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_double.c
@@ -0,0 +1,43 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_double (void)
+{
+ bson *b;
+ bson_cursor *c;
+ gdouble d = 12.34;
+
+ ok (bson_cursor_get_double (NULL, &d) == FALSE,
+ "bson_cursor_get_double() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_double (c, NULL) == FALSE,
+ "bson_cursor_get_double() with a NULL destination fails");
+ ok (bson_cursor_get_double (c, &d) == FALSE,
+ "bson_cursor_get_double() at the initial position fails");
+ ok (d == 12.34,
+ "destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "double");
+ ok (bson_cursor_get_double (c, &d),
+ "bson_cursor_get_double() works");
+ ok (d == 3.14,
+ "bson_cursor_get_double() returns the correct result");
+
+ bson_cursor_next (c);
+ ok (bson_cursor_get_double (c, &d) == FALSE,
+ "bson_cursor_get_double() should fail when the cursor points to "
+ "non-double data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_cursor_get_double);
diff --git a/tests/unit/bson/bson_cursor_get_int32.c b/tests/unit/bson/bson_cursor_get_int32.c
new file mode 100644
index 0000000..caea604
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_int32.c
@@ -0,0 +1,43 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_int32 (void)
+{
+ bson *b;
+ bson_cursor *c;
+ gint d = 12345;
+
+ ok (bson_cursor_get_int32 (NULL, &d) == FALSE,
+ "bson_cursor_get_int32() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_int32 (c, NULL) == FALSE,
+ "bson_cursor_get_int32() with a NULL destination fails");
+ ok (bson_cursor_get_int32 (c, &d) == FALSE,
+ "bson_cursor_get_int32() at the initial position fails");
+ cmp_ok (d, "==", 12345,
+ "destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "int32");
+ ok (bson_cursor_get_int32 (c, &d),
+ "bson_cursor_get_int32() works");
+ cmp_ok (d, "==", 32,
+ "bson_cursor_get_int32() returns the correct result");
+
+ bson_cursor_next (c);
+ ok (bson_cursor_get_int32 (c, &d) == FALSE,
+ "bson_cursor_get_int32() should fail when the cursor points to "
+ "non-int32 data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_cursor_get_int32);
diff --git a/tests/unit/bson/bson_cursor_get_int64.c b/tests/unit/bson/bson_cursor_get_int64.c
new file mode 100644
index 0000000..d1c80f5
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_int64.c
@@ -0,0 +1,45 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_int64 (void)
+{
+ bson *b;
+ bson_cursor *c;
+ gint64 d = (gint64)987654;
+
+ ok (bson_cursor_get_int64 (NULL, &d) == FALSE,
+ "bson_cursor_get_int64() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_int64 (c, NULL) == FALSE,
+ "bson_cursor_get_int64() with a NULL destination fails");
+ ok (bson_cursor_get_int64 (c, &d) == FALSE,
+ "bson_cursor_get_int64() at the initial position fails");
+ cmp_ok (d, "==", 987654,
+ "destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "int64");
+ ok (bson_cursor_get_int64 (c, &d),
+ "bson_cursor_get_int64() works");
+ cmp_ok (d, "==", (gint64)-42,
+ "bson_cursor_get_int64() returns the correct result");
+
+ bson_cursor_free (c);
+
+ c = bson_find (b, "double");
+ ok (bson_cursor_get_int64 (c, &d) == FALSE,
+ "bson_cursor_get_int64() should fail when the cursor points to "
+ "non-int64 data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_cursor_get_int64);
diff --git a/tests/unit/bson/bson_cursor_get_javascript.c b/tests/unit/bson/bson_cursor_get_javascript.c
new file mode 100644
index 0000000..4231cbd
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_javascript.c
@@ -0,0 +1,43 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_javascript (void)
+{
+ bson *b;
+ bson_cursor *c;
+ const gchar *s = "deadbeef";
+
+ ok (bson_cursor_get_javascript (NULL, &s) == FALSE,
+ "bson_cursor_get_javascript() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_javascript (c, NULL) == FALSE,
+ "bson_cursor_get_javascript() with a NULL destination fails");
+ ok (bson_cursor_get_javascript (c, &s) == FALSE,
+ "bson_cursor_get_javascript() at the initial position fails");
+ is (s, "deadbeef",
+ "destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "alert");
+ ok (bson_cursor_get_javascript (c, &s),
+ "bson_cursor_get_javascript() works");
+ is (s, "alert (\"hello world!\");",
+ "bson_cursor_get_javascript() returns the correct result");
+
+ bson_cursor_next (c);
+ ok (bson_cursor_get_javascript (c, &s) == FALSE,
+ "bson_cursor_get_javascript() should fail when the cursor points to "
+ "non-javascript data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_cursor_get_javascript);
diff --git a/tests/unit/bson/bson_cursor_get_javascript_w_scope.c b/tests/unit/bson/bson_cursor_get_javascript_w_scope.c
new file mode 100644
index 0000000..2e0b9ca
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_javascript_w_scope.c
@@ -0,0 +1,57 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_javascript_w_scope (void)
+{
+ bson *b, *scope = NULL, *valid;
+ bson_cursor *c;
+ const gchar *s = "deadbeef";
+
+ ok (bson_cursor_get_javascript_w_scope (NULL, &s, &scope) == FALSE,
+ "bson_cursor_get_javascript_w_scope() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_javascript_w_scope (c, NULL, &scope) == FALSE,
+ "bson_cursor_get_javascript_w_scope() with a NULL js destination fails");
+ ok (bson_cursor_get_javascript_w_scope (c, &s, NULL) == FALSE,
+ "bson_cursor_get_javascript_w_scope() with a NULL scope destination fails");
+ ok (bson_cursor_get_javascript_w_scope (c, &s, &scope) == FALSE,
+ "bson_cursor_get_javascript_w_scope() at the initial position fails");
+ is (s, "deadbeef",
+ "destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "print");
+ ok (bson_cursor_get_javascript_w_scope (c, &s, &scope),
+ "bson_cursor_get_javascript_w_scope() works");
+ is (s, "alert (v);",
+ "bson_cursor_get_javascript_w_scope() returns the correct result");
+
+ valid = bson_new ();
+ bson_append_string (valid, "v", "hello world", -1);
+ bson_finish (valid);
+
+ cmp_ok (bson_size (scope), "==", bson_size (valid),
+ "The returned scope's length is correct");
+ ok (memcmp (bson_data (scope), bson_data (valid),
+ bson_size (scope)) == 0,
+ "The returned scope is correct");
+ bson_free (valid);
+
+ bson_cursor_next (c);
+ ok (bson_cursor_get_javascript_w_scope (c, &s, &scope) == FALSE,
+ "bson_cursor_get_javascript_w_scope() should fail when the cursor "
+ "points to non-javascript data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+ bson_free (scope);
+}
+
+RUN_TEST (10, bson_cursor_get_javascript_w_scope);
diff --git a/tests/unit/bson/bson_cursor_get_oid.c b/tests/unit/bson/bson_cursor_get_oid.c
new file mode 100644
index 0000000..5c2d77b
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_oid.c
@@ -0,0 +1,43 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_oid (void)
+{
+ bson *b;
+ bson_cursor *c;
+ const gchar *s = "abababababab";
+
+ ok (bson_cursor_get_oid (NULL, (const guint8 **)&s) == FALSE,
+ "bson_cursor_get_oid() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_oid (c, NULL) == FALSE,
+ "bson_cursor_get_oid() with a NULL destination fails");
+ ok (bson_cursor_get_oid (c, (const guint8 **)&s) == FALSE,
+ "bson_cursor_get_oid() at the initial position fails");
+ ok (memcmp (s, "abababababab", 12) == 0,
+ "destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "_id");
+ ok (bson_cursor_get_oid (c, (const guint8 **)&s),
+ "bson_cursor_get_oid() works");
+ ok (memcmp (s, "1234567890ab", 12) == 0,
+ "bson_cursor_get_oid() returns the correct result");
+
+ bson_cursor_next (c);
+ ok (bson_cursor_get_oid (c, (const guint8 **)&s) == FALSE,
+ "bson_cursor_get_oid() should fail when the cursor points to "
+ "non-oid data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_cursor_get_oid);
diff --git a/tests/unit/bson/bson_cursor_get_regex.c b/tests/unit/bson/bson_cursor_get_regex.c
new file mode 100644
index 0000000..59edefd
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_regex.c
@@ -0,0 +1,52 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_regex (void)
+{
+ bson *b;
+ bson_cursor *c;
+ const gchar *r = "deadbeef";
+ const gchar *o = "g";
+
+ ok (bson_cursor_get_regex (NULL, &r, &o) == FALSE,
+ "bson_cursor_get_regex() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_regex (c, NULL, NULL) == FALSE,
+ "bson_cursor_get_regex() with NULL destinations fails");
+ ok (bson_cursor_get_regex (c, &r, NULL) == FALSE,
+ "bson_cursor_get_regex() with a NULL option destination fails");
+ ok (bson_cursor_get_regex (c, NULL, &o) == FALSE,
+ "bson_cursor_get_regex() with a NULL regex destination fails");
+ ok (bson_cursor_get_regex (c, &r, &o) == FALSE,
+ "bson_cursor_get_regex() at the initial position fails");
+ is (r, "deadbeef",
+ "regex destination remains unchanged after failed cursor operations");
+ is (o, "g",
+ "options destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "foobar");
+ ok (bson_cursor_get_regex (c, &r, &o),
+ "bson_cursor_get_regex() works");
+ is (r, "s/foo.*bar/",
+ "bson_cursor_get_regex() returns the correct result");
+ is (o, "i",
+ "bson_cursor_get_regex() returns the correct options");
+
+ bson_cursor_next (c);
+ ok (bson_cursor_get_regex (c, &r, &o) == FALSE,
+ "bson_cursor_get_regex() should fail when the cursor points to "
+ "non-regex data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (11, bson_cursor_get_regex);
diff --git a/tests/unit/bson/bson_cursor_get_string.c b/tests/unit/bson/bson_cursor_get_string.c
new file mode 100644
index 0000000..18662bb
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_string.c
@@ -0,0 +1,43 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_string (void)
+{
+ bson *b;
+ bson_cursor *c;
+ const gchar *s = "deadbeef";
+
+ ok (bson_cursor_get_string (NULL, &s) == FALSE,
+ "bson_cursor_get_string() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_string (c, NULL) == FALSE,
+ "bson_cursor_get_string() with a NULL destination fails");
+ ok (bson_cursor_get_string (c, &s) == FALSE,
+ "bson_cursor_get_string() at the initial position fails");
+ is (s, "deadbeef",
+ "destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "str");
+ ok (bson_cursor_get_string (c, &s),
+ "bson_cursor_get_string() works");
+ is (s, "hello world",
+ "bson_cursor_get_string() returns the correct result");
+
+ bson_cursor_next (c);
+ ok (bson_cursor_get_string (c, &s) == FALSE,
+ "bson_cursor_get_string() should fail when the cursor points to "
+ "non-string data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_cursor_get_string);
diff --git a/tests/unit/bson/bson_cursor_get_symbol.c b/tests/unit/bson/bson_cursor_get_symbol.c
new file mode 100644
index 0000000..785e71b
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_symbol.c
@@ -0,0 +1,43 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_symbol (void)
+{
+ bson *b;
+ bson_cursor *c;
+ const gchar *s = "deadbeef";
+
+ ok (bson_cursor_get_symbol (NULL, &s) == FALSE,
+ "bson_cursor_get_symbol() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_symbol (c, NULL) == FALSE,
+ "bson_cursor_get_symbol() with a NULL destination fails");
+ ok (bson_cursor_get_symbol (c, &s) == FALSE,
+ "bson_cursor_get_symbol() at the initial position fails");
+ is (s, "deadbeef",
+ "destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "sex");
+ ok (bson_cursor_get_symbol (c, &s),
+ "bson_cursor_get_symbol() works");
+ is (s, "Marilyn Monroe",
+ "bson_cursor_get_symbol() returns the correct result");
+
+ bson_cursor_next (c);
+ ok (bson_cursor_get_symbol (c, &s) == FALSE,
+ "bson_cursor_get_symbol() should fail when the cursor points to "
+ "non-symbol data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_cursor_get_symbol);
diff --git a/tests/unit/bson/bson_cursor_get_timestamp.c b/tests/unit/bson/bson_cursor_get_timestamp.c
new file mode 100644
index 0000000..3bfc86c
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_timestamp.c
@@ -0,0 +1,43 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_timestamp (void)
+{
+ bson *b;
+ bson_cursor *c;
+ gint64 d = (gint64)987654;
+
+ ok (bson_cursor_get_timestamp (NULL, &d) == FALSE,
+ "bson_cursor_get_timestamp() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_timestamp (c, NULL) == FALSE,
+ "bson_cursor_get_timestamp() with a NULL destination fails");
+ ok (bson_cursor_get_timestamp (c, &d) == FALSE,
+ "bson_cursor_get_timestamp() at the initial position fails");
+ cmp_ok (d, "==", 987654,
+ "destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "ts");
+ ok (bson_cursor_get_timestamp (c, &d),
+ "bson_cursor_get_timestamp() works");
+ ok (d == 1294860709000,
+ "bson_cursor_get_timestamp() returns the correct result");
+
+ bson_cursor_next (c);
+ ok (bson_cursor_get_timestamp (c, &d) == FALSE,
+ "bson_cursor_get_timestamp() should fail when the cursor points to "
+ "non-timestamp data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_cursor_get_timestamp);
diff --git a/tests/unit/bson/bson_cursor_get_utc_datetime.c b/tests/unit/bson/bson_cursor_get_utc_datetime.c
new file mode 100644
index 0000000..70e1332
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_get_utc_datetime.c
@@ -0,0 +1,43 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_get_utc_datetime (void)
+{
+ bson *b;
+ bson_cursor *c;
+ gint64 d = (gint64)987654;
+
+ ok (bson_cursor_get_utc_datetime (NULL, &d) == FALSE,
+ "bson_cursor_get_utc_datetime() with a NULL cursor fails");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_get_utc_datetime (c, NULL) == FALSE,
+ "bson_cursor_get_utc_datetime() with a NULL destination fails");
+ ok (bson_cursor_get_utc_datetime (c, &d) == FALSE,
+ "bson_cursor_get_utc_datetime() at the initial position fails");
+ cmp_ok (d, "==", 987654,
+ "destination remains unchanged after failed cursor operations");
+ bson_cursor_free (c);
+
+ c = bson_find (b, "date");
+ ok (bson_cursor_get_utc_datetime (c, &d),
+ "bson_cursor_get_utc_datetime() works");
+ ok (d == 1294860709000,
+ "bson_cursor_get_utc_datetime() returns the correct result");
+
+ bson_cursor_next (c);
+ ok (bson_cursor_get_utc_datetime (c, &d) == FALSE,
+ "bson_cursor_get_utc_datetime() should fail when the cursor points to "
+ "non-datetime data");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (7, bson_cursor_get_utc_datetime);
diff --git a/tests/unit/bson/bson_cursor_key.c b/tests/unit/bson/bson_cursor_key.c
new file mode 100644
index 0000000..7db98b6
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_key.c
@@ -0,0 +1,30 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_key (void)
+{
+ bson *b;
+ bson_cursor *c;
+
+ is (bson_cursor_key (NULL), NULL,
+ "bson_cursor_key(NULL) should fail");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ is (bson_cursor_key (c), NULL,
+ "bson_cursor_key() should fail at the initial position");
+ bson_cursor_next (c);
+
+ is (bson_cursor_key (c), "double",
+ "bson_cursor_key() works");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (3, bson_cursor_key);
diff --git a/tests/unit/bson/bson_cursor_new.c b/tests/unit/bson/bson_cursor_new.c
new file mode 100644
index 0000000..7bcb32b
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_new.c
@@ -0,0 +1,28 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_new (void)
+{
+ bson *b;
+ bson_cursor *c;
+
+ ok (bson_cursor_new (NULL) == NULL,
+ "bson_cursor_new(NULL) should fail");
+
+ b = bson_new ();
+ ok (bson_cursor_new (b) == NULL,
+ "bson_cursor_new() should fail with an unfinished BSON object");
+ bson_free (b);
+
+ b = test_bson_generate_full ();
+ ok ((c = bson_cursor_new (b)) != NULL,
+ "bson_cursor_new() works");
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (3, bson_cursor_new);
diff --git a/tests/unit/bson/bson_cursor_next.c b/tests/unit/bson/bson_cursor_next.c
new file mode 100644
index 0000000..a2fc137
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_next.c
@@ -0,0 +1,42 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_next (void)
+{
+ bson *b;
+ bson_cursor *c;
+
+ ok (bson_cursor_next (NULL) == FALSE,
+ "bson_cursor_next (NULL) should fail");
+
+ b = bson_new ();
+ bson_finish (b);
+ c = bson_cursor_new (b);
+
+ ok (bson_cursor_next (c) == FALSE,
+ "bson_cursor_next() should fail with an empty document");
+
+ bson_cursor_free (c);
+ bson_free (b);
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+ ok (bson_cursor_next (c),
+ "initial bson_cursor_next() works");
+ ok (bson_cursor_next (c),
+ "subsequent bson_cursor_next() works too");
+
+ while (bson_cursor_next (c)) ;
+
+ ok (bson_cursor_next (c) == FALSE,
+ "bson_cursor_next() fails after the end of the BSON object");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (5, bson_cursor_next);
diff --git a/tests/unit/bson/bson_cursor_type.c b/tests/unit/bson/bson_cursor_type.c
new file mode 100644
index 0000000..86be005
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_type.c
@@ -0,0 +1,30 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_type (void)
+{
+ bson *b;
+ bson_cursor *c;
+
+ cmp_ok (bson_cursor_type (NULL), "==", BSON_TYPE_NONE,
+ "bson_cursor_type(NULL) should fail");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_NONE,
+ "bson_cursor_type() should fail at the beginning of the BSON "
+ "object");
+ bson_cursor_next (c);
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_DOUBLE,
+ "bson_cursor_type() works");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (3, bson_cursor_type);
diff --git a/tests/unit/bson/bson_cursor_type_as_string.c b/tests/unit/bson/bson_cursor_type_as_string.c
new file mode 100644
index 0000000..8ee6fea
--- /dev/null
+++ b/tests/unit/bson/bson_cursor_type_as_string.c
@@ -0,0 +1,31 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_cursor_type_as_string (void)
+{
+ bson *b;
+ bson_cursor *c;
+
+ is (bson_cursor_type_as_string (NULL), NULL,
+ "bson_cursor_type_as_string(NULL) should fail");
+
+ b = test_bson_generate_full ();
+ c = bson_cursor_new (b);
+
+ is (bson_cursor_type_as_string (c), NULL,
+ "bson_cursor_type_as_string() should fail at the initial position");
+ bson_cursor_next (c);
+
+ is (bson_cursor_type_as_string (c),
+ bson_type_as_string (bson_cursor_type (c)),
+ "bson_cursor_type_as_string() works");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (3, bson_cursor_type_as_string);
diff --git a/tests/unit/bson/bson_empty.c b/tests/unit/bson/bson_empty.c
new file mode 100644
index 0000000..69d840a
--- /dev/null
+++ b/tests/unit/bson/bson_empty.c
@@ -0,0 +1,22 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_empty (void)
+{
+ bson *b;
+
+ b = bson_new ();
+ bson_finish (b);
+
+ cmp_ok (bson_size (b), "==", 5, "Empty BSON size check");
+ ok (memcmp (bson_data (b), "\005\000\000\000\000", bson_size (b)) == 0,
+ "Empty BSON contents check");
+
+ bson_free (b);
+}
+
+RUN_TEST (2, bson_empty)
diff --git a/tests/unit/bson/bson_find.c b/tests/unit/bson/bson_find.c
new file mode 100644
index 0000000..54e8767
--- /dev/null
+++ b/tests/unit/bson/bson_find.c
@@ -0,0 +1,34 @@
+#include "tap.h"
+#include "test.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_bson_find (void)
+{
+ bson *b;
+ bson_cursor *c;
+
+ ok (bson_find (NULL, NULL) == NULL,
+ "bson_find() with NULL parameters should fail");
+ ok (bson_find (NULL, "key") == NULL,
+ "bson_find() with a NULL BSON object should fail");
+ b = bson_new ();
+ ok (bson_find (b, "key") == NULL,
+ "bson_find() with an unfinished BSON object should fail");
+ bson_free (b);
+
+ b = test_bson_generate_full ();
+ ok (bson_find (b, NULL) == FALSE,
+ "bson_find() with a NULL key should fail");
+ ok (bson_find (b, "__invalid__") == FALSE,
+ "bson_find() with a non-existent key should fail");
+ ok ((c = bson_find (b, "alert")) != NULL,
+ "bson_find() works");
+
+ bson_cursor_free (c);
+ bson_free (b);
+}
+
+RUN_TEST (6, bson_find);
diff --git a/tests/unit/bson/bson_new.c b/tests/unit/bson/bson_new.c
new file mode 100644
index 0000000..3149027
--- /dev/null
+++ b/tests/unit/bson/bson_new.c
@@ -0,0 +1,28 @@
+#include "bson.h"
+#include "test.h"
+#include "tap.h"
+
+#include <string.h>
+
+void
+test_bson_new (void)
+{
+ bson *b;
+
+ ok ((b = bson_new ()) != NULL, "bson_new() works");
+ ok (bson_data (b) == NULL,
+ "bson_data() with an unfinished object should fail");
+ ok (bson_size (b) == -1,
+ "bson_size() with an unfinished object should fail");
+ ok (bson_finish (b), "bson_finish() works");
+ ok (bson_finish (b),
+ "bson_finish() works on an already finished object too");
+ bson_free (b);
+
+ ok (bson_size (NULL) == -1, "bson_size(NULL) works correctly");
+ ok (bson_data (NULL) == NULL, "bson_data(NULL) works correctly");
+ ok (bson_finish (NULL) == FALSE, "bson_finish(NULL) works correctly");
+ bson_free (NULL);
+}
+
+RUN_TEST (8, bson_new);
diff --git a/tests/unit/bson/bson_new_from_data.c b/tests/unit/bson/bson_new_from_data.c
new file mode 100644
index 0000000..740cb6e
--- /dev/null
+++ b/tests/unit/bson/bson_new_from_data.c
@@ -0,0 +1,46 @@
+#include "bson.h"
+#include "test.h"
+#include "tap.h"
+
+#include <string.h>
+
+void
+test_bson_new_from_data (void)
+{
+ bson *orig, *new;
+
+ orig = test_bson_generate_full ();
+
+ ok (bson_new_from_data (NULL, 0) == NULL,
+ "bson_new_from_data (NULL, 0) fails");
+ ok (bson_new_from_data (NULL, bson_size (orig)) == NULL,
+ "bson_new_from_data (NULL, size) fails");
+ ok (bson_new_from_data (bson_data (orig), 0) == NULL,
+ "bson_new_from_data (orig, 0) fails");
+ ok (bson_new_from_data (bson_data (orig), -1) == NULL,
+ "bson_new_from_data (orig, -1) fails");
+ ok (bson_new_from_data (NULL, -1) == NULL,
+ "bson_new_from_data (NULL, -1) fails");
+
+ ok ((new = bson_new_from_data (bson_data (orig),
+ bson_size (orig) - 1)) != NULL,
+ "bson_new_from_data() works");
+ cmp_ok (bson_size (new), "==", -1,
+ "Copied object is unfinished");
+ bson_finish (new);
+
+ ok (orig != new, "Copied BSON object is not the same as the original");
+
+ cmp_ok (bson_size (orig), "==", bson_size (new),
+ "Copied (& finished) object has the same size as the original");
+ ok (bson_data (orig) != bson_data (new),
+ "The copied data is not the same as the original");
+ ok (memcmp (bson_data (orig), bson_data (new),
+ bson_size (orig)) == 0,
+ "The copied data is identical to the original");
+
+ bson_free (orig);
+ bson_free (new);
+}
+
+RUN_TEST (11, bson_new_from_data);
diff --git a/tests/unit/bson/bson_reset.c b/tests/unit/bson/bson_reset.c
new file mode 100644
index 0000000..23f2ce6
--- /dev/null
+++ b/tests/unit/bson/bson_reset.c
@@ -0,0 +1,27 @@
+#include "bson.h"
+#include "test.h"
+#include "tap.h"
+
+void
+test_bson_reset (void)
+{
+ bson *b;
+
+ b = test_bson_generate_full ();
+
+ cmp_ok (bson_size (b), "!=", -1,
+ "bson_size() != -1 on a non-empty document");
+ ok (bson_reset (b), "bson_reset() works");
+ cmp_ok (bson_size (b), "==", -1,
+ "bson_size() on a reset object returns an error");
+ bson_finish (b);
+ cmp_ok (bson_size (b), "==", 5,
+ "bson_size() on a reset & finished object matches the "
+ "size of an empty document");
+ bson_free (b);
+
+ ok (bson_reset (NULL) == FALSE,
+ "bson_reset(NULL) should fail");
+}
+
+RUN_TEST (5, bson_reset);
diff --git a/tests/unit/bson/bson_type_as_string.c b/tests/unit/bson/bson_type_as_string.c
new file mode 100644
index 0000000..35e8210
--- /dev/null
+++ b/tests/unit/bson/bson_type_as_string.c
@@ -0,0 +1,40 @@
+#include "bson.h"
+#include "test.h"
+#include "tap.h"
+
+#include <string.h>
+
+#define CHECK_TYPE(t) \
+ is (bson_type_as_string (t), #t, \
+ "bson_type_as_string(%s) works", #t)
+
+void
+test_bson_type_as_string (void)
+{
+ CHECK_TYPE (BSON_TYPE_NONE);
+ CHECK_TYPE (BSON_TYPE_DOUBLE);
+ CHECK_TYPE (BSON_TYPE_STRING);
+ CHECK_TYPE (BSON_TYPE_DOCUMENT);
+ CHECK_TYPE (BSON_TYPE_ARRAY);
+ CHECK_TYPE (BSON_TYPE_BINARY);
+ CHECK_TYPE (BSON_TYPE_UNDEFINED);
+ CHECK_TYPE (BSON_TYPE_OID);
+ CHECK_TYPE (BSON_TYPE_BOOLEAN);
+ CHECK_TYPE (BSON_TYPE_UTC_DATETIME);
+ CHECK_TYPE (BSON_TYPE_NULL);
+ CHECK_TYPE (BSON_TYPE_REGEXP);
+ CHECK_TYPE (BSON_TYPE_DBPOINTER);
+ CHECK_TYPE (BSON_TYPE_JS_CODE);
+ CHECK_TYPE (BSON_TYPE_SYMBOL);
+ CHECK_TYPE (BSON_TYPE_JS_CODE_W_SCOPE);
+ CHECK_TYPE (BSON_TYPE_INT32);
+ CHECK_TYPE (BSON_TYPE_TIMESTAMP);
+ CHECK_TYPE (BSON_TYPE_INT64);
+ CHECK_TYPE (BSON_TYPE_MIN);
+ CHECK_TYPE (BSON_TYPE_MAX);
+
+ ok (bson_type_as_string (42) == NULL,
+ "bson_type_as_string() returns NULL on invalid type.");
+}
+
+RUN_TEST (22, bson_type_as_string);
diff --git a/tests/unit/bson/bson_validate_key.c b/tests/unit/bson/bson_validate_key.c
new file mode 100644
index 0000000..126b1fd
--- /dev/null
+++ b/tests/unit/bson/bson_validate_key.c
@@ -0,0 +1,36 @@
+#include "tap.h"
+#include "test.h"
+
+#include <errno.h>
+#include <bson.h>
+#include <string.h>
+
+void
+test_bson_validate_key (void)
+{
+ gboolean valid;
+
+ valid = bson_validate_key (NULL, FALSE, FALSE);
+ ok (valid == FALSE && errno == EINVAL,
+ "bson_validate_key() sets errno when the key is NULL");
+
+ valid = bson_validate_key ("$foo.bar", FALSE, FALSE);
+ ok (valid == TRUE,
+ "bson_validate_key() returns success if both checks are off");
+
+ valid = bson_validate_key ("$foo.bar", FALSE, TRUE);
+ ok (valid == FALSE,
+ "bson_validate_key() returns failure if the key starts with a $");
+ valid = bson_validate_key ("foo.bar$", FALSE, TRUE);
+ ok (valid == TRUE,
+ "bson_validate_key() returns success if the key does not start with a $");
+
+ valid = bson_validate_key ("foo.bar", TRUE, TRUE);
+ ok (valid == FALSE,
+ "bson_validate_key() returns failure if the key contains a dot");
+ valid = bson_validate_key ("foobar", TRUE, TRUE);
+ ok (valid == TRUE,
+ "bson_validate_key() returns success if the key does not contain a dot");
+}
+
+RUN_TEST (6, bson_validate_key)
diff --git a/tests/unit/mongo/client/connect.c b/tests/unit/mongo/client/connect.c
new file mode 100644
index 0000000..fc390ea
--- /dev/null
+++ b/tests/unit/mongo/client/connect.c
@@ -0,0 +1,34 @@
+#include "test.h"
+#include "tap.h"
+#include "mongo-client.h"
+
+#include <errno.h>
+
+void
+test_mongo_connect (void)
+{
+ mongo_connection *c;
+
+ ok (mongo_connect (NULL, 27010) == NULL,
+ "mongo_connect() fails with a NULL host");
+ ok (errno == EINVAL,
+ "mongo_connect() should fail with EINVAL if host is NULL");
+
+ begin_network_tests (4);
+
+ ok (mongo_connect ("invalid.example.com", 27017) == NULL,
+ "Connecting to an invalid host fails");
+ ok (mongo_connect ("example.com", 27017) == NULL,
+ "Connecting to an unavailable host/port fails");
+ ok (mongo_connect ("/does/not/exist.sock", MONGO_CONN_LOCAL) == NULL,
+ "Connecting to an unavailable unix socket fails");
+
+ ok ((c = mongo_connect (config.primary_host,
+ config.primary_port)) != NULL,
+ "Connecting to the primary server works");
+ mongo_disconnect (c);
+
+ end_network_tests ();
+}
+
+RUN_TEST (6, mongo_connect);
diff --git a/tests/unit/mongo/client/connection_get_requestid.c b/tests/unit/mongo/client/connection_get_requestid.c
new file mode 100644
index 0000000..9232689
--- /dev/null
+++ b/tests/unit/mongo/client/connection_get_requestid.c
@@ -0,0 +1,44 @@
+#include "test.h"
+#include "mongo.h"
+
+#include "libmongo-private.h"
+
+void
+test_mongo_connection_get_requestid (void)
+{
+ mongo_connection c, *conn;
+ mongo_packet *p;
+ bson *b;
+ gint reqid;
+
+ c.request_id = 42;
+
+ ok (mongo_connection_get_requestid (NULL) == -1,
+ "mongo_connection_get_requestid() fails with a NULL connection");
+ ok (mongo_connection_get_requestid (&c) == 42,
+ "mongo_connection_get_requestid() works");
+
+ begin_network_tests (2);
+
+ b = bson_new ();
+ bson_append_int32 (b, "getnonce", 1);
+ bson_finish (b);
+
+ p = mongo_wire_cmd_custom (42, config.db, 0, b);
+ bson_free (b);
+
+ conn = mongo_connect (config.primary_host, config.primary_port);
+ cmp_ok ((reqid = mongo_connection_get_requestid (conn)), "==", 0,
+ "Initial request id is 0");
+ mongo_packet_send (conn, p);
+ mongo_wire_packet_free (p);
+
+ cmp_ok (reqid, "<", mongo_connection_get_requestid (conn),
+ "Old request ID is smaller than the new one");
+
+ mongo_disconnect (conn);
+
+ end_network_tests ();
+}
+
+RUN_TEST (4, mongo_connection_get_requestid);
diff --git a/tests/unit/mongo/client/connection_set_timeout.c b/tests/unit/mongo/client/connection_set_timeout.c
new file mode 100644
index 0000000..02468bf
--- /dev/null
+++ b/tests/unit/mongo/client/connection_set_timeout.c
@@ -0,0 +1,33 @@
+#include "test.h"
+#include "mongo.h"
+
+#include "libmongo-private.h"
+
+void
+test_mongo_connection_set_timeout (void)
+{
+ mongo_connection c, *conn;
+
+ c.fd = -1;
+
+ ok (mongo_connection_set_timeout (NULL, 100) == FALSE,
+ "mongo_connection_set_timeout() should fail with a NULL connection");
+ ok (mongo_connection_set_timeout (&c, -1) == FALSE,
+ "mongo_connection_set_timeout() should fail with a negative timeout");
+ ok (mongo_connection_set_timeout (&c, 100) == FALSE,
+ "mongo_connection_set_timeout() should fail with an invalid FD");
+
+ begin_network_tests (0);
+
+ conn = mongo_connect (config.primary_host, config.primary_port);
+
+ /* No verification here, as some systems may or may not support
+ this, thus, failing in a test is not fatal. */
+ mongo_connection_set_timeout (conn, 100);
+
+ mongo_disconnect (conn);
+
+ end_network_tests ();
+}
+
+RUN_TEST (3, mongo_connection_set_timeout);
diff --git a/tests/unit/mongo/client/disconnect.c b/tests/unit/mongo/client/disconnect.c
new file mode 100644
index 0000000..1b0be93
--- /dev/null
+++ b/tests/unit/mongo/client/disconnect.c
@@ -0,0 +1,32 @@
+#include "test.h"
+#include "tap.h"
+#include "mongo-client.h"
+
+#include "libmongo-private.h"
+#include <errno.h>
+
+void
+test_mongo_disconnect (void)
+{
+ mongo_connection *conn;
+
+ conn = g_new0 (mongo_connection, 1);
+ conn->fd = -1;
+
+ errno = 0;
+ mongo_disconnect (NULL);
+ ok (errno == ENOTCONN,
+ "mongo_disconnect() fails with ENOTCONN when passed a NULL connection");
+
+ mongo_disconnect (conn);
+ ok (errno == 0,
+ "mongo_disconnect() works");
+
+ conn = g_new0 (mongo_connection, 1);
+ conn->fd = 100;
+ mongo_disconnect (conn);
+ ok (errno == 0,
+ "mongo_disconnect() works, even with a bogus FD");
+}
+
+RUN_TEST (3, mongo_disconnect);
diff --git a/tests/unit/mongo/client/packet_recv.c b/tests/unit/mongo/client/packet_recv.c
new file mode 100644
index 0000000..51ccb3d
--- /dev/null
+++ b/tests/unit/mongo/client/packet_recv.c
@@ -0,0 +1,56 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+
+#include "libmongo-private.h"
+
+void
+test_mongo_packet_recv (void)
+{
+ mongo_connection c, *conn;
+ mongo_packet *p;
+ bson *b;
+
+ c.fd = -1;
+
+ ok (mongo_packet_recv (NULL) == NULL,
+ "mongo_packet_recv() fails with a NULL connection");
+ ok (errno == ENOTCONN,
+ "mongo_packet_recv() sets errno to ENOTCONN if connection is NULL");
+
+ ok (mongo_packet_recv (&c) == NULL,
+ "mongo_packet_recv() fails if the FD is less than zero");
+ ok (errno == EBADF,
+ "mongo_packet_recv() sets errno to EBADF if the FD is bad");
+
+ begin_network_tests (2);
+
+ b = bson_new ();
+ bson_append_int32 (b, "getnonce", 1);
+ bson_finish (b);
+
+ p = mongo_wire_cmd_custom (42, config.db, 0, b);
+ bson_free (b);
+
+ conn = mongo_connect (config.primary_host, config.primary_port);
+ mongo_packet_send (conn, p);
+ mongo_wire_packet_free (p);
+
+ ok ((p = mongo_packet_recv (conn)) != NULL,
+ "mongo_packet_recv() works");
+ mongo_wire_packet_free (p);
+
+ close (conn->fd);
+ sleep (3);
+
+ ok (mongo_packet_recv (conn) == NULL,
+ "mongo_packet_recv() fails on a closed socket");
+
+ mongo_disconnect (conn);
+
+ end_network_tests ();
+}
+
+RUN_TEST (6, mongo_packet_recv);
diff --git a/tests/unit/mongo/client/packet_send.c b/tests/unit/mongo/client/packet_send.c
new file mode 100644
index 0000000..e501a3c
--- /dev/null
+++ b/tests/unit/mongo/client/packet_send.c
@@ -0,0 +1,75 @@
+#include "test.h"
+#include "tap.h"
+#include "mongo-wire.h"
+#include "mongo-client.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+
+#include "libmongo-private.h"
+
+/* Unit test for mongo_packet_send(): argument validation (NULL
+ * connection/packet, bad FD, unfinished packet), then a live send and a
+ * send on a closed socket inside begin/end_network_tests(). */
+void
+test_mongo_packet_send (void)
+{
+ mongo_packet *p;
+ mongo_connection c, *conn;
+ mongo_packet_header h;
+ bson *b;
+
+ p = mongo_wire_cmd_kill_cursors (1, 2, (gint64)3, (gint64)4);
+ c.fd = -1; /* deliberately invalid FD for the EBADF case below */
+
+ ok (mongo_packet_send (NULL, p) == FALSE,
+ "mongo_packet_send() fails with a NULL connection");
+ ok (errno == ENOTCONN,
+ "mongo_packet_send() with a NULL connection sets errno to ENOTCONN");
+ ok (mongo_packet_send (&c, NULL) == FALSE,
+ "mongo_packet_send() fails with a NULL packet");
+ ok (errno == EINVAL,
+ "mongo_packet_send() with a NULL packet sets errno to EINVAL");
+ ok (mongo_packet_send (&c, p) == FALSE,
+ "mongo_packet_send() fails if the FD is less than zero");
+ /* NOTE(review): message typo — "is the FD is bad" should read
+  * "if the FD is bad". */
+ ok (errno == EBADF,
+ "mongo_packet_send() sets errno to EBADF is the FD is bad");
+ mongo_wire_packet_free (p);
+
+ /* A packet with a header only (no data) counts as unfinished. */
+ p = mongo_wire_packet_new ();
+
+ h.id = 42;
+ h.resp_to = 0;
+ h.opcode = 1;
+ h.length = sizeof (mongo_packet_header);
+ mongo_wire_packet_set_header (p, &h);
+
+ c.fd = 1; /* valid-looking FD so the check reaches the packet itself */
+ ok (mongo_packet_send (&c, p) == FALSE,
+ "mongo_packet_send() fails with an unfinished packet");
+
+ mongo_wire_packet_free (p);
+
+ begin_network_tests (2);
+
+ b = bson_new ();
+ bson_append_int32 (b, "getnonce", 1);
+ bson_finish (b);
+
+ p = mongo_wire_cmd_custom (42, config.db, 0, b);
+ bson_free (b);
+
+ conn = mongo_connect (config.primary_host, config.primary_port);
+ ok (mongo_packet_send (conn, p),
+ "mongo_packet_send() works");
+
+ /* Close the socket behind the library's back before re-sending. */
+ close (conn->fd);
+ sleep (3);
+
+ ok (mongo_packet_send (conn, p) == FALSE,
+ "mongo_packet_send() fails on a closed socket");
+ mongo_wire_packet_free (p);
+
+ mongo_disconnect (conn);
+
+ end_network_tests ();
+}
+
+RUN_TEST (9, mongo_packet_send); /* 7 local + 2 network assertions */
diff --git a/tests/unit/mongo/sync-cursor/sync_cursor_free.c b/tests/unit/mongo/sync-cursor/sync_cursor_free.c
new file mode 100644
index 0000000..bd01cb5
--- /dev/null
+++ b/tests/unit/mongo/sync-cursor/sync_cursor_free.c
@@ -0,0 +1,34 @@
+#include "test.h"
+#include "mongo.h"
+#include "config.h"
+
+#include "libmongo-private.h"
+
+#include <errno.h>
+
+/* Unit test for mongo_sync_cursor_free(): a NULL cursor must only set
+ * errno (to ENOTCONN), while a real cursor is freed without error. Uses
+ * a fake (offline) sync connection, so no network is required. */
+void
+test_mongo_sync_cursor_free (void)
+{
+ mongo_sync_connection *conn;
+ mongo_packet *p;
+ mongo_sync_cursor *c;
+
+ test_env_setup ();
+
+ p = mongo_wire_packet_new ();
+ conn = test_make_fake_sync_conn (-1, FALSE);
+
+ /* The cursor takes ownership of the packet. */
+ c = mongo_sync_cursor_new (conn, config.ns, p);
+
+ errno = 0;
+ mongo_sync_cursor_free (NULL);
+ ok (errno == ENOTCONN,
+ "mongo_sync_cursor_free(NULL) sets errno to ENOTCONN");
+ mongo_sync_cursor_free (c);
+ pass ("mongo_sync_cursor_free() works");
+
+ mongo_sync_disconnect (conn);
+ test_env_free ();
+}
+
+RUN_TEST (2, mongo_sync_cursor_free);
diff --git a/tests/unit/mongo/sync-cursor/sync_cursor_get_data.c b/tests/unit/mongo/sync-cursor/sync_cursor_get_data.c
new file mode 100644
index 0000000..0dd391c
--- /dev/null
+++ b/tests/unit/mongo/sync-cursor/sync_cursor_get_data.c
@@ -0,0 +1,51 @@
+#include "test.h"
+#include "mongo.h"
+#include "config.h"
+
+#include "libmongo-private.h"
+
+#include <errno.h>
+
+/* Unit test for mongo_sync_cursor_get_data(): NULL cursor, reading
+ * before the first _cursor_next(), a successful read, and an
+ * out-of-range cursor offset. Uses a canned reply packet and a fake
+ * connection, so no network is required. */
+void
+test_mongo_sync_cursor_get_data (void)
+{
+ mongo_sync_connection *conn;
+ mongo_packet *p;
+ bson *b;
+ mongo_sync_cursor *c;
+
+ test_env_setup ();
+
+ /* Canned reply containing 4 documents. */
+ p = test_mongo_wire_generate_reply (TRUE, 4, TRUE);
+ conn = test_make_fake_sync_conn (-1, FALSE);
+
+ c = mongo_sync_cursor_new (conn, config.ns, p);
+
+ errno = 0;
+ b = mongo_sync_cursor_get_data (NULL);
+ ok (b == NULL && errno == EINVAL,
+ "mongo_sync_cursor_get_data(NULL) should fail");
+
+ b = mongo_sync_cursor_get_data (c);
+ ok (b == NULL,
+ "mongo_sync_cursor_get_data() should fail without _cursor_next()");
+
+ mongo_sync_cursor_next (c);
+ b = mongo_sync_cursor_get_data (c);
+ ok (b != NULL,
+ "mongo_sync_cursor_get_data() works");
+
+ /* Force the offset past the 4 available documents. */
+ c->offset = 5;
+
+ errno = 0;
+ b = mongo_sync_cursor_get_data (c);
+ ok (b == NULL && errno == ERANGE,
+ "mongo_sync_cursor_get_data() should fail if the cursor is "
+ "out of range");
+
+ mongo_sync_cursor_free (c);
+ mongo_sync_disconnect (conn);
+ test_env_free ();
+}
+
+RUN_TEST (4, mongo_sync_cursor_get_data);
diff --git a/tests/unit/mongo/sync-cursor/sync_cursor_new.c b/tests/unit/mongo/sync-cursor/sync_cursor_new.c
new file mode 100644
index 0000000..642d826
--- /dev/null
+++ b/tests/unit/mongo/sync-cursor/sync_cursor_new.c
@@ -0,0 +1,40 @@
+#include "test.h"
+#include "mongo.h"
+#include "config.h"
+
+#include "libmongo-private.h"
+
+#include <errno.h>
+
+/* Unit test for mongo_sync_cursor_new(): each NULL argument (packet,
+ * namespace, connection) must fail, and a fully specified call must
+ * succeed. Uses a fake connection, so no network is required. */
+void
+test_mongo_sync_cursor_new (void)
+{
+ mongo_sync_connection *conn;
+ mongo_packet *p;
+ mongo_sync_cursor *c;
+
+ test_env_setup ();
+
+ p = mongo_wire_packet_new ();
+ conn = test_make_fake_sync_conn (-1, FALSE);
+
+ c = mongo_sync_cursor_new (conn, config.ns, NULL);
+ ok (c == NULL,
+ "mongo_sync_cursor_new() fails with a NULL packet");
+ c = mongo_sync_cursor_new (conn, NULL, p);
+ ok (c == NULL,
+ "mongo_sync_cursor_new() fails with a NULL namespace");
+ c = mongo_sync_cursor_new (NULL, config.ns, p);
+ ok (c == NULL,
+ "mongo_sync_cursor_new() fails with a NULL connection");
+
+ c = mongo_sync_cursor_new (conn, config.ns, p);
+ ok (c != NULL,
+ "mongo_sync_cursor_new() works");
+
+ /* Freeing the cursor also frees the packet it took ownership of. */
+ mongo_sync_cursor_free (c);
+ mongo_sync_disconnect (conn);
+ test_env_free ();
+}
+
+RUN_TEST (4, mongo_sync_cursor_new);
diff --git a/tests/unit/mongo/sync-cursor/sync_cursor_next.c b/tests/unit/mongo/sync-cursor/sync_cursor_next.c
new file mode 100644
index 0000000..442df96
--- /dev/null
+++ b/tests/unit/mongo/sync-cursor/sync_cursor_next.c
@@ -0,0 +1,40 @@
+#include "test.h"
+#include "mongo.h"
+#include "config.h"
+
+#include "libmongo-private.h"
+
+#include <errno.h>
+
+/* Unit test for mongo_sync_cursor_next(): NULL cursor fails, iterating
+ * over a canned 2-document reply succeeds twice, and a third step past
+ * the end of the resultset fails. No network is required. */
+void
+test_mongo_sync_cursor_next (void)
+{
+ mongo_sync_connection *conn;
+ mongo_packet *p;
+ mongo_sync_cursor *c;
+ gboolean r = TRUE;
+ gint i;
+
+ test_env_setup ();
+
+ /* Canned reply containing exactly 2 documents. */
+ p = test_mongo_wire_generate_reply (TRUE, 2, TRUE);
+ conn = test_make_fake_sync_conn (-1, FALSE);
+
+ c = mongo_sync_cursor_new (conn, config.ns, p);
+
+ ok (mongo_sync_cursor_next (NULL) == FALSE,
+ "mongo_sync_cursor_next() should fail with a NULL cursor");
+ /* Both steps must succeed; accumulate with &= so one failure shows. */
+ for (i = 0; i < 2; i++)
+ r &= mongo_sync_cursor_next (c);
+
+ ok (r == TRUE,
+ "mongo_sync_cursor_next() works");
+ ok (mongo_sync_cursor_next (c) == FALSE,
+ "mongo_sync_cursor_next() should fail past the end of the resultset");
+
+ mongo_sync_cursor_free (c);
+ mongo_sync_disconnect (conn);
+ test_env_free ();
+}
+
+RUN_TEST (3, mongo_sync_cursor_next);
diff --git a/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_cursor_get_chunk.c b/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_cursor_get_chunk.c
new file mode 100644
index 0000000..f16378a
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_cursor_get_chunk.c
@@ -0,0 +1,15 @@
+#include "test.h"
+#include "mongo.h"
+
+#include "libmongo-private.h"
+
+/* Unit test: mongo_sync_gridfs_chunked_file_cursor_get_chunk() must
+ * reject a NULL cursor. (The size out-parameter is still passed, so
+ * only the cursor check is exercised.) */
+void
+test_mongo_sync_gridfs_chunked_file_cursor_get_chunk (void)
+{
+ gint32 size;
+
+ ok (mongo_sync_gridfs_chunked_file_cursor_get_chunk (NULL, &size) == NULL,
+ "mongo_sync_gridfs_file_cursor_get_chunk() fails with a NULL cursor");
+}
+
+RUN_TEST (1, mongo_sync_gridfs_chunked_file_cursor_get_chunk);
diff --git a/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_cursor_new.c b/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_cursor_new.c
new file mode 100644
index 0000000..22210f8
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_cursor_new.c
@@ -0,0 +1,19 @@
+#include "test.h"
+#include "mongo.h"
+
+#include "libmongo-private.h"
+
+/* Unit test: mongo_sync_gridfs_chunked_file_cursor_new() must reject a
+ * NULL file, a negative start chunk and a negative chunk count. The
+ * file struct is passed uninitialized — only used as a non-NULL
+ * pointer, never dereferenced on these failure paths. */
+void
+test_mongo_sync_gridfs_chunked_file_cursor_new (void)
+{
+ mongo_sync_gridfs_chunked_file f;
+
+ ok (mongo_sync_gridfs_chunked_file_cursor_new (NULL, 0, 0) == NULL,
+ "mongo_sync_gridfs_file_cursor_new() fails with a NULL file");
+ ok (mongo_sync_gridfs_chunked_file_cursor_new (&f, -1, 0) == NULL,
+ "mongo_sync_gridfs_file_cursor_new() fails with an invalid start position");
+ ok (mongo_sync_gridfs_chunked_file_cursor_new (&f, 0, -1) == NULL,
+ "mongo_sync_gridfs_file_cursor_new() fails with an invalid max number");
+}
+
+RUN_TEST (3, mongo_sync_gridfs_chunked_file_cursor_new);
diff --git a/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_free.c b/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_free.c
new file mode 100644
index 0000000..c9fddfa
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_free.c
@@ -0,0 +1,16 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+
+/* Unit test: mongo_sync_gridfs_chunked_file_free(NULL) must be a no-op
+ * that only signals the error via errno (ENOTCONN). */
+void
+test_mongo_sync_gridfs_chunked_file_free (void)
+{
+ errno = 0; /* clear so the check below sees only this call's effect */
+ mongo_sync_gridfs_chunked_file_free (NULL);
+
+ cmp_ok (errno, "==", ENOTCONN,
+ "mongo_sync_gridfs_chunked_file_free() fails with a NULL file");
+}
+
+RUN_TEST (1, mongo_sync_gridfs_chunked_file_free);
diff --git a/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_new_from_buffer.c b/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_new_from_buffer.c
new file mode 100644
index 0000000..ba3fa2e
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_file_new_from_buffer.c
@@ -0,0 +1,71 @@
+#include "test.h"
+#include "mongo.h"
+
+/* Just over one 256 KiB chunk, so the upload spans multiple chunks.
+ * Parenthesized: an expression macro must expand safely in any context
+ * (e.g. BUFFER_SIZE * 2 would otherwise mis-associate). */
+#define BUFFER_SIZE (256 * 1024 + 42)
+
+/* Unit test for mongo_sync_gridfs_chunked_file_new_from_buffer():
+ * NULL-GridFS rejection offline, then (on the network) NULL data,
+ * zero size, uninitialized OID generator, and successful uploads both
+ * with and without metadata. */
+void
+test_mongo_sync_gridfs_chunked_file_new_from_buffer (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_gridfs *gfs;
+ bson *metadata;
+ guint8 *buffer;
+ mongo_sync_gridfs_chunked_file *gfile;
+
+ buffer = g_malloc (BUFFER_SIZE);
+ memset (buffer, 'a', BUFFER_SIZE);
+
+ conn = test_make_fake_sync_conn (4, TRUE);
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+ metadata = bson_build (BSON_TYPE_STRING, "filename",
+ "gridfs_file_new_from_buffer", -1,
+ BSON_TYPE_NONE);
+ bson_finish (metadata);
+
+ /* NOTE(review): the function returns a pointer, so comparing against
+  * FALSE works only because FALSE is 0; "== NULL" would be clearer. */
+ ok (mongo_sync_gridfs_chunked_file_new_from_buffer (NULL, metadata,
+ buffer, BUFFER_SIZE) == FALSE,
+ "mongo_sync_gridfs_chunked_file_new_from_buffer() fails with a NULL GridFS");
+
+ mongo_sync_gridfs_free (gfs, TRUE);
+
+ begin_network_tests (5);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+ ok (mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, metadata,
+ NULL, BUFFER_SIZE) == FALSE,
+ "mongo_sync_gridfs_chunked_file_new_from_buffer() fails with NULL data");
+
+ ok (mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, metadata,
+ buffer, 0) == FALSE,
+ "mongo_sync_gridfs_chunked_file_new_from_buffer() fails with an invalid data size");
+
+ /* OID generator not yet initialized: upload must fail. */
+ ok (mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, metadata, buffer,
+ BUFFER_SIZE) == FALSE,
+ "mongo_sync_gridfs_chunked_file_new_from_buffer() fails with uninitialized OID");
+
+ mongo_util_oid_init (0);
+
+ gfile = mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, metadata,
+ buffer, BUFFER_SIZE);
+ ok (gfile != NULL,
+ "mongo_sync_gridfs_chunked_file_new_from_buffer() works with metadata");
+ mongo_sync_gridfs_chunked_file_free (gfile);
+
+ gfile = mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, NULL,
+ buffer, BUFFER_SIZE);
+ ok (gfile != NULL,
+ "mongo_sync_gridfs_chunked_file_new_from_buffer() works without metadata");
+ mongo_sync_gridfs_chunked_file_free (gfile);
+
+ mongo_sync_gridfs_free (gfs, TRUE);
+
+ end_network_tests ();
+
+ bson_free (metadata);
+ g_free (buffer);
+}
+
+RUN_TEST (6, mongo_sync_gridfs_chunked_file_new_from_buffer);
diff --git a/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_find.c b/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_find.c
new file mode 100644
index 0000000..91514f9
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs-chunk/sync_gridfs_chunked_find.c
@@ -0,0 +1,38 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+
+/* Unit test for mongo_sync_gridfs_chunked_find(): NULL GridFS offline;
+ * on the network, a NULL query and a query matching no file both fail. */
+void
+test_mongo_sync_gridfs_chunked_find (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_gridfs *gfs;
+ bson *query;
+
+ /* Filename deliberately chosen not to exist on the server. */
+ query = bson_build (BSON_TYPE_STRING, "filename", "bogus-fn", -1,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+
+ ok (mongo_sync_gridfs_chunked_find (NULL, query) == NULL,
+ "mongo_sync_gridfs_chunked_find() fails with a NULL GridFS");
+
+ begin_network_tests (2);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+ ok (mongo_sync_gridfs_chunked_find (gfs, NULL) == NULL,
+ "mongo_sync_gridfs_chunked_find() fails with a NULL query");
+
+ ok (mongo_sync_gridfs_chunked_find (gfs, query) == NULL,
+ "mongo_sync_gridfs_chunked_find() fails when the file is not found");
+
+ /* TRUE: also close the underlying connection. */
+ mongo_sync_gridfs_free (gfs, TRUE);
+
+ end_network_tests ();
+
+ bson_free (query);
+}
+
+RUN_TEST (3, mongo_sync_gridfs_chunked_find);
diff --git a/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_close.c b/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_close.c
new file mode 100644
index 0000000..3c8a7b3
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_close.c
@@ -0,0 +1,41 @@
+#include "test.h"
+#include "mongo.h"
+
+#include "libmongo-private.h"
+
+/* Unit test for mongo_sync_gridfs_stream_close(): NULL stream fails;
+ * closing a write stream works; a stream whose type is forced to
+ * chunked is rejected; a read stream closes cleanly. */
+void
+test_mongo_sync_gridfs_stream_close (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_gridfs *gfs;
+ mongo_sync_gridfs_stream *stream;
+
+ mongo_util_oid_init (0); /* stream creation needs the OID generator */
+
+ ok (mongo_sync_gridfs_stream_close (NULL) == FALSE,
+ "mongo_sync_gridfs_stream_close() fails with a NULL stream");
+
+ begin_network_tests (3);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+ stream = mongo_sync_gridfs_stream_new (gfs, NULL);
+ ok (mongo_sync_gridfs_stream_close (stream) == TRUE,
+ "mongo_sync_gridfs_stream_close() works with a write stream");
+
+ /* Poke the private file type to exercise the type check. */
+ stream = mongo_sync_gridfs_stream_new (gfs, NULL);
+ stream->file.type = LMC_GRIDFS_FILE_CHUNKED;
+ ok (mongo_sync_gridfs_stream_close (stream) == FALSE,
+ "mongo_sync_gridfs_stream_close() should fail with a chunked file");
+
+ stream->file.type = LMC_GRIDFS_FILE_STREAM_READER;
+ ok (mongo_sync_gridfs_stream_close (stream) == TRUE,
+ "mongo_sync_gridfs_stream_close() works with a read stream");
+
+ mongo_sync_gridfs_free (gfs, TRUE);
+
+ end_network_tests ();
+}
+
+RUN_TEST (4, mongo_sync_gridfs_stream_close);
diff --git a/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_find.c b/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_find.c
new file mode 100644
index 0000000..643a8b2
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_find.c
@@ -0,0 +1,36 @@
+#include "test.h"
+#include "mongo.h"
+
+/* Unit test for mongo_sync_gridfs_stream_find(): NULL GridFS offline;
+ * on the network, a NULL query and a non-matching query both fail. */
+void
+test_mongo_sync_gridfs_stream_find (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_gridfs *gfs;
+ bson *query;
+
+ /* Filename deliberately chosen not to exist on the server. */
+ query = bson_build (BSON_TYPE_STRING, "filename", "bogus-fn", -1,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+
+ ok (mongo_sync_gridfs_stream_find (NULL, query) == NULL,
+ "mongo_sync_gridfs_stream_find() should fail with a NULL connection");
+
+ begin_network_tests (2);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+ ok (mongo_sync_gridfs_stream_find (gfs, NULL) == NULL,
+ "mongo_sync_gridfs_stream_find() fails with a NULL query");
+
+ ok (mongo_sync_gridfs_stream_find (gfs, query) == NULL,
+ "mongo_sync_gridfs_stream_find() fails if the file is not found");
+
+ mongo_sync_gridfs_free (gfs, TRUE);
+
+ end_network_tests ();
+
+ bson_free (query);
+}
+
+RUN_TEST (3, mongo_sync_gridfs_stream_find);
diff --git a/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_new.c b/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_new.c
new file mode 100644
index 0000000..75e4419
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_new.c
@@ -0,0 +1,43 @@
+#include "test.h"
+#include "mongo.h"
+
+/* Unit test for mongo_sync_gridfs_stream_new(): NULL GridFS fails
+ * offline; on the network, creation works both with and without
+ * user-supplied metadata. */
+void
+test_mongo_sync_gridfs_stream_new (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_gridfs *gfs;
+ mongo_sync_gridfs_stream *stream;
+ bson *meta;
+
+ mongo_util_oid_init (0); /* stream creation needs the OID generator */
+
+ meta = bson_build (BSON_TYPE_STRING, "my-id", "sync_gridfs_stream_new", -1,
+ BSON_TYPE_NONE);
+ bson_finish (meta);
+
+ /* NOTE(review): the function returns a pointer; "== NULL" would be
+  * clearer than "== FALSE" (works only because FALSE is 0). */
+ ok (mongo_sync_gridfs_stream_new (NULL, meta) == FALSE,
+ "mongo_sync_gridfs_stream_new() should fail with a NULL connection");
+
+ begin_network_tests (2);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+ stream = mongo_sync_gridfs_stream_new (gfs, NULL);
+ ok (stream != NULL,
+ "mongo_sync_gridfs_stream_new() works with NULL metadata");
+ mongo_sync_gridfs_stream_close (stream);
+
+ stream = mongo_sync_gridfs_stream_new (gfs, meta);
+ ok (stream != NULL,
+ "mongo_sync_gridfs_stream_new() works with metadata");
+ mongo_sync_gridfs_stream_close (stream);
+
+ mongo_sync_gridfs_free (gfs, TRUE);
+
+ end_network_tests ();
+
+ bson_free (meta);
+}
+
+RUN_TEST (3, mongo_sync_gridfs_stream_new);
diff --git a/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_read.c b/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_read.c
new file mode 100644
index 0000000..a53aa88
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_read.c
@@ -0,0 +1,44 @@
+#include "test.h"
+#include "mongo.h"
+
+#include "libmongo-private.h"
+
+/* Unit test for mongo_sync_gridfs_stream_read(): NULL stream fails
+ * offline; on the network, reading from a write-only stream, with a
+ * NULL buffer, or with a zero size all return -1. */
+void
+test_mongo_sync_gridfs_stream_read (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_gridfs *gfs;
+ mongo_sync_gridfs_stream *stream;
+ guint8 buffer[4096];
+
+ mongo_util_oid_init (0); /* stream creation needs the OID generator */
+
+ ok (mongo_sync_gridfs_stream_read (NULL, buffer, sizeof (buffer)) == -1,
+ "mongo_sync_gridfs_stream_read() should fail with a NULL connection");
+
+ begin_network_tests (3);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+ /* New streams are writers; reading from one must fail. */
+ stream = mongo_sync_gridfs_stream_new (gfs, NULL);
+
+ ok (mongo_sync_gridfs_stream_read (stream, buffer, sizeof (buffer)) == -1,
+ "mongo-sync_gridfs_stream_read() should fail when the stream is "
+ "write-only");
+
+ /* Force reader mode to reach the buffer/size validation. */
+ stream->file.type = LMC_GRIDFS_FILE_STREAM_READER;
+
+ ok (mongo_sync_gridfs_stream_read (stream, NULL, sizeof (buffer)) == -1,
+ "mongo_sync_gridfs_stream_read() should fail with a NULL buffer");
+ ok (mongo_sync_gridfs_stream_read (stream, buffer, 0) == -1,
+ "mongo_sync_gridfs_stream_read() should fail with a 0 size");
+
+ mongo_sync_gridfs_stream_close (stream);
+
+ mongo_sync_gridfs_free (gfs, TRUE);
+
+ end_network_tests ();
+}
+
+RUN_TEST (4, mongo_sync_gridfs_stream_read);
diff --git a/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_seek.c b/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_seek.c
new file mode 100644
index 0000000..49547bc
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_seek.c
@@ -0,0 +1,65 @@
+#include "test.h"
+#include "mongo.h"
+
+#include "libmongo-private.h"
+
+#include <unistd.h>
+
+/* Unit test for mongo_sync_gridfs_stream_seek(): NULL stream offline;
+ * on the network, seeking a write stream and every out-of-bounds
+ * combination of SEEK_SET/SEEK_CUR/SEEK_END (the stream is empty, so
+ * any non-zero target is past EOF), plus an invalid whence. */
+void
+test_mongo_sync_gridfs_stream_seek (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_gridfs *gfs;
+ mongo_sync_gridfs_stream *stream;
+
+ mongo_util_oid_init (0); /* stream creation needs the OID generator */
+
+ ok (mongo_sync_gridfs_stream_seek (NULL, 0, SEEK_SET) == FALSE,
+ "mongo_sync_gridfs_stream_seek() fails with a NULL stream");
+
+ begin_network_tests (8);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+ stream = mongo_sync_gridfs_stream_new (gfs, NULL);
+
+ ok (mongo_sync_gridfs_stream_seek (stream, 0, SEEK_SET) == FALSE,
+ "mongo_sync_gridfs_stream_seek() fails with a write stream");
+
+ /* Force reader mode; the stream's file is empty (length 0). */
+ stream->file.type = LMC_GRIDFS_FILE_STREAM_READER;
+
+ ok (mongo_sync_gridfs_stream_seek (stream, -1, SEEK_SET) == FALSE,
+ "mongo_sync_gridfs_stream_seek() fails with SEEK_SET and a negative "
+ "position");
+
+ ok (mongo_sync_gridfs_stream_seek (stream, 10, SEEK_SET) == FALSE,
+ "mongo_sync_gridfs_stream_seek() fails with SEEK_SET and a position "
+ "past EOF");
+
+ ok (mongo_sync_gridfs_stream_seek (stream, -1, SEEK_CUR) == FALSE,
+ "mongo_sync_gridfs_stream_seek() fails with SEEK_CUR and a position "
+ "before the start");
+
+ ok (mongo_sync_gridfs_stream_seek (stream, 10, SEEK_CUR) == FALSE,
+ "mongo_sync_gridfs_stream_seek() fails with SEEK_CUR and a position "
+ "past EOF");
+
+ ok (mongo_sync_gridfs_stream_seek (stream, 1, SEEK_END) == FALSE,
+ "mongo_sync_gridfs_stream_seek() fails with SEEK_END and a position "
+ "past EOF");
+
+ ok (mongo_sync_gridfs_stream_seek (stream, -1, SEEK_END) == FALSE,
+ "mongo_sync_gridfs_stream_seek() fails with SEEK_END and a position "
+ "before the start");
+
+ /* 42 is not one of SEEK_SET/SEEK_CUR/SEEK_END. */
+ ok (mongo_sync_gridfs_stream_seek (stream, 0, 42) == FALSE,
+ "mongo_sync_gridfs_stream_seek() fails with an invalid whence");
+
+ mongo_sync_gridfs_stream_close (stream);
+ mongo_sync_gridfs_free (gfs, TRUE);
+
+ end_network_tests ();
+}
+
+RUN_TEST (9, mongo_sync_gridfs_stream_seek);
diff --git a/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_write.c b/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_write.c
new file mode 100644
index 0000000..562c7b4
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs-stream/sync_gridfs_stream_write.c
@@ -0,0 +1,50 @@
+#include "test.h"
+#include "mongo.h"
+
+#include "libmongo-private.h"
+
+/* Unit test for mongo_sync_gridfs_stream_write(): NULL stream offline;
+ * on the network, NULL buffer and zero size fail, a real write works,
+ * and writing to a stream forced into reader mode fails. */
+void
+test_mongo_sync_gridfs_stream_write (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_gridfs *gfs;
+ mongo_sync_gridfs_stream *stream;
+ bson *meta;
+ guint8 buffer[4096];
+
+ mongo_util_oid_init (0); /* stream creation needs the OID generator */
+
+ meta = bson_build (BSON_TYPE_STRING, "my-id", "sync_gridfs_stream_write", -1,
+ BSON_TYPE_NONE);
+ bson_finish (meta);
+
+ /* NOTE(review): buffer contents are uninitialized; fine here because
+  * only the call's success is asserted, not the stored bytes. */
+ ok (mongo_sync_gridfs_stream_write (NULL, buffer, sizeof (buffer)) == FALSE,
+ "mongo_sync_gridfs_stream_write() should fail with a NULL connection");
+
+ begin_network_tests (4);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+ stream = mongo_sync_gridfs_stream_new (gfs, meta);
+
+ ok (mongo_sync_gridfs_stream_write (stream, NULL, sizeof (buffer)) == FALSE,
+ "mongo_sync_gridfs_stream_write() should fail with a NULL buffer");
+ ok (mongo_sync_gridfs_stream_write (stream, buffer, 0) == FALSE,
+ "mongo_sync_gridfs_stream_write() should fail with 0 size");
+ ok (mongo_sync_gridfs_stream_write (stream, buffer, sizeof (buffer)) == TRUE,
+ "mongo_sync_gridfs_stream_write() works");
+
+ /* Force reader mode to exercise the direction check. */
+ stream->file.type = LMC_GRIDFS_FILE_STREAM_READER;
+ ok (mongo_sync_gridfs_stream_write (stream, buffer, sizeof (buffer)) == FALSE,
+ "mongo_sync_gridfs_stream_write() should fail with a read stream");
+
+ mongo_sync_gridfs_stream_close (stream);
+ mongo_sync_gridfs_free (gfs, TRUE);
+
+ end_network_tests ();
+
+ bson_free (meta);
+}
+
+RUN_TEST (5, mongo_sync_gridfs_stream_write);
diff --git a/tests/unit/mongo/sync-gridfs/sync_gridfs_file_get_metadata.c b/tests/unit/mongo/sync-gridfs/sync_gridfs_file_get_metadata.c
new file mode 100644
index 0000000..2be34e5
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs/sync_gridfs_file_get_metadata.c
@@ -0,0 +1,23 @@
+#include "test.h"
+#include "mongo.h"
+
+/* Unit test: every GridFS file metadata accessor must fail gracefully
+ * on a NULL file — pointer getters return NULL, numeric getters -1. */
+void
+test_mongo_sync_gridfs_file_get_metadata (void)
+{
+ ok (mongo_sync_gridfs_file_get_id (NULL) == NULL,
+ "mongo_sync_gridfs_file_get_id() fails with a NULL file");
+ ok (mongo_sync_gridfs_file_get_length (NULL) == -1,
+ "mongo_sync_gridfs_file_get_length() fails with a NULL file");
+ ok (mongo_sync_gridfs_file_get_chunk_size (NULL) == -1,
+ "mongo_sync_gridfs_file_get_chunk_size() fails with a NULL file");
+ ok (mongo_sync_gridfs_file_get_md5 (NULL) == NULL,
+ "mongo_sync_gridfs_file_get_md5() fails with a NULL file");
+ ok (mongo_sync_gridfs_file_get_date (NULL) == -1,
+ "mongo_sync_gridfs_file_get_date() fails with a NULL file");
+ ok (mongo_sync_gridfs_file_get_metadata (NULL) == NULL,
+ "mongo_sync_gridfs_file_get_metadata() fails with a NULL file");
+ ok (mongo_sync_gridfs_file_get_chunks (NULL) == -1,
+ "mongo_sync_gridfs_file_get_chunks() fails with a NULL file");
+}
+
+RUN_TEST (7, mongo_sync_gridfs_file_get_metadata);
diff --git a/tests/unit/mongo/sync-gridfs/sync_gridfs_free.c b/tests/unit/mongo/sync-gridfs/sync_gridfs_free.c
new file mode 100644
index 0000000..1c8c2d6
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs/sync_gridfs_free.c
@@ -0,0 +1,35 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+
+/* Unit test for mongo_sync_gridfs_free(): NULL argument only sets
+ * errno; on the network, freeing works both with and without also
+ * closing the underlying connection (second argument). */
+void
+test_mongo_sync_gridfs_free (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_gridfs *gfs;
+
+ errno = 0; /* clear so the check sees only this call's effect */
+ mongo_sync_gridfs_free (NULL, FALSE);
+ cmp_ok (errno, "==", ENOTCONN,
+ "mongo_sync_gridfs_free() with a NULL connection shall set errno");
+
+ begin_network_tests (2);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+ /* FALSE: keep the connection so it can be reused below. */
+ mongo_sync_gridfs_free (gfs, FALSE);
+ cmp_ok (errno, "==", 0,
+ "mongo_sync_gridfs_free() should clear errno on success");
+
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+ mongo_sync_gridfs_free (gfs, TRUE);
+ cmp_ok (errno, "==", 0,
+ "mongo_sync_gridfs_free() works when asked to free the "
+ "connection too");
+
+ end_network_tests ();
+}
+
+RUN_TEST (3, mongo_sync_gridfs_free);
diff --git a/tests/unit/mongo/sync-gridfs/sync_gridfs_get_set_chunk_size.c b/tests/unit/mongo/sync-gridfs/sync_gridfs_get_set_chunk_size.c
new file mode 100644
index 0000000..5d17986
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs/sync_gridfs_get_set_chunk_size.c
@@ -0,0 +1,33 @@
+#include "test.h"
+#include "mongo.h"
+
+/* Unit test for the chunk size getter/setter pair: NULL GridFS fails
+ * offline; on the network, an invalid size is rejected and a set
+ * value round-trips through the getter. */
+void
+test_mongo_sync_gridfs_get_set_chunk_size (void)
+{
+ mongo_sync_gridfs *gfs;
+
+ ok (mongo_sync_gridfs_get_chunk_size (NULL) == -1,
+ "mongo_sync_gridfs_get_chunk_size() fails with a NULL gfs");
+ ok (mongo_sync_gridfs_set_chunk_size (NULL, 16 * 1024) == FALSE,
+ "mongo_sync_gridfs_set_chunk_size() fails with a NULL gfs");
+
+ begin_network_tests (3);
+
+ gfs = mongo_sync_gridfs_new (mongo_sync_connect (config.primary_host,
+ config.primary_port,
+ FALSE),
+ config.gfs_prefix);
+
+ ok (mongo_sync_gridfs_set_chunk_size (gfs, -1) == FALSE,
+ "mongo_sync_gridfs_set_chunk_size() fails if the size is invalid");
+ ok (mongo_sync_gridfs_set_chunk_size (gfs, 12345),
+ "mongo_sync_gridfs_set_chunk_size() works");
+ cmp_ok (mongo_sync_gridfs_get_chunk_size (gfs), "==", 12345,
+ "mongo_sync_gridfs_get_chunk_size() works");
+
+ mongo_sync_gridfs_free (gfs, TRUE);
+
+ end_network_tests ();
+}
+
+RUN_TEST (5, mongo_sync_gridfs_get_set_chunk_size);
diff --git a/tests/unit/mongo/sync-gridfs/sync_gridfs_list.c b/tests/unit/mongo/sync-gridfs/sync_gridfs_list.c
new file mode 100644
index 0000000..e5857ea
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs/sync_gridfs_list.c
@@ -0,0 +1,34 @@
+#include "test.h"
+#include "mongo.h"
+
+/* Unit test for mongo_sync_gridfs_list(): NULL GridFS fails offline;
+ * on the network, a query matching nothing yields NULL. */
+void
+test_mongo_sync_gridfs_list (void)
+{
+ mongo_sync_gridfs *gfs;
+ bson *query;
+
+ /* Key/value pair deliberately chosen to match no stored file. */
+ query = bson_build (BSON_TYPE_STRING, "bogus-key", "bogus-value", -1,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+
+ ok (mongo_sync_gridfs_list (NULL, NULL) == NULL,
+ "mongo_sync_gridfs_list() fails with a NULL GridFS");
+
+ begin_network_tests (1);
+
+ gfs = mongo_sync_gridfs_new
+ (mongo_sync_connect (config.primary_host, config.primary_port, FALSE),
+ config.gfs_prefix);
+
+ ok (mongo_sync_gridfs_list (gfs, query) == NULL,
+ "mongo_sync_gridfs_list() fails with a query that does not match "
+ "anything");
+
+ mongo_sync_gridfs_free (gfs, TRUE);
+
+ end_network_tests ();
+
+ bson_free (query);
+}
+
+RUN_TEST (2, mongo_sync_gridfs_list);
diff --git a/tests/unit/mongo/sync-gridfs/sync_gridfs_new.c b/tests/unit/mongo/sync-gridfs/sync_gridfs_new.c
new file mode 100644
index 0000000..20d6fea
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs/sync_gridfs_new.c
@@ -0,0 +1,54 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+
+/* Unit test for mongo_sync_gridfs_new(): rejects NULL/bogus connection
+ * and NULL/undotted namespace prefix; on the network, verifies the
+ * derived ".files"/".chunks" namespaces on a successfully created
+ * GridFS object. */
+void
+test_mongo_sync_gridfs_new (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_gridfs *gfs;
+ gchar *f, *c;
+
+ conn = test_make_fake_sync_conn (4, TRUE);
+
+ ok (mongo_sync_gridfs_new (NULL, "test.fs") == NULL,
+ "mongo_sync_gridfs_new() should fail with a NULL connection");
+
+ ok (mongo_sync_gridfs_new (conn, "test.fs") == NULL,
+ "mongo_sync_gridfs_new() should fail with a bogus connection");
+
+ ok (mongo_sync_gridfs_new (conn, NULL) == NULL,
+ "mongo_sync_gridfs_new() should fail with a NULL ns prefix");
+
+ /* "bogus" lacks the required "db.collection" dotted form. */
+ ok (mongo_sync_gridfs_new (conn, "bogus") == NULL,
+ "mongo_sync_gridfs_new() should fail with a bogus ns prefix");
+
+ mongo_sync_disconnect (conn);
+
+ begin_network_tests (4);
+
+ /* Expected namespaces derived from the configured prefix. */
+ f = g_strconcat (config.gfs_prefix, ".files", NULL);
+ c = g_strconcat (config.gfs_prefix, ".chunks", NULL);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+ ok (gfs != NULL,
+ "mongo_sync_gridfs_new() works");
+ is (gfs->ns.prefix, config.gfs_prefix,
+ "The namespace prefix is as specified");
+ is (gfs->ns.files, f,
+ "The files namespace is correct");
+ is (gfs->ns.chunks, c,
+ "The chunks namespace is correct");
+ mongo_sync_gridfs_free (gfs, FALSE);
+
+ mongo_sync_disconnect (conn);
+
+ g_free (f);
+ g_free (c);
+ end_network_tests ();
+}
+
+RUN_TEST (8, mongo_sync_gridfs_new);
diff --git a/tests/unit/mongo/sync-gridfs/sync_gridfs_remove.c b/tests/unit/mongo/sync-gridfs/sync_gridfs_remove.c
new file mode 100644
index 0000000..88eb40b
--- /dev/null
+++ b/tests/unit/mongo/sync-gridfs/sync_gridfs_remove.c
@@ -0,0 +1,34 @@
+#include "test.h"
+#include "mongo.h"
+
+/* Unit test for mongo_sync_gridfs_remove(): NULL GridFS fails offline;
+ * on the network, removing with a query that matches nothing fails. */
+void
+test_mongo_sync_gridfs_remove (void)
+{
+ mongo_sync_gridfs *gfs;
+ bson *query;
+
+ /* Key/value pair deliberately chosen to match no stored file. */
+ query = bson_build (BSON_TYPE_STRING, "bogus-key", "bogus-value", -1,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+
+ ok (mongo_sync_gridfs_remove (NULL, NULL) == FALSE,
+ "mongo_sync_gridfs_remove() fails with a NULL GridFS");
+
+ begin_network_tests (1);
+
+ gfs = mongo_sync_gridfs_new
+ (mongo_sync_connect (config.primary_host, config.primary_port, FALSE),
+ config.gfs_prefix);
+
+ ok (mongo_sync_gridfs_remove (gfs, query) == FALSE,
+ "mongo_sync_gridfs_remove() fails with a query that does not match "
+ "anything");
+
+ mongo_sync_gridfs_free (gfs, TRUE);
+
+ end_network_tests ();
+
+ bson_free (query);
+}
+
+RUN_TEST (2, mongo_sync_gridfs_remove);
diff --git a/tests/unit/mongo/sync-pool/sync_pool_free.c b/tests/unit/mongo/sync-pool/sync_pool_free.c
new file mode 100644
index 0000000..5f64621
--- /dev/null
+++ b/tests/unit/mongo/sync-pool/sync_pool_free.c
@@ -0,0 +1,11 @@
+#include "test.h"
+#include "mongo.h"
+
+/* Unit test: mongo_sync_pool_free(NULL) must be a safe no-op. */
+void
+test_mongo_sync_pool_free (void)
+{
+ mongo_sync_pool_free (NULL);
+ pass ("mongo_sync_pool_free(NULL) works");
+}
+
+RUN_TEST (1, mongo_sync_pool_free);
diff --git a/tests/unit/mongo/sync-pool/sync_pool_new.c b/tests/unit/mongo/sync-pool/sync_pool_new.c
new file mode 100644
index 0000000..b9758d2
--- /dev/null
+++ b/tests/unit/mongo/sync-pool/sync_pool_new.c
@@ -0,0 +1,19 @@
+#include "test.h"
+#include "mongo.h"
+
+/* Unit test for mongo_sync_pool_new() argument validation: zero total
+ * connections, NULL host, negative port, and negative master/slave
+ * counts must all fail. No server contact is made. */
+void
+test_mongo_sync_pool_new (void)
+{
+ ok (mongo_sync_pool_new ("example.com", 27017, 0, 0) == NULL,
+ "mongo_sync_pool_new() needs at least one connection");
+ ok (mongo_sync_pool_new (NULL, 27017, 1, 0) == NULL,
+ "mongo_sync_pool_new() should fail without a HOST");
+ ok (mongo_sync_pool_new ("example.com", -1, 1, 0) == NULL,
+ "mongo_sync_pool_new() should fail with an invalid port");
+ ok (mongo_sync_pool_new ("example.com", 27017, -1, 0) == NULL,
+ "mongo_sync_pool_new() should fail with an invalid number of masters");
+ ok (mongo_sync_pool_new ("example.com", 27017, 10, -1) == NULL,
+ "mongo_sync_pool_new() should fail with an invalid number of slaves");
+}
+
+RUN_TEST (5, mongo_sync_pool_new);
diff --git a/tests/unit/mongo/sync-pool/sync_pool_pick.c b/tests/unit/mongo/sync-pool/sync_pool_pick.c
new file mode 100644
index 0000000..352ba04
--- /dev/null
+++ b/tests/unit/mongo/sync-pool/sync_pool_pick.c
@@ -0,0 +1,11 @@
+#include "test.h"
+#include "mongo.h"
+
+/* Unit test: mongo_sync_pool_pick() must reject a NULL pool. */
+void
+test_mongo_sync_pool_pick (void)
+{
+ ok (mongo_sync_pool_pick (NULL, TRUE) == NULL,
+ "mongo_sync_pool_pick() should fail without a pool");
+}
+
+RUN_TEST (1, mongo_sync_pool_pick);
diff --git a/tests/unit/mongo/sync-pool/sync_pool_return.c b/tests/unit/mongo/sync-pool/sync_pool_return.c
new file mode 100644
index 0000000..d622ede
--- /dev/null
+++ b/tests/unit/mongo/sync-pool/sync_pool_return.c
@@ -0,0 +1,22 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <string.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_pool_return (void)
+{
+ mongo_sync_pool_connection c;
+ void *pool;
+
+ pool = g_malloc (1024);
+
+ ok (mongo_sync_pool_return (NULL, &c) == FALSE,
+ "mongo_sync_pool_return() should fail without a pool");
+ ok (mongo_sync_pool_return ((mongo_sync_pool *)pool, NULL) == FALSE,
+ "mongo_sync_pool_return() should fail without a connection");
+ g_free (pool);
+}
+
+RUN_TEST (2, mongo_sync_pool_return);
diff --git a/tests/unit/mongo/sync/sync_cmd_authenticate.c b/tests/unit/mongo/sync/sync_cmd_authenticate.c
new file mode 100644
index 0000000..a5c67cb
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_authenticate.c
@@ -0,0 +1,112 @@
+#include "test.h"
+#include "mongo.h"
+#include "config.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_authenticate_net_secondary (void)
+{
+ mongo_sync_connection *c;
+
+ skip (!config.secondary_host, 4,
+ "Secondary server not configured");
+
+ c = mongo_sync_connect (config.secondary_host, config.secondary_port, TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+ mongo_sync_cmd_is_master (c);
+
+ ok (mongo_sync_cmd_authenticate (c, config.db, "test", "s3kr1+") == TRUE,
+ "mongo_sync_cmd_authenticate() works");
+ ok (mongo_sync_cmd_authenticate (c, config.db, "test", "bad_pw") == FALSE,
+ "mongo_sync_cmd_authenticate() should fail with a bad password");
+ ok (mongo_sync_cmd_authenticate (c, config.db, "xxx", "s3kr1+") == FALSE,
+ "mongo_sync_cmd_authenticate() should fail with a bad username");
+
+ shutdown (c->super.fd, SHUT_RDWR);
+ sleep (3);
+
+ ok (mongo_sync_cmd_authenticate (c, config.db, "test", "s3kr1+") == TRUE,
+ "mongo_sync_cmd_authenticate() automatically reconnects");
+
+ mongo_sync_disconnect (c);
+
+ endskip;
+}
+
+void
+test_mongo_sync_cmd_authenticate_net (void)
+{
+ mongo_sync_connection *c;
+
+ begin_network_tests (8);
+
+ c = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+
+ mongo_sync_cmd_user_add (c, config.db, "test", "s3kr1+");
+
+ ok (mongo_sync_cmd_authenticate (c, config.db, "test", "s3kr1+") == TRUE,
+ "mongo_sync_cmd_authenticate() works");
+ ok (mongo_sync_cmd_authenticate (c, config.db, "test", "bad_pw") == FALSE,
+ "mongo_sync_cmd_authenticate() should fail with a bad password");
+ ok (mongo_sync_cmd_authenticate (c, config.db, "xxx", "s3kr1+") == FALSE,
+ "mongo_sync_cmd_authenticate() should fail with a bad username");
+
+ shutdown (c->super.fd, SHUT_RDWR);
+ sleep (3);
+
+ ok (mongo_sync_cmd_authenticate (c, config.db, "test", "s3kr1+") == TRUE,
+ "mongo_sync_cmd_authenticate() automatically reconnects");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_authenticate_net_secondary ();
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_authenticate (void)
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+
+ errno = 0;
+ ok (mongo_sync_cmd_authenticate (NULL, "test", "test",
+ "s3kr1+") == FALSE,
+ "mongo_sync_cmd_authenticate() fails with a NULL connection");
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is set to ENOTCONN");
+
+ errno = 0;
+ ok (mongo_sync_cmd_authenticate (c, NULL, "test", "s3kr1+") == FALSE,
+ "mongo_sync_cmd_authenticate() fails with a NULL db");
+ cmp_ok (errno, "==", EINVAL,
+ "errno is set to EINVAL");
+
+ errno = 0;
+ ok (mongo_sync_cmd_authenticate (c, "test", NULL, "s3kr1+") == FALSE,
+ "mongo_sync_cmd_authenticate() fails with a NULL user");
+ cmp_ok (errno, "==", EINVAL,
+ "errno is set to EINVAL");
+
+ errno = 0;
+ ok (mongo_sync_cmd_authenticate (c, "test", "test", NULL) == FALSE,
+ "mongo_sync_cmd_authenticate() fails with a NULL password");
+ cmp_ok (errno, "==", EINVAL,
+ "errno is set to EINVAL");
+
+ ok (mongo_sync_cmd_authenticate (c, "test", "test",
+ "s3kr1+") == FALSE,
+ "mongo_sync_cmd_authenticate() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_authenticate_net ();
+}
+
+RUN_TEST (17, mongo_sync_cmd_authenticate);
diff --git a/tests/unit/mongo/sync/sync_cmd_authenticate_cache.c b/tests/unit/mongo/sync/sync_cmd_authenticate_cache.c
new file mode 100644
index 0000000..c0581b0
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_authenticate_cache.c
@@ -0,0 +1,60 @@
+#include "test.h"
+#include "mongo.h"
+#include "config.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_authenticate_cache (void)
+{
+ mongo_sync_conn_recovery_cache *cache;
+ mongo_sync_connection *c;
+
+ begin_network_tests (8);
+
+ cache = mongo_sync_conn_recovery_cache_new ();
+ mongo_sync_conn_recovery_cache_seed_add (cache,
+ config.primary_host,
+ config.primary_port);
+
+ c = mongo_sync_connect_recovery_cache (cache, TRUE);
+
+ mongo_sync_cmd_user_add (c, config.db, "test", "s3kr1+");
+
+ ok (mongo_sync_cmd_authenticate (c, config.db, "test", "s3kr1+") == TRUE,
+ "mongo_sync_cmd_authenticate() works");
+
+ mongo_sync_disconnect (c);
+
+ ok ((cache->auth.db != NULL) && (strcmp (cache->auth.db, config.db) == 0),
+ "db is cached");
+
+ ok ((cache->auth.user != NULL) && (strcmp (cache->auth.user, "test") == 0),
+ "user is cached");
+
+ ok ((cache->auth.pw != NULL) && (strcmp (cache->auth.pw, "s3kr1+") == 0),
+ "pw is cached");
+
+ c = mongo_sync_connect_recovery_cache (cache, TRUE);
+
+ ok (c->auth.db != NULL, "db is loaded from cache");
+
+ ok (c->auth.user != NULL, "username is loaded from cache");
+
+ ok (c->auth.pw != NULL, "password is loaded from cache");
+
+ ok (mongo_sync_cmd_authenticate (c,
+ c->auth.db,
+ c->auth.user,
+ c->auth.pw) == TRUE,
+ "mongo_sync_cmd_authenticate() works with cached auth. credentials");
+
+ mongo_sync_disconnect (c);
+ mongo_sync_conn_recovery_cache_free (cache);
+
+ end_network_tests ();
+}
+
+RUN_TEST (8, mongo_sync_cmd_authenticate_cache);
diff --git a/tests/unit/mongo/sync/sync_cmd_count.c b/tests/unit/mongo/sync/sync_cmd_count.c
new file mode 100644
index 0000000..2cb8645
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_count.c
@@ -0,0 +1,119 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_count_net_secondary (void)
+{
+ mongo_sync_connection *conn;
+ bson *b;
+ gdouble d;
+
+ skip (!config.secondary_host, 2,
+ "Secondary server not configured");
+
+ conn = mongo_sync_connect (config.secondary_host, config.secondary_port,
+ TRUE);
+ mongo_sync_cmd_is_master (conn);
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+ b = bson_new ();
+ bson_append_string (b, "test-name", __FILE__, -1);
+ bson_finish (b);
+
+ d = mongo_sync_cmd_count (conn, config.db, config.coll, b);
+ ok (d > 0,
+ "mongo_sync_cmd_count() works on the secondary too");
+
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (3);
+
+ d = mongo_sync_cmd_count (conn, config.db, config.coll, b);
+ ok (d > 0,
+ "mongo_sync_cmd_count() automatically reconnects");
+
+ bson_free (b);
+ mongo_sync_disconnect (conn);
+
+ endskip;
+}
+
+void
+test_mongo_sync_cmd_count_net (void)
+{
+ mongo_sync_connection *conn;
+ bson *b;
+ gdouble d;
+ gint i;
+
+ begin_network_tests (4);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+ b = bson_new ();
+ for (i = 0; i < 40; i++)
+ {
+ bson_reset (b);
+ bson_append_string (b, "test-name", __FILE__, -1);
+ bson_append_int32 (b, "seq", i);
+ bson_finish (b);
+
+ mongo_sync_cmd_insert (conn, config.ns, b, NULL);
+ }
+ bson_free (b);
+
+ b = bson_new ();
+ bson_append_string (b, "test-name", __FILE__, -1);
+ bson_finish (b);
+
+ d = mongo_sync_cmd_count (conn, config.db, config.coll, b);
+ ok (d > 0,
+ "mongo_sync_cmd_count() works");
+
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (3);
+
+ d = mongo_sync_cmd_count (conn, config.db, config.coll, b);
+ ok (d > 0,
+ "mongo_sync_cmd_count() automatically reconnects");
+
+ bson_free (b);
+ mongo_sync_disconnect (conn);
+
+ test_mongo_sync_cmd_count_net_secondary ();
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_count (void)
+{
+ mongo_sync_connection *c;
+ bson *b;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+ b = test_bson_generate_full ();
+
+ ok (mongo_sync_cmd_count (NULL, "test", "db", b) == -1,
+ "mongo_sync_cmd_count() fails with a NULL connection");
+ ok (mongo_sync_cmd_count (c, NULL, "db", b) == -1,
+ "mongo_sync_cmd_count() fails with a NULL db");
+ ok (mongo_sync_cmd_count (c, "test", NULL, b) == -1,
+ "mongo_sync_cmd_count() fails with a NULL collection");
+
+ ok (mongo_sync_cmd_count (c, "test", "db", b) == -1,
+ "mongo_sync_cmd_count() fails with a bogus FD");
+ mongo_sync_conn_set_slaveok (c, TRUE);
+ ok (mongo_sync_cmd_count (c, "test", "db", b) == -1,
+ "mongo_sync_cmd_count() fails with a bogus FD");
+
+ bson_free (b);
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_count_net ();
+}
+
+RUN_TEST (9, mongo_sync_cmd_count);
diff --git a/tests/unit/mongo/sync/sync_cmd_create.c b/tests/unit/mongo/sync/sync_cmd_create.c
new file mode 100644
index 0000000..c3334ea
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_create.c
@@ -0,0 +1,78 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_create_net (void)
+{
+ mongo_sync_connection *conn;
+ gchar *cc;
+
+ begin_network_tests (5);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+
+ cc = g_strconcat (config.coll, ".capped", NULL);
+
+ mongo_sync_cmd_drop (conn, config.db, config.coll);
+ mongo_sync_cmd_drop (conn, config.db, cc);
+
+ ok (mongo_sync_cmd_create (conn, config.db, config.coll,
+ MONGO_COLLECTION_DEFAULTS) == TRUE,
+ "mongo_sync_cmd_create() can create normal collections");
+ mongo_sync_cmd_drop (conn, config.db, config.coll);
+
+ ok (mongo_sync_cmd_create (conn, config.db, config.coll,
+ MONGO_COLLECTION_SIZED,
+ (gint64) 64 * 1024 * 10) == TRUE,
+ "mongo_sync_cmd_create() can create pre-allocated collections");
+
+ ok (mongo_sync_cmd_create (conn, config.db, cc,
+ MONGO_COLLECTION_CAPPED, (gint64) -1) == FALSE,
+ "mongo_sync_cmd_create() fails when trying to create a capped "
+ "collection with an invalid size");
+ ok (mongo_sync_cmd_create (conn, config.db, cc,
+ MONGO_COLLECTION_CAPPED_MAX,
+ (gint64) (64 * 1024 * 10), (gint64) -1) == FALSE,
+ "mongo_sync_cmd_create() fails when trying to create a capped "
+ "collection with invalid max.");
+ ok (mongo_sync_cmd_create (conn, config.db, cc,
+ MONGO_COLLECTION_CAPPED_MAX |
+ MONGO_COLLECTION_AUTO_INDEX_ID,
+ (gint64)(64 * 1024 * 10), (gint64) 10) == TRUE,
+ "mongo_sync_cmd_create() can create capped collections");
+
+ mongo_sync_cmd_drop (conn, config.db, cc);
+
+ g_free (cc);
+ mongo_sync_disconnect (conn);
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_create (void)
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+
+ ok (mongo_sync_cmd_create (NULL, "test", "db",
+ MONGO_COLLECTION_DEFAULTS) == FALSE,
+ "mongo_sync_cmd_create() fails with a NULL connection");
+
+ ok (mongo_sync_cmd_create (c, NULL, "db",
+ MONGO_COLLECTION_DEFAULTS) == FALSE,
+ "mongo_sync_cmd_create() fails with a NULL db");
+ ok (mongo_sync_cmd_create (c, "test", NULL,
+ MONGO_COLLECTION_DEFAULTS) == FALSE,
+ "mongo_sync_cmd_create() fails with a NULL collection");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_create_net ();
+}
+
+RUN_TEST (8, mongo_sync_cmd_create);
diff --git a/tests/unit/mongo/sync/sync_cmd_custom.c b/tests/unit/mongo/sync/sync_cmd_custom.c
new file mode 100644
index 0000000..1bd3f01
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_custom.c
@@ -0,0 +1,100 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_custom_net_secondary (void)
+{
+ mongo_sync_connection *conn;
+ bson *cmd;
+ mongo_packet *p;
+
+ skip (!config.secondary_host, 1,
+ "Secondary server not configured");
+
+ conn = mongo_sync_connect (config.secondary_host, config.secondary_port,
+ TRUE);
+ cmd = bson_build (BSON_TYPE_INT32, "getnonce", 1,
+ BSON_TYPE_NONE);
+ bson_finish (cmd);
+
+ p = mongo_sync_cmd_custom (conn, config.db, cmd);
+ ok (p != NULL,
+ "mongo_sync_cmd_custom() works on the secondary too");
+ mongo_wire_packet_free (p);
+
+ bson_free (cmd);
+ mongo_sync_disconnect (conn);
+
+ endskip;
+}
+
+void
+test_mongo_sync_cmd_custom_net (void)
+{
+ mongo_sync_connection *conn;
+ bson *cmd;
+ mongo_packet *p;
+
+ begin_network_tests (3);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ mongo_sync_cmd_is_master (conn);
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+ cmd = bson_build (BSON_TYPE_INT32, "getnonce", 1,
+ BSON_TYPE_NONE);
+ bson_finish (cmd);
+
+ p = mongo_sync_cmd_custom (conn, config.db, cmd);
+ ok (p != NULL,
+ "mongo_sync_cmd_custom() works");
+ mongo_wire_packet_free (p);
+
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (3);
+
+ p = mongo_sync_cmd_custom (conn, config.db, cmd);
+ ok (p != NULL,
+ "mongo_sync_cmd_custom() automatically reconnects");
+ mongo_wire_packet_free (p);
+
+ bson_free (cmd);
+ mongo_sync_disconnect (conn);
+
+ test_mongo_sync_cmd_custom_net_secondary ();
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_custom (void)
+{
+ mongo_sync_connection *c;
+ bson *cmd;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+ cmd = bson_new ();
+ bson_append_int32 (cmd, "getnonce", 1);
+ bson_finish (cmd);
+
+ ok (mongo_sync_cmd_custom (NULL, "test", cmd) == NULL,
+ "mongo_sync_cmd_custom() fails with a NULL connection");
+ ok (mongo_sync_cmd_custom (c, NULL, cmd) == NULL,
+ "mongo_sync_cmd_custom() fails with a NULL namespace");
+
+ ok (mongo_sync_cmd_custom (c, "test", cmd) == NULL,
+ "mongo_sync_cmd_custom() fails with a bogus FD");
+ mongo_sync_conn_set_slaveok (c, TRUE);
+ ok (mongo_sync_cmd_custom (c, "test", cmd) == NULL,
+ "mongo_sync_cmd_custom() fails with a bogus FD");
+
+ bson_free (cmd);
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_custom_net ();
+}
+
+RUN_TEST (7, mongo_sync_cmd_custom);
diff --git a/tests/unit/mongo/sync/sync_cmd_delete.c b/tests/unit/mongo/sync/sync_cmd_delete.c
new file mode 100644
index 0000000..0c20ffe
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_delete.c
@@ -0,0 +1,135 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_delete_net_secondary (void)
+{
+ mongo_sync_connection *conn;
+ bson *b;
+ GList *l;
+
+ skip (!config.secondary_host, 2,
+ "Secondary server not configured");
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE);
+
+ b = bson_new ();
+ bson_append_string (b, "unit-test", __FILE__, -1);
+ bson_append_boolean (b, "delete-me", TRUE);
+ bson_finish (b);
+ mongo_sync_cmd_insert (conn, config.ns, b, NULL);
+
+ mongo_sync_disconnect (conn);
+
+ conn = mongo_sync_connect (config.secondary_host, config.secondary_port,
+ TRUE);
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+ ok (mongo_sync_cmd_delete (conn, config.ns, 0, b) == TRUE,
+ "mongo_sync_cmd_delete() can reconnect to master");
+ mongo_sync_disconnect (conn);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE);
+ mongo_sync_cmd_insert (conn, config.ns, b, NULL);
+ mongo_sync_disconnect (conn);
+
+ conn = mongo_sync_connect (config.secondary_host, config.secondary_port,
+ TRUE);
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+ shutdown (conn->super.fd, SHUT_RDWR);
+
+ l = conn->rs.hosts;
+ while (l)
+ {
+ g_free (l->data);
+ l = g_list_delete_link (l, l);
+ }
+ conn->rs.hosts = NULL;
+
+ l = conn->rs.seeds;
+ while (l)
+ {
+ g_free (l->data);
+ l = g_list_delete_link (l, l);
+ }
+ conn->rs.seeds = NULL;
+
+ sleep (3);
+
+ ok (mongo_sync_cmd_delete (conn, config.ns, 0, b) == FALSE,
+ "mongo_sync_cmd_delete() fails if it can't reconnect to master");
+
+ mongo_sync_disconnect (conn);
+ bson_free (b);
+
+ endskip;
+}
+
+void
+test_mongo_sync_cmd_delete_net (void)
+{
+ mongo_sync_connection *conn;
+ bson *b;
+
+ begin_network_tests (4);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+ b = bson_new ();
+ bson_append_string (b, "unit-test", __FILE__, -1);
+ bson_append_boolean (b, "delete-me", TRUE);
+ bson_finish (b);
+ mongo_sync_cmd_insert (conn, config.ns, b, NULL);
+
+ ok (mongo_sync_cmd_delete (conn, config.ns, 0, b) == TRUE,
+ "mongo_sync_cmd_delete() works");
+
+ mongo_sync_cmd_insert (conn, config.ns, b, NULL);
+
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (3);
+
+ ok (mongo_sync_cmd_delete (conn, config.ns, 0, b) == TRUE,
+ "mongo_sync_cmd_delete() automatically reconnects");
+
+ mongo_sync_disconnect (conn);
+ bson_free (b);
+
+ test_mongo_sync_cmd_delete_net_secondary ();
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_delete (void)
+{
+ mongo_sync_connection *c;
+ bson *b;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+ b = test_bson_generate_full ();
+
+ ok (mongo_sync_cmd_delete (NULL, "test.ns", 0, b) == FALSE,
+ "mongo_sync_cmd_delete() fails with a NULL connection");
+ ok (mongo_sync_cmd_delete (c, NULL, 0, b) == FALSE,
+ "mongo_sync_cmd_delete() fails with a NULL namespace");
+ ok (mongo_sync_cmd_delete (c, "test.ns", 0, NULL) == FALSE,
+ "mongo_sync_cmd_delete() fails with a NULL selector");
+
+ ok (mongo_sync_cmd_delete (c, "test.ns", 0, b) == FALSE,
+ "mongo_sync_cmd_delete() fails with a bogus FD");
+
+ bson_free (b);
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_delete_net ();
+}
+
+RUN_TEST (8, mongo_sync_cmd_delete);
diff --git a/tests/unit/mongo/sync/sync_cmd_drop.c b/tests/unit/mongo/sync/sync_cmd_drop.c
new file mode 100644
index 0000000..c7f9d9f
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_drop.c
@@ -0,0 +1,93 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_drop_net_secondary (void)
+{
+ mongo_sync_connection *conn;
+ bson *b;
+ gboolean ret;
+
+ skip (!config.secondary_host, 1,
+ "Secondary server not configured");
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+ b = bson_build (BSON_TYPE_BOOLEAN, "filler", TRUE,
+ BSON_TYPE_NONE);
+ bson_finish (b);
+ mongo_sync_cmd_insert (conn, config.ns, b, NULL);
+ bson_free (b);
+ mongo_sync_disconnect (conn);
+
+ conn = mongo_sync_connect (config.secondary_host, config.secondary_port,
+ TRUE);
+ mongo_sync_cmd_is_master (conn);
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+ ret = mongo_sync_cmd_drop (conn, config.db, config.coll);
+ ok (ret && mongo_sync_cmd_is_master (conn),
+ "mongo_sync_cmd_drop() can reconnect to master");
+ mongo_sync_disconnect (conn);
+
+ endskip;
+}
+
+void
+test_mongo_sync_cmd_drop_net (void)
+{
+ mongo_sync_connection *conn;
+ bson *b;
+
+ begin_network_tests (3);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+ b = bson_build (BSON_TYPE_BOOLEAN, "filler", TRUE,
+ BSON_TYPE_NONE);
+ bson_finish (b);
+ mongo_sync_cmd_insert (conn, config.ns, b, NULL);
+
+ ok (mongo_sync_cmd_drop (conn, config.db, config.coll) == TRUE,
+ "mongo_sync_cmd_drop() works");
+
+ mongo_sync_cmd_insert (conn, config.ns, b, NULL);
+
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (3);
+
+ ok (mongo_sync_cmd_drop (conn, config.db, config.coll) == TRUE,
+ "mongo_sync_cmd_drop() automatically reconnects");
+
+ bson_free (b);
+ mongo_sync_disconnect (conn);
+
+ test_mongo_sync_cmd_drop_net_secondary ();
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_drop (void)
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+
+ ok (mongo_sync_cmd_drop (NULL, "test", "db") == FALSE,
+ "mongo_sync_cmd_drop() fails with a NULL connection");
+ ok (mongo_sync_cmd_drop (c, NULL, "db") == FALSE,
+ "mongo_sync_cmd_drop() fails with a NULL db");
+
+ ok (mongo_sync_cmd_drop (c, "test", "db") == FALSE,
+ "mongo_sync_cmd_drop() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_drop_net ();
+}
+
+RUN_TEST (6, mongo_sync_cmd_drop);
diff --git a/tests/unit/mongo/sync/sync_cmd_exists.c b/tests/unit/mongo/sync/sync_cmd_exists.c
new file mode 100644
index 0000000..f3c535f
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_exists.c
@@ -0,0 +1,85 @@
+#include "test.h"
+#include "mongo.h"
+
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_exists_net (void)
+{
+ mongo_sync_connection *conn;
+ gchar *cc, *ns;
+
+ bson *r;
+ bson_cursor *c;
+ const gchar *str = NULL;
+ gboolean capped = FALSE;
+
+ begin_network_tests (4);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+
+ cc = g_strconcat (config.coll, ".capped", NULL);
+
+ mongo_sync_cmd_drop (conn, config.db, config.coll);
+ mongo_sync_cmd_drop (conn, config.db, cc);
+
+ mongo_sync_cmd_create (conn, config.db, config.coll,
+ MONGO_COLLECTION_DEFAULTS);
+ mongo_sync_cmd_create (conn, config.db, cc,
+ MONGO_COLLECTION_CAPPED,
+ (gint64) 64 * 1024 * 10);
+
+ r = mongo_sync_cmd_exists (conn, config.db, config.coll);
+ c = bson_find (r, "name");
+ bson_cursor_get_string (c, &str);
+ is (str, config.ns,
+ "mongo_sync_cmd_exists() works on normal collections");
+ bson_cursor_find (c, "capped");
+ bson_cursor_get_boolean (c, &capped);
+ cmp_ok (capped, "==", FALSE,
+ "mongo_sync_cmd_exists() returned correct info");
+ bson_cursor_free (c);
+ bson_free (r);
+
+ r = mongo_sync_cmd_exists (conn, config.db, cc);
+ ns = g_strconcat (config.db, ".", cc, NULL);
+ c = bson_find (r, "name");
+ bson_cursor_get_string (c, &str);
+ is (str, ns,
+ "mongo_sync_cmd_exists() works on capped collections");
+ bson_cursor_find (c, "capped");
+ bson_cursor_get_boolean (c, &capped);
+ cmp_ok (capped, "==", TRUE,
+ "mongo_sync_cmd_exists() returned correct info");
+ bson_cursor_free (c);
+ g_free (ns);
+ bson_free (r);
+
+ mongo_sync_cmd_drop (conn, config.db, cc);
+
+ g_free (cc);
+ mongo_sync_disconnect (conn);
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_exists (void)
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+
+ ok (mongo_sync_cmd_exists (NULL, "test", "db") == NULL,
+ "mongo_sync_cmd_exists() fails with a NULL connection");
+ ok (mongo_sync_cmd_exists (c, NULL, "db") == NULL,
+ "mongo_sync_cmd_exists() fails with a NULL db");
+ ok (mongo_sync_cmd_exists (c, "test", NULL) == NULL,
+ "mongo_sync_cmd_exists() fails with a NULL collection");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_exists_net ();
+}
+
+RUN_TEST (7, mongo_sync_cmd_exists);
diff --git a/tests/unit/mongo/sync/sync_cmd_get_last_error.c b/tests/unit/mongo/sync/sync_cmd_get_last_error.c
new file mode 100644
index 0000000..fef9f78
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_get_last_error.c
@@ -0,0 +1,35 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+
+void
+test_mongo_sync_cmd_get_last_error (void)
+{
+ mongo_sync_connection *c;
+ gchar *error;
+
+ test_env_setup ();
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+
+ errno = 0;
+ ok (mongo_sync_cmd_get_last_error (NULL, config.db, &error) == FALSE,
+ "mongo_sync_cmd_get_last_error() returns FALSE with a NULL connection");
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is set to ENOTCONN");
+
+ ok (mongo_sync_cmd_get_last_error (c, NULL, &error) == FALSE,
+ "mongo_sync_cmd_get_last_error() fails with a NULL db");
+
+ errno = 0;
+ ok (mongo_sync_cmd_get_last_error (c, config.db, NULL) == FALSE,
+ "mongo_sync_cmd_get_last_error() fails with a NULL error destination");
+ cmp_ok (errno, "==", EINVAL,
+ "errno is set to EINVAL");
+
+ mongo_sync_disconnect (c);
+ test_env_free ();
+}
+
+RUN_TEST (5, mongo_sync_cmd_get_last_error);
diff --git a/tests/unit/mongo/sync/sync_cmd_get_last_error_full.c b/tests/unit/mongo/sync/sync_cmd_get_last_error_full.c
new file mode 100644
index 0000000..505fd3d
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_get_last_error_full.c
@@ -0,0 +1,35 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+
+void
+test_mongo_sync_cmd_get_last_error_full (void)
+{
+ mongo_sync_connection *c;
+ bson *error;
+
+ test_env_setup ();
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+
+ errno = 0;
+ ok (mongo_sync_cmd_get_last_error_full (NULL, config.db, &error) == FALSE,
+ "mongo_sync_cmd_get_last_error_full() returns FALSE with a NULL connection");
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is set to ENOTCONN");
+
+ ok (mongo_sync_cmd_get_last_error_full (c, NULL, &error) == FALSE,
+ "mongo_sync_cmd_get_last_error_full() fails with a NULL db");
+
+ errno = 0;
+ ok (mongo_sync_cmd_get_last_error_full (c, config.db, NULL) == FALSE,
+ "mongo_sync_cmd_get_last_error_full() fails with a NULL error destination");
+ cmp_ok (errno, "==", EINVAL,
+ "errno is set to EINVAL");
+
+ mongo_sync_disconnect (c);
+ test_env_free ();
+}
+
+RUN_TEST (5, mongo_sync_cmd_get_last_error_full);
diff --git a/tests/unit/mongo/sync/sync_cmd_get_more.c b/tests/unit/mongo/sync/sync_cmd_get_more.c
new file mode 100644
index 0000000..18a2f97
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_get_more.c
@@ -0,0 +1,135 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_get_more_net_secondary (void)
+{
+ mongo_packet *p;
+ mongo_sync_connection *conn;
+ bson *b;
+
+ mongo_reply_packet_header rh;
+ gint64 cid;
+
+ skip (!config.secondary_host, 2,
+ "Secondary server not configured");
+
+ conn = mongo_sync_connect (config.secondary_host, config.secondary_port,
+ TRUE);
+ b = bson_new ();
+ bson_append_string (b, "test-name", __FILE__, -1);
+ bson_finish (b);
+
+ p = mongo_sync_cmd_query (conn, config.ns,
+ MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT,
+ 0, 2, b, NULL);
+ bson_free (b);
+ mongo_wire_reply_packet_get_header (p, &rh);
+ cid = rh.cursor_id;
+ mongo_wire_packet_free (p);
+
+ p = mongo_sync_cmd_get_more (conn, config.db, 3, cid);
+ ok (p != NULL,
+ "mongo_sync_cmd_get_more() works on secondary too");
+ mongo_wire_packet_free (p);
+
+ mongo_sync_reconnect (conn, TRUE);
+
+ p = mongo_sync_cmd_get_more (conn, config.db, 10, cid);
+ ok (p == NULL && errno == EPROTO,
+ "mongo_sync_cmd_get_more() can't jump servers");
+ mongo_wire_packet_free (p);
+
+ mongo_sync_disconnect (conn);
+
+ endskip;
+}
+
+void
+test_mongo_sync_cmd_get_more_net (void)
+{
+ mongo_packet *p;
+ mongo_sync_connection *conn;
+ bson *b;
+ gint i;
+ mongo_reply_packet_header rh;
+ gint64 cid;
+
+ begin_network_tests (4);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+ b = bson_new ();
+ for (i = 0; i < 40; i++)
+ {
+ bson_reset (b);
+ bson_append_string (b, "test-name", __FILE__, -1);
+ bson_append_int32 (b, "seq", i);
+ bson_finish (b);
+
+ mongo_sync_cmd_insert (conn, config.ns, b, NULL);
+ }
+ bson_free (b);
+
+ b = bson_new ();
+ bson_append_string (b, "test-name", __FILE__, -1);
+ bson_finish (b);
+
+ p = mongo_sync_cmd_query (conn, config.ns,
+ MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT,
+ 0, 2, b, NULL);
+ bson_free (b);
+ mongo_wire_reply_packet_get_header (p, &rh);
+ cid = rh.cursor_id;
+ mongo_wire_packet_free (p);
+
+ p = mongo_sync_cmd_get_more (conn, config.ns, 3, cid);
+ ok (p != NULL,
+ "mongo_sync_cmd_get_more() works");
+ mongo_wire_packet_free (p);
+
+ errno = 0;
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (3);
+
+ p = mongo_sync_cmd_get_more (conn, config.ns, 10, cid);
+ ok (p != NULL,
+ "mongo_sync_cmd_get_more() automatically reconnects");
+ mongo_wire_packet_free (p);
+
+ mongo_sync_disconnect (conn);
+
+ test_mongo_sync_cmd_get_more_net_secondary ();
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_get_more (void)
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+
+ ok (mongo_sync_cmd_get_more (NULL, "test.ns", 1, 1234) == NULL,
+ "mongo_sync_cmd_get_more() fails with a NULL connection");
+ ok (mongo_sync_cmd_get_more (c, NULL, 1, 1234) == NULL,
+ "mongo_sync_cmd_get_more() fails with a NULL namespace");
+
+ ok (mongo_sync_cmd_get_more (c, "test.ns", 1, 1234) == NULL,
+ "mongo_sync_cmd_get_more() fails with a bogus FD");
+ mongo_sync_conn_set_slaveok (c, TRUE);
+ ok (mongo_sync_cmd_get_more (c, "test.ns", 1, 1234) == NULL,
+ "mongo_sync_cmd_get_more() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_get_more_net ();
+}
+
+RUN_TEST (8, mongo_sync_cmd_get_more);
diff --git a/tests/unit/mongo/sync/sync_cmd_index_create.c b/tests/unit/mongo/sync/sync_cmd_index_create.c
new file mode 100644
index 0000000..6603586
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_index_create.c
@@ -0,0 +1,62 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_index_create (void)
+{
+ mongo_sync_connection *c;
+ bson *doc, *indexes, *bad_index;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+ doc = test_bson_generate_full ();
+ indexes = bson_build (BSON_TYPE_INT32, "sex", 1,
+ BSON_TYPE_DOUBLE, "double", 1.0,
+ BSON_TYPE_BOOLEAN, "TRUE", TRUE,
+ BSON_TYPE_INT64, "print", (gint64)-1,
+ BSON_TYPE_INT32, "zero", 0,
+ BSON_TYPE_NONE);
+ bson_finish (indexes);
+
+ bad_index = bson_build (BSON_TYPE_STRING, "str", "teapot", -1,
+ BSON_TYPE_NONE);
+ bson_finish (bad_index);
+
+ ok (mongo_sync_cmd_index_create (NULL, "test.ns", indexes, 0) == FALSE,
+ "mongo_sync_cmd_index_create() fails with a NULL connection");
+ ok (mongo_sync_cmd_index_create (c, NULL, indexes, 0) == FALSE,
+ "mongo_sync_cmd_index_create() fails with a NULL namespace");
+ ok (mongo_sync_cmd_index_create (c, "test.ns", NULL, 0) == FALSE,
+ "mongo_sync_cmd_index_create() fails with NULL indexes");
+ ok (mongo_sync_cmd_index_create (c, "bogus", indexes, 0) == FALSE,
+ "mongo_sync_cmd_index_create() fails with a bogus namespace");
+ ok (mongo_sync_cmd_index_create (c, "test.ns", indexes, 0) == FALSE,
+ "mongo_sync_cmd_index_create() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+
+ begin_network_tests (2);
+
+ c = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE);
+ mongo_sync_cmd_insert (c, config.ns, doc, NULL);
+
+ ok (mongo_sync_cmd_index_create(c, config.ns, indexes,
+ MONGO_INDEX_UNIQUE | MONGO_INDEX_DROP_DUPS |
+ MONGO_INDEX_BACKGROUND | MONGO_INDEX_SPARSE),
+ "mongo_sync_cmd_index_create() works");
+
+ ok (mongo_sync_cmd_index_create(c, config.ns, bad_index, 0) == FALSE,
+ "mongo_sync_cmd_index_create() should refuse to work with an invalid index spec");
+
+ mongo_sync_disconnect (c);
+
+ bson_free (doc);
+ bson_free (indexes);
+
+ end_network_tests ();
+}
+
+RUN_TEST (7, mongo_sync_cmd_index_create);
diff --git a/tests/unit/mongo/sync/sync_cmd_index_drop.c b/tests/unit/mongo/sync/sync_cmd_index_drop.c
new file mode 100644
index 0000000..176de6d
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_index_drop.c
@@ -0,0 +1,51 @@
+#include "test.h"
+#include "mongo.h"
+
+void
+test_mongo_sync_cmd_index_drop (void) /* Verifies mongo_sync_cmd_index_drop(): argument validation on a fake connection, then a live drop against the primary. */
+{
+ mongo_sync_connection *c;
+ bson *doc, *indexes;
+
+ c = test_make_fake_sync_conn (-1, FALSE); /* fake connection with an invalid FD (-1), so every command must fail */
+ doc = test_bson_generate_full ();
+ indexes = bson_build (BSON_TYPE_INT32, "sex", 1,
+ BSON_TYPE_DOUBLE, "double", 1.0,
+ BSON_TYPE_BOOLEAN, "TRUE", TRUE,
+ BSON_TYPE_INT64, "print", (gint64)-1,
+ BSON_TYPE_NONE);
+ bson_finish (indexes);
+
+ ok (mongo_sync_cmd_index_drop (NULL, "test.ns", indexes) == FALSE,
+ "mongo_sync_cmd_index_drop() fails with a NULL connection");
+ ok (mongo_sync_cmd_index_drop (c, NULL, indexes) == FALSE,
+ "mongo_sync_cmd_index_drop() fails with a NULL namespace");
+ ok (mongo_sync_cmd_index_drop (c, "test.ns", NULL) == FALSE,
+ "mongo_sync_cmd_index_drop() fails with NULL indexes");
+ ok (mongo_sync_cmd_index_drop (c, "bogus", indexes) == FALSE,
+ "mongo_sync_cmd_index_drop() fails with a bogus namespace");
+ ok (mongo_sync_cmd_index_drop (c, "test.ns", indexes) == FALSE,
+ "mongo_sync_cmd_index_drop() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+
+ begin_network_tests (1); /* remaining test requires a reachable primary */
+
+ c = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE);
+ mongo_sync_cmd_insert (c, config.ns, doc, NULL);
+
+ mongo_sync_cmd_index_create (c, config.ns, indexes, 0); /* create the index we are about to drop */
+
+ ok (mongo_sync_cmd_index_drop (c, config.ns, indexes) == TRUE,
+ "mongo_sync_cmd_index_drop() works");
+
+ mongo_sync_disconnect (c);
+
+ bson_free (doc);
+ bson_free (indexes);
+
+ end_network_tests ();
+}
+
+RUN_TEST (6, mongo_sync_cmd_index_drop);
diff --git a/tests/unit/mongo/sync/sync_cmd_index_drop_all.c b/tests/unit/mongo/sync/sync_cmd_index_drop_all.c
new file mode 100644
index 0000000..782fd93
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_index_drop_all.c
@@ -0,0 +1,49 @@
+#include "test.h"
+#include "mongo.h"
+
+void
+test_mongo_sync_cmd_index_drop_all (void) /* Verifies mongo_sync_cmd_index_drop_all(): argument validation, then a live drop-all against the primary. */
+{
+ mongo_sync_connection *c;
+ bson *doc, *indexes;
+
+ c = test_make_fake_sync_conn (-1, FALSE); /* fake connection with an invalid FD, so every command must fail */
+ doc = test_bson_generate_full ();
+ indexes = bson_build (BSON_TYPE_INT32, "sex", 1,
+ BSON_TYPE_DOUBLE, "double", 1.0,
+ BSON_TYPE_BOOLEAN, "TRUE", TRUE,
+ BSON_TYPE_INT64, "print", (gint64)-1,
+ BSON_TYPE_NONE);
+ bson_finish (indexes);
+
+ ok (mongo_sync_cmd_index_drop_all (NULL, "test.ns") == FALSE,
+ "mongo_sync_cmd_index_drop_all() fails with a NULL connection");
+ ok (mongo_sync_cmd_index_drop_all (c, NULL) == FALSE,
+ "mongo_sync_cmd_index_drop_all() fails with a NULL namespace");
+ ok (mongo_sync_cmd_index_drop_all (c, "bogus") == FALSE,
+ "mongo_sync_cmd_index_drop_all() fails with a bogus namespace");
+ ok (mongo_sync_cmd_index_drop_all (c, "test.ns") == FALSE,
+ "mongo_sync_cmd_index_drop_all() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+
+ begin_network_tests (1); /* remaining test requires a reachable primary */
+
+ c = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE);
+ mongo_sync_cmd_insert (c, config.ns, doc, NULL);
+
+ mongo_sync_cmd_index_create (c, config.ns, indexes, 0); /* create an index so drop-all has something to remove */
+
+ ok (mongo_sync_cmd_index_drop_all (c, config.ns) == TRUE,
+ "mongo_sync_cmd_index_drop_all() works");
+
+ mongo_sync_disconnect (c);
+
+ bson_free (doc);
+ bson_free (indexes);
+
+ end_network_tests ();
+}
+
+RUN_TEST (5, mongo_sync_cmd_index_drop_all);
diff --git a/tests/unit/mongo/sync/sync_cmd_insert.c b/tests/unit/mongo/sync/sync_cmd_insert.c
new file mode 100644
index 0000000..f9a0f6b
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_insert.c
@@ -0,0 +1,78 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_insert (void) /* Verifies mongo_sync_cmd_insert(): argument validation, live insert, auto-reconnect, and reconnect-to-master from a secondary. */
+{
+ mongo_sync_connection *c;
+ bson *b1, *b2;
+
+ c = test_make_fake_sync_conn (-1, FALSE); /* fake connection with an invalid FD */
+ b1 = test_bson_generate_full ();
+ b2 = test_bson_generate_full ();
+
+ ok (mongo_sync_cmd_insert (NULL, "test.ns", b1, b2, NULL) == FALSE,
+ "mongo_sync_cmd_insert() fails with a NULL connection");
+ ok (mongo_sync_cmd_insert (c, NULL, b1, b2, NULL) == FALSE,
+ "mongo_sync_cmd_insert() fails with a NULL namespace");
+ ok (mongo_sync_cmd_insert (c, "test.ns", NULL) == FALSE, /* variadic doc list is NULL-terminated; a leading NULL means "no documents" */
+ "mongo_sync_cmd_insert() fails with no documents to insert");
+ ok (mongo_sync_cmd_insert (c, "test.ns", b1, b2, NULL) == FALSE,
+ "mongo_sync_cmd_insert() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+ bson_free (b1);
+ bson_free (b2);
+
+ begin_network_tests (4);
+
+ b1 = bson_new ();
+ bson_append_string (b1, "sync_cmd_insert", "works", -1);
+ bson_finish (b1);
+ b2 = bson_new ();
+ bson_append_int32 (b2, "int32", 1984);
+ bson_finish (b2);
+
+ c = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+
+ ok (mongo_sync_cmd_insert (c, config.ns, b1, b2, NULL) == TRUE,
+ "mongo_sync_cmd_insert() works");
+
+ shutdown (c->super.fd, SHUT_RDWR); /* kill the socket under the connection to exercise auto-reconnect */
+ sleep (3);
+
+ ok (mongo_sync_cmd_insert (c, config.ns, b1, b2, NULL) == TRUE,
+ "mongo_sync_cmd_insert() automatically reconnects");
+
+ mongo_sync_disconnect (c);
+
+ /*
+ * Tests involving a secondary
+ */
+ skip (!config.secondary_host, 2, "Secondary host not set up");
+
+ c = mongo_sync_connect (config.secondary_host, config.secondary_port,
+ TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+
+ ok (c && mongo_sync_cmd_is_master (c) == FALSE,
+ "Connected to a secondary");
+
+ ok (mongo_sync_cmd_insert (c, config.ns, b1, b2, NULL) == TRUE,
+ "mongo_sync_cmd_insert() automatically reconnects to master");
+ mongo_sync_disconnect (c);
+
+ endskip;
+
+ bson_free (b1);
+ bson_free (b2);
+
+ end_network_tests ();
+}
+
+RUN_TEST (8, mongo_sync_cmd_insert);
diff --git a/tests/unit/mongo/sync/sync_cmd_insert_n.c b/tests/unit/mongo/sync/sync_cmd_insert_n.c
new file mode 100644
index 0000000..9281c17
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_insert_n.c
@@ -0,0 +1,100 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_insert_n (void) /* Verifies mongo_sync_cmd_insert_n(): argument validation, bulk insert, auto-reconnect, and reconnect-to-master from a secondary. */
+{
+ mongo_sync_connection *c;
+ bson *b1, *b2, *b3;
+ const bson *docs[10];
+
+ c = test_make_fake_sync_conn (-1, FALSE); /* fake connection with an invalid FD */
+ b1 = test_bson_generate_full ();
+ b2 = test_bson_generate_full ();
+ b3 = bson_new (); /* deliberately left unfinished for the unfinished-document check below */
+
+ docs[0] = b1;
+ docs[1] = b2;
+ docs[2] = b3;
+ docs[3] = NULL;
+ docs[4] = b1;
+
+ ok (mongo_sync_cmd_insert_n (NULL, "test.ns", 3, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() fails with a NULL connection");
+ ok (mongo_sync_cmd_insert_n (c, NULL, 3, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() fails with a NULL namespace");
+ ok (mongo_sync_cmd_insert_n (c, "test.ns", 0, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() fails with no documents to insert");
+ ok (mongo_sync_cmd_insert_n (c, "test.ns", 3, NULL) == FALSE,
+ "mongo_sync_cmd_insert_n() fails with no documents to insert");
+ ok (mongo_sync_cmd_insert_n (c, "test.ns", 3, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() fails when the array contains an "
+ "unfinished document");
+ bson_finish (b3);
+ ok (mongo_sync_cmd_insert_n (c, "test.ns", 5, docs) == FALSE, /* docs[3] is NULL */
+ "mongo_sync_cmd_insert_n() fails when the array contains a "
+ "NULL document");
+ ok (mongo_sync_cmd_insert_n (c, "test.ns", 3, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+ bson_free (b1);
+ bson_free (b2);
+ bson_free (b3);
+
+ begin_network_tests (4);
+
+ b1 = bson_new ();
+ bson_append_string (b1, "sync_cmd_insert_n", "works", -1); /* fixed: was appending to b2, which was freed above and not yet re-created */
+ bson_finish (b1);
+
+ b2 = bson_new ();
+ bson_append_int32 (b2, "int32", 1984);
+ bson_finish (b2);
+
+ docs[0] = b1;
+ docs[1] = b2;
+
+ c = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+
+ ok (mongo_sync_cmd_insert_n (c, config.ns, 2, docs) == TRUE,
+ "mongo_sync_cmd_insert_n() works");
+
+ shutdown (c->super.fd, SHUT_RDWR); /* kill the socket to exercise auto-reconnect */
+ sleep (3);
+
+ ok (mongo_sync_cmd_insert_n (c, config.ns, 2, docs) == TRUE,
+ "mongo_sync_cmd_insert_n() automatically reconnects");
+
+ mongo_sync_disconnect (c);
+
+ /*
+ * Tests involving a secondary
+ */
+ skip (!config.secondary_host, 2, "Secondary host not set up");
+
+ c = mongo_sync_connect (config.secondary_host, config.secondary_port,
+ TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+
+ ok (c && mongo_sync_cmd_is_master (c) == FALSE,
+ "Connected to a secondary");
+
+ ok (mongo_sync_cmd_insert_n (c, config.ns, 2, docs) == TRUE,
+ "mongo_sync_cmd_insert_n() automatically reconnects to master");
+ mongo_sync_disconnect (c);
+
+ endskip;
+
+ bson_free (b1);
+ bson_free (b2);
+
+ end_network_tests ();
+}
+
+RUN_TEST (11, mongo_sync_cmd_insert_n);
diff --git a/tests/unit/mongo/sync/sync_cmd_is_master.c b/tests/unit/mongo/sync/sync_cmd_is_master.c
new file mode 100644
index 0000000..6fa8bb4
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_is_master.c
@@ -0,0 +1,65 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+
+void
+test_mongo_sync_cmd_is_master_net_secondary (void) /* On a secondary, is_master must return FALSE without setting errno. */
+{
+ mongo_sync_connection *conn;
+
+ skip (!config.secondary_host, 1,
+ "Secondary server not configured");
+
+ errno = 0;
+ conn = mongo_sync_connect (config.secondary_host, config.secondary_port,
+ TRUE);
+ ok (mongo_sync_cmd_is_master (conn) == FALSE && errno == 0, /* FALSE + errno==0 means "not master", not an error */
+ "mongo_sync_cmd_is_master() works correctly on a secondary");
+ mongo_sync_disconnect (conn);
+
+ endskip;
+}
+
+void
+test_mongo_sync_cmd_is_master_net (void) /* Live tests: is_master on the primary, then the secondary variant above. */
+{
+ mongo_sync_connection *conn;
+
+ begin_network_tests (2);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ ok (mongo_sync_cmd_is_master (conn) == TRUE,
+ "mongo_sync_cmd_is_master() works");
+ mongo_sync_disconnect (conn);
+
+ test_mongo_sync_cmd_is_master_net_secondary ();
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_is_master (void) /* Entry point: error-path checks on a fake connection, then network tests. */
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE); /* fake connection with an invalid FD */
+
+ errno = 0;
+ ok (mongo_sync_cmd_is_master (NULL) == FALSE,
+ "mongo_sync_cmd_is_master fails with a NULL connection");
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is set to ENOTCONN");
+
+ errno = 0;
+ ok (mongo_sync_cmd_is_master (c) == FALSE,
+ "mongo_sync_cmd_is_master() works");
+ cmp_ok (errno, "!=", 0,
+ "errno is not 0");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_is_master_net ();
+}
+
+RUN_TEST (6, mongo_sync_cmd_is_master);
diff --git a/tests/unit/mongo/sync/sync_cmd_kill_cursors.c b/tests/unit/mongo/sync/sync_cmd_kill_cursors.c
new file mode 100644
index 0000000..c23a5d8
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_kill_cursors.c
@@ -0,0 +1,123 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_kill_cursors_net_secondary (void) /* Opens a cursor on a secondary and verifies kill_cursors succeeds there too. */
+{
+ mongo_packet *p;
+ mongo_sync_connection *conn;
+ bson *b;
+
+ mongo_reply_packet_header rh;
+ gint64 cid;
+
+ skip (!config.secondary_host, 1,
+ "Secondary server not configured");
+
+ conn = mongo_sync_connect (config.secondary_host, config.secondary_port,
+ TRUE);
+ b = bson_new ();
+ bson_append_string (b, "test-name", __FILE__, -1);
+ bson_finish (b);
+
+ p = mongo_sync_cmd_query (conn, config.ns,
+ MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT,
+ 0, 2, b, NULL);
+ bson_free (b);
+ mongo_wire_reply_packet_get_header (p, &rh); /* extract the server-assigned cursor id from the reply */
+ cid = rh.cursor_id;
+ mongo_wire_packet_free (p);
+
+ ok (mongo_sync_cmd_kill_cursors (conn, 1, cid) == TRUE,
+ "mongo_sync_cmd_kill_cursors() works on secondary too");
+
+ mongo_sync_disconnect (conn);
+
+ endskip;
+}
+
+void
+test_mongo_sync_cmd_kill_cursors_net (void) /* Live tests: kill a cursor on the primary, then again after a forced reconnect. */
+{
+ mongo_packet *p;
+ mongo_sync_connection *conn;
+ bson *b;
+ gint i;
+ mongo_reply_packet_header rh;
+ gint64 cid;
+
+ begin_network_tests (3);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+ b = bson_new ();
+ for (i = 0; i < 40; i++) /* seed enough documents that the query leaves an open cursor */
+ {
+ bson_reset (b);
+ bson_append_string (b, "test-name", __FILE__, -1);
+ bson_append_int32 (b, "seq", i);
+ bson_finish (b);
+
+ mongo_sync_cmd_insert (conn, config.ns, b, NULL);
+ }
+ bson_free (b);
+
+ b = bson_new ();
+ bson_append_string (b, "test-name", __FILE__, -1);
+ bson_finish (b);
+
+ p = mongo_sync_cmd_query (conn, config.ns,
+ MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT,
+ 0, 2, b, NULL);
+ mongo_wire_reply_packet_get_header (p, &rh);
+ cid = rh.cursor_id;
+ mongo_wire_packet_free (p);
+
+ ok (mongo_sync_cmd_kill_cursors (conn, 1, cid) == TRUE,
+ "mongo_sync_kill_cursors() works");
+
+ p = mongo_sync_cmd_query (conn, config.ns,
+ MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT,
+ 0, 2, b, NULL);
+ bson_free (b);
+ mongo_wire_reply_packet_get_header (p, &rh);
+ cid = rh.cursor_id;
+ mongo_wire_packet_free (p);
+ shutdown (conn->super.fd, SHUT_RDWR); /* kill the socket to exercise auto-reconnect */
+ sleep (3);
+
+ ok (mongo_sync_cmd_kill_cursors (conn, 1, cid) == TRUE,
+ "mongo_sync_cmd_kill_cursors() automatically reconnects");
+
+ mongo_sync_disconnect (conn);
+
+ test_mongo_sync_cmd_kill_cursors_net_secondary ();
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_kill_cursors (void) /* Entry point: error-path checks on a fake connection, then network tests. */
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+
+ ok (mongo_sync_cmd_kill_cursors (NULL, 1, (gint64)1234) == FALSE,
+ "mongo_sync_cmd_kill_cursors() fails with a NULL connection");
+ ok (mongo_sync_cmd_kill_cursors (c, 0, (gint64)1234) == FALSE, /* NOTE(review): passes 0, but the description says "negative" — confirm intent */
+ "mongo_sync_cmd_kill_cursors() fails with a negative number of cursors");
+
+ ok (mongo_sync_cmd_kill_cursors (c, 1, (gint64)1234) == FALSE,
+ "mongo_sync_cmd_kill_cursors() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_kill_cursors_net ();
+}
+
+RUN_TEST (6, mongo_sync_cmd_kill_cursors);
diff --git a/tests/unit/mongo/sync/sync_cmd_ping.c b/tests/unit/mongo/sync/sync_cmd_ping.c
new file mode 100644
index 0000000..51a8aaf
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_ping.c
@@ -0,0 +1,81 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_ping_net_secondary (void) /* Pings a secondary, then verifies ping fails after the socket is shut down. */
+{
+ mongo_sync_connection *c;
+
+ skip (!config.secondary_host, 2,
+ "Secondary server not configured");
+
+ c = mongo_sync_connect (config.secondary_host, config.secondary_port, TRUE);
+
+ ok (mongo_sync_cmd_ping (c) == TRUE,
+ "mongo_sync_cmd_ping() works");
+
+ shutdown (c->super.fd, SHUT_RDWR); /* no auto-reconnect here: ping must fail on a dead socket */
+ sleep (3);
+
+ ok (mongo_sync_cmd_ping (c) == FALSE,
+ "mongo_sync_cmd_ping() returns FALSE when not connected");
+
+ mongo_sync_disconnect (c);
+
+ endskip;
+}
+
+void
+test_mongo_sync_cmd_ping_net (void) /* Same pair of checks against the primary, then the secondary variant above. */
+{
+ mongo_sync_connection *c;
+
+ begin_network_tests (4);
+
+ c = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+
+ ok (mongo_sync_cmd_ping (c) == TRUE,
+ "mongo_sync_cmd_ping() works");
+
+ shutdown (c->super.fd, SHUT_RDWR);
+ sleep (3);
+
+ ok (mongo_sync_cmd_ping (c) == FALSE,
+ "mongo_sync_cmd_ping() returns FALSE when not connected");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_ping_net_secondary ();
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_ping (void) /* Entry point: error-path checks on NULL and fake connections, then network tests. */
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE); /* fake connection with an invalid FD */
+
+ errno = 0;
+ ok (mongo_sync_cmd_ping (NULL) == FALSE,
+ "mongo_sync_cmd_ping(NULL) returns FALSE");
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is set to ENOTCONN");
+
+ errno = 0;
+ ok (mongo_sync_cmd_ping (c) == FALSE,
+ "Pinging a bogus connection fails");
+ cmp_ok (errno, "!=", 0,
+ "errno is not 0");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_ping_net ();
+}
+
+RUN_TEST (8, mongo_sync_cmd_ping);
diff --git a/tests/unit/mongo/sync/sync_cmd_query.c b/tests/unit/mongo/sync/sync_cmd_query.c
new file mode 100644
index 0000000..da7c693
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_query.c
@@ -0,0 +1,125 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_query (void) /* Verifies mongo_sync_cmd_query(): argument validation, live queries, ENOENT on empty results, auto-reconnect, request/response pairing, and secondary failover. */
+{
+ mongo_packet *p;
+ mongo_sync_connection *c;
+ bson *q, *s;
+
+ c = test_make_fake_sync_conn (-1, FALSE); /* fake connection with an invalid FD */
+ q = test_bson_generate_full ();
+ s = test_bson_generate_full ();
+
+ ok (mongo_sync_cmd_query (NULL, "test.ns", 0, 0, 1, q, s) == NULL,
+ "mongo_sync_cmd_query() fails with a NULL connection");
+ ok (mongo_sync_cmd_query (c, NULL, 0, 0, 1, q, s) == NULL,
+ "mongo_sync_cmd_query() fails with a NULL namespace");
+ ok (mongo_sync_cmd_query (c, "test.ns", 0, 0, 1, NULL, s) == NULL,
+ "mongo_sync_cmd_query() fails with a NULL query");
+
+ ok (mongo_sync_cmd_query (c, "test.ns", 0, 0, 1, q, s) == NULL,
+ "mongo_sync_cmd_query() fails with a bogus FD");
+ mongo_sync_conn_set_slaveok (c, TRUE); /* retry with slave-ok set; the bogus FD must still make it fail */
+ ok (mongo_sync_cmd_query (c, "test.ns", 0, 0, 1, q, s) == NULL,
+ "mongo_sync_cmd_query() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+
+ bson_free (q);
+ bson_free (s);
+
+ begin_network_tests (7);
+
+ q = bson_new ();
+ bson_append_boolean (q, "sync_cmd_query_test", TRUE);
+ bson_finish (q);
+
+ s = bson_new (); /* a selector that matches no inserted document */
+ bson_append_boolean (s, "sync_cmd_query_test", FALSE);
+ bson_finish (s);
+
+ c = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+ mongo_sync_cmd_insert (c, config.ns, q, NULL);
+
+ p = mongo_sync_cmd_query (c, config.ns, 0, 0, 1, q, NULL);
+ ok (p != NULL,
+ "mongo_sync_cmd_query() works");
+ mongo_wire_packet_free (p);
+
+ errno = 0;
+ p = mongo_sync_cmd_query (c, config.ns, 0, 0, 1, s, NULL);
+ ok (p == NULL && errno == ENOENT,
+ "mongo_sync_cmd_query() sets errno to ENOENT when there's "
+ "nothing to return");
+ mongo_wire_packet_free (p);
+
+ shutdown (c->super.fd, SHUT_RDWR); /* kill the socket to exercise auto-reconnect */
+ sleep (3);
+
+ p = mongo_sync_cmd_query (c, config.ns, 0, 0, 1, q, NULL);
+ ok (p != NULL,
+ "mongo_sync_cmd_query() automatically reconnects");
+ mongo_wire_packet_free (p);
+
+ mongo_sync_disconnect (c);
+
+ /*
+ * Test request/response pairing, by sending a crafted query first,
+ * and another, without reading the response for the first before
+ * that.
+ */
+ c = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ p = mongo_wire_cmd_query (12345, config.ns, MONGO_WIRE_FLAG_QUERY_SLAVE_OK,
+ 0, 1, s, NULL);
+ mongo_packet_send ((mongo_connection *)c, p); /* raw send with a forged request id; its reply will not match the next query */
+ mongo_wire_packet_free (p);
+
+ errno = 0;
+ p = mongo_sync_cmd_query (c, config.ns, 0, 0, 1, s, NULL);
+ ok (p == NULL && errno == EPROTO,
+ "mongo_sync_cmd_query() fails if the reply is not a response to "
+ "the current query");
+ mongo_wire_packet_free (p);
+
+ mongo_sync_disconnect (c);
+
+ /*
+ * Tests involving a secondary
+ */
+ skip (!config.secondary_host, 3, "Secondary host not set up");
+
+ c = mongo_sync_connect (config.secondary_host, config.secondary_port, TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+
+ ok (c && mongo_sync_cmd_is_master (c) == FALSE,
+ "Connected to a secondary");
+ p = mongo_sync_cmd_query (c, config.ns, 0, 0, 1, q, NULL);
+ ok (p != NULL,
+ "mongo_sync_cmd_query() works on secondary");
+ mongo_wire_packet_free (p);
+
+ mongo_sync_conn_set_slaveok (c, FALSE); /* with slave-ok off, the query must be re-routed to the master */
+
+ p = mongo_sync_cmd_query (c, config.ns, 0, 0, 1, q, NULL);
+ ok (p != NULL && mongo_sync_cmd_is_master (c) == TRUE,
+ "mongo_sync_cmd_query() can resync to master");
+ mongo_wire_packet_free (p);
+
+ mongo_sync_disconnect (c);
+
+ endskip;
+
+ bson_free (q);
+ bson_free (s);
+
+ end_network_tests ();
+}
+
+RUN_TEST (12, mongo_sync_cmd_query);
diff --git a/tests/unit/mongo/sync/sync_cmd_reset_error.c b/tests/unit/mongo/sync/sync_cmd_reset_error.c
new file mode 100644
index 0000000..8f92fcf
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_reset_error.c
@@ -0,0 +1,31 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+
+void
+test_mongo_sync_cmd_reset_error (void) /* Verifies mongo_sync_cmd_reset_error(): error paths only (no live-server case). */
+{
+ mongo_sync_connection *c;
+
+ test_env_setup (); /* needed for config.db below */
+
+ c = test_make_fake_sync_conn (-1, FALSE); /* fake connection with an invalid FD */
+
+ errno = 0;
+ ok (mongo_sync_cmd_reset_error (NULL, config.db) == FALSE,
+ "mongo_sync_cmd_reset_error() fails with a NULL connection");
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is set to ENOTCONN");
+
+ ok (mongo_sync_cmd_reset_error (c, NULL) == FALSE,
+ "mongo_sync_cmd_reset_error() fails with a NULL db");
+
+ ok (mongo_sync_cmd_reset_error (c, config.db) == FALSE,
+ "mongo_sync_cmd_reset_error() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+ test_env_free ();
+}
+
+RUN_TEST (4, mongo_sync_cmd_reset_error);
diff --git a/tests/unit/mongo/sync/sync_cmd_update.c b/tests/unit/mongo/sync/sync_cmd_update.c
new file mode 100644
index 0000000..21b981f
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_update.c
@@ -0,0 +1,97 @@
+#include "test.h"
+#include "mongo.h"
+
+#include "libmongo-private.h"
+
+#include <sys/socket.h>
+
+void
+test_mongo_sync_cmd_update (void) /* Verifies mongo_sync_cmd_update(): argument validation, live upsert, auto-reconnect, and reconnect-to-master from a secondary. */
+{
+ mongo_sync_connection *c;
+ bson *sel, *upd;
+ guint8 *oid;
+
+ mongo_util_oid_init (0); /* initialise the OID generator before creating ObjectIds */
+
+ sel = bson_new ();
+ oid = mongo_util_oid_new (0);
+ bson_append_oid (sel, "_id", oid);
+ g_free (oid); /* bson_append_oid copies the OID, so it can be freed here */
+ bson_finish (sel);
+
+ upd = test_bson_generate_full ();
+ c = test_make_fake_sync_conn (-1, FALSE); /* fake connection with an invalid FD */
+
+ ok (mongo_sync_cmd_update (NULL, "test.ns", 0, sel, upd) == FALSE,
+ "mongo_sync_cmd_update() fails with a NULL connection");
+ ok (mongo_sync_cmd_update (c, NULL, 0, sel, upd) == FALSE,
+ "mongo_sync_cmd_update() fails with a NULL namespace");
+ ok (mongo_sync_cmd_update (c, "test.ns", 0, NULL, upd) == FALSE,
+ "mongo_sync_cmd_update() fails with a NULL selector");
+ ok (mongo_sync_cmd_update (c, "test.ns", 0, sel, NULL) == FALSE,
+ "mongo_sync_cmd_update() fails with a NULL update");
+
+ ok (mongo_sync_cmd_update (c, "test.ns", 0, sel, upd) == FALSE,
+ "mongo_sync_cmd_update() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+ bson_free (sel);
+ bson_free (upd);
+
+ begin_network_tests (4);
+
+ sel = bson_new ();
+ oid = mongo_util_oid_new (1);
+ bson_append_oid (sel, "_id", oid);
+ g_free (oid);
+ bson_finish (sel);
+
+ upd = bson_new ();
+ oid = mongo_util_oid_new (1);
+ bson_append_oid (upd, "_id", oid);
+ g_free (oid);
+ bson_finish (upd);
+
+ c = mongo_sync_connect (config.primary_host, config.primary_port,
+ FALSE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+
+ ok (mongo_sync_cmd_update (c, config.ns,
+ MONGO_WIRE_FLAG_UPDATE_UPSERT, sel, upd) == TRUE,
+ "mongo_sync_cmd_update() works");
+
+ shutdown (c->super.fd, SHUT_RDWR); /* kill the socket to exercise auto-reconnect */
+ sleep (3);
+
+ ok (mongo_sync_cmd_update (c, config.ns,
+ MONGO_WIRE_FLAG_UPDATE_UPSERT, sel, upd) == TRUE,
+ "mongo_sync_cmd_update() automatically reconnects");
+
+ mongo_sync_disconnect (c);
+
+ /*
+ * Tests involving a secondary
+ */
+ skip (!config.secondary_host, 2,
+ "Secondary host not set up");
+
+ c = mongo_sync_connect (config.secondary_host, config.secondary_port,
+ TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+
+ ok (mongo_sync_cmd_is_master (c) == FALSE,
+ "Connected to a secondary");
+
+ ok (mongo_sync_cmd_update (c, config.ns,
+ MONGO_WIRE_FLAG_UPDATE_UPSERT, sel, upd) == TRUE,
+ "mongo_sync_cmd_update() automatically reconnects to master");
+ mongo_sync_disconnect (c);
+ endskip;
+
+ bson_free (sel);
+ bson_free (upd);
+ end_network_tests ();
+}
+
+RUN_TEST (9, mongo_sync_cmd_update);
diff --git a/tests/unit/mongo/sync/sync_cmd_user_add.c b/tests/unit/mongo/sync/sync_cmd_user_add.c
new file mode 100644
index 0000000..9cdc542
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_user_add.c
@@ -0,0 +1,95 @@
+#include "test.h"
+#include "mongo.h"
+#include "config.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_user_add_net_secondary (void) /* user_add on a secondary must transparently reconnect to the master. */
+{
+ mongo_sync_connection *c;
+ gboolean ret;
+
+ skip (!config.secondary_host, 1,
+ "Secondary server not configured");
+
+ c = mongo_sync_connect (config.secondary_host, config.secondary_port, TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+
+ ret = mongo_sync_cmd_user_add (c, config.db, "test", "s3kr1+");
+ ok (ret && mongo_sync_cmd_is_master (c), /* success implies the connection failed over to the master */
+ "mongo_sync_cmd_user_add() automatically reconnects to master");
+
+ mongo_sync_disconnect (c);
+
+ endskip;
+}
+
+void
+test_mongo_sync_cmd_user_add_net (void) /* Live tests: add a user on the primary, then again after a forced reconnect. */
+{
+ mongo_sync_connection *c;
+
+ begin_network_tests (3);
+
+ c = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+
+ ok (mongo_sync_cmd_user_add (c, config.db, "test", "s3kr1+") == TRUE,
+ "mongo_sync_cmd_user_add() works");
+
+ shutdown (c->super.fd, SHUT_RDWR); /* kill the socket to exercise auto-reconnect */
+ sleep (3);
+
+ ok (mongo_sync_cmd_user_add (c, config.db, "test", "s3kr1+") == TRUE,
+ "mongo_sync_cmd_user_add() automatically reconnects");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_user_add_net_secondary ();
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_user_add (void) /* Entry point: NULL-argument and errno checks on a fake connection, then network tests. */
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE); /* fake connection with an invalid FD */
+
+ errno = 0;
+ ok (mongo_sync_cmd_user_add (NULL, "test", "test", "s3kr1+") == FALSE,
+ "mongo_sync_cmd_user_add() fails with a NULL connection");
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is set to ENOTCONN");
+
+ errno = 0;
+ ok (mongo_sync_cmd_user_add (c, NULL, "test", "s3kr1+") == FALSE,
+ "mongo_sync_cmd_user_add() fails with a NULL db");
+ cmp_ok (errno, "==", EINVAL,
+ "errno is set to EINVAL");
+
+ errno = 0;
+ ok (mongo_sync_cmd_user_add (c, "test", NULL, "s3kr1+") == FALSE,
+ "mongo_sync_cmd_user_add() fails with a NULL user");
+ cmp_ok (errno, "==", EINVAL,
+ "errno is set to EINVAL");
+
+ errno = 0;
+ ok (mongo_sync_cmd_user_add (c, "test", "test", NULL) == FALSE,
+ "mongo_sync_cmd_user_add() fails with a NULL password");
+ cmp_ok (errno, "==", EINVAL,
+ "errno is set to EINVAL");
+
+ ok (mongo_sync_cmd_user_add (c, "test", "test", "s3kr1+") == FALSE,
+ "mongo_sync_cmd_user_add() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_user_add_net ();
+}
+
+RUN_TEST (12, mongo_sync_cmd_user_add);
diff --git a/tests/unit/mongo/sync/sync_cmd_user_add_with_roles.c b/tests/unit/mongo/sync/sync_cmd_user_add_with_roles.c
new file mode 100644
index 0000000..04bb842
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_user_add_with_roles.c
@@ -0,0 +1,89 @@
+#include "test.h"
+#include "mongo.h"
+#include "config.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_user_add_with_roles_net (const bson *roles) /* Live tests: add a user with roles, then again after a forced reconnect. */
+{
+ mongo_sync_connection *c;
+
+ begin_network_tests (2);
+
+ c = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+
+ ok (mongo_sync_cmd_user_add_with_roles (c, config.db,
+ "test", "s3kr1+", roles) == TRUE,
+ "mongo_sync_cmd_user_add_with_roles() works");
+
+ shutdown (c->super.fd, SHUT_RDWR); /* kill the socket to exercise auto-reconnect */
+ sleep (3);
+
+ ok (mongo_sync_cmd_user_add_with_roles (c, config.db,
+ "test", "s3kr1+", roles) == TRUE,
+ "mongo_sync_cmd_user_add_with_roles() automatically reconnects");
+
+ mongo_sync_disconnect (c);
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_user_add_with_roles (void) /* Entry point: NULL-argument and errno checks on a fake connection, then network tests. */
+{
+ mongo_sync_connection *c;
+ bson *roles = bson_build (BSON_TYPE_STRING, "0", "readWrite", -1,
+ BSON_TYPE_NONE);
+
+ bson_finish (roles);
+
+ c = test_make_fake_sync_conn (-1, FALSE); /* fake connection with an invalid FD */
+
+ errno = 0;
+ ok (mongo_sync_cmd_user_add_with_roles (NULL, "test",
+ "test", "s3kr1+", roles) == FALSE,
+ "mongo_sync_cmd_user_add_with_roles() fails with a NULL connection");
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is set to ENOTCONN");
+
+ errno = 0;
+ ok (mongo_sync_cmd_user_add_with_roles (c, NULL,
+ "test", "s3kr1+", roles) == FALSE,
+ "mongo_sync_cmd_user_add_with_roles() fails with a NULL db");
+ cmp_ok (errno, "==", EINVAL,
+ "errno is set to EINVAL");
+
+ errno = 0;
+ ok (mongo_sync_cmd_user_add_with_roles (c, "test",
+ NULL, "s3kr1+", roles) == FALSE,
+ "mongo_sync_cmd_user_add_with_roles() fails with a NULL user");
+ cmp_ok (errno, "==", EINVAL,
+ "errno is set to EINVAL");
+
+ errno = 0;
+ ok (mongo_sync_cmd_user_add_with_roles (c, "test",
+ "test", NULL, roles) == FALSE,
+ "mongo_sync_cmd_user_add_with_roles() fails with a NULL password");
+ cmp_ok (errno, "==", EINVAL,
+ "errno is set to EINVAL");
+
+ ok (mongo_sync_cmd_user_add_with_roles (c, "test",
+ "test", "s3kr1+", NULL) == FALSE, /* NULL roles are accepted by the API; the bogus FD is what makes this fail */
+ "mongo_sync_cmd_user_add() fails with a bogus FD and empty roles");
+
+ ok (mongo_sync_cmd_user_add_with_roles (c, "test",
+ "test", "s3kr1+", roles) == FALSE,
+ "mongo_sync_cmd_user_add() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_user_add_with_roles_net (roles);
+
+ bson_free (roles);
+}
+
+RUN_TEST (12, mongo_sync_cmd_user_add_with_roles);
diff --git a/tests/unit/mongo/sync/sync_cmd_user_remove.c b/tests/unit/mongo/sync/sync_cmd_user_remove.c
new file mode 100644
index 0000000..dc66063
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_cmd_user_remove.c
@@ -0,0 +1,92 @@
+#include "test.h"
+#include "mongo.h"
+#include "config.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_cmd_user_remove_net_secondary (void) /* user_remove on a secondary must transparently reconnect to the master. */
+{
+ mongo_sync_connection *c;
+ gboolean ret;
+
+ skip (!config.secondary_host, 1,
+ "Secondary server not configured");
+
+ c = mongo_sync_connect (config.secondary_host, config.secondary_port, TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+
+ mongo_sync_cmd_user_add (c, config.db, "test", "s3kr1+"); /* ensure there is a user to remove */
+ ret = mongo_sync_cmd_user_remove (c, config.db, "test");
+ ok (ret && mongo_sync_cmd_is_master (c), /* success implies the connection failed over to the master */
+ "mongo_sync_cmd_user_remove() automatically reconnects to master");
+
+ mongo_sync_disconnect (c);
+
+ endskip;
+}
+
+void
+test_mongo_sync_cmd_user_remove_net (void) /* Live tests: remove a user on the primary, then again after a forced reconnect. */
+{
+ mongo_sync_connection *c;
+
+ begin_network_tests (3);
+
+ c = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ mongo_sync_conn_set_auto_reconnect (c, TRUE);
+
+ mongo_sync_cmd_user_add (c, config.db, "test", "s3kr1+");
+ ok (mongo_sync_cmd_user_remove (c, config.db, "test") == TRUE,
+ "mongo_sync_cmd_user_remove() works");
+
+ mongo_sync_cmd_user_add (c, config.db, "test", "s3kr1+"); /* re-add so the post-reconnect removal has a target */
+ shutdown (c->super.fd, SHUT_RDWR); /* kill the socket to exercise auto-reconnect */
+ sleep (3);
+
+ ok (mongo_sync_cmd_user_remove (c, config.db, "test") == TRUE,
+ "mongo_sync_cmd_user_remove() automatically reconnects");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_user_remove_net_secondary ();
+
+ end_network_tests ();
+}
+
+void
+test_mongo_sync_cmd_user_remove (void) /* Entry point: NULL-argument and errno checks on a fake connection, then network tests. */
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE); /* fake connection with an invalid FD */
+
+ errno = 0;
+ ok (mongo_sync_cmd_user_remove (NULL, "test", "test") == FALSE,
+ "mongo_sync_cmd_user_remove() fails with a NULL connection");
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is set to ENOTCONN");
+
+ errno = 0;
+ ok (mongo_sync_cmd_user_remove (c, NULL, "test") == FALSE,
+ "mongo_sync_cmd_user_remove() fails with a NULL db");
+ cmp_ok (errno, "==", EINVAL,
+ "errno is set to EINVAL");
+
+ errno = 0;
+ ok (mongo_sync_cmd_user_remove (c, "test", NULL) == FALSE,
+ "mongo_sync_cmd_user_remove() fails with a NULL user");
+ cmp_ok (errno, "==", EINVAL,
+ "errno is set to EINVAL");
+
+ ok (mongo_sync_cmd_user_remove (c, "test", "test") == FALSE,
+ "mongo_sync_cmd_user_remove() fails with a bogus FD");
+
+ mongo_sync_disconnect (c);
+
+ test_mongo_sync_cmd_user_remove_net ();
+}
+
+RUN_TEST (10, mongo_sync_cmd_user_remove);
diff --git a/tests/unit/mongo/sync/sync_conn_seed_add.c b/tests/unit/mongo/sync/sync_conn_seed_add.c
new file mode 100644
index 0000000..fb9f10a
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_conn_seed_add.c
@@ -0,0 +1,24 @@
+#include "test.h"
+#include "mongo.h"
+
+void
+test_mongo_sync_conn_seed_add (void) /* Verifies mongo_sync_conn_seed_add(): invalid-argument rejection and a successful add. */
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (42, TRUE); /* fake replica-set connection; seeds do not need a live socket */
+
+ ok (mongo_sync_conn_seed_add (NULL, "localhost", 27017) == FALSE,
+ "mongo_sync_conn_seed_add() should fail with a NULL connection");
+ ok (mongo_sync_conn_seed_add (c, NULL, 27017) == FALSE,
+ "mongo_sync_conn_seed_add() should fail with a NULL host");
+ ok (mongo_sync_conn_seed_add (c, "localhost", -1) == FALSE,
+ "mongo_sync_conn_seed_add() should fail with an invalid port");
+
+ ok (mongo_sync_conn_seed_add (c, "localhost", 27017),
+ "mongo_sync_conn_seed_add() works");
+
+ mongo_sync_disconnect (c);
+}
+
+RUN_TEST (4, mongo_sync_conn_seed_add);
diff --git a/tests/unit/mongo/sync/sync_conn_seed_add_cache.c b/tests/unit/mongo/sync/sync_conn_seed_add_cache.c
new file mode 100644
index 0000000..e049691
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_conn_seed_add_cache.c
@@ -0,0 +1,31 @@
+#include "test.h"
+#include "mongo.h"
+
+void
+test_mongo_sync_connection_cache_seed_add (void)
+{
+ mongo_sync_conn_recovery_cache *cache;
+
+ cache = mongo_sync_conn_recovery_cache_new ();
+
+ ok (mongo_sync_conn_recovery_cache_seed_add (cache,
+ "localhost",
+ 27017) == TRUE,
+ "mongo_sync_connection_cache_seed_add() works");
+
+ ok (mongo_sync_conn_recovery_cache_seed_add (cache,
+ NULL,
+ 27017) == FALSE,
+ "mongo_sync_connection_cache_seed_add() should fail with a NULL host");
+
+ mongo_sync_conn_recovery_cache_discard (cache);
+
+ ok (mongo_sync_conn_recovery_cache_seed_add (cache,
+ "localhost",
+ 27017) == TRUE,
+ "mongo_sync_connection_cache_seed_add() works");
+
+ mongo_sync_conn_recovery_cache_free (cache);
+}
+
+RUN_TEST (3, mongo_sync_connection_cache_seed_add);
diff --git a/tests/unit/mongo/sync/sync_connect.c b/tests/unit/mongo/sync/sync_connect.c
new file mode 100644
index 0000000..418c2bf
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_connect.c
@@ -0,0 +1,22 @@
+#include "test.h"
+#include "mongo.h"
+
+void
+test_mongo_sync_connect (void)
+{
+ mongo_sync_connection *c;
+
+ ok (mongo_sync_connect (NULL, 27017, FALSE) == NULL,
+ "mongo_sync_connect() fails with a NULL host");
+
+ begin_network_tests (1);
+
+ ok ((c = mongo_sync_connect (config.primary_host,
+ config.primary_port, FALSE)) != NULL,
+ "mongo_sync_connect() works");
+ mongo_sync_disconnect (c);
+
+ end_network_tests ();
+}
+
+RUN_TEST (2, mongo_sync_connect);
diff --git a/tests/unit/mongo/sync/sync_connect_cache.c b/tests/unit/mongo/sync/sync_connect_cache.c
new file mode 100644
index 0000000..1618899
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_connect_cache.c
@@ -0,0 +1,42 @@
+#include "test.h"
+#include "mongo.h"
+
+void
+test_mongo_sync_conn_recovery_cache_connection (void)
+{
+ mongo_sync_conn_recovery_cache *cache;
+ mongo_sync_connection *c = NULL;
+
+ cache = mongo_sync_conn_recovery_cache_new ();
+
+ ok (mongo_sync_connect_recovery_cache (cache, FALSE) == NULL,
+ "mongo_sync_connect_recovery_cache() should fail when cache is empty");
+
+ begin_network_tests (4);
+
+ ok (mongo_sync_conn_recovery_cache_seed_add (cache,
+ config.primary_host,
+ config.primary_port) == TRUE,
+ "mongo_sync_conn_recovery_cache_seed_add() works");
+
+ ok ((c = mongo_sync_connect_recovery_cache (cache, FALSE)) != NULL,
+ "mongo_sync_connect_recovery_cache() works");
+
+ mongo_sync_disconnect (c);
+
+ ok ((c = mongo_sync_connect_recovery_cache (cache, FALSE)) != NULL,
+ "mongo_sync_connect_recovery_cache() works after disconnect");
+
+ mongo_sync_disconnect (c);
+
+ mongo_sync_conn_recovery_cache_discard (cache);
+
+ ok (mongo_sync_connect_recovery_cache (cache, TRUE) == NULL,
+ "mongo_sync_connect_recovery_cache() should fail when cache is discarded");
+
+ mongo_sync_conn_recovery_cache_free (cache);
+
+ end_network_tests ();
+}
+
+RUN_TEST (5, mongo_sync_conn_recovery_cache_connection);
diff --git a/tests/unit/mongo/sync/sync_connect_from_cache_enforce_primary.c b/tests/unit/mongo/sync/sync_connect_from_cache_enforce_primary.c
new file mode 100644
index 0000000..5c48ae9
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_connect_from_cache_enforce_primary.c
@@ -0,0 +1,47 @@
+#include "test.h"
+#include "mongo.h"
+
+#define NETWORK_TESTS_NUM 5
+
+void
+test_mongo_sync_connect_from_cache_enforce_primary (void)
+{
+ mongo_sync_conn_recovery_cache *cache;
+ mongo_sync_connection *c;
+
+ begin_network_tests (NETWORK_TESTS_NUM);
+
+ cache = mongo_sync_conn_recovery_cache_new ();
+
+ skip (!config.secondary_host,
+ NETWORK_TESTS_NUM,
+ "Secondary server not configured");
+
+ ok (mongo_sync_conn_recovery_cache_seed_add (cache, config.secondary_host,
+ config.secondary_port) == TRUE,
+      "mongo_sync_conn_recovery_cache_seed_add() works");
+
+ ok ((c = mongo_sync_connect_recovery_cache (cache, TRUE)) != NULL,
+ "mongo_sync_connect_recovery_cache() works");
+
+ ok (mongo_sync_cmd_is_master(c) == FALSE,
+ "Secondary server should not be The Master.");
+
+ mongo_sync_disconnect (c);
+
+ ok ((c = mongo_sync_connect_recovery_cache (cache, FALSE)) != NULL,
+ "mongo_sync_connect_recovery_cache() works");
+
+  ok (mongo_sync_cmd_is_master (c) == TRUE,
+ "Retrieved connection should be The Master when it is forced to be.");
+
+ mongo_sync_disconnect (c);
+
+ endskip;
+
+ mongo_sync_conn_recovery_cache_free (cache);
+
+ end_network_tests ();
+}
+
+RUN_TEST (NETWORK_TESTS_NUM, mongo_sync_connect_from_cache_enforce_primary);
diff --git a/tests/unit/mongo/sync/sync_disconnect.c b/tests/unit/mongo/sync/sync_disconnect.c
new file mode 100644
index 0000000..f7783e7
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_disconnect.c
@@ -0,0 +1,22 @@
+#include "test.h"
+#include "mongo.h"
+
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_disconnect (void)
+{
+ mongo_sync_connection *conn;
+
+ mongo_sync_disconnect (NULL);
+ pass ("mongo_sync_disconnect(NULL) does not crash");
+
+ conn = test_make_fake_sync_conn (-1, FALSE);
+ conn->rs.hosts = g_list_append (conn->rs.hosts,
+ g_strdup ("invalid.example.com:-42"));
+
+ mongo_sync_disconnect (conn);
+ pass ("mongo_sync_disconnect() works");
+}
+
+RUN_TEST (2, mongo_sync_disconnect);
diff --git a/tests/unit/mongo/sync/sync_get_set_auto_reconnect.c b/tests/unit/mongo/sync/sync_get_set_auto_reconnect.c
new file mode 100644
index 0000000..bfe2719
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_get_set_auto_reconnect.c
@@ -0,0 +1,39 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+
+void
+test_mongo_sync_get_set_auto_reconnect (void)
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+
+ errno = 0;
+ ok (mongo_sync_conn_get_auto_reconnect (NULL) == FALSE,
+ "mongo_sync_conn_get_auto_reconnect() returns FALSE with a "
+ "NULL connection");
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is now set to ENOTCONN");
+
+ ok (mongo_sync_conn_get_auto_reconnect (c) == FALSE,
+ "mongo_sync_get_auto_reconnect() works");
+ cmp_ok (errno, "==", 0,
+ "errno is now cleared");
+
+ errno = 0;
+ mongo_sync_conn_set_auto_reconnect (NULL, TRUE);
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is set to ENOTCONN after "
+ "mongo_sync_conn_set_auto_reconnect(NULL)");
+
+ ok (mongo_sync_conn_set_auto_reconnect (c, TRUE),
+ "mongo_sync_auto_reconnect() works");
+ ok (mongo_sync_conn_get_auto_reconnect (c) == TRUE,
+ "mongo_sync_set_auto_reconnect() worked");
+
+ mongo_sync_disconnect (c);
+}
+
+RUN_TEST (7, mongo_sync_get_set_auto_reconnect);
diff --git a/tests/unit/mongo/sync/sync_get_set_max_insert_size.c b/tests/unit/mongo/sync/sync_get_set_max_insert_size.c
new file mode 100644
index 0000000..51970a3
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_get_set_max_insert_size.c
@@ -0,0 +1,44 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+
+void
+test_mongo_sync_get_set_max_insert_size (void)
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+
+ errno = 0;
+ ok (mongo_sync_conn_get_max_insert_size (NULL) == -1,
+ "mongo_sync_conn_get_max_insert_size() returns -1 with "
+ "a NULL connection");
+
+ cmp_ok (mongo_sync_conn_get_max_insert_size (c), "==",
+ MONGO_SYNC_DEFAULT_MAX_INSERT_SIZE,
+ "mongo_sync_get_max_insert_size() works");
+
+ errno = 0;
+ mongo_sync_conn_set_max_insert_size (NULL, 1024);
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is set to ENOTCONN after "
+ "mongo_sync_conn_set_max_insert_size(NULL)");
+
+ mongo_sync_conn_set_max_insert_size (c, 1024);
+ cmp_ok (errno, "==", 0,
+ "errno is cleared");
+ ok (mongo_sync_conn_get_max_insert_size (c) == 1024,
+ "mongo_sync_set_max_insert_size() worked");
+
+ mongo_sync_conn_set_max_insert_size (c, -1);
+ cmp_ok (errno, "==", ERANGE,
+ "errno is set to ERANGE");
+ ok (mongo_sync_conn_get_max_insert_size (c) == 1024,
+ "mongo_sync_set_max_insert_size() with a negative value should "
+ "not work");
+
+ mongo_sync_disconnect (c);
+}
+
+RUN_TEST (7, mongo_sync_get_set_max_insert_size);
diff --git a/tests/unit/mongo/sync/sync_get_set_safe_mode.c b/tests/unit/mongo/sync/sync_get_set_safe_mode.c
new file mode 100644
index 0000000..b444105
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_get_set_safe_mode.c
@@ -0,0 +1,38 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+
+void
+test_mongo_sync_get_set_safe_mode (void)
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+
+ errno = 0;
+ ok (mongo_sync_conn_get_safe_mode (NULL) == FALSE,
+ "mongo_sync_conn_get_safe_mode() returns FALSE with a NULL connection");
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is now set to ENOTCONN");
+
+ ok (mongo_sync_conn_get_safe_mode (c) == FALSE,
+ "mongo_sync_get_safe_mode() works");
+ cmp_ok (errno, "==", 0,
+ "errno is now cleared");
+
+ errno = 0;
+ mongo_sync_conn_set_safe_mode (NULL, TRUE);
+ cmp_ok (errno, "==", ENOTCONN,
+      "errno is set to ENOTCONN after mongo_sync_conn_set_safe_mode(NULL)");
+
+ mongo_sync_conn_set_safe_mode (c, TRUE);
+ cmp_ok (errno, "==", 0,
+ "errno is cleared");
+ ok (mongo_sync_conn_get_safe_mode (c) == TRUE,
+ "mongo_sync_set_safe_mode() worked");
+
+ mongo_sync_disconnect (c);
+}
+
+RUN_TEST (7, mongo_sync_get_set_safe_mode);
diff --git a/tests/unit/mongo/sync/sync_get_set_slaveok.c b/tests/unit/mongo/sync/sync_get_set_slaveok.c
new file mode 100644
index 0000000..7a43979
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_get_set_slaveok.c
@@ -0,0 +1,38 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+
+void
+test_mongo_sync_get_set_slaveok (void)
+{
+ mongo_sync_connection *c;
+
+ c = test_make_fake_sync_conn (-1, FALSE);
+
+ errno = 0;
+ ok (mongo_sync_conn_get_slaveok (NULL) == FALSE,
+ "mongo_sync_conn_get_slaveok() returns FALSE with a NULL connection");
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is now set to ENOTCONN");
+
+ ok (mongo_sync_conn_get_slaveok (c) == FALSE,
+ "mongo_sync_get_slaveok() works");
+ cmp_ok (errno, "==", 0,
+ "errno is now cleared");
+
+ errno = 0;
+ mongo_sync_conn_set_slaveok (NULL, TRUE);
+ cmp_ok (errno, "==", ENOTCONN,
+      "errno is set to ENOTCONN after mongo_sync_conn_set_slaveok(NULL)");
+
+ mongo_sync_conn_set_slaveok (c, TRUE);
+ cmp_ok (errno, "==", 0,
+ "errno is cleared");
+ ok (mongo_sync_conn_get_slaveok (c) == TRUE,
+ "mongo_sync_set_slaveok() worked");
+
+ mongo_sync_disconnect (c);
+}
+
+RUN_TEST (7, mongo_sync_get_set_slaveok);
diff --git a/tests/unit/mongo/sync/sync_reconnect.c b/tests/unit/mongo/sync/sync_reconnect.c
new file mode 100644
index 0000000..a81e4da
--- /dev/null
+++ b/tests/unit/mongo/sync/sync_reconnect.c
@@ -0,0 +1,143 @@
+#include "test.h"
+#include "mongo.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+#include "libmongo-private.h"
+
+void
+test_mongo_sync_reconnect (void)
+{
+ mongo_sync_connection *conn, *o;
+ GList *l;
+
+ ok (mongo_sync_reconnect (NULL, FALSE) == NULL,
+ "mongo_sync_reconnect() fails with a NULL connection");
+ cmp_ok (errno, "==", ENOTCONN,
+ "errno is ENOTCONN");
+
+ conn = test_make_fake_sync_conn (-1, FALSE);
+ ok (mongo_sync_reconnect (conn, FALSE) == NULL,
+ "mongo_sync_reconnect() fails with a bogus FD");
+ cmp_ok (errno, "==", EHOSTUNREACH,
+ "errno is EHOSTUNREACH");
+
+ mongo_sync_disconnect (conn);
+
+ begin_network_tests (15);
+
+ /* Connect & reconnect to master */
+ o = conn = mongo_sync_connect (config.primary_host,
+ config.primary_port, TRUE);
+ ok ((conn = mongo_sync_reconnect (conn, TRUE)) != NULL,
+ "mongo_sync_reconnect() works when reconnecting to self");
+ ok (o == conn,
+ "Reconnect to an existing master results in the same object");
+ mongo_sync_disconnect (conn);
+
+ /* Connect to master, kill FD, reconnect */
+ conn = mongo_sync_connect (config.primary_host,
+ config.primary_port, TRUE);
+ mongo_sync_cmd_is_master (conn);
+
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (3);
+
+ ok ((conn = mongo_sync_reconnect (conn, TRUE)) != NULL,
+ "mongo_sync_reconnect() succeed when the connection drops");
+ mongo_sync_disconnect (conn);
+
+ /* Connect, kill, reconnect; w/o knowing other hosts */
+ o = conn = mongo_sync_connect (config.primary_host,
+ config.primary_port, TRUE);
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (3);
+ l = conn->rs.hosts;
+ while (l)
+ {
+ g_free (l->data);
+ l = g_list_delete_link (l, l);
+ }
+ conn->rs.hosts = NULL;
+
+ l = conn->rs.seeds;
+ while (l)
+ {
+ g_free (l->data);
+ l = g_list_delete_link (l, l);
+ }
+ conn->rs.seeds = NULL;
+
+ conn = mongo_sync_reconnect (conn, FALSE);
+
+ ok (conn != o && conn == NULL,
+ "mongo_sync_reconnect() fails if it can't reconnect anywhere");
+ mongo_sync_disconnect (o);
+
+ /* Gracefully ignore unparsable hosts during reconnect */
+ o = conn = mongo_sync_connect (config.primary_host,
+ config.primary_port, TRUE);
+ mongo_sync_cmd_is_master (conn);
+ conn->rs.hosts = g_list_prepend (conn->rs.hosts,
+ g_strdup ("invalid:-42"));
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (3);
+ conn = mongo_sync_reconnect (conn, TRUE);
+
+ ok (conn == o,
+ "mongo_sync_reconnect() gracefully ignores unparsable hosts "
+ "during reconnect");
+ mongo_sync_disconnect (conn);
+
+ /* Ignore unreachable hosts during reconnect */
+ o = conn = mongo_sync_connect (config.primary_host,
+ config.primary_port, TRUE);
+ mongo_sync_cmd_is_master (conn);
+ conn->rs.hosts = g_list_prepend (conn->rs.hosts,
+ g_strdup ("example.com:27017"));
+ shutdown (conn->super.fd, SHUT_RDWR);
+ sleep (3);
+ conn = mongo_sync_reconnect (conn, TRUE);
+
+ ok (conn == o,
+      "mongo_sync_reconnect() gracefully ignores unreachable hosts "
+ "during reconnect");
+ mongo_sync_disconnect (conn);
+
+ /*
+ * Tests involving a secondary
+ */
+
+ skip (!config.secondary_host, 9,
+ "Secondary host not set up");
+
+ /* Connect to secondary & reconnect to master */
+ o = conn = mongo_sync_connect (config.secondary_host,
+ config.secondary_port, TRUE);
+ ok (conn != NULL, "Connecting to secondary");
+ ok (mongo_sync_cmd_is_master (conn) == FALSE,
+ "Connected to a secondary");
+ ok ((conn = mongo_sync_reconnect (conn, TRUE)) != NULL,
+ "Reconnecting from slave to master succeeds");
+ ok (conn == o, "Connection object updated in-place");
+ ok (mongo_sync_cmd_is_master (conn),
+ "Correctly reconnected to master");
+ mongo_sync_disconnect (conn);
+
+ /* Connect to secondary & reconnect to self */
+ o = conn = mongo_sync_connect (config.secondary_host,
+ config.secondary_port, TRUE);
+ ok (conn != NULL, "Connecting to secondary");
+ ok ((conn = mongo_sync_reconnect (conn, FALSE)) != NULL,
+ "Reconnecting from slave to self succeeds");
+ ok (conn == o, "Connection object updated in-place");
+ ok (mongo_sync_cmd_is_master (conn) == FALSE,
+ "Correctly reconnected to self");
+ mongo_sync_disconnect (conn);
+
+ endskip;
+
+ end_network_tests ();
+}
+
+RUN_TEST (19, mongo_sync_reconnect);
diff --git a/tests/unit/mongo/utils/oid_as_string.c b/tests/unit/mongo/utils/oid_as_string.c
new file mode 100644
index 0000000..9cf740c
--- /dev/null
+++ b/tests/unit/mongo/utils/oid_as_string.c
@@ -0,0 +1,26 @@
+#include "test.h"
+#include "mongo.h"
+
+void
+test_mongo_utils_oid_as_string (void)
+{
+ guint8 *oid;
+ gchar *oid_str;
+
+ mongo_util_oid_init (0);
+
+ oid = mongo_util_oid_new (1);
+
+ ok (mongo_util_oid_as_string (NULL) == NULL,
+ "mongo_util_oid_as_string() should fail with a NULL oid");
+
+ oid_str = mongo_util_oid_as_string (oid);
+
+ ok (oid_str != NULL,
+ "mongo_util_oid_as_string() works");
+
+ g_free (oid_str);
+ g_free (oid);
+}
+
+RUN_TEST (2, mongo_utils_oid_as_string);
diff --git a/tests/unit/mongo/utils/oid_init.c b/tests/unit/mongo/utils/oid_init.c
new file mode 100644
index 0000000..42d0db1
--- /dev/null
+++ b/tests/unit/mongo/utils/oid_init.c
@@ -0,0 +1,19 @@
+#include "tap.h"
+#include "test.h"
+#include "mongo-utils.h"
+
+void
+test_mongo_utils_oid_init (void)
+{
+ mongo_util_oid_init (0);
+ mongo_util_oid_init (1234);
+
+ /* We don't do any real testing here, only check if it does not
+ crash. To verify that it works, we need to create a new OID, and
+ that will be tested by other unit tests.
+ */
+ ok (TRUE,
+ "mongo_util_oid_init() does not crash.");
+}
+
+RUN_TEST (1, mongo_utils_oid_init);
diff --git a/tests/unit/mongo/utils/oid_new.c b/tests/unit/mongo/utils/oid_new.c
new file mode 100644
index 0000000..b8f7f0a
--- /dev/null
+++ b/tests/unit/mongo/utils/oid_new.c
@@ -0,0 +1,49 @@
+#include "tap.h"
+#include "test.h"
+#include "mongo-utils.h"
+
+#include <string.h>
+#include <unistd.h>
+
+void
+test_mongo_utils_oid_new (void)
+{
+ guint8 *oid1, *oid2, *oid3;
+ gchar *oid1_s, *oid2_s;
+
+ ok (mongo_util_oid_new (0) == NULL,
+ "mongo_util_oid_new() should fail before mongo_util_oid_init()");
+
+ mongo_util_oid_init (0);
+ ok ((oid1 = mongo_util_oid_new (1)) != NULL,
+ "mongo_util_oid_new() works");
+ cmp_ok (oid1[11], "==", 1,
+      "mongo_util_oid_new() returns an OID with the correct seq ID");
+
+ oid2 = mongo_util_oid_new (2);
+ oid3 = mongo_util_oid_new (2);
+
+ ok (memcmp (oid2, oid1, 12) > 0,
+ "OIDs with higher sequence ID sort higher");
+ ok (memcmp (oid2, oid3, 12) == 0,
+ "OIDs with the same sequence ID are equal (within a second)");
+ g_free (oid2);
+ g_free (oid3);
+
+ sleep (2);
+ oid2 = mongo_util_oid_new (0);
+
+ oid1_s = mongo_util_oid_as_string (oid1);
+ oid2_s = mongo_util_oid_as_string (oid2);
+
+ ok (memcmp (oid2, oid1, 12) > 0,
+ "OIDs with the same sequence ID, a few seconds later sort higher; "
+ "oid1=%s; oid2=%s", oid1_s, oid2_s);
+
+ g_free (oid2_s);
+ g_free (oid1_s);
+ g_free (oid2);
+ g_free (oid1);
+}
+
+RUN_TEST (6, mongo_utils_oid_new);
diff --git a/tests/unit/mongo/utils/oid_new_with_time.c b/tests/unit/mongo/utils/oid_new_with_time.c
new file mode 100644
index 0000000..290fdab
--- /dev/null
+++ b/tests/unit/mongo/utils/oid_new_with_time.c
@@ -0,0 +1,46 @@
+#include "tap.h"
+#include "test.h"
+#include "mongo-utils.h"
+
+#include <string.h>
+
+void
+test_mongo_utils_oid_new_with_time (void)
+{
+ guint8 *oid1, *oid2, *oid3;
+ gchar *oid1_s, *oid2_s;
+
+ ok (mongo_util_oid_new_with_time (0, 0) == NULL,
+ "mongo_util_oid_new_with_time() should fail before mongo_util_oid_init()");
+
+ mongo_util_oid_init (0);
+ ok ((oid1 = mongo_util_oid_new_with_time (0, 1)) != NULL,
+ "mongo_util_oid_new_with_time() works");
+ cmp_ok (oid1[11], "==", 1,
+      "mongo_util_oid_new_with_time() returns an OID with the correct seq ID");
+
+ oid2 = mongo_util_oid_new_with_time (0, 2);
+ oid3 = mongo_util_oid_new_with_time (0, 2);
+
+ ok (memcmp (oid2, oid1, 12) > 0,
+ "OIDs with higher sequence ID sort higher");
+ ok (memcmp (oid2, oid3, 12) == 0,
+ "OIDs with the same sequence ID are equal (within a second)");
+ g_free (oid2);
+ g_free (oid3);
+
+ oid2 = mongo_util_oid_new_with_time (1, 0);
+
+ oid1_s = mongo_util_oid_as_string (oid1);
+ oid2_s = mongo_util_oid_as_string (oid2);
+
+ ok (memcmp (oid2, oid1, 12) > 0,
+ "OIDs with the same sequence ID, a few seconds later sort higher; "
+ "oid1=%s; oid2=%s", oid1_s, oid2_s);
+ g_free (oid2_s);
+ g_free (oid1_s);
+ g_free (oid2);
+ g_free (oid1);
+}
+
+RUN_TEST (6, mongo_utils_oid_new_with_time);
diff --git a/tests/unit/mongo/utils/parse_addr.c b/tests/unit/mongo/utils/parse_addr.c
new file mode 100644
index 0000000..13b16d1
--- /dev/null
+++ b/tests/unit/mongo/utils/parse_addr.c
@@ -0,0 +1,244 @@
+#include "tap.h"
+#include "test.h"
+#include "mongo-utils.h"
+
+#include <string.h>
+
+void
+test_mongo_utils_parse_addr (void)
+{
+ gchar *host = "deadbeef";
+ gint port = 42;
+
+ ok (mongo_util_parse_addr (NULL, &host, &port) == FALSE,
+ "mongo_util_parse_addr() fails with a NULL address");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("127.0.0.1:27017", &host, NULL) == FALSE,
+ "mongo_util_parse_addr() fails when port is NULL");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("127.0.0.1:27017", NULL, &port) == FALSE,
+ "mongo_util_parse_addr() fails when host is NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("127.0.0.1:27017", &host, &port),
+ "mongo_util_parse_addr() can parse HOST:PORT pairs");
+ is (host, "127.0.0.1",
+ "Host parsed successfully");
+ cmp_ok (port, "==", 27017,
+ "Port parsed successfully");
+ g_free (host);
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr (":27017", &host, &port) == FALSE,
+ "mongo_util_parse_addr() should fail when no host is specified");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("localhost:27017garbage", &host, &port) == FALSE,
+ "mongo_util_parse_addr() should fail if there is garbage after "
+ "the port");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("localhost:garbage", &host, &port) == FALSE,
+ "mongo_util_parse_addr() should fail if the port is not a number");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("localhost:-10", &host, &port) == FALSE,
+ "mongo_util_parse_addr() should fail if the port is out of bounds");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("localhost:9999999999999999999",
+ &host, &port) == FALSE,
+ "mongo_util_parse_addr() should fail if the port is out of bounds");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("localhost:9999999999",
+ &host, &port) == FALSE,
+ "mongo_util_parse_addr() should fail if the port is out of bounds");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ /* IPv6 */
+ ok (mongo_util_parse_addr ("::1:27017", &host, &port),
+ "mongo_util_parse_addr() can deal with IPv6 addresses");
+ is (host, "::1",
+ "Host parsed successfully");
+ cmp_ok (port, "==", 27017,
+ "Port parsed successfully");
+ g_free (host);
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("::1", &host, &port),
+      "mongo_util_parse_addr() should silently misparse ambiguous "
+ "IPv6 addresses");
+ isnt (host, "::1",
+ "Host is misparsed, as expected");
+ cmp_ok (port, "==", 1,
+ "Port is misparsed, as expected");
+ g_free (host);
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("[::1", &host, &port) == FALSE,
+ "mongo_util_parse_addr() should fail on invalid IPv6 literals");
+ is (host, NULL,
+ "Host should be NULL");
+ cmp_ok (port, "==", -1,
+ "Port should be -1");
+ g_free (host);
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("[::1]:1", &host, &port),
+ "mongo_util_parse_addr() works with IPv6 literal + port");
+ is (host, "::1",
+ "Host should be ::1");
+ cmp_ok (port, "==", 1,
+ "Port should be 1");
+ g_free (host);
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("[::1]:27017", &host, &port),
+ "mongo_util_parse_addr() works with IPv6 literal + port");
+ is (host, "::1",
+ "Host should be ::1");
+ cmp_ok (port, "==", 27017,
+ "Port should be 27017");
+ g_free (host);
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("[]:27017", &host, &port) == FALSE,
+ "mongo_util_parse_addr() should fail when no host is specified");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("[::1]:27017garbage", &host, &port) == FALSE,
+ "mongo_util_parse_addr() should fail if there is garbage after "
+ "the port");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("[::1]:garbage", &host, &port) == FALSE,
+ "mongo_util_parse_addr() should fail if the port is not a number");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("[::1]:-10", &host, &port) == FALSE,
+ "mongo_util_parse_addr() should fail if the port is out of bounds");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("[::1]:9999999999999999999",
+ &host, &port) == FALSE,
+ "mongo_util_parse_addr() should fail if the port is out of bounds");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("[::1]:9999999999",
+ &host, &port) == FALSE,
+ "mongo_util_parse_addr() should fail if the port is out of bounds");
+ is (host, NULL,
+ "Failed parsing sets host to NULL");
+ cmp_ok (port, "==", -1,
+ "Failed parsing sets port to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("/var/run/mongodb/mongodb.socket",
+ &host, &port) == TRUE,
+ "mongo_util_parse_addr() works with unix domain sockets");
+ is (host, "/var/run/mongodb/mongodb.socket",
+ "Parsing a Unix domain socket sets host to the socket name");
+ cmp_ok (port, "==", -1,
+ "Port is set to -1");
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("[::1]", &host, &port),
+ "mongo_util_parse_addr() can handle IPv6 literals without port set");
+ is (host, "::1",
+ "Host parsed successfully");
+ cmp_ok (port, "==", -1,
+ "Port is set to -1");
+ g_free (host);
+ host = "deadbeef";
+ port = 42;
+
+ ok (mongo_util_parse_addr ("/var/run/mongodb/mongodb.socket:-1",
+ &host, &port) == TRUE,
+ "mongo_util_parse_addr() can parse unix domain sockets with -1 port");
+ is (host, "/var/run/mongodb/mongodb.socket",
+ "Parsing a unix domain socket sets host to the socket name");
+ cmp_ok (port, "==", -1,
+ "Parsing a unix domain socket with a port set to -1, works");
+ g_free (host);
+ host = "deadbeef";
+ port = 42;
+}
+
+RUN_TEST (70, mongo_utils_parse_addr);
diff --git a/tests/unit/mongo/wire/cmd_custom.c b/tests/unit/mongo/wire/cmd_custom.c
new file mode 100644
index 0000000..7970aaa
--- /dev/null
+++ b/tests/unit/mongo/wire/cmd_custom.c
@@ -0,0 +1,67 @@
+#include "test.h"
+#include "tap.h"
+#include "mongo-wire.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_cmd_custom (void)
+{
+ bson *cmd;
+ mongo_packet *p;
+
+ mongo_packet_header hdr;
+ const guint8 *data;
+ gint32 data_size;
+
+ bson_cursor *c;
+ gint32 pos;
+
+ cmd = bson_new ();
+ bson_append_int32 (cmd, "getnonce", 1);
+
+ ok (mongo_wire_cmd_custom (1, "test", 0, NULL) == NULL,
+ "mongo_wire_cmd_custom() fails with a NULL command");
+ ok (mongo_wire_cmd_custom (1, "test", 0, cmd) == NULL,
+ "mongo_wire_cmd_custom() fails with an unfinished command");
+ bson_finish (cmd);
+ ok (mongo_wire_cmd_custom (1, NULL, 0, cmd) == NULL,
+ "mongo_wire_cmd_custom() fails with a NULL db");
+
+ ok ((p = mongo_wire_cmd_custom (1, "test", 0, cmd)) != NULL,
+ "mongo_wire_cmd_custom() works");
+ bson_free (cmd);
+
+ /* Verify the header */
+ mongo_wire_packet_get_header (p, &hdr);
+ cmp_ok ((data_size = mongo_wire_packet_get_data (p, &data)), "!=", -1,
+ "Packet data size looks fine");
+ cmp_ok (hdr.length, "==", sizeof (mongo_packet_header) + data_size,
+ "Packet header length is OK");
+ cmp_ok (hdr.id, "==", 1, "Packet request ID is ok");
+ cmp_ok (hdr.resp_to, "==", 0, "Packet reply ID is ok");
+
+ /*
+ * Test the created request
+ */
+
+ /* pos = zero + collection_name + NULL + skip + ret */
+ pos = sizeof (gint32) + strlen ("test.$cmd") + 1 + sizeof (gint32) * 2;
+ ok ((cmd = bson_new_from_data (data + pos,
+ bson_stream_doc_size (data, pos) - 1)) != NULL,
+ "Packet contains a BSON document");
+ bson_finish (cmd);
+
+ ok ((c = bson_find (cmd, "getnonce")) != NULL,
+ "BSON object contains a 'getnonce' key");
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT32,
+ "'getnonce' key has the correct type");
+ ok (bson_cursor_next (c) == FALSE,
+ "'getnonce' key is the last in the object");
+
+ bson_cursor_free (c);
+ bson_free (cmd);
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (12, mongo_wire_cmd_custom);
diff --git a/tests/unit/mongo/wire/cmd_delete.c b/tests/unit/mongo/wire/cmd_delete.c
new file mode 100644
index 0000000..9399046
--- /dev/null
+++ b/tests/unit/mongo/wire/cmd_delete.c
@@ -0,0 +1,73 @@
+#include "test.h"
+#include "tap.h"
+#include "mongo-wire.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_cmd_delete (void)
+{
+ mongo_packet *p;
+ bson *s, *tmp;
+
+ mongo_packet_header hdr;
+ const guint8 *data;
+ gint32 data_size;
+
+ gint32 pos;
+ bson_cursor *c;
+
+ s = test_bson_generate_full ();
+ tmp = bson_new ();
+
+ ok (mongo_wire_cmd_delete (1, NULL, 0, s) == NULL,
+ "mongo_wire_cmd_delete() fails with a NULL namespace");
+ ok (mongo_wire_cmd_delete (1, "test.ns", 0, NULL) == NULL,
+ "mongo_wire_cmd_delete() fails with a NULL selector");
+ ok (mongo_wire_cmd_delete (1, "test.ns", 0, tmp) == NULL,
+ "mongo_wire_cmd_delete() fails with an unfinished selector");
+ bson_free (tmp);
+
+ ok ((p = mongo_wire_cmd_delete (1, "test.ns", 0, s)) != NULL,
+ "mongo_wire_cmd_delete() works");
+ bson_free (s);
+
+ /* Test basic header data */
+ mongo_wire_packet_get_header (p, &hdr);
+ cmp_ok ((data_size = mongo_wire_packet_get_data (p, &data)), "!=", -1,
+ "Packet data size appears fine");
+
+ cmp_ok (hdr.length, "==", sizeof (mongo_packet_header) + data_size,
+ "Packet header length is correct");
+ cmp_ok (hdr.id, "==", 1, "Header ID is ok");
+ cmp_ok (hdr.resp_to, "==", 0, "Response ID is ok");
+
+ /*
+ * Test the constructed request
+ */
+
+ /* pos = zero + ns + NULL + flags */
+ pos = sizeof (gint32) + strlen ("test.ns") + 1 + sizeof (gint32);
+
+ ok ((s = bson_new_from_data (data + pos,
+ bson_stream_doc_size (data, pos) - 1)) != NULL,
+ "Packet contains a valid BSON update document");
+ bson_finish (s);
+
+ ok ((c = bson_find (s, "int32")) != NULL,
+ "BSON contains 'int32'");
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT32,
+ "int32 has correct type");
+ bson_cursor_next (c);
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT64,
+ "next element has correct type too");
+ ok (bson_cursor_next (c) == FALSE,
+ "No more data after the update BSON object");
+
+ bson_cursor_free (c);
+ bson_free (s);
+
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (13, mongo_wire_cmd_delete);
diff --git a/tests/unit/mongo/wire/cmd_get_more.c b/tests/unit/mongo/wire/cmd_get_more.c
new file mode 100644
index 0000000..5f56821
--- /dev/null
+++ b/tests/unit/mongo/wire/cmd_get_more.c
@@ -0,0 +1,50 @@
+#include "test.h"
+#include "tap.h"
+#include "mongo-wire.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_cmd_get_more (void)
+{
+ mongo_packet *p;
+
+ mongo_packet_header hdr;
+ const guint8 *data;
+ gint32 data_size;
+
+ gint32 pos;
+ gint64 cid = 9876543210;
+
+ ok (mongo_wire_cmd_get_more (1, NULL, 1, cid) == NULL,
+ "mongo_wire_cmd_get_more() fails with a NULL namespace");
+ ok ((p = mongo_wire_cmd_get_more (1, "test.ns", 1, cid)) != NULL,
+ "mongo_wire_cmd_get_more() works");
+
+ /* Test basic header data */
+ mongo_wire_packet_get_header (p, &hdr);
+ cmp_ok ((data_size = mongo_wire_packet_get_data (p, &data)), "!=", -1,
+ "Packet data size appears fine");
+
+ cmp_ok (hdr.length, "==", sizeof (mongo_packet_header) + data_size,
+ "Packet header length is correct");
+ cmp_ok (hdr.id, "==", 1, "Header ID is ok");
+ cmp_ok (hdr.resp_to, "==", 0, "Response ID is ok");
+
+ /*
+ * Test the request itself.
+ */
+
+ /* pos = zero + ns + NULL + ret */
+ pos = sizeof (gint32) + strlen ("test.ns") + 1 + sizeof (gint32);
+ cid = 0;
+ memcpy (&cid, data + pos, sizeof (cid));
+ cid = GINT64_FROM_LE (cid);
+
+ ok (cid == 9876543210,
+ "Included CID is correct");
+
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (7, mongo_wire_cmd_get_more);
diff --git a/tests/unit/mongo/wire/cmd_insert.c b/tests/unit/mongo/wire/cmd_insert.c
new file mode 100644
index 0000000..3e84847
--- /dev/null
+++ b/tests/unit/mongo/wire/cmd_insert.c
@@ -0,0 +1,83 @@
+#include "test.h"
+#include "tap.h"
+#include "mongo-wire.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_cmd_insert (void)
+{
+ bson *ins, *tmp;
+ mongo_packet *p;
+
+ mongo_packet_header hdr;
+ const guint8 *data;
+ gint32 data_size;
+
+ bson_cursor *c;
+ gint32 pos;
+
+ ins = test_bson_generate_full ();
+ tmp = bson_new ();
+
+ ok (mongo_wire_cmd_insert (1, NULL, ins, NULL) == NULL,
+ "mongo_wire_cmd_insert() fails with a NULL namespace");
+ ok (mongo_wire_cmd_insert (1, "test.ns", NULL) == NULL,
+ "mongo_wire_cmd_insert() fails with no documents");
+ ok (mongo_wire_cmd_insert (1, "test.ns", tmp, NULL) == NULL,
+ "mongo_wire_cmd_insert() fails with an unfinished document");
+ bson_finish (tmp);
+ ok ((p = mongo_wire_cmd_insert (1, "test.ns", ins, tmp, NULL)) != NULL,
+ "mongo_wire_cmd_insert() works");
+ bson_free (ins);
+ bson_free (tmp);
+
+ /* Test basic header data */
+ mongo_wire_packet_get_header (p, &hdr);
+ cmp_ok ((data_size = mongo_wire_packet_get_data (p, &data)), "!=", -1,
+ "Packet data size appears fine");
+
+ cmp_ok (hdr.length, "==", sizeof (mongo_packet_header) + data_size,
+ "Packet header length is correct");
+ cmp_ok (hdr.id, "==", 1, "Header ID is ok");
+ cmp_ok (hdr.resp_to, "==", 0, "Response ID is ok");
+
+ /*
+ * Test the first document
+ */
+
+ /* pos = zero + collection_name + NULL */
+ pos = sizeof (gint32) + strlen ("test.ns") + 1;
+ ok ((ins = bson_new_from_data (data + pos,
+ bson_stream_doc_size (data, pos) - 1)) != NULL,
+ "First document is included");
+ bson_finish (ins);
+
+ ok ((c = bson_find (ins, "int32")) != NULL,
+ "BSON contains 'int32'");
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT32,
+ "int32 has correct type");
+ bson_cursor_next (c);
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT64,
+ "next element has correct type too");
+ ok (bson_cursor_next (c) == FALSE,
+ "No more data after the update BSON object");
+ bson_cursor_free (c);
+
+ /*
+ * Test the second document
+ */
+ pos += bson_size (ins);
+ ok ((tmp = bson_new_from_data (data + pos,
+ bson_stream_doc_size (data, pos) - 1)) != NULL,
+ "Second document is included");
+ bson_finish (tmp);
+ cmp_ok (bson_size (tmp), "==", 5,
+ "Second document is empty");
+
+ bson_free (ins);
+ bson_free (tmp);
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (15, mongo_wire_cmd_insert);
diff --git a/tests/unit/mongo/wire/cmd_insert_n.c b/tests/unit/mongo/wire/cmd_insert_n.c
new file mode 100644
index 0000000..1c00193
--- /dev/null
+++ b/tests/unit/mongo/wire/cmd_insert_n.c
@@ -0,0 +1,95 @@
+#include "test.h"
+#include "tap.h"
+#include "mongo-wire.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_cmd_insert_n (void)
+{
+ bson *ins, *tmp;
+ const bson *docs[10];
+ mongo_packet *p;
+
+ mongo_packet_header hdr;
+ const guint8 *data;
+ gint32 data_size;
+
+ bson_cursor *c;
+ gint32 pos;
+
+ ins = test_bson_generate_full ();
+ tmp = bson_new ();
+
+ docs[0] = ins;
+ docs[1] = tmp;
+ docs[2] = ins;
+ docs[3] = ins;
+ docs[4] = NULL;
+ docs[5] = ins;
+
+ ok (mongo_wire_cmd_insert_n (1, NULL, 1, docs) == NULL,
+ "mongo_wire_cmd_insert_n() fails with a NULL namespace");
+ ok (mongo_wire_cmd_insert_n (1, "test.ns", 1, NULL) == NULL,
+ "mongo_wire_cmd_insert_n() fails with no documents");
+ ok (mongo_wire_cmd_insert_n (1, "test.ns", 0, docs) == NULL,
+ "mongo_wire_cmd_insert_n() fails with no documents");
+ ok (mongo_wire_cmd_insert_n (1, "test.ns", 2, docs) == NULL,
+ "mongo_wire_cmd_insert_n() fails with an unfinished document");
+ bson_finish (tmp);
+ ok (mongo_wire_cmd_insert_n (1, "test.ns", 5, docs) == NULL,
+ "mongo_wire_cmd_insert_n() fails with a NULL document in the array");
+ ok ((p = mongo_wire_cmd_insert_n (1, "test.ns", 3, docs)) != NULL,
+ "mongo_wire_cmd_insert_n() works");
+ bson_free (ins);
+ bson_free (tmp);
+
+ /* Test basic header data */
+ mongo_wire_packet_get_header (p, &hdr);
+ cmp_ok ((data_size = mongo_wire_packet_get_data (p, &data)), "!=", -1,
+ "Packet data size appears fine");
+
+ cmp_ok (hdr.length, "==", sizeof (mongo_packet_header) + data_size,
+ "Packet header length is correct");
+ cmp_ok (hdr.id, "==", 1, "Header ID is ok");
+ cmp_ok (hdr.resp_to, "==", 0, "Response ID is ok");
+
+ /*
+ * Test the first document
+ */
+
+ /* pos = zero + collection_name + NULL */
+ pos = sizeof (gint32) + strlen ("test.ns") + 1;
+ ok ((ins = bson_new_from_data (data + pos,
+ bson_stream_doc_size (data, pos) - 1)) != NULL,
+ "First document is included");
+ bson_finish (ins);
+
+ ok ((c = bson_find (ins, "int32")) != NULL,
+ "BSON contains 'int32'");
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT32,
+ "int32 has correct type");
+ bson_cursor_next (c);
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT64,
+ "next element has correct type too");
+ ok (bson_cursor_next (c) == FALSE,
+ "No more data after the update BSON object");
+ bson_cursor_free (c);
+
+ /*
+ * Test the second document
+ */
+ pos += bson_size (ins);
+ ok ((tmp = bson_new_from_data (data + pos,
+ bson_stream_doc_size (data, pos) - 1)) != NULL,
+ "Second document is included");
+ bson_finish (tmp);
+ cmp_ok (bson_size (tmp), "==", 5,
+ "Second document is empty");
+
+ bson_free (ins);
+ bson_free (tmp);
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (17, mongo_wire_cmd_insert_n);
diff --git a/tests/unit/mongo/wire/cmd_kill_cursors.c b/tests/unit/mongo/wire/cmd_kill_cursors.c
new file mode 100644
index 0000000..a8a8fd9
--- /dev/null
+++ b/tests/unit/mongo/wire/cmd_kill_cursors.c
@@ -0,0 +1,58 @@
+#include "test.h"
+#include "tap.h"
+#include "mongo-wire.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_cmd_kill_cursors (void)
+{
+ mongo_packet *p;
+
+ mongo_packet_header hdr;
+ const guint8 *data;
+ gint32 data_size;
+
+ gint32 pos, n = 0;
+ gint64 c1 = 9876543210, c2 = 1234567890;
+
+ ok (mongo_wire_cmd_kill_cursors (1, 0) == NULL,
+ "mongo_wire_cmd_kill_cursors() should fail with zero cursors");
+ ok (mongo_wire_cmd_kill_cursors (1, -1) == NULL,
+ "mongo_wire_cmd_kill_cursors() should fail with negative amount of "
+ "cursors");
+ ok ((p = mongo_wire_cmd_kill_cursors (1, 2, c1, c2)) != NULL,
+ "mongo_wire_cmd_kill_cursors() works");
+
+ /* Verify the header */
+ mongo_wire_packet_get_header (p, &hdr);
+ cmp_ok ((data_size = mongo_wire_packet_get_data (p, &data)), "!=", -1,
+ "Packet data size looks fine");
+ cmp_ok (hdr.length, "==", sizeof (mongo_packet_header) + data_size,
+ "Packet header length is OK");
+ cmp_ok (hdr.id, "==", 1, "Packet request ID is ok");
+ cmp_ok (hdr.resp_to, "==", 0, "Packet reply ID is ok");
+
+ /*
+ * Test the request contents
+ */
+ c1 = c2 = 0;
+ /* pos = zero + n */
+ pos = sizeof (gint32) + sizeof (n);
+
+ memcpy (&n, data + sizeof (gint32), sizeof (gint32));
+ memcpy (&c1, data + pos, sizeof (c1));
+ memcpy (&c2, data + pos + sizeof (c1), sizeof (c2));
+
+ n = GINT32_FROM_LE (n);
+ c1 = GINT64_FROM_LE (c1);
+ c2 = GINT64_FROM_LE (c2);
+
+ ok (n == 2, "Number of cursors are OK");
+ ok (c1 == 9876543210, "First cursor is OK");
+ ok (c2 == 1234567890, "Second cursor is OK");
+
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (10, mongo_wire_cmd_kill_cursors);
diff --git a/tests/unit/mongo/wire/cmd_query.c b/tests/unit/mongo/wire/cmd_query.c
new file mode 100644
index 0000000..58eb960
--- /dev/null
+++ b/tests/unit/mongo/wire/cmd_query.c
@@ -0,0 +1,117 @@
+#include "test.h"
+#include "tap.h"
+#include "mongo-wire.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_cmd_query (void)
+{
+ bson *q, *s, *tmp;
+ mongo_packet *p;
+
+ mongo_packet_header hdr;
+ const guint8 *data;
+ gint32 data_size;
+
+ bson_cursor *c;
+ gint32 pos;
+
+ q = test_bson_generate_full ();
+ s = bson_new ();
+ bson_append_boolean (s, "_id", TRUE);
+ bson_append_boolean (s, "double", TRUE);
+ bson_finish (s);
+
+ tmp = bson_new ();
+
+ ok (mongo_wire_cmd_query (1, NULL, 0, 0, 0, q, s) == NULL,
+ "mongo_wire_cmd_query() fails with a NULL namespace");
+ ok (mongo_wire_cmd_query (1, "test.ns", 0, 0, 0, NULL, s) == NULL,
+ "mongo_wire_cmd_query() fails with a NULL query");
+ ok (mongo_wire_cmd_query (1, "test.ns", 0, 0, 0, tmp, s) == NULL,
+ "mongo_wire_cmd_query() fails with an unfinished query");
+ ok (mongo_wire_cmd_query (1, "test.ns", 0, 0, 0, q, tmp) == NULL,
+ "mongo_wire_cmd_query() fails with an unfinished selector");
+ bson_free (tmp);
+
+ ok ((p = mongo_wire_cmd_query (1, "test.ns", 0, 0, 10, q, NULL)) != NULL,
+ "mongo_wire_cmd_query() works with a NULL selector");
+
+ mongo_wire_packet_get_header (p, &hdr);
+ cmp_ok ((data_size = mongo_wire_packet_get_data (p, &data)), "!=", -1,
+ "Packet data size looks fine");
+ cmp_ok (hdr.length, "==", sizeof (mongo_packet_header) + data_size,
+ "Packet header length is OK");
+ cmp_ok (hdr.id, "==", 1, "Packet request ID is ok");
+ cmp_ok (hdr.resp_to, "==", 0, "Packet reply ID is ok");
+
+ /* pos = zero + collection_name + NULL + skip + ret */
+ pos = sizeof (gint32) + strlen ("test.ns") + 1 + sizeof (gint32) * 2;
+ ok ((tmp = bson_new_from_data (data + pos,
+ bson_stream_doc_size (data, pos) - 1)) != NULL,
+ "Packet contains a valid BSON query document");
+ bson_finish (tmp);
+
+ ok ((c = bson_find (tmp, "int32")) != NULL,
+ "BSON contains 'int32'");
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT32,
+ "int32 has correct type");
+ bson_cursor_next (c);
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT64,
+ "next element has correct type too");
+ ok (bson_cursor_next (c) == FALSE,
+ "No more data after the update BSON object");
+ bson_cursor_free (c);
+
+ cmp_ok (hdr.length, "==", sizeof (mongo_packet_header) + pos +
+ bson_size (q),
+ "Packet header length is correct");
+ bson_free (tmp);
+ mongo_wire_packet_free (p);
+
+ /*
+ * Test again with a selector document
+ */
+
+ ok ((p = mongo_wire_cmd_query (1, "test.ns", 0, 0, 10, q, s)) != NULL,
+ "mongo_wire_cmd_query() works with a selector document");
+
+ mongo_wire_packet_get_header (p, &hdr);
+ cmp_ok ((data_size = mongo_wire_packet_get_data (p, &data)), "!=", -1,
+ "Packet data size looks fine");
+ cmp_ok (hdr.length, "==", sizeof (mongo_packet_header) + data_size,
+ "Packet header length is OK");
+ cmp_ok (hdr.id, "==", 1, "Packet request ID is ok");
+ cmp_ok (hdr.resp_to, "==", 0, "Packet reply ID is ok");
+
+ /* pos = zero + collection_name + NULL + skip + ret */
+ pos = sizeof (gint32) + strlen ("test.ns") + 1 + sizeof (gint32) * 2;
+ ok ((tmp = bson_new_from_data (data + pos,
+ bson_stream_doc_size (data, pos) - 1)) != NULL,
+ "Packet contains a valid BSON query document");
+ bson_finish (tmp);
+ pos += bson_size (tmp);
+ bson_free (tmp);
+ bson_free (q);
+ bson_free (s);
+
+ ok ((s = bson_new_from_data (data + pos,
+ bson_stream_doc_size (data, pos) - 1)) != NULL,
+ "Packet contains a valid BSON selector document");
+ bson_finish (s);
+
+ ok ((c = bson_find (s, "_id")) != NULL,
+ "BSON contains '_id'");
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_BOOLEAN,
+ "_id has correct type");
+ bson_cursor_next (c);
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_BOOLEAN,
+ "next element has correct type too");
+
+ bson_cursor_free (c);
+ bson_free (s);
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (25, mongo_wire_cmd_query);
diff --git a/tests/unit/mongo/wire/cmd_update.c b/tests/unit/mongo/wire/cmd_update.c
new file mode 100644
index 0000000..95a2a50
--- /dev/null
+++ b/tests/unit/mongo/wire/cmd_update.c
@@ -0,0 +1,97 @@
+#include "test.h"
+#include "tap.h"
+#include "mongo-wire.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_cmd_update (void)
+{
+ bson *sel, *upd, *tmp;
+ mongo_packet *p;
+
+ mongo_packet_header hdr;
+ const guint8 *data;
+ gint32 data_size;
+
+ bson_cursor *c;
+ gint32 pos;
+
+ sel = bson_new ();
+ bson_append_null (sel, "_id");
+ bson_finish (sel);
+
+ upd = test_bson_generate_full ();
+
+ ok (mongo_wire_cmd_update (1, NULL, 0, sel, upd) == NULL,
+ "mongo_wire_cmd_update() with a NULL namespace should fail");
+ ok (mongo_wire_cmd_update (1, "test.ns", 0, NULL, upd) == NULL,
+ "mongo_wire_cmd_update() with a NULL selector should fail");
+ ok (mongo_wire_cmd_update (1, "test.ns", 0, sel, NULL) == NULL,
+ "mongo_wire_cmd_update() with a NULL update should fail");
+
+ tmp = bson_new ();
+ ok (mongo_wire_cmd_update (1, "test.ns", 0, tmp, upd) == NULL,
+ "mongo_wire_cmd_update() fails with an unfinished selector");
+ ok (mongo_wire_cmd_update (1, "test.ns", 0, sel, tmp) == NULL,
+ "mongo_wire_cmd_update() fails with an unfinished update");
+ bson_free (tmp);
+
+ ok ((p = mongo_wire_cmd_update (1, "test.ns", 0, sel, upd)) != NULL,
+ "mongo_wire_cmd_update() works");
+
+ bson_free (sel);
+
+ mongo_wire_packet_get_header (p, &hdr);
+ cmp_ok ((data_size = mongo_wire_packet_get_data (p, &data)), "!=", -1,
+ "Packet data size looks fine");
+ cmp_ok (hdr.length, "==", sizeof (mongo_packet_header) + data_size,
+ "Packet header length is OK");
+ cmp_ok (hdr.id, "==", 1, "Packet request ID is ok");
+ cmp_ok (hdr.resp_to, "==", 0, "Packet reply ID is ok");
+
+ /*
+ * Verify the selector object.
+ */
+
+ /* pos = zero + collection_name + NULL + flags */
+ pos = sizeof (gint32) + strlen ("test.ns") + 1 + sizeof (gint32);
+ ok ((sel = bson_new_from_data (data + pos, (gint32)data[pos] - 1)) != NULL,
+ "Packet contains a valid BSON selector document");
+ bson_finish (sel);
+
+ ok ((c = bson_find (sel, "_id")) != NULL,
+ "BSON contains an _id");
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_NULL,
+ "_id has correct type");
+ bson_cursor_free (c);
+ bson_free (sel);
+
+ /*
+ * Verify the update object
+ */
+ pos += (gint32)data[pos];
+ ok ((tmp = bson_new_from_data (data + pos,
+ bson_stream_doc_size (data, pos) - 1)) != NULL,
+ "Packet contains a valid BSON update document");
+ bson_finish (tmp);
+ cmp_ok (bson_size (upd), "==", bson_size (tmp),
+ "Packet's update document has the correct size");
+
+ ok ((c = bson_find (tmp, "int32")) != NULL,
+ "BSON contains 'int32'");
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT32,
+ "int32 has correct type");
+ bson_cursor_next (c);
+ cmp_ok (bson_cursor_type (c), "==", BSON_TYPE_INT64,
+ "next element has correct type too");
+ ok (bson_cursor_next (c) == FALSE,
+ "No more data after the update BSON object");
+
+ bson_cursor_free (c);
+ bson_free (tmp);
+ bson_free (upd);
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (19, mongo_wire_cmd_update);
diff --git a/tests/unit/mongo/wire/packet_get_set_data.c b/tests/unit/mongo/wire/packet_get_set_data.c
new file mode 100644
index 0000000..2f06b8f
--- /dev/null
+++ b/tests/unit/mongo/wire/packet_get_set_data.c
@@ -0,0 +1,65 @@
+#include "tap.h"
+#include "test.h"
+#include "mongo-wire.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_packet_get_set_data (void)
+{
+ mongo_packet *p;
+ mongo_packet_header h;
+ guint8 data[32768];
+ const guint8 *idata;
+
+ p = mongo_wire_packet_new ();
+ memset (data, 'x', sizeof (data));
+
+ ok (mongo_wire_packet_get_data (NULL, &idata) == -1,
+ "mongo_wire_packet_get_data() with a NULL packet should fail");
+ ok (mongo_wire_packet_get_data (p, NULL) == -1,
+ "mongo_wire_packet_get_data() with NULL destination should fail");
+ ok (mongo_wire_packet_get_data (p, &idata) == -1,
+ "mongo_wire_packet_get_data() with an empty packet should fail");
+ ok (mongo_wire_packet_set_data (NULL, (const guint8 *)&data,
+ sizeof (data)) == FALSE,
+ "mongo_wire_packet_set_data() with a NULL packet should fail");
+ ok (mongo_wire_packet_set_data (p, NULL, sizeof (data)) == FALSE,
+ "mongo_wire_packet_set_data() with NULL data should fail");
+ ok (mongo_wire_packet_set_data (p, (const guint8 *)&data, 0) == FALSE,
+ "mongo_wire_packet_set_data() with zero size should fail");
+ ok (mongo_wire_packet_set_data (p, (const guint8 *)&data, -1) == FALSE,
+ "mongo_wire_packet_set_data() with negative size should fail");
+
+ ok (mongo_wire_packet_set_data (p, (const guint8 *)&data,
+ sizeof (data)),
+ "mongo_wire_packet_set_data() works");
+ cmp_ok (mongo_wire_packet_get_data (p, &idata), "==", sizeof (data),
+ "mongo_wire_packet_get_data() works");
+
+ mongo_wire_packet_get_header (p, &h);
+
+ cmp_ok (h.length, "==", sizeof (data) + sizeof (mongo_packet_header),
+ "Packet length is updated properly");
+ ok (memcmp (data, idata, sizeof (data)) == 0,
+ "Setting & retrieving data works");
+
+ memset (data, 'a', sizeof (data));
+
+ ok (mongo_wire_packet_set_data (p, (const guint8 *)&data,
+ sizeof (data) / 2),
+ "Re-setting the data works");
+ cmp_ok (mongo_wire_packet_get_data (p, &idata), "==", sizeof (data) / 2,
+ "Retrieving the data works still");
+
+ mongo_wire_packet_get_header (p, &h);
+
+ cmp_ok (h.length, "==", sizeof (data) / 2 + sizeof (mongo_packet_header),
+ "Packet length is updated properly");
+ ok (memcmp (data, idata, sizeof (data) / 2) == 0,
+ "Setting & retrieving data works");
+
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (15, mongo_wire_packet_get_set_data);
diff --git a/tests/unit/mongo/wire/packet_get_set_header.c b/tests/unit/mongo/wire/packet_get_set_header.c
new file mode 100644
index 0000000..38e0ea7
--- /dev/null
+++ b/tests/unit/mongo/wire/packet_get_set_header.c
@@ -0,0 +1,58 @@
+#include "tap.h"
+#include "test.h"
+#include "mongo-wire.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_packet_get_set_header (void)
+{
+ mongo_packet *p;
+ mongo_packet_header ph1, ph2;
+
+ p = mongo_wire_packet_new ();
+
+ ok (mongo_wire_packet_get_header (NULL, &ph2) == FALSE,
+ "mongo_wire_packet_get_header() should fail with a NULL packet");
+ ok (mongo_wire_packet_get_header (p, NULL) == FALSE,
+ "mongo_wire_packet_get_header() should fail with a NULL header");
+ ok (mongo_wire_packet_set_header (NULL, &ph1) == FALSE,
+ "mongo_wire_packet_set_header() should fail with a NULL packet");
+ ok (mongo_wire_packet_set_header (p, NULL) == FALSE,
+ "mongo_wire_packet_set_header() should fail with a NULL header");
+
+ ok (mongo_wire_packet_get_header (p, &ph2),
+ "mongo_wire_packet_get_header() works on a fresh packet");
+ cmp_ok (ph2.length, "==", sizeof (mongo_packet_header),
+ "Initial packet length is the length of the header");
+
+ ph1.length = sizeof (mongo_packet_header);
+ ph1.id = 1;
+ ph1.resp_to = 0;
+ ph1.opcode = 1000;
+
+ memset (&ph2, 0, sizeof (mongo_packet_header));
+
+ ok (mongo_wire_packet_set_header (p, &ph1),
+ "mongo_wire_packet_set_header() works");
+ ok (mongo_wire_packet_get_header (p, &ph2),
+ "mongo_wire_packet_get_header() works");
+
+ cmp_ok (ph1.length, "==", ph2.length,
+ "Packet lengths match");
+ cmp_ok (ph1.id, "==", ph2.id,
+ "Sequence IDs match");
+ cmp_ok (ph1.resp_to, "==", ph2.resp_to,
+ "Response IDs match");
+ cmp_ok (ph1.opcode, "==", ph2.opcode,
+ "OPCodes match");
+
+ ph1.length = GINT32_TO_LE (1);
+ ok (mongo_wire_packet_set_header (p, &ph1) == FALSE,
+ "Setting a packet with length shorter than the header "
+ "returns an error");
+
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (13, mongo_wire_packet_get_set_header);
diff --git a/tests/unit/mongo/wire/packet_get_set_header_raw.c b/tests/unit/mongo/wire/packet_get_set_header_raw.c
new file mode 100644
index 0000000..d97a8b3
--- /dev/null
+++ b/tests/unit/mongo/wire/packet_get_set_header_raw.c
@@ -0,0 +1,56 @@
+#include "tap.h"
+#include "test.h"
+#include "mongo-wire.h"
+
+#include "libmongo-private.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_packet_get_set_header_raw (void)
+{
+ mongo_packet *p;
+ mongo_packet_header ph1, ph2;
+
+ p = mongo_wire_packet_new ();
+
+ ok (mongo_wire_packet_get_header_raw (NULL, &ph2) == FALSE,
+ "mongo_wire_packet_get_header_raw() should fail with a NULL packet");
+ ok (mongo_wire_packet_get_header_raw (p, NULL) == FALSE,
+ "mongo_wire_packet_get_header_raw() should fail with a NULL header");
+ ok (mongo_wire_packet_set_header_raw (NULL, &ph1) == FALSE,
+ "mongo_wire_packet_set_header_raw() should fail with a NULL packet");
+ ok (mongo_wire_packet_set_header_raw (p, NULL) == FALSE,
+ "mongo_wire_packet_set_header_raw() should fail with a NULL header");
+
+ ok (mongo_wire_packet_get_header_raw (p, &ph2),
+ "mongo_wire_packet_get_header_raw() works on a fresh packet");
+ /* Need to convert from LE, because _new() sets the length to LE. */
+ cmp_ok (GINT32_FROM_LE (ph2.length), "==", sizeof (mongo_packet_header),
+ "Initial packet length is the length of the header");
+
+ ph1.length = sizeof (mongo_packet_header);
+ ph1.id = 1;
+ ph1.resp_to = 0;
+ ph1.opcode = 1000;
+
+ memset (&ph2, 0, sizeof (mongo_packet_header));
+
+ ok (mongo_wire_packet_set_header_raw (p, &ph1),
+ "mongo_wire_packet_set_header_raw() works");
+ ok (mongo_wire_packet_get_header_raw (p, &ph2),
+ "mongo_wire_packet_get_header_raw() works");
+
+ cmp_ok (ph1.length, "==", ph2.length,
+ "Packet lengths match");
+ cmp_ok (ph1.id, "==", ph2.id,
+ "Sequence IDs match");
+ cmp_ok (ph1.resp_to, "==", ph2.resp_to,
+ "Response IDs match");
+ cmp_ok (ph1.opcode, "==", ph2.opcode,
+ "OPCodes match");
+
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (12, mongo_wire_packet_get_set_header_raw);
diff --git a/tests/unit/mongo/wire/packet_new.c b/tests/unit/mongo/wire/packet_new.c
new file mode 100644
index 0000000..4940542
--- /dev/null
+++ b/tests/unit/mongo/wire/packet_new.c
@@ -0,0 +1,20 @@
+#include "tap.h"
+#include "test.h"
+#include "mongo-wire.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_packet_new (void)
+{
+ mongo_packet *p;
+
+ ok ((p = mongo_wire_packet_new ()) != NULL,
+ "mongo_wire_packet_new() works");
+ mongo_wire_packet_free (NULL);
+ pass ("mongo_wire_packet_free(NULL) works");
+ mongo_wire_packet_free (p);
+ pass ("mongo_wire_packet_free() works");
+}
+
+RUN_TEST (3, mongo_wire_packet_new);
diff --git a/tests/unit/mongo/wire/reply_packet_get_data.c b/tests/unit/mongo/wire/reply_packet_get_data.c
new file mode 100644
index 0000000..e22f142
--- /dev/null
+++ b/tests/unit/mongo/wire/reply_packet_get_data.c
@@ -0,0 +1,52 @@
+#include "test.h"
+#include "tap.h"
+#include "bson.h"
+#include "mongo-wire.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_reply_packet_get_data (void)
+{
+ mongo_packet *p;
+ mongo_packet_header h;
+ const guint8 *data;
+ bson *b;
+
+ p = mongo_wire_packet_new ();
+ memset (&h, 0, sizeof (mongo_packet_header));
+ h.opcode = 0;
+ h.length = sizeof (mongo_packet_header);
+ mongo_wire_packet_set_header (p, &h);
+
+ ok (mongo_wire_reply_packet_get_data (NULL, &data) == FALSE,
+ "mongo_wire_reply_packet_get_data() fails with a NULL packet");
+ ok (mongo_wire_reply_packet_get_data (p, NULL) == FALSE,
+ "mongo_wire_reply_packet_get_data() fails with a NULL destination");
+ ok (mongo_wire_reply_packet_get_data (p, &data) == FALSE,
+ "mongo_wire_reply_packet_get_data() fails with a non-reply packet");
+
+ h.opcode = 1;
+ mongo_wire_packet_set_header (p, &h);
+
+ ok (mongo_wire_reply_packet_get_data (p, &data) == FALSE,
+ "mongo_wire_reply_packet_get_data() fails if the packet has "
+ "no data");
+
+ mongo_wire_packet_free (p);
+
+ p = test_mongo_wire_generate_reply (TRUE, 2, TRUE);
+
+ ok (mongo_wire_reply_packet_get_data (p, &data),
+ "mongo_wire_reply_packet_get_data() works");
+
+ b = test_bson_generate_full ();
+
+ ok (memcmp (data, bson_data (b), bson_size (b)) == 0,
+ "The returned data is correct");
+
+ bson_free (b);
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (6, mongo_wire_reply_packet_get_data);
diff --git a/tests/unit/mongo/wire/reply_packet_get_header.c b/tests/unit/mongo/wire/reply_packet_get_header.c
new file mode 100644
index 0000000..36b548c
--- /dev/null
+++ b/tests/unit/mongo/wire/reply_packet_get_header.c
@@ -0,0 +1,54 @@
+#include "test.h"
+#include "tap.h"
+#include "mongo-wire.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_reply_packet_get_header (void)
+{
+ mongo_packet *p;
+ mongo_packet_header h;
+ mongo_reply_packet_header rh;
+
+ p = mongo_wire_packet_new ();
+ memset (&h, 0, sizeof (mongo_packet_header));
+ h.opcode = 1;
+ h.length = sizeof (mongo_packet_header);
+
+ mongo_wire_packet_set_header (p, &h);
+
+ ok (mongo_wire_reply_packet_get_header (NULL, &rh) == FALSE,
+ "mongo_wire_reply_packet_get_header() fails with a NULL packet");
+ ok (mongo_wire_reply_packet_get_header (p, NULL) == FALSE,
+ "mongo_wire_reply_packet_get_header() fails with a NULL header");
+
+ ok (mongo_wire_reply_packet_get_header (p, &rh) == FALSE,
+ "mongo_wire_reply_packet_get_header() fails if the packet has "
+ "no reply header");
+
+ h.opcode = 2;
+ mongo_wire_packet_set_header (p, &h);
+ ok (mongo_wire_reply_packet_get_header (p, &rh) == FALSE,
+ "mongo_wire_reply_packet_get_header() fails if the packet is "
+ "not a reply packet");
+
+ mongo_wire_packet_free (p);
+
+ p = test_mongo_wire_generate_reply (TRUE, 0, FALSE);
+
+ ok (mongo_wire_reply_packet_get_header (p, &rh),
+ "mongo_wire_reply_packet_get_header() works");
+ cmp_ok (rh.flags, "==", 0,
+ "Reply flags are correct");
+ ok (rh.cursor_id == (gint64)12345,
+ "Cursor ID is correct");
+ cmp_ok (rh.start, "==", 0,
+ "Reply start document is OK");
+ cmp_ok (rh.returned, "==", 0,
+ "Number of documents returned is OK");
+
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (9, mongo_wire_reply_packet_get_header);
diff --git a/tests/unit/mongo/wire/reply_packet_get_nth_document.c b/tests/unit/mongo/wire/reply_packet_get_nth_document.c
new file mode 100644
index 0000000..68d9fed
--- /dev/null
+++ b/tests/unit/mongo/wire/reply_packet_get_nth_document.c
@@ -0,0 +1,68 @@
+#include "test.h"
+#include "tap.h"
+#include "mongo-wire.h"
+#include "bson.h"
+
+#include <string.h>
+
+void
+test_mongo_wire_reply_packet_get_nth_document (void)
+{
+ mongo_packet *p;
+ bson *b, *doc;
+ mongo_packet_header h;
+
+ p = mongo_wire_packet_new ();
+ memset (&h, 0, sizeof (mongo_packet_header));
+ h.opcode = 2;
+ h.length = sizeof (mongo_packet_header);
+ mongo_wire_packet_set_header (p, &h);
+
+ ok (mongo_wire_reply_packet_get_nth_document (NULL, 1, &doc) == FALSE,
+ "mongo_wire_reply_packet_get_nth_document() fails with a NULL packet");
+ ok (mongo_wire_reply_packet_get_nth_document (p, 0, &doc) == FALSE,
+ "mongo_wire_reply_packet_get_nth_document() fails with n = 0");
+ ok (mongo_wire_reply_packet_get_nth_document (p, -42, &doc) == FALSE,
+ "mongo_wire_reply_packet_get_nth_document() fails with n < 0");
+ ok (mongo_wire_reply_packet_get_nth_document (p, 1, NULL) == FALSE,
+ "mongo_wire_reply_packet_get_nth_document() fails with a NULL "
+ "destination");
+
+ ok (mongo_wire_reply_packet_get_nth_document (p, 1, &doc) == FALSE,
+ "mongo_wire_reply_packet_get_nth_document() fails with a "
+ "non-reply packet");
+
+ h.opcode = 1;
+ mongo_wire_packet_set_header (p, &h);
+
+ ok (mongo_wire_reply_packet_get_nth_document (p, 1, &doc) == FALSE,
+ "mongo_wire_reply_packet_get_nth_document() fails with an "
+ "incomplete reply packet");
+
+ mongo_wire_packet_free (p);
+
+ p = test_mongo_wire_generate_reply (TRUE, 0, FALSE);
+ ok (mongo_wire_reply_packet_get_nth_document (p, 1, &doc) == FALSE,
+ "mongo_wire_reply_packet_get_nth_document() fails if there are "
+ "no documents to return");
+ mongo_wire_packet_free (p);
+
+ p = test_mongo_wire_generate_reply (TRUE, 2, TRUE);
+ ok (mongo_wire_reply_packet_get_nth_document (p, 2, &doc),
+ "mongo_wire_reply_packet_get_nth_document() works");
+ b = test_bson_generate_full ();
+ bson_finish (doc);
+
+ ok (memcmp (bson_data (b), bson_data (doc), bson_size (doc)) == 0,
+ "Returned document is correct");
+ bson_free (doc);
+ bson_free (b);
+
+ ok (mongo_wire_reply_packet_get_nth_document (p, 3, &doc) == FALSE,
+ "mongo_wire_reply_packet_get_nth_document() fails if the requested "
+ "document does not exist");
+
+ mongo_wire_packet_free (p);
+}
+
+RUN_TEST (10, mongo_wire_reply_packet_get_nth_document);