summaryrefslogtreecommitdiff
path: root/tests/func
diff options
context:
space:
mode:
Diffstat (limited to 'tests/func')
-rw-r--r--tests/func/bson/f_weird_types.c71
-rw-r--r--tests/func/bson/huge_doc.c51
-rw-r--r--tests/func/mongo/client/f_client_big_packet.c57
-rw-r--r--tests/func/mongo/sync-cursor/f_sync_cursor_iterate.c88
-rw-r--r--tests/func/mongo/sync-cursor/f_sync_cursor_tailable.c115
-rw-r--r--tests/func/mongo/sync-gridfs-chunk/f_sync_gridfs_chunk.c499
-rw-r--r--tests/func/mongo/sync-gridfs-stream/f_sync_gridfs_stream.c501
-rw-r--r--tests/func/mongo/sync-pool/f_sync_pool.c169
-rw-r--r--tests/func/mongo/sync/f_sync_auto_reauth.c58
-rw-r--r--tests/func/mongo/sync/f_sync_auto_reconnect.c61
-rw-r--r--tests/func/mongo/sync/f_sync_auto_reconnect_cache.c107
-rw-r--r--tests/func/mongo/sync/f_sync_conn_seed_add.c58
-rw-r--r--tests/func/mongo/sync/f_sync_invalid_getlasterror.c27
-rw-r--r--tests/func/mongo/sync/f_sync_max_insert_size.c69
-rw-r--r--tests/func/mongo/sync/f_sync_oidtest.c44
-rw-r--r--tests/func/mongo/sync/f_sync_safe_mode.c112
-rw-r--r--tests/func/mongo/sync/f_sync_safe_mode_cache.c131
-rw-r--r--tests/func/mongo/sync/f_sync_write_error.c52
18 files changed, 2270 insertions, 0 deletions
diff --git a/tests/func/bson/f_weird_types.c b/tests/func/bson/f_weird_types.c
new file mode 100644
index 0000000..100db8c
--- /dev/null
+++ b/tests/func/bson/f_weird_types.c
@@ -0,0 +1,71 @@
+#include "bson.h"
+#include "tap.h"
+#include "test.h"
+
+#include "libmongo-private.h"
+
+#include <string.h>
+
/* Verify BSON cursor behaviour on weird or invalid element types.
 *
 * Uses libmongo-private.h so the test can append raw bytes directly to
 * the bson object's internal GByteArray, crafting elements the public
 * API cannot produce: first a deprecated-but-valid DBPointer, then an
 * invalid BSON_TYPE_NONE element. */
static void
test_func_weird_types (void)
{
  bson *b;
  bson_cursor *c;
  guint8 type = BSON_TYPE_DBPOINTER;
  gint32 slen;

  b = bson_new ();
  bson_append_int32 (b, "int32", 42);

  /* Append weird stuff: a hand-crafted DBPointer element -- type byte,
   * NUL-terminated key, length-prefixed string, and a 12-byte ObjectId
   * (only the first 12 chars of the literal are appended). */
  b->data = g_byte_array_append (b->data, (const guint8 *)&type, sizeof (type));
  b->data = g_byte_array_append (b->data, (const guint8 *)"dbpointer",
                                 strlen ("dbpointer") + 1);
  slen = GINT32_TO_LE (strlen ("refname") + 1);
  b->data = g_byte_array_append (b->data, (const guint8 *)&slen, sizeof (gint32));
  b->data = g_byte_array_append (b->data, (const guint8 *)"refname",
                                 strlen ("refname") + 1);
  b->data = g_byte_array_append (b->data, (const guint8 *)"0123456789ABCDEF",
                                 12);

  bson_append_boolean (b, "Here be dragons?", TRUE);
  bson_finish (b);

  /* The cursor should skip over the unsupported-but-valid element and
   * still reach the key appended after it. */
  c = bson_find (b, "Here be dragons?");
  ok (c != NULL,
      "bson_find() can find elements past unsupported BSON types");
  bson_cursor_free (c);
  bson_free (b);

  /* Now do it again, but append a type we can't iterate over */
  b = bson_new ();
  bson_append_int32 (b, "int32", 42);

  /* Append BSON_TYPE_NONE: an invalid element type, so iteration must
   * stop rather than skip. */
  type = BSON_TYPE_NONE;
  b->data = g_byte_array_append (b->data, (const guint8 *)&type, sizeof (type));
  b->data = g_byte_array_append (b->data, (const guint8 *)"dbpointer",
                                 strlen ("dbpointer") + 1);
  b->data = g_byte_array_append (b->data, (const guint8 *)"0123456789ABCDEF",
                                 12);

  bson_append_boolean (b, "Here be dragons?", TRUE);
  bson_finish (b);

  c = bson_find (b, "Here be dragons?");
  ok (c == NULL,
      "bson_find() should bail out when encountering an invalid element.");
  bson_cursor_free (c);

  c = bson_cursor_new (b);
  bson_cursor_next (c); /* This will find the first element, and
                           position us there. */
  bson_cursor_next (c); /* This positions after the first element. */
  ok (bson_cursor_next (c) == FALSE,
      "bson_cursor_next() should bail out when encountering an invalid element.");
  bson_cursor_free (c);

  bson_free (b);
}

RUN_TEST (3, func_weird_types);
diff --git a/tests/func/bson/huge_doc.c b/tests/func/bson/huge_doc.c
new file mode 100644
index 0000000..d5daafe
--- /dev/null
+++ b/tests/func/bson/huge_doc.c
@@ -0,0 +1,51 @@
+#include "bson.h"
+#include "tap.h"
+#include "test.h"
+
+#ifndef HUGE_DOC_SIZE
+#define HUGE_DOC_SIZE (1024 * 1024)
+#endif
+
+#include <string.h>
+
/* Build a ~1MiB BSON document, embed it in another document, and
 * verify that the sizes reported for container and embedded copy are
 * consistent. */
static void
test_bson_huge_doc (void)
{
  bson *b, *s;
  bson_cursor *c;
  gchar *buffer;
  gint32 ds1; /* finished size of the inner document */

  /* A megabyte of 'a's, NUL-terminated so it can be a BSON string. */
  buffer = (gchar *)g_malloc (HUGE_DOC_SIZE);
  memset (buffer, 'a', HUGE_DOC_SIZE);
  buffer[HUGE_DOC_SIZE - 1] = '\0';

  b = bson_new ();
  bson_append_int32 (b, "preamble", 1);
  bson_append_string (b, "huge", buffer, -1); /* -1: use strlen() */
  bson_append_int32 (b, "post", 1234);
  bson_finish (b);
  ds1 = bson_size (b);

  g_free (buffer);

  s = bson_new ();
  bson_append_document (s, "hugedoc", b);
  bson_finish (s);
  bson_free (b);

  cmp_ok (bson_size (s), ">", ds1,
          "Document embedding another huge one, has bigger size");

  /* Extract the embedded document; b now refers to the extracted copy. */
  c = bson_find (s, "hugedoc");
  bson_cursor_get_document (c, &b);

  cmp_ok (bson_size (b), "==", ds1,
          "The embedded document has the correct, huge size");

  bson_cursor_free (c);
  bson_free (s);
  bson_free (b);
}

RUN_TEST (2, bson_huge_doc);
diff --git a/tests/func/mongo/client/f_client_big_packet.c b/tests/func/mongo/client/f_client_big_packet.c
new file mode 100644
index 0000000..38176ff
--- /dev/null
+++ b/tests/func/mongo/client/f_client_big_packet.c
@@ -0,0 +1,57 @@
+#include "test.h"
+#include "mongo.h"
+
+#define BIG_PACKET_SIZE 2 * 1024 * 1024
+
+void
+test_func_client_big_packet (void)
+{
+ mongo_connection *conn;
+ mongo_packet *p;
+
+ guint8 *data;
+ bson *b;
+ gint32 exp_size;
+
+ conn = mongo_connect (config.primary_host, config.primary_port);
+
+ b = bson_new_sized (BIG_PACKET_SIZE + 1024);
+ data = g_malloc (BIG_PACKET_SIZE);
+ memset (data, 'z', BIG_PACKET_SIZE);
+ bson_append_boolean (b, "big_packet_size", TRUE);
+ bson_append_binary (b, "bighead", BSON_BINARY_SUBTYPE_GENERIC,
+ data, BIG_PACKET_SIZE);
+ bson_finish (b);
+ exp_size = bson_size (b);
+
+ p = mongo_wire_cmd_insert (1, config.ns, b, NULL);
+ mongo_packet_send (conn, p);
+ bson_free (b);
+ mongo_wire_packet_free (p);
+
+ b = bson_new ();
+ bson_append_boolean (b, "big_packet_size", TRUE);
+ bson_finish (b);
+
+ p = mongo_wire_cmd_query (2, config.ns, 0, 0, 1, b, NULL);
+ mongo_packet_send (conn, p);
+ mongo_wire_packet_free (p);
+ bson_free (b);
+
+ p = mongo_packet_recv (conn);
+ ok (p != NULL,
+ "mongo_packet_recv() works with a huge packet");
+
+ mongo_wire_reply_packet_get_nth_document (p, 1, &b);
+ bson_finish (b);
+ mongo_wire_packet_free (p);
+
+ cmp_ok (exp_size + 17, "==", bson_size (b), /* +17: _id + value */
+ "Huge packet receiving works, and returns a same sized packet");
+
+ bson_free (b);
+
+ mongo_disconnect (conn);
+}
+
+RUN_NET_TEST (2, func_client_big_packet);
diff --git a/tests/func/mongo/sync-cursor/f_sync_cursor_iterate.c b/tests/func/mongo/sync-cursor/f_sync_cursor_iterate.c
new file mode 100644
index 0000000..56ccb77
--- /dev/null
+++ b/tests/func/mongo/sync-cursor/f_sync_cursor_iterate.c
@@ -0,0 +1,88 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <string.h>
+
/* Insert ten test documents, then iterate over them with a sync
 * cursor (batch size 3, so several fetches are needed) and verify that
 * every document comes back exactly once, in insertion order. */
void
test_func_mongo_sync_cursor_iterate (void)
{
  mongo_sync_connection *conn;
  bson *query, *result;
  mongo_sync_cursor *sc;
  bson_cursor *c;
  gint i;
  gint32 first_i32 = -1, last_i32 = -1, current_i32 = -1;
  gboolean early_break = FALSE, continous = TRUE;

  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);

  /* Insert 10 documents with consecutive i32 values. */
  for (i = 0; i < 10; i++)
    {
      bson *data = bson_new ();
      bson_append_boolean (data, "f_sync_cursor_iterate", TRUE);
      bson_append_int32 (data, "i32", 42 * 100 + i);
      bson_finish (data);

      mongo_sync_cmd_insert (conn, config.ns, data, NULL);
      bson_free (data);
    }

  query = bson_new ();
  bson_append_boolean (query, "f_sync_cursor_iterate", TRUE);
  bson_finish (query);

  /* Batch size 3 forces the cursor to fetch more batches mid-iteration. */
  sc = mongo_sync_cursor_new (conn, config.ns,
                              mongo_sync_cmd_query (conn, config.ns, 0, 0, 3,
                                                    query, NULL));
  bson_free (query);

  ok (sc != NULL,
      "mongo_sync_cursor_new() works");

  /* Reading data before the first _next() must fail. */
  result = mongo_sync_cursor_get_data (sc);
  ok (result == NULL,
      "mongo_sync_cursor_get_data() should fail without _cursor_next()");

  i = 0;
  while (mongo_sync_cursor_next (sc) && i < 10)
    {
      result = mongo_sync_cursor_get_data (sc);

      if (!result)
        {
          early_break = TRUE;
          break;
        }
      i++;
      c = bson_find (result, "i32");
      bson_cursor_get_int32 (c, &current_i32);
      bson_cursor_free (c);
      bson_free (result);

      /* Remember the first value; seed last_i32 so the continuity
         check below holds for the first element. */
      if (first_i32 == -1)
        {
          first_i32 = current_i32;
          last_i32 = first_i32 - 1;
        }

      if (current_i32 != last_i32 + 1)
        continous = FALSE;
      last_i32 = current_i32;
    }

  ok (early_break == FALSE,
      "mongo_sync_cursor_next() can iterate over the whole stuff");
  ok (continous == TRUE,
      "mongo_sync_cursor_next() iterates over all elements");

  cmp_ok (first_i32, "!=", last_i32,
          "Iteration returns different elements, as expected");
  cmp_ok (i, ">=", 10,
          "Iteration really does return all documents");

  mongo_sync_cursor_free (sc);
  mongo_sync_disconnect (conn);
}

RUN_NET_TEST (6, func_mongo_sync_cursor_iterate);
diff --git a/tests/func/mongo/sync-cursor/f_sync_cursor_tailable.c b/tests/func/mongo/sync-cursor/f_sync_cursor_tailable.c
new file mode 100644
index 0000000..c200ed8
--- /dev/null
+++ b/tests/func/mongo/sync-cursor/f_sync_cursor_tailable.c
@@ -0,0 +1,115 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <string.h>
+
+void
+test_func_mongo_sync_cursor_tailable (void)
+{
+ mongo_sync_connection *conn;
+ bson *query, *data;
+ mongo_sync_cursor *sc, *tc;
+ mongo_packet *p;
+ gint i;
+ gchar *capped_ns, *capped_coll;
+
+ bson_cursor *c;
+ gboolean tailed = FALSE;
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+
+ query = bson_new ();
+ bson_finish (query);
+
+ p = mongo_sync_cmd_query (conn, config.ns,
+ MONGO_WIRE_FLAG_QUERY_TAILABLE_CURSOR |
+ MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT,
+ 0, 3, query, NULL);
+ ok (p == NULL,
+ "Tailable cursors should not work on non-capped collections");
+
+ capped_coll = g_strconcat (config.coll, ".capped", NULL);
+ capped_ns = g_strconcat (config.ns, ".capped", NULL);
+
+ query = bson_build (BSON_TYPE_STRING, "create", capped_coll, -1,
+ BSON_TYPE_BOOLEAN, "capped", TRUE,
+ BSON_TYPE_INT32, "size", 64 * 1024 * 10,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+
+ mongo_sync_cmd_drop (conn, config.db, capped_coll);
+ p = mongo_sync_cmd_custom (conn, config.db, query);
+ bson_free (query);
+
+ ok (p != NULL,
+ "Creating a capped collection works");
+ mongo_wire_packet_free (p);
+
+ for (i = 0; i < 10; i++)
+ {
+ data = bson_new ();
+ bson_append_boolean (data, "f_sync_cursor_tailable", TRUE);
+ bson_append_int32 (data, "i32", 42 * 1000 + i);
+ bson_finish (data);
+
+ mongo_sync_cmd_insert (conn, capped_ns, data, NULL);
+ bson_free (data);
+ }
+
+ query = bson_new ();
+ bson_append_boolean (query, "f_sync_cursor_tailable", TRUE);
+ bson_finish (query);
+
+ tc = mongo_sync_cursor_new (conn, capped_ns,
+ mongo_sync_cmd_query (conn, capped_ns,
+ MONGO_WIRE_FLAG_QUERY_TAILABLE_CURSOR |
+ MONGO_WIRE_FLAG_QUERY_NO_CURSOR_TIMEOUT,
+ 0, 3, query, NULL));
+
+ sc = mongo_sync_cursor_new (conn, capped_ns,
+ mongo_sync_cmd_query (conn, capped_ns,
+ 0,
+ 0, 3, query, NULL));
+
+ bson_free (query);
+
+ /* Exhaust both queries */
+ for (i = 0; i < 10; i++)
+ {
+ mongo_sync_cursor_next (tc);
+ mongo_sync_cursor_next (sc);
+ }
+
+ data = bson_new ();
+ bson_append_boolean (data, "f_sync_cursor_tailable", TRUE);
+ bson_append_boolean (data, "tailed", TRUE);
+ bson_finish (data);
+
+ mongo_sync_cmd_insert (conn, capped_ns, data, NULL);
+ bson_free (data);
+
+ ok (mongo_sync_cursor_next (tc) == TRUE,
+ "mongo_sync_cursor_next() works after a tailable cursor got new data");
+ ok (mongo_sync_cursor_next (sc) == FALSE,
+ "mongo_sync_cursor_next() fails on a non-tailable cursor");
+
+ data = mongo_sync_cursor_get_data (tc);
+ ok (data != NULL,
+ "mongo_sync_cursor_get_data() works on a tailable cursor");
+ c = bson_find (data, "tailed");
+ bson_cursor_get_boolean (c, &tailed);
+ ok (tailed == TRUE,
+ "We got the appropriate data back!");
+ bson_cursor_free (c);
+
+ mongo_sync_cursor_free (sc);
+ mongo_sync_cursor_free (tc);
+
+ mongo_sync_cmd_drop (conn, config.db, capped_coll);
+ g_free (capped_ns);
+ g_free (capped_coll);
+ mongo_sync_disconnect (conn);
+}
+
+RUN_NET_TEST (6, func_mongo_sync_cursor_tailable);
diff --git a/tests/func/mongo/sync-gridfs-chunk/f_sync_gridfs_chunk.c b/tests/func/mongo/sync-gridfs-chunk/f_sync_gridfs_chunk.c
new file mode 100644
index 0000000..cac6e28
--- /dev/null
+++ b/tests/func/mongo/sync-gridfs-chunk/f_sync_gridfs_chunk.c
@@ -0,0 +1,499 @@
+#include "test.h"
+#include "mongo.h"
+
+#define FILE_SIZE 1024 * 1024 + 12345
+
+static guint8 noname_oid[12];
+static guint8 named_oid[12];
+static guint8 binsub_oid[12];
+
/* Upload the same ~1MiB buffer to GridFS twice: once with explicit
 * metadata (filename and a pre-generated _id), once with no metadata
 * at all.  The resulting ObjectIds are stashed in the file-scope
 * named_oid / noname_oid globals for the later get/list/remove tests. */
void
test_func_sync_gridfs_put (void)
{
  mongo_sync_connection *conn;
  mongo_sync_gridfs *gfs;
  mongo_sync_gridfs_chunked_file *gfile;
  bson *meta;
  guint8 *data, *oid;
  gchar *oid_s;

  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
  oid = mongo_util_oid_new (1);
  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test", -1,
                     BSON_TYPE_OID, "_id", oid,
                     BSON_TYPE_NONE);
  g_free (oid);
  bson_finish (meta);

  data = g_malloc (FILE_SIZE);
  memset (data, 'x', FILE_SIZE);

  /* Upload with metadata, and remember the file's ObjectId. */
  gfile = mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, meta,
                                                          data, FILE_SIZE);
  ok (gfile != NULL,
      "GridFS file upload (with metadata) works!");
  memcpy (named_oid, mongo_sync_gridfs_file_get_id (gfile), 12);
  oid_s = mongo_util_oid_as_string (named_oid);
  note ("Named file ID : %s\n", oid_s);
  g_free (oid_s);
  mongo_sync_gridfs_chunked_file_free (gfile);

  /* Upload the same data again, without any metadata. */
  gfile = mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, NULL,
                                                          data, FILE_SIZE);
  ok (gfile != NULL,
      "GridFS file upload (w/o metadata) works!");
  memcpy (noname_oid, mongo_sync_gridfs_file_get_id (gfile), 12);
  oid_s = mongo_util_oid_as_string (noname_oid);
  note ("Noname file ID: %s\n", oid_s);
  g_free (oid_s);
  mongo_sync_gridfs_chunked_file_free (gfile);

  g_free (data);
  bson_free (meta);
  mongo_sync_gridfs_free (gfs, TRUE);
}
+
/* Seed the GridFS .files collection with broken metadata documents
 * (missing fields, wrong types) plus one valid-looking entry that has
 * no chunks.  Each is tagged with a "my-id" marker so that
 * test_func_sync_gridfs_get_invalid() below can look them up and
 * expect the library to reject them. */
void
test_func_sync_gridfs_put_invalid (void)
{
  mongo_sync_connection *conn;
  bson *meta;
  gchar *ns;

  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
  ns = g_strconcat (config.gfs_prefix, ".files", NULL);

  /* Insert metadata without any of the required fields but ID. */
  meta = bson_build (BSON_TYPE_STRING, "my-id", "id-only", -1,
                     BSON_TYPE_NONE);
  bson_finish (meta);

  mongo_sync_cmd_insert (conn, ns, meta, NULL);
  bson_free (meta);

  /* Insert metadata with an ID that's not an ObjectID. */
  meta = bson_build (BSON_TYPE_STRING, "_id", "I'm a teapot", -1,
                     BSON_TYPE_STRING, "my-id", "string-id", -1,
                     BSON_TYPE_NONE);
  bson_finish (meta);

  mongo_sync_cmd_insert (conn, ns, meta, NULL);
  bson_free (meta);

  /* Insert metadata with invalid length type. */
  meta = bson_build (BSON_TYPE_DOUBLE, "length", 1.0,
                     BSON_TYPE_STRING, "my-id", "invalid-length", -1,
                     BSON_TYPE_NONE);
  bson_finish (meta);

  mongo_sync_cmd_insert (conn, ns, meta, NULL);
  bson_free (meta);

  /* Insert metadata with invalid chunkSize type. */
  meta = bson_build (BSON_TYPE_INT32, "length", 10,
                     BSON_TYPE_DOUBLE, "chunkSize", 12.5,
                     BSON_TYPE_STRING, "my-id", "invalid-chunkSize", -1,
                     BSON_TYPE_NONE);
  bson_finish (meta);

  mongo_sync_cmd_insert (conn, ns, meta, NULL);
  bson_free (meta);

  /* Insert metadata with invalid uploadDate type. */
  meta = bson_build (BSON_TYPE_INT32, "length", 10,
                     BSON_TYPE_INT32, "chunkSize", 12,
                     BSON_TYPE_STRING, "my-id", "invalid-date", -1,
                     BSON_TYPE_INT32, "uploadDate", 1234,
                     BSON_TYPE_NONE);
  bson_finish (meta);

  mongo_sync_cmd_insert (conn, ns, meta, NULL);
  bson_free (meta);

  /* Insert metadata with invalid md5 type. */
  meta = bson_build (BSON_TYPE_INT32, "length", 32,
                     BSON_TYPE_INT32, "chunkSize", 12,
                     BSON_TYPE_UTC_DATETIME, "uploadDate", (gint64)1234,
                     BSON_TYPE_INT32, "md5", 0,
                     BSON_TYPE_STRING, "my-id", "invalid-md5", -1,
                     BSON_TYPE_NONE);
  bson_finish (meta);

  mongo_sync_cmd_insert (conn, ns, meta, NULL);
  bson_free (meta);

  /* Insert a valid metadata, without chunks. */
  meta = bson_build (BSON_TYPE_INT32, "length", 32,
                     BSON_TYPE_INT32, "chunkSize", 12,
                     BSON_TYPE_UTC_DATETIME, "uploadDate", (gint64)1234,
                     BSON_TYPE_STRING, "md5", "deadbeef", -1,
                     BSON_TYPE_STRING, "my-id", "no-chunks", -1,
                     BSON_TYPE_NONE);
  bson_finish (meta);

  mongo_sync_cmd_insert (conn, ns, meta, NULL);
  bson_free (meta);

  g_free (ns);
  mongo_sync_disconnect (conn);
}
+
/* Look up a single GridFS file by QUERY and check its _id against OID,
 * its length against FILE_SIZE and its chunk size against the GridFS
 * default, then walk its chunks and verify the chunk count (and, when
 * validate_md5 is TRUE, that the chunk sizes sum to the file length).
 *
 * Emits 7 TAP assertions when validate_md5 is TRUE, 6 otherwise.
 *
 * NOTE(review): if the file is not found, f is NULL and the calls
 * below would dereference it -- presumably tolerated because the test
 * run has already failed at that point, but worth confirming. */
void
validate_file (mongo_sync_gridfs *gfs, const bson *query, guint8 *oid,
               gboolean validate_md5)
{
  mongo_sync_gridfs_chunked_file *f;
  mongo_sync_cursor *cursor;
  gint64 n = 0, tsize = 0; /* chunk count and total chunk bytes seen */
  const bson *meta;
  gchar *oid_s;

  f = mongo_sync_gridfs_chunked_find (gfs, query);

  ok (f != NULL,
      "File not found");
  ok (memcmp (mongo_sync_gridfs_file_get_id (f), oid, 12) == 0,
      "File _id matches");
  cmp_ok (mongo_sync_gridfs_file_get_length (f), "==", FILE_SIZE,
          "File length matches");
  cmp_ok (mongo_sync_gridfs_file_get_chunk_size (f), "==",
          mongo_sync_gridfs_get_chunk_size (gfs),
          "File chunk size matches");

  oid_s = mongo_util_oid_as_string (mongo_sync_gridfs_file_get_id (f));
  note ("File info:\n\tid = %s; length = %" G_GINT64_FORMAT "; "
        "chunk_size = %d; date = %" G_GINT64_FORMAT "; "
        "md5 = %s; n = %" G_GINT64_FORMAT "\n",

        oid_s,
        mongo_sync_gridfs_file_get_length (f),
        mongo_sync_gridfs_file_get_chunk_size (f),
        mongo_sync_gridfs_file_get_date (f),
        mongo_sync_gridfs_file_get_md5 (f),
        mongo_sync_gridfs_file_get_chunks (f));
  g_free (oid_s);
  meta = mongo_sync_gridfs_file_get_metadata (f);
  ok (meta != NULL,
      "mongo_sync_gridfs_file_get_metadata() works");

  /* Walk every chunk, accumulating count and total size. */
  cursor = mongo_sync_gridfs_chunked_file_cursor_new (f, 0, 0);
  while (mongo_sync_cursor_next (cursor))
    {
      gint32 size;
      guint8 *data;

      data = mongo_sync_gridfs_chunked_file_cursor_get_chunk (cursor, &size);
      g_free (data);

      tsize += size;
      n++;
    }
  mongo_sync_cursor_free (cursor);

  if (validate_md5)
    cmp_ok (mongo_sync_gridfs_file_get_length (f), "==", tsize,
            "File size matches the sum of its chunks");
  cmp_ok (mongo_sync_gridfs_file_get_chunks (f), "==", n,
          "Number of chunks matches the expected number");

  mongo_sync_gridfs_chunked_file_free (f);
}
+
+void
+test_func_sync_gridfs_get (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_gridfs *gfs;
+ bson *query;
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+ query = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test", -1,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+ validate_file (gfs, query, named_oid, TRUE);
+ bson_free (query);
+
+ query = bson_build (BSON_TYPE_OID, "_id", noname_oid,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+ validate_file (gfs, query, noname_oid, TRUE);
+ bson_free (query);
+
+ mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+void
+test_get_invalid (mongo_sync_gridfs *gfs, gchar *name, gchar *msg)
+{
+ bson *query;
+
+ query = bson_build (BSON_TYPE_STRING, "my-id", name, -1,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+ ok (mongo_sync_gridfs_chunked_find (gfs, query) == NULL, msg);
+ bson_free (query);
+}
+
+void
+test_func_sync_gridfs_get_invalid (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_gridfs *gfs;
+ mongo_sync_cursor *cursor;
+ bson *query;
+ gchar *ns;
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+ test_get_invalid (gfs, "unknown",
+ "mongo_sync_gridfs_chunked_find() should fail when no file "
+ "is found");
+ test_get_invalid (gfs, "id-only",
+ "mongo_sync_gridfs_chunked__find() should fail if the metadata "
+ "is incomplete");
+ test_get_invalid (gfs, "string-id",
+ "mongo_sync_gridfs_chunked__find() should fail if the _id is "
+ "not an ObjectID");
+ test_get_invalid (gfs, "invalid-length",
+ "mongo_sync_gridfs_chunked__find() should fail if length is "
+ "of inappropriate type");
+ test_get_invalid (gfs, "invalid-chunkSize",
+ "mongo_sync_gridfs_chunked__find() should fail if chunkSize is "
+ "of inappropriate type");
+ test_get_invalid (gfs, "invalid-date",
+ "mongo_sync_gridfs_chunked__find() should fail if uploadDate is "
+ "of inappropriate type");
+ test_get_invalid (gfs, "invalid-md5",
+ "mongo_sync_gridfs_chunked__find() should fail if md5 is of "
+ "inappropriate type");
+
+ ns = g_strconcat (config.gfs_prefix, ".files", NULL);
+ query = bson_build (BSON_TYPE_STRING, "my-id", "id-only", -1,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+
+ cursor = mongo_sync_cursor_new (conn, ns,
+ mongo_sync_cmd_query (conn, ns, 0, 0, 0,
+ query, NULL));
+ bson_free (query);
+ mongo_sync_cursor_next (cursor);
+ ok (mongo_sync_gridfs_chunked_file_cursor_get_chunk (cursor, NULL) == NULL,
+ "mongo_sync_gridfs_chunked_file_cursor_get_chunk() should fail with "
+ "invalid data");
+
+ mongo_sync_gridfs_free (gfs, TRUE);
+}
+
/* Exercise mongo_sync_gridfs_list(): with a non-matching query, with a
 * query matching the named file, and with no query at all (a full
 * listing that must contain both files uploaded earlier). */
void
test_func_sync_gridfs_list (void)
{
  mongo_sync_gridfs *gfs;
  bson *query, *data;
  mongo_sync_cursor *cursor;
  bson_cursor *c;
  const gchar *str;
  gboolean found_named = FALSE, found_noname = FALSE;
  const guint8 *oid;

  gfs = mongo_sync_gridfs_new
    (mongo_sync_connect (config.primary_host, config.primary_port, TRUE),
     config.gfs_prefix);

  /* Test list with an invalid query */
  query = bson_build (BSON_TYPE_STRING, "no-such-field",
                      "You're not seeing this field.", -1,
                      BSON_TYPE_NONE);
  bson_finish (query);

  cursor = mongo_sync_gridfs_list (gfs, query);
  ok (cursor == NULL,
      "mongo_sync_gridfs_list() should fail if there query "
      "does not match anything");
  bson_free (query);

  /* Test list with a query */
  query = bson_build (BSON_TYPE_OID, "_id", named_oid,
                      BSON_TYPE_NONE);
  bson_finish (query);

  cursor = mongo_sync_gridfs_list (gfs, query);
  ok (cursor != NULL,
      "mongo_sync_gridfs_list() correctly finds files by query");

  mongo_sync_cursor_next (cursor);
  data = mongo_sync_cursor_get_data (cursor);
  c = bson_find (data, "filename");
  bson_cursor_get_string (c, &str);
  bson_cursor_free (c);

  is (str, "libmongo-test",
      "The listed file is named correctly");
  bson_free (data);
  mongo_sync_cursor_free (cursor);

  bson_free (query);

  /* Test list without a query: walk every file and check that both
     uploaded ObjectIds show up. */
  cursor = mongo_sync_gridfs_list (gfs, NULL);
  while (mongo_sync_cursor_next (cursor))
    {
      data = mongo_sync_cursor_get_data (cursor);

      c = bson_find (data, "_id");
      bson_cursor_get_oid (c, (const guint8 **)&oid);
      bson_cursor_free (c);

      if (memcmp (oid, named_oid, 12) == 0)
        found_named = TRUE;
      if (memcmp (oid, noname_oid, 12) == 0)
        found_noname = TRUE;

      bson_free (data);
    }
  mongo_sync_cursor_free (cursor);

  ok (found_named == TRUE && found_noname == TRUE,
      "mongo_sync_gridfs_list() finds both uploaded files without a query");

  mongo_sync_gridfs_free (gfs, TRUE);
}
+
+void
+test_fync_sync_gridfs_remove (void)
+{
+ mongo_sync_gridfs *gfs;
+ bson *query;
+
+ gfs = mongo_sync_gridfs_new
+ (mongo_sync_connect (config.primary_host, config.primary_port, TRUE),
+ config.gfs_prefix);
+
+ /* Test with a non-matching query */
+ query = bson_build (BSON_TYPE_STRING, "no-such-field",
+ "You're not seeing this field.", -1,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+
+ ok (mongo_sync_gridfs_remove (gfs, query) == FALSE,
+ "mongo_sync_gridfs_remove() should fail if there's nothing to delete.");
+ bson_free (query);
+
+ /* Test with a non-string id */
+ query = bson_build (BSON_TYPE_STRING, "my-id", "string-id", -1,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+
+ ok (mongo_sync_gridfs_remove (gfs, query) == FALSE,
+ "mongo_sync_gridfs_remove() should fail if the file id is not "
+ "an ObjectId");
+ bson_free (query);
+
+ /* Test with a working query */
+ query = bson_build (BSON_TYPE_OID, "_id", named_oid,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+
+ ok (mongo_sync_gridfs_remove (gfs, query) == TRUE,
+ "mongo_sync_gridfs_remove() works");
+ bson_finish (query);
+
+ mongo_sync_gridfs_free (gfs, TRUE);
+}
+
/* Upload a file, then rewrite its chunk data in place (direct update
 * on the .chunks collection) so the "data" field is stored with
 * BSON_BINARY_SUBTYPE_BINARY: the payload prefixed with its own
 * little-endian length.  No assertions here; the companion
 * _get_binary_subtype test validates the result. */
void
test_func_sync_gridfs_put_binary_subtype (void)
{
  mongo_sync_connection *conn;
  mongo_sync_gridfs *gfs;
  mongo_sync_gridfs_chunked_file *gfile;
  bson *meta, *query, *update;
  guint8 *data;
  gchar *chunk_ns;
  guint32 size = GINT32_TO_LE(FILE_SIZE); /* length prefix for the payload */

  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
  meta = bson_build (BSON_TYPE_STRING, "filename", "binsub-libmongo-test", -1,
                     BSON_TYPE_NONE);
  bson_finish (meta);

  /* Buffer layout: 4-byte LE size, then FILE_SIZE bytes of payload. */
  data = g_malloc (FILE_SIZE + 4);
  memcpy (data, &size, 4);
  memset (data + 4, 'x', FILE_SIZE);

  /* Upload only the payload; the prefixed copy is used below.
     NOTE(review): gfile is not NULL-checked before get_id --
     presumably fine since the plain put test passed already. */
  gfile = mongo_sync_gridfs_chunked_file_new_from_buffer (gfs, meta,
                                                          data + 4, FILE_SIZE);
  memcpy (binsub_oid, mongo_sync_gridfs_file_get_id (gfile), 12);

  query = bson_build (BSON_TYPE_OID, "files_id",
                      mongo_sync_gridfs_file_get_id (gfile),
                      BSON_TYPE_NONE);
  bson_finish (query);

  mongo_sync_gridfs_chunked_file_free (gfile);
  bson_free (meta);

  /* $set the chunk's data field to the length-prefixed buffer, with
     the old-style BINARY subtype. */
  update = bson_build_full (BSON_TYPE_DOCUMENT, "$set", TRUE,
                            bson_build (BSON_TYPE_BINARY, "data",
                                        BSON_BINARY_SUBTYPE_BINARY,
                                        data, FILE_SIZE + 4,
                                        BSON_TYPE_NONE),
                            BSON_TYPE_NONE);
  bson_finish (update);
  g_free (data);

  chunk_ns = g_strconcat (config.gfs_prefix, ".chunks", NULL);
  mongo_sync_cmd_update (conn, chunk_ns, MONGO_WIRE_FLAG_UPDATE_UPSERT,
                         query, update);

  bson_free (query);
  bson_free (update);
  g_free (chunk_ns);

  mongo_sync_gridfs_free (gfs, TRUE);
}
+
+void
+test_func_sync_gridfs_get_binary_subtype (void)
+{
+ mongo_sync_connection *conn;
+ mongo_sync_gridfs *gfs;
+ bson *query;
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port, TRUE);
+ gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+ query = bson_build (BSON_TYPE_STRING, "filename", "binsub-libmongo-test", -1,
+ BSON_TYPE_NONE);
+ bson_finish (query);
+ validate_file (gfs, query, binsub_oid, FALSE);
+ bson_free (query);
+
+ mongo_sync_gridfs_free (gfs, TRUE);
+}
+
/* Test driver: runs the GridFS chunk-level phases in order.  The put
 * phases must precede their matching get/list/remove phases, since
 * they record the uploaded files' ObjectIds in file-scope globals. */
void
test_func_sync_gridfs_chunk (void)
{
  mongo_util_oid_init (0);

  test_func_sync_gridfs_put ();
  test_func_sync_gridfs_get ();
  test_func_sync_gridfs_list ();

  /* NOTE(review): presumably gives the server time to settle between
     rounds -- confirm whether this delay is still needed. */
  sleep (2);

  test_func_sync_gridfs_put_binary_subtype ();
  test_func_sync_gridfs_get_binary_subtype ();

  test_func_sync_gridfs_put_invalid ();
  test_func_sync_gridfs_get_invalid ();

  test_fync_sync_gridfs_remove ();
}

RUN_NET_TEST (37, func_sync_gridfs_chunk);
diff --git a/tests/func/mongo/sync-gridfs-stream/f_sync_gridfs_stream.c b/tests/func/mongo/sync-gridfs-stream/f_sync_gridfs_stream.c
new file mode 100644
index 0000000..a2c3690
--- /dev/null
+++ b/tests/func/mongo/sync-gridfs-stream/f_sync_gridfs_stream.c
@@ -0,0 +1,501 @@
+#include "test.h"
+#include "mongo.h"
+#include "compat.h"
+
+/* Size of the test file and of the per-write buffer.  Parenthesized so
+ * the macros expand safely inside larger expressions (e.g. FILE_SIZE - pos). */
+#define FILE_SIZE (1024 * 1024 + 12345)
+#define BUFFER_SIZE (64 * 1024)
+
+/* MD5 of the data produced by the write test; verified by the read test. */
+gchar *write_md5 = NULL;
+static gint seq = 1;    /* sequence counter for generated OIDs */
+
+/* Opening a GridFS stream must fail if mongo_util_oid_init() has not
+ * been called yet, since no file _id can be generated. */
+void
+test_func_sync_gridfs_stream_without_oid_init (void)
+{
+  mongo_sync_connection *c;
+  mongo_sync_gridfs *grid;
+
+  c = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  grid = mongo_sync_gridfs_new (c, config.gfs_prefix);
+
+  ok (mongo_sync_gridfs_stream_new (grid, NULL) == NULL,
+      "mongo_sync_gridfs_stream_new() fails without mongo_util_oid_init()");
+
+  mongo_sync_gridfs_free (grid, TRUE);
+}
+
+/* Write a FILE_SIZE file to GridFS through the stream API, computing an
+ * MD5 of the data on the fly.  The digest is kept in the global write_md5
+ * so the read test can verify round-trip integrity. */
+void
+test_func_sync_gridfs_stream_write (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  bson *meta;
+  guint8 *data, *oid;
+  gint pos = 0;
+  gint filler = 0;
+  gboolean write_ok = TRUE;
+  GChecksum *chk;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+  oid = mongo_util_oid_new (seq++);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test-stream", -1,
+                     BSON_TYPE_OID, "_id", oid,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+  g_free (oid);
+
+  stream = mongo_sync_gridfs_stream_new (gfs, meta);
+  ok (stream != NULL,
+      "mongo_sync_gridfs_stream_new() works");
+  bson_free (meta);
+
+  data = g_malloc (BUFFER_SIZE);
+
+  chk = g_checksum_new (G_CHECKSUM_MD5);
+
+  /* Write in BUFFER_SIZE slices; each slice is filled with a different
+   * byte value so consecutive regions of the file can be told apart. */
+  while (pos < FILE_SIZE)
+    {
+      gint csize = BUFFER_SIZE;
+
+      /* The final slice may be shorter than a full buffer. */
+      if (csize + pos > FILE_SIZE)
+        csize = FILE_SIZE - pos;
+
+      memset (data, filler++, BUFFER_SIZE);
+
+      /* Only the csize bytes actually written are checksummed. */
+      g_checksum_update (chk, data, csize);
+
+      write_ok &= mongo_sync_gridfs_stream_write (stream, data, csize);
+      pos += csize;
+    }
+  ok (write_ok == TRUE,
+      "All stream_write()s succeeded");
+
+  write_md5 = g_strdup (g_checksum_get_string (chk));
+  g_checksum_free (chk);
+
+  note ("File MD5: %s\n", write_md5);
+
+  g_free (data);
+  ok (mongo_sync_gridfs_stream_close (stream) == TRUE,
+      "mongo_sync_gridfs_stream_close() works");
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Write a one-buffer file via the stream API, then overwrite its chunk
+ * directly so the stored "data" element carries BSON_BINARY_SUBTYPE_BINARY
+ * with a length prefix; the read test later verifies such chunks are
+ * handled. */
+void
+test_func_sync_gridfs_stream_write_binary_subtype (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  bson *meta, *update;
+  guint8 *data, *oid;
+  gboolean write_ok = TRUE;
+  guint32 size = GINT32_TO_LE(BUFFER_SIZE);
+  gchar *ns;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+  oid = mongo_util_oid_new (seq++);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test-stream-bintype", -1,
+                     BSON_TYPE_OID, "_id", oid,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_new (gfs, meta);
+  ok (stream != NULL,
+      "mongo_sync_gridfs_stream_new() works");
+  bson_free (meta);
+
+  /* The buffer holds a 4-byte little-endian length followed by the
+   * payload; only the payload (data + 4) goes through the stream API. */
+  data = g_malloc (BUFFER_SIZE + 4);
+  memcpy (data, &size, 4);
+  memset (data + 4, 'x', BUFFER_SIZE);
+  write_ok = mongo_sync_gridfs_stream_write (stream, data + 4, BUFFER_SIZE);
+  ok (write_ok == TRUE,
+      "All stream_write()s succeeded");
+
+  ok (mongo_sync_gridfs_stream_close (stream) == TRUE,
+      "mongo_sync_gridfs_stream_close() works");
+
+  meta = bson_build (BSON_TYPE_OID, "files_id", oid,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  /* Replace the stored chunk, bypassing the GridFS API, with the
+   * length-prefixed buffer under the BINARY subtype. */
+  update = bson_build_full (BSON_TYPE_DOCUMENT, "$set", TRUE,
+                            bson_build (BSON_TYPE_BINARY, "data",
+                                        BSON_BINARY_SUBTYPE_BINARY,
+                                        data, BUFFER_SIZE + 4,
+                                        BSON_TYPE_NONE),
+                            BSON_TYPE_NONE);
+  bson_finish (update);
+  g_free (data);
+
+  ns = g_strconcat (config.gfs_prefix, ".chunks", NULL);
+  mongo_sync_cmd_update (conn, ns, MONGO_WIRE_FLAG_UPDATE_UPSERT,
+                         meta, update);
+  bson_free (meta);
+  bson_free (update);
+  g_free (ns);
+  g_free (oid);
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Seed the files collection with various invalid metadata documents,
+ * bypassing the GridFS API.  Each document is tagged with a "my-id"
+ * marker so test_func_sync_gridfs_stream_read_invalid() can find it. */
+void
+test_func_sync_gridfs_stream_write_invalid (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  bson *meta;
+  gchar *ns;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+  ns = g_strconcat (config.gfs_prefix, ".files", NULL);
+
+  /* Try to write a file with a custom, non-OID _id; the API must reject it. */
+  meta = bson_build (BSON_TYPE_STRING, "filename", "lmc-invalid-id", -1,
+                     BSON_TYPE_STRING, "_id", "Short and stout", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_new (gfs, meta);
+  ok (stream == NULL,
+      "mongo_sync_gridfs_stream_new() should fail if meta has an invalid _id");
+  bson_free (meta);
+
+  /* Write a file with a non-OID _id, bypassing the GridFS API. */
+  meta = bson_build (BSON_TYPE_STRING, "_id", "Short and stout", -1,
+                     BSON_TYPE_STRING, "my-id", "stream:string-id", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  /* Insert metadata with invalid length type (double instead of int). */
+  meta = bson_build (BSON_TYPE_DOUBLE, "length", 1.0,
+                     BSON_TYPE_STRING, "my-id", "stream:invalid-length", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  /* Insert metadata with invalid chunkSize type. */
+  meta = bson_build (BSON_TYPE_INT32, "length", 10,
+                     BSON_TYPE_DOUBLE, "chunkSize", 12.5,
+                     BSON_TYPE_STRING, "my-id", "stream:invalid-chunkSize", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  /* Insert a valid metadata document, but with no chunks behind it. */
+  meta = bson_build (BSON_TYPE_INT32, "length", 32,
+                     BSON_TYPE_INT32, "chunkSize", 12,
+                     BSON_TYPE_UTC_DATETIME, "uploadDate", (gint64)1234,
+                     BSON_TYPE_STRING, "md5", "deadbeef", -1,
+                     BSON_TYPE_STRING, "my-id", "stream:no-chunks", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  mongo_sync_cmd_insert (conn, ns, meta, NULL);
+  bson_free (meta);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Read back the file written by test_func_sync_gridfs_stream_write() and
+ * verify both its total length and its MD5 against write_md5. */
+void
+test_func_sync_gridfs_stream_read (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  guint8 data[12345];
+  gint64 pos = 0;
+  bson *meta;
+
+  GChecksum *chk;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test-stream", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  ok (stream != NULL,
+      "mongo_sync_gridfs_stream_find() works");
+  bson_free (meta);
+
+  chk = g_checksum_new (G_CHECKSUM_MD5);
+
+  /* Read in odd-sized (12345 byte) slices to exercise reads that do not
+   * align with chunk boundaries. */
+  while (pos < FILE_SIZE)
+    {
+      gint64 r;
+
+      r = mongo_sync_gridfs_stream_read (stream, data, sizeof (data));
+      if (r == -1)
+        break;
+
+      g_checksum_update (chk, data, r);
+      pos += r;
+    }
+
+  cmp_ok (pos, "==", FILE_SIZE,
+          "mongo_sync_gridfs_stream_read() works");
+  is (g_checksum_get_string (chk), write_md5,
+      "md5sums match");
+
+  g_checksum_free (chk);
+  ok (mongo_sync_gridfs_stream_close (stream) == TRUE,
+      "mongo_sync_gridfs_stream_close() works");
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Read back the file whose chunk was rewritten with the BINARY subtype
+ * (length-prefixed) and check the full payload is still readable. */
+void
+test_func_sync_gridfs_stream_read_binary_subtype (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  guint8 *data;
+  gint64 r;
+  bson *meta;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test-stream-bintype", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  ok (stream != NULL,
+      "mongo_sync_gridfs_stream_find() works");
+  bson_free (meta);
+
+  data = g_malloc (BUFFER_SIZE);
+  r = mongo_sync_gridfs_stream_read (stream, data, BUFFER_SIZE);
+  cmp_ok (r, "==", BUFFER_SIZE,
+          "mongo_sync_gridfs_stream_read() works");
+  g_free (data);    /* Fix: the read buffer was leaked. */
+
+  ok (mongo_sync_gridfs_stream_close (stream) == TRUE,
+      "mongo_sync_gridfs_stream_close() works");
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Verify which file-metadata accessors work on stream handles: only the
+ * _id is available; md5, date and metadata accessors must fail. */
+void
+test_func_sync_gridfs_stream_meta (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  bson *meta;
+  const guint8 *id;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test-stream", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  bson_free (meta);
+
+  id = mongo_sync_gridfs_file_get_id (stream);
+  ok (id != NULL,
+      "mongo_sync_gridfs_file_get_id() works on streams");
+
+  ok (mongo_sync_gridfs_file_get_md5 (stream) == NULL,
+      "mongo_sync_gridfs_file_get_md5() fails on streams");
+  ok (mongo_sync_gridfs_file_get_date (stream) == -1,
+      "mongo_sync_gridfs_file_get_date() fails on streams");
+  ok (mongo_sync_gridfs_file_get_metadata (stream) == NULL,
+      "mongo_sync_gridfs_file_get_metadata() fails on streams");
+
+  mongo_sync_gridfs_stream_close (stream);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Open each of the invalid metadata documents seeded by
+ * test_func_sync_gridfs_stream_write_invalid() and verify the stream API
+ * rejects them (or fails on read, for the chunk-less file). */
+void
+test_func_sync_gridfs_stream_read_invalid (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  guint8 data[1245];
+  gint64 r;
+  bson *meta;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+
+  /* File whose _id is a string, not an OID. */
+  meta = bson_build (BSON_TYPE_STRING, "my-id", "stream:string-id", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  ok (stream == NULL,
+      "mongo_sync_gridfs_stream_find() should fail if _id is non-OID");
+  bson_free (meta);
+
+  /* File whose "length" field has the wrong BSON type. */
+  meta = bson_build (BSON_TYPE_STRING, "my-id", "stream:invalid-length", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  ok (stream == NULL,
+      "mongo_sync_gridfs_stream_find() should fail with invalid metadata");
+  bson_free (meta);
+
+  /* File whose "chunkSize" field has the wrong BSON type. */
+  meta = bson_build (BSON_TYPE_STRING, "my-id", "stream:invalid-chunkSize", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  ok (stream == NULL,
+      "mongo_sync_gridfs_stream_find() should fail with invalid metadata");
+  bson_free (meta);
+
+  /* Valid metadata but no chunk documents: find succeeds, read must fail. */
+  meta = bson_build (BSON_TYPE_STRING, "my-id", "stream:no-chunks", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  ok (stream != NULL,
+      "mongo_sync_gridfs_stream_find() works [stream:no-chunks]");
+  bson_free (meta);
+
+  r = mongo_sync_gridfs_stream_read (stream, data, sizeof (data));
+  cmp_ok (r, "==", -1,
+          "Reading from a chunk-less file should fail");
+
+  mongo_sync_gridfs_stream_close (stream);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Exercise mongo_sync_gridfs_stream_seek() with all three whence modes,
+ * then re-read from the start and compare against the first read to
+ * prove the seek really repositioned the stream. */
+void
+test_func_sync_gridfs_stream_seek (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  bson *meta;
+  guint8 *chunk1, *chunk2, *chunk3;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+  meta = bson_build (BSON_TYPE_STRING, "filename", "libmongo-test-stream", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  bson_free (meta);
+
+  chunk1 = g_malloc (300 * 1024);
+  chunk2 = g_malloc (300 * 1024);
+  chunk3 = g_malloc (300 * 1024);
+
+  cmp_ok (mongo_sync_gridfs_stream_read (stream, chunk1, 300 * 1024), "==",
+          300 * 1024,
+          "reading the first chunk works");
+  cmp_ok (mongo_sync_gridfs_stream_read (stream, chunk2, 300 * 1024), "==",
+          300 * 1024,
+          "reading the second chunk works");
+  /* The writer fills successive slices with different bytes, so two
+   * consecutive 300k reads must differ. */
+  ok (memcmp (chunk1, chunk2, 300 * 1024) != 0,
+      "The two chunks differ, as they should");
+
+  ok (mongo_sync_gridfs_stream_seek (stream, 0, SEEK_END) == TRUE,
+      "mongo_sync_gridfs_stream_seek() works, with SEEK_END");
+  cmp_ok (stream->file.offset, "==", stream->file.length,
+          "mongo_sync_gridfs_stream_seek() can seek to the end");
+
+  ok (mongo_sync_gridfs_stream_seek (stream, 1, SEEK_SET) == TRUE,
+      "mongo_sync_gridfs_stream_seek() works, with SEEK_SET");
+  cmp_ok (stream->file.offset, "==", 1,
+          "mongo_sync_gridfs_stream_seek()'s SEEK_SET works");
+  /* Seeking to the position we are already at must also succeed. */
+  ok (mongo_sync_gridfs_stream_seek (stream, 1, SEEK_SET) == TRUE,
+      "mongo_sync_gridfs_stream_seek() works, with SEEK_SET");
+
+  ok (mongo_sync_gridfs_stream_seek (stream, -1, SEEK_CUR) == TRUE,
+      "mongo_sync_gridfs_stream_seek() works, with SEEK_CUR");
+  cmp_ok (stream->file.offset, "==", 0,
+          "mongo_sync_gridfs_stream_seek()'s SEEK_CUR works");
+  ok (mongo_sync_gridfs_stream_seek (stream, 0, SEEK_CUR) == TRUE,
+      "mongo_sync_gridfs_stream_seek() works, with SEEK_CUR");
+
+  cmp_ok (mongo_sync_gridfs_stream_read (stream, chunk3, 300 * 1024), "==",
+          300 * 1024,
+          "reading after seeking works");
+
+  ok (memcmp (chunk1, chunk3, 300 * 1024) == 0,
+      "After seeking, we're at the beginning");
+
+  mongo_sync_gridfs_stream_close (stream);
+  g_free (chunk3);
+  g_free (chunk2);
+  g_free (chunk1);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Seeking within a file that has metadata but no chunk documents
+ * (seeded as "stream:no-chunks") must fail. */
+void
+test_func_sync_gridfs_stream_seek_invalid (void)
+{
+  mongo_sync_connection *conn;
+  mongo_sync_gridfs *gfs;
+  mongo_sync_gridfs_stream *stream;
+  bson *meta;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE);
+  gfs = mongo_sync_gridfs_new (conn, config.gfs_prefix);
+  meta = bson_build (BSON_TYPE_STRING, "my-id", "stream:no-chunks", -1,
+                     BSON_TYPE_NONE);
+  bson_finish (meta);
+
+  stream = mongo_sync_gridfs_stream_find (gfs, meta);
+  bson_free (meta);
+
+  ok (mongo_sync_gridfs_stream_seek (stream, 1, SEEK_SET) == FALSE,
+      "mongo_sync_gridfs_stream_seek() should fail with no chunks");
+
+  mongo_sync_gridfs_stream_close (stream);
+
+  mongo_sync_gridfs_free (gfs, TRUE);
+}
+
+/* Driver for the GridFS stream tests.  Ordering matters: the
+ * no-OID-init case must run before mongo_util_oid_init(), and the write
+ * tests must precede the read/seek tests that consume their files. */
+void
+test_func_sync_gridfs_stream (void)
+{
+  test_func_sync_gridfs_stream_without_oid_init ();
+
+  mongo_util_oid_init (0);
+
+  test_func_sync_gridfs_stream_write ();
+  test_func_sync_gridfs_stream_write_binary_subtype ();
+  test_func_sync_gridfs_stream_write_invalid ();
+  test_func_sync_gridfs_stream_read ();
+  test_func_sync_gridfs_stream_read_binary_subtype ();
+  test_func_sync_gridfs_stream_read_invalid ();
+  test_func_sync_gridfs_stream_seek ();
+  test_func_sync_gridfs_stream_seek_invalid ();
+  test_func_sync_gridfs_stream_meta ();
+
+  /* Release the MD5 digest saved by the write test. */
+  g_free (write_md5);
+}
+
+RUN_NET_TEST (38, func_sync_gridfs_stream);
diff --git a/tests/func/mongo/sync-pool/f_sync_pool.c b/tests/func/mongo/sync-pool/f_sync_pool.c
new file mode 100644
index 0000000..28a2497
--- /dev/null
+++ b/tests/func/mongo/sync-pool/f_sync_pool.c
@@ -0,0 +1,169 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+
+#include "libmongo-private.h"
+
+/* Pool tests that need a configured secondary: mixed master/slave pools,
+ * pick/return semantics, and master-vs-secondary role checks.  The whole
+ * body is skipped when no secondary host is configured. */
+void
+test_func_mongo_sync_pool_secondary (void)
+{
+  mongo_sync_pool *pool;
+  /* 10 slave slots plus room for the terminating NULL from pick(). */
+  mongo_sync_pool_connection *conn[11], *m, *s1, *s2, *t;
+  gint i = 0;
+  gboolean ret = TRUE;
+
+  skip (!config.secondary_host, 14,
+        "Secondary server not configured");
+
+  ok (mongo_sync_pool_new (config.secondary_host,
+                           config.secondary_port, 1, 10) == NULL,
+      "mongo_sync_pool_new() should fail when connecting to a secondary");
+
+  /* 1 master + 10 slave connections against the primary. */
+  pool = mongo_sync_pool_new (config.primary_host,
+                              config.primary_port, 1, 10);
+  ok (pool != NULL,
+      "mongo_sync_pool_new() works with slaves too");
+
+  m = mongo_sync_pool_pick (pool, TRUE);
+  ok (m != NULL,
+      "mongo_sync_pool_pick() can pick a master from a mixed pool");
+  ok (mongo_sync_pool_pick (pool, TRUE) == NULL,
+      "mongo_sync_pool_pick() should fail if there are no more masters, and "
+      "a master was requested");
+
+  /* Drain all slave connections. */
+  while ((conn[i] = mongo_sync_pool_pick (pool, FALSE)) != NULL)
+    i++;
+  cmp_ok (i, "==", 10,
+          "Successfully connect to secondaries on 10 sockets");
+  ok (mongo_sync_pool_pick (pool, FALSE) == NULL,
+      "mongo_sync_pool_pick() should fail if there are no free connections");
+
+  ok (mongo_sync_pool_return (pool, m) == TRUE,
+      "Returning the master to the pool works");
+
+  /* With every slave taken, a slave request falls back to the master. */
+  m = mongo_sync_pool_pick (pool, FALSE);
+  ok (m != NULL,
+      "mongo_sync_pool_pick() will return a master, if no more slaves are "
+      "available");
+
+  for (i = 0; i < 10; i++)
+    ret = ret && mongo_sync_pool_return (pool, conn[i]);
+
+  ok (ret == TRUE,
+      "mongo_sync_pool_return() works when returning slaves");
+
+  mongo_sync_pool_return (pool, m);
+
+  /* Forge an out-of-range pool ID to make return() fail with ERANGE. */
+  t = mongo_sync_pool_pick (pool, FALSE);
+  t->pool_id = 4242;
+
+  errno = 0;
+  ret = mongo_sync_pool_return (pool, t);
+  ok (ret == FALSE && errno == ERANGE,
+      "mongo_sync_pool_return() should fail if the connection ID is "
+      "out of range");
+
+  /* Test whether masters and slaves are different. */
+  m = mongo_sync_pool_pick (pool, TRUE);
+  s1 = mongo_sync_pool_pick (pool, FALSE);
+  s2 = mongo_sync_pool_pick (pool, FALSE);
+
+  ok (m != s1 && m != s2,
+      "Picked master and slaves are different");
+
+  ok (mongo_sync_cmd_is_master ((mongo_sync_connection *)m) == TRUE,
+      "Picked master is, indeed, a master");
+  ok (mongo_sync_cmd_is_master ((mongo_sync_connection *)s1) == FALSE,
+      "Picked secondary is a secondary");
+  ok (mongo_sync_cmd_is_master ((mongo_sync_connection *)s2) == FALSE,
+      "Picked secondary is a secondary");
+
+  mongo_sync_pool_free (pool);
+
+  endskip;
+}
+
+/* Basic connection-pool tests: failing connect, pick/return limits, and
+ * running commands over two distinct picked connections. */
+void
+test_func_mongo_sync_pool (void)
+{
+  mongo_sync_pool *pool;
+  /* 10 master slots plus room for the terminating NULL from pick(). */
+  mongo_sync_pool_connection *conn[11], *t;
+  gint c = 0;
+  gboolean ret = TRUE;
+  bson *b;
+  mongo_packet *p;
+
+  /*
+   * First we test that connecting to an invalid host fails.
+   */
+  pool = mongo_sync_pool_new ("invalid.example.com",
+                              config.primary_port, 10, 10);
+  ok (pool == NULL,
+      "mongo_sync_pool_new() should fail with an invalid host");
+
+  /*
+   * Next, we test whether the basics work, like connecting, picking
+   * & returning.
+   */
+
+  /* 10 master connections, no slaves. */
+  pool = mongo_sync_pool_new (config.primary_host,
+                              config.primary_port,
+                              10, 0);
+
+  ok (pool != NULL,
+      "mongo_sync_pool_new() works");
+
+  while ((conn[c] = mongo_sync_pool_pick (pool, TRUE)) != NULL)
+    c++;
+  cmp_ok (c, "==", 10,
+          "Successfully connect to the master on 10 sockets");
+
+  t = mongo_sync_pool_pick (pool, TRUE);
+  ok (t == NULL && errno == EAGAIN,
+      "Connected to the master only on 10 sockets");
+
+  for (c = 0; c < 10; c++)
+    ret = ret && mongo_sync_pool_return (pool, conn[c]);
+  ok (ret == TRUE,
+      "mongo_sync_pool_return() works");
+
+  t = mongo_sync_pool_pick (pool, TRUE);
+  ok (t != NULL,
+      "mongo_sync_pool_pick() works after returning connections");
+  mongo_sync_pool_return (pool, t);
+
+  /*
+   * Then we test whether we can perform commands on random
+   * connections.
+   */
+  conn[0] = mongo_sync_pool_pick (pool, TRUE);
+  conn[1] = mongo_sync_pool_pick (pool, TRUE);
+
+  ok (conn[0] != conn[1],
+      "Two picked connections are not the same");
+
+  b = bson_build (BSON_TYPE_STRING, "test-name", __FILE__, -1,
+                  BSON_TYPE_INT32, "i32", 1984,
+                  BSON_TYPE_NONE);
+  bson_finish (b);
+
+  ok (mongo_sync_cmd_insert ((mongo_sync_connection *)conn[0],
+                             config.ns, b, NULL) == TRUE,
+      "mongo_sync_cmd_insert() works on a picked connection");
+
+  /* The insert must be visible through a different pool connection. */
+  p = mongo_sync_cmd_query ((mongo_sync_connection *)conn[1],
+                            config.ns, 0, 0, 1, b, NULL);
+  ok (p != NULL,
+      "mongo_sync_cmd_query() works on a different picked connection");
+  mongo_wire_packet_free (p);
+  bson_free (b);    /* Fix: the test document was leaked. */
+
+  mongo_sync_pool_free (pool);
+
+  /*
+   * Test pools with a secondary aswell.
+   */
+  test_func_mongo_sync_pool_secondary ();
+}
+
+RUN_NET_TEST (23, func_mongo_sync_pool);
diff --git a/tests/func/mongo/sync/f_sync_auto_reauth.c b/tests/func/mongo/sync/f_sync_auto_reauth.c
new file mode 100644
index 0000000..477dd25
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_auto_reauth.c
@@ -0,0 +1,58 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <sys/socket.h>
+
+#include "libmongo-private.h"
+
+
+/*
+ * This test requires that the "lmcUser" user (password "lmcPass") has
+ * RW access to the test db. It must be set up prior to running this
+ * test.
+ */
+/* Verify that credentials are replayed automatically when an
+ * auto-reconnecting connection is re-established after a broken link. */
+void
+test_func_mongo_sync_auto_reauth (void)
+{
+  mongo_sync_connection *conn;
+  bson *b;
+
+  b = bson_new ();
+  bson_append_int32 (b, "f_sync_auto_reauth", 1);
+  bson_finish (b);
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port,
+                             TRUE);
+
+  /* Safe mode makes insert failures visible immediately. */
+  mongo_sync_conn_set_safe_mode (conn, TRUE);
+
+  /* If inserting succeeds without auth, the server has no auth enabled. */
+  skip (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE, 3,
+        "Authentication not configured.");
+
+  skip (mongo_sync_cmd_authenticate (conn, config.db, "lmcUser", "lmcPass") == FALSE, 3,
+        "Authentication environment not set up for testing.");
+
+  ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE,
+      "Inserting works after authentication.");
+
+  /* Sever the link to force a reconnect on the next command. */
+  shutdown (conn->super.fd, SHUT_RDWR);
+  sleep (1);
+
+  ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == FALSE,
+      "Inserting fails with auto-reconnect turned off, and a broken "
+      "connection");
+
+  mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+  ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE,
+      "Inserting works with auto-reconnect turned on, and auto-auth, "
+      "and a broken connection.");
+
+  endskip;
+  endskip;
+
+  mongo_sync_disconnect (conn);
+  bson_free (b);    /* Fix: the test document was leaked. */
+}
+
+RUN_NET_TEST (3, func_mongo_sync_auto_reauth);
diff --git a/tests/func/mongo/sync/f_sync_auto_reconnect.c b/tests/func/mongo/sync/f_sync_auto_reconnect.c
new file mode 100644
index 0000000..45ec28d
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_auto_reconnect.c
@@ -0,0 +1,61 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <sys/socket.h>
+
+#include "libmongo-private.h"
+
+/* Verify that inserts and queries fail over a severed link with
+ * auto-reconnect off, and transparently reconnect with it on. */
+void
+test_func_mongo_sync_auto_reconnect (void)
+{
+  mongo_sync_connection *conn;
+  bson *b;
+  mongo_packet *p;
+
+  b = bson_new ();
+  bson_append_int32 (b, "f_sync_auto_reconnect", 1);
+  bson_finish (b);
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port,
+                             TRUE);
+  ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE);
+
+  /* Sever the link to force a failure on the next command. */
+  shutdown (conn->super.fd, SHUT_RDWR);
+  sleep (1);
+
+  ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == FALSE,
+      "Inserting fails with auto-reconnect turned off, and a broken "
+      "connection");
+
+  mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+  ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE,
+      "Inserting works with auto-reconnect turned on, and a broken "
+      "connection");
+
+  mongo_sync_conn_set_auto_reconnect (conn, FALSE);
+
+  shutdown (conn->super.fd, SHUT_RDWR);
+  sleep (1);
+
+  ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == FALSE,
+      "Turning off auto-reconnect works");
+
+  shutdown (conn->super.fd, SHUT_RDWR);
+  sleep (1);
+
+  p = mongo_sync_cmd_query (conn, config.ns, 0, 0, 1, b, NULL);
+  ok (p == NULL,
+      "Query fails with auto-reconnect turned off");
+
+  mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+  p = mongo_sync_cmd_query (conn, config.ns, 0, 0, 1, b, NULL);
+  ok (p != NULL,
+      "Query does reconnect with auto-reconnect turned on");
+  mongo_wire_packet_free (p);
+
+  mongo_sync_disconnect (conn);
+  bson_free (b);    /* Fix: the test document was leaked. */
+}
+
+RUN_NET_TEST (6, func_mongo_sync_auto_reconnect);
diff --git a/tests/func/mongo/sync/f_sync_auto_reconnect_cache.c b/tests/func/mongo/sync/f_sync_auto_reconnect_cache.c
new file mode 100644
index 0000000..d69ea5d
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_auto_reconnect_cache.c
@@ -0,0 +1,107 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <sys/socket.h>
+
+#include "libmongo-private.h"
+
+/* Exercise auto-reconnect through a recovery cache: the cache is seeded
+ * with the primary, and must be refilled when the connection is torn
+ * down via mongo_sync_disconnect(). */
+void
+test_func_mongo_sync_auto_reconnect_cache (void)
+{
+  mongo_sync_conn_recovery_cache *cache;
+  mongo_sync_connection *conn;
+  bson *b;
+  mongo_packet *p;
+  const gchar *error_msg;
+
+  b = bson_new ();
+  bson_append_int32 (b, "f_sync_auto_reconnect", 1);
+  bson_finish (b);
+
+  cache = mongo_sync_conn_recovery_cache_new ();
+
+  mongo_sync_conn_recovery_cache_seed_add (cache,
+                                           config.primary_host,
+                                           config.primary_port);
+
+  conn = mongo_sync_connect_recovery_cache (cache,
+                                            TRUE);
+
+  ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE);
+
+  /* Sever the link to force a failure on the next command. */
+  shutdown (conn->super.fd, SHUT_RDWR);
+  sleep (1);
+
+  ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == FALSE,
+      "Inserting fails with auto-reconnect turned off, and a broken "
+      "connection");
+
+  error_msg = mongo_sync_conn_get_last_error (conn);
+
+  ok (error_msg != NULL, "We have an error msg when insert fails.");
+
+  mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+
+  ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE,
+      "Inserting works with auto-reconnect turned on, and a broken "
+      "connection");
+
+  error_msg = mongo_sync_conn_get_last_error (conn);
+
+  ok (error_msg == NULL,
+      "After a succesful insert we shouldn't have an error msg.");
+
+  mongo_sync_conn_set_auto_reconnect (conn, FALSE);
+
+  shutdown (conn->super.fd, SHUT_RDWR);
+  sleep (1);
+
+  ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == FALSE,
+      "Turning off auto-reconnect works");
+
+  /* NOTE(review): when this skip triggers, conn is never disconnected
+   * and leaks — confirm whether the skip path needs its own cleanup. */
+  skip (!config.secondary_host, 7,
+        "Secondary host not set up");
+
+  shutdown (conn->super.fd, SHUT_RDWR);
+  sleep (1);
+
+  p = mongo_sync_cmd_query (conn, config.ns, 0, 0, 1, b, NULL);
+  ok (p == NULL,
+      "Query fails with auto-reconnect turned off");
+
+  error_msg = mongo_sync_conn_get_last_error (conn);
+  ok (error_msg != NULL, "We have an error msg after a failure query.");
+
+  mongo_sync_conn_set_auto_reconnect (conn, TRUE);
+  p = mongo_sync_cmd_query (conn, config.ns, 0, 0, 1, b, NULL);
+  ok (p != NULL,
+      "Query does reconnect with auto-reconnect turned on");
+
+  ok (mongo_sync_conn_get_last_error (conn) == NULL,
+      "We shouldn't have any error messages after a successful operation.");
+
+  mongo_wire_packet_free (p);
+
+  /* is_master() triggers replica-set discovery on the connection. */
+  mongo_sync_cmd_is_master (conn);
+
+  ok (conn->rs.hosts != NULL,
+      "We have hosts in the connection's replica set.");
+
+  ok (cache->rs.hosts == NULL, "Cache is empty.");
+
+  mongo_sync_disconnect (conn);
+
+  ok (cache->rs.hosts != NULL, "Cache is filled by disconnect()");
+
+  endskip;
+
+  /* Fix: free the cache on both the normal and the skip path (it was
+   * previously freed only inside the skip block), and free the test
+   * document, which was leaked.  The unused primary_addr local was
+   * removed. */
+  mongo_sync_conn_recovery_cache_free (cache);
+  bson_free (b);
+}
+
+RUN_NET_TEST (13, func_mongo_sync_auto_reconnect_cache);
diff --git a/tests/func/mongo/sync/f_sync_conn_seed_add.c b/tests/func/mongo/sync/f_sync_conn_seed_add.c
new file mode 100644
index 0000000..03bcdd2
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_conn_seed_add.c
@@ -0,0 +1,58 @@
+#include "test.h"
+#include <mongo.h>
+
+#include "libmongo-private.h"
+
+/* Verify that reconnecting fails once both the discovered host list and
+ * the seed list are emptied, and succeeds again after re-seeding via
+ * mongo_sync_conn_seed_add(). */
+void
+test_func_mongo_sync_conn_seed_add (void)
+{
+  mongo_sync_connection *conn;
+  GList *l;
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port,
+                             FALSE);
+  /* Close the socket to simulate a dead link before reconnecting. */
+  close (conn->super.fd);
+
+  /* Drop every discovered replica-set host... */
+  l = conn->rs.hosts;
+  while (l)
+    {
+      g_free (l->data);
+      l = g_list_delete_link (l, l);
+    }
+  conn->rs.hosts = NULL;
+
+  /* ...and every configured seed, leaving nothing to reconnect to. */
+  l = conn->rs.seeds;
+  while (l)
+    {
+      g_free (l->data);
+      l = g_list_delete_link (l, l);
+    }
+  conn->rs.seeds = NULL;
+
+  conn = mongo_sync_reconnect (conn, TRUE);
+  ok (conn == NULL,
+      "mongo_sync_reconnect() fails without seeds or discovery");
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port,
+                             FALSE);
+  close (conn->super.fd);
+  l = conn->rs.hosts;
+  while (l)
+    {
+      g_free (l->data);
+      l = g_list_delete_link (l, l);
+    }
+  conn->rs.hosts = NULL;
+
+  ok (mongo_sync_conn_seed_add (conn, config.primary_host,
+                                config.primary_port),
+      "mongo_sync_conn_seed_add() works");
+
+  conn = mongo_sync_reconnect (conn, TRUE);
+  ok (conn != NULL,
+      "mongo_sync_reconnect() works when properly seeded");
+
+  mongo_sync_disconnect (conn);
+}
+
+RUN_NET_TEST (3, func_mongo_sync_conn_seed_add);
diff --git a/tests/func/mongo/sync/f_sync_invalid_getlasterror.c b/tests/func/mongo/sync/f_sync_invalid_getlasterror.c
new file mode 100644
index 0000000..6af227b
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_invalid_getlasterror.c
@@ -0,0 +1,27 @@
+#include "test.h"
+#include <mongo.h>
+#include <errno.h>
+
+/* getLastError() against an absurdly long (invalid) database name must
+ * fail and leave the caller's error pointer untouched. */
+void
+test_func_mongo_sync_invalid_getlasterror (void)
+{
+  mongo_sync_connection *c;
+  gchar *err = NULL;
+  gboolean status;
+
+  c = mongo_sync_connect (config.primary_host, config.primary_port,
+                          TRUE);
+
+  status = mongo_sync_cmd_get_last_error
+    (c,
+     "1234567890123456789012345678901234567890123456789012345678901234567890",
+     &err);
+
+  ok (status == FALSE,
+      "Trying to get the last error from an invalid DB results in an error.");
+  ok (err == NULL,
+      "When getLastError() fails, error remains NULL");
+
+  mongo_sync_disconnect (c);
+}
+
+RUN_NET_TEST (2, func_mongo_sync_invalid_getlasterror);
diff --git a/tests/func/mongo/sync/f_sync_max_insert_size.c b/tests/func/mongo/sync/f_sync_max_insert_size.c
new file mode 100644
index 0000000..9ea5854
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_max_insert_size.c
@@ -0,0 +1,69 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+
+#include "libmongo-private.h"
+
+/* Verify the per-connection max_insert_size limit: batched inserts are
+ * split to fit, and a single document larger than the limit makes both
+ * insert_n() and insert() fail with EMSGSIZE. */
+void
+test_func_mongo_sync_max_insert_size (void)
+{
+  mongo_sync_connection *conn;
+  const bson *docs[10];
+  bson *b1, *b2, *b3;
+
+  b1 = bson_new ();
+  bson_append_string (b1, "func_mongo_sync_max_insert_size", "works", -1);
+
+  bson_finish (b1);
+  b2 = bson_new ();
+  bson_append_int32 (b2, "int32", 1984);
+  bson_finish (b2);
+  /* b3 is an empty document, the smallest possible BSON. */
+  b3 = bson_new ();
+  bson_finish (b3);
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port,
+                             FALSE);
+
+  /*
+   * cmd_insert_n()
+   */
+  /* Limit chosen so all three docs fit only when split into batches. */
+  mongo_sync_conn_set_max_insert_size (conn, bson_size (b1) +
+                                       bson_size (b3) + 1);
+
+  docs[0] = b1;
+  docs[1] = b2;
+  docs[2] = b3;
+
+  ok (mongo_sync_cmd_insert_n (conn, config.ns, 3, docs) == TRUE,
+      "mongo_sync_cmd_insert_n() works with a small max_insert_size");
+
+  /* A 1-byte limit means no document can ever fit. */
+  mongo_sync_conn_set_max_insert_size (conn, 1);
+  errno = 0;
+  ok (mongo_sync_cmd_insert_n (conn, config.ns, 3, docs) == FALSE,
+      "mongo_sync_cmd_insert_n() should fail if any one document is too big");
+  cmp_ok (errno, "==", EMSGSIZE,
+          "errno is set to EMSGSIZE");
+
+  /*
+   * cmd_insert()
+   */
+  mongo_sync_conn_set_max_insert_size (conn, bson_size (b1) +
+                                       bson_size (b3) + 1);
+  ok (mongo_sync_cmd_insert (conn, config.ns, b1, b2, b3, NULL) == TRUE,
+      "mongo_sync_cmd_insert() works with a small max_insert_size");
+
+  mongo_sync_conn_set_max_insert_size (conn, 1);
+  errno = 0;
+  ok (mongo_sync_cmd_insert (conn, config.ns, b1, b2, b3, NULL) == FALSE,
+      "mongo_sync_cmd_insert() should fail if any one document is too big");
+  cmp_ok (errno, "==", EMSGSIZE,
+          "errno is set to EMSGSIZE");
+
+  mongo_sync_disconnect (conn);
+  bson_free (b1);
+  bson_free (b2);
+  bson_free (b3);
+}
+
+RUN_NET_TEST (6, func_mongo_sync_max_insert_size);
diff --git a/tests/func/mongo/sync/f_sync_oidtest.c b/tests/func/mongo/sync/f_sync_oidtest.c
new file mode 100644
index 0000000..2a64692
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_oidtest.c
@@ -0,0 +1,44 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <string.h>
+
+/* Run the server's driverOIDTest command and check the OID we sent is
+ * echoed back unchanged. */
+void
+test_func_mongo_sync_oidtest (void)
+{
+  mongo_sync_connection *conn;
+  bson *boid, *reply = NULL;
+  bson_cursor *c;
+  mongo_packet *p;
+  guint8 *oid;
+  const guint8 *noid;
+
+  mongo_util_oid_init (0);
+
+  oid = mongo_util_oid_new (1);
+  boid = bson_new ();
+  bson_append_oid (boid, "driverOIDTest", oid);
+  bson_finish (boid);
+
+  conn = mongo_sync_connect (config.primary_host, config.primary_port,
+                             FALSE);
+
+  p = mongo_sync_cmd_custom (conn, config.db, boid);
+  ok (p != NULL,
+      "driverOIDTest(OID) custom command works");
+  mongo_wire_reply_packet_get_nth_document (p, 1, &reply);
+  bson_finish (reply);
+
+  c = bson_find (reply, "oid");
+  bson_cursor_get_oid (c, &noid);
+  /* OIDs are 12 bytes; the echoed OID must match byte-for-byte. */
+  ok (memcmp (oid, noid, 12) == 0,
+      "driverOIDTest(OID) returns the same OID");
+  bson_cursor_free (c);
+
+  mongo_sync_disconnect (conn);
+  mongo_wire_packet_free (p);
+  bson_free (boid);
+  bson_free (reply);
+  g_free (oid);    /* Fix: the generated OID was leaked. */
+}
+
+RUN_NET_TEST (2, func_mongo_sync_oidtest);
diff --git a/tests/func/mongo/sync/f_sync_safe_mode.c b/tests/func/mongo/sync/f_sync_safe_mode.c
new file mode 100644
index 0000000..e312c2f
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_safe_mode.c
@@ -0,0 +1,112 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <string.h>
+
+#include "libmongo-private.h"
+
+void
+test_func_mongo_sync_safe_mode_basics (void)
+{ /* Exercises safe-mode on/off behaviour for bulk inserts and bogus custom commands. */
+ mongo_sync_connection *conn;
+ const bson *docs[10]; /* only the first 4 slots are used below */
+ bson *b1, *b2, *b3, *b4, *cmd;
+ mongo_packet *p;
+ gchar *error;
+
+ mongo_util_oid_init (0);
+
+ b1 = bson_new ();
+ bson_append_string (b1, "func_mongo_sync_safe_mode", "works", -1); /* -1: library computes string length */
+ bson_finish (b1);
+
+ b2 = bson_new ();
+ bson_append_int32 (b2, "int32", 1984);
+ bson_finish (b2);
+
+ b3 = test_bson_generate_full (); /* test helper; presumably yields docs the server rejects — TODO confirm */
+ b4 = test_bson_generate_full ();
+
+ docs[0] = b1;
+ docs[1] = b2;
+ docs[2] = b3;
+ docs[3] = b4;
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ FALSE);
+
+ /* Inserts: the failure is only reported back to the caller when safe mode is on. */
+ mongo_sync_conn_set_safe_mode (conn, FALSE);
+ ok (mongo_sync_cmd_insert_n (conn, config.ns, 4, docs) == TRUE,
+ "mongo_sync_cmd_insert_n() should not fail with safe mode off");
+
+ mongo_sync_conn_set_safe_mode (conn, TRUE);
+ ok (mongo_sync_cmd_insert_n (conn, config.ns, 4, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() should fail with safe mode on");
+
+ /* Custom commands: an unknown command must fail identically in both modes. */
+ cmd = bson_new ();
+ bson_append_int32 (cmd, "bogusCommand", 1);
+ bson_finish (cmd);
+
+ mongo_sync_cmd_reset_error (conn, config.db); /* clear any stale last-error first */
+ mongo_sync_conn_set_safe_mode (conn, FALSE);
+ p = mongo_sync_cmd_custom (conn, config.db, cmd);
+ mongo_sync_cmd_get_last_error (conn, config.db, &error); /* NOTE(review): error not g_free()d here — the _cache variant frees it */
+ ok (p == NULL && strcmp (error, "no such cmd: bogusCommand") == 0,
+ "mongo_sync_cmd_custom() with a bogus command fails with safe-mode off");
+ bson_free (cmd);
+
+ cmd = bson_new ();
+ bson_append_int32 (cmd, "bogusCommand2", 1); /* distinct name so a stale error cannot satisfy the check */
+ bson_finish (cmd);
+ mongo_sync_cmd_reset_error (conn, config.db);
+ mongo_sync_conn_set_safe_mode (conn, TRUE);
+ p = mongo_sync_cmd_custom (conn, config.db, cmd);
+ mongo_sync_cmd_get_last_error (conn, config.db, &error); /* NOTE(review): error leaked here as well */
+ ok (p == NULL && strcmp (error, "no such cmd: bogusCommand2") == 0,
+ "mongo_sync_cmd_custom() with a bogus command fails with safe-mode on");
+ bson_free (cmd);
+
+ mongo_sync_disconnect (conn);
+ bson_free (b1);
+ bson_free (b2);
+ bson_free (b3);
+ bson_free (b4);
+}
+
+#define INVALID_NS "1234567890123456789012345678901234567890123456789012345678901234567890.test"
+
<br>
+void
+test_func_mongo_sync_safe_mode_invalid_db (void)
+{ /* Safe mode must surface the server error for an over-long namespace (INVALID_NS). */
+ mongo_sync_connection *conn;
+ bson *b;
+ const bson *docs[1];
+
+ b = bson_new ();
+ bson_append_int32 (b, "int32", 1984);
+ bson_finish (b);
+
+ docs[0] = b;
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE); /* third arg TRUE here, unlike the basics test — presumably slave_ok; TODO confirm */
+ mongo_sync_conn_set_safe_mode (conn, TRUE);
+
+ ok (mongo_sync_cmd_insert_n (conn, INVALID_NS, 1, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() should fail with safe mode on and an invalid NS");
+
+ mongo_sync_disconnect (conn);
+ bson_free (b);
+}
+
+void
+test_func_mongo_sync_safe_mode (void)
+{ /* Umbrella entry point run by RUN_NET_TEST; the two sub-tests make 5 ok() checks in total. */
+ test_func_mongo_sync_safe_mode_basics ();
+ test_func_mongo_sync_safe_mode_invalid_db ();
+}
+
+RUN_NET_TEST (5, func_mongo_sync_safe_mode);
diff --git a/tests/func/mongo/sync/f_sync_safe_mode_cache.c b/tests/func/mongo/sync/f_sync_safe_mode_cache.c
new file mode 100644
index 0000000..082617f
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_safe_mode_cache.c
@@ -0,0 +1,131 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <string.h>
+
+#include "libmongo-private.h"
+
+void
+test_func_mongo_sync_safe_mode_basics_cache (void)
+{ /* Same safe-mode checks as f_sync_safe_mode.c, but connecting through a recovery cache. */
+ mongo_sync_connection *conn;
+ mongo_sync_conn_recovery_cache *cache;
+
+ const bson *docs[10]; /* only the first 4 slots are used below */
+ bson *b1, *b2, *b3, *b4, *cmd;
+ mongo_packet *p;
+ gchar *error;
+
+ mongo_util_oid_init (0);
+
+ b1 = bson_new ();
+ bson_append_string (b1, "func_mongo_sync_safe_mode", "works", -1); /* -1: library computes string length */
+ bson_finish (b1);
+
+ b2 = bson_new ();
+ bson_append_int32 (b2, "int32", 1984);
+ bson_finish (b2);
+
+ b3 = test_bson_generate_full (); /* test helper; presumably yields docs the server rejects — TODO confirm */
+ b4 = test_bson_generate_full ();
+
+ docs[0] = b1;
+ docs[1] = b2;
+ docs[2] = b3;
+ docs[3] = b4;
+
+ cache = mongo_sync_conn_recovery_cache_new ();
+
+ mongo_sync_conn_recovery_cache_seed_add (cache,
+ config.primary_host,
+ config.primary_port); /* seed so connect_recovery_cache() knows where to connect */
+
+ conn = mongo_sync_connect_recovery_cache (cache, FALSE);
+
+ /* Inserts: the failure is only reported back to the caller when safe mode is on. */
+ mongo_sync_conn_set_safe_mode (conn, FALSE);
+ ok (mongo_sync_cmd_insert_n (conn, config.ns, 4, docs) == TRUE,
+ "mongo_sync_cmd_insert_n() should not fail with safe mode off");
+
+ mongo_sync_conn_set_safe_mode (conn, TRUE);
+ ok (mongo_sync_cmd_insert_n (conn, config.ns, 4, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() should fail with safe mode on");
+
+ /* Custom commands: an unknown command must fail identically in both modes. */
+ cmd = bson_new ();
+ bson_append_int32 (cmd, "bogusCommand", 1);
+ bson_finish (cmd);
+
+ mongo_sync_cmd_reset_error (conn, config.db); /* clear any stale last-error first */
+ mongo_sync_conn_set_safe_mode (conn, FALSE);
+ p = mongo_sync_cmd_custom (conn, config.db, cmd);
+ mongo_sync_cmd_get_last_error (conn, config.db, &error);
+ ok (p == NULL && strcmp (error, "no such cmd: bogusCommand") == 0,
+ "mongo_sync_cmd_custom() with a bogus command fails with safe-mode off");
+ bson_free (cmd);
+ g_free (error); /* caller owns the error string */
+
+ cmd = bson_new ();
+ bson_append_int32 (cmd, "bogusCommand2", 1); /* distinct name so a stale error cannot satisfy the check */
+ bson_finish (cmd);
+ mongo_sync_cmd_reset_error (conn, config.db);
+ mongo_sync_conn_set_safe_mode (conn, TRUE);
+ p = mongo_sync_cmd_custom (conn, config.db, cmd);
+ mongo_sync_cmd_get_last_error (conn, config.db, &error);
+ ok (p == NULL && strcmp (error, "no such cmd: bogusCommand2") == 0,
+ "mongo_sync_cmd_custom() with a bogus command fails with safe-mode on");
+ bson_free (cmd);
+ g_free (error);
+
+ mongo_sync_disconnect (conn);
+ mongo_sync_conn_recovery_cache_free (cache); /* freed after disconnect */
+
+ bson_free (b1);
+ bson_free (b2);
+ bson_free (b3);
+ bson_free (b4);
+}
+
+#define INVALID_NS "1234567890123456789012345678901234567890123456789012345678901234567890.test"
+
+void
+test_func_mongo_sync_safe_mode_invalid_db_cache (void)
+{ /* Safe mode must surface the invalid-namespace error over a cache-based connection too. */
+ mongo_sync_connection *conn;
+ mongo_sync_conn_recovery_cache *cache;
+ bson *b;
+ const bson *docs[1];
+
+ b = bson_new ();
+ bson_append_int32 (b, "int32", 1984);
+ bson_finish (b);
+
+ docs[0] = b;
+
+ cache = mongo_sync_conn_recovery_cache_new ();
+
+ mongo_sync_conn_recovery_cache_seed_add (cache,
+ config.primary_host,
+ config.primary_port); /* seed so connect_recovery_cache() knows where to connect */
+
+ conn = mongo_sync_connect_recovery_cache (cache, TRUE); /* TRUE here, unlike the basics test — presumably slave_ok; TODO confirm */
+
+ mongo_sync_conn_set_safe_mode (conn, TRUE);
+
+ ok (mongo_sync_cmd_insert_n (conn, INVALID_NS, 1, docs) == FALSE,
+ "mongo_sync_cmd_insert_n() should fail with safe mode on and an invalid NS");
+
+ mongo_sync_disconnect (conn);
+ mongo_sync_conn_recovery_cache_free (cache); /* freed after disconnect */
+ bson_free (b);
+}
+
+void
+test_func_mongo_sync_safe_mode_cache (void)
+{ /* Umbrella entry point run by RUN_NET_TEST; the two sub-tests make 5 ok() checks in total. */
+ test_func_mongo_sync_safe_mode_basics_cache ();
+ test_func_mongo_sync_safe_mode_invalid_db_cache ();
+}
+
+RUN_NET_TEST (5, func_mongo_sync_safe_mode_cache);
diff --git a/tests/func/mongo/sync/f_sync_write_error.c b/tests/func/mongo/sync/f_sync_write_error.c
new file mode 100644
index 0000000..b6d4750
--- /dev/null
+++ b/tests/func/mongo/sync/f_sync_write_error.c
@@ -0,0 +1,52 @@
+#include "test.h"
+#include <mongo.h>
+
+#include <errno.h>
+#include <sys/socket.h>
+
+#include "libmongo-private.h"
+
+#define INVALID_NS "test.$Uncle$.Dagobert$"
+
+void
+test_func_mongo_sync_write_error (void)
+{ /* Checks that an invalid-namespace write is only reported as an error when safe mode is on. */
+ mongo_sync_connection *conn;
+ bson *b; /* NOTE(review): never bson_free()d — leaked by this test */
+ const gchar *error_msg; /* const: presumably owned by the connection, not freed here — TODO confirm */
+
+ b = bson_new ();
+ bson_append_int32 (b, "f_sync_write_error", 1);
+ bson_finish (b);
+
+ conn = mongo_sync_connect (config.primary_host, config.primary_port,
+ TRUE);
+
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE,
+ "Inserting works with correct namespace when safe mode is off");
+
+ ok (mongo_sync_cmd_insert (conn, INVALID_NS, b, NULL) == TRUE, /* without safe mode the bad write is not reported */
+ "Inserting works with invalid namespace when safe mode is off");
+
+ error_msg = mongo_sync_conn_get_last_error (conn);
+ ok (error_msg == NULL,
+ "When safe mode is off, there is no error msg, even if ns is invalid.");
+
+ ok (mongo_sync_conn_set_safe_mode (conn, TRUE) == TRUE,
+ "Setting safe mode works.");
+
+ ok (mongo_sync_cmd_insert (conn, config.ns, b, NULL) == TRUE,
+ "Inserting works with correct namespace when safe mode is on");
+
+ ok (mongo_sync_cmd_insert (conn, INVALID_NS, b, NULL) == FALSE,
+ "Inserting fails with invalid namespace when safe mode is on");
+
+ error_msg = mongo_sync_conn_get_last_error (conn);
+
+ ok (error_msg != NULL,
+ "Inserting failed in safe mode, so we should have an error msg");
+
+ mongo_sync_disconnect (conn);
+}
+
+RUN_NET_TEST (7, func_mongo_sync_write_error);