[mod_cdr_mongodb] Remove from tree (#2992)

This commit is contained in:
Andrey Volk
2026-03-06 01:26:20 +03:00
committed by GitHub
parent 88fa1f95ca
commit 14b8295dbc
34 changed files with 4 additions and 8382 deletions

View File

@@ -500,8 +500,6 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mod_cv", "src\mod\applicati
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mod_cidlookup", "src\mod\applications\mod_cidlookup\mod_cidlookup.2017.vcxproj", "{0A130A8B-3076-4619-BADF-9E86F621AEEC}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mod_cdr_mongodb", "src\mod\event_handlers\mod_cdr_mongodb\mod_cdr_mongodb.2017.vcxproj", "{4DFF29B4-2976-447D-A8B3-43476451517C}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Tests", "Tests", "{9388C266-C3FC-468A-92EF-0CBC35941412}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_mod_av", "src\mod\applications\mod_av\test\test_mod_av.2017.vcxproj", "{7926CB0D-62CE-4A09-AE94-1DA2BC92D625}"
@@ -2238,18 +2236,6 @@ Global
{0A130A8B-3076-4619-BADF-9E86F621AEEC}.Release|Win32.Build.0 = Release|Win32
{0A130A8B-3076-4619-BADF-9E86F621AEEC}.Release|x64.ActiveCfg = Release|x64
{0A130A8B-3076-4619-BADF-9E86F621AEEC}.Release|x64.Build.0 = Release|x64
{4DFF29B4-2976-447D-A8B3-43476451517C}.All|Win32.ActiveCfg = Release|Win32
{4DFF29B4-2976-447D-A8B3-43476451517C}.All|Win32.Build.0 = Release|Win32
{4DFF29B4-2976-447D-A8B3-43476451517C}.All|x64.ActiveCfg = Release|x64
{4DFF29B4-2976-447D-A8B3-43476451517C}.All|x64.Build.0 = Release|x64
{4DFF29B4-2976-447D-A8B3-43476451517C}.Debug|Win32.ActiveCfg = Debug|Win32
{4DFF29B4-2976-447D-A8B3-43476451517C}.Debug|Win32.Build.0 = Debug|Win32
{4DFF29B4-2976-447D-A8B3-43476451517C}.Debug|x64.ActiveCfg = Debug|x64
{4DFF29B4-2976-447D-A8B3-43476451517C}.Debug|x64.Build.0 = Debug|x64
{4DFF29B4-2976-447D-A8B3-43476451517C}.Release|Win32.ActiveCfg = Release|Win32
{4DFF29B4-2976-447D-A8B3-43476451517C}.Release|Win32.Build.0 = Release|Win32
{4DFF29B4-2976-447D-A8B3-43476451517C}.Release|x64.ActiveCfg = Release|x64
{4DFF29B4-2976-447D-A8B3-43476451517C}.Release|x64.Build.0 = Release|x64
{7926CB0D-62CE-4A09-AE94-1DA2BC92D625}.All|Win32.ActiveCfg = Release|Win32
{7926CB0D-62CE-4A09-AE94-1DA2BC92D625}.All|Win32.Build.0 = Release|Win32
{7926CB0D-62CE-4A09-AE94-1DA2BC92D625}.All|x64.ActiveCfg = Release|x64
@@ -2579,7 +2565,6 @@ Global
{2CA661A7-01DD-4532-BF88-B6629DFB544A} = {9ADF1E48-2F5C-4ED7-A893-596259FABFE0}
{40C4E2A2-B49B-496C-96D6-C04B890F7F88} = {E72B5BCB-6462-4D23-B419-3AF1A4AC3D78}
{0A130A8B-3076-4619-BADF-9E86F621AEEC} = {E72B5BCB-6462-4D23-B419-3AF1A4AC3D78}
{4DFF29B4-2976-447D-A8B3-43476451517C} = {9ADF1E48-2F5C-4ED7-A893-596259FABFE0}
{7926CB0D-62CE-4A09-AE94-1DA2BC92D625} = {9388C266-C3FC-468A-92EF-0CBC35941412}
{EF62B845-A0CE-44FD-B8E6-475FE87D06C3} = {9388C266-C3FC-468A-92EF-0CBC35941412}
{8154C82D-58EE-4145-9DEC-A445A5AA3D6B} = {4F227C26-768F-46A3-8684-1D08A46FB374}

13
LICENSE
View File

@@ -1304,23 +1304,10 @@ Files: src/mod/applications/mod_cluechoo/sl.h
Copyright: 1993 Toyoda Masashi
License: FIXME
Files: src/mod/event_handlers/mod_cdr_mongodb/*
Copyright: 2009-2012 10gen Inc.
2001 Unicode, Inc.
License: Apache-2.0
Files: src/mod/loggers/mod_syslog/mod_syslog.c
Copyright: 2005-2010, James Martelletti <james@nerdc0re.com>
License: MPL-1.1
Files: src/mod/event_handlers/mod_cdr_mongodb/mod_cdr_mongodb.c
Copyright: 2005-2014, Anthony Minessale II <anthm@freeswitch.org>
License: MPL-1.1
Files: src/mod/event_handlers/mod_cdr_mongodb/driver/src/md5.[ch]
Copyright: 1999, 2000, 2002 Aladdin Enterprises.
License: zlib/libpng
Files: src/mod/say/mod_say_??/mod_say_??.c
scripts/c/socket2me/socket2me.c
src/mod/xml_int/mod_xml_scgi/xml_scgi_server.pl

View File

@@ -76,7 +76,6 @@ endpoints/mod_sofia
endpoints/mod_verto
#event_handlers/mod_amqp
event_handlers/mod_cdr_csv
#event_handlers/mod_cdr_mongodb
#event_handlers/mod_cdr_pg_csv
event_handlers/mod_cdr_sqlite
#event_handlers/mod_erlang_event

View File

@@ -76,7 +76,6 @@ endpoints/mod_sofia
endpoints/mod_verto
event_handlers/mod_amqp
event_handlers/mod_cdr_csv
event_handlers/mod_cdr_mongodb
#event_handlers/mod_cdr_pg_csv
event_handlers/mod_cdr_sqlite
event_handlers/mod_erlang_event

1
ci.sh
View File

@@ -97,7 +97,6 @@ configure_freeswitch()
-e '/mod_siren/s/^/#/g' \
-e '/mod_avmd/s/^/#/g' \
-e '/mod_basic/s/^/#/g' \
-e '/mod_cdr_mongodb/s/^/#/g' \
-e '/mod_cv/s/^/#/g' \
-e '/mod_erlang_event/s/^/#/g' \
-e '/mod_perl/s/^/#/g' \

View File

@@ -1,13 +0,0 @@
<configuration name="cdr_mongodb.conf" description="MongoDB CDR logger">
<settings>
<!-- Hostnames and IPv6 addrs not supported (yet) -->
<param name="host" value="127.0.0.1"/>
<param name="port" value="27017"/>
<!-- Namespace format is database.collection -->
<param name="namespace" value="test.cdr"/>
<!-- If true, create CDR for B-leg of call (default: true) -->
<param name="log-b-leg" value="false"/>
</settings>
</configuration>

View File

@@ -2072,7 +2072,6 @@ AC_CONFIG_FILES([Makefile
src/mod/endpoints/mod_verto/Makefile
src/mod/event_handlers/mod_amqp/Makefile
src/mod/event_handlers/mod_cdr_csv/Makefile
src/mod/event_handlers/mod_cdr_mongodb/Makefile
src/mod/event_handlers/mod_cdr_pg_csv/Makefile
src/mod/event_handlers/mod_cdr_sqlite/Makefile
src/mod/event_handlers/mod_erlang_event/Makefile

2
debian/bootstrap.sh vendored
View File

@@ -636,7 +636,6 @@ Depends: \${misc:Depends}, freeswitch (= \${binary:Version}),
freeswitch-mod-sofia (= \${binary:Version}),
freeswitch-mod-verto (= \${binary:Version}),
freeswitch-mod-cdr-csv (= \${binary:Version}),
freeswitch-mod-cdr-mongodb (= \${binary:Version}),
freeswitch-mod-cdr-sqlite (= \${binary:Version}),
freeswitch-mod-erlang-event (= \${binary:Version}),
freeswitch-mod-event-multicast (= \${binary:Version}),
@@ -852,7 +851,6 @@ Depends: \${misc:Depends}, freeswitch (= \${binary:Version}),
freeswitch-mod-sofia-dbg (= \${binary:Version}),
freeswitch-mod-verto-dbg (= \${binary:Version}),
freeswitch-mod-cdr-csv-dbg (= \${binary:Version}),
freeswitch-mod-cdr-mongodb-dbg (= \${binary:Version}),
freeswitch-mod-cdr-sqlite-dbg (= \${binary:Version}),
freeswitch-mod-erlang-event-dbg (= \${binary:Version}),
freeswitch-mod-event-multicast-dbg (= \${binary:Version}),

View File

@@ -399,10 +399,6 @@ Module: event_handlers/mod_cdr_csv
Description: mod_cdr_csv
Adds mod_cdr_csv.
Module: event_handlers/mod_cdr_mongodb
Description: mod_cdr_mongodb
Adds mod_cdr_mongodb.
Module: event_handlers/mod_cdr_pg_csv
Description: mod_cdr_pg_csv
Adds mod_cdr_pg_csv.

13
debian/copyright vendored
View File

@@ -1304,23 +1304,10 @@ Files: src/mod/applications/mod_cluechoo/sl.h
Copyright: 1993 Toyoda Masashi
License: FIXME
Files: src/mod/event_handlers/mod_cdr_mongodb/*
Copyright: 2009-2012 10gen Inc.
2001 Unicode, Inc.
License: Apache-2.0
Files: src/mod/loggers/mod_syslog/mod_syslog.c
Copyright: 2005-2010, James Martelletti <james@nerdc0re.com>
License: MPL-1.1
Files: src/mod/event_handlers/mod_cdr_mongodb/mod_cdr_mongodb.c
Copyright: 2005-2014, Anthony Minessale II <anthm@freeswitch.org>
License: MPL-1.1
Files: src/mod/event_handlers/mod_cdr_mongodb/driver/src/md5.[ch]
Copyright: 1999, 2000, 2002 Aladdin Enterprises.
License: zlib/libpng
Files: src/mod/say/mod_say_??/mod_say_??.c
scripts/c/socket2me/socket2me.c
src/mod/xml_int/mod_xml_scgi/xml_scgi_server.pl

View File

@@ -769,15 +769,6 @@ Verto protocol support for FreeSWITCH open source telephony platform.
# FreeSWITCH Event Handler Modules
######################################################################################################################
%package event-cdr-mongodb
Summary: MongoDB CDR Logger for the FreeSWITCH open source telephony platform
Group: System/Libraries
Requires: %{name} = %{version}-%{release}
BuildRequires: mongo-c-driver-devel
%description event-cdr-mongodb
MongoDB CDR Logger for FreeSWITCH
%package event-cdr-pg-csv
Summary: PostgreSQL CDR Logger for the FreeSWITCH open source telephony platform
Group: System/Libraries
@@ -1257,7 +1248,7 @@ ENDPOINTS_MODULES=" \
#
######################################################################################################################
EVENT_HANDLERS_MODULES="event_handlers/mod_cdr_csv event_handlers/mod_cdr_pg_csv event_handlers/mod_cdr_sqlite \
event_handlers/mod_cdr_mongodb event_handlers/mod_format_cdr event_handlers/mod_erlang_event event_handlers/mod_event_multicast \
event_handlers/mod_format_cdr event_handlers/mod_erlang_event event_handlers/mod_event_multicast \
event_handlers/mod_event_socket event_handlers/mod_json_cdr \
event_handlers/mod_snmp"
@@ -1636,7 +1627,6 @@ fi
%config(noreplace) %attr(0640, freeswitch, daemon) %{sysconfdir}/autoload_configs/blacklist.conf.xml
%config(noreplace) %attr(0640, freeswitch, daemon) %{sysconfdir}/autoload_configs/callcenter.conf.xml
%config(noreplace) %attr(0640, freeswitch, daemon) %{sysconfdir}/autoload_configs/cdr_csv.conf.xml
%config(noreplace) %attr(0640, freeswitch, daemon) %{sysconfdir}/autoload_configs/cdr_mongodb.conf.xml
%config(noreplace) %attr(0640, freeswitch, daemon) %{sysconfdir}/autoload_configs/cdr_pg_csv.conf.xml
%config(noreplace) %attr(0640, freeswitch, daemon) %{sysconfdir}/autoload_configs/cdr_sqlite.conf.xml
%config(noreplace) %attr(0640, freeswitch, daemon) %{sysconfdir}/autoload_configs/cidlookup.conf.xml
@@ -1958,9 +1948,6 @@ fi
#
######################################################################################################################
%files event-cdr-mongodb
%{MODINSTDIR}/mod_cdr_mongodb.so*
%files event-cdr-pg-csv
%{MODINSTDIR}/mod_cdr_pg_csv.so*

View File

@@ -1,9 +0,0 @@
include $(top_srcdir)/build/modmake.rulesam
MODNAME=mod_cdr_mongodb
MONGODB_DRIVER=$(switch_srcdir)/src/mod/event_handlers/mod_cdr_mongodb/driver/src
mod_LTLIBRARIES = mod_cdr_mongodb.la
mod_cdr_mongodb_la_SOURCES = mod_cdr_mongodb.c ./driver/src/encoding.c ./driver/src/env_posix.c ./driver/src/bson.c ./driver/src/md5.c ./driver/src/mongo.c ./driver/src/numbers.c
mod_cdr_mongodb_la_CFLAGS = $(AM_CFLAGS)
mod_cdr_mongodb_la_CPPFLAGS = -I$(MONGODB_DRIVER) -I$(switch_srcdir)/libs/libteletone/src/ -D_GNU_SOURCE
mod_cdr_mongodb_la_LIBADD = $(switch_builddir)/libfreeswitch.la
mod_cdr_mongodb_la_LDFLAGS = -avoid-version -module -no-undefined -shared

View File

@@ -1,21 +0,0 @@
<configuration name="cdr_mongodb.conf" description="MongoDB CDR logger">
<settings>
<!-- Specify MongoDB server in hostname[:port] format. Specify multiple
comma-delimited hosts for a replica set -->
<param name="host" value="127.0.0.1"/>
<param name="port" value="27017"/>
<!-- Optionally override default replica set name -->
<!-- <param name="replica_set_name" value="cdr_mongodb"/> -->
<!-- Namespace format is database.collection -->
<param name="namespace" value="test.cdr"/>
<!-- Set username and password to enable MongoDB authentication -->
<!-- <param name="username" value="freeswitch"/> -->
<!-- <param name="password" value="secret"/> -->
<!-- If true, create CDR for B-leg of call (default: true) -->
<param name="log-b-leg" value="false"/>
</settings>
</configuration>

View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,201 +0,0 @@
# MongoDB C Driver History
## 0.6
2012-6-3
** API CHANGE **
Version 0.6 supports write concern. This involves a backward-breaking
API change, as the write functions now take an optional write_concern
object.
The driver now also supports the MONGO_CONTINUE_ON_ERROR flag for
batch inserts.
The new function prototypes are as follows:
* int mongo_insert( mongo *conn, const char *ns, const bson *data,
mongo_write_concern *custom_write_concern );
* int mongo_insert_batch( mongo *conn, const char *ns,
const bson **data, int num, mongo_write_concern *custom_write_concern );
* int mongo_update( mongo *conn, const char *ns, const bson *cond,
const bson *op, int flags, mongo_write_concern *custom_write_concern,
int flags );
* int mongo_remove( mongo *conn, const char *ns, const bson *cond,
mongo_write_concern *custom_write_concern );
* Allow DBRefs (i.e., allows keys $ref, $id, and $db)
* Added mongo_create_capped_collection().
* Fixed some bugs in the SCons and Makefile build scripts.
* Fixes for SCons and Makefile shared library install targets.
* Other minor bug fixes.
## 0.5.2
2012-5-4
* Validate collection and database names on insert.
* Validate insert limits using max BSON size.
* Support getaddrinfo and SO_RCVTIMEO and SO_SNDTIMEO on Windows.
* Store errno/WSAGetLastError() on errors.
* Various bug fixes and refactorings.
* Update error reporting docs.
## 0.5.1
* Env for POSIX, WIN32, and standard C.
* Various bug fixes.
## 0.5
2012-3-31
* Separate cursor-specific errors into their own enum: mongo_cursor_error_t.
* Catch $err return on bad queries and store the result in conn->getlasterrorcode
and conn->getlasterrstr.
* On queries that return $err, set cursor->err to MONGO_CURSOR_QUERY_FAIL.
* When passing bad BSON to a cursor object, set cursor->err to MONGO_CURSOR_BSON_ERROR,
and store the specific BSON error on the conn->err field.
* Remove bson_copy_basic().
* bson_copy() will copy finished bson objects only.
* bson_copy() returns BSON_OK on success and BSON_ERROR on failure.
* Added a Makefile for easy compile and install on Linux and OS X.
* Replica set connect fixes.
## 0.4
THIS RELEASE INCLUDES NUMEROUS BACKWARD-BREAKING CHANGES.
These changes have been made for extensibility, consistency,
and ease of use. Please read the following release notes
carefully, and study the updated tutorial.
API Principles:
1. Present a consistent interface for all objects: connections,
cursors, bson objects, and bson iterators.
2. Require no knowledge of an object's implementation to use the API.
3. Allow users to allocate objects on the stack or on the heap.
4. Integrate API with new error reporting strategy.
5. Be concise, except where it impairs clarity.
Changes:
* mongo_replset_init_conn has been renamed to mongo_replset_init.
* bson_buffer has been removed. All functions for building bson
objects now take objects of type bson. The new pattern looks like this:
Example:
bson b[1];
bson_init( b );
bson_append_int( b, "foo", 1 );
bson_finish( b );
/* The object is ready to use.
When finished, destroy it. */
bson_destroy( b );
* mongo_connection has been renamed to mongo.
Example:
mongo conn[1];
mongo_connect( conn, '127.0.0.1', 27017 );
/* Connection is ready. Destroy when down. */
mongo_destroy( conn );
* New cursor builder API for clearer code:
Example:
mongo_cursor cursor[1];
mongo_cursor_init( cursor, conn, "test.foo" );
bson query[1];
bson_init( query );
bson_append_int( query, "bar", 1 );
bson_finish( query );
bson fields[1];
bson_init( fields );
bson_append_int( fields, "baz", 1 );
bson_finish( fields );
mongo_cursor_set_query( cursor, query );
mongo_cursor_set_fields( cursor, fields );
mongo_cursor_set_limit( cursor, 10 );
mongo_cursor_set_skip( cursor, 10 );
while( mongo_cursor_next( cursor ) == MONGO_OK )
bson_print( mongo_cursor_bson( cursor ) );
* bson_iterator_init now takes a (bson*) instead of a (const char*). This is consistent
with bson_find, which also takes a (bson*). If you want to initiate a bson iterator
with a buffer, use the new function bson_iterator_from_buffer.
* With the addition of the mongo_cursor_bson function, it's now no
longer necessary to know how bson and mongo_cursor objects are implemented.
Example:
bson b[1];
bson_iterator i[1];
bson_iterator_init( i, b );
/* With a cursor */
bson_iterator_init( i, mongo_cursor_bson( cursor ) );
* Added mongo_cursor_data and bson_data functions, which return the
raw bson buffer as a (const char *).
* All constants that were once lower case are now
upper case. These include: MONGO_OP_MSG, MONGO_OP_UPDATE, MONGO_OP_INSERT,
MONGO_OP_QUERY, MONGO_OP_GET_MORE, MONGO_OP_DELETE, MONGO_OP_KILL_CURSORS
BSON_EOO, BSON_DOUBLE, BSON_STRING, BSON_OBJECT, BSON_ARRAY, BSON_BINDATA,
BSON_UNDEFINED, BSON_OID, BSON_BOOL, BSON_DATE, BSON_NULL, BSON_REGEX, BSON_DBREF,
BSON_CODE, BSON_SYMBOL, BSON_CODEWSCOPE, BSON_INT, BSON_TIMESTAMP, BSON_LONG,
MONGO_CONN_SUCCESS, MONGO_CONN_BAD_ARG, MONGO_CONN_NO_SOCKET, MONGO_CONN_FAIL,
MONGO_CONN_NOT_MASTER, MONGO_CONN_BAD_SET_NAME, MONGO_CONN_CANNOT_FIND_PRIMARY
If your programs use any of these constants, you must convert them to their
upper case forms, or you will see compile errors.
* The error handling strategy has been changed. Exceptions are no longer being used.
* Functions taking a mongo_connection object now return either MONGO_OK or MONGO_ERROR.
In case of an error, an error code of type mongo_error_t will be indicated on the
mongo_connection->err field.
* Functions taking a bson object now return either BSON_OK or BSON_ERROR.
In case of an error, an error code of type bson_validity_t will be indicated on the
bson->err or bson_buffer->err field.
* Calls to mongo_cmd_get_last_error store the error status on the
mongo->lasterrcode and mongo->lasterrstr fields.
* bson_print now prints all types.
* Users may now set custom malloc, realloc, free, printf, sprintf, and fprintf fields.
* Groundwork for modules for supporting platform-specific features (e.g., socket timeouts).
* Added mongo_set_op_timeout for setting socket timeout. To take advantage of this, you must
  compile with --use-platform=LINUX. This compiles with platform/linux/net.h instead of the
top-level net.h.
* Fixed tailable cursors.
* GridFS API is now in-line with the new driver API. In particular, all of the
following functions now return MONGO_OK or MONGO_ERROR: gridfs_init,
gridfile_init, gridfile_writer_done, gridfs_store_buffer, gridfs_store_file,
and gridfs_find_query.
* Fixed a few memory leaks.
## 0.3
2011-4-14
* Support replica sets.
* Better standard connection API.
* GridFS write buffers iteratively.
* Fixes for working with large GridFS files (> 3GB)
* bson_append_string_n and family (Gergely Nagy)
## 0.2
2011-2-11
* GridFS support (Chris Triolo).
* BSON Timestamp type support.
## 0.1
2009-11-30
* Initial release.

View File

@@ -1,66 +0,0 @@
# MongoDB C Driver
This is the 10gen-supported MongoDB C driver. There are two goals for this driver.
The first is to provide a strict, default compilation option for ultimate portability,
no dependencies, and generic embeddability.
The second is to support more advanced, platform-specific features, like socket timeout,
by providing an interface for platform-specific modules.
Until the 1.0 release, this driver should be considered alpha. Keep in mind that the API will be in flux until then.
# Documentation
Documentation exists in the project's `docs` folder. You can read the latest
docs online at (http://api.mongodb.org/c/current/).
The docs are built using Sphinx and Doxygen. If you have these tools installed, then
you can build the docs with scons:
scons docs
The html docs will appear in docs/html.
# Building
First check out the version you want to build. *Always build from a particular tag, since HEAD may be
a work in progress.* For example, to build version 0.6, run:
git checkout v0.6
You can then build the driver with scons:
scons
For more build options, see the docs.
## Running the tests
Make sure that you're running mongod on 127.0.0.1 on the default port (27017). The replica set
test assumes a replica set with at least three nodes running at 127.0.0.1 and starting at port
30000. Note that the driver does not recognize 'localhost' as a valid host name.
To compile and run the tests:
scons test
# Error Handling
Most functions return MONGO_OK or BSON_OK on success and MONGO_ERROR or BSON_ERROR on failure.
Specific error codes and error strings are then stored in the `err` and `errstr` fields of the
`mongo` and `bson` objects. It is the client's responsibility to check for errors and handle
them appropriately.
# ISSUES
You can report bugs, request new features, and view this driver's roadmap
using [JIRA](http://jira.mongodb.org/browse/CDRIVER).
# CREDITS
* Gergely Nagy - Non-null-terminated string support.
* Josh Rotenberg - Initial Doxygen setup and a significant chunk of documentation.
# LICENSE
Unless otherwise specified in a source file, sources in this
repository are published under the terms of the Apache License version
2.0, a copy of which is in this repository as APACHE-2.0.txt.

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,167 +0,0 @@
/*
* Copyright 2009-2012 10gen, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Portions Copyright 2001 Unicode, Inc.
*
* Disclaimer
*
* This source code is provided as is by Unicode, Inc. No claims are
* made as to fitness for any particular purpose. No warranties of any
* kind are expressed or implied. The recipient agrees to determine
* applicability of information provided. If this file has been
* purchased on magnetic or optical media from Unicode, Inc., the
* sole remedy for any claim will be exchange of defective media
* within 90 days of receipt.
*
* Limitations on Rights to Redistribute This Code
*
* Unicode, Inc. hereby grants the right to freely use the information
* supplied in this file in the creation of products supporting the
* Unicode Standard, and to make copies of this file in any form
* for internal or external distribution as long as this notice
* remains attached.
*/
#include "bson.h"
#include "encoding.h"
/*
* Index into the table below with the first byte of a UTF-8 sequence to
* get the number of trailing bytes that are supposed to follow it.
*/
/*
 * Lookup table: number of continuation bytes implied by a UTF-8 lead byte.
 * Rows with 4 and 5 cover lead bytes that are illegal in RFC 3629 UTF-8
 * (which stops at 4-byte sequences); isLegalUTF8() rejects any computed
 * length > 4, so those entries can never validate.
 */
static const char trailingBytesForUTF8[256] = {
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
    2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
};
/* --------------------------------------------------------------------- */
/*
* Utility routine to tell whether a sequence of bytes is legal UTF-8.
* This must be called with the length pre-determined by the first byte.
* The length can be set by:
* length = trailingBytesForUTF8[*source]+1;
* and the sequence is illegal right away if there aren't that many bytes
* available.
* If presented with a length > 4, this returns 0. The Unicode
* definition of UTF-8 goes up to 4-byte sequences.
*/
static int isLegalUTF8( const unsigned char *source, int length ) {
unsigned char a;
const unsigned char *srcptr = source + length;
switch ( length ) {
default:
return 0;
/* Everything else falls through when "true"... */
case 4:
if ( ( a = ( *--srcptr ) ) < 0x80 || a > 0xBF ) return 0;
case 3:
if ( ( a = ( *--srcptr ) ) < 0x80 || a > 0xBF ) return 0;
case 2:
if ( ( a = ( *--srcptr ) ) > 0xBF ) return 0;
switch ( *source ) {
/* no fall-through in this inner switch */
case 0xE0:
if ( a < 0xA0 ) return 0;
break;
case 0xF0:
if ( a < 0x90 ) return 0;
break;
case 0xF4:
if ( a > 0x8F ) return 0;
break;
default:
if ( a < 0x80 ) return 0;
}
case 1:
if ( *source >= 0x80 && *source < 0xC2 ) return 0;
if ( *source > 0xF4 ) return 0;
}
return 1;
}
/* If the name is part of a db ref ($ref, $db, or $id), then return true. */
/* Return 1 when a '$'-prefixed name is one of the db-ref keys
 * ($ref, $id, $db), judged by its length bucket; 0 otherwise.
 * The caller guarantees string[0] == '$'. */
static int bson_string_is_db_ref( const unsigned char *string, const int length ) {
    if ( length >= 4 )
        return string[1] == 'r' && string[2] == 'e' && string[3] == 'f';

    if ( length >= 3 )
        return ( string[1] == 'i' && string[2] == 'd' ) ||
               ( string[1] == 'd' && string[2] == 'b' );

    return 0;
}
/*
 * Validate 'string' (length bytes) for inclusion in a BSON document.
 * check_utf8 makes malformed UTF-8 fatal (sets BSON_NOT_UTF8, returns
 * BSON_ERROR); check_dot and check_dollar only record warning bits
 * (BSON_FIELD_HAS_DOT / BSON_FIELD_INIT_DOLLAR) in b->err without
 * failing.  Returns BSON_OK otherwise.
 */
static int bson_validate_string( bson *b, const unsigned char *string,
                                 const int length, const char check_utf8, const char check_dot,
                                 const char check_dollar ) {
    int position = 0;
    int sequence_length = 1;

    /* A leading '$' is tolerated only for the db-ref keys ($ref/$id/$db). */
    if( check_dollar && string[0] == '$' ) {
        if( !bson_string_is_db_ref( string, length ) )
            b->err |= BSON_FIELD_INIT_DOLLAR;
    }

    while ( position < length ) {
        if ( check_dot && *( string + position ) == '.' ) {
            b->err |= BSON_FIELD_HAS_DOT;
        }

        if ( check_utf8 ) {
            sequence_length = trailingBytesForUTF8[*( string + position )] + 1;
            /* Multi-byte sequence truncated by the end of the buffer. */
            if ( ( position + sequence_length ) > length ) {
                b->err |= BSON_NOT_UTF8;
                return BSON_ERROR;
            }

            if ( !isLegalUTF8( string + position, sequence_length ) ) {
                b->err |= BSON_NOT_UTF8;
                return BSON_ERROR;
            }
        }

        /* When check_utf8 is 0, sequence_length stays 1: byte-wise scan. */
        position += sequence_length;
    }

    return BSON_OK;
}
/* Validate a BSON string value: UTF-8 only, no '.' or '$' restrictions. */
int bson_check_string( bson *b, const char *string,
                       const int length ) {
    return bson_validate_string( b, ( const unsigned char * )string, length, 1, 0, 0 );
}
/* Validate a BSON field name: UTF-8, flagging '.' anywhere and a leading
 * '$' (db-ref keys excepted) via b->err. */
int bson_check_field_name( bson *b, const char *string,
                           const int length ) {
    return bson_validate_string( b, ( const unsigned char * )string, length, 1, 1, 1 );
}

View File

@@ -1,54 +0,0 @@
/*
* Copyright 2009-2012 10gen, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BSON_ENCODING_H_
#define BSON_ENCODING_H_

MONGO_EXTERN_C_START

/**
 * Check that a field name is valid UTF-8 and record warning bits for
 * names containing '.' or starting with '$' (db-ref keys excepted).
 * '\0' needs no explicit check because callers derive length from
 * strlen(3), which stops at '\0'.
 *
 * @param b The bson object to which the field name will be appended.
 * @param string The field name as char*.
 * @param length The length of the field name.
 *
 * @return BSON_OK if valid UTF8 and BSON_ERROR if not.  Also sets the
 *  relevant bits of b->err for '.' / leading-'$' occurrences, whose
 *  validity depends on context.
 */
int bson_check_field_name( bson *b, const char *string,
                           const int length );

/**
 * Check that a string is valid UTF8. Sets b->err on error.
 *
 * NOTE(review): declared here with return type bson_bool_t while the
 * definition in encoding.c returns int — presumably bson_bool_t is a
 * typedef for int; confirm against bson.h.
 *
 * @param b The bson object to which the string will be appended.
 * @param string The string to check.
 * @param length The length of the string.
 *
 * @return BSON_OK if valid UTF-8; otherwise, BSON_ERROR.
 */
bson_bool_t bson_check_string( bson *b, const char *string,
                               const int length );

MONGO_EXTERN_C_END
#endif

View File

@@ -1,39 +0,0 @@
/** @file env.h */
/* Copyright 2009-2012 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Header for the per-platform networking shims (implemented by
 * env_posix.c, env_win32.c and the portable fallback env_standard.c).
 * All functions return MONGO_OK or MONGO_ERROR. */
#ifndef MONGO_ENV_H_
#define MONGO_ENV_H_

#include "mongo.h"

MONGO_EXTERN_C_START

/* This is a no-op in the generic implementation. */
int mongo_env_set_socket_op_timeout( mongo *conn, int millis );
/* Read / write exactly 'len' bytes on conn->sock. */
int mongo_env_read_socket( mongo *conn, void *buf, int len );
int mongo_env_write_socket( mongo *conn, const void *buf, int len );
/* Connect to host:port; a negative port selects a UNIX-domain socket in
 * the POSIX backend (host is then a filesystem path). */
int mongo_env_socket_connect( mongo *conn, const char *host, int port );

/* Initialize socket services */
MONGO_EXPORT int mongo_env_sock_init( void );

/* Close a socket */
MONGO_EXPORT int mongo_env_close_socket( int socket );

MONGO_EXTERN_C_END
#endif

View File

@@ -1,200 +0,0 @@
/* env_posix.c */
/* Copyright 2009-2012 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Networking and other niceties for POSIX systems. */
#include "env.h"
#include "mongo.h"
#include <string.h>
#include <errno.h>
#include <sys/time.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <netdb.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <fcntl.h>
#include <unistd.h>
#ifndef NI_MAXSERV
# define NI_MAXSERV 32
#endif
/* Close a socket descriptor; returns close(2)'s result (0 or -1). */
int mongo_env_close_socket( int sock_fd ) {
    return close( sock_fd );
}
/* POSIX needs no socket-service bootstrap; always report success (0). */
int mongo_env_sock_init( void ) {
    return 0;
}
/*
 * Send exactly 'len' bytes from 'buf' on conn->sock, looping over short
 * writes.  Returns MONGO_OK, or MONGO_ERROR with the errno recorded via
 * __mongo_set_error.  MSG_NOSIGNAL suppresses SIGPIPE where available
 * (macOS lacks it, hence the EPIPE handling below).
 */
int mongo_env_write_socket( mongo *conn, const void *buf, int len ) {
    const char *cbuf = buf;
#ifdef __APPLE__
    int flags = 0;
#else
    int flags = MSG_NOSIGNAL;
#endif

    while ( len ) {
        int sent = send( conn->sock, cbuf, len, flags );
        if ( sent == -1 ) {
            if (errno == EPIPE)
                conn->connected = 0;   /* peer went away; mark link dead */
            __mongo_set_error( conn, MONGO_IO_ERROR, strerror( errno ), errno );
            return MONGO_ERROR;
        }
        cbuf += sent;
        len -= sent;
    }

    return MONGO_OK;
}
/*
 * Receive exactly 'len' bytes into 'buf', looping over short reads.
 * recv() returning 0 (orderly EOF) is treated as an error too, since a
 * partial message is useless.  ('sent' actually holds the received
 * byte count.)  Returns MONGO_OK or MONGO_ERROR.
 */
int mongo_env_read_socket( mongo *conn, void *buf, int len ) {
    char *cbuf = buf;
    while ( len ) {
        int sent = recv( conn->sock, cbuf, len, 0 );
        if ( sent == 0 || sent == -1 ) {
            __mongo_set_error( conn, MONGO_IO_ERROR, strerror( errno ), errno );
            return MONGO_ERROR;
        }
        cbuf += sent;
        len -= sent;
    }

    return MONGO_OK;
}
/*
 * Apply 'millis' as both the receive (SO_RCVTIMEO) and send (SO_SNDTIMEO)
 * timeout on conn->sock.  Returns MONGO_OK or MONGO_ERROR.
 */
int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) {
    struct timeval tv;
    tv.tv_sec = millis / 1000;
    tv.tv_usec = ( millis % 1000 ) * 1000;

    if ( setsockopt( conn->sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof( tv ) ) == -1 ) {
        /* NOTE(review): this direct assignment looks redundant with
         * __mongo_set_error below (the SO_SNDTIMEO branch omits it) — confirm. */
        conn->err = MONGO_IO_ERROR;
        __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_RCVTIMEO failed.", errno );
        return MONGO_ERROR;
    }

    if ( setsockopt( conn->sock, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof( tv ) ) == -1 ) {
        __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_SNDTIMEO failed.", errno );
        return MONGO_ERROR;
    }

    return MONGO_OK;
}
/*
 * Connect conn->sock to the UNIX-domain socket at 'sock_path'.
 * Returns MONGO_OK, or MONGO_ERROR with conn->err set on connect failure
 * (socket() failure returns MONGO_ERROR without setting conn->err,
 * matching the original behavior).
 */
int mongo_env_unix_socket_connect( mongo *conn, const char *sock_path ) {
    struct sockaddr_un addr;
    int status, len;

    conn->connected = 0;
    conn->sock = socket( AF_UNIX, SOCK_STREAM, 0 );
    if ( conn->sock < 0 ) {
        conn->sock = 0;
        return MONGO_ERROR;
    }

    /* Zero the whole address: the struct was previously left
     * uninitialized, so sun_path was not guaranteed NUL-terminated when
     * sock_path filled the strncpy limit below. */
    memset( &addr, 0, sizeof( addr ) );
    addr.sun_family = AF_UNIX;
    strncpy( addr.sun_path, sock_path, sizeof(addr.sun_path) - 1 );
    len = sizeof( addr );

    status = connect( conn->sock, (struct sockaddr *) &addr, len );
    if( status < 0 ){
        mongo_env_close_socket( conn->sock );
        conn->sock = 0;
        conn->err = MONGO_CONN_FAIL;
        return MONGO_ERROR;
    }

    conn->connected = 1;
    return MONGO_OK;
}
/*
 * Connect conn->sock to host:port over TCP (any address family via
 * getaddrinfo), or to a UNIX-domain socket when port is negative (host
 * is then a filesystem path).  Returns MONGO_OK or MONGO_ERROR with
 * conn->err set.
 */
int mongo_env_socket_connect( mongo *conn, const char *host, int port ) {
    char port_str[NI_MAXSERV];
    int status;

    struct addrinfo ai_hints;
    struct addrinfo *ai_list = NULL;
    struct addrinfo *ai_ptr = NULL;

    if ( port < 0 ) {
        return mongo_env_unix_socket_connect( conn, host );
    }

    conn->sock = 0;
    conn->connected = 0;

    /* The port was previously formatted twice (a raw sprintf immediately
     * followed by bson_sprintf with identical arguments); one call
     * suffices, and bson_sprintf matches the other env backends. */
    bson_sprintf( port_str, "%d", port );

    memset( &ai_hints, 0, sizeof( ai_hints ) );
#ifdef AI_ADDRCONFIG
    ai_hints.ai_flags = AI_ADDRCONFIG;   /* only families this host is configured for */
#endif
    ai_hints.ai_family = AF_UNSPEC;
    ai_hints.ai_socktype = SOCK_STREAM;

    status = getaddrinfo( host, port_str, &ai_hints, &ai_list );
    if ( status != 0 ) {
        bson_errprintf( "getaddrinfo failed: %s", gai_strerror( status ) );
        conn->err = MONGO_CONN_ADDR_FAIL;
        return MONGO_ERROR;
    }

    /* Try each resolved address until one connects. */
    for ( ai_ptr = ai_list; ai_ptr != NULL; ai_ptr = ai_ptr->ai_next ) {
        conn->sock = socket( ai_ptr->ai_family, ai_ptr->ai_socktype, ai_ptr->ai_protocol );
        if ( conn->sock < 0 ) {
            conn->sock = 0;
            continue;
        }

        status = connect( conn->sock, ai_ptr->ai_addr, ai_ptr->ai_addrlen );
        if ( status != 0 ) {
            mongo_env_close_socket( conn->sock );
            conn->sock = 0;
            continue;
        }

        if ( ai_ptr->ai_protocol == IPPROTO_TCP ) {
            int flag = 1;

            /* Disable Nagle; apply the configured op timeout if any. */
            setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY,
                        ( void * ) &flag, sizeof( flag ) );
            if ( conn->op_timeout_ms > 0 )
                mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms );
        }

        conn->connected = 1;
        break;
    }

    freeaddrinfo( ai_list );

    if ( ! conn->connected ) {
        conn->err = MONGO_CONN_FAIL;
        return MONGO_ERROR;
    }

    return MONGO_OK;
}

View File

@@ -1,168 +0,0 @@
/* env_standard.c */
/* Copyright 2009-2012 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Vanilla networking designed to work on all systems. */
#include "env.h"
#include <errno.h>
#include <string.h>
#ifdef _WIN32
#ifdef _MSC_VER
#include <ws2tcpip.h> // send,recv,socklen_t etc
#include <wspiapi.h> // addrinfo
#else
#include <windows.h>
#include <winsock.h>
typedef int socklen_t;
#endif
#else
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <fcntl.h>
#include <unistd.h>
#endif
#ifndef NI_MAXSERV
# define NI_MAXSERV 32
#endif
/* Close a socket with the platform-appropriate call. */
int mongo_env_close_socket( int socket ) {
#ifdef _WIN32
    return closesocket( socket );
#else
    return close( socket );
#endif
}
/*
 * Send exactly 'len' bytes from 'buf', looping over short writes.
 * MSG_NOSIGNAL is used only where it exists (not Windows/macOS).
 * Returns MONGO_OK, or MONGO_ERROR with conn->err = MONGO_IO_ERROR.
 */
int mongo_env_write_socket( mongo *conn, const void *buf, int len ) {
    const char *cbuf = buf;
#ifdef _WIN32
    int flags = 0;
#else
#ifdef __APPLE__
    int flags = 0;
#else
    int flags = MSG_NOSIGNAL;
#endif
#endif

    while ( len ) {
        int sent = send( conn->sock, cbuf, len, flags );
        if ( sent == -1 ) {
            if (errno == EPIPE)
                conn->connected = 0;   /* peer closed the connection */
            conn->err = MONGO_IO_ERROR;
            return MONGO_ERROR;
        }
        cbuf += sent;
        len -= sent;
    }

    return MONGO_OK;
}
/*
 * Receive exactly 'len' bytes into 'buf'; EOF (recv returns 0) counts
 * as an error.  ('sent' actually holds the received byte count.)
 */
int mongo_env_read_socket( mongo *conn, void *buf, int len ) {
    char *cbuf = buf;
    while ( len ) {
        int sent = recv( conn->sock, cbuf, len, 0 );
        if ( sent == 0 || sent == -1 ) {
            conn->err = MONGO_IO_ERROR;
            return MONGO_ERROR;
        }
        cbuf += sent;
        len -= sent;
    }

    return MONGO_OK;
}
/* This is a no-op in the generic implementation: operation timeouts are
 * silently ignored by this backend, so conn->op_timeout_ms has no effect. */
int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) {
    return MONGO_OK;
}
/*
 * Portable IPv4 connect.  'host' must be a numeric dotted-quad address:
 * inet_addr() performs no DNS resolution.  Returns MONGO_OK or
 * MONGO_ERROR with conn->err set.
 */
int mongo_env_socket_connect( mongo *conn, const char *host, int port ) {
    struct sockaddr_in sa;
    socklen_t addressSize;
    int flag = 1;

    if ( ( conn->sock = socket( AF_INET, SOCK_STREAM, 0 ) ) < 0 ) {
        conn->sock = 0;
        conn->err = MONGO_CONN_NO_SOCKET;
        return MONGO_ERROR;
    }

    memset( sa.sin_zero , 0 , sizeof( sa.sin_zero ) );
    sa.sin_family = AF_INET;
    sa.sin_port = htons( port );
    sa.sin_addr.s_addr = inet_addr( host );
    addressSize = sizeof( sa );

    if ( connect( conn->sock, ( struct sockaddr * )&sa, addressSize ) == -1 ) {
        mongo_env_close_socket( conn->sock );
        conn->connected = 0;
        conn->sock = 0;
        conn->err = MONGO_CONN_FAIL;
        return MONGO_ERROR;
    }

    /* Best-effort Nagle disable; result deliberately ignored. */
    setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( char * ) &flag, sizeof( flag ) );

    /* No-op in this backend; kept for interface symmetry. */
    if( conn->op_timeout_ms > 0 )
        mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms );

    conn->connected = 1;

    return MONGO_OK;
}
/*
 * One-time socket-service initialization; the result is cached in
 * function statics, so only the first call does real work.  Returns
 * nonzero on success.  NOTE(review): the called_once latch is not
 * thread-safe — confirm callers serialize the first call.
 */
MONGO_EXPORT int mongo_env_sock_init( void ) {
#if defined(_WIN32)
    WSADATA wsaData;
    WORD wVers;
#elif defined(SIGPIPE)
    struct sigaction act;
#endif

    static int called_once;
    static int retval;
    if (called_once) return retval;
    called_once = 1;

#if defined(_WIN32)
    /* Winsock startup, requesting version 1.1. */
    wVers = MAKEWORD(1, 1);
    retval = (WSAStartup(wVers, &wsaData) == 0);
#elif defined(MACINTOSH)
    /* Classic MacOS via the GUSI socket library. */
    GUSISetup(GUSIwithInternetSockets);
    retval = 1;
#elif defined(SIGPIPE)
    /* Ignore SIGPIPE, but only if the process hasn't installed its own
     * handler (leave anything other than SIG_DFL alone). */
    retval = 1;
    if (sigaction(SIGPIPE, (struct sigaction *)NULL, &act) < 0)
        retval = 0;
    else if (act.sa_handler == SIG_DFL) {
        act.sa_handler = SIG_IGN;
        if (sigaction(SIGPIPE, &act, (struct sigaction *)NULL) < 0)
            retval = 0;
    }
#endif

    return retval;
}

View File

@@ -1,178 +0,0 @@
/* env_win32.c */
/* Copyright 2009-2012 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Networking and other niceties for WIN32. */
#include "env.h"
#include "mongo.h"
#include <string.h>
#ifdef _MSC_VER
#include <ws2tcpip.h> // send,recv,socklen_t etc
#include <wspiapi.h> // addrinfo
#else
#include <ws2tcpip.h> // send,recv,socklen_t etc
#include <winsock2.h>
typedef int socklen_t;
#endif
#ifndef NI_MAXSERV
# define NI_MAXSERV 32
#endif
/* Close a Winsock socket handle. */
int mongo_env_close_socket( int socket ) {
    return closesocket( socket );
}
/*
 * Send exactly 'len' bytes; on failure record the Winsock error code
 * and mark the connection dropped.
 */
int mongo_env_write_socket( mongo *conn, const void *buf, int len ) {
    const char *cbuf = buf;
    int flags = 0;

    while ( len ) {
        int sent = send( conn->sock, cbuf, len, flags );
        if ( sent == -1 ) {
            __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() );
            conn->connected = 0;
            return MONGO_ERROR;
        }
        cbuf += sent;
        len -= sent;
    }

    return MONGO_OK;
}
/*
 * Receive exactly 'len' bytes; EOF (recv returns 0) counts as an error.
 * ('sent' actually holds the received byte count.)
 */
int mongo_env_read_socket( mongo *conn, void *buf, int len ) {
    char *cbuf = buf;
    while ( len ) {
        int sent = recv( conn->sock, cbuf, len, 0 );
        if ( sent == 0 || sent == -1 ) {
            __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() );
            return MONGO_ERROR;
        }
        cbuf += sent;
        len -= sent;
    }

    return MONGO_OK;
}
/*
 * Apply 'millis' as both receive and send timeout.  Unlike POSIX,
 * Winsock takes the timeout as an integer millisecond value directly
 * rather than a struct timeval.
 */
int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) {
    if ( setsockopt( conn->sock, SOL_SOCKET, SO_RCVTIMEO, (const char *)&millis,
                     sizeof( millis ) ) == -1 ) {
        __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_RCVTIMEO failed.",
                           WSAGetLastError() );
        return MONGO_ERROR;
    }

    if ( setsockopt( conn->sock, SOL_SOCKET, SO_SNDTIMEO, (const char *)&millis,
                     sizeof( millis ) ) == -1 ) {
        __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_SNDTIMEO failed.",
                           WSAGetLastError() );
        return MONGO_ERROR;
    }

    return MONGO_OK;
}
/*
 * Connect conn->sock to host:port over TCP, trying every address that
 * getaddrinfo resolves.  Returns MONGO_OK (clearing prior errors) or
 * MONGO_ERROR with conn->err set.
 */
int mongo_env_socket_connect( mongo *conn, const char *host, int port ) {
    char port_str[NI_MAXSERV];
    char errstr[MONGO_ERR_LEN];
    int status;

    struct addrinfo ai_hints;
    struct addrinfo *ai_list = NULL;
    struct addrinfo *ai_ptr = NULL;

    conn->sock = 0;
    conn->connected = 0;

    bson_sprintf( port_str, "%d", port );

    memset( &ai_hints, 0, sizeof( ai_hints ) );
    ai_hints.ai_family = AF_UNSPEC;
    ai_hints.ai_socktype = SOCK_STREAM;
    ai_hints.ai_protocol = IPPROTO_TCP;

    status = getaddrinfo( host, port_str, &ai_hints, &ai_list );
    if ( status != 0 ) {
        bson_sprintf( errstr, "getaddrinfo failed with error %d", status );
        __mongo_set_error( conn, MONGO_CONN_ADDR_FAIL, errstr, WSAGetLastError() );
        return MONGO_ERROR;
    }

    /* Try each resolved address until one connects. */
    for ( ai_ptr = ai_list; ai_ptr != NULL; ai_ptr = ai_ptr->ai_next ) {
        conn->sock = socket( ai_ptr->ai_family, ai_ptr->ai_socktype,
                             ai_ptr->ai_protocol );
        if ( conn->sock < 0 ) {
            __mongo_set_error( conn, MONGO_SOCKET_ERROR, "socket() failed",
                               WSAGetLastError() );
            conn->sock = 0;
            continue;
        }

        status = connect( conn->sock, ai_ptr->ai_addr, ai_ptr->ai_addrlen );
        if ( status != 0 ) {
            __mongo_set_error( conn, MONGO_SOCKET_ERROR, "connect() failed",
                               WSAGetLastError() );
            mongo_env_close_socket( conn->sock );
            conn->sock = 0;
            continue;
        }

        if ( ai_ptr->ai_protocol == IPPROTO_TCP ) {
            int flag = 1;

            /* Disable Nagle; apply the configured op timeout if any. */
            setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY,
                        ( void * ) &flag, sizeof( flag ) );

            if ( conn->op_timeout_ms > 0 )
                mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms );
        }

        conn->connected = 1;
        break;
    }

    freeaddrinfo( ai_list );

    if ( ! conn->connected ) {
        conn->err = MONGO_CONN_FAIL;
        return MONGO_ERROR;
    }
    else {
        /* Success: drop any per-attempt errors recorded above. */
        mongo_clear_errors( conn );
        return MONGO_OK;
    }
}
/* One-time Winsock startup (requests version 1.1); the result is cached
 * in function statics so later calls are cheap.  Returns nonzero on
 * success. */
MONGO_EXPORT int mongo_env_sock_init( void ) {
    WSADATA wsaData;
    WORD wVers;
    static int called_once;
    static int retval;

    if (called_once) return retval;

    called_once = 1;
    wVers = MAKEWORD(1, 1);
    retval = (WSAStartup(wVers, &wsaData) == 0);

    return retval;
}

View File

@@ -1,712 +0,0 @@
/* gridfs.c */
/* Copyright 2009-2012 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gridfs.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
/* Heap-allocate an uninitialized gridfs handle; initialize with
 * gridfs_init() and release with gridfs_dispose(). */
MONGO_EXPORT gridfs* gridfs_create() {
    return (gridfs*)bson_malloc(sizeof(gridfs));
}
/* Release a handle from gridfs_create().
 * NOTE(review): frees with plain free() although the allocation used
 * bson_malloc() — safe only if bson_malloc wraps malloc; confirm. */
MONGO_EXPORT void gridfs_dispose(gridfs* gfs) {
    free(gfs);
}
/* Heap-allocate an uninitialized gridfile handle; release with
 * gridfile_dispose(). */
MONGO_EXPORT gridfile* gridfile_create() {
    return (gridfile*)bson_malloc(sizeof(gridfile));
}
/* Release a handle from gridfile_create().
 * NOTE(review): plain free() for a bson_malloc() allocation — safe only
 * if bson_malloc wraps malloc; confirm. */
MONGO_EXPORT void gridfile_dispose(gridfile* gf) {
    free(gf);
}
/* Shallow-copy the file's metadata bson into *out.
 * NOTE(review): this copies the struct only, so *out aliases the same
 * underlying buffer as gf->meta — do not destroy both; confirm. */
MONGO_EXPORT void gridfile_get_descriptor(gridfile* gf, bson* out) {
    *out = *gf->meta;
}
/* Build one chunk document { files_id, n, data } (binary subtype
 * BSON_BIN_BINARY).  The caller releases it with chunk_free(). */
static bson *chunk_new( bson_oid_t id, int chunkNumber,
                        const char *data, int len ) {
    bson *b = bson_malloc( sizeof( bson ) );

    bson_init( b );
    bson_append_oid( b, "files_id", &id );
    bson_append_int( b, "n", chunkNumber );
    bson_append_binary( b, "data", BSON_BIN_BINARY, data, len );
    bson_finish( b );
    return b;
}
/* Destroy and free a chunk document produced by chunk_new(). */
static void chunk_free( bson *oChunk ) {
    bson_destroy( oChunk );
    bson_free( oChunk );
}
/* Release the namespace/name strings owned by gfs (error-path helper
 * for gridfs_init; the same cleanup was previously duplicated inline). */
static void gridfs_init_cleanup( gridfs *gfs ) {
    bson_free( ( char * )gfs->dbname );
    bson_free( ( char * )gfs->prefix );
    bson_free( ( char * )gfs->files_ns );
    bson_free( ( char * )gfs->chunks_ns );
}

/*
 * Initialize gfs for database 'dbname' and collection prefix 'prefix'
 * (defaults to "fs"), creating the standard GridFS indexes:
 *   <db>.<prefix>.files  : { filename: 1 }
 *   <db>.<prefix>.chunks : { files_id: 1, n: 1 } unique
 * Returns MONGO_OK, or MONGO_ERROR after freeing the strings allocated
 * here.  gfs itself is owned by the caller.
 */
int gridfs_init( mongo *client, const char *dbname, const char *prefix,
                 gridfs *gfs ) {
    int options;
    bson b;
    bson_bool_t success;

    gfs->client = client;

    /* Own a copy of dbname. */
    gfs->dbname = ( const char * )bson_malloc( strlen( dbname )+1 );
    strcpy( ( char * )gfs->dbname, dbname );

    /* Own a copy of prefix. */
    if ( prefix == NULL ) prefix = "fs";
    gfs->prefix = ( const char * )bson_malloc( strlen( prefix )+1 );
    strcpy( ( char * )gfs->prefix, prefix );

    /* files_ns = "<db>.<prefix>.files" */
    gfs->files_ns =
        ( const char * ) bson_malloc ( strlen( prefix )+strlen( dbname )+strlen( ".files" )+2 );
    strcpy( ( char * )gfs->files_ns, dbname );
    strcat( ( char * )gfs->files_ns, "." );
    strcat( ( char * )gfs->files_ns, prefix );
    strcat( ( char * )gfs->files_ns, ".files" );

    /* chunks_ns = "<db>.<prefix>.chunks" */
    gfs->chunks_ns = ( const char * ) bson_malloc( strlen( prefix ) + strlen( dbname )
                     + strlen( ".chunks" ) + 2 );
    strcpy( ( char * )gfs->chunks_ns, dbname );
    strcat( ( char * )gfs->chunks_ns, "." );
    strcat( ( char * )gfs->chunks_ns, prefix );
    strcat( ( char * )gfs->chunks_ns, ".chunks" );

    bson_init( &b );
    bson_append_int( &b, "filename", 1 );
    bson_finish( &b );
    options = 0;
    success = ( mongo_create_index( gfs->client, gfs->files_ns, &b, options, NULL ) == MONGO_OK );
    bson_destroy( &b );
    if ( !success ) {
        gridfs_init_cleanup( gfs );
        return MONGO_ERROR;
    }

    bson_init( &b );
    bson_append_int( &b, "files_id", 1 );
    bson_append_int( &b, "n", 1 );
    bson_finish( &b );
    options = MONGO_INDEX_UNIQUE;
    success = ( mongo_create_index( gfs->client, gfs->chunks_ns, &b, options, NULL ) == MONGO_OK );
    bson_destroy( &b );
    if ( !success ) {
        gridfs_init_cleanup( gfs );
        return MONGO_ERROR;
    }

    return MONGO_OK;
}
/* Free the strings owned by gfs.  Does not free gfs itself (see
 * gridfs_dispose) and does not touch the client connection. */
MONGO_EXPORT void gridfs_destroy( gridfs *gfs ) {
    if ( gfs == NULL ) return;
    if ( gfs->dbname ) bson_free( ( char * )gfs->dbname );
    if ( gfs->prefix ) bson_free( ( char * )gfs->prefix );
    if ( gfs->files_ns ) bson_free( ( char * )gfs->files_ns );
    if ( gfs->chunks_ns ) bson_free( ( char * )gfs->chunks_ns );
}
/*
 * Insert the metadata document for a fully written file: run the
 * server-side "filemd5" command over the stored chunks, then insert
 * { _id, filename?, length, chunkSize, uploadDate, md5, contentType? }
 * into the .files collection.  Returns the mongo_* status code.
 */
static int gridfs_insert_file( gridfs *gfs, const char *name,
                               const bson_oid_t id, gridfs_offset length,
                               const char *contenttype ) {
    bson command;
    bson ret;
    bson res;
    bson_iterator it;
    int result;
    int64_t d;

    /* Have the server compute the md5 of the chunks under this id. */
    bson_init( &command );
    bson_append_oid( &command, "filemd5", &id );
    bson_append_string( &command, "root", gfs->prefix );
    bson_finish( &command );
    result = mongo_run_command( gfs->client, gfs->dbname, &command, &res );
    bson_destroy( &command );
    if (result != MONGO_OK)
        return result;

    /* Create and insert BSON for file metadata */
    bson_init( &ret );
    bson_append_oid( &ret, "_id", &id );
    if ( name != NULL && *name != '\0' ) {
        bson_append_string( &ret, "filename", name );
    }
    bson_append_long( &ret, "length", length );
    bson_append_int( &ret, "chunkSize", DEFAULT_CHUNK_SIZE );
    d = ( bson_date_t )1000*time( NULL );   /* seconds -> milliseconds */
    bson_append_date( &ret, "uploadDate", d);
    /* NOTE(review): bson_find's result is unchecked — assumes the command
     * reply always carries an "md5" field; confirm. */
    bson_find( &it, &res, "md5" );
    bson_append_string( &ret, "md5", bson_iterator_string( &it ) );
    bson_destroy( &res );

    if ( contenttype != NULL && *contenttype != '\0' ) {
        bson_append_string( &ret, "contentType", contenttype );
    }
    bson_finish( &ret );

    result = mongo_insert( gfs->client, gfs->files_ns, &ret, NULL );
    bson_destroy( &ret );
    return result;
}
/*
 * Store an in-memory buffer as a GridFS file: split 'data' into
 * DEFAULT_CHUNK_SIZE chunks, insert each, then insert the metadata
 * document.  Returns gridfs_insert_file's status.  Note: per-chunk
 * mongo_insert results are not checked.
 */
MONGO_EXPORT int gridfs_store_buffer( gridfs *gfs, const char *data,
                                      gridfs_offset length, const char *remotename,
                                      const char *contenttype ) {

    char const *end = data + length;
    const char *data_ptr = data;
    bson_oid_t id;
    int chunkNumber = 0;
    int chunkLen;
    bson *oChunk;

    /* Large files Assertion */
    /* assert( length <= 0xffffffff ); */

    /* Generate and append an oid*/
    bson_oid_gen( &id );

    /* Insert the file's data chunk by chunk */
    while ( data_ptr < end ) {
        /* Chunk is the smaller of DEFAULT_CHUNK_SIZE and the bytes left. */
        chunkLen = DEFAULT_CHUNK_SIZE < ( unsigned int )( end - data_ptr ) ?
                   DEFAULT_CHUNK_SIZE : ( unsigned int )( end - data_ptr );
        oChunk = chunk_new( id, chunkNumber, data_ptr, chunkLen );
        mongo_insert( gfs->client, gfs->chunks_ns, oChunk, NULL );
        chunk_free( oChunk );
        chunkNumber++;
        data_ptr += chunkLen;
    }

    /* Inserts file's metadata */
    return gridfs_insert_file( gfs, remotename, id, length, contenttype );
}
/*
 * Prepare gfile for streaming writes via gridfile_write_buffer();
 * finish with gridfile_writer_done().  Generates a fresh oid and copies
 * the name strings.  NOTE(review): both remote_name and content_type
 * are passed straight to strlen/strcpy, so NULL is not tolerated —
 * confirm callers guarantee that.
 */
MONGO_EXPORT void gridfile_writer_init( gridfile *gfile, gridfs *gfs,
                                        const char *remote_name, const char *content_type ) {
    gfile->gfs = gfs;

    bson_oid_gen( &( gfile->id ) );
    gfile->chunk_num = 0;
    gfile->length = 0;
    gfile->pending_len = 0;
    gfile->pending_data = NULL;

    gfile->remote_name = ( char * )bson_malloc( strlen( remote_name ) + 1 );
    strcpy( ( char * )gfile->remote_name, remote_name );

    gfile->content_type = ( char * )bson_malloc( strlen( content_type ) + 1 );
    strcpy( ( char * )gfile->content_type, content_type );
}
/*
 * Append 'length' bytes to a file opened with gridfile_writer_init().
 * Data is buffered in gfile->pending_data until a full
 * DEFAULT_CHUNK_SIZE chunk can be inserted; whole chunks are flushed
 * immediately and any tail is kept pending for the next call or for
 * gridfile_writer_done().
 */
MONGO_EXPORT void gridfile_write_buffer( gridfile *gfile, const char *data,
                                         gridfs_offset length ) {

    int bytes_left = 0;
    int data_partial_len = 0;
    int chunks_to_write = 0;
    char *buffer;
    bson *oChunk;
    gridfs_offset to_write = length + gfile->pending_len;

    if ( to_write < DEFAULT_CHUNK_SIZE ) { /* Less than one chunk to write */
        if( gfile->pending_data ) {
            /* NOTE(review): pending_len + to_write double-counts
             * pending_len (to_write already includes it), so this
             * over-allocates; harmless but worth confirming. */
            gfile->pending_data = ( char * )bson_realloc( ( void * )gfile->pending_data, gfile->pending_len + to_write );
            memcpy( gfile->pending_data + gfile->pending_len, data, length );
        } else if ( to_write > 0 ) {
            gfile->pending_data = ( char * )bson_malloc( to_write );
            memcpy( gfile->pending_data, data, length );
        }
        gfile->pending_len += length;

    } else { /* At least one chunk of data to write */
        chunks_to_write = to_write / DEFAULT_CHUNK_SIZE;
        bytes_left = to_write % DEFAULT_CHUNK_SIZE;

        /* If there's a pending chunk to be written, we need to combine
         * the buffer provided up to DEFAULT_CHUNK_SIZE.
         */
        if ( gfile->pending_len > 0 ) {
            data_partial_len = DEFAULT_CHUNK_SIZE - gfile->pending_len;
            buffer = ( char * )bson_malloc( DEFAULT_CHUNK_SIZE );
            memcpy( buffer, gfile->pending_data, gfile->pending_len );
            memcpy( buffer + gfile->pending_len, data, data_partial_len );

            oChunk = chunk_new( gfile->id, gfile->chunk_num, buffer, DEFAULT_CHUNK_SIZE );
            mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk, NULL );
            chunk_free( oChunk );
            gfile->chunk_num++;
            gfile->length += DEFAULT_CHUNK_SIZE;
            data += data_partial_len;
            chunks_to_write--;

            bson_free( buffer );
        }

        /* Flush every remaining full chunk straight from 'data'. */
        while( chunks_to_write > 0 ) {
            oChunk = chunk_new( gfile->id, gfile->chunk_num, data, DEFAULT_CHUNK_SIZE );
            mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk, NULL );
            chunk_free( oChunk );
            gfile->chunk_num++;
            chunks_to_write--;
            gfile->length += DEFAULT_CHUNK_SIZE;
            data += DEFAULT_CHUNK_SIZE;
        }

        bson_free( gfile->pending_data );

        /* If there are any leftover bytes, store them as pending data. */
        if( bytes_left == 0 )
            gfile->pending_data = NULL;
        else {
            gfile->pending_data = ( char * )bson_malloc( bytes_left );
            memcpy( gfile->pending_data, data, bytes_left );
        }
        gfile->pending_len = bytes_left;
    }
}
/*
 * Finish a streamed upload: flush the pending partial chunk (always
 * smaller than one chunk), insert the metadata document, and free the
 * writer's name strings.  NOTE(review): pending_data is freed but not
 * reset to NULL, so the writer must not be reused or re-finished —
 * confirm callers treat this as terminal.
 */
MONGO_EXPORT int gridfile_writer_done( gridfile *gfile ) {

    /* write any remaining pending chunk data.
     * pending data will always take up less than one chunk */
    bson *oChunk;
    int response;
    if( gfile->pending_data ) {
        oChunk = chunk_new( gfile->id, gfile->chunk_num, gfile->pending_data, gfile->pending_len );
        mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk, NULL );
        chunk_free( oChunk );
        bson_free( gfile->pending_data );
        gfile->length += gfile->pending_len;
    }

    /* insert into files collection */
    response = gridfs_insert_file( gfile->gfs, gfile->remote_name, gfile->id,
                                   gfile->length, gfile->content_type );

    bson_free( gfile->remote_name );
    bson_free( gfile->content_type );

    return response;
}
/*
 * Store a local file (or stdin when filename is "-") as a GridFS file.
 * Reads and inserts DEFAULT_CHUNK_SIZE chunks, then inserts the
 * metadata.  Note: the do/while runs once before checking chunkLen, so
 * an empty file still gets one zero-length chunk.  Per-chunk insert
 * results are not checked.
 */
int gridfs_store_file( gridfs *gfs, const char *filename,
                       const char *remotename, const char *contenttype ) {

    char buffer[DEFAULT_CHUNK_SIZE];
    FILE *fd;
    bson_oid_t id;
    int chunkNumber = 0;
    gridfs_offset length = 0;
    gridfs_offset chunkLen = 0;
    bson *oChunk;

    /* Open the file and the correct stream */
    if ( strcmp( filename, "-" ) == 0 ) fd = stdin;
    else {
        fd = fopen( filename, "rb" );
        if (fd == NULL)
            return MONGO_ERROR;
    }

    /* Generate and append an oid*/
    bson_oid_gen( &id );

    /* Insert the file chunk by chunk */
    chunkLen = fread( buffer, 1, DEFAULT_CHUNK_SIZE, fd );
    do {
        oChunk = chunk_new( id, chunkNumber, buffer, chunkLen );
        mongo_insert( gfs->client, gfs->chunks_ns, oChunk, NULL );
        chunk_free( oChunk );
        length += chunkLen;
        chunkNumber++;
        chunkLen = fread( buffer, 1, DEFAULT_CHUNK_SIZE, fd );
    } while ( chunkLen != 0 );

    /* Close the file stream */
    if ( fd != stdin ) fclose( fd );

    /* Large files Assertion */
    /* assert(length <= 0xffffffff); */

    /* Optional Remote Name */
    if ( remotename == NULL || *remotename == '\0' ) {
        remotename = filename;
    }

    /* Inserts file's metadata */
    return gridfs_insert_file( gfs, remotename, id, length, contenttype );
}
/*
 * Delete every file named 'filename' from the GridFS store: for each
 * matching .files document, remove that document and all of its chunks.
 */
MONGO_EXPORT void gridfs_remove_filename( gridfs *gfs, const char *filename ) {
    bson query;
    mongo_cursor *files;
    bson file;
    bson_iterator it;
    bson_oid_t id;
    bson b;

    bson_init( &query );
    bson_append_string( &query, "filename", filename );
    bson_finish( &query );
    files = mongo_find( gfs->client, gfs->files_ns, &query, NULL, 0, 0, 0 );
    bson_destroy( &query );

    /* Remove each matching file and its chunks. */
    while ( mongo_cursor_next( files ) == MONGO_OK ) {
        file = files->current;
        bson_find( &it, &file, "_id" );
        id = *bson_iterator_oid( &it );

        /* Remove the file with the specified id */
        bson_init( &b );
        bson_append_oid( &b, "_id", &id );
        bson_finish( &b );
        mongo_remove( gfs->client, gfs->files_ns, &b, NULL );
        bson_destroy( &b );

        /* Remove all chunks from the file with the specified id */
        bson_init( &b );
        bson_append_oid( &b, "files_id", &id );
        bson_finish( &b );
        mongo_remove( gfs->client, gfs->chunks_ns, &b, NULL );
        bson_destroy( &b );
    }

    mongo_cursor_destroy( files );
}
/*
 * Find the newest file matching 'query' (sorted by uploadDate
 * descending) and initialize 'gfile' with a copy of its metadata.
 * Returns MONGO_OK, or MONGO_ERROR when nothing matches.
 */
int gridfs_find_query( gridfs *gfs, bson *query,
                       gridfile *gfile ) {

    bson uploadDate;
    bson finalQuery;
    bson out;
    int i;

    bson_init( &uploadDate );
    bson_append_int( &uploadDate, "uploadDate", -1 );   /* newest first */
    bson_finish( &uploadDate );

    /* Wrap as { query: ..., orderby: { uploadDate: -1 } }. */
    bson_init( &finalQuery );
    bson_append_bson( &finalQuery, "query", query );
    bson_append_bson( &finalQuery, "orderby", &uploadDate );
    bson_finish( &finalQuery );

    i = ( mongo_find_one( gfs->client, gfs->files_ns,
                          &finalQuery, NULL, &out ) == MONGO_OK );
    bson_destroy( &uploadDate );
    bson_destroy( &finalQuery );
    if ( !i )
        return MONGO_ERROR;
    else {
        /* gridfile_init deep-copies 'out', so it is safe to destroy here. */
        gridfile_init( gfs, &out, gfile );
        bson_destroy( &out );
        return MONGO_OK;
    }
}
/* Convenience wrapper: find the newest file whose filename matches. */
int gridfs_find_filename( gridfs *gfs, const char *filename,
                          gridfile *gfile )

{
    bson query;
    int i;

    bson_init( &query );
    bson_append_string( &query, "filename", filename );
    bson_finish( &query );
    i = gridfs_find_query( gfs, &query, gfile );
    bson_destroy( &query );
    return i;
}
/* Initialize `gfile` for reading: position 0, owning a private copy of
   the metadata document `meta`. Returns MONGO_ERROR if allocation fails. */
int gridfile_init( gridfs *gfs, bson *meta, gridfile *gfile )
{
    gfile->pos = 0;
    gfile->gfs = gfs;
    gfile->meta = ( bson * )bson_malloc( sizeof( bson ) );
    if ( !gfile->meta )
        return MONGO_ERROR;
    bson_copy( gfile->meta, meta );  /* deep copy; released by gridfile_destroy */
    return MONGO_OK;
}
/* Release the metadata owned by a gridfile set up via gridfile_init()
   (directly or through the find functions). Does not free gfile itself. */
MONGO_EXPORT void gridfile_destroy( gridfile *gfile )
{
bson_destroy( gfile->meta ); /* free the bson's internal buffer first */
bson_free( gfile->meta );    /* then the bson struct malloc'd in gridfile_init */
}
/* Return nonzero iff gfile refers to a loaded file: a non-NULL handle
   with metadata attached.
   Fix: the previous expression used `||` with an inverted meta test, so
   it returned true for every non-NULL handle regardless of metadata and
   dereferenced NULL when gfile was NULL. */
bson_bool_t gridfile_exists( gridfile *gfile ) {
    return ( bson_bool_t )( gfile != NULL && gfile->meta != NULL );
}
/* Return the "filename" field of the file's metadata.
   The pointer aliases gfile->meta and is valid until gridfile_destroy. */
MONGO_EXPORT const char *gridfile_get_filename( gridfile *gfile ) {
    bson_iterator field;
    bson_find( &field, gfile->meta, "filename" );
    return bson_iterator_string( &field );
}
/* Return the chunk size (bytes) recorded in the file's metadata. */
MONGO_EXPORT int gridfile_get_chunksize( gridfile *gfile ) {
    bson_iterator field;
    bson_find( &field, gfile->meta, "chunkSize" );
    return bson_iterator_int( &field );
}
/* Return the file's total length in bytes.
   "length" may be stored as either a 32-bit or a 64-bit integer. */
MONGO_EXPORT gridfs_offset gridfile_get_contentlength( gridfile *gfile ) {
    bson_iterator field;
    bson_find( &field, gfile->meta, "length" );
    return ( bson_iterator_type( &field ) == BSON_INT )
           ? ( gridfs_offset )bson_iterator_int( &field )
           : ( gridfs_offset )bson_iterator_long( &field );
}
/* Return the file's MIME type, or NULL when no contentType was stored. */
MONGO_EXPORT const char *gridfile_get_contenttype( gridfile *gfile ) {
    bson_iterator field;
    /* contentType is optional metadata. */
    if ( !bson_find( &field, gfile->meta, "contentType" ) )
        return NULL;
    return bson_iterator_string( &field );
}
/* Return the upload timestamp from the file's metadata. */
MONGO_EXPORT bson_date_t gridfile_get_uploaddate( gridfile *gfile ) {
    bson_iterator field;
    bson_find( &field, gfile->meta, "uploadDate" );
    return bson_iterator_date( &field );
}
/* Return the stored MD5 digest string from the file's metadata. */
MONGO_EXPORT const char *gridfile_get_md5( gridfile *gfile ) {
    bson_iterator field;
    bson_find( &field, gfile->meta, "md5" );
    return bson_iterator_string( &field );
}
/* Return the raw BSON value of an arbitrary metadata field `name`. */
const char *gridfile_get_field( gridfile *gfile, const char *name ) {
    bson_iterator field;
    bson_find( &field, gfile->meta, name );
    return bson_iterator_value( &field );
}
/* Return metadata field `name` interpreted as a boolean. */
bson_bool_t gridfile_get_boolean( gridfile *gfile, const char *name ) {
    bson_iterator field;
    bson_find( &field, gfile->meta, name );
    return bson_iterator_bool( &field );
}
/* Copy the file's optional "metadata" sub-document into `out`;
   `out` becomes an empty bson when none was stored. */
MONGO_EXPORT void gridfile_get_metadata( gridfile *gfile, bson* out ) {
    bson_iterator field;
    if ( !bson_find( &field, gfile->meta, "metadata" ) )
        bson_empty( out );
    else
        bson_iterator_subobject( &field, out );
}
/* Return the number of chunks the file occupies: ceil(length / chunkSize).
   Fix: the previous implementation computed the ceiling through doubles,
   which loses precision for lengths above 2^53 and casts inf/NaN (UB)
   when chunkSize is 0; use exact integer arithmetic instead. */
MONGO_EXPORT int gridfile_get_numchunks( gridfile *gfile ) {
    bson_iterator it;
    gridfs_offset length;
    gridfs_offset chunkSize;

    /* "length" may be stored as a 32-bit or 64-bit integer. */
    bson_find( &it, gfile->meta, "length" );
    if( bson_iterator_type( &it ) == BSON_INT )
        length = ( gridfs_offset )bson_iterator_int( &it );
    else
        length = ( gridfs_offset )bson_iterator_long( &it );

    bson_find( &it, gfile->meta, "chunkSize" );
    chunkSize = bson_iterator_int( &it );

    if ( chunkSize == 0 )  /* corrupt metadata; avoid division by zero */
        return 0;
    /* Integer ceiling division. */
    return ( int )( ( length + chunkSize - 1 ) / chunkSize );
}
/* Fetch chunk number `n` of the file into `out`. On a miss, `out` is
   set to an empty bson rather than left uninitialized. */
MONGO_EXPORT void gridfile_get_chunk( gridfile *gfile, int n, bson* out ) {
    bson selector;
    bson_iterator it;
    bson_oid_t file_id;
    int found;

    bson_find( &it, gfile->meta, "_id" );
    file_id = *bson_iterator_oid( &it );

    /* Chunks are keyed by (files_id, n). */
    bson_init( &selector );
    bson_append_oid( &selector, "files_id", &file_id );
    bson_append_int( &selector, "n", n );
    bson_finish( &selector );

    found = ( mongo_find_one( gfile->gfs->client,
                              gfile->gfs->chunks_ns,
                              &selector, NULL, out ) == MONGO_OK );
    bson_destroy( &selector );

    if ( !found ) {
        bson none;
        bson_empty( &none );
        bson_copy( out, &none );
    }
}
/* Return a cursor over `size` chunks of the file beginning at chunk
   index `start`, ordered by chunk number. Caller destroys the cursor. */
MONGO_EXPORT mongo_cursor *gridfile_get_chunks( gridfile *gfile, int start, int size ) {
    bson_iterator it;
    bson_oid_t file_id;
    bson range;
    bson selector;
    bson sort;
    bson wrapped;
    mongo_cursor *cursor;

    bson_find( &it, gfile->meta, "_id" );
    file_id = *bson_iterator_oid( &it );

    bson_init( &selector );
    bson_append_oid( &selector, "files_id", &file_id );
    if ( size == 1 ) {
        /* A single chunk: match its index directly. */
        bson_append_int( &selector, "n", start );
    } else {
        /* Several chunks: match n >= start; the cursor's batch size
           (`size` below) bounds how many are returned. */
        bson_init( &range );
        bson_append_int( &range, "$gte", start );
        bson_finish( &range );
        bson_append_bson( &selector, "n", &range );
        bson_destroy( &range );
    }
    bson_finish( &selector );

    /* Ascending chunk order. */
    bson_init( &sort );
    bson_append_int( &sort, "n", 1 );
    bson_finish( &sort );

    bson_init( &wrapped );
    bson_append_bson( &wrapped, "query", &selector );
    bson_append_bson( &wrapped, "orderby", &sort );
    bson_finish( &wrapped );

    cursor = mongo_find( gfile->gfs->client, gfile->gfs->chunks_ns,
                         &wrapped, NULL, size, 0, 0 );

    bson_destroy( &wrapped );
    bson_destroy( &selector );
    bson_destroy( &sort );
    return cursor;
}
/* Stream the whole file to `stream`, one chunk at a time.
   Returns the file's content length (not the bytes actually written;
   fwrite results are not checked, matching the driver's convention). */
gridfs_offset gridfile_write_file( gridfile *gfile, FILE *stream ) {
    const int total = gridfile_get_numchunks( gfile );
    int n;

    for ( n = 0; n < total; n++ ) {
        bson chunk;
        bson_iterator it;
        const char *payload;
        size_t payload_len;

        gridfile_get_chunk( gfile, n, &chunk );
        bson_find( &it, &chunk, "data" );
        payload_len = bson_iterator_bin_len( &it );
        payload = bson_iterator_bin_data( &it );
        fwrite( payload, sizeof( char ), payload_len, stream );
        bson_destroy( &chunk );
    }
    return gridfile_get_contentlength( gfile );
}
/* Read up to `size` bytes from the current file position into `buf`,
   advancing the position. Reads are clamped at end-of-file; returns the
   number of bytes copied. Assumes `buf` is at least `size` bytes.
   NOTE(review): mongo_cursor_next's return value is not checked; a short
   cursor would leave `chunk` stale — confirm against server guarantees. */
MONGO_EXPORT gridfs_offset gridfile_read( gridfile *gfile, gridfs_offset size, char *buf ) {
mongo_cursor *chunks;
bson chunk;
int first_chunk;
int last_chunk;
int total_chunks;
gridfs_offset chunksize;
gridfs_offset contentlength;
gridfs_offset bytes_left;
int i;
bson_iterator it;
gridfs_offset chunk_len;
const char *chunk_data;
contentlength = gridfile_get_contentlength( gfile );
chunksize = gridfile_get_chunksize( gfile );
/* Clamp the request so we never read past end-of-file. */
size = ( contentlength - gfile->pos < size )
? contentlength - gfile->pos
: size;
bytes_left = size;
/* Chunk indices covering [pos, pos+size). */
first_chunk = ( gfile->pos )/chunksize;
last_chunk = ( gfile->pos+size-1 )/chunksize;
total_chunks = last_chunk - first_chunk + 1;
chunks = gridfile_get_chunks( gfile, first_chunk, total_chunks );
for ( i = 0; i < total_chunks; i++ ) {
mongo_cursor_next( chunks );
chunk = chunks->current;
bson_find( &it, &chunk, "data" );
chunk_len = bson_iterator_bin_len( &it );
chunk_data = bson_iterator_bin_data( &it );
if ( i == 0 ) {
/* First chunk: skip the bytes before the current position. */
chunk_data += ( gfile->pos )%chunksize;
chunk_len -= ( gfile->pos )%chunksize;
}
if ( bytes_left > chunk_len ) {
memcpy( buf, chunk_data, chunk_len );
bytes_left -= chunk_len;
buf += chunk_len;
} else {
/* Last chunk: copy only what remains of the request. */
memcpy( buf, chunk_data, bytes_left );
}
}
mongo_cursor_destroy( chunks );
gfile->pos = gfile->pos + size;
return size;
}
/* Move the read position to `offset`, clamped to the end of the file.
   Returns the resulting position. */
MONGO_EXPORT gridfs_offset gridfile_seek( gridfile *gfile, gridfs_offset offset ) {
    gridfs_offset total = gridfile_get_contentlength( gfile );
    gfile->pos = ( offset > total ) ? total : offset;
    return gfile->pos;
}

View File

@@ -1,332 +0,0 @@
/** @file gridfs.h
*
* @brief GridFS declarations
*
* */
/* Copyright 2009-2012 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongo.h"
#ifndef MONGO_GRIDFS_H_
#define MONGO_GRIDFS_H_
enum {DEFAULT_CHUNK_SIZE = 256 * 1024};
typedef uint64_t gridfs_offset;
/* A GridFS represents a single collection of GridFS files in the database. */
typedef struct {
    mongo *client;         /**> The client to db-connection. */
    const char *dbname;    /**> The root database name */
    const char *prefix;    /**> The prefix of the GridFS's collections, default is NULL */
    const char *files_ns;  /**> The namespace where the file's metadata is stored */
    const char *chunks_ns; /**> The namespace where the file's data is stored in chunks */
} gridfs;
/* A GridFile is a single GridFS file. */
typedef struct {
    gridfs *gfs;        /**> The GridFS where the GridFile is located */
    bson *meta;         /**> The GridFile's bson object where all its metadata is located */
    gridfs_offset pos;  /**> The current read/write offset in the file */
    bson_oid_t id;      /**> The files_id of the gridfile */
    char *remote_name;  /**> The name of the gridfile as a string */
    char *content_type; /**> The gridfile's content type */
    gridfs_offset length; /**> The length of this gridfile */
    int chunk_num;      /**> The number of the current chunk being written to */
    char *pending_data; /**> A buffer storing data still to be written to chunks */
    int pending_len;    /**> Length of pending_data buffer */
} gridfile;
MONGO_EXPORT gridfs* gridfs_create();
MONGO_EXPORT void gridfs_dispose(gridfs* gfs);
MONGO_EXPORT gridfile* gridfile_create();
MONGO_EXPORT void gridfile_dispose(gridfile* gf);
MONGO_EXPORT void gridfile_get_descriptor(gridfile* gf, bson* out);
/**
* Initializes a GridFS object
* @param client - db connection
* @param dbname - database name
* @param prefix - collection prefix, default is fs if NULL or empty
* @param gfs - the GridFS object to initialize
*
* @return - MONGO_OK or MONGO_ERROR.
*/
MONGO_EXPORT int gridfs_init( mongo *client, const char *dbname,
const char *prefix, gridfs *gfs );
/**
* Destroys a GridFS object. Call this when finished with
* the object.
*
* @param gfs a grid
*/
MONGO_EXPORT void gridfs_destroy( gridfs *gfs );
/**
* Initializes a gridfile for writing incrementally with gridfs_write_buffer.
* Once initialized, you can write any number of buffers with gridfs_write_buffer.
* When done, you must call gridfs_writer_done to save the file metadata.
*
*/
MONGO_EXPORT void gridfile_writer_init( gridfile *gfile, gridfs *gfs, const char *remote_name,
const char *content_type );
/**
* Write to a GridFS file incrementally. You can call this function any number
* of times with a new buffer each time. This allows you to effectively
* stream to a GridFS file. When finished, be sure to call gridfs_writer_done.
*
*/
MONGO_EXPORT void gridfile_write_buffer( gridfile *gfile, const char *data,
gridfs_offset length );
/**
* Signal that writing of this gridfile is complete by
* writing any buffered chunks along with the entry in the
* files collection.
*
* @return - MONGO_OK or MONGO_ERROR.
*/
MONGO_EXPORT int gridfile_writer_done( gridfile *gfile );
/**
* Store a buffer as a GridFS file.
* @param gfs - the working GridFS
* @param data - pointer to buffer to store in GridFS
* @param length - length of the buffer
* @param remotename - filename for use in the database
* @param contenttype - optional MIME type for this object
*
* @return - MONGO_OK or MONGO_ERROR.
*/
MONGO_EXPORT int gridfs_store_buffer( gridfs *gfs, const char *data, gridfs_offset length,
const char *remotename,
const char *contenttype );
/**
* Open the file referenced by filename and store it as a GridFS file.
* @param gfs - the working GridFS
* @param filename - local filename relative to the process
* @param remotename - optional filename for use in the database
* @param contenttype - optional MIME type for this object
*
* @return - MONGO_OK or MONGO_ERROR.
*/
MONGO_EXPORT int gridfs_store_file( gridfs *gfs, const char *filename,
const char *remotename, const char *contenttype );
/**
* Removes the files referenced by filename from the db
* @param gfs - the working GridFS
* @param filename - the filename of the file/s to be removed
*/
MONGO_EXPORT void gridfs_remove_filename( gridfs *gfs, const char *filename );
/**
* Find the first file matching the provided query within the
* GridFS files collection, and return the file as a GridFile.
*
* @param gfs - the working GridFS
* @param query - a pointer to the bson with the query data
* @param gfile - the output GridFile to be initialized
*
* @return MONGO_OK if successful, MONGO_ERROR otherwise
*/
MONGO_EXPORT int gridfs_find_query( gridfs *gfs, bson *query, gridfile *gfile );
/**
* Find the first file referenced by filename within the GridFS
* and return it as a GridFile
* @param gfs - the working GridFS
* @param filename - filename of the file to find
* @param gfile - the output GridFile to be initialized
*
* @return MONGO_OK or MONGO_ERROR.
*/
MONGO_EXPORT int gridfs_find_filename( gridfs *gfs, const char *filename, gridfile *gfile );
/**
* Initializes a GridFile containing the GridFS and file bson
* @param gfs - the GridFS where the GridFile is located
* @param meta - the file object
* @param gfile - the output GridFile that is being initialized
*
* @return - MONGO_OK or MONGO_ERROR.
*/
MONGO_EXPORT int gridfile_init( gridfs *gfs, bson *meta, gridfile *gfile );
/**
* Destroys the GridFile
*
* @param gfile - the GridFile being destroyed
*/
MONGO_EXPORT void gridfile_destroy( gridfile *gfile );
/**
* Returns whether or not the GridFile exists
* @param gfile - the GridFile being examined
*/
MONGO_EXPORT bson_bool_t gridfile_exists( gridfile *gfile );
/**
* Returns the filename of GridFile
* @param gfile - the working GridFile
*
* @return - the filename of the Gridfile
*/
MONGO_EXPORT const char *gridfile_get_filename( gridfile *gfile );
/**
* Returns the size of the chunks of the GridFile
* @param gfile - the working GridFile
*
* @return - the size of the chunks of the Gridfile
*/
MONGO_EXPORT int gridfile_get_chunksize( gridfile *gfile );
/**
* Returns the length of GridFile's data
*
* @param gfile - the working GridFile
*
* @return - the length of the Gridfile's data
*/
MONGO_EXPORT gridfs_offset gridfile_get_contentlength( gridfile *gfile );
/**
* Returns the MIME type of the GridFile
*
* @param gfile - the working GridFile
*
* @return - the MIME type of the Gridfile
* (NULL if no type specified)
*/
MONGO_EXPORT const char *gridfile_get_contenttype( gridfile *gfile );
/**
* Returns the upload date of GridFile
*
* @param gfile - the working GridFile
*
* @return - the upload date of the Gridfile
*/
MONGO_EXPORT bson_date_t gridfile_get_uploaddate( gridfile *gfile );
/**
* Returns the MD5 of GridFile
*
* @param gfile - the working GridFile
*
* @return - the MD5 of the Gridfile
*/
MONGO_EXPORT const char *gridfile_get_md5( gridfile *gfile );
/**
* Returns the field in GridFile specified by name
*
* @param gfile - the working GridFile
* @param name - the name of the field to be returned
*
* @return - the data of the field specified
* (NULL if none exists)
*/
const char *gridfile_get_field( gridfile *gfile,
const char *name );
/**
* Returns a boolean field in GridFile specified by name
* @param gfile - the working GridFile
* @param name - the name of the field to be returned
*
* @return - the boolean of the field specified
* (false if none exists)
*/
bson_bool_t gridfile_get_boolean( gridfile *gfile,
const char *name );
/**
* Returns the metadata of GridFile
* @param gfile - the working GridFile
*
* @return - the metadata of the Gridfile in a bson object
* (an empty bson is returned if none exists)
*/
MONGO_EXPORT void gridfile_get_metadata( gridfile *gfile, bson* out );
/**
* Returns the number of chunks in the GridFile
* @param gfile - the working GridFile
*
* @return - the number of chunks in the Gridfile
*/
MONGO_EXPORT int gridfile_get_numchunks( gridfile *gfile );
/**
* Returns chunk n of GridFile
* @param gfile - the working GridFile
*
* @return - the nth chunk of the Gridfile
*/
MONGO_EXPORT void gridfile_get_chunk( gridfile *gfile, int n, bson* out );
/**
* Returns a mongo_cursor of *size* chunks starting with chunk *start*
*
* @param gfile - the working GridFile
* @param start - the first chunk in the cursor
* @param size - the number of chunks to be returned
*
* @return - mongo_cursor of the chunks (must be destroyed after use)
*/
MONGO_EXPORT mongo_cursor *gridfile_get_chunks( gridfile *gfile, int start, int size );
/**
* Writes the GridFile to a stream
*
* @param gfile - the working GridFile
* @param stream - the file stream to write to
*/
MONGO_EXPORT gridfs_offset gridfile_write_file( gridfile *gfile, FILE *stream );
/**
* Reads length bytes from the GridFile to a buffer
* and updates the position in the file.
* (assumes the buffer is large enough)
* (if size is greater than EOF gridfile_read reads until EOF)
*
* @param gfile - the working GridFile
* @param size - the amount of bytes to be read
* @param buf - the buffer to read to
*
* @return - the number of bytes read
*/
MONGO_EXPORT gridfs_offset gridfile_read( gridfile *gfile, gridfs_offset size, char *buf );
/**
* Updates the position in the file
* (If the offset goes beyond the contentlength,
* the position is updated to the end of the file.)
*
* @param gfile - the working GridFile
* @param offset - the position to update to
*
* @return - resulting offset location
*/
MONGO_EXPORT gridfs_offset gridfile_seek( gridfile *gfile, gridfs_offset offset );
#endif

View File

@@ -1,381 +0,0 @@
/*
Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved.
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
L. Peter Deutsch
ghost@aladdin.com
*/
/* $Id: md5.c,v 1.6 2002/04/13 19:20:28 lpd Exp $ */
/*
Independent implementation of MD5 (RFC 1321).
This code implements the MD5 Algorithm defined in RFC 1321, whose
text is available at
http://www.ietf.org/rfc/rfc1321.txt
The code is derived from the text of the RFC, including the test suite
(section A.5) but excluding the rest of Appendix A. It does not include
any code or documentation that is identified in the RFC as being
copyrighted.
The original and principal author of md5.c is L. Peter Deutsch
<ghost@aladdin.com>. Other authors are noted in the change history
that follows (in reverse chronological order):
2002-04-13 lpd Clarified derivation from RFC 1321; now handles byte order
either statically or dynamically; added missing #include <string.h>
in library.
2002-03-11 lpd Corrected argument list for main(), and added int return
type, in test program and T value program.
2002-02-21 lpd Added missing #include <stdio.h> in test program.
2000-07-03 lpd Patched to eliminate warnings about "constant is
unsigned in ANSI C, signed in traditional"; made test program
self-checking.
1999-11-04 lpd Edited comments slightly for automatic TOC extraction.
1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5).
1999-05-03 lpd Original version.
*/
#include "md5.h"
#include <string.h>
#undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */
#ifdef MONGO_BIG_ENDIAN
# define BYTE_ORDER 1
#else
# define BYTE_ORDER -1
#endif
#define T_MASK ((mongo_md5_word_t)~0)
#define T1 /* 0xd76aa478 */ (T_MASK ^ 0x28955b87)
#define T2 /* 0xe8c7b756 */ (T_MASK ^ 0x173848a9)
#define T3 0x242070db
#define T4 /* 0xc1bdceee */ (T_MASK ^ 0x3e423111)
#define T5 /* 0xf57c0faf */ (T_MASK ^ 0x0a83f050)
#define T6 0x4787c62a
#define T7 /* 0xa8304613 */ (T_MASK ^ 0x57cfb9ec)
#define T8 /* 0xfd469501 */ (T_MASK ^ 0x02b96afe)
#define T9 0x698098d8
#define T10 /* 0x8b44f7af */ (T_MASK ^ 0x74bb0850)
#define T11 /* 0xffff5bb1 */ (T_MASK ^ 0x0000a44e)
#define T12 /* 0x895cd7be */ (T_MASK ^ 0x76a32841)
#define T13 0x6b901122
#define T14 /* 0xfd987193 */ (T_MASK ^ 0x02678e6c)
#define T15 /* 0xa679438e */ (T_MASK ^ 0x5986bc71)
#define T16 0x49b40821
#define T17 /* 0xf61e2562 */ (T_MASK ^ 0x09e1da9d)
#define T18 /* 0xc040b340 */ (T_MASK ^ 0x3fbf4cbf)
#define T19 0x265e5a51
#define T20 /* 0xe9b6c7aa */ (T_MASK ^ 0x16493855)
#define T21 /* 0xd62f105d */ (T_MASK ^ 0x29d0efa2)
#define T22 0x02441453
#define T23 /* 0xd8a1e681 */ (T_MASK ^ 0x275e197e)
#define T24 /* 0xe7d3fbc8 */ (T_MASK ^ 0x182c0437)
#define T25 0x21e1cde6
#define T26 /* 0xc33707d6 */ (T_MASK ^ 0x3cc8f829)
#define T27 /* 0xf4d50d87 */ (T_MASK ^ 0x0b2af278)
#define T28 0x455a14ed
#define T29 /* 0xa9e3e905 */ (T_MASK ^ 0x561c16fa)
#define T30 /* 0xfcefa3f8 */ (T_MASK ^ 0x03105c07)
#define T31 0x676f02d9
#define T32 /* 0x8d2a4c8a */ (T_MASK ^ 0x72d5b375)
#define T33 /* 0xfffa3942 */ (T_MASK ^ 0x0005c6bd)
#define T34 /* 0x8771f681 */ (T_MASK ^ 0x788e097e)
#define T35 0x6d9d6122
#define T36 /* 0xfde5380c */ (T_MASK ^ 0x021ac7f3)
#define T37 /* 0xa4beea44 */ (T_MASK ^ 0x5b4115bb)
#define T38 0x4bdecfa9
#define T39 /* 0xf6bb4b60 */ (T_MASK ^ 0x0944b49f)
#define T40 /* 0xbebfbc70 */ (T_MASK ^ 0x4140438f)
#define T41 0x289b7ec6
#define T42 /* 0xeaa127fa */ (T_MASK ^ 0x155ed805)
#define T43 /* 0xd4ef3085 */ (T_MASK ^ 0x2b10cf7a)
#define T44 0x04881d05
#define T45 /* 0xd9d4d039 */ (T_MASK ^ 0x262b2fc6)
#define T46 /* 0xe6db99e5 */ (T_MASK ^ 0x1924661a)
#define T47 0x1fa27cf8
#define T48 /* 0xc4ac5665 */ (T_MASK ^ 0x3b53a99a)
#define T49 /* 0xf4292244 */ (T_MASK ^ 0x0bd6ddbb)
#define T50 0x432aff97
#define T51 /* 0xab9423a7 */ (T_MASK ^ 0x546bdc58)
#define T52 /* 0xfc93a039 */ (T_MASK ^ 0x036c5fc6)
#define T53 0x655b59c3
#define T54 /* 0x8f0ccc92 */ (T_MASK ^ 0x70f3336d)
#define T55 /* 0xffeff47d */ (T_MASK ^ 0x00100b82)
#define T56 /* 0x85845dd1 */ (T_MASK ^ 0x7a7ba22e)
#define T57 0x6fa87e4f
#define T58 /* 0xfe2ce6e0 */ (T_MASK ^ 0x01d3191f)
#define T59 /* 0xa3014314 */ (T_MASK ^ 0x5cfebceb)
#define T60 0x4e0811a1
#define T61 /* 0xf7537e82 */ (T_MASK ^ 0x08ac817d)
#define T62 /* 0xbd3af235 */ (T_MASK ^ 0x42c50dca)
#define T63 0x2ad7d2bb
#define T64 /* 0xeb86d391 */ (T_MASK ^ 0x14792c6e)
/* Core MD5 transform (RFC 1321): fold one 64-byte input block into the
   running digest state pms->abcd. Byte order is handled statically when
   BYTE_ORDER is +/-1, or probed at run time when BYTE_ORDER == 0. */
static void
mongo_md5_process(mongo_md5_state_t *pms, const mongo_md5_byte_t *data /*[64]*/)
{
    mongo_md5_word_t
        a = pms->abcd[0], b = pms->abcd[1],
        c = pms->abcd[2], d = pms->abcd[3];
    mongo_md5_word_t t;
#if BYTE_ORDER > 0
    /* Define storage only for big-endian CPUs. */
    mongo_md5_word_t X[16];
#else
    /* Define storage for little-endian or both types of CPUs. */
    mongo_md5_word_t xbuf[16];
    const mongo_md5_word_t *X;
#endif
    {
#if BYTE_ORDER == 0
        /*
         * Determine dynamically whether this is a big-endian or
         * little-endian machine, since we can use a more efficient
         * algorithm on the latter.
         */
        static const int w = 1;
        if (*((const mongo_md5_byte_t *)&w)) /* dynamic little-endian */
#endif
#if BYTE_ORDER <= 0 /* little-endian */
        {
            /*
             * On little-endian machines, we can process properly aligned
             * data without copying it.
             */
            if (!((data - (const mongo_md5_byte_t *)0) & 3)) {
                /* data are properly aligned */
                X = (const mongo_md5_word_t *)data;
            } else {
                /* not aligned */
                memcpy(xbuf, data, 64);
                X = xbuf;
            }
        }
#endif
#if BYTE_ORDER == 0
        else /* dynamic big-endian */
#endif
#if BYTE_ORDER >= 0 /* big-endian */
        {
            /*
             * On big-endian machines, we must arrange the bytes in the
             * right order.
             */
            const mongo_md5_byte_t *xp = data;
            int i;
# if BYTE_ORDER == 0
            X = xbuf; /* (dynamic only) */
# else
# define xbuf X /* (static only) */
# endif
            for (i = 0; i < 16; ++i, xp += 4)
                xbuf[i] = xp[0] + (xp[1] << 8) + (xp[2] << 16) + (xp[3] << 24);
        }
#endif
    }
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
    /* Round 1. */
    /* Let [abcd k s i] denote the operation
       a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */
#define F(x, y, z) (((x) & (y)) | (~(x) & (z)))
#define SET(a, b, c, d, k, s, Ti)\
  t = a + F(b,c,d) + X[k] + Ti;\
  a = ROTATE_LEFT(t, s) + b
    /* Do the following 16 operations. */
    SET(a, b, c, d, 0, 7, T1);
    SET(d, a, b, c, 1, 12, T2);
    SET(c, d, a, b, 2, 17, T3);
    SET(b, c, d, a, 3, 22, T4);
    SET(a, b, c, d, 4, 7, T5);
    SET(d, a, b, c, 5, 12, T6);
    SET(c, d, a, b, 6, 17, T7);
    SET(b, c, d, a, 7, 22, T8);
    SET(a, b, c, d, 8, 7, T9);
    SET(d, a, b, c, 9, 12, T10);
    SET(c, d, a, b, 10, 17, T11);
    SET(b, c, d, a, 11, 22, T12);
    SET(a, b, c, d, 12, 7, T13);
    SET(d, a, b, c, 13, 12, T14);
    SET(c, d, a, b, 14, 17, T15);
    SET(b, c, d, a, 15, 22, T16);
#undef SET
    /* Round 2. */
    /* Let [abcd k s i] denote the operation
       a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */
#define G(x, y, z) (((x) & (z)) | ((y) & ~(z)))
#define SET(a, b, c, d, k, s, Ti)\
  t = a + G(b,c,d) + X[k] + Ti;\
  a = ROTATE_LEFT(t, s) + b
    /* Do the following 16 operations. */
    SET(a, b, c, d, 1, 5, T17);
    SET(d, a, b, c, 6, 9, T18);
    SET(c, d, a, b, 11, 14, T19);
    SET(b, c, d, a, 0, 20, T20);
    SET(a, b, c, d, 5, 5, T21);
    SET(d, a, b, c, 10, 9, T22);
    SET(c, d, a, b, 15, 14, T23);
    SET(b, c, d, a, 4, 20, T24);
    SET(a, b, c, d, 9, 5, T25);
    SET(d, a, b, c, 14, 9, T26);
    SET(c, d, a, b, 3, 14, T27);
    SET(b, c, d, a, 8, 20, T28);
    SET(a, b, c, d, 13, 5, T29);
    SET(d, a, b, c, 2, 9, T30);
    SET(c, d, a, b, 7, 14, T31);
    SET(b, c, d, a, 12, 20, T32);
#undef SET
    /* Round 3. */
    /* Let [abcd k s t] denote the operation
       a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define SET(a, b, c, d, k, s, Ti)\
  t = a + H(b,c,d) + X[k] + Ti;\
  a = ROTATE_LEFT(t, s) + b
    /* Do the following 16 operations. */
    SET(a, b, c, d, 5, 4, T33);
    SET(d, a, b, c, 8, 11, T34);
    SET(c, d, a, b, 11, 16, T35);
    SET(b, c, d, a, 14, 23, T36);
    SET(a, b, c, d, 1, 4, T37);
    SET(d, a, b, c, 4, 11, T38);
    SET(c, d, a, b, 7, 16, T39);
    SET(b, c, d, a, 10, 23, T40);
    SET(a, b, c, d, 13, 4, T41);
    SET(d, a, b, c, 0, 11, T42);
    SET(c, d, a, b, 3, 16, T43);
    SET(b, c, d, a, 6, 23, T44);
    SET(a, b, c, d, 9, 4, T45);
    SET(d, a, b, c, 12, 11, T46);
    SET(c, d, a, b, 15, 16, T47);
    SET(b, c, d, a, 2, 23, T48);
#undef SET
    /* Round 4. */
    /* Let [abcd k s t] denote the operation
       a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */
#define I(x, y, z) ((y) ^ ((x) | ~(z)))
#define SET(a, b, c, d, k, s, Ti)\
  t = a + I(b,c,d) + X[k] + Ti;\
  a = ROTATE_LEFT(t, s) + b
    /* Do the following 16 operations. */
    SET(a, b, c, d, 0, 6, T49);
    SET(d, a, b, c, 7, 10, T50);
    SET(c, d, a, b, 14, 15, T51);
    SET(b, c, d, a, 5, 21, T52);
    SET(a, b, c, d, 12, 6, T53);
    SET(d, a, b, c, 3, 10, T54);
    SET(c, d, a, b, 10, 15, T55);
    SET(b, c, d, a, 1, 21, T56);
    SET(a, b, c, d, 8, 6, T57);
    SET(d, a, b, c, 15, 10, T58);
    SET(c, d, a, b, 6, 15, T59);
    SET(b, c, d, a, 13, 21, T60);
    SET(a, b, c, d, 4, 6, T61);
    SET(d, a, b, c, 11, 10, T62);
    SET(c, d, a, b, 2, 15, T63);
    SET(b, c, d, a, 9, 21, T64);
#undef SET
    /* Then perform the following additions. (That is increment each
       of the four registers by the value it had before this block
       was started.) */
    pms->abcd[0] += a;
    pms->abcd[1] += b;
    pms->abcd[2] += c;
    pms->abcd[3] += d;
}
/* Reset the state to the RFC 1321 initial digest values (A,B,C,D).
   The T_MASK^... spellings avoid "constant is unsigned in ANSI C"
   warnings for values with the high bit set. */
MONGO_EXPORT void
mongo_md5_init(mongo_md5_state_t *pms)
{
pms->count[0] = pms->count[1] = 0;
pms->abcd[0] = 0x67452301;
pms->abcd[1] = /*0xefcdab89*/ T_MASK ^ 0x10325476;
pms->abcd[2] = /*0x98badcfe*/ T_MASK ^ 0x67452301;
pms->abcd[3] = 0x10325476;
}
/* Absorb nbytes of input into the MD5 state: buffer partial 64-byte
   blocks in pms->buf and run the transform on each full block. */
MONGO_EXPORT void
mongo_md5_append(mongo_md5_state_t *pms, const mongo_md5_byte_t *data, int nbytes)
{
const mongo_md5_byte_t *p = data;
int left = nbytes;
/* Bytes already buffered within the current 64-byte block. */
int offset = (pms->count[0] >> 3) & 63;
mongo_md5_word_t nbits = (mongo_md5_word_t)(nbytes << 3);
if (nbytes <= 0)
return;
/* Update the message length (64-bit bit count held in two words). */
pms->count[1] += nbytes >> 29;
pms->count[0] += nbits;
if (pms->count[0] < nbits)
pms->count[1]++; /* carry from the low word */
/* Process an initial partial block. */
if (offset) {
int copy = (offset + nbytes > 64 ? 64 - offset : nbytes);
memcpy(pms->buf + offset, p, copy);
if (offset + copy < 64)
return; /* block still not full; wait for more input */
p += copy;
left -= copy;
mongo_md5_process(pms, pms->buf);
}
/* Process full blocks. */
for (; left >= 64; p += 64, left -= 64)
mongo_md5_process(pms, p);
/* Process a final partial block. */
if (left)
memcpy(pms->buf, p, left);
}
/* Finalize the digest: pad the message per RFC 1321 (0x80 then zeros to
   56 mod 64), append the 64-bit bit count, and emit the 16-byte digest
   little-endian from the abcd state words. */
MONGO_EXPORT void
mongo_md5_finish(mongo_md5_state_t *pms, mongo_md5_byte_t digest[16])
{
static const mongo_md5_byte_t pad[64] = {
0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
mongo_md5_byte_t data[8];
int i;
/* Save the length before padding (appending the pad mutates count). */
for (i = 0; i < 8; ++i)
data[i] = (mongo_md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3));
/* Pad to 56 bytes mod 64. */
mongo_md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1);
/* Append the length. */
mongo_md5_append(pms, data, 8);
for (i = 0; i < 16; ++i)
digest[i] = (mongo_md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3));
}

View File

@@ -1,92 +0,0 @@
/*
Copyright (C) 1999, 2002 Aladdin Enterprises. All rights reserved.
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
L. Peter Deutsch
ghost@aladdin.com
*/
/* $Id: md5.h,v 1.4 2002/04/13 19:20:28 lpd Exp $ */
/*
Independent implementation of MD5 (RFC 1321).
This code implements the MD5 Algorithm defined in RFC 1321, whose
text is available at
http://www.ietf.org/rfc/rfc1321.txt
The code is derived from the text of the RFC, including the test suite
(section A.5) but excluding the rest of Appendix A. It does not include
any code or documentation that is identified in the RFC as being
copyrighted.
The original and principal author of md5.h is L. Peter Deutsch
<ghost@aladdin.com>. Other authors are noted in the change history
that follows (in reverse chronological order):
2002-04-13 lpd Removed support for non-ANSI compilers; removed
references to Ghostscript; clarified derivation from RFC 1321;
now handles byte order either statically or dynamically.
1999-11-04 lpd Edited comments slightly for automatic TOC extraction.
1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5);
added conditionalization for C++ compilation from Martin
Purschke <purschke@bnl.gov>.
1999-05-03 lpd Original version.
*/
#ifndef MONGO_MD5_H_
#define MONGO_MD5_H_
/*
* This package supports both compile-time and run-time determination of CPU
* byte order. If ARCH_IS_BIG_ENDIAN is defined as 0, the code will be
* compiled to run only on little-endian CPUs; if ARCH_IS_BIG_ENDIAN is
* defined as non-zero, the code will be compiled to run only on big-endian
* CPUs; if ARCH_IS_BIG_ENDIAN is not defined, the code will be compiled to
* run on either big- or little-endian CPUs, but will run slightly less
* efficiently on either one than if ARCH_IS_BIG_ENDIAN is defined.
*/
#include "bson.h"
typedef unsigned char mongo_md5_byte_t; /* 8-bit byte */
typedef unsigned int mongo_md5_word_t; /* 32-bit word */
/* Define the state of the MD5 Algorithm. */
typedef struct mongo_md5_state_s {
    mongo_md5_word_t count[2]; /* message length in bits, lsw first */
    mongo_md5_word_t abcd[4];  /* digest buffer (running A,B,C,D state) */
    mongo_md5_byte_t buf[64];  /* accumulate block: buffered partial input */
} mongo_md5_state_t;
#ifdef __cplusplus
extern "C"
{
#endif
/* Initialize the algorithm. */
MONGO_EXPORT void mongo_md5_init(mongo_md5_state_t *pms);
/* Append a string to the message. */
MONGO_EXPORT void mongo_md5_append(mongo_md5_state_t *pms, const mongo_md5_byte_t *data, int nbytes);
/* Finish the message and return the digest. */
MONGO_EXPORT void mongo_md5_finish(mongo_md5_state_t *pms, mongo_md5_byte_t digest[16]);
#ifdef __cplusplus
} /* end extern "C" */
#endif
#endif /* MONGO_MD5_H_ */

File diff suppressed because it is too large Load Diff

View File

@@ -1,824 +0,0 @@
/**
* @file mongo.h
* @brief Main MongoDB Declarations
*/
/* Copyright 2009-2012 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGO_H_
#define MONGO_H_
#include "bson.h"
MONGO_EXTERN_C_START
/* Driver version. */
#define MONGO_MAJOR 0
#define MONGO_MINOR 6
#define MONGO_PATCH 0
/* Status codes returned by most driver functions. */
#define MONGO_OK 0
#define MONGO_ERROR (-1) /* parenthesized so the macro expands safely inside expressions */
#define MONGO_DEFAULT_PORT 27017
/* Parenthesized: without parens, an expression such as
 * (x % MONGO_DEFAULT_MAX_BSON_SIZE) would parse as ((x % 4) * 1024 * 1024). */
#define MONGO_DEFAULT_MAX_BSON_SIZE (4 * 1024 * 1024)
/* Size of the fixed error-string buffers on struct mongo. */
#define MONGO_ERR_LEN 128
/* Driver-level error codes, stored in conn->err. */
typedef enum mongo_error_t {
    MONGO_CONN_SUCCESS = 0, /**< Connection success! */
    MONGO_CONN_NO_SOCKET, /**< Could not create a socket. */
    MONGO_CONN_FAIL, /**< An error occurred while calling connect(). */
    MONGO_CONN_ADDR_FAIL, /**< An error occurred while calling getaddrinfo(). */
    MONGO_CONN_NOT_MASTER, /**< Warning: connected to a non-master node (read-only). */
    MONGO_CONN_BAD_SET_NAME, /**< Given rs name doesn't match this replica set. */
    MONGO_CONN_NO_PRIMARY, /**< Can't find primary in replica set. Connection closed. */
    MONGO_IO_ERROR, /**< An error occurred while reading or writing on the socket. */
    MONGO_SOCKET_ERROR, /**< Other socket error. */
    MONGO_READ_SIZE_ERROR, /**< The response is not the expected length. */
    MONGO_COMMAND_FAILED, /**< The command returned with 'ok' value of 0. */
    MONGO_WRITE_ERROR, /**< Write with given write_concern returned an error. */
    MONGO_NS_INVALID, /**< The name for the ns (database or collection) is invalid. */
    MONGO_BSON_INVALID, /**< BSON not valid for the specified op. */
    MONGO_BSON_NOT_FINISHED, /**< BSON object has not been finished. */
    MONGO_BSON_TOO_LARGE, /**< BSON object exceeds max BSON size. */
    MONGO_WRITE_CONCERN_INVALID /**< Supplied write concern object is invalid. */
} mongo_error_t;
/* Cursor-level error codes, stored in cursor->err. */
typedef enum mongo_cursor_error_t {
    MONGO_CURSOR_EXHAUSTED, /**< The cursor has no more results. */
    MONGO_CURSOR_INVALID, /**< The cursor has timed out or is not recognized. */
    MONGO_CURSOR_PENDING, /**< Tailable cursor still alive but no data. */
    MONGO_CURSOR_QUERY_FAIL, /**< The server returned an '$err' object, indicating query failure.
                                  See conn->lasterrcode and conn->lasterrstr for details. */
    MONGO_CURSOR_BSON_ERROR /**< Something is wrong with the BSON provided. See conn->err
                                 for details. */
} mongo_cursor_error_t;
enum mongo_cursor_flags {
MONGO_CURSOR_MUST_FREE = 1, /**< mongo_cursor_destroy should free cursor. */
MONGO_CURSOR_QUERY_SENT = ( 1<<1 ) /**< Initial query has been sent. */
};
enum mongo_index_opts {
MONGO_INDEX_UNIQUE = ( 1<<0 ),
MONGO_INDEX_DROP_DUPS = ( 1<<2 ),
MONGO_INDEX_BACKGROUND = ( 1<<3 ),
MONGO_INDEX_SPARSE = ( 1<<4 )
};
enum mongo_update_opts {
MONGO_UPDATE_UPSERT = 0x1,
MONGO_UPDATE_MULTI = 0x2,
MONGO_UPDATE_BASIC = 0x4
};
enum mongo_insert_opts {
MONGO_CONTINUE_ON_ERROR = 0x1
};
enum mongo_cursor_opts {
MONGO_TAILABLE = ( 1<<1 ), /**< Create a tailable cursor. */
MONGO_SLAVE_OK = ( 1<<2 ), /**< Allow queries on a non-primary node. */
MONGO_NO_CURSOR_TIMEOUT = ( 1<<4 ), /**< Disable cursor timeouts. */
MONGO_AWAIT_DATA = ( 1<<5 ), /**< Momentarily block for more data. */
MONGO_EXHAUST = ( 1<<6 ), /**< Stream in multiple 'more' packages. */
MONGO_PARTIAL = ( 1<<7 ) /**< Allow reads even if a shard is down. */
};
enum mongo_operations {
MONGO_OP_MSG = 1000,
MONGO_OP_UPDATE = 2001,
MONGO_OP_INSERT = 2002,
MONGO_OP_QUERY = 2004,
MONGO_OP_GET_MORE = 2005,
MONGO_OP_DELETE = 2006,
MONGO_OP_KILL_CURSORS = 2007
};
/* Wire-format structs: packed to 1-byte alignment so their in-memory
 * layout matches the MongoDB wire protocol byte-for-byte. */
#pragma pack(1)
/* Standard header preceding every wire-protocol message. */
typedef struct {
    int len; /* total message length in bytes, including this header */
    int id; /* requestID chosen by the sender */
    int responseTo; /* requestID this message is replying to (responses only) */
    int op; /* opcode; one of enum mongo_operations */
} mongo_header;
/* Outgoing message: header followed by a variable-length body. */
typedef struct {
    mongo_header head;
    char data; /* first byte of the variable-length body (flexible tail) */
} mongo_message;
typedef struct {
    int flag; /* OP_REPLY responseFlags bitfield; non-zero generally signals a problem.
                 NOTE(review): per the wire protocol bit 0 = CursorNotFound,
                 bit 1 = QueryFailure -- confirm against the spec. */
    int64_t cursorID; /* server-side cursor id (0 when no open cursor) */
    int start; /* starting offset of this batch within the cursor */
    int num; /* number of documents returned in this reply */
} mongo_reply_fields;
/* Incoming OP_REPLY: header, reply fields, then the returned documents. */
typedef struct {
    mongo_header head;
    mongo_reply_fields fields;
    char objs; /* first byte of the concatenated BSON documents (flexible tail) */
} mongo_reply;
#pragma pack()
/* One node address; nodes form a singly-linked list via 'next'. */
typedef struct mongo_host_port {
    char host[255]; /* hostname or numeric address, NUL-terminated */
    int port;
    struct mongo_host_port *next; /* next node in the list, or NULL */
} mongo_host_port;
typedef struct mongo_write_concern {
    int w; /**< Number of nodes this write should be replicated to. */
    int wtimeout; /**< Number of milliseconds before replication timeout. */
    int j; /**< If non-zero, block until the journal sync. */
    int fsync; /**< Same as j with journaling enabled; otherwise, call fsync. */
    const char *mode; /**< Either "majority" or a getlasterrormode. Overrides w value. */
    bson *cmd; /**< The BSON object representing the getlasterror command. */
} mongo_write_concern;
/* Replica-set bookkeeping attached to a connection. */
typedef struct {
    mongo_host_port *seeds; /**< List of seeds provided by the user. */
    mongo_host_port *hosts; /**< List of host/ports given by the replica set */
    char *name; /**< Name of the replica set. */
    bson_bool_t primary_connected; /**< Primary node connection status. */
} mongo_replset;
typedef struct mongo {
mongo_host_port *primary; /**< Primary connection info. */
mongo_replset *replset; /**< replset object if connected to a replica set. */
int sock; /**< Socket file descriptor. */
int flags; /**< Flags on this connection object. */
int conn_timeout_ms; /**< Connection timeout in milliseconds. */
int op_timeout_ms; /**< Read and write timeout in milliseconds. */
int max_bson_size; /**< Largest BSON object allowed on this connection. */
bson_bool_t connected; /**< Connection status. */
mongo_write_concern *write_concern; /**< The default write concern. */
mongo_error_t err; /**< Most recent driver error code. */
int errcode; /**< Most recent errno or WSAGetLastError(). */
char errstr[MONGO_ERR_LEN]; /**< String version of error. */
int lasterrcode; /**< getlasterror code from the server. */
char lasterrstr[MONGO_ERR_LEN]; /**< getlasterror string from the server. */
} mongo;
/* Query cursor state. Initialize with mongo_cursor_init() and release
 * with mongo_cursor_destroy(). */
typedef struct {
    mongo_reply *reply; /**< reply is owned by cursor */
    mongo *conn; /**< connection is *not* owned by cursor */
    const char *ns; /**< owned by cursor */
    int flags; /**< Flags used internally by this driver (enum mongo_cursor_flags). */
    int seen; /**< Number returned so far. */
    bson current; /**< This cursor's current bson object. */
    mongo_cursor_error_t err; /**< Errors on this cursor. */
    const bson *query; /**< Query spec BSON. NOTE(review): const -- appears not owned by the cursor; confirm. */
    const bson *fields;/**< Field-selection BSON. NOTE(review): appears not owned by the cursor; confirm. */
    int options; /**< Bitfield containing cursor options (enum mongo_cursor_opts). */
    int limit; /**< Maximum number of documents to return. */
    int skip; /**< Number of documents to skip. */
} mongo_cursor;
/*********************************************************************
Connection API
**********************************************************************/
/** Initialize sockets for Windows.
*/
MONGO_EXPORT void mongo_init_sockets();
/**
* Initialize a new mongo connection object. You must initialize each mongo
* object using this function.
*
* @note When finished, you must pass this object to
* mongo_destroy( ).
*
* @param conn a mongo connection object allocated on the stack
* or heap.
*/
MONGO_EXPORT void mongo_init( mongo *conn );
/**
* Connect to a single MongoDB server.
*
* @param conn a mongo object.
* @param host a numerical network address or a network hostname.
* @param port the port to connect to.
*
* @return MONGO_OK or MONGO_ERROR on failure. On failure, a constant of type
* mongo_error_t will be set on the conn->err field.
*/
MONGO_EXPORT int mongo_connect( mongo *conn , const char *host, int port );
/**
* Set up this connection object for connecting to a replica set.
* To connect, pass the object to mongo_replset_connect().
*
* @param conn a mongo object.
* @param name the name of the replica set to connect to.
* */
MONGO_EXPORT void mongo_replset_init( mongo *conn, const char *name );
/**
* Add a seed node to the replica set connection object.
*
* You must specify at least one seed node before connecting to a replica set.
*
* @param conn a mongo object.
* @param host a numerical network address or a network hostname.
* @param port the port to connect to.
*/
MONGO_EXPORT void mongo_replset_add_seed( mongo *conn, const char *host, int port );
/**
* Utility function for converting a host-port string to a mongo_host_port.
*
* @param host_string a string containing either a host or a host and port separated
* by a colon.
* @param host_port the mongo_host_port object to write the result to.
*/
void mongo_parse_host( const char *host_string, mongo_host_port *host_port );
/**
* Utility function for validation database and collection names.
*
* @param conn a mongo object.
*
* @return MONGO_OK or MONGO_ERROR on failure. On failure, a constant of type
* mongo_conn_return_t will be set on the conn->err field.
*
*/
MONGO_EXPORT int mongo_validate_ns( mongo *conn, const char *ns );
/**
* Connect to a replica set.
*
* Before passing a connection object to this function, you must already have called
* mongo_set_replset and mongo_replset_add_seed.
*
* @param conn a mongo object.
*
* @return MONGO_OK or MONGO_ERROR on failure. On failure, a constant of type
* mongo_conn_return_t will be set on the conn->err field.
*/
MONGO_EXPORT int mongo_replset_connect( mongo *conn );
/** Set a timeout for operations on this connection. This
* is a platform-specific feature, and only work on *nix
* system. You must also compile for linux to support this.
*
* @param conn a mongo object.
* @param millis timeout time in milliseconds.
*
* @return MONGO_OK. On error, return MONGO_ERROR and
* set the conn->err field.
*/
MONGO_EXPORT int mongo_set_op_timeout( mongo *conn, int millis );
/**
* Ensure that this connection is healthy by performing
* a round-trip to the server.
*
* @param conn a mongo connection
*
* @return MONGO_OK if connected; otherwise, MONGO_ERROR.
*/
MONGO_EXPORT int mongo_check_connection( mongo *conn );
/**
* Try reconnecting to the server using the existing connection settings.
*
* This function will disconnect the current socket. If you've authenticated,
* you'll need to re-authenticate after calling this function.
*
* @param conn a mongo object.
*
* @return MONGO_OK or MONGO_ERROR and
* set the conn->err field.
*/
MONGO_EXPORT int mongo_reconnect( mongo *conn );
/**
* Close the current connection to the server. After calling
* this function, you may call mongo_reconnect with the same
* connection object.
*
* @param conn a mongo object.
*/
MONGO_EXPORT void mongo_disconnect( mongo *conn );
/**
* Close any existing connection to the server and free all allocated
* memory associated with the conn object.
*
* You must always call this function when finished with the connection object.
*
* @param conn a mongo object.
*/
MONGO_EXPORT void mongo_destroy( mongo *conn );
/**
* Specify the write concern object that this connection should use
* by default for all writes (inserts, updates, and deletes). This value
* can be overridden by passing a write_concern object to any write function.
*
* @param conn a mongo object.
* @param write_concern pointer to a write concern object.
*
*/
MONGO_EXPORT void mongo_set_write_concern( mongo *conn,
mongo_write_concern *write_concern );
/*********************************************************************
CRUD API
**********************************************************************/
/**
* Insert a BSON document into a MongoDB server. This function
* will fail if the supplied BSON struct is not UTF-8 or if
* the keys are invalid for insert (contain '.' or start with '$').
*
* The default write concern set on the conn object will be used.
*
* @param conn a mongo object.
* @param ns the namespace.
* @param data the bson data.
* @param custom_write_concern a write concern object that will
* override any write concern set on the conn object.
*
* @return MONGO_OK or MONGO_ERROR. If the conn->err
* field is MONGO_BSON_INVALID, check the err field
* on the bson struct for the reason.
*/
MONGO_EXPORT int mongo_insert( mongo *conn, const char *ns, const bson *data,
mongo_write_concern *custom_write_concern );
/**
* Insert a batch of BSON documents into a MongoDB server. This function
* will fail if any of the documents to be inserted is invalid.
*
* The default write concern set on the conn object will be used.
*
* @param conn a mongo object.
* @param ns the namespace.
* @param data the bson data.
* @param num the number of documents in data.
* @param custom_write_concern a write concern object that will
* override any write concern set on the conn object.
* @param flags flags on this batch insert. Currently, this value
* may be 0 or MONGO_CONTINUE_ON_ERROR, which will cause the
* batch insert to continue even if a given insert in the batch fails.
*
* @return MONGO_OK or MONGO_ERROR.
*
*/
MONGO_EXPORT int mongo_insert_batch( mongo *conn, const char *ns,
const bson **data, int num, mongo_write_concern *custom_write_concern,
int flags );
/**
* Update a document in a MongoDB server.
*
* The default write concern set on the conn object will be used.
*
* @param conn a mongo object.
* @param ns the namespace.
* @param cond the bson update query.
* @param op the bson update data.
* @param flags flags for the update.
* @param custom_write_concern a write concern object that will
* override any write concern set on the conn object.
*
* @return MONGO_OK or MONGO_ERROR with error stored in conn object.
*
*/
MONGO_EXPORT int mongo_update( mongo *conn, const char *ns, const bson *cond,
const bson *op, int flags, mongo_write_concern *custom_write_concern );
/**
* Remove a document from a MongoDB server.
*
* The default write concern set on the conn object will be used.
*
* @param conn a mongo object.
* @param ns the namespace.
* @param cond the bson query.
* @param custom_write_concern a write concern object that will
* override any write concern set on the conn object.
*
* @return MONGO_OK or MONGO_ERROR with error stored in conn object.
*/
MONGO_EXPORT int mongo_remove( mongo *conn, const char *ns, const bson *cond,
mongo_write_concern *custom_write_concern );
/*********************************************************************
Write Concern API
**********************************************************************/
/**
* Initialize a mongo_write_concern object. Effectively zeroes out the struct.
*
*/
MONGO_EXPORT void mongo_write_concern_init( mongo_write_concern *write_concern );
/**
* Finish this write concern object by serializing the literal getlasterror
* command that will be sent to the server.
*
* You must call mongo_write_concern_destroy() to free the serialized BSON.
*
*/
MONGO_EXPORT int mongo_write_concern_finish( mongo_write_concern *write_concern );
/**
* Free the write_concern object (specifically, the BSON that it owns).
*
*/
MONGO_EXPORT void mongo_write_concern_destroy( mongo_write_concern *write_concern );
/*********************************************************************
Cursor API
**********************************************************************/
/**
 * Find documents in a MongoDB server.
 *
 * @param conn a mongo object.
 * @param ns the namespace.
 * @param query the bson query.
 * @param fields a bson document of fields to be returned.
 * @param limit the maximum number of documents to return.
 * @param skip the number of documents to skip.
 * @param options A bitfield containing cursor options.
 *
 * @return A cursor object allocated on the heap or NULL if
 *     an error has occurred. For finer-grained error checking,
 *     use the cursor builder API instead.
 */
MONGO_EXPORT mongo_cursor *mongo_find( mongo *conn, const char *ns, const bson *query,
    const bson *fields, int limit, int skip, int options );
/**
 * Initialize a new cursor object.
 *
 * @param cursor the cursor object to initialize.
 * @param conn a mongo object.
 * @param ns the namespace, represented as the database
 *     name and collection name separated by a dot. e.g., "test.users"
 */
MONGO_EXPORT void mongo_cursor_init( mongo_cursor *cursor, mongo *conn, const char *ns );
/**
* Set the bson object specifying this cursor's query spec. If
* your query is the empty bson object "{}", then you need not
* set this value.
*
* @param cursor
* @param query a bson object representing the query spec. This may
* be either a simple query spec or a complex spec storing values for
* $query, $orderby, $hint, and/or $explain. See
* http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol for details.
*/
MONGO_EXPORT void mongo_cursor_set_query( mongo_cursor *cursor, const bson *query );
/**
* Set the fields to return for this cursor. If you want to return
* all fields, you need not set this value.
*
* @param cursor
* @param fields a bson object representing the fields to return.
* See http://www.mongodb.org/display/DOCS/Retrieving+a+Subset+of+Fields.
*/
MONGO_EXPORT void mongo_cursor_set_fields( mongo_cursor *cursor, const bson *fields );
/**
* Set the number of documents to skip.
*
* @param cursor
* @param skip
*/
MONGO_EXPORT void mongo_cursor_set_skip( mongo_cursor *cursor, int skip );
/**
* Set the number of documents to return.
*
* @param cursor
* @param limit
*/
MONGO_EXPORT void mongo_cursor_set_limit( mongo_cursor *cursor, int limit );
/**
* Set any of the available query options (e.g., MONGO_TAILABLE).
*
* @param cursor
* @param options a bitfield storing query options. See
* mongo_cursor_bitfield_t for available constants.
*/
MONGO_EXPORT void mongo_cursor_set_options( mongo_cursor *cursor, int options );
/**
* Return the current BSON object data as a const char*. This is useful
* for creating bson iterators with bson_iterator_init.
*
* @param cursor
*/
MONGO_EXPORT const char *mongo_cursor_data( mongo_cursor *cursor );
/**
 * Return the cursor's current document as a const bson*. Use this when
 * you need the bson struct itself rather than the raw data pointer
 * returned by mongo_cursor_data.
 *
 * @param cursor
 */
MONGO_EXPORT const bson *mongo_cursor_bson( mongo_cursor *cursor );
/**
* Iterate the cursor, returning the next item. When successful,
* the returned object will be stored in cursor->current;
*
* @param cursor
*
* @return MONGO_OK. On error, returns MONGO_ERROR and sets
* cursor->err with a value of mongo_error_t.
*/
MONGO_EXPORT int mongo_cursor_next( mongo_cursor *cursor );
/**
* Destroy a cursor object. When finished with a cursor, you
* must pass it to this function.
*
* @param cursor the cursor to destroy.
*
* @return MONGO_OK or an error code. On error, check cursor->conn->err
* for errors.
*/
MONGO_EXPORT int mongo_cursor_destroy( mongo_cursor *cursor );
/**
* Find a single document in a MongoDB server.
*
* @param conn a mongo object.
* @param ns the namespace.
* @param query the bson query.
* @param fields a bson document of the fields to be returned.
* @param out a bson document in which to put the query result.
*
*/
/* out can be NULL if you don't care about results. useful for commands */
MONGO_EXPORT int mongo_find_one( mongo *conn, const char *ns, const bson *query,
const bson *fields, bson *out );
/*********************************************************************
Command API and Helpers
**********************************************************************/
/**
* Count the number of documents in a collection matching a query.
*
* @param conn a mongo object.
* @param db the db name.
* @param coll the collection name.
* @param query the BSON query.
*
* @return the number of matching documents. If the command fails,
* MONGO_ERROR is returned.
*/
MONGO_EXPORT double mongo_count( mongo *conn, const char *db, const char *coll,
const bson *query );
/**
* Create a compound index.
*
* @param conn a mongo object.
* @param ns the namespace.
* @param data the bson index data.
* @param options a bitfield for setting index options. Possibilities include
* MONGO_INDEX_UNIQUE, MONGO_INDEX_DROP_DUPS, MONGO_INDEX_BACKGROUND,
* and MONGO_INDEX_SPARSE.
* @param out a bson document containing errors, if any.
*
* @return MONGO_OK if index is created successfully; otherwise, MONGO_ERROR.
*/
MONGO_EXPORT int mongo_create_index( mongo *conn, const char *ns,
const bson *key, int options, bson *out );
/**
 * Create a capped collection.
 *
 * @param conn a mongo object.
 * @param db the database name.
 * @param collection the collection name.
 * @param size the size of the capped collection in bytes.
 * @param max the max number of documents this collection is
 *     allowed to contain. If zero, this argument will be ignored
 *     and the server will use the collection's size to age documents out.
 *     If using this option, ensure that the total size can contain this
 *     number of documents.
 * @param out a BSON document containing the result of the command.
 */
MONGO_EXPORT int mongo_create_capped_collection( mongo *conn, const char *db,
    const char *collection, int size, int max, bson *out );
/**
* Create an index with a single key.
*
* @param conn a mongo object.
* @param ns the namespace.
* @param field the index key.
* @param options index options.
* @param out a BSON document containing errors, if any.
*
* @return true if the index was created.
*/
MONGO_EXPORT bson_bool_t mongo_create_simple_index( mongo *conn, const char *ns,
const char *field, int options, bson *out );
/**
* Run a command on a MongoDB server.
*
* @param conn a mongo object.
* @param db the name of the database.
* @param command the BSON command to run.
* @param out the BSON result of the command.
*
* @return MONGO_OK if the command ran without error.
*/
MONGO_EXPORT int mongo_run_command( mongo *conn, const char *db,
const bson *command, bson *out );
/**
* Run a command that accepts a simple string key and integer value.
*
* @param conn a mongo object.
* @param db the name of the database.
* @param cmd the command to run.
* @param arg the integer argument to the command.
* @param out the BSON result of the command.
*
* @return MONGO_OK or an error code.
*
*/
MONGO_EXPORT int mongo_simple_int_command( mongo *conn, const char *db,
const char *cmd, int arg, bson *out );
/**
* Run a command that accepts a simple string key and value.
*
* @param conn a mongo object.
* @param db the name of the database.
* @param cmd the command to run.
* @param arg the string argument to the command.
* @param out the BSON result of the command.
*
* @return true if the command ran without error.
*
*/
MONGO_EXPORT int mongo_simple_str_command( mongo *conn, const char *db,
const char *cmd, const char *arg, bson *out );
/**
* Drop a database.
*
* @param conn a mongo object.
* @param db the name of the database to drop.
*
* @return MONGO_OK or an error code.
*/
MONGO_EXPORT int mongo_cmd_drop_db( mongo *conn, const char *db );
/**
* Drop a collection.
*
* @param conn a mongo object.
* @param db the name of the database.
* @param collection the name of the collection to drop.
* @param out a BSON document containing the result of the command.
*
* @return true if the collection drop was successful.
*/
MONGO_EXPORT int mongo_cmd_drop_collection( mongo *conn, const char *db,
const char *collection, bson *out );
/**
* Add a database user.
*
* @param conn a mongo object.
* @param db the database in which to add the user.
* @param user the user name
* @param pass the user password
*
* @return MONGO_OK or MONGO_ERROR.
*/
MONGO_EXPORT int mongo_cmd_add_user( mongo *conn, const char *db,
const char *user, const char *pass );
/**
 * Authenticate a user.
 *
 * @param conn a mongo object.
 * @param db the database to authenticate against.
 * @param user the user name to authenticate.
 * @param pass the user's password.
 *
 * @return MONGO_OK on success and MONGO_ERROR on failure.
 */
MONGO_EXPORT int mongo_cmd_authenticate( mongo *conn, const char *db,
    const char *user, const char *pass );
/**
* Check if the current server is a master.
*
* @param conn a mongo object.
* @param out a BSON result of the command.
*
* @return true if the server is a master.
*/
/* return value is master status */
MONGO_EXPORT bson_bool_t mongo_cmd_ismaster( mongo *conn, bson *out );
/**
* Get the error for the last command with the current connection.
*
* @param conn a mongo object.
* @param db the name of the database.
* @param out a BSON object containing the error details.
*
* @return MONGO_OK if no error and MONGO_ERROR on error. On error, check the values
* of conn->lasterrcode and conn->lasterrstr for the error status.
*/
MONGO_EXPORT int mongo_cmd_get_last_error( mongo *conn, const char *db, bson *out );
/**
* Get the most recent error with the current connection.
*
* @param conn a mongo object.
* @param db the name of the database.
* @param out a BSON object containing the error details.
*
* @return MONGO_OK if no error and MONGO_ERROR on error. On error, check the values
* of conn->lasterrcode and conn->lasterrstr for the error status.
*/
MONGO_EXPORT int mongo_cmd_get_prev_error( mongo *conn, const char *db, bson *out );
/**
* Reset the error state for the connection.
*
* @param conn a mongo object.
* @param db the name of the database.
*/
MONGO_EXPORT void mongo_cmd_reset_error( mongo *conn, const char *db );
/*********************************************************************
Utility API
**********************************************************************/
/* Heap-allocation and accessor helpers, mainly for language bindings.
 * Objects from mongo_create()/mongo_cursor_create() must be released with
 * mongo_dispose()/mongo_cursor_dispose() respectively.
 * No-argument functions are declared (void): in C, empty parentheses in a
 * declaration mean "unspecified parameters", not "no parameters".
 * NOTE(review): per-function comments inferred from names and struct mongo
 * fields -- confirm against the implementation. */
MONGO_EXPORT mongo* mongo_create(void); /* allocate an uninitialized mongo object */
MONGO_EXPORT void mongo_dispose(mongo* conn); /* free an object from mongo_create() */
MONGO_EXPORT int mongo_get_err(mongo* conn); /* accessor for conn->err */
MONGO_EXPORT int mongo_is_connected(mongo* conn); /* non-zero if connected */
MONGO_EXPORT int mongo_get_op_timeout(mongo* conn); /* accessor for conn->op_timeout_ms */
MONGO_EXPORT const char* mongo_get_primary(mongo* conn); /* primary host description */
MONGO_EXPORT int mongo_get_socket(mongo* conn); /* accessor for conn->sock */
MONGO_EXPORT int mongo_get_host_count(mongo* conn); /* number of known replica-set hosts */
MONGO_EXPORT const char* mongo_get_host(mongo* conn, int i); /* i-th known host */
MONGO_EXPORT mongo_cursor* mongo_cursor_create(void); /* allocate an uninitialized cursor */
MONGO_EXPORT void mongo_cursor_dispose(mongo_cursor* cursor); /* free an object from mongo_cursor_create() */
MONGO_EXPORT int mongo_get_server_err(mongo* conn); /* accessor for conn->lasterrcode */
MONGO_EXPORT const char* mongo_get_server_err_string(mongo* conn); /* accessor for conn->lasterrstr */
/**
* Set an error on a mongo connection object. Mostly for internal use.
*
* @param conn a mongo connection object.
* @param err a driver error code of mongo_error_t.
* @param errstr a string version of the error.
* @param errorcode Currently errno or WSAGetLastError().
*/
MONGO_EXPORT void __mongo_set_error( mongo *conn, mongo_error_t err,
const char *errstr, int errorcode );
/**
* Clear all errors stored on a mongo connection object.
*
* @param conn a mongo connection object.
*/
MONGO_EXPORT void mongo_clear_errors( mongo *conn );
MONGO_EXTERN_C_END
#endif

View File

@@ -1,127 +0,0 @@
/* Copyright 2009-2012 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Lookup table of the decimal string forms of 0..999 -- all the numbers
 * that fit in a 4-byte string (at most 3 digits plus the NUL terminator).
 * NOTE(review): presumably used to format small integers as BSON keys
 * without calling sprintf -- confirm at the call sites. */
const char bson_numstrs[1000][4] = {
    "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
    "10", "11", "12", "13", "14", "15", "16", "17", "18", "19",
    "20", "21", "22", "23", "24", "25", "26", "27", "28", "29",
    "30", "31", "32", "33", "34", "35", "36", "37", "38", "39",
    "40", "41", "42", "43", "44", "45", "46", "47", "48", "49",
    "50", "51", "52", "53", "54", "55", "56", "57", "58", "59",
    "60", "61", "62", "63", "64", "65", "66", "67", "68", "69",
    "70", "71", "72", "73", "74", "75", "76", "77", "78", "79",
    "80", "81", "82", "83", "84", "85", "86", "87", "88", "89",
    "90", "91", "92", "93", "94", "95", "96", "97", "98", "99",
    "100", "101", "102", "103", "104", "105", "106", "107", "108", "109",
    "110", "111", "112", "113", "114", "115", "116", "117", "118", "119",
    "120", "121", "122", "123", "124", "125", "126", "127", "128", "129",
    "130", "131", "132", "133", "134", "135", "136", "137", "138", "139",
    "140", "141", "142", "143", "144", "145", "146", "147", "148", "149",
    "150", "151", "152", "153", "154", "155", "156", "157", "158", "159",
    "160", "161", "162", "163", "164", "165", "166", "167", "168", "169",
    "170", "171", "172", "173", "174", "175", "176", "177", "178", "179",
    "180", "181", "182", "183", "184", "185", "186", "187", "188", "189",
    "190", "191", "192", "193", "194", "195", "196", "197", "198", "199",
    "200", "201", "202", "203", "204", "205", "206", "207", "208", "209",
    "210", "211", "212", "213", "214", "215", "216", "217", "218", "219",
    "220", "221", "222", "223", "224", "225", "226", "227", "228", "229",
    "230", "231", "232", "233", "234", "235", "236", "237", "238", "239",
    "240", "241", "242", "243", "244", "245", "246", "247", "248", "249",
    "250", "251", "252", "253", "254", "255", "256", "257", "258", "259",
    "260", "261", "262", "263", "264", "265", "266", "267", "268", "269",
    "270", "271", "272", "273", "274", "275", "276", "277", "278", "279",
    "280", "281", "282", "283", "284", "285", "286", "287", "288", "289",
    "290", "291", "292", "293", "294", "295", "296", "297", "298", "299",
    "300", "301", "302", "303", "304", "305", "306", "307", "308", "309",
    "310", "311", "312", "313", "314", "315", "316", "317", "318", "319",
    "320", "321", "322", "323", "324", "325", "326", "327", "328", "329",
    "330", "331", "332", "333", "334", "335", "336", "337", "338", "339",
    "340", "341", "342", "343", "344", "345", "346", "347", "348", "349",
    "350", "351", "352", "353", "354", "355", "356", "357", "358", "359",
    "360", "361", "362", "363", "364", "365", "366", "367", "368", "369",
    "370", "371", "372", "373", "374", "375", "376", "377", "378", "379",
    "380", "381", "382", "383", "384", "385", "386", "387", "388", "389",
    "390", "391", "392", "393", "394", "395", "396", "397", "398", "399",
    "400", "401", "402", "403", "404", "405", "406", "407", "408", "409",
    "410", "411", "412", "413", "414", "415", "416", "417", "418", "419",
    "420", "421", "422", "423", "424", "425", "426", "427", "428", "429",
    "430", "431", "432", "433", "434", "435", "436", "437", "438", "439",
    "440", "441", "442", "443", "444", "445", "446", "447", "448", "449",
    "450", "451", "452", "453", "454", "455", "456", "457", "458", "459",
    "460", "461", "462", "463", "464", "465", "466", "467", "468", "469",
    "470", "471", "472", "473", "474", "475", "476", "477", "478", "479",
    "480", "481", "482", "483", "484", "485", "486", "487", "488", "489",
    "490", "491", "492", "493", "494", "495", "496", "497", "498", "499",
    "500", "501", "502", "503", "504", "505", "506", "507", "508", "509",
    "510", "511", "512", "513", "514", "515", "516", "517", "518", "519",
    "520", "521", "522", "523", "524", "525", "526", "527", "528", "529",
    "530", "531", "532", "533", "534", "535", "536", "537", "538", "539",
    "540", "541", "542", "543", "544", "545", "546", "547", "548", "549",
    "550", "551", "552", "553", "554", "555", "556", "557", "558", "559",
    "560", "561", "562", "563", "564", "565", "566", "567", "568", "569",
    "570", "571", "572", "573", "574", "575", "576", "577", "578", "579",
    "580", "581", "582", "583", "584", "585", "586", "587", "588", "589",
    "590", "591", "592", "593", "594", "595", "596", "597", "598", "599",
    "600", "601", "602", "603", "604", "605", "606", "607", "608", "609",
    "610", "611", "612", "613", "614", "615", "616", "617", "618", "619",
    "620", "621", "622", "623", "624", "625", "626", "627", "628", "629",
    "630", "631", "632", "633", "634", "635", "636", "637", "638", "639",
    "640", "641", "642", "643", "644", "645", "646", "647", "648", "649",
    "650", "651", "652", "653", "654", "655", "656", "657", "658", "659",
    "660", "661", "662", "663", "664", "665", "666", "667", "668", "669",
    "670", "671", "672", "673", "674", "675", "676", "677", "678", "679",
    "680", "681", "682", "683", "684", "685", "686", "687", "688", "689",
    "690", "691", "692", "693", "694", "695", "696", "697", "698", "699",
    "700", "701", "702", "703", "704", "705", "706", "707", "708", "709",
    "710", "711", "712", "713", "714", "715", "716", "717", "718", "719",
    "720", "721", "722", "723", "724", "725", "726", "727", "728", "729",
    "730", "731", "732", "733", "734", "735", "736", "737", "738", "739",
    "740", "741", "742", "743", "744", "745", "746", "747", "748", "749",
    "750", "751", "752", "753", "754", "755", "756", "757", "758", "759",
    "760", "761", "762", "763", "764", "765", "766", "767", "768", "769",
    "770", "771", "772", "773", "774", "775", "776", "777", "778", "779",
    "780", "781", "782", "783", "784", "785", "786", "787", "788", "789",
    "790", "791", "792", "793", "794", "795", "796", "797", "798", "799",
    "800", "801", "802", "803", "804", "805", "806", "807", "808", "809",
    "810", "811", "812", "813", "814", "815", "816", "817", "818", "819",
    "820", "821", "822", "823", "824", "825", "826", "827", "828", "829",
    "830", "831", "832", "833", "834", "835", "836", "837", "838", "839",
    "840", "841", "842", "843", "844", "845", "846", "847", "848", "849",
    "850", "851", "852", "853", "854", "855", "856", "857", "858", "859",
    "860", "861", "862", "863", "864", "865", "866", "867", "868", "869",
    "870", "871", "872", "873", "874", "875", "876", "877", "878", "879",
    "880", "881", "882", "883", "884", "885", "886", "887", "888", "889",
    "890", "891", "892", "893", "894", "895", "896", "897", "898", "899",
    "900", "901", "902", "903", "904", "905", "906", "907", "908", "909",
    "910", "911", "912", "913", "914", "915", "916", "917", "918", "919",
    "920", "921", "922", "923", "924", "925", "926", "927", "928", "929",
    "930", "931", "932", "933", "934", "935", "936", "937", "938", "939",
    "940", "941", "942", "943", "944", "945", "946", "947", "948", "949",
    "950", "951", "952", "953", "954", "955", "956", "957", "958", "959",
    "960", "961", "962", "963", "964", "965", "966", "967", "968", "969",
    "970", "971", "972", "973", "974", "975", "976", "977", "978", "979",
    "980", "981", "982", "983", "984", "985", "986", "987", "988", "989",
    "990", "991", "992", "993", "994", "995", "996", "997", "998", "999",
};

View File

@@ -1,147 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
  MSBuild project for mod_cdr_mongodb (FreeSWITCH MongoDB CDR event handler).
  Builds the module as a DLL for Win32/x64 in Debug and Release, compiling the
  module source plus the bundled legacy MongoDB C driver sources from driver\src.
-->
<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <ItemGroup Label="ProjectConfigurations">
    <ProjectConfiguration Include="Debug|Win32">
      <Configuration>Debug</Configuration>
      <Platform>Win32</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Debug|x64">
      <Configuration>Debug</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release|Win32">
      <Configuration>Release</Configuration>
      <Platform>Win32</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release|x64">
      <Configuration>Release</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
  </ItemGroup>
  <PropertyGroup Label="Globals">
    <ProjectName>mod_cdr_mongodb</ProjectName>
    <RootNamespace>mod_cdr_mongodb</RootNamespace>
    <Keyword>Win32Proj</Keyword>
    <ProjectGuid>{4DFF29B4-2976-447D-A8B3-43476451517C}</ProjectGuid>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
  <!-- All four configurations build a DynamicLibrary (FreeSWITCH loadable module). -->
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings">
  </ImportGroup>
  <!-- Shared module property sheets provide common FreeSWITCH module build flags. -->
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
    <Import Project="..\..\..\..\w32\module_release.props" />
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
    <Import Project="..\..\..\..\w32\module_debug.props" />
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
    <Import Project="..\..\..\..\w32\module_release.props" />
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
    <Import Project="..\..\..\..\w32\module_debug.props" />
  </ImportGroup>
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup>
    <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
  </PropertyGroup>
  <!-- Driver headers live under the project's driver\src; MONGO_HAVE_STDINT selects
       the stdint-based integer typedefs in the bundled driver. -->
  <ItemDefinitionGroup>
    <ClCompile>
      <AdditionalIncludeDirectories>$(SolutionDir)libs\libteletone\src\;$(ProjectDir)\driver\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <PreprocessorDefinitions>_GNU_SOURCE;__GNUC__;MONGO_HAVE_STDINT;%(PreprocessorDefinitions)</PreprocessorDefinitions>
    </ClCompile>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
    <ClCompile>
      <PrecompiledHeader>
      </PrecompiledHeader>
    </ClCompile>
    <Link>
      <RandomizedBaseAddress>false</RandomizedBaseAddress>
      <DataExecutionPrevention>
      </DataExecutionPrevention>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
    <Midl>
      <TargetEnvironment>X64</TargetEnvironment>
    </Midl>
    <ClCompile>
      <PrecompiledHeader>
      </PrecompiledHeader>
    </ClCompile>
    <Link>
      <RandomizedBaseAddress>false</RandomizedBaseAddress>
      <DataExecutionPrevention>
      </DataExecutionPrevention>
      <TargetMachine>MachineX64</TargetMachine>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
    <ClCompile>
      <PrecompiledHeader>
      </PrecompiledHeader>
    </ClCompile>
    <Link>
      <RandomizedBaseAddress>false</RandomizedBaseAddress>
      <DataExecutionPrevention>
      </DataExecutionPrevention>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
    <Midl>
      <TargetEnvironment>X64</TargetEnvironment>
    </Midl>
    <ClCompile>
      <PrecompiledHeader>
      </PrecompiledHeader>
    </ClCompile>
    <Link>
      <RandomizedBaseAddress>false</RandomizedBaseAddress>
      <DataExecutionPrevention>
      </DataExecutionPrevention>
      <TargetMachine>MachineX64</TargetMachine>
    </Link>
  </ItemDefinitionGroup>
  <!-- Module source plus the bundled legacy MongoDB C driver sources. -->
  <ItemGroup>
    <ClCompile Include="mod_cdr_mongodb.c" />
    <ClCompile Include="driver\src\bson.c" />
    <ClCompile Include="driver\src\encoding.c" />
    <ClCompile Include="driver\src\env_win32.c" />
    <ClCompile Include="driver\src\md5.c" />
    <ClCompile Include="driver\src\mongo.c" />
    <ClCompile Include="driver\src\numbers.c" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\..\..\..\w32\Library\FreeSwitchCore.2017.vcxproj">
      <Project>{202d7a4e-760d-4d0e-afa1-d7459ced30ff}</Project>
      <ReferenceOutputAssembly>false</ReferenceOutputAssembly>
    </ProjectReference>
  </ItemGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
  <ImportGroup Label="ExtensionTargets">
  </ImportGroup>
</Project>

View File

@@ -1,559 +0,0 @@
/*
* FreeSWITCH Modular Media Switching Software Library / Soft-Switch Application
* Copyright (C) 2005-2014, Anthony Minessale II <anthm@freeswitch.org>
*
* Version: MPL 1.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is FreeSWITCH Modular Media Switching Software Library / Soft-Switch Application
*
* The Initial Developer of the Original Code is
* Anthony Minessale II <anthm@freeswitch.org>
* Portions created by the Initial Developer are Copyright (C)
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Daniel Swarbrick <daniel.swarbrick@gmail.com>
*
* mod_cdr_mongodb.c -- MongoDB CDR Module
*
* Derived from:
* mod_xml_cdr.c -- XML CDR Module to files or curl
*
*/
#include <switch.h>
#include <mongo.h>
#include <errno.h>	/* ERANGE detection for strtol() in bson_append_value() */
/* Upper bound on replica-set seed hosts parsed from the "host" setting. */
#define MONGO_REPLSET_MAX_MEMBERS 12

/* Module-global state, zeroed and then populated from cdr_mongodb.conf at load time. */
static struct {
	switch_memory_pool_t *pool;		/* module memory pool */
	int shutdown;					/* set non-zero at shutdown; makes my_on_reporting() a no-op */
	char *mongo_host;				/* host, or comma-separated host[:port] list for a replica set */
	int mongo_port;					/* default TCP port (used when a host entry has no explicit port) */
	char *mongo_namespace;			/* "database.collection" insert target */
	char *mongo_replset_name;		/* replica set name (multi-host mode only) */
	char *mongo_username;			/* optional credentials; auth is attempted only when both are set */
	char *mongo_password;
	mongo mongo_conn[1];			/* single shared driver connection */
	switch_mutex_t *mongo_mutex;	/* serializes inserts/reconnects on mongo_conn */
	switch_bool_t log_b;			/* when true, also log B-leg CDRs */
} globals;

/* Binding of cdr_mongodb.conf parameters to the globals above. */
static switch_xml_config_item_t config_settings[] = {
	/* key, flags, ptr, default_value, syntax, helptext */
	SWITCH_CONFIG_ITEM_STRING_STRDUP("host", CONFIG_REQUIRED, &globals.mongo_host, "127.0.0.1", NULL, "MongoDB server host address"),
	SWITCH_CONFIG_ITEM_STRING_STRDUP("namespace", CONFIG_REQUIRED, &globals.mongo_namespace, NULL, "database.collection", "MongoDB namespace"),
	SWITCH_CONFIG_ITEM_STRING_STRDUP("replica_set_name", CONFIG_RELOADABLE, &globals.mongo_replset_name, "cdr_mongodb", NULL, "MongoDB replica set name"),
	SWITCH_CONFIG_ITEM_STRING_STRDUP("username", CONFIG_RELOADABLE, &globals.mongo_username, NULL, NULL, "MongoDB username"),
	SWITCH_CONFIG_ITEM_STRING_STRDUP("password", CONFIG_RELOADABLE, &globals.mongo_password, NULL, NULL, "MongoDB password"),

	/* key, type, flags, ptr, default_value, data, syntax, helptext */
	SWITCH_CONFIG_ITEM("port", SWITCH_CONFIG_INT, CONFIG_REQUIRED, &globals.mongo_port, MONGO_DEFAULT_PORT, NULL, NULL, "MongoDB server TCP port"),
	SWITCH_CONFIG_ITEM("log-b-leg", SWITCH_CONFIG_BOOL, CONFIG_RELOADABLE, &globals.log_b, SWITCH_TRUE, NULL, NULL, "Log B-leg in addition to A-leg"),
	SWITCH_CONFIG_ITEM_END()
};

SWITCH_MODULE_LOAD_FUNCTION(mod_cdr_mongodb_load);
SWITCH_MODULE_SHUTDOWN_FUNCTION(mod_cdr_mongodb_shutdown);
SWITCH_MODULE_DEFINITION(mod_cdr_mongodb, mod_cdr_mongodb_load, mod_cdr_mongodb_shutdown, NULL);
/*
 * Append a channel variable to the CDR document using the narrowest type
 * that represents it: int if the value is a base-10 integer that fits in
 * an int, long if it fits in a long, otherwise the original string.
 *
 * Fixes over the previous version:
 *  - an empty string was stored as int 0 (strtol consumes no digits but
 *    still leaves *endptr == '\0'); it is now stored as a string;
 *  - values overflowing a long were silently clamped to LONG_MIN/LONG_MAX
 *    by strtol; ERANGE is now detected and the string form is kept.
 */
static void bson_append_value(bson *cdr, char *name, char *val)
{
	char *endptr;
	long int lintValue;

	errno = 0;
	lintValue = strtol(val, &endptr, 10);

	/* Numeric only if at least one digit was consumed, the whole string
	 * was consumed, and the value did not overflow a long. */
	if (endptr != val && !*endptr && errno != ERANGE) {
		int intValue = (int) lintValue;
		if (intValue == lintValue) {
			bson_append_int(cdr, name, intValue);
		} else {
			bson_append_long(cdr, name, lintValue);
		}
	} else {
		bson_append_string(cdr, name, val);
	}
}
/*
 * Copy the standard caller-profile fields into BSON object b, one string
 * per field, in the same key order the CDR documents have always used.
 */
static void set_bson_profile_data(bson *b, switch_caller_profile_t *caller_profile)
{
	int i;
	const struct {
		const char *key;
		const char *value;
	} fields[] = {
		{ "username", caller_profile->username },
		{ "dialplan", caller_profile->dialplan },
		{ "caller_id_name", caller_profile->caller_id_name },
		{ "ani", caller_profile->ani },
		{ "aniii", caller_profile->aniii },
		{ "caller_id_number", caller_profile->caller_id_number },
		{ "network_addr", caller_profile->network_addr },
		{ "rdnis", caller_profile->rdnis },
		{ "destination_number", caller_profile->destination_number },
		{ "uuid", caller_profile->uuid },
		{ "source", caller_profile->source },
		{ "context", caller_profile->context },
		{ "chan_name", caller_profile->chan_name }
	};

	for (i = 0; i < (int) (sizeof(fields) / sizeof(fields[0])); i++) {
		bson_append_string(b, fields[i].key, fields[i].value);
	}
}
/*
 * Authenticate the shared Mongo connection.
 *
 * The database name is the portion of globals.mongo_namespace before the
 * first '.' ("database.collection"); authentication runs against that
 * database with globals.mongo_username / globals.mongo_password.
 *
 * Returns SWITCH_STATUS_SUCCESS on success, SWITCH_STATUS_FALSE when
 * mongo_cmd_authenticate() fails.
 */
static switch_status_t cdr_mongo_authenticate() {
	switch_status_t status = SWITCH_STATUS_SUCCESS;
	mongo_error_t db_status;
	char *ns_tmp, *ns_split[2];

	/* Split namespace db.collection into separate vars */
	switch_strdup(ns_tmp, globals.mongo_namespace);	/* work on a copy: splitting mutates the buffer */
	switch_separate_string(ns_tmp, '.', ns_split, 2);

	db_status = mongo_cmd_authenticate(globals.mongo_conn, ns_split[0], globals.mongo_username, globals.mongo_password);

	if (db_status != MONGO_OK) {
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "mongo_cmd_authenticate: authentication failed\n");
		status = SWITCH_STATUS_FALSE;
	} else {
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Successfully authenticated %s@%s\n", globals.mongo_username, ns_split[0]);
	}

	switch_safe_free(ns_tmp);
	return status;
}
/*
 * on_reporting state handler: assemble a BSON document describing the
 * finished call (channel data, channel variables, app log, hold records
 * and the full callflow/caller-profile chain) and insert it into the
 * configured MongoDB namespace, retrying once after a reconnect on I/O
 * failure.
 *
 * Returns SWITCH_STATUS_SUCCESS normally; SWITCH_STATUS_FALSE when the
 * insert (including the single retry) or reconnect/re-auth fails.
 */
static switch_status_t my_on_reporting(switch_core_session_t *session)
{
	switch_status_t status = SWITCH_STATUS_SUCCESS;
	switch_channel_t *channel = switch_core_session_get_channel(session);
	switch_event_header_t *hi;
	switch_caller_profile_t *caller_profile;
	switch_hold_record_t *hold_record;
	switch_app_log_t *app_log;
	bson cdr;
	int is_b;
	int bson_idx, callflow_idx;
	char idx_buffer[12];	/* array-index keys ("0", "1", ...) for BSON arrays */
	char *tmp;

	/* Module is going away; drop the CDR rather than touch a dying connection. */
	if (globals.shutdown) {
		return SWITCH_STATUS_SUCCESS;
	}

	/* A channel with an originator profile is the B-leg of a bridge; skip it
	 * unless B-leg logging or the per-channel force-CDR variable is set. */
	is_b = channel && switch_channel_get_originator_caller_profile(channel);
	if (!globals.log_b && is_b) {
		const char *force_cdr = switch_channel_get_variable(channel, SWITCH_FORCE_PROCESS_CDR_VARIABLE);
		if (!switch_true(force_cdr)) {
			return SWITCH_STATUS_SUCCESS;
		}
	}

	bson_init(&cdr);

	/* Channel data */
	bson_append_start_object(&cdr, "channel_data");
	bson_append_string(&cdr, "state", switch_channel_state_name(switch_channel_get_state(channel)));
	bson_append_string(&cdr, "direction", switch_channel_direction(channel) == SWITCH_CALL_DIRECTION_OUTBOUND ? "outbound" : "inbound");
	bson_append_int(&cdr, "state_number", switch_channel_get_state(channel));

	/* Flag/capability strings are heap-allocated by the core; free after use. */
	if ((tmp = switch_channel_get_flag_string(channel))) {
		bson_append_string(&cdr, "flags", tmp);
		free(tmp);
	}

	if ((tmp = switch_channel_get_cap_string(channel))) {
		bson_append_string(&cdr, "caps", tmp);
		free(tmp);
	}
	bson_append_finish_object(&cdr);	/* channel_data */

	/* Channel variables (numeric-looking values are stored as numbers) */
	bson_append_start_object(&cdr, "variables");
	if ((hi = switch_channel_variable_first(channel))) {
		for (; hi; hi = hi->next) {
			if (!zstr(hi->name) && !zstr(hi->value)) {
				bson_append_value(&cdr, hi->name, hi->value);
			}
		}
		switch_channel_variable_last(channel);	/* releases the iteration lock taken by _first */
	}
	bson_append_finish_object(&cdr);	/* variables */

	/* App log: ordered list of applications executed on this channel */
	if ((app_log = switch_core_session_get_app_log(session))) {
		switch_app_log_t *ap;

		bson_append_start_array(&cdr, "app_log");
		for (ap = app_log, bson_idx = 0; ap; ap = ap->next, bson_idx++) {
			switch_snprintf(idx_buffer, sizeof(idx_buffer), "%d", bson_idx);
			bson_append_start_object(&cdr, idx_buffer);
			bson_append_string(&cdr, "app_name", ap->app);
			bson_append_string(&cdr, "app_data", switch_str_nil(ap->arg));
			bson_append_long(&cdr, "app_stamp", ap->stamp);
			bson_append_finish_object(&cdr);	/* application */
		}
		bson_append_finish_array(&cdr);	/* app_log */
	}

	/* Hold records: on/off timestamps plus peer UUID when bridged */
	if ((hold_record = switch_channel_get_hold_record(channel))) {
		switch_hold_record_t *hr;

		bson_append_start_array(&cdr, "hold_record");
		for (hr = hold_record, bson_idx = 0; hr; hr = hr->next, bson_idx++) {
			switch_snprintf(idx_buffer, sizeof(idx_buffer), "%d", bson_idx);
			bson_append_start_object(&cdr, idx_buffer);
			bson_append_long(&cdr, "on", hr->on);
			bson_append_long(&cdr, "off", hr->off);
			if (hr->uuid) {
				bson_append_string(&cdr, "bridged_to", hr->uuid);
			}
			bson_append_finish_object(&cdr);
		}
		bson_append_finish_array(&cdr);	/* hold_record */
	}

	/* Callflow: one entry per caller profile in the channel's profile chain */
	caller_profile = switch_channel_get_caller_profile(channel);

	/* Start callflow array */
	bson_append_start_array(&cdr, "callflow");
	callflow_idx = 0;

	while (caller_profile) {
		snprintf(idx_buffer, sizeof(idx_buffer), "%d", callflow_idx);
		bson_append_start_object(&cdr, idx_buffer);

		if (!zstr(caller_profile->dialplan)) {
			bson_append_string(&cdr, "dialplan", caller_profile->dialplan);
		}

		if (!zstr(caller_profile->profile_index)) {
			bson_append_string(&cdr, "profile_index", caller_profile->profile_index);
		}

		/* Dialplan extension and its (sub-)applications, if one was matched */
		if (caller_profile->caller_extension) {
			switch_caller_application_t *ap;

			bson_append_start_object(&cdr, "extension");

			bson_append_string(&cdr, "name", switch_str_nil(caller_profile->caller_extension->extension_name));
			bson_append_string(&cdr, "number", switch_str_nil(caller_profile->caller_extension->extension_number));

			if (caller_profile->caller_extension->current_application) {
				bson_append_string(&cdr, "current_app", caller_profile->caller_extension->current_application->application_name);
			}

			for (ap = caller_profile->caller_extension->applications; ap; ap = ap->next) {
				bson_append_start_object(&cdr, "application");
				if (ap == caller_profile->caller_extension->current_application) {
					bson_append_bool(&cdr, "last_executed", 1);
				}
				bson_append_string(&cdr, "app_name", ap->application_name);
				bson_append_string(&cdr, "app_data", switch_str_nil(ap->application_data));
				bson_append_finish_object(&cdr);
			}

			/* Child profiles (e.g. from transfers) carry their own extensions */
			if (caller_profile->caller_extension->children) {
				switch_caller_profile_t *cp = NULL;

				for (cp = caller_profile->caller_extension->children; cp; cp = cp->next) {

					if (!cp->caller_extension) {
						continue;
					}

					bson_append_start_object(&cdr, "sub_extensions");
					bson_append_start_object(&cdr, "extension");

					bson_append_string(&cdr, "name", cp->caller_extension->extension_name);
					bson_append_string(&cdr, "number", cp->caller_extension->extension_number);
					bson_append_string(&cdr, "dialplan", cp->dialplan);

					if (cp->caller_extension->current_application) {
						bson_append_string(&cdr, "current_app", cp->caller_extension->current_application->application_name);
					}

					for (ap = cp->caller_extension->applications; ap; ap = ap->next) {
						bson_append_start_object(&cdr, "application");
						if (ap == cp->caller_extension->current_application) {
							bson_append_bool(&cdr, "last_executed", 1);
						}
						bson_append_string(&cdr, "app_name", ap->application_name);
						bson_append_string(&cdr, "app_data", switch_str_nil(ap->application_data));
						bson_append_finish_object(&cdr);
					}

					bson_append_finish_object(&cdr);	/* extension */
					bson_append_finish_object(&cdr);	/* sub_extensions */
				}
			}

			bson_append_finish_object(&cdr);	/* extension */
		}

		bson_append_start_object(&cdr, "caller_profile");
		set_bson_profile_data(&cdr, caller_profile);

		/* Related profile chains: origination, originator, originatee */
		if (caller_profile->origination_caller_profile) {
			switch_caller_profile_t *cp = NULL;

			/* Start origination array */
			bson_append_start_array(&cdr, "origination");
			for (cp = caller_profile->origination_caller_profile, bson_idx = 0; cp; cp = cp->next, bson_idx++) {
				snprintf(idx_buffer, sizeof(idx_buffer), "%d", bson_idx);
				bson_append_start_object(&cdr, idx_buffer);
				set_bson_profile_data(&cdr, cp);
				bson_append_finish_object(&cdr);
			}
			bson_append_finish_object(&cdr);	/* origination */
		}

		if (caller_profile->originator_caller_profile) {
			switch_caller_profile_t *cp = NULL;

			/* Start originator array */
			bson_append_start_array(&cdr, "originator");
			for (cp = caller_profile->originator_caller_profile, bson_idx = 0; cp; cp = cp->next, bson_idx++) {
				snprintf(idx_buffer, sizeof(idx_buffer), "%d", bson_idx);
				bson_append_start_object(&cdr, idx_buffer);
				set_bson_profile_data(&cdr, cp);
				bson_append_finish_object(&cdr);
			}
			bson_append_finish_object(&cdr);	/* originator */
		}

		if (caller_profile->originatee_caller_profile) {
			switch_caller_profile_t *cp = NULL;

			/* Start originatee array */
			bson_append_start_array(&cdr, "originatee");
			for (cp = caller_profile->originatee_caller_profile, bson_idx = 0; cp; cp = cp->next, bson_idx++) {
				snprintf(idx_buffer, sizeof(idx_buffer), "%d", bson_idx);
				bson_append_start_object(&cdr, idx_buffer);
				set_bson_profile_data(&cdr, cp);
				bson_append_finish_object(&cdr);
			}
			bson_append_finish_object(&cdr);	/* originatee */
		}

		bson_append_finish_object(&cdr);	/* caller_profile */

		/* Timestamps */
		if (caller_profile->times) {
			bson_append_start_object(&cdr, "times");

			/* Insert timestamps as long ints (microseconds) to preserve accuracy */
			bson_append_long(&cdr, "created_time", caller_profile->times->created);
			bson_append_long(&cdr, "profile_created_time", caller_profile->times->profile_created);
			bson_append_long(&cdr, "progress_time", caller_profile->times->progress);
			bson_append_long(&cdr, "progress_media_time", caller_profile->times->progress_media);
			bson_append_long(&cdr, "answered_time", caller_profile->times->answered);
			bson_append_long(&cdr, "bridged_time", caller_profile->times->bridged);
			bson_append_long(&cdr, "last_hold_time", caller_profile->times->last_hold);
			bson_append_long(&cdr, "hold_accum_time", caller_profile->times->hold_accum);
			bson_append_long(&cdr, "hangup_time", caller_profile->times->hungup);
			bson_append_long(&cdr, "resurrect_time", caller_profile->times->resurrected);
			bson_append_long(&cdr, "transfer_time", caller_profile->times->transferred);
			bson_append_finish_object(&cdr);	/* times */
		}

		bson_append_finish_object(&cdr);	/* callflow */
		caller_profile = caller_profile->next;
		callflow_idx++;
	}

	bson_append_finish_array(&cdr);
	bson_finish(&cdr);

	/* One shared connection: serialize insert + possible reconnect/retry. */
	switch_mutex_lock(globals.mongo_mutex);

	if (mongo_insert(globals.mongo_conn, globals.mongo_namespace, &cdr, NULL) != MONGO_OK) {
		if (globals.mongo_conn->err == MONGO_IO_ERROR) {
			mongo_error_t db_status;
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_WARNING, "MongoDB connection failed; attempting reconnect...\n");
			db_status = mongo_reconnect(globals.mongo_conn);

			if (db_status != MONGO_OK) {
				switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "MongoDB reconnect failed with error code %d\n", db_status);
				status = SWITCH_STATUS_FALSE;
			} else {
				switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "MongoDB connection re-established.\n");

				/* Re-authentication is necessary after a reconnect */
				if (globals.mongo_username && globals.mongo_password) {
					status = cdr_mongo_authenticate();
				}

				/* Single retry of the insert on the fresh connection */
				if (mongo_insert(globals.mongo_conn, globals.mongo_namespace, &cdr, NULL) != MONGO_OK) {
					switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "mongo_insert: %s (error code %d)\n", globals.mongo_conn->errstr, globals.mongo_conn->err);
					status = SWITCH_STATUS_FALSE;
				}
			}
		} else {
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "mongo_insert: %s (error code %d)\n", globals.mongo_conn->errstr, globals.mongo_conn->err);
			status = SWITCH_STATUS_FALSE;
		}
	}

	switch_mutex_unlock(globals.mongo_mutex);
	bson_destroy(&cdr);

	return status;
}
/*
 * Channel state handler table.  Only the reporting callback is hooked:
 * that is where the finished call's CDR is assembled and written to
 * MongoDB.  All other state callbacks are intentionally left NULL.
 */
static switch_state_handler_table_t state_handlers = {
	/*.on_init */ NULL,
	/*.on_routing */ NULL,
	/*.on_execute */ NULL,
	/*.on_hangup */ NULL,
	/*.on_exchange_media */ NULL,
	/*.on_soft_execute */ NULL,
	/*.on_consume_media */ NULL,
	/*.on_hibernate */ NULL,
	/*.on_reset */ NULL,
	/*.on_park */ NULL,
	/*.on_reporting */ my_on_reporting,
	/*.on_destroy */ NULL
};
/*
 * Parse cdr_mongodb.conf into the module's global settings via the
 * config_settings binding table.
 *
 * Returns SWITCH_STATUS_SUCCESS on success, SWITCH_STATUS_FALSE when the
 * configuration cannot be loaded or parsed.  The pool argument is unused.
 */
static switch_status_t load_config(switch_memory_pool_t *pool)
{
	if (switch_xml_config_parse_module_settings("cdr_mongodb.conf", SWITCH_FALSE, config_settings) == SWITCH_STATUS_SUCCESS) {
		return SWITCH_STATUS_SUCCESS;
	}

	return SWITCH_STATUS_FALSE;
}
/*
 * Module load: read cdr_mongodb.conf, connect to either a single MongoDB
 * server or a replica set (comma-separated "host[:port]" list in the
 * "host" setting), optionally authenticate, then register the reporting
 * state handler so every finished call produces a CDR document.
 *
 * Returns SWITCH_STATUS_SUCCESS, or SWITCH_STATUS_FALSE on config,
 * connection or authentication failure (module will not load).
 */
SWITCH_MODULE_LOAD_FUNCTION(mod_cdr_mongodb_load)
{
	switch_status_t status = SWITCH_STATUS_SUCCESS;
	mongo_error_t db_status;
	char *repl_hosts[MONGO_REPLSET_MAX_MEMBERS];
	char *mongo_host[2];	/* [0] = host, [1] = optional port string */
	int num_hosts, mongo_port;

	memset(&globals, 0, sizeof(globals));
	globals.pool = pool;

	if (load_config(pool) != SWITCH_STATUS_SUCCESS) {
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "Unable to load or parse config!\n");
		return SWITCH_STATUS_FALSE;
	}

	/* More than one comma-separated host selects replica-set mode. */
	num_hosts = switch_separate_string(globals.mongo_host, ',', repl_hosts, MONGO_REPLSET_MAX_MEMBERS);

	if (num_hosts > 1) {
		int i;

		mongo_replset_init(globals.mongo_conn, globals.mongo_replset_name);

		for (i = 0; i < num_hosts; i++) {
			/* Each seed may carry its own port; fall back to the global port. */
			switch_separate_string(repl_hosts[i], ':', mongo_host, 2);
			mongo_port = mongo_host[1] ? atoi(mongo_host[1]) : globals.mongo_port;
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Adding MongoDB server %s:%d to replica set\n", mongo_host[0], mongo_port);
			mongo_replset_add_seed(globals.mongo_conn, mongo_host[0], mongo_port);
		}

		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Connecting to MongoDB replica set %s\n", globals.mongo_replset_name);
		db_status = mongo_replset_connect(globals.mongo_conn);
	} else {
		/* Single host; an embedded ":port" overrides the configured port. */
		switch_separate_string(globals.mongo_host, ':', mongo_host, 2);
		if (mongo_host[1]) {
			globals.mongo_port = atoi(mongo_host[1]);
		}
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Connecting to MongoDB server %s:%d\n", globals.mongo_host, globals.mongo_port);
		db_status = mongo_connect(globals.mongo_conn, globals.mongo_host, globals.mongo_port);
	}

	if (db_status != MONGO_OK) {
		/* Map driver error codes to human-readable log messages. */
		switch (globals.mongo_conn->err) {
			case MONGO_CONN_NO_SOCKET:
				switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "mongo_connect: no socket\n");
				break;
			case MONGO_CONN_FAIL:
				switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "mongo_connect: connection failed\n");
				break;
			case MONGO_CONN_ADDR_FAIL:
				switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "mongo_connect: hostname lookup failed\n");
				break;
			case MONGO_CONN_NOT_MASTER:
				switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "mongo_connect: not master\n");
				break;
			case MONGO_CONN_BAD_SET_NAME:
				switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "mongo_replset_connect: configured replica set name does not match\n");
				break;
			case MONGO_CONN_NO_PRIMARY:
				switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "mongo_replset_connect: cannot find replica set primary member\n");
				break;
			default:
				switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "mongo_connect: unknown error: status code %d, error code %d\n", db_status, globals.mongo_conn->err);
		}
		return SWITCH_STATUS_FALSE;
	} else {
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Connection established\n");
	}

	/* Credentials are optional; authenticate only when both are configured. */
	if (globals.mongo_username && globals.mongo_password) {
		if (cdr_mongo_authenticate() != SWITCH_STATUS_SUCCESS) {
			return SWITCH_STATUS_FALSE;
		}
	}

	switch_mutex_init(&globals.mongo_mutex, SWITCH_MUTEX_NESTED, pool);

	switch_core_add_state_handler(&state_handlers);
	*module_interface = switch_loadable_module_create_module_interface(pool, modname);

	return status;
}
/*
 * Module shutdown: flag in-flight reporting callbacks to bail out, unhook
 * the state handler, then tear down the mutex and the Mongo connection.
 * NOTE(review): a reporting thread already past the globals.shutdown check
 * could still hold mongo_mutex here — confirm the core drains handlers
 * before shutdown is invoked.
 */
SWITCH_MODULE_SHUTDOWN_FUNCTION(mod_cdr_mongodb_shutdown)
{
	globals.shutdown = 1;	/* makes my_on_reporting() a no-op */
	switch_core_remove_state_handler(&state_handlers);
	switch_mutex_destroy(globals.mongo_mutex);
	mongo_destroy(globals.mongo_conn);
	return SWITCH_STATUS_SUCCESS;
}
/* For Emacs:
* Local Variables:
* mode:c
* indent-tabs-mode:t
* tab-width:4
* c-basic-offset:4
* End:
* For VIM:
* vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet:
*/

View File

@@ -545,14 +545,6 @@
<RefProjectOutputGroups>Binaries;Content;Satellites</RefProjectOutputGroups>
<RefTargetDir>INSTALLFOLDER</RefTargetDir>
</ProjectReference>
<ProjectReference Include="..\..\src\mod\event_handlers\mod_cdr_mongodb\mod_cdr_mongodb.2017.vcxproj">
<Name>mod_cdr_mongodb</Name>
<Project>{4dff29b4-2976-447d-a8b3-43476451517c}</Project>
<Private>True</Private>
<DoNotHarvest>True</DoNotHarvest>
<RefProjectOutputGroups>Binaries;Content;Satellites</RefProjectOutputGroups>
<RefTargetDir>INSTALLFOLDER</RefTargetDir>
</ProjectReference>
<ProjectReference Include="..\..\src\mod\event_handlers\mod_cdr_pg_csv\mod_cdr_pg_csv.2017.vcxproj">
<Name>mod_cdr_pg_csv</Name>
<Project>{411f6d43-9f09-47d0-8b04-e1eb6b67c5bf}</Project>
@@ -925,9 +917,9 @@
<Message Importance="High" Text="DefineConstants: $(DefineConstants)" />
<Message Importance="High" Text="DestinationFileName: $(DestinationFileName)" />
</Target>
<!--
To modify your build process, add your task inside one of the targets below.
Other similar extension points exist, see Wix.targets.
-->
<Target Name="AfterResolveReferences">
<Message Importance="High" Text="GitExe: $(GitExe)" />