saving uncommitted changes in /etc prior to emerge run
parent 5c57b79cf6
commit a7c4550aaa
9
conf.d/couchdb
Normal file
@@ -0,0 +1,9 @@
# Options for CouchDB

EXEC="/usr/bin/couchdb"
COUCHDB_USER="couchdb:couchdb"
COUCHDB_PID_FILE="/var/run/couchdb/couchdb.pid"
COUCHDB_STDOUT_FILE="/var/log/couchdb.out"
COUCHDB_STDERR_FILE="/var/log/couchdb.err"
#COUCHDB_RESPAWN_TIMEOUT=
#COUCHDB_OPTIONS=
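
On OpenRC these conf.d values are sourced automatically before /etc/init.d/couchdb runs, so the two commented variables only need values to take effect. A minimal sketch with illustrative values (not part of this commit; the extra ini path is hypothetical):

  COUCHDB_RESPAWN_TIMEOUT=5                      # same value the default/couchdb file below ships with
  COUCHDB_OPTIONS="-a /etc/couchdb/extra.ini"    # couchdb's -a flag appends another ini file to its config chain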
361
couchdb/default.ini
Normal file
@@ -0,0 +1,361 @@
; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure.

; Upgrading CouchDB will overwrite this file.
[vendor]
name = The Apache Software Foundation
version = 1.6.1

[couchdb]
database_dir = /var/lib/couchdb
view_index_dir = /var/lib/couchdb
util_driver_dir = /usr/lib64/couchdb/erlang/lib/couch-1.6.1/priv/lib
max_document_size = 4294967296 ; 4 GB
os_process_timeout = 5000 ; 5 seconds. for view and external servers.
max_dbs_open = 100
delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
uri_file = /var/run/couchdb/couch.uri
; Method used to compress everything that is appended to database and view index files, except
; for attachments (see the attachments section). Available methods are:
;
; none - no compression
; snappy - use google snappy, a very fast compressor/decompressor
; deflate_[N] - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
;               lowest compression ratio) to 9 (slowest, highest compression ratio)
file_compression = snappy
; Higher values may give better read performance due to less read operations
; and/or more OS page cache hits, but they can also increase overall response
; time for writes when there are many attachment write requests in parallel.
attachment_stream_buffer_size = 4096

plugin_dir = /usr/lib64/couchdb/plugins

; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure.

[database_compaction]
; larger buffer sizes can originate smaller files
doc_buffer_size = 524288 ; value in bytes
checkpoint_after = 5242880 ; checkpoint after every N bytes were written

[view_compaction]
; larger buffer sizes can originate smaller files
keyvalue_buffer_size = 2097152 ; value in bytes

[httpd]
port = 5984
bind_address = 127.0.0.1
authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
default_handler = {couch_httpd_db, handle_request}
secure_rewrites = true
vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
allow_jsonp = false
; Options for the MochiWeb HTTP server.
;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
; For more socket options, consult Erlang's module 'inet' man page.
;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
socket_options = [{recbuf, 262144}, {sndbuf, 262144}]
log_max_chunk_size = 1000000
enable_cors = false
; CouchDB can optionally enforce a maximum uri length;
; max_uri_length = 8000

[ssl]
port = 6984

[log]
file = /var/log/couchdb/couch.log
level = info
include_sasl = true

[couch_httpd_auth]
authentication_db = _users
authentication_redirect = /_utils/session.html
require_valid_user = false
timeout = 600 ; number of seconds before automatic logout
auth_cache_size = 50 ; size is number of cache entries
allow_persistent_cookies = false ; set to true to allow persistent cookies
iterations = 10 ; iterations for password hashing
; min_iterations = 1
; max_iterations = 1000000000
; comma-separated list of public fields, 404 if empty
; public_fields =

[cors]
credentials = false
; List of origins separated by a comma, * means accept all
; Origins must include the scheme: http://example.com
; You can’t set origins: * and credentials = true at the same time.
;origins = *
; List of accepted headers separated by a comma
; headers =
; List of accepted methods
; methods =


; Configuration for a vhost
;[cors:http://example.com]
; credentials = false
; List of origins separated by a comma
; Origins must include the scheme: http://example.com
; You can’t set origins: * and credentials = true at the same time.
;origins =
; List of accepted headers separated by a comma
; headers =
; List of accepted methods
; methods =

[couch_httpd_oauth]
; If set to 'true', oauth token and consumer secrets will be looked up
; in the authentication database (_users). These secrets are stored in
; a top level property named "oauth" in user documents. Example:
; {
;     "_id": "org.couchdb.user:joe",
;     "type": "user",
;     "name": "joe",
;     "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121",
;     "salt": "4e170ffeb6f34daecfd814dfb4001a73"
;     "roles": ["foo", "bar"],
;     "oauth": {
;         "consumer_keys": {
;             "consumerKey1": "key1Secret",
;             "consumerKey2": "key2Secret"
;         },
;         "tokens": {
;             "token1": "token1Secret",
;             "token2": "token2Secret"
;         }
;     }
; }
use_users_db = false

[query_servers]
javascript = /usr/local/bin/couchjs /usr/local/share/couchdb/server/main.js
coffeescript = /usr/local/bin/couchjs /usr/local/share/couchdb/server/main-coffee.js


; Changing reduce_limit to false will disable reduce_limit.
; If you think you're hitting reduce_limit with a "good" reduce function,
; please let us know on the mailing list so we can fine tune the heuristic.
[query_server_config]
reduce_limit = true
os_process_limit = 25

[daemons]
index_server={couch_index_server, start_link, []}
external_manager={couch_external_manager, start_link, []}
query_servers={couch_query_servers, start_link, []}
vhosts={couch_httpd_vhost, start_link, []}
httpd={couch_httpd, start_link, []}
stats_aggregator={couch_stats_aggregator, start, []}
stats_collector={couch_stats_collector, start, []}
uuids={couch_uuids, start, []}
auth_cache={couch_auth_cache, start_link, []}
replicator_manager={couch_replicator_manager, start_link, []}
os_daemons={couch_os_daemons, start_link, []}
compaction_daemon={couch_compaction_daemon, start_link, []}

[httpd_global_handlers]
/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "/usr/local/share/couchdb/www"}

_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "/usr/local/share/couchdb/www"}
_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
_config = {couch_httpd_misc_handlers, handle_config_req}
_replicate = {couch_replicator_httpd, handle_req}
_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
_restart = {couch_httpd_misc_handlers, handle_restart_req}
_stats = {couch_httpd_stats_handlers, handle_stats_req}
_log = {couch_httpd_misc_handlers, handle_log_req}
_session = {couch_httpd_auth, handle_session_req}
_oauth = {couch_httpd_oauth, handle_oauth_req}
_db_updates = {couch_dbupdates_httpd, handle_req}
_plugins = {couch_plugins_httpd, handle_req}

[httpd_db_handlers]
_all_docs = {couch_mrview_http, handle_all_docs_req}
_changes = {couch_httpd_db, handle_changes_req}
_compact = {couch_httpd_db, handle_compact_req}
_design = {couch_httpd_db, handle_design_req}
_temp_view = {couch_mrview_http, handle_temp_view_req}
_view_cleanup = {couch_mrview_http, handle_cleanup_req}

; The external module takes an optional argument allowing you to narrow it to a
; single script. Otherwise the script name is inferred from the first path section
; after _external's own path.
; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
; _external = {couch_httpd_external, handle_external_req}

[httpd_design_handlers]
_compact = {couch_mrview_http, handle_compact_req}
_info = {couch_mrview_http, handle_info_req}
_list = {couch_mrview_show, handle_view_list_req}
_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
_show = {couch_mrview_show, handle_doc_show_req}
_update = {couch_mrview_show, handle_doc_update_req}
_view = {couch_mrview_http, handle_view_req}

; enable external as an httpd handler, then link it with commands here.
; note, this api is still under consideration.
; [external]
; mykey = /path/to/mycommand

; Here you can setup commands for CouchDB to manage
; while it is alive. It will attempt to keep each command
; alive if it exits.
; [os_daemons]
; some_daemon_name = /path/to/script -with args


[uuids]
; Known algorithms:
;   random - 128 bits of random awesome
;            All awesome, all the time.
;   sequential - monotonically increasing ids with random increments
;                First 26 hex characters are random. Last 6 increment in
;                random amounts until an overflow occurs. On overflow, the
;                random prefix is regenerated and the process starts over.
;   utc_random - Time since Jan 1, 1970 UTC with microseconds
;                First 14 characters are the time in hex. Last 18 are random.
;   utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string
;            First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these.
algorithm = sequential
; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm.
; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids.
utc_id_suffix =
# Maximum number of UUIDs retrievable from /_uuids in a single request
max_count = 1000

[stats]
; rate is in milliseconds
rate = 1000
; sample intervals are in seconds
samples = [0, 60, 300, 900]

[attachments]
compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
compressible_types = text/*, application/javascript, application/json, application/xml

[replicator]
db = _replicator
; Maximum replicaton retry count can be a non-negative integer or "infinity".
max_replication_retry_count = 10
; More worker processes can give higher network throughput but can also
; imply more disk and network IO.
worker_processes = 4
; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
; also reduce the total amount of used RAM memory.
worker_batch_size = 500
; Maximum number of HTTP connections per replication.
http_connections = 20
; HTTP connection timeout per replication.
; Even for very fast/reliable networks it might need to be increased if a remote
; database is too busy.
connection_timeout = 30000
; If a request fails, the replicator will retry it up to N times.
retries_per_request = 10
; Some socket options that might boost performance in some scenarios:
;       {nodelay, boolean()}
;       {sndbuf, integer()}
;       {recbuf, integer()}
;       {priority, integer()}
; See the `inet` Erlang module's man page for the full list of options.
socket_options = [{keepalive, true}, {nodelay, false}]
; Path to a file containing the user's certificate.
;cert_file = /full/path/to/server_cert.pem
; Path to file containing user's private PEM encoded key.
;key_file = /full/path/to/server_key.pem
; String containing the user's password. Only used if the private keyfile is password protected.
;password = somepassword
; Set to true to validate peer certificates.
verify_ssl_certificates = false
; File containing a list of peer trusted certificates (in the PEM format).
;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
; Maximum peer certificate depth (must be set even if certificate validation is off).
ssl_certificate_max_depth = 3

[compaction_daemon]
; The delay, in seconds, between each check for which database and view indexes
; need to be compacted.
check_interval = 300
; If a database or view index file is smaller then this value (in bytes),
; compaction will not happen. Very small files always have a very high
; fragmentation therefore it's not worth to compact them.
min_file_size = 131072

[compactions]
; List of compaction rules for the compaction daemon.
; The daemon compacts databases and their respective view groups when all the
; condition parameters are satisfied. Configuration can be per database or
; global, and it has the following format:
;
; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
;
; Possible parameters:
;
; * db_fragmentation - If the ratio (as an integer percentage), of the amount
;                      of old data (and its supporting metadata) over the database
;                      file size is equal to or greater then this value, this
;                      database compaction condition is satisfied.
;                      This value is computed as:
;
;                          (file_size - data_size) / file_size * 100
;
;                      The data_size and file_size values can be obtained when
;                      querying a database's information URI (GET /dbname/).
;
; * view_fragmentation - If the ratio (as an integer percentage), of the amount
;                        of old data (and its supporting metadata) over the view
;                        index (view group) file size is equal to or greater then
;                        this value, then this view index compaction condition is
;                        satisfied. This value is computed as:
;
;                            (file_size - data_size) / file_size * 100
;
;                        The data_size and file_size values can be obtained when
;                        querying a view group's information URI
;                        (GET /dbname/_design/groupname/_info).
;
; * from _and_ to - The period for which a database (and its view groups) compaction
;                   is allowed. The value for these parameters must obey the format:
;
;                   HH:MM - HH:MM (HH in [0..23], MM in [0..59])
;
; * strict_window - If a compaction is still running after the end of the allowed
;                   period, it will be canceled if this parameter is set to 'true'.
;                   It defaults to 'false' and it's meaningful only if the *period*
;                   parameter is also specified.
;
; * parallel_view_compaction - If set to 'true', the database and its views are
;                              compacted in parallel. This is only useful on
;                              certain setups, like for example when the database
;                              and view index directories point to different
;                              disks. It defaults to 'false'.
;
; Before a compaction is triggered, an estimation of how much free disk space is
; needed is computed. This estimation corresponds to 2 times the data size of
; the database or view index. When there's not enough free disk space to compact
; a particular database or view index, a warning message is logged.
;
; Examples:
;
; 1) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}]
;    The `foo` database is compacted if its fragmentation is 70% or more.
;    Any view index of this database is compacted only if its fragmentation
;    is 60% or more.
;
; 2) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}]
;    Similar to the preceding example but a compaction (database or view index)
;    is only triggered if the current time is between midnight and 4 AM.
;
; 3) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}]
;    Similar to the preceding example - a compaction (database or view index)
;    is only triggered if the current time is between midnight and 4 AM. If at
;    4 AM the database or one of its views is still compacting, the compaction
;    process will be canceled.
;
; 4) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, true}]
;    Similar to the preceding example, but a database and its views can be
;    compacted in parallel.
;
;_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "23:00"}, {to, "04:00"}]
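
As the header of this file notes, default.ini is overwritten on upgrade, so overrides belong in local.ini or can be applied at runtime through the /_config endpoint, which CouchDB 1.x normally persists to local.ini. A minimal sketch, assuming the server is listening on the default 127.0.0.1:5984 and no admin account exists yet:

  # enable CORS without editing default.ini; the change survives a restart
  curl -X PUT http://127.0.0.1:5984/_config/httpd/enable_cors -d '"true"'
  curl -X PUT http://127.0.0.1:5984/_config/cors/origins -d '"http://example.com"'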
99
couchdb/local.ini
Normal file
@@ -0,0 +1,99 @@
; CouchDB Configuration Settings

; Custom settings should be made in this file. They will override settings
; in default.ini, but unlike changes made to default.ini, this file won't be
; overwritten on server upgrade.

[couchdb]
;max_document_size = 4294967296 ; bytes
uuid = 1c971bb8652d1deae6c5428a0642ad97

[httpd]
;port = 5984
;bind_address = 127.0.0.1
; Options for the MochiWeb HTTP server.
;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
; For more socket options, consult Erlang's module 'inet' man page.
;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]

; Uncomment next line to trigger basic-auth popup on unauthorized requests.
;WWW-Authenticate = Basic realm="administrator"

; Uncomment next line to set the configuration modification whitelist. Only
; whitelisted values may be changed via the /_config URLs. To allow the admin
; to change this value over HTTP, remember to include {httpd,config_whitelist}
; itself. Excluding it from the list would require editing this file to update
; the whitelist.
;config_whitelist = [{httpd,config_whitelist}, {log,level}, {etc,etc}]

[query_servers]
;nodejs = /usr/local/bin/couchjs-node /path/to/couchdb/share/server/main.js


[httpd_global_handlers]
;_google = {couch_httpd_proxy, handle_proxy_req, <<"http://www.google.com">>}

[couch_httpd_auth]
; If you set this to true, you should also uncomment the WWW-Authenticate line
; above. If you don't configure a WWW-Authenticate header, CouchDB will send
; Basic realm="server" in order to prevent you getting logged out.
; require_valid_user = false

[log]
;level = debug

[log_level_by_module]
; In this section you can specify any of the four log levels 'none', 'info',
; 'error' or 'debug' on a per-module basis. See src/*/*.erl for various
; modules.
;couch_httpd = error


[os_daemons]
; For any commands listed here, CouchDB will attempt to ensure that
; the process remains alive. Daemons should monitor their environment
; to know when to exit. This can most easily be accomplished by exiting
; when stdin is closed.
;foo = /path/to/command -with args

[daemons]
; enable SSL support by uncommenting the following line and supply the PEM's below.
; the default ssl port CouchDB listens on is 6984
; httpsd = {couch_httpd, start_link, [https]}

[ssl]
;cert_file = /full/path/to/server_cert.pem
;key_file = /full/path/to/server_key.pem
;password = somepassword
; set to true to validate peer certificates
verify_ssl_certificates = false
; Path to file containing PEM encoded CA certificates (trusted
; certificates used for verifying a peer certificate). May be omitted if
; you do not want to verify the peer.
;cacert_file = /full/path/to/cacertf
; The verification fun (optional) if not specified, the default
; verification fun will be used.
;verify_fun = {Module, VerifyFun}
; maximum peer certificate depth
ssl_certificate_max_depth = 1

; To enable Virtual Hosts in CouchDB, add a vhost = path directive. All requests to
; the Virual Host will be redirected to the path. In the example below all requests
; to http://example.com/ are redirected to /database.
; If you run CouchDB on a specific port, include the port number in the vhost:
; example.com:5984 = /database
[vhosts]
;example.com = /database/

[update_notification]
;unique notifier name=/full/path/to/exe -with "cmd line arg"

; To create an admin account uncomment the '[admins]' section below and add a
; line in the format 'username = password'. When you next start CouchDB, it
; will change the password to a hash (so that your passwords don't linger
; around in plain-text files). You can add more admin accounts with more
; 'username = password' lines. Don't forget to restart CouchDB after
; changing this.
[admins]
;admin = mysecretpassword
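
Instead of editing the [admins] line by hand, an admin account can also be created over HTTP; CouchDB hashes the password and persists it to this local ini file. A minimal sketch (username and password are placeholders):

  curl -X PUT http://127.0.0.1:5984/_config/admins/admin -d '"mysecretpassword"'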
7
default/couchdb
Normal file
@@ -0,0 +1,7 @@
# Sourced by init script for configuration.

COUCHDB_USER=couchdb
COUCHDB_STDOUT_FILE=/dev/null
COUCHDB_STDERR_FILE=/dev/null
COUCHDB_RESPAWN_TIMEOUT=5
COUCHDB_OPTIONS=
17
init.d/couchdb
Executable file
@@ -0,0 +1,17 @@
#!/sbin/runscript
# Copyright 1999-2013 Dirkjan Ochtman
# Distributed under the terms of the Apache License, Version 2.0

pidfile=${COUCHDB_PID_FILE}
command=${EXEC:-/usr/bin/couchdb}
command_args="-b -o ${COUCHDB_STDOUT_FILE} -e ${COUCHDB_STDERR_FILE} -p ${pidfile} ${COUCHDB_OPTIONS}"

start_stop_daemon_args="--user ${COUCHDB_USER}"

depend() {
	need net
}

start_pre() {
	checkpath -q -d -m 0755 -o ${COUCHDB_USER} /var/run/couchdb
}
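
This is a standard OpenRC runscript, so once these files are installed the service is managed with the usual tools, for example:

  rc-update add couchdb default    # start CouchDB at boot
  rc-service couchdb start         # or: /etc/init.d/couchdb start
  rc-service couchdb status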
9
logrotate.d/couchdb
Normal file
@@ -0,0 +1,9 @@
/var/log/couchdb/*.log {
    weekly
    rotate 10
    copytruncate
    delaycompress
    compress
    notifempty
    missingok
}
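
The rotation rules can be exercised without waiting for the weekly run; logrotate's debug mode only reports what it would do:

  logrotate -d /etc/logrotate.d/couchdb    # dry run, prints the planned actions without rotating anything
  logrotate -f /etc/logrotate.d/couchdb    # force one rotation to verify paths and permissions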