[ns_server:info,2016-10-19T09:55:11.542-07:00,nonode@nohost:<0.88.0>:ns_server:init_logging:151]Started & configured logging
[ns_server:info,2016-10-19T09:55:11.547-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]Static config terms:
[{error_logger_mf_dir,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{path_config_bindir,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/bin"},
{path_config_etcdir,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/couchbase"},
{path_config_libdir,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib"},
{path_config_datadir,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase"},
{path_config_tmpdir,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/tmp"},
{path_config_secdir,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/security"},
{nodefile,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.node"},
{loglevel_default,debug},
{loglevel_couchdb,info},
{loglevel_ns_server,debug},
{loglevel_error_logger,debug},
{loglevel_user,debug},
{loglevel_menelaus,debug},
{loglevel_ns_doctor,debug},
{loglevel_stats,debug},
{loglevel_rebalance,debug},
{loglevel_cluster,debug},
{loglevel_views,debug},
{loglevel_mapreduce_errors,debug},
{loglevel_xdcr,debug},
{loglevel_xdcr_trace,error},
{loglevel_access,info},
{disk_sink_opts,
[{rotation,
[{compress,true},
{size,41943040},
{num_files,10},
{buffer_size_max,52428800}]}]},
{disk_sink_opts_xdcr_trace,
[{rotation,[{compress,false},{size,83886080},{num_files,5}]}]},
{net_kernel_verbosity,10}]
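The disk sink rotation options above are byte counts. As a quick decode (plain arithmetic in an Erlang shell; nothing Couchbase-specific assumed), the sizes work out as follows:

    MiB = fun(Bytes) -> Bytes / (1024 * 1024) end.
    MiB(41943040).   % 40.0  -> regular sinks rotate at 40 MiB, keeping 10 compressed generations
    MiB(52428800).   % 50.0  -> buffer_size_max caps sink buffering at 50 MiB
    MiB(83886080).   % 80.0  -> the xdcr_trace sink rotates at 80 MiB, 5 generations, uncompressed

So a regular sink is capped near 400 MiB on disk before compression, and xdcr_trace near 400 MiB total.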
[ns_server:warn,2016-10-19T09:55:11.547-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter error_logger_mf_dir, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.547-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_bindir, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.547-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_etcdir, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_libdir, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_datadir, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_tmpdir, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_secdir, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter nodefile, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_default, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_couchdb, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_ns_server, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_error_logger, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_user, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_menelaus, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_ns_doctor, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_stats, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_rebalance, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.548-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_cluster, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.549-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_views, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.549-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_mapreduce_errors, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.549-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_xdcr, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.549-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_xdcr_trace, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.549-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_access, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.549-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter disk_sink_opts, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.549-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter disk_sink_opts_xdcr_trace, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.549-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter net_kernel_verbosity, which is given from command line
[ns_server:warn,2016-10-19T09:55:11.550-07:00,nonode@nohost:<0.88.0>:ns_server:start:79]Could not lock myself into memory: {error,enotsup}. Ignoring.
[error_logger:info,2016-10-19T09:55:11.552-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.129.0>},
{name,local_tasks},
{mfargs,{local_tasks,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:11.555-07:00,nonode@nohost:ns_server_cluster_sup<0.128.0>:log_os_info:start_link:25]OS type: {unix,darwin} Version: {14,3,0}
Runtime info: [{otp_release,"R16B03-1"},
{erl_version,"5.10.4.0.0.1"},
{erl_version_long,
"Erlang R16B03-1 (erts-5.10.4.0.0.1) [source-00852da] [64-bit] [smp:8:8] [async-threads:16] [kernel-poll:true]\n"},
{system_arch_raw,"x86_64-apple-darwin13.4.0"},
{system_arch,"x86_64-apple-darwin13.4.0"},
{localtime,{{2016,10,19},{9,55,11}}},
{memory,
[{total,25896680},
{processes,10199792},
{processes_used,10198312},
{system,15696888},
{atom,331249},
{atom_used,309719},
{binary,57536},
{code,7684198},
{ets,2452584}]},
{loaded,
[ns_info,log_os_info,local_tasks,restartable,
ns_server_cluster_sup,mlockall,calendar,
ale_default_formatter,'ale_logger-metakv',
'ale_logger-rebalance','ale_logger-xdcr_trace',
'ale_logger-menelaus','ale_logger-stats',
'ale_logger-access','ale_logger-ns_server',
'ale_logger-user','ale_logger-ns_doctor',
'ale_logger-cluster','ale_logger-xdcr',otp_internal,
ale_stderr_sink,ns_log_sink,filelib,ale_disk_sink,misc,
couch_util,io_lib_fread,ns_server,cpu_sup,memsup,disksup,
os_mon,io,release_handler,overload,alarm_handler,sasl,
timer,tftp_sup,httpd_sup,httpc_handler_sup,httpc_cookie,
inets_trace,httpc_manager,httpc,httpc_profile_sup,
httpc_sup,ftp_sup,inets_sup,inets_app,ssl,lhttpc_manager,
lhttpc_sup,lhttpc,tls_connection_sup,ssl_session_cache,
ssl_pkix_db,ssl_manager,ssl_sup,ssl_app,crypto_server,
crypto_sup,crypto_app,ale_error_logger_handler,
'ale_logger-ale_logger','ale_logger-error_logger',
beam_opcodes,beam_dict,beam_asm,beam_validator,beam_z,
beam_flatten,beam_trim,beam_receive,beam_bsm,beam_peep,
beam_dead,beam_split,beam_type,beam_bool,beam_except,
beam_clean,beam_utils,beam_block,beam_jump,beam_a,
v3_codegen,v3_life,v3_kernel,sys_core_dsetel,erl_bifs,
sys_core_fold,cerl_trees,sys_core_inline,core_lib,cerl,
v3_core,erl_bits,erl_expand_records,sys_pre_expand,sofs,
erl_internal,sets,ordsets,erl_lint,compile,
dynamic_compile,ale_utils,io_lib_pretty,io_lib_format,
io_lib,ale_codegen,dict,ale,ale_dynamic_sup,ale_sup,
ale_app,epp,ns_bootstrap,child_erlang,file_io_server,
orddict,erl_eval,file,c,kernel_config,user_sup,
supervisor_bridge,standard_error,code_server,unicode,
hipe_unified_loader,gb_sets,ets,binary,code,file_server,
net_kernel,global_group,erl_distribution,filename,os,
inet_parse,inet,inet_udp,inet_config,inet_db,global,
gb_trees,rpc,supervisor,kernel,application_master,sys,
application,gen_server,erl_parse,proplists,erl_scan,lists,
application_controller,proc_lib,gen,gen_event,
error_logger,heart,error_handler,erts_internal,erlang,
erl_prim_loader,prim_zip,zlib,prim_file,prim_inet,
prim_eval,init,otp_ring0]},
{applications,
[{lhttpc,"Lightweight HTTP Client","1.3.0"},
{os_mon,"CPO CXC 138 46","2.2.14"},
{public_key,"Public key infrastructure","0.21"},
{asn1,"The Erlang ASN1 compiler version 2.0.4","2.0.4"},
{kernel,"ERTS CXC 138 10","2.16.4"},
{ale,"Another Logger for Erlang","4.6.0-3391-enterprise"},
{inets,"INETS CXC 138 49","5.9.8"},
{ns_server,"Couchbase server","4.6.0-3391-enterprise"},
{crypto,"CRYPTO version 2","3.2"},
{ssl,"Erlang/OTP SSL application","5.3.3"},
{sasl,"SASL CXC 138 11","2.3.4"},
{stdlib,"ERTS CXC 138 10","1.19.4"}]},
{pre_loaded,
[erts_internal,erlang,erl_prim_loader,prim_zip,zlib,
prim_file,prim_inet,prim_eval,init,otp_ring0]},
{process_count,94},
{node,nonode@nohost},
{nodes,[]},
{registered,
[lhttpc_manager,standard_error_sup,release_handler,
code_server,httpd_sup,ale_dynamic_sup,'sink-disk_metakv',
overload,application_controller,'sink-disk_access_int',
alarm_handler,'sink-disk_access',kernel_safe_sup,
'sink-xdcr_trace',standard_error,'sink-disk_reports',
error_logger,'sink-disk_stats',timer_server,
'sink-disk_xdcr_errors',crypto_server,sasl_safe_sup,
crypto_sup,'sink-disk_xdcr','sink-disk_debug',tftp_sup,
os_mon_sup,'sink-disk_error',tls_connection_sup,cpu_sup,
ssl_sup,memsup,'sink-disk_default',init,disksup,inet_db,
httpc_sup,rex,ssl_manager,kernel_sup,httpc_profile_sup,
global_name_server,httpc_manager,ns_server_cluster_sup,
httpc_handler_sup,file_server_2,os_cmd_port_creator,
global_group,ftp_sup,sasl_sup,'sink-stderr',
ale_stats_events,ale,erl_prim_loader,inets_sup,
'sink-ns_log',local_tasks,lhttpc_sup,ale_sup]},
{cookie,nocookie},
{wordsize,8},
{wall_clock,0}]
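The memory section of this report has the same shape as erlang:memory(), and its invariant holds for the values printed: total is processes plus system. A one-line shell check with the numbers copied from above:

    true = (25896680 =:= 10199792 + 15696888).   % total = processes + system
    true = (309719 =< 331249).                   % atom_used never exceeds atom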
[ns_server:info,2016-10-19T09:55:11.561-07:00,nonode@nohost:ns_server_cluster_sup<0.128.0>:log_os_info:start_link:27]Manifest:
["","",
" ",
" ",
" ",
" ",
" ",
" "," "," ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" "," ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
""]
[error_logger:info,2016-10-19T09:55:11.563-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.130.0>},
{name,timeout_diag_logger},
{mfargs,{timeout_diag_logger,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:11.564-07:00,nonode@nohost:dist_manager<0.131.0>:dist_manager:read_address_config_from_path:86]Reading ip config from "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ip_start"
[ns_server:info,2016-10-19T09:55:11.564-07:00,nonode@nohost:dist_manager<0.131.0>:dist_manager:read_address_config_from_path:86]Reading ip config from "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ip"
[ns_server:info,2016-10-19T09:55:11.564-07:00,nonode@nohost:dist_manager<0.131.0>:dist_manager:init:163]ip config not found. Looks like we're a brand new node
[error_logger:info,2016-10-19T09:55:11.566-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,inet_gethost_native_sup}
started: [{pid,<0.133.0>},{mfa,{inet_gethost_native,init,[[]]}}]
[error_logger:info,2016-10-19T09:55:11.566-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.132.0>},
{name,inet_gethost_native_sup},
{mfargs,{inet_gethost_native,start_link,[]}},
{restart_type,temporary},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:11.574-07:00,nonode@nohost:dist_manager<0.131.0>:dist_manager:bringup:214]Attempting to bring up net_kernel with name 'ns_1@127.0.0.1'
[error_logger:info,2016-10-19T09:55:11.577-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.135.0>},
{name,erl_epmd},
{mfargs,{erl_epmd,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:11.577-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.136.0>},
{name,auth},
{mfargs,{auth,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:11.578-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:configure_net_kernel:255]Set net_kernel verbosity to 10 -> 0
[error_logger:info,2016-10-19T09:55:11.578-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.137.0>},
{name,net_kernel},
{mfargs,
{net_kernel,start_link,
[['ns_1@127.0.0.1',longnames]]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:11.578-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_sup}
started: [{pid,<0.134.0>},
{name,net_sup_dynamic},
{mfargs,
{erl_distribution,start_link,
[['ns_1@127.0.0.1',longnames]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,supervisor}]
[ns_server:info,2016-10-19T09:55:11.579-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:save_node:147]saving node to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.node"
[ns_server:debug,2016-10-19T09:55:11.592-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:bringup:228]Attempted to save node name to disk: ok
[ns_server:debug,2016-10-19T09:55:11.593-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:wait_for_node:235]Waiting for connection to node 'babysitter_of_ns_1@127.0.0.1' to be established
[error_logger:info,2016-10-19T09:55:11.593-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{connect,normal,'babysitter_of_ns_1@127.0.0.1'}}
[ns_server:debug,2016-10-19T09:55:11.598-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:wait_for_node:244]Observed node 'babysitter_of_ns_1@127.0.0.1' to come up
[error_logger:info,2016-10-19T09:55:11.601-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.131.0>},
{name,dist_manager},
{mfargs,{dist_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
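The bring-up sequence above (erl_epmd and auth under net_sup, then net_kernel as 'ns_1@127.0.0.1', then waiting on 'babysitter_of_ns_1@127.0.0.1') is the stock Erlang distribution start-up driven by dist_manager. A minimal sketch of the same steps with the plain kernel API, reusing the names from the log:

    %% what erl_distribution:start_link arranges for dist_manager above
    {ok, _NetKernel} = net_kernel:start(['ns_1@127.0.0.1', longnames]),
    %% the {connect,normal,...} event logged above, done by hand
    pong = net_adm:ping('babysitter_of_ns_1@127.0.0.1'),
    true = lists:member('babysitter_of_ns_1@127.0.0.1', nodes(connected)).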
[error_logger:info,2016-10-19T09:55:11.602-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.142.0>},
{name,ns_cookie_manager},
{mfargs,{ns_cookie_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:11.603-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.143.0>},
{name,ns_cluster},
{mfargs,{ns_cluster,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:11.603-07:00,ns_1@127.0.0.1:ns_config_sup<0.144.0>:ns_config_sup:init:32]loading static ns_config from "/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/couchbase/config"
[error_logger:info,2016-10-19T09:55:11.604-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.145.0>},
{name,ns_config_events},
{mfargs,
{gen_event,start_link,[{local,ns_config_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:11.604-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.146.0>},
{name,ns_config_events_local},
{mfargs,
{gen_event,start_link,
[{local,ns_config_events_local}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:11.623-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:load_config:1070]Loading static config from "/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/couchbase/config"
[ns_server:info,2016-10-19T09:55:11.624-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:load_config:1084]Loading dynamic config from "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/config.dat"
[ns_server:debug,2016-10-19T09:55:11.629-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:load_config:1092]Here's the full dynamic config we loaded:
[[{buckets,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{18,63644101958}}]},
{configs,
[{"test",
[{repl_type,dcp},
{uuid,<<"7f7d4a28ca84a805edf9c899521eb18c">>},
{num_replicas,1},
{replica_index,false},
{ram_quota,524288000},
{auth_type,sasl},
{sasl_password,"*****"},
{autocompaction,false},
{purge_interval,undefined},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]},
{"locked",
[{eviction_policy,value_only},
{num_threads,3},
{flush_enabled,false},
{purge_interval,undefined},
{autocompaction,false},
{sasl_password,"*****"},
{auth_type,sasl},
{ram_quota,104857600},
{num_replicas,1},
{repl_type,dcp},
{uuid,<<"8515ae93e826e7c4389f3fd25fbb263e">>},
{replica_index,false},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]},
{"default",
[{repl_type,dcp},
{uuid,<<"b04d5897bd3c5329a82156f1b77c395d">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,104857600},
{auth_type,sasl},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]}]}]},
{vbucket_map_history,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012772}}]},
{[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]],
[{replication_topology,star},{tags,undefined},{max_slaves,10}]}]},
{alert_limits,
[{max_overhead_perc,50},{max_disk_used,90},{max_indexer_ram,75}]},
{audit,
[{auditd_enabled,false},
{rotate_interval,86400},
{rotate_size,20971520},
{disabled,[]},
{sync,[]},
{log_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"}]},
{auto_failover_cfg,[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{cert_and_pkey,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{<<"-----BEGIN CERTIFICATE-----\nMIIDAjCCAeqgAwIBAgIIFH6f01mhINIwDQYJKoZIhvcNAQELBQAwJDEiMCAGA1UE\nAxMZQ291Y2hiYXNlIFNlcnZlciAxMzBiNDVmMzAeFw0xMzAxMDEwMDAwMDBaFw00\nOTEyMzEyMzU5NTlaMCQxIjAgBgNVBAMTGUNvdWNoYmFzZSBTZXJ2ZXIgMTMwYjQ1\nZjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDINaiYz/zhTeD2h8Pa\nP015LetKMhey1yoE2L5H1wHK7pADeFRfqeoNunEvlxRWL/YjnqvRZPxrjdadlh7L\nVhZVke2blopHdhJjaHCvdI8R3BRBK4fLv5m4c0SzdE6bvk1QS+T3rZyzxUbMtB0g\nEq2ZPed8JdQFqO0Bo1JuXJx4/q9tjhvbHUVjRX9QHL3nClC3qVemVjTCKbNqZWv8\n5qZmH/X5DWkyNFKj6HzE20qFWYa8d9tmdeo9zaGVMzCFCOXKPGeHkW/GpJWxK3FM\n/BWdgq5nonb+y3ufSE1JBJjXCO6JipXf4OKRB54009m9hAmJJK9sPVeH9NMnVhS7\naEDXAgMBAAGjODA2MA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEFBQcD\nATAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCAdca3XDbl7heN\n6vk0VTcrrZCLHDY+PLTFcnGq2xv84APOrvwQJEH9qnCw0/czkn9UW+27Ix2wjkeP\nFbUdXKvFDpU0LQgpkdZ+BKXQlX0ezKG+StpUODxYdDnUDCLzRLJsg0GgEODysPAK\nwHiA3X5d+UvNE/Z7TP5ASyzXnypuR8jhXCdEQ0o8mLQMx4I4Xd2sHFz2x6qO9i8f\nMPEJ076QTj5+RyI4BDAgUeWns/ZTKX/bi+FXPkRZ8QWkxIrSkNSdmgvPmMBzFluv\nDhFwtFBMQovmICfkT5TYmtwYsqZgh32v5FZOLUlHOR29R1dKOOuyCbIyqlTjWCYZ\n1j3GlmIC\n-----END CERTIFICATE-----\n">>,
<<"*****">>}]},
{cluster_compat_version,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{6,63644012660}}]},
4,6]},
{drop_request_memory_threshold_mib,undefined},
{email_alerts,
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},{pass,"*****"},{host,"localhost"},{port,25},{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,
auto_failover_disabled,ip,disk,overhead,ep_oom_errors,
ep_item_commit_failed,audit_dropped_events,indexer_ram_max_usage]}]},
{fts_memory_quota,512},
{goxdcr_upgrade,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
'_deleted']},
{index_aware_rebalance_disabled,false},
{max_bucket_count,10},
{memcached,[]},
{memory_quota,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]}|
1024]},
{nodes_wanted,['ns_1@127.0.0.1']},
{otp,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]},
{cookie,oxqibayfkfbrogxo}]},
{read_only_user_creds,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
null]},
{remote_clusters,[]},
{replication,[{enabled,true}]},
{rest,[{port,8091}]},
{rest_creds,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012784}}]}|
{"Administrator",{password,"*****"}}]},
{roles_definitions,
[{admin,[],
[{name,<<"Admin">>},
{desc,<<"Can manage ALL cluster features including security.">>}],
[{[],all}]},
{ro_admin,[],
[{name,<<"Read Only Admin">>},
{desc,<<"Can view ALL cluster features.">>}],
[{[{bucket,any},password],none},
{[{bucket,any},data],none},
{[admin,security],[read]},
{[admin],none},
{[],[read]}]},
{cluster_admin,[],
[{name,<<"Cluster Admin">>},
{desc,<<"Can manage all cluster features EXCEPT security.">>}],
[{[admin],none},{[],all}]},
{bucket_admin,
[bucket_name],
[{name,<<"Bucket Admin">>},
{desc,
<<"Can manage ALL bucket features for specified buckets (incl. start/stop XDCR)">>}],
[{[{bucket,bucket_name},xdcr],[read,execute]},
{[{bucket,bucket_name}],all},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{bucket_sasl,
[bucket_name],
[],
[{[{bucket,bucket_name},data],all},
{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name}],[read,flush]},
{[pools],[read]}]},
{views_admin,
[bucket_name],
[{name,<<"Views Admin">>},
{desc,<<"Can manage views for specified buckets">>}],
[{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{replication_admin,[],
[{name,<<"Replication Admin">>},
{desc,<<"Can manage ONLY XDCR features (cluster AND bucket level)">>}],
[{[{bucket,any},xdcr],all},
{[{bucket,any},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],all},
{[admin],none},
{[],[read]}]}]},
{server_groups,
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{settings,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012777}}]},
{stats,[{send_stats,false}]}]},
{uuid,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012784}}]}|
<<"561b20f339d4184270a7de0b1c1de1b0">>]},
{{couchdb,max_parallel_indexers},4},
{{couchdb,max_parallel_replica_indexers},2},
{{local_changes_count,<<"eac84bf2ecf69c83ca0268ac5aac465d">>},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{35,63644101958}}]}]},
{{metakv,<<"/indexing/settings/config">>},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{4,63644012784}}]}|
<<"{\"indexer.settings.compaction.days_of_week\":\"Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday\",\"indexer.settings.compaction.interval\":\"00:00,00:00\",\"indexer.settings.compaction.compaction_mode\":\"circular\",\"indexer.settings.persisted_snapshot.interval\":5000,\"indexer.settings.log_level\":\"info\",\"indexer.settings.compaction.min_frag\":30,\"indexer.settings.inmemory_snapshot.interval\":200,\"indexer.settings.max_cpu_percent\":0,\"indexer.settings.storage_mode\":\"forestdb\",\"indexer.settings.recovery.max_rollbacks\":5,\"indexer.settings.memory_quota\":536870912,\"indexer.settings.compaction.abort_exceed_interval\":false}">>]},
{{request_limit,capi},undefined},
{{request_limit,rest},undefined},
{{service_map,fts},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}]},
{{service_map,index},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']},
{{service_map,n1ql},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']},
{{node,'ns_1@127.0.0.1',audit},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]},
{{node,'ns_1@127.0.0.1',capi_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8092]},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{check_interval,30},
{min_db_file_size,131072},
{min_view_file_size,20971520}]},
{{node,'ns_1@127.0.0.1',config_version},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{4,5}]},
{{node,'ns_1@127.0.0.1',fts_http_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8094]},
{{node,'ns_1@127.0.0.1',indexer_admin_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9100]},
{{node,'ns_1@127.0.0.1',indexer_http_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9102]},
{{node,'ns_1@127.0.0.1',indexer_scan_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9101]},
{{node,'ns_1@127.0.0.1',indexer_stcatchup_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9104]},
{{node,'ns_1@127.0.0.1',indexer_stinit_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9103]},
{{node,'ns_1@127.0.0.1',indexer_stmaint_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9105]},
{{node,'ns_1@127.0.0.1',is_enterprise},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
true]},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@127.0.0.1',ldap_enabled},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
false]},
{{node,'ns_1@127.0.0.1',membership},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
active]},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11210},
{dedicated_port,11209},
{ssl_port,11207},
{admin_user,"_admin"},
{admin_pass,"*****"},
{engines,
[{membase,
[{engine,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/ep.so"},
{static_config_string,"failpartialwarmup=false"}]},
{memcached,
[{engine,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{config_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached.json"},
{audit_file,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/audit.json"},
{log_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003}]},
{{node,'ns_1@127.0.0.1',memcached_config},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{[{interfaces,
{memcached_config_mgr,omit_missing_mcd_ports,
[{[{host,<<"*">>},{port,port},{maxconn,maxconn}]},
{[{host,<<"*">>},
{port,dedicated_port},
{maxconn,dedicated_port_maxconn}]},
{[{host,<<"*">>},
{port,ssl_port},
{maxconn,maxconn},
{ssl,
{[{key,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-key.pem">>},
{cert,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-cert.pem">>}]}}]}]}},
{ssl_cipher_list,{"~s",[ssl_cipher_list]}},
{ssl_minimum_protocol,{memcached_config_mgr,ssl_minimum_protocol,[]}},
{connection_idle_time,connection_idle_time},
{breakpad,
{[{enabled,breakpad_enabled},
{minidump_dir,{memcached_config_mgr,get_minidump_dir,[]}}]}},
{extensions,
[{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/stdin_term_handler.so">>},
{config,<<>>}]},
{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/file_logger.so">>},
{config,
{"cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]}}]}]},
{admin,{"~s",[admin_user]}},
{verbosity,verbosity},
{audit_file,{"~s",[audit_file]}},
{dedupe_nmvb_maps,dedupe_nmvb_maps}]}]},
{{node,'ns_1@127.0.0.1',memcached_defaults},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{maxconn,30000},
{dedicated_port_maxconn,5000},
{ssl_cipher_list,"HIGH"},
{connection_idle_time,0},
{verbosity,0},
{breakpad_enabled,true},
{breakpad_minidump_dir_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/crash"},
{dedupe_nmvb_maps,false}]},
{{node,'ns_1@127.0.0.1',moxi},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11211},
{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{filename,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ns_log"}]},
{{node,'ns_1@127.0.0.1',port_servers},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]},
{{node,'ns_1@127.0.0.1',projector_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9999]},
{{node,'ns_1@127.0.0.1',query_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8093]},
{{node,'ns_1@127.0.0.1',rest},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,8091},
{port_meta,global}]},
{{node,'ns_1@127.0.0.1',services},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]},
index,kv,n1ql]},
{{node,'ns_1@127.0.0.1',ssl_capi_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18092]},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
11214]},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
11215]},
{{node,'ns_1@127.0.0.1',ssl_query_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18093]},
{{node,'ns_1@127.0.0.1',ssl_rest_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18091]},
{{node,'ns_1@127.0.0.1',stop_xdcr},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012660}}]}|
'_deleted']},
{{node,'ns_1@127.0.0.1',uuid},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
<<"eac84bf2ecf69c83ca0268ac5aac465d">>]},
{{node,'ns_1@127.0.0.1',xdcr_rest_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9998]}]]
[ns_server:info,2016-10-19T09:55:11.634-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:load_config:1113]Here's the full dynamic config we loaded, plus the static & default config:
[{{node,'ns_1@127.0.0.1',xdcr_rest_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9998]},
{{node,'ns_1@127.0.0.1',uuid},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
<<"eac84bf2ecf69c83ca0268ac5aac465d">>]},
{{node,'ns_1@127.0.0.1',stop_xdcr},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012660}}]}|
'_deleted']},
{{node,'ns_1@127.0.0.1',ssl_rest_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18091]},
{{node,'ns_1@127.0.0.1',ssl_query_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18093]},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
11215]},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
11214]},
{{node,'ns_1@127.0.0.1',ssl_capi_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18092]},
{{node,'ns_1@127.0.0.1',services},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]},
index,kv,n1ql]},
{{node,'ns_1@127.0.0.1',rest},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,8091},
{port_meta,global}]},
{{node,'ns_1@127.0.0.1',query_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8093]},
{{node,'ns_1@127.0.0.1',projector_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9999]},
{{node,'ns_1@127.0.0.1',port_servers},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{filename,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ns_log"}]},
{{node,'ns_1@127.0.0.1',moxi},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11211},
{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',memcached_defaults},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{maxconn,30000},
{dedicated_port_maxconn,5000},
{ssl_cipher_list,"HIGH"},
{connection_idle_time,0},
{verbosity,0},
{breakpad_enabled,true},
{breakpad_minidump_dir_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/crash"},
{dedupe_nmvb_maps,false}]},
{{node,'ns_1@127.0.0.1',memcached_config},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{[{interfaces,
{memcached_config_mgr,omit_missing_mcd_ports,
[{[{host,<<"*">>},{port,port},{maxconn,maxconn}]},
{[{host,<<"*">>},
{port,dedicated_port},
{maxconn,dedicated_port_maxconn}]},
{[{host,<<"*">>},
{port,ssl_port},
{maxconn,maxconn},
{ssl,
{[{key,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-key.pem">>},
{cert,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-cert.pem">>}]}}]}]}},
{ssl_cipher_list,{"~s",[ssl_cipher_list]}},
{ssl_minimum_protocol,{memcached_config_mgr,ssl_minimum_protocol,[]}},
{connection_idle_time,connection_idle_time},
{breakpad,
{[{enabled,breakpad_enabled},
{minidump_dir,{memcached_config_mgr,get_minidump_dir,[]}}]}},
{extensions,
[{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/stdin_term_handler.so">>},
{config,<<>>}]},
{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/file_logger.so">>},
{config,
{"cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]}}]}]},
{admin,{"~s",[admin_user]}},
{verbosity,verbosity},
{audit_file,{"~s",[audit_file]}},
{dedupe_nmvb_maps,dedupe_nmvb_maps}]}]},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11210},
{dedicated_port,11209},
{ssl_port,11207},
{admin_user,"_admin"},
{admin_pass,"*****"},
{engines,
[{membase,
[{engine,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/ep.so"},
{static_config_string,"failpartialwarmup=false"}]},
{memcached,
[{engine,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{config_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached.json"},
{audit_file,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/audit.json"},
{log_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003}]},
{{node,'ns_1@127.0.0.1',membership},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
active]},
{{node,'ns_1@127.0.0.1',ldap_enabled},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
false]},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@127.0.0.1',is_enterprise},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
true]},
{{node,'ns_1@127.0.0.1',indexer_stmaint_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9105]},
{{node,'ns_1@127.0.0.1',indexer_stinit_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9103]},
{{node,'ns_1@127.0.0.1',indexer_stcatchup_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9104]},
{{node,'ns_1@127.0.0.1',indexer_scan_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9101]},
{{node,'ns_1@127.0.0.1',indexer_http_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9102]},
{{node,'ns_1@127.0.0.1',indexer_admin_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9100]},
{{node,'ns_1@127.0.0.1',fts_http_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8094]},
{{node,'ns_1@127.0.0.1',config_version},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{4,5}]},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{check_interval,30},
{min_db_file_size,131072},
{min_view_file_size,20971520}]},
{{node,'ns_1@127.0.0.1',capi_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8092]},
{{node,'ns_1@127.0.0.1',audit},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]},
{{service_map,n1ql},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']},
{{service_map,index},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']},
{{service_map,fts},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}]},
{{request_limit,rest},undefined},
{{request_limit,capi},undefined},
{{metakv,<<"/indexing/settings/config">>},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{4,63644012784}}]}|
<<"{\"indexer.settings.compaction.days_of_week\":\"Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday\",\"indexer.settings.compaction.interval\":\"00:00,00:00\",\"indexer.settings.compaction.compaction_mode\":\"circular\",\"indexer.settings.persisted_snapshot.interval\":5000,\"indexer.settings.log_level\":\"info\",\"indexer.settings.compaction.min_frag\":30,\"indexer.settings.inmemory_snapshot.interval\":200,\"indexer.settings.max_cpu_percent\":0,\"indexer.settings.storage_mode\":\"forestdb\",\"indexer.settings.recovery.max_rollbacks\":5,\"indexer.settings.memory_quota\":536870912,\"indexer.settings.compaction.abort_exceed_interval\":false}">>]},
{{local_changes_count,<<"eac84bf2ecf69c83ca0268ac5aac465d">>},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{35,63644101958}}]}]},
{{couchdb,max_parallel_replica_indexers},2},
{{couchdb,max_parallel_indexers},4},
{uuid,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012784}}]}|
<<"561b20f339d4184270a7de0b1c1de1b0">>]},
{settings,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012777}}]},
{stats,[{send_stats,false}]}]},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{server_groups,
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]},
{roles_definitions,
[{admin,[],
[{name,<<"Admin">>},
{desc,<<"Can manage ALL cluster features including security.">>}],
[{[],all}]},
{ro_admin,[],
[{name,<<"Read Only Admin">>},{desc,<<"Can view ALL cluster features.">>}],
[{[{bucket,any},password],none},
{[{bucket,any},data],none},
{[admin,security],[read]},
{[admin],none},
{[],[read]}]},
{cluster_admin,[],
[{name,<<"Cluster Admin">>},
{desc,<<"Can manage all cluster features EXCEPT security.">>}],
[{[admin],none},{[],all}]},
{bucket_admin,
[bucket_name],
[{name,<<"Bucket Admin">>},
{desc,
<<"Can manage ALL bucket features for specified buckets (incl. start/stop XDCR)">>}],
[{[{bucket,bucket_name},xdcr],[read,execute]},
{[{bucket,bucket_name}],all},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{bucket_sasl,
[bucket_name],
[],
[{[{bucket,bucket_name},data],all},
{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name}],[read,flush]},
{[pools],[read]}]},
{views_admin,
[bucket_name],
[{name,<<"Views Admin">>},
{desc,<<"Can manage views for specified buckets">>}],
[{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{replication_admin,[],
[{name,<<"Replication Admin">>},
{desc,<<"Can manage ONLY XDCR features (cluster AND bucket level)">>}],
[{[{bucket,any},xdcr],all},
{[{bucket,any},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],all},
{[admin],none},
{[],[read]}]}]},
{rest_creds,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012784}}]}|
{"Administrator",{password,"*****"}}]},
{rest,[{port,8091}]},
{replication,[{enabled,true}]},
{remote_clusters,[]},
{read_only_user_creds,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
null]},
{otp,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]},
{cookie,oxqibayfkfbrogxo}]},
{nodes_wanted,['ns_1@127.0.0.1']},
{memory_quota,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]}|
1024]},
{memcached,[]},
{max_bucket_count,10},
{index_aware_rebalance_disabled,false},
{goxdcr_upgrade,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
'_deleted']},
{fts_memory_quota,512},
{email_alerts,
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},{pass,"*****"},{host,"localhost"},{port,25},{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,
auto_failover_disabled,ip,disk,overhead,ep_oom_errors,
ep_item_commit_failed,audit_dropped_events,indexer_ram_max_usage]}]},
{drop_request_memory_threshold_mib,undefined},
{cluster_compat_version,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{6,63644012660}}]},
4,6]},
{cert_and_pkey,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{<<"-----BEGIN CERTIFICATE-----\nMIIDAjCCAeqgAwIBAgIIFH6f01mhINIwDQYJKoZIhvcNAQELBQAwJDEiMCAGA1UE\nAxMZQ291Y2hiYXNlIFNlcnZlciAxMzBiNDVmMzAeFw0xMzAxMDEwMDAwMDBaFw00\nOTEyMzEyMzU5NTlaMCQxIjAgBgNVBAMTGUNvdWNoYmFzZSBTZXJ2ZXIgMTMwYjQ1\nZjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDINaiYz/zhTeD2h8Pa\nP015LetKMhey1yoE2L5H1wHK7pADeFRfqeoNunEvlxRWL/YjnqvRZPxrjdadlh7L\nVhZVke2blopHdhJjaHCvdI8R3BRBK4fLv5m4c0SzdE6bvk1QS+T3rZyzxUbMtB0g\nEq2ZPed8JdQFqO0Bo1JuXJx4/q9tjhvbHUVjRX9QHL3nClC3qVemVjTCKbNqZWv8\n5qZmH/X5DWkyNFKj6HzE20qFWYa8d9tmdeo9zaGVMzCFCOXKPGeHkW/GpJWxK3FM\n/BWdgq5nonb+y3ufSE1JBJjXCO6JipXf4OKRB54009m9hAmJJK9sPVeH9NMnVhS7\naEDXAgMBAAGjODA2MA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEFBQcD\nATAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCAdca3XDbl7heN\n6vk0VTcrrZCLHDY+PLTFcnGq2xv84APOrvwQJEH9qnCw0/czkn9UW+27Ix2wjkeP\nFbUdXKvFDpU0LQgpkdZ+BKXQlX0ezKG+StpUODxYdDnUDCLzRLJsg0GgEODysPAK\nwHiA3X5d+UvNE/Z7TP5ASyzXnypuR8jhXCdEQ0o8mLQMx4I4Xd2sHFz2x6qO9i8f\nMPEJ076QTj5+RyI4BDAgUeWns/ZTKX/bi+FXPkRZ8QWkxIrSkNSdmgvPmMBzFluv\nDhFwtFBMQovmICfkT5TYmtwYsqZgh32v5FZOLUlHOR29R1dKOOuyCbIyqlTjWCYZ\n1j3GlmIC\n-----END CERTIFICATE-----\n">>,
<<"*****">>}]},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{auto_failover_cfg,[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]},
{audit,
[{auditd_enabled,false},
{rotate_interval,86400},
{rotate_size,20971520},
{disabled,[]},
{sync,[]},
{log_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"}]},
{alert_limits,
[{max_overhead_perc,50},{max_disk_used,90},{max_indexer_ram,75}]},
{vbucket_map_history,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012772}}]},
{[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]],
[{replication_topology,star},{tags,undefined},{max_slaves,10}]}]},
{buckets,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{18,63644101958}}]},
{configs,
[{"test",
[{repl_type,dcp},
{uuid,<<"7f7d4a28ca84a805edf9c899521eb18c">>},
{num_replicas,1},
{replica_index,false},
{ram_quota,524288000},
{auth_type,sasl},
{sasl_password,"*****"},
{autocompaction,false},
{purge_interval,undefined},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]},
{"locked",
[{eviction_policy,value_only},
{num_threads,3},
{flush_enabled,false},
{purge_interval,undefined},
{autocompaction,false},
{sasl_password,"*****"},
{auth_type,sasl},
{ram_quota,104857600},
{num_replicas,1},
{repl_type,dcp},
{uuid,<<"8515ae93e826e7c4389f3fd25fbb263e">>},
{replica_index,false},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]},
{"default",
[{repl_type,dcp},
{uuid,<<"b04d5897bd3c5329a82156f1b77c395d">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,104857600},
{auth_type,sasl},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]}]}]}]
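The dump above is a plain Erlang proplist, so its entries can be inspected with stock list functions. A minimal sketch (not part of the log): the Config value below is a hand-copied fragment of the dump, included only so the snippet is self-contained, and the module name is made up for illustration.

-module(cfg_peek).
-export([thresholds/0]).

%% Pull the autocompaction thresholds out of a config proplist shaped
%% like the dump above.
thresholds() ->
    Config = [{autocompaction,
               [{database_fragmentation_threshold,{30,undefined}},
                {view_fragmentation_threshold,{30,undefined}}]},
              {auto_failover_cfg,
               [{enabled,false},{timeout,120},{max_nodes,1},{count,0}]}],
    AC = proplists:get_value(autocompaction, Config),
    {proplists:get_value(database_fragmentation_threshold, AC),
     proplists:get_value(view_fragmentation_threshold, AC)}.

Calling cfg_peek:thresholds() returns {{30,undefined},{30,undefined}}, matching the thresholds near the top of the dump.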
[error_logger:info,2016-10-19T09:55:11.637-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.147.0>},
{name,ns_config},
{mfargs,
{ns_config,start_link,
["/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/couchbase/config",
ns_config_default]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:11.638-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.150.0>},
{name,ns_config_remote},
{mfargs,
{ns_config_replica,start_link,
[{local,ns_config_remote}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:11.639-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.151.0>},
{name,ns_config_log},
{mfargs,{ns_config_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:11.639-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.144.0>},
{name,ns_config_sup},
{mfargs,{ns_config_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:11.640-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.153.0>},
{name,vbucket_filter_changes_registry},
{mfargs,
{ns_process_registry,start_link,
[vbucket_filter_changes_registry,
[{terminate_command,shutdown}]]}},
{restart_type,permanent},
{shutdown,100},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:11.642-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.154.0>},
{name,json_rpc_connection_sup},
{mfargs,{json_rpc_connection_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:11.647-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.157.0>},
{name,remote_monitors},
{mfargs,{remote_monitors,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:11.648-07:00,ns_1@127.0.0.1:menelaus_barrier<0.158.0>:one_shot_barrier:barrier_body:58]Barrier menelaus_barrier has started
[error_logger:info,2016-10-19T09:55:11.648-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.158.0>},
{name,menelaus_barrier},
{mfargs,{menelaus_sup,barrier_start_link,[]}},
{restart_type,temporary},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:11.648-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.159.0>},
{name,rest_lhttpc_pool},
{mfargs,
{lhttpc_manager,start_link,
[[{name,rest_lhttpc_pool},
{connection_timeout,120000},
{pool_size,20}]]}},
{restart_type,{permanent,1}},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:11.655-07:00,ns_1@127.0.0.1:ns_ssl_services_setup<0.161.0>:ns_ssl_services_setup:init:370]Used ssl options:
[{keyfile,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/ssl-cert-key.pem"},
{certfile,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/ssl-cert-key.pem"},
{versions,[tlsv1,'tlsv1.1','tlsv1.2']},
{cacertfile,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/ssl-cert-key.pem-ca"},
{dh,<<48,130,1,8,2,130,1,1,0,152,202,99,248,92,201,35,238,246,5,77,93,120,10,
118,129,36,52,111,193,167,220,49,229,106,105,152,133,121,157,73,158,
232,153,197,197,21,171,140,30,207,52,165,45,8,221,162,21,199,183,66,
211,247,51,224,102,214,190,130,96,253,218,193,35,43,139,145,89,200,250,
145,92,50,80,134,135,188,205,254,148,122,136,237,220,186,147,187,104,
159,36,147,217,117,74,35,163,145,249,175,242,18,221,124,54,140,16,246,
169,84,252,45,47,99,136,30,60,189,203,61,86,225,117,255,4,91,46,110,
167,173,106,51,65,10,248,94,225,223,73,40,232,140,26,11,67,170,118,190,
67,31,127,233,39,68,88,132,171,224,62,187,207,160,189,209,101,74,8,205,
174,146,173,80,105,144,246,25,153,86,36,24,178,163,64,202,221,95,184,
110,244,32,226,217,34,55,188,230,55,16,216,247,173,246,139,76,187,66,
211,159,17,46,20,18,48,80,27,250,96,189,29,214,234,241,34,69,254,147,
103,220,133,40,164,84,8,44,241,61,164,151,9,135,41,60,75,4,202,133,173,
72,6,69,167,89,112,174,40,229,171,2,1,2>>},
{ciphers,[{dhe_rsa,aes_256_cbc,sha256},
{dhe_dss,aes_256_cbc,sha256},
{rsa,aes_256_cbc,sha256},
{dhe_rsa,aes_128_cbc,sha256},
{dhe_dss,aes_128_cbc,sha256},
{rsa,aes_128_cbc,sha256},
{dhe_rsa,aes_256_cbc,sha},
{dhe_dss,aes_256_cbc,sha},
{rsa,aes_256_cbc,sha},
{dhe_rsa,'3des_ede_cbc',sha},
{dhe_dss,'3des_ede_cbc',sha},
{rsa,'3des_ede_cbc',sha},
{dhe_rsa,aes_128_cbc,sha},
{dhe_dss,aes_128_cbc,sha},
{rsa,aes_128_cbc,sha}]}]
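The options above are standard Erlang ssl options (the tuple-form cipher list is the pre-map notation used by older OTP releases). A minimal sketch of opening a TLS listener with a subset of them, assuming a combined key-and-cert PEM like the one named above; the path and module name are placeholders, and the cipher list is omitted here to stay portable across OTP versions.

-module(tls_listen_sketch).
-export([listen/1]).

%% Open a TLS listen socket using a combined key+cert PEM, mirroring the
%% keyfile/certfile/versions options in the log entry above.
listen(Port) ->
    {ok, _Started} = application:ensure_all_started(ssl),
    Pem = "/path/to/ssl-cert-key.pem",  %% placeholder path
    ssl:listen(Port, [{keyfile, Pem},
                      {certfile, Pem},
                      {versions, ['tlsv1.1', 'tlsv1.2']},
                      {reuseaddr, true}]).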
[error_logger:info,2016-10-19T09:55:11.705-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.161.0>},
{name,ns_ssl_services_setup},
{mfargs,{ns_ssl_services_setup,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:11.721-07:00,ns_1@127.0.0.1:<0.163.0>:menelaus_pluggable_ui:validate_plugin_spec:117]Loaded pluggable UI specification for fts
[ns_server:info,2016-10-19T09:55:11.721-07:00,ns_1@127.0.0.1:<0.163.0>:menelaus_pluggable_ui:validate_plugin_spec:117]Loaded pluggable UI specification for n1ql
[ns_server:debug,2016-10-19T09:55:11.728-07:00,ns_1@127.0.0.1:<0.163.0>:restartable:start_child:98]Started child process <0.165.0>
MFA: {ns_ssl_services_setup,start_link_rest_service,[]}
[error_logger:info,2016-10-19T09:55:11.728-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.163.0>},
{name,ns_rest_ssl_service},
{mfargs,
{restartable,start_link,
[{ns_ssl_services_setup,
start_link_rest_service,[]},
1000]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:11.729-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.160.0>},
{name,ns_ssl_services_sup},
{mfargs,{ns_ssl_services_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:11.735-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.182.0>},
{name,start_couchdb_node},
{mfargs,{ns_server_nodes_sup,start_couchdb_node,[]}},
{restart_type,{permanent,5}},
{shutdown,86400000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:11.735-07:00,ns_1@127.0.0.1:wait_link_to_couchdb_node<0.183.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:126]Waiting for ns_couchdb node to start
[error_logger:info,2016-10-19T09:55:11.735-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{connect,normal,'couchdb_ns_1@127.0.0.1'}}
[ns_server:debug,2016-10-19T09:55:11.735-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: {badrpc,nodedown}
[error_logger:info,2016-10-19T09:55:11.735-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{'EXIT',<0.186.0>,shutdown}}
[error_logger:info,2016-10-19T09:55:11.735-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{net_kernel,875,nodedown,'couchdb_ns_1@127.0.0.1'}}
[error_logger:info,2016-10-19T09:55:11.936-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{connect,normal,'couchdb_ns_1@127.0.0.1'}}
[ns_server:debug,2016-10-19T09:55:11.943-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[ns_server:debug,2016-10-19T09:55:12.147-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[ns_server:debug,2016-10-19T09:55:12.350-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[ns_server:debug,2016-10-19T09:55:12.551-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[ns_server:debug,2016-10-19T09:55:12.755-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[error_logger:info,2016-10-19T09:55:13.129-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.183.0>},
{name,wait_for_couchdb_node},
{mfargs,
{erlang,apply,
[#Fun<...>,[]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
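The preceding sequence (connect attempt, {badrpc,nodedown}, then false roughly every 200 ms until this progress report) is a classic rpc polling loop. A minimal sketch of that pattern; the ns_couchdb_node:is_ready/0 MFA is hypothetical, since the log does not name the call actually used.

-module(wait_sketch).
-export([wait_for/1]).

%% Poll Node over rpc until it reports ready; {badrpc,nodedown} and false
%% both mean "not yet", matching the messages in the log above.
wait_for(Node) ->
    case rpc:call(Node, ns_couchdb_node, is_ready, []) of  %% hypothetical MFA
        true ->
            ok;
        NotReady ->
            error_logger:info_msg("~p is not ready: ~p~n", [Node, NotReady]),
            timer:sleep(200),  %% ~200 ms spacing, as in the timestamps above
            wait_for(Node)
    end.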
[ns_server:debug,2016-10-19T09:55:13.133-07:00,ns_1@127.0.0.1:ns_server_nodes_sup<0.156.0>:ns_storage_conf:setup_db_and_ix_paths:53]Initialize db_and_ix_paths variable with [{db_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/data"},
{index_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/data"}]
[error_logger:info,2016-10-19T09:55:13.136-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.198.0>},
{name,diag_handler_worker},
{mfargs,{work_queue,start_link,[diag_handler_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:13.137-07:00,ns_1@127.0.0.1:ns_server_sup<0.197.0>:dir_size:start_link:39]Starting quick version of dir_size with program name: godu
[error_logger:info,2016-10-19T09:55:13.138-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.199.0>},
{name,dir_size},
{mfargs,{dir_size,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:13.139-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.200.0>},
{name,request_throttler},
{mfargs,{request_throttler,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:13.142-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.202.0>},
{name,timer2_server},
{mfargs,{timer2,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:13.143-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.201.0>},
{name,ns_log},
{mfargs,{ns_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:13.143-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.203.0>},
{name,ns_crash_log_consumer},
{mfargs,{ns_log,start_link_crash_consumer,[]}},
{restart_type,{permanent,4}},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:13.145-07:00,ns_1@127.0.0.1:ns_config_isasl_sync<0.204.0>:ns_config_isasl_sync:init:63]isasl_sync init: ["/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw",
"_admin"]
[ns_server:debug,2016-10-19T09:55:13.145-07:00,ns_1@127.0.0.1:ns_config_isasl_sync<0.204.0>:ns_config_isasl_sync:init:71]isasl_sync init buckets: ["default","locked","test"]
[ns_server:debug,2016-10-19T09:55:13.145-07:00,ns_1@127.0.0.1:ns_config_isasl_sync<0.204.0>:ns_config_isasl_sync:writeSASLConf:143]Writing isasl passwd file: "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw"
[ns_server:warn,2016-10-19T09:55:13.149-07:00,ns_1@127.0.0.1:ns_config_isasl_sync<0.204.0>:ns_memcached:connect:1307]Unable to connect: {error,{badmatch,{error,econnrefused}}}, retrying.
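The econnrefused here just means memcached is not listening yet; the sync process retries until the port opens. A minimal sketch of that connect-with-retry step against the data port named later in this log ({port,11210}); the module name and the one-second backoff are assumptions.

-module(mc_connect_sketch).
-export([connect/0]).

%% Try the local memcached data port, retrying while the connection is
%% refused, as the warning above does.
connect() ->
    case gen_tcp:connect("127.0.0.1", 11210, [binary, {active, false}]) of
        {ok, Sock} ->
            {ok, Sock};
        {error, econnrefused} = E ->
            error_logger:warning_msg("Unable to connect: ~p, retrying.~n", [E]),
            timer:sleep(1000),  %% assumed backoff
            connect();
        {error, _} = Other ->
            Other
    end.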
[error_logger:info,2016-10-19T09:55:14.150-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.204.0>},
{name,ns_config_isasl_sync},
{mfargs,{ns_config_isasl_sync,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.150-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.207.0>},
{name,ns_log_events},
{mfargs,{gen_event,start_link,[{local,ns_log_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.154-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.209.0>},
{name,ns_node_disco_events},
{mfargs,
{gen_event,start_link,
[{local,ns_node_disco_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.154-07:00,ns_1@127.0.0.1:ns_node_disco<0.210.0>:ns_node_disco:init:138]Initting ns_node_disco with []
[ns_server:debug,2016-10-19T09:55:14.155-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[user:info,2016-10-19T09:55:14.155-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_sync:130]Node 'ns_1@127.0.0.1' synchronized otp cookie oxqibayfkfbrogxo from cluster
[ns_server:debug,2016-10-19T09:55:14.155-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2016-10-19T09:55:14.167-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2016-10-19T09:55:14.167-07:00,ns_1@127.0.0.1:<0.211.0>:ns_node_disco:do_nodes_wanted_updated_fun:224]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[ns_server:debug,2016-10-19T09:55:14.169-07:00,ns_1@127.0.0.1:<0.211.0>:ns_node_disco:do_nodes_wanted_updated_fun:230]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
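The cookie steps above (sync, save to disk, then pings carrying the cookie) amount to setting the Erlang distribution cookie and persisting it. A minimal sketch, assuming the node is already distributed (started with -name or -sname); the file path and module name are placeholders, and the cookie atom is the one from this log.

-module(cookie_sketch).
-export([save/0]).

%% Set the distribution cookie and persist it, mirroring the
%% do_cookie_sync/do_cookie_save steps logged above.
save() ->
    Cookie = oxqibayfkfbrogxo,
    true = erlang:set_cookie(node(), Cookie),  %% requires a distributed node
    File = "/path/to/couchbase-server.cookie-ns-server",  %% placeholder
    ok = file:write_file(File, atom_to_list(Cookie)).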
[error_logger:info,2016-10-19T09:55:14.169-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.210.0>},
{name,ns_node_disco},
{mfargs,{ns_node_disco,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.170-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.212.0>},
{name,ns_node_disco_log},
{mfargs,{ns_node_disco_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.172-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.213.0>},
{name,ns_node_disco_conf_events},
{mfargs,{ns_node_disco_conf_events,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.173-07:00,ns_1@127.0.0.1:ns_config_rep<0.215.0>:ns_config_rep:init:68]init pulling
[ns_server:debug,2016-10-19T09:55:14.173-07:00,ns_1@127.0.0.1:ns_config_rep<0.215.0>:ns_config_rep:init:70]init pushing
[error_logger:info,2016-10-19T09:55:14.173-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.214.0>},
{name,ns_config_rep_merger},
{mfargs,{ns_config_rep,start_link_merger,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.174-07:00,ns_1@127.0.0.1:ns_config_rep<0.215.0>:ns_config_rep:init:74]init reannouncing
[ns_server:debug,2016-10-19T09:55:14.174-07:00,ns_1@127.0.0.1:ns_config_events<0.145.0>:ns_node_disco_conf_events:handle_event:44]ns_node_disco_conf_events config on nodes_wanted
[ns_server:debug,2016-10-19T09:55:14.175-07:00,ns_1@127.0.0.1:ns_config_events<0.145.0>:ns_node_disco_conf_events:handle_event:50]ns_node_disco_conf_events config on otp
[ns_server:debug,2016-10-19T09:55:14.175-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2016-10-19T09:55:14.175-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2016-10-19T09:55:14.175-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
alert_limits ->
[{max_overhead_perc,50},{max_disk_used,90},{max_indexer_ram,75}]
[ns_server:debug,2016-10-19T09:55:14.175-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
audit ->
[{auditd_enabled,false},
{rotate_interval,86400},
{rotate_size,20971520},
{disabled,[]},
{sync,[]},
{log_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"}]
[ns_server:debug,2016-10-19T09:55:14.175-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
auto_failover_cfg ->
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]
[ns_server:debug,2016-10-19T09:55:14.175-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
autocompaction ->
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[error_logger:info,2016-10-19T09:55:14.176-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.215.0>},
{name,ns_config_rep},
{mfargs,{ns_config_rep,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.176-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.208.0>},
{name,ns_node_disco_sup},
{mfargs,{ns_node_disco_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:14.176-07:00,ns_1@127.0.0.1:ns_config_rep<0.215.0>:ns_config_rep:do_push_keys:321]Replicating some config keys ([alert_limits,audit,auto_failover_cfg,
autocompaction,buckets,cert_and_pkey,
cluster_compat_version,
drop_request_memory_threshold_mib,email_alerts,
fts_memory_quota,goxdcr_upgrade,
index_aware_rebalance_disabled,
max_bucket_count,memcached,memory_quota,
nodes_wanted,otp,read_only_user_creds,
remote_clusters,replication,rest,rest_creds,
roles_definitions,server_groups,
set_view_update_daemon,settings,uuid,
vbucket_map_history,
{couchdb,max_parallel_indexers},
{couchdb,max_parallel_replica_indexers},
{local_changes_count,
<<"eac84bf2ecf69c83ca0268ac5aac465d">>},
{metakv,<<"/indexing/settings/config">>},
{request_limit,capi},
{request_limit,rest},
{service_map,fts},
{service_map,index},
{service_map,n1ql},
{node,'ns_1@127.0.0.1',audit},
{node,'ns_1@127.0.0.1',capi_port},
{node,'ns_1@127.0.0.1',compaction_daemon},
{node,'ns_1@127.0.0.1',config_version},
{node,'ns_1@127.0.0.1',fts_http_port},
{node,'ns_1@127.0.0.1',indexer_admin_port},
{node,'ns_1@127.0.0.1',indexer_http_port},
{node,'ns_1@127.0.0.1',indexer_scan_port},
{node,'ns_1@127.0.0.1',indexer_stcatchup_port},
{node,'ns_1@127.0.0.1',indexer_stinit_port},
{node,'ns_1@127.0.0.1',indexer_stmaint_port},
{node,'ns_1@127.0.0.1',is_enterprise},
{node,'ns_1@127.0.0.1',isasl},
{node,'ns_1@127.0.0.1',ldap_enabled},
{node,'ns_1@127.0.0.1',membership},
{node,'ns_1@127.0.0.1',memcached},
{node,'ns_1@127.0.0.1',memcached_config},
{node,'ns_1@127.0.0.1',memcached_defaults},
{node,'ns_1@127.0.0.1',moxi},
{node,'ns_1@127.0.0.1',ns_log},
{node,'ns_1@127.0.0.1',port_servers},
{node,'ns_1@127.0.0.1',projector_port},
{node,'ns_1@127.0.0.1',query_port},
{node,'ns_1@127.0.0.1',rest},
{node,'ns_1@127.0.0.1',services},
{node,'ns_1@127.0.0.1',ssl_capi_port},
{node,'ns_1@127.0.0.1',
ssl_proxy_downstream_port}]..)
[ns_server:debug,2016-10-19T09:55:14.177-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
buckets ->
[[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{18,63644101958}}],
{configs,[[{map,[{0,[],['ns_1@127.0.0.1',undefined]},
{1,[],['ns_1@127.0.0.1',undefined]},
{2,[],['ns_1@127.0.0.1',undefined]},
{3,[],['ns_1@127.0.0.1',undefined]},
{4,[],['ns_1@127.0.0.1',undefined]},
{5,[],['ns_1@127.0.0.1',undefined]},
{6,[],['ns_1@127.0.0.1',undefined]},
{7,[],['ns_1@127.0.0.1',undefined]},
{8,[],['ns_1@127.0.0.1',undefined]},
{9,[],['ns_1@127.0.0.1',undefined]},
{10,[],['ns_1@127.0.0.1',undefined]},
{11,[],['ns_1@127.0.0.1',undefined]},
{12,[],['ns_1@127.0.0.1',undefined]},
{13,[],['ns_1@127.0.0.1',undefined]},
{14,[],['ns_1@127.0.0.1',undefined]},
{15,[],['ns_1@127.0.0.1',undefined]},
{16,[],['ns_1@127.0.0.1',undefined]},
{17,[],['ns_1@127.0.0.1',undefined]},
{18,[],['ns_1@127.0.0.1',undefined]},
{19,[],['ns_1@127.0.0.1',undefined]},
{20,[],['ns_1@127.0.0.1',undefined]},
{21,[],['ns_1@127.0.0.1',undefined]},
{22,[],['ns_1@127.0.0.1',undefined]},
{23,[],['ns_1@127.0.0.1',undefined]},
{24,[],['ns_1@127.0.0.1',undefined]},
{25,[],['ns_1@127.0.0.1',undefined]},
{26,[],['ns_1@127.0.0.1',undefined]},
{27,[],['ns_1@127.0.0.1',undefined]},
{28,[],['ns_1@127.0.0.1',undefined]},
{29,[],['ns_1@127.0.0.1',undefined]},
{30,[],['ns_1@127.0.0.1',undefined]},
{31,[],['ns_1@127.0.0.1',undefined]},
{32,[],['ns_1@127.0.0.1',undefined]},
{33,[],['ns_1@127.0.0.1',undefined]},
{34,[],['ns_1@127.0.0.1',undefined]},
{35,[],['ns_1@127.0.0.1',undefined]},
{36,[],['ns_1@127.0.0.1',undefined]},
{37,[],['ns_1@127.0.0.1',undefined]},
{38,[],['ns_1@127.0.0.1',undefined]},
{39,[],['ns_1@127.0.0.1',undefined]},
{40,[],['ns_1@127.0.0.1',undefined]},
{41,[],['ns_1@127.0.0.1',undefined]},
{42,[],['ns_1@127.0.0.1',undefined]},
{43,[],['ns_1@127.0.0.1',undefined]},
{44,[],['ns_1@127.0.0.1',undefined]},
{45,[],['ns_1@127.0.0.1',undefined]},
{46,[],['ns_1@127.0.0.1',undefined]},
{47,[],['ns_1@127.0.0.1',undefined]},
{48,[],['ns_1@127.0.0.1',undefined]},
{49,[],['ns_1@127.0.0.1',undefined]},
{50,[],['ns_1@127.0.0.1',undefined]},
{51,[],['ns_1@127.0.0.1',undefined]},
{52,[],['ns_1@127.0.0.1',undefined]},
{53,[],['ns_1@127.0.0.1',undefined]},
{54,[],['ns_1@127.0.0.1',undefined]},
{55,[],['ns_1@127.0.0.1',undefined]},
{56,[],['ns_1@127.0.0.1',undefined]},
{57,[],['ns_1@127.0.0.1',undefined]},
{58,[],['ns_1@127.0.0.1',undefined]},
{59,[],['ns_1@127.0.0.1',undefined]},
{60,[],['ns_1@127.0.0.1',undefined]},
{61,[],['ns_1@127.0.0.1',undefined]},
{62,[],['ns_1@127.0.0.1',undefined]},
{63,[],['ns_1@127.0.0.1',undefined]}]},
{fastForwardMap,[]},
{repl_type,dcp},
{uuid,<<"b04d5897bd3c5329a82156f1b77c395d">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,104857600},
{auth_type,sasl},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map_opts_hash,133465355}],
[{map,[{0,[],['ns_1@127.0.0.1',undefined]},
{1,[],['ns_1@127.0.0.1',undefined]},
{2,[],['ns_1@127.0.0.1',undefined]},
{3,[],['ns_1@127.0.0.1',undefined]},
{4,[],['ns_1@127.0.0.1',undefined]},
{5,[],['ns_1@127.0.0.1',undefined]},
{6,[],['ns_1@127.0.0.1',undefined]},
{7,[],['ns_1@127.0.0.1',undefined]},
{8,[],['ns_1@127.0.0.1',undefined]},
{9,[],['ns_1@127.0.0.1',undefined]},
{10,[],['ns_1@127.0.0.1',undefined]},
{11,[],['ns_1@127.0.0.1',undefined]},
{12,[],['ns_1@127.0.0.1',undefined]},
{13,[],['ns_1@127.0.0.1',undefined]},
{14,[],['ns_1@127.0.0.1',undefined]},
{15,[],['ns_1@127.0.0.1',undefined]},
{16,[],['ns_1@127.0.0.1',undefined]},
{17,[],['ns_1@127.0.0.1',undefined]},
{18,[],['ns_1@127.0.0.1',undefined]},
{19,[],['ns_1@127.0.0.1',undefined]},
{20,[],['ns_1@127.0.0.1',undefined]},
{21,[],['ns_1@127.0.0.1',undefined]},
{22,[],['ns_1@127.0.0.1',undefined]},
{23,[],['ns_1@127.0.0.1',undefined]},
{24,[],['ns_1@127.0.0.1',undefined]},
{25,[],['ns_1@127.0.0.1',undefined]},
{26,[],['ns_1@127.0.0.1',undefined]},
{27,[],['ns_1@127.0.0.1',undefined]},
{28,[],['ns_1@127.0.0.1',undefined]},
{29,[],['ns_1@127.0.0.1',undefined]},
{30,[],['ns_1@127.0.0.1',undefined]},
{31,[],['ns_1@127.0.0.1',undefined]},
{32,[],['ns_1@127.0.0.1',undefined]},
{33,[],['ns_1@127.0.0.1',undefined]},
{34,[],['ns_1@127.0.0.1',undefined]},
{35,[],['ns_1@127.0.0.1',undefined]},
{36,[],['ns_1@127.0.0.1',undefined]},
{37,[],['ns_1@127.0.0.1',undefined]},
{38,[],['ns_1@127.0.0.1',undefined]},
{39,[],['ns_1@127.0.0.1',undefined]},
{40,[],['ns_1@127.0.0.1',undefined]},
{41,[],['ns_1@127.0.0.1',undefined]},
{42,[],['ns_1@127.0.0.1',undefined]},
{43,[],['ns_1@127.0.0.1',undefined]},
{44,[],['ns_1@127.0.0.1',undefined]},
{45,[],['ns_1@127.0.0.1',undefined]},
{46,[],['ns_1@127.0.0.1',undefined]},
{47,[],['ns_1@127.0.0.1',undefined]},
{48,[],['ns_1@127.0.0.1',undefined]},
{49,[],['ns_1@127.0.0.1',undefined]},
{50,[],['ns_1@127.0.0.1',undefined]},
{51,[],['ns_1@127.0.0.1',undefined]},
{52,[],['ns_1@127.0.0.1',undefined]},
{53,[],['ns_1@127.0.0.1',undefined]},
{54,[],['ns_1@127.0.0.1',undefined]},
{55,[],['ns_1@127.0.0.1',undefined]},
{56,[],['ns_1@127.0.0.1',undefined]},
{57,[],['ns_1@127.0.0.1',undefined]},
{58,[],['ns_1@127.0.0.1',undefined]},
{59,[],['ns_1@127.0.0.1',undefined]},
{60,[],['ns_1@127.0.0.1',undefined]},
{61,[],['ns_1@127.0.0.1',undefined]},
{62,[],['ns_1@127.0.0.1',undefined]},
{63,[],['ns_1@127.0.0.1',undefined]}]},
{fastForwardMap,[]},
{eviction_policy,value_only},
{num_threads,3},
{flush_enabled,false},
{purge_interval,undefined},
{autocompaction,false},
{sasl_password,"*****"},
{auth_type,sasl},
{ram_quota,104857600},
{num_replicas,1},
{repl_type,dcp},
{uuid,<<"8515ae93e826e7c4389f3fd25fbb263e">>},
{replica_index,false},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map_opts_hash,133465355}],
[{map,[{0,[],['ns_1@127.0.0.1',undefined]},
{1,[],['ns_1@127.0.0.1',undefined]},
{2,[],['ns_1@127.0.0.1',undefined]},
{3,[],['ns_1@127.0.0.1',undefined]},
{4,[],['ns_1@127.0.0.1',undefined]},
{5,[],['ns_1@127.0.0.1',undefined]},
{6,[],['ns_1@127.0.0.1',undefined]},
{7,[],['ns_1@127.0.0.1',undefined]},
{8,[],['ns_1@127.0.0.1',undefined]},
{9,[],['ns_1@127.0.0.1',undefined]},
{10,[],['ns_1@127.0.0.1',undefined]},
{11,[],['ns_1@127.0.0.1',undefined]},
{12,[],['ns_1@127.0.0.1',undefined]},
{13,[],['ns_1@127.0.0.1',undefined]},
{14,[],['ns_1@127.0.0.1',undefined]},
{15,[],['ns_1@127.0.0.1',undefined]},
{16,[],['ns_1@127.0.0.1',undefined]},
{17,[],['ns_1@127.0.0.1',undefined]},
{18,[],['ns_1@127.0.0.1',undefined]},
{19,[],['ns_1@127.0.0.1',undefined]},
{20,[],['ns_1@127.0.0.1',undefined]},
{21,[],['ns_1@127.0.0.1',undefined]},
{22,[],['ns_1@127.0.0.1',undefined]},
{23,[],['ns_1@127.0.0.1',undefined]},
{24,[],['ns_1@127.0.0.1',undefined]},
{25,[],['ns_1@127.0.0.1',undefined]},
{26,[],['ns_1@127.0.0.1',undefined]},
{27,[],['ns_1@127.0.0.1',undefined]},
{28,[],['ns_1@127.0.0.1',undefined]},
{29,[],['ns_1@127.0.0.1',undefined]},
{30,[],['ns_1@127.0.0.1',undefined]},
{31,[],['ns_1@127.0.0.1',undefined]},
{32,[],['ns_1@127.0.0.1',undefined]},
{33,[],['ns_1@127.0.0.1',undefined]},
{34,[],['ns_1@127.0.0.1',undefined]},
{35,[],['ns_1@127.0.0.1',undefined]},
{36,[],['ns_1@127.0.0.1',undefined]},
{37,[],['ns_1@127.0.0.1',undefined]},
{38,[],['ns_1@127.0.0.1',undefined]},
{39,[],['ns_1@127.0.0.1',undefined]},
{40,[],['ns_1@127.0.0.1',undefined]},
{41,[],['ns_1@127.0.0.1',undefined]},
{42,[],['ns_1@127.0.0.1',undefined]},
{43,[],['ns_1@127.0.0.1',undefined]},
{44,[],['ns_1@127.0.0.1',undefined]},
{45,[],['ns_1@127.0.0.1',undefined]},
{46,[],['ns_1@127.0.0.1',undefined]},
{47,[],['ns_1@127.0.0.1',undefined]},
{48,[],['ns_1@127.0.0.1',undefined]},
{49,[],['ns_1@127.0.0.1',undefined]},
{50,[],['ns_1@127.0.0.1',undefined]},
{51,[],['ns_1@127.0.0.1',undefined]},
{52,[],['ns_1@127.0.0.1',undefined]},
{53,[],['ns_1@127.0.0.1',undefined]},
{54,[],['ns_1@127.0.0.1',undefined]},
{55,[],['ns_1@127.0.0.1',undefined]},
{56,[],['ns_1@127.0.0.1',undefined]},
{57,[],['ns_1@127.0.0.1',undefined]},
{58,[],['ns_1@127.0.0.1',undefined]},
{59,[],['ns_1@127.0.0.1',undefined]},
{60,[],['ns_1@127.0.0.1',undefined]},
{61,[],['ns_1@127.0.0.1',undefined]},
{62,[],['ns_1@127.0.0.1',undefined]},
{63,[],['ns_1@127.0.0.1',undefined]}]},
{fastForwardMap,[]},
{repl_type,dcp},
{uuid,<<"7f7d4a28ca84a805edf9c899521eb18c">>},
{num_replicas,1},
{replica_index,false},
{ram_quota,524288000},
{auth_type,sasl},
{sasl_password,"*****"},
{autocompaction,false},
{purge_interval,undefined},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map_opts_hash,133465355}]]}]
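In this logged form each bucket's map is a list of {VBucketId, SecondElement, Chain} tuples, with the chain listing the active node first and replicas after (undefined marks an unassigned replica). A minimal sketch of looking up the active node for one vbucket; the module and function names are made up, and the middle tuple element (empty [] throughout this log) is ignored.

-module(vbmap_sketch).
-export([active_node/2]).

%% Given a bucket config in the shape logged above, return the active
%% node for VBucketId.
active_node(BucketConfig, VBucketId) ->
    Map = proplists:get_value(map, BucketConfig),
    {VBucketId, _, [Active | _Replicas]} = lists:keyfind(VBucketId, 1, Map),
    Active.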
[ns_server:debug,2016-10-19T09:55:14.178-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
cert_and_pkey ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{<<"-----BEGIN CERTIFICATE-----\nMIIDAjCCAeqgAwIBAgIIFH6f01mhINIwDQYJKoZIhvcNAQELBQAwJDEiMCAGA1UE\nAxMZQ291Y2hiYXNlIFNlcnZlciAxMzBiNDVmMzAeFw0xMzAxMDEwMDAwMDBaFw00\nOTEyMzEyMzU5NTlaMCQxIjAgBgNVBAMTGUNvdWNoYmFzZSBTZXJ2ZXIgMTMwYjQ1\nZjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDINaiYz/zhTeD2h8Pa\nP015LetKMhey1yoE2L5H1wHK7pADeFRfqeoNunEvlxRWL/YjnqvRZPxrjdadlh7L\nVhZVke2blopHdhJjaHCvdI8R3BRBK4f"...>>,
<<"*****">>}]
[ns_server:debug,2016-10-19T09:55:14.178-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
cluster_compat_version ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{6,63644012660}}]},4,6]
[ns_server:debug,2016-10-19T09:55:14.178-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
drop_request_memory_threshold_mib ->
undefined
[error_logger:info,2016-10-19T09:55:14.178-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.223.0>},
{name,vbucket_map_mirror},
{mfargs,{vbucket_map_mirror,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.178-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
email_alerts ->
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,
auto_failover_disabled,ip,disk,overhead,ep_oom_errors,
ep_item_commit_failed,audit_dropped_events,indexer_ram_max_usage]}]
[ns_server:debug,2016-10-19T09:55:14.178-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
fts_memory_quota ->
512
[ns_server:debug,2016-10-19T09:55:14.178-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
goxdcr_upgrade ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
'_deleted']
[ns_server:debug,2016-10-19T09:55:14.178-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
index_aware_rebalance_disabled ->
false
[ns_server:debug,2016-10-19T09:55:14.179-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
max_bucket_count ->
10
[ns_server:debug,2016-10-19T09:55:14.179-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
memcached ->
[]
[ns_server:debug,2016-10-19T09:55:14.179-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
memory_quota ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]}|1024]
[ns_server:debug,2016-10-19T09:55:14.179-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
nodes_wanted ->
['ns_1@127.0.0.1']
[ns_server:debug,2016-10-19T09:55:14.179-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
otp ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]},
{cookie,oxqibayfkfbrogxo}]
[ns_server:debug,2016-10-19T09:55:14.179-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
read_only_user_creds ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|null]
[ns_server:debug,2016-10-19T09:55:14.179-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
remote_clusters ->
[]
[ns_server:debug,2016-10-19T09:55:14.179-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
replication ->
[{enabled,true}]
[ns_server:debug,2016-10-19T09:55:14.179-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
rest ->
[{port,8091}]
[ns_server:debug,2016-10-19T09:55:14.179-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
rest_creds ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012784}}]}|
{"Administrator",{password,"*****"}}]
[ns_server:debug,2016-10-19T09:55:14.179-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
roles_definitions ->
[{admin,[],
[{name,<<"Admin">>},
{desc,<<"Can manage ALL cluster features including security.">>}],
[{[],all}]},
{ro_admin,[],
[{name,<<"Read Only Admin">>},
{desc,<<"Can view ALL cluster features.">>}],
[{[{bucket,any},password],none},
{[{bucket,any},data],none},
{[admin,security],[read]},
{[admin],none},
{[],[read]}]},
{cluster_admin,[],
[{name,<<"Cluster Admin">>},
{desc,<<"Can manage all cluster features EXCEPT security.">>}],
[{[admin],none},{[],all}]},
{bucket_admin,[bucket_name],
[{name,<<"Bucket Admin">>},
{desc,<<"Can manage ALL bucket features for specified buckets (incl. start/stop XDCR)">>}],
[{[{bucket,bucket_name},xdcr],[read,execute]},
{[{bucket,bucket_name}],all},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{bucket_sasl,[bucket_name],
[],
[{[{bucket,bucket_name},data],all},
{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name}],[read,flush]},
{[pools],[read]}]},
{views_admin,[bucket_name],
[{name,<<"Views Admin">>},
{desc,<<"Can manage views for specified buckets">>}],
[{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{replication_admin,[],
[{name,<<"Replication Admin">>},
{desc,<<"Can manage ONLY XDCR features (cluster AND bucket level)">>}],
[{[{bucket,any},xdcr],all},
{[{bucket,any},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],all},
{[admin],none},
{[],[read]}]}]
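Each role above is a 4-tuple: name, parameter names, descriptive properties, and a permission list of {ObjectPattern, AllowedOperations} pairs. A minimal sketch of retrieving one role's permission list from a structure of this shape; the module name is made up.

-module(roles_sketch).
-export([permissions/2]).

%% Look up a role tuple by name and return its permission list, e.g.
%% permissions(ro_admin, RolesDefinitions).
permissions(RoleName, RolesDefinitions) ->
    {RoleName, _Params, _Props, Permissions} =
        lists:keyfind(RoleName, 1, RolesDefinitions),
    Permissions.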
[ns_server:debug,2016-10-19T09:55:14.180-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
server_groups ->
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]
[ns_server:debug,2016-10-19T09:55:14.180-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
set_view_update_daemon ->
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]
[ns_server:debug,2016-10-19T09:55:14.180-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
settings ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012777}}]},
{stats,[{send_stats,false}]}]
[ns_server:debug,2016-10-19T09:55:14.180-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
uuid ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012784}}]}|
<<"561b20f339d4184270a7de0b1c1de1b0">>]
[ns_server:debug,2016-10-19T09:55:14.180-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
vbucket_map_history ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012772}}]},
{[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]],
[{replication_topology,star},{tags,undefined},{max_slaves,10}]}]
[ns_server:debug,2016-10-19T09:55:14.180-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{couchdb,max_parallel_indexers} ->
4
[ns_server:debug,2016-10-19T09:55:14.180-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{couchdb,max_parallel_replica_indexers} ->
2
[ns_server:debug,2016-10-19T09:55:14.180-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{local_changes_count,<<"eac84bf2ecf69c83ca0268ac5aac465d">>} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{35,63644101958}}]}]
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{metakv,<<"/indexing/settings/config">>} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{4,63644012784}}]}|
<<"{\"indexer.settings.compaction.days_of_week\":\"Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday\",\"indexer.settings.compaction.interval\":\"00:00,00:00\",\"indexer.settings.compaction.compaction_mode\":\"circular\",\"indexer.settings.persisted_snapshot.interval\":5000,\"indexer.settings.log_level\":\"info\",\"indexer.settings.compaction.min_frag\":30,\"indexer.settings.inmemory_snapshot.interval\""...>>]
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{request_limit,capi} ->
undefined
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{request_limit,rest} ->
undefined
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{service_map,fts} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}]
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{service_map,index} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{service_map,n1ql} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',audit} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',capi_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|8092]
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',compaction_daemon} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{check_interval,30},
{min_db_file_size,131072},
{min_view_file_size,20971520}]
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',config_version} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|{4,5}]
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',fts_http_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|8094]
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_admin_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9100]
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_http_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9102]
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_scan_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9101]
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_stcatchup_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9104]
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_stinit_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9103]
[ns_server:debug,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_stmaint_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9105]
[error_logger:info,2016-10-19T09:55:14.181-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.225.0>},
{name,bucket_info_cache},
{mfargs,{bucket_info_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.182-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',is_enterprise} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|true]
[error_logger:info,2016-10-19T09:55:14.182-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.228.0>},
{name,ns_tick_event},
{mfargs,{gen_event,start_link,[{local,ns_tick_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.182-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',isasl} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw"}]
[error_logger:info,2016-10-19T09:55:14.182-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.229.0>},
{name,buckets_events},
{mfargs,
{gen_event,start_link,[{local,buckets_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.182-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ldap_enabled} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|false]
[ns_server:debug,2016-10-19T09:55:14.182-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',membership} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
active]
[ns_server:debug,2016-10-19T09:55:14.182-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',memcached} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11210},
{dedicated_port,11209},
{ssl_port,11207},
{admin_user,"_admin"},
{admin_pass,"*****"},
{engines,[{membase,[{engine,"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/ep.so"},
{static_config_string,"failpartialwarmup=false"}]},
{memcached,[{engine,"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{config_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached.json"},
{audit_file,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/audit.json"},
{log_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003}]
[ns_server:debug,2016-10-19T09:55:14.182-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',memcached_config} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{[{interfaces,
{memcached_config_mgr,omit_missing_mcd_ports,
[{[{host,<<"*">>},{port,port},{maxconn,maxconn}]},
{[{host,<<"*">>},
{port,dedicated_port},
{maxconn,dedicated_port_maxconn}]},
{[{host,<<"*">>},
{port,ssl_port},
{maxconn,maxconn},
{ssl,
{[{key,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-key.pem">>},
{cert,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-cert.pem">>}]}}]}]}},
{ssl_cipher_list,{"~s",[ssl_cipher_list]}},
{ssl_minimum_protocol,{memcached_config_mgr,ssl_minimum_protocol,[]}},
{connection_idle_time,connection_idle_time},
{breakpad,
{[{enabled,breakpad_enabled},
{minidump_dir,{memcached_config_mgr,get_minidump_dir,[]}}]}},
{extensions,
[{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/stdin_term_handler.so">>},
{config,<<>>}]},
{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/file_logger.so">>},
{config,
{"cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]}}]}]},
{admin,{"~s",[admin_user]}},
{verbosity,verbosity},
{audit_file,{"~s",[audit_file]}},
{dedupe_nmvb_maps,dedupe_nmvb_maps}]}]
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',memcached_defaults} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{maxconn,30000},
{dedicated_port_maxconn,5000},
{ssl_cipher_list,"HIGH"},
{connection_idle_time,0},
{verbosity,0},
{breakpad_enabled,true},
{breakpad_minidump_dir_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/crash"},
{dedupe_nmvb_maps,false}]
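The memcached_config entry above is a template and memcached_defaults supplies fallback parameters: bare atoms such as maxconn or verbosity stand for node parameters, {"~s",[admin_user]}-style pairs are format strings, and {memcached_config_mgr,F,A} triples are callbacks evaluated at render time. A minimal Erlang sketch of that substitution, with all names assumed for illustration (the real memcached_config_mgr also threads its parameter list through the callbacks):

-module(mcd_config_sketch).
-export([render/2]).

%% Params is the merged proplist from the memcached and memcached_defaults
%% entries above, e.g. [{maxconn,30000},{admin_user,"_admin"},...].
render(Value, Params) when is_atom(Value) ->
    %% bare atom: substitute the node parameter of the same name
    proplists:get_value(Value, Params, Value);
render({Fmt, Args}, Params) when is_list(Fmt), is_list(Args) ->
    %% {"~s",[admin_user]}-style pair: format with substituted arguments
    iolist_to_binary(io_lib:format(Fmt, [render(A, Params) || A <- Args]));
render({M, F, A}, _Params) when is_atom(M), is_atom(F), is_list(A) ->
    %% {memcached_config_mgr,get_minidump_dir,[]}-style callback
    erlang:apply(M, F, A);
render({KVs}, Params) when is_list(KVs) ->
    %% {[{key,Val},...]} wrapper corresponds to a JSON object
    {[{K, render(V, Params)} || {K, V} <- KVs]};
render(List, Params) when is_list(List) ->
    [render(E, Params) || E <- List];
render(Other, _Params) ->
    Other.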
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',moxi} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11211},
{verbosity,[]}]
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ns_log} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{filename,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',port_servers} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',projector_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9999]
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',query_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|8093]
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',rest} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,8091},
{port_meta,global}]
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',services} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]},
index,kv,n1ql]
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_capi_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|18092]
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|11214]
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|11215]
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_query_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|18093]
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_rest_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|18091]
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',stop_xdcr} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012660}}]}|
'_deleted']
[ns_server:debug,2016-10-19T09:55:14.183-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',uuid} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
<<"eac84bf2ecf69c83ca0268ac5aac465d">>]
[ns_server:debug,2016-10-19T09:55:14.184-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',xdcr_rest_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9998]
[ns_server:debug,2016-10-19T09:55:14.186-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2016-10-19T09:55:14.187-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2016-10-19T09:55:14.187-07:00,ns_1@127.0.0.1:<0.219.0>:ns_node_disco:do_nodes_wanted_updated_fun:224]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[ns_server:debug,2016-10-19T09:55:14.187-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2016-10-19T09:55:14.187-07:00,ns_1@127.0.0.1:<0.219.0>:ns_node_disco:do_nodes_wanted_updated_fun:230]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[ns_server:debug,2016-10-19T09:55:14.193-07:00,ns_1@127.0.0.1:ns_log_events<0.207.0>:ns_mail_log:init:44]ns_mail_log started up
[error_logger:info,2016-10-19T09:55:14.193-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_mail_sup}
started: [{pid,<0.231.0>},
{name,ns_mail_log},
{mfargs,{ns_mail_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.194-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.230.0>},
{name,ns_mail_sup},
{mfargs,{ns_mail_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.194-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.232.0>},
{name,ns_stats_event},
{mfargs,
{gen_event,start_link,[{local,ns_stats_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.198-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.233.0>},
{name,samples_loader_tasks},
{mfargs,{samples_loader_tasks,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.199-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2016-10-19T09:55:14.199-07:00,ns_1@127.0.0.1:<0.220.0>:ns_node_disco:do_nodes_wanted_updated_fun:224]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[ns_server:debug,2016-10-19T09:55:14.199-07:00,ns_1@127.0.0.1:<0.220.0>:ns_node_disco:do_nodes_wanted_updated_fun:230]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[error_logger:info,2016-10-19T09:55:14.201-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_heart_sup}
started: [{pid,<0.235.0>},
{name,ns_heart},
{mfargs,{ns_heart,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.202-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_heart_sup}
started: [{pid,<0.238.0>},
{name,ns_heart_slow_updater},
{mfargs,{ns_heart,start_link_slow_updater,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.202-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.234.0>},
{name,ns_heart_sup},
{mfargs,{ns_heart_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:14.203-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "@system" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-@system-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]},
{ns_heart,handle_info,2,
[{file,"src/ns_heart.erl"},{line,118}]}]}}
[ns_server:debug,2016-10-19T09:55:14.203-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "@system-processes" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-@system-processes-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]}]}}
[error_logger:info,2016-10-19T09:55:14.204-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_doctor_sup}
started: [{pid,<0.241.0>},
{name,ns_doctor_events},
{mfargs,
{gen_event,start_link,[{local,ns_doctor_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.208-07:00,ns_1@127.0.0.1:<0.239.0>:restartable:start_child:98]Started child process <0.240.0>
MFA: {ns_doctor_sup,start_link,[]}
[error_logger:info,2016-10-19T09:55:14.208-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_doctor_sup}
started: [{pid,<0.242.0>},
{name,ns_doctor},
{mfargs,{ns_doctor,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.208-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.239.0>},
{name,ns_doctor_sup},
{mfargs,
{restartable,start_link,
[{ns_doctor_sup,start_link,[]},infinity]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:14.219-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:ns_heart:grab_index_status:373]ignoring failure to get index status: {exit,{noproc,{gen_server,call,['index_status_keeper-index',get_status,2000]}}}
[{gen_server,call,3,[{file,"gen_server.erl"},{line,188}]},
{ns_heart,grab_index_status,0,[{file,"src/ns_heart.erl"},{line,370}]},
{ns_heart,current_status_slow_inner,0,[{file,"src/ns_heart.erl"},{line,280}]},
{ns_heart,current_status_slow,1,[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,[{file,"src/ns_heart.erl"},{line,186}]},
{ns_heart,handle_info,2,[{file,"src/ns_heart.erl"},{line,118}]},
{gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,604}]},
{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,239}]}]
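Here the exit reason is noproc rather than badarg: gen_server:call/3 exits with {noproc, ...} when the registered name ('index_status_keeper-index') has no running process yet, which holds until the index status keeper starts later in this same boot (visible below at 09:55:14.502). A sketch of the catch-and-ignore pattern, with illustrative names:

-module(index_status_sketch).
-export([grab_status/0]).

grab_status() ->
    try
        gen_server:call('index_status_keeper-index', get_status, 2000)
    catch
        exit:{noproc, _} ->
            %% keeper not registered yet; treat the status as unavailable
            {error, not_running}
    end.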
[ns_server:debug,2016-10-19T09:55:14.219-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "test" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-test-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]}]}}
[ns_server:debug,2016-10-19T09:55:14.219-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "locked" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-locked-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]}]}}
[ns_server:debug,2016-10-19T09:55:14.220-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "default" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-default-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]}]}}
[error_logger:info,2016-10-19T09:55:14.222-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.247.0>},
{name,disk_log_sup},
{mfargs,{disk_log_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.222-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.248.0>},
{name,disk_log_server},
{mfargs,{disk_log_server,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.226-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.245.0>},
{name,remote_clusters_info},
{mfargs,{remote_clusters_info,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.226-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.251.0>},
{name,master_activity_events},
{mfargs,
{gen_event,start_link,
[{local,master_activity_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.228-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.252.0>},
{name,xdcr_ckpt_store},
{mfargs,{simple_store,start_link,[xdcr_ckpt_data]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.228-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.253.0>},
{name,metakv_worker},
{mfargs,{work_queue,start_link,[metakv_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.228-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.254.0>},
{name,index_events},
{mfargs,{gen_event,start_link,[{local,index_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.230-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.255.0>},
{name,index_settings_manager},
{mfargs,{index_settings_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.232-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.258.0>},
{name,menelaus_ui_auth},
{mfargs,{menelaus_ui_auth,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.234-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.260.0>},
{name,menelaus_web_cache},
{mfargs,{menelaus_web_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.237-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.262.0>},
{name,menelaus_stats_gatherer},
{mfargs,{menelaus_stats_gatherer,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.237-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.263.0>},
{name,json_rpc_events},
{mfargs,
{gen_event,start_link,[{local,json_rpc_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:14.239-07:00,ns_1@127.0.0.1:menelaus_sup<0.257.0>:menelaus_pluggable_ui:validate_plugin_spec:117]Loaded pluggable UI specification for fts
[ns_server:info,2016-10-19T09:55:14.239-07:00,ns_1@127.0.0.1:menelaus_sup<0.257.0>:menelaus_pluggable_ui:validate_plugin_spec:117]Loaded pluggable UI specification for n1ql
[error_logger:info,2016-10-19T09:55:14.240-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.264.0>},
{name,menelaus_web},
{mfargs,{menelaus_web,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.240-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily not available. Returning empty list.
[error_logger:info,2016-10-19T09:55:14.241-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.281.0>},
{name,menelaus_event},
{mfargs,{menelaus_event,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.243-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:cluster_logs_collection_task:maybe_build_cluster_logs_task:43]Ignoring exception trying to read cluster_logs_collection_task_status table: error:badarg
[error_logger:info,2016-10-19T09:55:14.244-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.283.0>},
{name,hot_keys_keeper},
{mfargs,{hot_keys_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.246-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.284.0>},
{name,menelaus_web_alerts_srv},
{mfargs,{menelaus_web_alerts_srv,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.250-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.285.0>},
{name,menelaus_cbauth},
{mfargs,{menelaus_cbauth,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[user:info,2016-10-19T09:55:14.251-07:00,ns_1@127.0.0.1:ns_server_sup<0.197.0>:menelaus_sup:start_link:46]Couchbase Server has started on web port 8091 on node 'ns_1@127.0.0.1'. Version: "4.6.0-3391-enterprise".
[error_logger:info,2016-10-19T09:55:14.251-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.257.0>},
{name,menelaus},
{mfargs,{menelaus_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.252-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.291.0>},
{name,ns_ports_setup},
{mfargs,{ns_ports_setup,start,[]}},
{restart_type,{permanent,4}},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.253-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,service_agent_sup}
started: [{pid,<0.294.0>},
{name,service_agent_children_sup},
{mfargs,
{supervisor,start_link,
[{local,service_agent_children_sup},
service_agent_sup,child]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.253-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,service_agent_sup}
started: [{pid,<0.295.0>},
{name,service_agent_worker},
{mfargs,
{erlang,apply,
[#Fun,[]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.253-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.293.0>},
{name,service_agent_sup},
{mfargs,{service_agent_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.258-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.297.0>},
{name,ns_memcached_sockets_pool},
{mfargs,{ns_memcached_sockets_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.258-07:00,ns_1@127.0.0.1:ns_audit_cfg<0.298.0>:ns_audit_cfg:write_audit_json:158]Writing new content to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/audit.json" : [{auditd_enabled,false},
{disabled,[]},
{log_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{rotate_interval,86400},
{rotate_size,20971520},
{sync,[]},
{version,1},
{descriptors_path,"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/security"}]
[ns_server:debug,2016-10-19T09:55:14.261-07:00,ns_1@127.0.0.1:ns_ports_setup<0.291.0>:ns_ports_manager:set_dynamic_children:54]Setting children [memcached,moxi,projector,indexer,query,saslauthd_port,
goxdcr,xdcr_proxy]
[ns_server:debug,2016-10-19T09:55:14.262-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.238.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "@system" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-@system-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]},
{proc_lib,init_p_do_apply,3,
[{file,"proc_lib.erl"},{line,239}]}]}}
[ns_server:debug,2016-10-19T09:55:14.262-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.238.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "@system-processes" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-@system-processes-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]}]}}
[ns_server:debug,2016-10-19T09:55:14.262-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.238.0>:ns_heart:grab_index_status:373]ignoring failure to get index status: {exit,{noproc,{gen_server,call,['index_status_keeper-index',get_status,2000]}}}
[{gen_server,call,3,[{file,"gen_server.erl"},{line,188}]},
{ns_heart,grab_index_status,0,[{file,"src/ns_heart.erl"},{line,370}]},
{ns_heart,current_status_slow_inner,0,[{file,"src/ns_heart.erl"},{line,280}]},
{ns_heart,current_status_slow,1,[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,[{file,"src/ns_heart.erl"},{line,243}]},
{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,239}]}]
[ns_server:debug,2016-10-19T09:55:14.263-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.238.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "test" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-test-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]}]}}
[ns_server:debug,2016-10-19T09:55:14.263-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.238.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "locked" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-locked-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]}]}}
[ns_server:debug,2016-10-19T09:55:14.263-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.238.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "default" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-default-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]}]}}
[ns_server:debug,2016-10-19T09:55:14.264-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.238.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily not available. Returning empty list.
[ns_server:debug,2016-10-19T09:55:14.264-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.238.0>:cluster_logs_collection_task:maybe_build_cluster_logs_task:43]Ignoring exception trying to read cluster_logs_collection_task_status table: error:badarg
[ns_server:debug,2016-10-19T09:55:14.269-07:00,ns_1@127.0.0.1:ns_audit_cfg<0.298.0>:ns_audit_cfg:handle_info:107]Instruct memcached to reload audit config
[error_logger:info,2016-10-19T09:55:14.269-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.298.0>},
{name,ns_audit_cfg},
{mfargs,{ns_audit_cfg,start_link,[]}},
{restart_type,{permanent,4}},
{shutdown,1000},
{child_type,worker}]
[ns_server:warn,2016-10-19T09:55:14.270-07:00,ns_1@127.0.0.1:<0.308.0>:ns_memcached:connect:1307]Unable to connect: {error,{badmatch,{error,econnrefused}}}, retrying.
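The "Unable to connect ... retrying" warning is transient: ns_memcached probes the memcached port before memcached_config_mgr has activated the port server (which happens below at 09:55:14.508), so the first attempts see econnrefused. A retry loop of roughly this shape, illustrative only:

-module(mcd_connect_sketch).
-export([connect/2]).

connect(_Port, 0) ->
    {error, too_many_retries};
connect(Port, Retries) ->
    case gen_tcp:connect("127.0.0.1", Port, [binary, {active, false}], 1000) of
        {ok, Sock} ->
            {ok, Sock};
        {error, econnrefused} ->
            timer:sleep(1000),          %% memcached is not listening yet
            connect(Port, Retries - 1);
        {error, _} = Error ->
            Error
    end.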
[ns_server:debug,2016-10-19T09:55:14.271-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.310.0>:memcached_config_mgr:init:44]waiting for completion of initial ns_ports_setup round
[error_logger:info,2016-10-19T09:55:14.271-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.310.0>},
{name,memcached_config_mgr},
{mfargs,{memcached_config_mgr,start_link,[]}},
{restart_type,{permanent,4}},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:14.273-07:00,ns_1@127.0.0.1:<0.311.0>:ns_memcached_log_rotator:init:28]Starting log rotator on "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"/"memcached.log"* with an initial period of 39003ms
[error_logger:info,2016-10-19T09:55:14.273-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.311.0>},
{name,ns_memcached_log_rotator},
{mfargs,{ns_memcached_log_rotator,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.275-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.312.0>},
{name,memcached_clients_pool},
{mfargs,{memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.276-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.314.0>},
{name,proxied_memcached_clients_pool},
{mfargs,{proxied_memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.276-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.315.0>},
{name,xdc_lhttpc_pool},
{mfargs,
{lhttpc_manager,start_link,
[[{name,xdc_lhttpc_pool},
{connection_timeout,120000},
{pool_size,200}]]}},
{restart_type,{permanent,1}},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.279-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.316.0>},
{name,ns_null_connection_pool},
{mfargs,
{ns_null_connection_pool,start_link,
[ns_null_connection_pool]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.287-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.317.0>,xdcr_sup}
started: [{pid,<0.318.0>},
{name,xdc_stats_holder},
{mfargs,
{proc_lib,start_link,
[xdcr_sup,link_stats_holder_body,[]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.288-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.317.0>,xdcr_sup}
started: [{pid,<0.319.0>},
{name,xdc_replication_sup},
{mfargs,{xdc_replication_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:14.290-07:00,ns_1@127.0.0.1:xdc_rep_manager<0.320.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:14.290-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.317.0>,xdcr_sup}
started: [{pid,<0.320.0>},
{name,xdc_rep_manager},
{mfargs,{xdc_rep_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,30000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.292-07:00,ns_1@127.0.0.1:xdcr_doc_replicator<0.322.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[ns_server:debug,2016-10-19T09:55:14.292-07:00,ns_1@127.0.0.1:xdc_rdoc_replication_srv<0.323.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:14.292-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.317.0>,xdcr_sup}
started: [{pid,<0.322.0>},
{name,xdc_rdoc_replicator},
{mfargs,{doc_replicator,start_link_xdcr,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.292-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.317.0>,xdcr_sup}
started: [{pid,<0.323.0>},
{name,xdc_rdoc_replication_srv},
{mfargs,{doc_replication_srv,start_link_xdcr,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.293-07:00,ns_1@127.0.0.1:<0.317.0>:xdc_rdoc_manager:start_link_remote:42]Starting xdc_rdoc_manager on 'couchdb_ns_1@127.0.0.1' with following links: [<0.322.0>,
<0.323.0>,
<0.320.0>]
[ns_server:debug,2016-10-19T09:55:14.295-07:00,ns_1@127.0.0.1:xdcr_doc_replicator<0.322.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.254.0>
[ns_server:debug,2016-10-19T09:55:14.295-07:00,ns_1@127.0.0.1:xdc_rdoc_replication_srv<0.323.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.254.0>
[ns_server:debug,2016-10-19T09:55:14.295-07:00,ns_1@127.0.0.1:xdc_rep_manager<0.320.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.254.0>
[error_logger:info,2016-10-19T09:55:14.295-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.317.0>,xdcr_sup}
started: [{pid,<11625.254.0>},
{name,xdc_rdoc_manager},
{mfargs,
{xdc_rdoc_manager,start_link_remote,
['couchdb_ns_1@127.0.0.1']}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.295-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.317.0>},
{name,xdcr_sup},
{mfargs,{xdcr_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.296-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.325.0>},
{name,xdcr_dcp_sockets_pool},
{mfargs,{xdcr_dcp_sockets_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.298-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.327.0>},
{name,ns_bucket_worker},
{mfargs,{work_queue,start_link,[ns_bucket_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.303-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.329.0>},
{name,buckets_observing_subscription},
{mfargs,{ns_bucket_sup,subscribe_on_config_events,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.303-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.327.0>:ns_bucket_sup:update_children:110]Starting new child: {{docs_sup,"test"},
{docs_sup,start_link,["test"]},
permanent,infinity,supervisor,
[docs_sup]}
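The tuple in this entry is a classic six-element supervisor child spec, {Id, {M,F,A}, Restart, Shutdown, Type, Modules}; ns_bucket_sup registers one per bucket. A sketch of how such a spec is added dynamically, using the values from the entry above:

-module(bucket_sup_sketch).
-export([start_docs_sup/1]).

start_docs_sup(Bucket) ->
    Spec = {{docs_sup, Bucket},
            {docs_sup, start_link, [Bucket]},
            permanent, infinity, supervisor, [docs_sup]},
    supervisor:start_child(ns_bucket_sup, Spec).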
[error_logger:info,2016-10-19T09:55:14.303-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.328.0>},
{name,ns_bucket_sup},
{mfargs,{ns_bucket_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.303-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.326.0>},
{name,ns_bucket_worker_sup},
{mfargs,{ns_bucket_worker_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:14.308-07:00,ns_1@127.0.0.1:xdcr_doc_replicator<0.322.0>:doc_replicator:loop:64]doing replicate_newnodes_docs
[error_logger:info,2016-10-19T09:55:14.309-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.330.0>},
{name,system_stats_collector},
{mfargs,{system_stats_collector,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.309-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.334.0>},
{name,{stats_archiver,"@system"}},
{mfargs,{stats_archiver,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.321-07:00,ns_1@127.0.0.1:capi_doc_replicator-test<0.337.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[ns_server:debug,2016-10-19T09:55:14.321-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-test<0.338.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:14.321-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.336.0>,docs_sup}
started: [{pid,<0.337.0>},
{name,doc_replicator},
{mfargs,{doc_replicator,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.321-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.336.0>,docs_sup}
started: [{pid,<0.338.0>},
{name,doc_replication_srv},
{mfargs,{doc_replication_srv,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.323-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.339.0>},
{name,{stats_reader,"@system"}},
{mfargs,{stats_reader,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.324-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.340.0>},
{name,{stats_archiver,"@system-processes"}},
{mfargs,
{stats_archiver,start_link,["@system-processes"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.324-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.342.0>},
{name,{stats_reader,"@system-processes"}},
{mfargs,
{stats_reader,start_link,["@system-processes"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.324-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.343.0>},
{name,{stats_archiver,"@query"}},
{mfargs,{stats_archiver,start_link,["@query"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.324-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.345.0>},
{name,{stats_reader,"@query"}},
{mfargs,{stats_reader,start_link,["@query"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.351-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-test'}
started: [{pid,<11625.262.0>},
{name,capi_ddoc_manager_events},
{mfargs,
{capi_ddoc_manager,start_link_event_manager,
["test"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.352-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-test<0.338.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.263.0>
[ns_server:debug,2016-10-19T09:55:14.352-07:00,ns_1@127.0.0.1:capi_doc_replicator-test<0.337.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.263.0>
[error_logger:info,2016-10-19T09:55:14.352-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-test'}
started: [{pid,<11625.263.0>},
{name,capi_ddoc_manager},
{mfargs,
{capi_ddoc_manager,start_link,
["test",<0.337.0>,<0.338.0>]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.352-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.327.0>:ns_bucket_sup:update_children:110]Starting new child: {{single_bucket_kv_sup,"test"},
{single_bucket_kv_sup,start_link,["test"]},
permanent,infinity,supervisor,
[single_bucket_kv_sup]}
[error_logger:info,2016-10-19T09:55:14.352-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.336.0>,docs_sup}
started: [{pid,<11625.261.0>},
{name,capi_ddoc_manager_sup},
{mfargs,
{capi_ddoc_manager_sup,start_link_remote,
['couchdb_ns_1@127.0.0.1',"test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.352-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.336.0>},
{name,{docs_sup,"test"}},
{mfargs,{docs_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:14.362-07:00,ns_1@127.0.0.1:capi_doc_replicator-test<0.337.0>:doc_replicator:loop:64]doing replicate_newnodes_docs
[error_logger:info,2016-10-19T09:55:14.423-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.349.0>},
{name,query_stats_collector},
{mfargs,{query_stats_collector,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.423-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.351.0>},
{name,{stats_archiver,"@global"}},
{mfargs,{stats_archiver,start_link,["@global"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.424-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.353.0>},
{name,{stats_reader,"@global"}},
{mfargs,{stats_reader,start_link,["@global"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.472-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.357.0>},
{name,global_stats_collector},
{mfargs,{global_stats_collector,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.482-07:00,ns_1@127.0.0.1:ns_ports_setup<0.291.0>:ns_ports_setup:set_children:72]Monitor ns_child_ports_sup <11624.75.0>
[ns_server:debug,2016-10-19T09:55:14.482-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.310.0>:memcached_config_mgr:init:46]ns_ports_setup seems to be ready
[error_logger:info,2016-10-19T09:55:14.491-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.361.0>},
{name,goxdcr_status_keeper},
{mfargs,{goxdcr_status_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.492-07:00,ns_1@127.0.0.1:goxdcr_status_keeper<0.361.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily not available. Returning empty list.
[ns_server:debug,2016-10-19T09:55:14.492-07:00,ns_1@127.0.0.1:goxdcr_status_keeper<0.361.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily not available. Returning empty list.
[ns_server:debug,2016-10-19T09:55:14.496-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.310.0>:memcached_config_mgr:find_port_pid_loop:119]Found memcached port <11624.81.0>
[error_logger:info,2016-10-19T09:55:14.496-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.359.0>,docs_kv_sup}
started: [{pid,<11625.271.0>},
{name,capi_set_view_manager},
{mfargs,
{capi_set_view_manager,start_link_remote,
['couchdb_ns_1@127.0.0.1',"test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.497-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_sup}
started: [{pid,<0.368.0>},
{name,index_stats_children_sup},
{mfargs,
{supervisor,start_link,
[{local,index_stats_children_sup},
index_stats_sup,child]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.502-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_status_keeper_sup}
started: [{pid,<0.370.0>},
{name,index_status_keeper_worker},
{mfargs,
{work_queue,start_link,
[index_status_keeper_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.502-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_status_keeper_sup}
started: [{pid,<0.371.0>},
{name,index_status_keeper},
{mfargs,{indexer_gsi,start_keeper,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.505-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.359.0>,docs_kv_sup}
started: [{pid,<11625.274.0>},
{name,couch_stats_reader},
{mfargs,
{couch_stats_reader,start_link_remote,
['couchdb_ns_1@127.0.0.1',"test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.505-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.359.0>},
{name,{docs_kv_sup,"test"}},
{mfargs,{docs_kv_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:14.507-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.310.0>:memcached_config_mgr:init:77]wrote memcached config to /Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached.json. Will activate memcached port server
[ns_server:debug,2016-10-19T09:55:14.508-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.310.0>:memcached_config_mgr:init:80]activated memcached port server
[error_logger:info,2016-10-19T09:55:14.508-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_status_keeper_sup}
started: [{pid,<0.374.0>},
{name,index_status_keeper_fts},
{mfargs,{indexer_fts,start_keeper,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.508-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_sup}
started: [{pid,<0.369.0>},
{name,index_status_keeper_sup},
{mfargs,{index_status_keeper_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.508-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_sup}
started: [{pid,<0.377.0>},
{name,index_stats_worker},
{mfargs,
{erlang,apply,
[#Fun,[]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.508-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.367.0>},
{name,index_stats_sup},
{mfargs,{index_stats_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:error,2016-10-19T09:55:14.510-07:00,ns_1@127.0.0.1:index_status_keeper_worker<0.370.0>:index_rest:get_json:42]Request to (indexer) http://127.0.0.1:9102/getIndexStatus failed: {error,
    {econnrefused,
     [{lhttpc_client,send_request,1,
       [{file,"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},{line,220}]},
      {lhttpc_client,execute,9,
       [{file,"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},{line,169}]},
      {lhttpc_client,request,9,
       [{file,"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},{line,92}]}]}}
[ns_server:debug,2016-10-19T09:55:14.511-07:00,ns_1@127.0.0.1:ns_memcached-test<0.381.0>:ns_memcached:init:167]Starting ns_memcached
[ns_server:debug,2016-10-19T09:55:14.511-07:00,ns_1@127.0.0.1:<0.382.0>:ns_memcached:run_connect_phase:190]Started 'connecting' phase of ns_memcached-test. Parent is <0.381.0>
[error_logger:info,2016-10-19T09:55:14.511-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.380.0>,ns_memcached_sup}
started: [{pid,<0.381.0>},
{name,{ns_memcached,"test"}},
{mfargs,{ns_memcached,start_link,["test"]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[ns_server:warn,2016-10-19T09:55:14.512-07:00,ns_1@127.0.0.1:<0.382.0>:ns_memcached:connect:1307]Unable to connect: {error,{badmatch,{error,econnrefused}}}, retrying.
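ns_memcached runs its 'connecting' phase in a separate process and simply retries until memcached accepts the connection. A rough sketch of such a retry loop, not the actual ns_memcached implementation; host, port, socket options and the one-second backoff are all assumptions:

    %% Keep retrying while memcached's port is not yet open.
    connect_with_retry(Host, Port) ->
        case gen_tcp:connect(Host, Port, [binary, {active, false}]) of
            {ok, Sock} ->
                {ok, Sock};
            {error, econnrefused} ->
                timer:sleep(1000),               %% back off briefly, then retry
                connect_with_retry(Host, Port)
        end.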
[error_logger:info,2016-10-19T09:55:14.514-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.383.0>},
{name,{indexer_gsi,index_stats_collector}},
{mfargs,
{index_stats_collector,start_link,[indexer_gsi]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.515-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.386.0>},
{name,{indexer_gsi,stats_archiver,"@index"}},
{mfargs,{stats_archiver,start_link,["@index"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.518-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.388.0>},
{name,{indexer_gsi,stats_archiver,"default"}},
{mfargs,{stats_archiver,start_link,["@index-default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.518-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.380.0>,ns_memcached_sup}
started: [{pid,<0.389.0>},
{name,{terse_bucket_info_uploader,"test"}},
{mfargs,
{terse_bucket_info_uploader,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.519-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.380.0>},
{name,{ns_memcached_sup,"test"}},
{mfargs,{ns_memcached_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.519-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.392.0>},
{name,{indexer_gsi,stats_archiver,"locked"}},
{mfargs,{stats_archiver,start_link,["@index-locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.525-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.395.0>},
{name,compaction_daemon},
{mfargs,{compaction_daemon,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.536-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.394.0>},
{name,{indexer_gsi,stats_archiver,"test"}},
{mfargs,{stats_archiver,start_link,["@index-test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.536-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.397.0>},
{name,{indexer_gsi,stats_reader,"@index"}},
{mfargs,{stats_reader,start_link,["@index"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.536-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.398.0>},
{name,{indexer_gsi,stats_reader,"default"}},
{mfargs,{stats_reader,start_link,["@index-default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.536-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.399.0>},
{name,{indexer_gsi,stats_reader,"locked"}},
{mfargs,{stats_reader,start_link,["@index-locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.537-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.400.0>},
{name,{indexer_gsi,stats_reader,"test"}},
{mfargs,{stats_reader,start_link,["@index-test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.537-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.401.0>},
{name,{ns_vbm_sup,"test"}},
{mfargs,{ns_vbm_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.548-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.404.0>},
{name,{dcp_sup,"test"}},
{mfargs,{dcp_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:14.553-07:00,ns_1@127.0.0.1:<0.405.0>:new_concurrency_throttle:init:113]init concurrent throttle process, pid: <0.405.0>, type: kv_throttle
# of available token: 1
[error_logger:info,2016-10-19T09:55:14.554-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.406.0>},
{name,{dcp_replication_manager,"test"}},
{mfargs,{dcp_replication_manager,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.554-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.407.0>},
{name,{replication_manager,"test"}},
{mfargs,{replication_manager,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.555-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.402.0>:compaction_new_daemon:process_scheduler_message:1312]Starting compaction (compact_kv) for the following buckets:
[<<"test">>,<<"locked">>,<<"default">>]
[error_logger:info,2016-10-19T09:55:14.555-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.402.0>},
{name,compaction_new_daemon},
{mfargs,{compaction_new_daemon,start_link,[]}},
{restart_type,{permanent,4}},
{shutdown,86400000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.556-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.402.0>:compaction_new_daemon:process_scheduler_message:1312]Starting compaction (compact_views) for the following buckets:
[<<"test">>,<<"locked">>,<<"default">>]
[ns_server:info,2016-10-19T09:55:14.556-07:00,ns_1@127.0.0.1:<0.408.0>:compaction_new_daemon:spawn_scheduled_kv_compactor:471]Start compaction of vbuckets for bucket test with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:14.556-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.402.0>:compaction_new_daemon:process_scheduler_message:1312]Starting compaction (compact_master) for the following buckets:
[<<"test">>,<<"locked">>,<<"default">>]
[ns_server:info,2016-10-19T09:55:14.556-07:00,ns_1@127.0.0.1:<0.412.0>:compaction_new_daemon:spawn_master_db_compactor:850]Start compaction of master db for bucket test with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:info,2016-10-19T09:55:14.557-07:00,ns_1@127.0.0.1:<0.411.0>:compaction_new_daemon:spawn_scheduled_views_compactor:497]Start compaction of indexes for bucket test with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[error_logger:info,2016-10-19T09:55:14.559-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,cluster_logs_sup}
started: [{pid,<0.415.0>},
{name,ets_holder},
{mfargs,
{cluster_logs_collection_task,
start_link_ets_holder,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.559-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.414.0>},
{name,cluster_logs_sup},
{mfargs,{cluster_logs_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.561-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.416.0>},
{name,{dcp_notifier,"test"}},
{mfargs,{dcp_notifier,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.565-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.417.0>},
{name,remote_api},
{mfargs,{remote_api,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:14.570-07:00,ns_1@127.0.0.1:janitor_agent-test<0.421.0>:janitor_agent:read_flush_counter:1047]Loading flushseq failed: {error,enoent}. Assuming it's equal to global config.
[error_logger:info,2016-10-19T09:55:14.570-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-test'}
started: [{pid,<0.420.0>},
{name,rebalance_subprocesses_registry},
{mfargs,
{ns_process_registry,start_link,
['rebalance_subprocesses_registry-test',
[{terminate_command,kill}]]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:14.570-07:00,ns_1@127.0.0.1:<0.422.0>:compaction_new_daemon:spawn_master_db_compactor:850]Start compaction of master db for bucket locked with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:info,2016-10-19T09:55:14.570-07:00,ns_1@127.0.0.1:janitor_agent-test<0.421.0>:janitor_agent:read_flush_counter_from_config:1054]Initialized flushseq 0 from bucket config
[ns_server:info,2016-10-19T09:55:14.570-07:00,ns_1@127.0.0.1:<0.413.0>:compaction_new_daemon:spawn_scheduled_views_compactor:497]Start compaction of indexes for bucket locked with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[error_logger:info,2016-10-19T09:55:14.570-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-test'}
started: [{pid,<0.421.0>},
{name,janitor_agent},
{mfargs,{janitor_agent,start_link,["test"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.571-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.419.0>},
{name,{janitor_agent_sup,"test"}},
{mfargs,{janitor_agent_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:14.571-07:00,ns_1@127.0.0.1:<0.423.0>:compaction_new_daemon:spawn_master_db_compactor:850]Start compaction of master db for bucket default with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:14.571-07:00,ns_1@127.0.0.1:<0.413.0>:ns_couchdb_api:rpc_couchdb_node:165]RPC to couchdb node failed for {foreach_doc,<<"locked">>,
#Fun,infinity} with {badrpc,
{'EXIT',
{noproc,
{gen_server,
call,
['capi_ddoc_manager-locked',
{foreach_doc,
#Fun},
infinity]}}}}
Stack: [{ns_couchdb_api,rpc_couchdb_node,4,
[{file,"src/ns_couchdb_api.erl"},{line,164}]},
{capi_utils,foreach_live_ddoc_id,2,
[{file,"src/capi_utils.erl"},{line,151}]},
{capi_utils,fetch_ddoc_ids,1,[{file,"src/capi_utils.erl"},{line,144}]},
{compaction_new_daemon,'-spawn_scheduled_views_compactor/2-fun-0-',3,
[{file,"src/compaction_new_daemon.erl"},
{line,500}]},
{proc_lib,init_p,3,[{file,"proc_lib.erl"},{line,224}]}]
[ns_server:error,2016-10-19T09:55:14.571-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.402.0>:compaction_new_daemon:log_compactors_exit:1327]Compactor <0.413.0> exited unexpectedly: {error,
{badrpc,
{'EXIT',
{noproc,
{gen_server,call,
['capi_ddoc_manager-locked',
{foreach_doc,
#Fun},
infinity]}}}}}. Moving to the next bucket.
[ns_server:debug,2016-10-19T09:55:14.572-07:00,ns_1@127.0.0.1:<0.418.0>:mb_master:check_master_takeover_needed:141]Sending master node question to the following nodes: []
[ns_server:debug,2016-10-19T09:55:14.573-07:00,ns_1@127.0.0.1:<0.418.0>:mb_master:check_master_takeover_needed:143]Got replies: []
[ns_server:debug,2016-10-19T09:55:14.573-07:00,ns_1@127.0.0.1:<0.418.0>:mb_master:check_master_takeover_needed:149]Was unable to discover master, not going to force mastership takeover
[user:info,2016-10-19T09:55:14.573-07:00,ns_1@127.0.0.1:mb_master<0.426.0>:mb_master:init:86]I'm the only node, so I'm the master.
[ns_server:debug,2016-10-19T09:55:14.583-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.402.0>:compaction_new_daemon:process_compactors_exit:1353]Finished compaction iteration.
[ns_server:debug,2016-10-19T09:55:14.583-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.402.0>:compaction_scheduler:schedule_next:60]Finished compaction for compact_master too soon. Next run will be in 3600s
[ns_server:info,2016-10-19T09:55:14.583-07:00,ns_1@127.0.0.1:<0.424.0>:compaction_new_daemon:spawn_scheduled_views_compactor:497]Start compaction of indexes for bucket default with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:14.584-07:00,ns_1@127.0.0.1:<0.424.0>:ns_couchdb_api:rpc_couchdb_node:165]RPC to couchdb node failed for {foreach_doc,<<"default">>,
#Fun,infinity} with {badrpc,
{'EXIT',
{noproc,
{gen_server,
call,
['capi_ddoc_manager-default',
{foreach_doc,
#Fun},
infinity]}}}}
Stack: [{ns_couchdb_api,rpc_couchdb_node,4,
[{file,"src/ns_couchdb_api.erl"},{line,164}]},
{capi_utils,foreach_live_ddoc_id,2,
[{file,"src/capi_utils.erl"},{line,151}]},
{capi_utils,fetch_ddoc_ids,1,[{file,"src/capi_utils.erl"},{line,144}]},
{compaction_new_daemon,'-spawn_scheduled_views_compactor/2-fun-0-',3,
[{file,"src/compaction_new_daemon.erl"},
{line,500}]},
{proc_lib,init_p,3,[{file,"proc_lib.erl"},{line,224}]}]
[ns_server:error,2016-10-19T09:55:14.584-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.402.0>:compaction_new_daemon:log_compactors_exit:1327]Compactor <0.424.0> exited unexpectedly: {error,
{badrpc,
{'EXIT',
{noproc,
{gen_server,call,
['capi_ddoc_manager-default',
{foreach_doc,
#Fun},
infinity]}}}}}. Moving to the next bucket.
[ns_server:debug,2016-10-19T09:55:14.584-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.402.0>:compaction_new_daemon:process_compactors_exit:1353]Finished compaction iteration.
[ns_server:debug,2016-10-19T09:55:14.584-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.402.0>:compaction_scheduler:schedule_next:60]Finished compaction for compact_views too soon. Next run will be in 30s
[error_logger:error,2016-10-19T09:55:14.587-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: compaction_new_daemon:-spawn_scheduled_views_compactor/2-fun-0-/0
pid: <0.413.0>
registered_name: []
exception exit: {error,
{badrpc,
{'EXIT',
{noproc,
{gen_server,call,
['capi_ddoc_manager-locked',
{foreach_doc,#Fun},
infinity]}}}}}
in function ns_couchdb_api:rpc_couchdb_node/4 (src/ns_couchdb_api.erl, line 166)
in call from capi_utils:foreach_live_ddoc_id/2 (src/capi_utils.erl, line 151)
in call from capi_utils:fetch_ddoc_ids/1 (src/capi_utils.erl, line 144)
in call from compaction_new_daemon:'-spawn_scheduled_views_compactor/2-fun-0-'/3 (src/compaction_new_daemon.erl, line 500)
ancestors: [compaction_new_daemon,ns_server_sup,ns_server_nodes_sup,
<0.155.0>,ns_server_cluster_sup,<0.88.0>]
messages: []
links: [<0.402.0>]
dictionary: []
trap_exit: false
status: running
heap_size: 4185
stack_size: 27
reductions: 6783
neighbours:
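The crash above is a startup ordering race rather than data loss: the views compactor issues a gen_server:call to 'capi_ddoc_manager-locked' on the couchdb node before that process has registered, so the call exits with noproc and compaction moves on to the next bucket and is rescheduled. A sketch of how such a call can be made tolerant of the race; the wrapper name is hypothetical:

    %% Hypothetical guard around a call to a possibly-unregistered process.
    safe_call(Name, Request, Timeout) ->
        try
            gen_server:call(Name, Request, Timeout)
        catch
            exit:{noproc, _} ->
                {error, not_started}    %% target not registered yet
        end.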
[error_logger:info,2016-10-19T09:55:14.587-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.428.0>},
{name,{stats_collector,"test"}},
{mfargs,{stats_collector,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.587-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.431.0>},
{name,{stats_archiver,"test"}},
{mfargs,{stats_archiver,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.587-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.433.0>},
{name,{stats_reader,"test"}},
{mfargs,{stats_reader,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:error,2016-10-19T09:55:14.588-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: compaction_new_daemon:-spawn_scheduled_views_compactor/2-fun-0-/0
pid: <0.424.0>
registered_name: []
exception exit: {error,
{badrpc,
{'EXIT',
{noproc,
{gen_server,call,
['capi_ddoc_manager-default',
{foreach_doc,#Fun},
infinity]}}}}}
in function ns_couchdb_api:rpc_couchdb_node/4 (src/ns_couchdb_api.erl, line 166)
in call from capi_utils:foreach_live_ddoc_id/2 (src/capi_utils.erl, line 151)
in call from capi_utils:fetch_ddoc_ids/1 (src/capi_utils.erl, line 144)
in call from compaction_new_daemon:'-spawn_scheduled_views_compactor/2-fun-0-'/3 (src/compaction_new_daemon.erl, line 500)
ancestors: [compaction_new_daemon,ns_server_sup,ns_server_nodes_sup,
<0.155.0>,ns_server_cluster_sup,<0.88.0>]
messages: []
links: [<0.402.0>]
dictionary: []
trap_exit: false
status: running
heap_size: 4185
stack_size: 27
reductions: 6619
neighbours:
[error_logger:info,2016-10-19T09:55:14.690-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.434.0>},
{name,{goxdcr_stats_collector,"test"}},
{mfargs,{goxdcr_stats_collector,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.690-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.436.0>},
{name,{goxdcr_stats_archiver,"test"}},
{mfargs,{stats_archiver,start_link,["@xdcr-test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.690-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.327.0>:ns_bucket_sup:update_children:110]Starting new child: {{docs_sup,"locked"},
{docs_sup,start_link,["locked"]},
permanent,infinity,supervisor,
[docs_sup]}
[error_logger:info,2016-10-19T09:55:14.691-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.438.0>},
{name,{goxdcr_stats_reader,"test"}},
{mfargs,{stats_reader,start_link,["@xdcr-test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.691-07:00,ns_1@127.0.0.1:capi_doc_replicator-locked<0.441.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:14.691-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.439.0>},
{name,{failover_safeness_level,"test"}},
{mfargs,{failover_safeness_level,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.691-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-locked<0.442.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:14.691-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.356.0>},
{name,{single_bucket_kv_sup,"test"}},
{mfargs,{single_bucket_kv_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.691-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.440.0>,docs_sup}
started: [{pid,<0.441.0>},
{name,doc_replicator},
{mfargs,{doc_replicator,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.691-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.440.0>,docs_sup}
started: [{pid,<0.442.0>},
{name,doc_replication_srv},
{mfargs,{doc_replication_srv,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.692-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-locked'}
started: [{pid,<11625.299.0>},
{name,capi_ddoc_manager_events},
{mfargs,
{capi_ddoc_manager,start_link_event_manager,
["locked"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.692-07:00,ns_1@127.0.0.1:capi_doc_replicator-locked<0.441.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.300.0>
[ns_server:debug,2016-10-19T09:55:14.692-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-locked<0.442.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.300.0>
[error_logger:info,2016-10-19T09:55:14.692-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-locked'}
started: [{pid,<11625.300.0>},
{name,capi_ddoc_manager},
{mfargs,
{capi_ddoc_manager,start_link,
["locked",<0.441.0>,<0.442.0>]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.692-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.327.0>:ns_bucket_sup:update_children:110]Starting new child: {{single_bucket_kv_sup,"locked"},
{single_bucket_kv_sup,start_link,["locked"]},
permanent,infinity,supervisor,
[single_bucket_kv_sup]}
[ns_server:debug,2016-10-19T09:55:14.692-07:00,ns_1@127.0.0.1:capi_doc_replicator-locked<0.441.0>:doc_replicator:loop:64]doing replicate_newnodes_docs
[error_logger:info,2016-10-19T09:55:14.692-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.440.0>,docs_sup}
started: [{pid,<11625.298.0>},
{name,capi_ddoc_manager_sup},
{mfargs,
{capi_ddoc_manager_sup,start_link_remote,
['couchdb_ns_1@127.0.0.1',"locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.692-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.440.0>},
{name,{docs_sup,"locked"}},
{mfargs,{docs_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.693-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.445.0>,docs_kv_sup}
started: [{pid,<11625.302.0>},
{name,capi_set_view_manager},
{mfargs,
{capi_set_view_manager,start_link_remote,
['couchdb_ns_1@127.0.0.1',"locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.693-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.447.0>:ns_memcached:init:167]Starting ns_memcached
[error_logger:info,2016-10-19T09:55:14.693-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.445.0>,docs_kv_sup}
started: [{pid,<11625.305.0>},
{name,couch_stats_reader},
{mfargs,
{couch_stats_reader,start_link_remote,
['couchdb_ns_1@127.0.0.1',"locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.693-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.445.0>},
{name,{docs_kv_sup,"locked"}},
{mfargs,{docs_kv_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:14.693-07:00,ns_1@127.0.0.1:<0.448.0>:ns_memcached:run_connect_phase:190]Started 'connecting' phase of ns_memcached-locked. Parent is <0.447.0>
[error_logger:info,2016-10-19T09:55:14.694-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.446.0>,ns_memcached_sup}
started: [{pid,<0.447.0>},
{name,{ns_memcached,"locked"}},
{mfargs,{ns_memcached,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.694-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.446.0>,ns_memcached_sup}
started: [{pid,<0.449.0>},
{name,{terse_bucket_info_uploader,"locked"}},
{mfargs,
{terse_bucket_info_uploader,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.694-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.446.0>},
{name,{ns_memcached_sup,"locked"}},
{mfargs,{ns_memcached_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.694-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.451.0>},
{name,{ns_vbm_sup,"locked"}},
{mfargs,{ns_vbm_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2016-10-19T09:55:14.694-07:00,ns_1@127.0.0.1:janitor_agent-locked<0.458.0>:janitor_agent:read_flush_counter:1047]Loading flushseq failed: {error,enoent}. Assuming it's equal to global config.
[error_logger:info,2016-10-19T09:55:14.694-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.452.0>},
{name,{dcp_sup,"locked"}},
{mfargs,{dcp_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2016-10-19T09:55:14.695-07:00,ns_1@127.0.0.1:janitor_agent-locked<0.458.0>:janitor_agent:read_flush_counter_from_config:1054]Initialized flushseq 0 from bucket config
[error_logger:info,2016-10-19T09:55:14.695-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.453.0>},
{name,{dcp_replication_manager,"locked"}},
{mfargs,
{dcp_replication_manager,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.695-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.454.0>},
{name,{replication_manager,"locked"}},
{mfargs,{replication_manager,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.695-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.455.0>},
{name,{dcp_notifier,"locked"}},
{mfargs,{dcp_notifier,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.695-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-locked'}
started: [{pid,<0.457.0>},
{name,rebalance_subprocesses_registry},
{mfargs,
{ns_process_registry,start_link,
['rebalance_subprocesses_registry-locked',
[{terminate_command,kill}]]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.695-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-locked'}
started: [{pid,<0.458.0>},
{name,janitor_agent},
{mfargs,{janitor_agent,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.696-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.456.0>},
{name,{janitor_agent_sup,"locked"}},
{mfargs,{janitor_agent_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.696-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.459.0>},
{name,{stats_collector,"locked"}},
{mfargs,{stats_collector,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.696-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.327.0>:ns_bucket_sup:update_children:110]Starting new child: {{docs_sup,"default"},
{docs_sup,start_link,["default"]},
permanent,infinity,supervisor,
[docs_sup]}
[error_logger:info,2016-10-19T09:55:14.696-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.462.0>},
{name,{stats_archiver,"locked"}},
{mfargs,{stats_archiver,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.696-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.464.0>},
{name,{stats_reader,"locked"}},
{mfargs,{stats_reader,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.696-07:00,ns_1@127.0.0.1:capi_doc_replicator-default<0.472.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[ns_server:debug,2016-10-19T09:55:14.696-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-default<0.473.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:14.696-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.465.0>},
{name,{goxdcr_stats_collector,"locked"}},
{mfargs,{goxdcr_stats_collector,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.696-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.467.0>},
{name,{goxdcr_stats_archiver,"locked"}},
{mfargs,{stats_archiver,start_link,["@xdcr-locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.697-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.469.0>},
{name,{goxdcr_stats_reader,"locked"}},
{mfargs,{stats_reader,start_link,["@xdcr-locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.697-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.470.0>},
{name,{failover_safeness_level,"locked"}},
{mfargs,
{failover_safeness_level,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.697-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.444.0>},
{name,{single_bucket_kv_sup,"locked"}},
{mfargs,{single_bucket_kv_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.697-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.471.0>,docs_sup}
started: [{pid,<0.472.0>},
{name,doc_replicator},
{mfargs,{doc_replicator,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.697-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.471.0>,docs_sup}
started: [{pid,<0.473.0>},
{name,doc_replication_srv},
{mfargs,{doc_replication_srv,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.698-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-default'}
started: [{pid,<11625.307.0>},
{name,capi_ddoc_manager_events},
{mfargs,
{capi_ddoc_manager,start_link_event_manager,
["default"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.698-07:00,ns_1@127.0.0.1:capi_doc_replicator-default<0.472.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.308.0>
[ns_server:debug,2016-10-19T09:55:14.698-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-default<0.473.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.308.0>
[ns_server:debug,2016-10-19T09:55:14.698-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.327.0>:ns_bucket_sup:update_children:110]Starting new child: {{single_bucket_kv_sup,"default"},
{single_bucket_kv_sup,start_link,["default"]},
permanent,infinity,supervisor,
[single_bucket_kv_sup]}
[error_logger:info,2016-10-19T09:55:14.698-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.471.0>,docs_sup}
started: [{pid,<11625.306.0>},
{name,capi_ddoc_manager_sup},
{mfargs,
{capi_ddoc_manager_sup,start_link_remote,
['couchdb_ns_1@127.0.0.1',"default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:14.699-07:00,ns_1@127.0.0.1:capi_doc_replicator-default<0.472.0>:doc_replicator:loop:64]doing replicate_newnodes_docs
[error_logger:info,2016-10-19T09:55:14.699-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.471.0>},
{name,{docs_sup,"default"}},
{mfargs,{docs_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.699-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-default'}
started: [{pid,<11625.308.0>},
{name,capi_ddoc_manager},
{mfargs,
{capi_ddoc_manager,start_link,
["default",<0.472.0>,<0.473.0>]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.699-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.476.0>,docs_kv_sup}
started: [{pid,<11625.310.0>},
{name,capi_set_view_manager},
{mfargs,
{capi_set_view_manager,start_link_remote,
['couchdb_ns_1@127.0.0.1',"default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.700-07:00,ns_1@127.0.0.1:ns_memcached-default<0.478.0>:ns_memcached:init:167]Starting ns_memcached
[error_logger:info,2016-10-19T09:55:14.699-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.476.0>,docs_kv_sup}
started: [{pid,<11625.313.0>},
{name,couch_stats_reader},
{mfargs,
{couch_stats_reader,start_link_remote,
['couchdb_ns_1@127.0.0.1',"default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.700-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.476.0>},
{name,{docs_kv_sup,"default"}},
{mfargs,{docs_kv_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:14.700-07:00,ns_1@127.0.0.1:<0.479.0>:ns_memcached:run_connect_phase:190]Started 'connecting' phase of ns_memcached-default. Parent is <0.478.0>
[error_logger:info,2016-10-19T09:55:14.700-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.477.0>,ns_memcached_sup}
started: [{pid,<0.478.0>},
{name,{ns_memcached,"default"}},
{mfargs,{ns_memcached,start_link,["default"]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.700-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.477.0>,ns_memcached_sup}
started: [{pid,<0.480.0>},
{name,{terse_bucket_info_uploader,"default"}},
{mfargs,
{terse_bucket_info_uploader,start_link,
["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.701-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.477.0>},
{name,{ns_memcached_sup,"default"}},
{mfargs,{ns_memcached_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2016-10-19T09:55:14.701-07:00,ns_1@127.0.0.1:janitor_agent-default<0.489.0>:janitor_agent:read_flush_counter:1047]Loading flushseq failed: {error,enoent}. Assuming it's equal to global config.
[ns_server:info,2016-10-19T09:55:14.701-07:00,ns_1@127.0.0.1:janitor_agent-default<0.489.0>:janitor_agent:read_flush_counter_from_config:1054]Initialized flushseq 0 from bucket config
[error_logger:info,2016-10-19T09:55:14.701-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.482.0>},
{name,{ns_vbm_sup,"default"}},
{mfargs,{ns_vbm_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.701-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.483.0>},
{name,{dcp_sup,"default"}},
{mfargs,{dcp_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.701-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.484.0>},
{name,{dcp_replication_manager,"default"}},
{mfargs,
{dcp_replication_manager,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.702-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.485.0>},
{name,{replication_manager,"default"}},
{mfargs,{replication_manager,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.702-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.486.0>},
{name,{dcp_notifier,"default"}},
{mfargs,{dcp_notifier,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.702-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-default'}
started: [{pid,<0.488.0>},
{name,rebalance_subprocesses_registry},
{mfargs,
{ns_process_registry,start_link,
['rebalance_subprocesses_registry-default',
[{terminate_command,kill}]]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.702-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-default'}
started: [{pid,<0.489.0>},
{name,janitor_agent},
{mfargs,{janitor_agent,start_link,["default"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.702-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.487.0>},
{name,{janitor_agent_sup,"default"}},
{mfargs,{janitor_agent_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.703-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.490.0>},
{name,{stats_collector,"default"}},
{mfargs,{stats_collector,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.703-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.493.0>},
{name,{stats_archiver,"default"}},
{mfargs,{stats_archiver,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.703-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.495.0>},
{name,{stats_reader,"default"}},
{mfargs,{stats_reader,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.703-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.496.0>},
{name,{goxdcr_stats_collector,"default"}},
{mfargs,
{goxdcr_stats_collector,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.703-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.498.0>},
{name,{goxdcr_stats_archiver,"default"}},
{mfargs,{stats_archiver,start_link,["@xdcr-default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.703-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.500.0>},
{name,{goxdcr_stats_reader,"default"}},
{mfargs,{stats_reader,start_link,["@xdcr-default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.703-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.501.0>},
{name,{failover_safeness_level,"default"}},
{mfargs,
{failover_safeness_level,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.704-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.475.0>},
{name,{single_bucket_kv_sup,"default"}},
{mfargs,{single_bucket_kv_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:14.825-07:00,ns_1@127.0.0.1:mb_master_sup<0.502.0>:misc:start_singleton:1094]start_singleton(gen_server, ns_tick, [], []): started as <0.503.0> on 'ns_1@127.0.0.1'
[error_logger:info,2016-10-19T09:55:14.825-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.503.0>},
{name,ns_tick},
{mfargs,{ns_tick,start_link,[]}},
{restart_type,permanent},
{shutdown,10},
{child_type,worker}]
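start_singleton is about running exactly one ns_tick (and, below, one ns_orchestrator and one auto_failover) per cluster. A rough sketch of the usual pattern using the global name registry; this is an assumption about the technique, not ns_server's actual misc:start_singleton:

    %% Start a gen_server under a cluster-wide global name; if another node
    %% already won the race, reuse that instance.
    start_singleton(Mod, Args) ->
        case gen_server:start_link({global, Mod}, Mod, Args, []) of
            {ok, Pid} ->
                {ok, Pid};
            {error, {already_started, Pid}} ->
                {ok, Pid}
        end.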
[ns_server:debug,2016-10-19T09:55:14.828-07:00,ns_1@127.0.0.1:ns_orchestrator_sup<0.504.0>:misc:start_singleton:1094]start_singleton(gen_fsm, ns_orchestrator, [], []): started as <0.505.0> on 'ns_1@127.0.0.1'
[error_logger:info,2016-10-19T09:55:14.829-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_orchestrator_sup}
started: [{pid,<0.505.0>},
{name,ns_orchestrator},
{mfargs,{ns_orchestrator,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.831-07:00,ns_1@127.0.0.1:<0.507.0>:auto_failover:init:147]init auto_failover.
[ns_server:debug,2016-10-19T09:55:14.831-07:00,ns_1@127.0.0.1:ns_orchestrator_sup<0.504.0>:misc:start_singleton:1094]start_singleton(gen_server, auto_failover, [], []): started as <0.507.0> on 'ns_1@127.0.0.1'
[ns_server:debug,2016-10-19T09:55:14.831-07:00,ns_1@127.0.0.1:<0.418.0>:restartable:start_child:98]Started child process <0.426.0>
MFA: {mb_master,start_link,[]}
[error_logger:info,2016-10-19T09:55:14.831-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_orchestrator_sup}
started: [{pid,<0.507.0>},
{name,auto_failover},
{mfargs,{auto_failover,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.831-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.504.0>},
{name,ns_orchestrator_sup},
{mfargs,{ns_orchestrator_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.832-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.418.0>},
{name,mb_master},
{mfargs,
{restartable,start_link,
[{mb_master,start_link,[]},infinity]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.832-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.508.0>},
{name,master_activity_events_ingress},
{mfargs,
{gen_event,start_link,
[{local,master_activity_events_ingress}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.832-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.509.0>},
{name,master_activity_events_timestamper},
{mfargs,
{master_activity_events,start_link_timestamper,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:14.834-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.511.0>},
{name,master_activity_events_pids_watcher},
{mfargs,
{master_activity_events_pids_watcher,start_link,
[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.839-07:00,ns_1@127.0.0.1:<0.516.0>:janitor_agent:query_vbucket_states_loop_next_step:118]Waiting for "test" on 'ns_1@127.0.0.1'
[ns_server:debug,2016-10-19T09:55:14.843-07:00,ns_1@127.0.0.1:ns_server_nodes_sup<0.156.0>:one_shot_barrier:notify:27]Notifying on barrier menelaus_barrier
[error_logger:info,2016-10-19T09:55:14.843-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.512.0>},
{name,master_activity_events_keeper},
{mfargs,{master_activity_events_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:14.844-07:00,ns_1@127.0.0.1:menelaus_barrier<0.158.0>:one_shot_barrier:barrier_body:62]Barrier menelaus_barrier got notification from <0.156.0>
[ns_server:debug,2016-10-19T09:55:14.844-07:00,ns_1@127.0.0.1:ns_server_nodes_sup<0.156.0>:one_shot_barrier:notify:32]Successfully notified on barrier menelaus_barrier
[ns_server:debug,2016-10-19T09:55:14.844-07:00,ns_1@127.0.0.1:<0.155.0>:restartable:start_child:98]Started child process <0.156.0>
MFA: {ns_server_nodes_sup,start_link,[]}
[error_logger:info,2016-10-19T09:55:14.844-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.197.0>},
{name,ns_server_sup},
{mfargs,{ns_server_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:14.844-07:00,ns_1@127.0.0.1:<0.2.0>:child_erlang:child_loop:115]67655: Entered child_loop
[error_logger:info,2016-10-19T09:55:14.844-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.155.0>},
{name,ns_server_nodes_sup},
{mfargs,
{restartable,start_link,
[{ns_server_nodes_sup,start_link,[]},
infinity]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:14.844-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
application: ns_server
started_at: 'ns_1@127.0.0.1'
[ns_server:debug,2016-10-19T09:55:14.857-07:00,ns_1@127.0.0.1:json_rpc_connection-goxdcr-cbauth<0.517.0>:json_rpc_connection:init:74]Observed revrpc connection: label "goxdcr-cbauth", handling process <0.517.0>
[ns_server:debug,2016-10-19T09:55:14.857-07:00,ns_1@127.0.0.1:json_rpc_connection-saslauthd-saslauthd-port<0.518.0>:json_rpc_connection:init:74]Observed revrpc connection: label "saslauthd-saslauthd-port", handling process <0.518.0>
[ns_server:debug,2016-10-19T09:55:14.857-07:00,ns_1@127.0.0.1:menelaus_cbauth<0.285.0>:menelaus_cbauth:handle_cast:87]Observed json rpc process {"goxdcr-cbauth",<0.517.0>} started
[ns_server:debug,2016-10-19T09:55:14.857-07:00,ns_1@127.0.0.1:json_rpc_connection-cbq-engine-cbauth<0.520.0>:json_rpc_connection:init:74]Observed revrpc connection: label "cbq-engine-cbauth", handling process <0.520.0>
[ns_server:info,2016-10-19T09:55:14.857-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.447.0>:ns_memcached:ensure_bucket:1324]Created bucket "locked" with config string "ht_size=3079;ht_locks=47;tap_noop_interval=20;max_size=104857600;tap_keepalive=300;dbname=/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/data/locked;backend=couchdb;couch_bucket=locked;max_vbuckets=64;alog_path=/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/data/locked/access.log;data_traffic_enabled=false;max_num_workers=3;uuid=8515ae93e826e7c4389f3fd25fbb263e;item_eviction_policy=value_only;conflict_resolution_type=seqno;failpartialwarmup=false;"
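The config string handed to memcached above is a flat semicolon-separated key=value list. A minimal sketch of how such a string decodes, in Python (illustrative only; ns_server builds and consumes these strings in Erlang):

    def parse_engine_config(config: str) -> dict:
        # split "k1=v1;k2=v2;" into {"k1": "v1", "k2": "v2"}
        return dict(item.split("=", 1) for item in config.split(";") if item)

    cfg = parse_engine_config(
        "ht_size=3079;ht_locks=47;max_size=104857600;"
        "backend=couchdb;max_vbuckets=64;item_eviction_policy=value_only;")
    assert cfg["max_size"] == "104857600"   # 100 MiB quota for bucket "locked"
    assert cfg["max_vbuckets"] == "64"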
[error_logger:error,2016-10-19T09:55:14.857-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.517.0>,{ok,<0.517.0>}}
[ns_server:debug,2016-10-19T09:55:14.857-07:00,ns_1@127.0.0.1:json_rpc_connection-index-cbauth<0.524.0>:json_rpc_connection:init:74]Observed revrpc connection: label "index-cbauth", handling process <0.524.0>
[error_logger:error,2016-10-19T09:55:14.858-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.518.0>,{ok,<0.518.0>}}
[ns_server:info,2016-10-19T09:55:14.858-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.447.0>:ns_memcached:handle_cast:689]Main ns_memcached connection established: {ok,#Port<0.6703>}
[error_logger:error,2016-10-19T09:55:14.858-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.520.0>,{ok,<0.520.0>}}
[error_logger:error,2016-10-19T09:55:14.858-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.524.0>,{ok,<0.524.0>}}
[ns_server:debug,2016-10-19T09:55:14.858-07:00,ns_1@127.0.0.1:json_rpc_connection-projector-cbauth<0.522.0>:json_rpc_connection:init:74]Observed revrpc connection: label "projector-cbauth", handling process <0.522.0>
[error_logger:error,2016-10-19T09:55:14.858-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.522.0>,{ok,<0.522.0>}}
[ns_server:info,2016-10-19T09:55:14.861-07:00,ns_1@127.0.0.1:ns_memcached-default<0.478.0>:ns_memcached:ensure_bucket:1324]Created bucket "default" with config string "ht_size=3079;ht_locks=47;tap_noop_interval=20;max_size=104857600;tap_keepalive=300;dbname=/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/data/default;backend=couchdb;couch_bucket=default;max_vbuckets=64;alog_path=/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/data/default/access.log;data_traffic_enabled=false;max_num_workers=3;uuid=b04d5897bd3c5329a82156f1b77c395d;item_eviction_policy=value_only;conflict_resolution_type=seqno;failpartialwarmup=false;"
[ns_server:info,2016-10-19T09:55:14.861-07:00,ns_1@127.0.0.1:ns_memcached-default<0.478.0>:ns_memcached:handle_cast:689]Main ns_memcached connection established: {ok,#Port<0.6719>}
[ns_server:debug,2016-10-19T09:55:14.861-07:00,ns_1@127.0.0.1:json_rpc_connection-goxdcr-cbauth<0.517.0>:json_rpc_connection:handle_call:157]sending jsonrpc call:{[{jsonrpc,<<"2.0">>},
{id,0},
{method,<<"AuthCacheSvc.UpdateDB">>},
{params,
[{[{specialUser,<<"@goxdcr-cbauth">>},
{nodes,
[{[{host,<<"127.0.0.1">>},
{user,<<"_admin">>},
{password,"*****"},
{ports,
[8091,18091,9100,9101,9102,9103,9104,9105,
18092,8092,11207,9999,11210,11211,8093,
18093]},
{local,true}]}]},
{buckets,
[{[{name,<<"test">>},{password,"*****"}]},
{[{name,<<"locked">>},{password,"*****"}]},
{[{name,<<"default">>},{password,"*****"}]}]},
{authCheckURL,<<"http://127.0.0.1:8091/_cbauth">>},
{permissionCheckURL,
<<"http://127.0.0.1:8091/_cbauth/checkPermission">>},
{ldapEnabled,false},
{permissionsVersion,75615},
{admin,
{[{user,<<"Administrator">>},
{salt,<<"QB3B+r6o6uk8Xpv/FO8fTQ==">>},
{mac,
<<"6S6aASAfFc8/OJxtT/k7eSxo1HU=">>}]}}]}]}]}
[ns_server:debug,2016-10-19T09:55:14.862-07:00,ns_1@127.0.0.1:json_rpc_connection-goxdcr-cbauth<0.517.0>:json_rpc_connection:handle_info:93]got response: [{<<"id">>,0},{<<"result">>,true},{<<"error">>,null}]
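The UpdateDB exchanges above are plain JSON-RPC 2.0: each request carries jsonrpc/id/method/params, and the service acks with a result/error pair keyed by the same id. A hedged sketch of that framing in Python (the method name and field shapes are taken from the log entries; the revrpc socket transport is not shown, and the values here are placeholders):

    import json

    request = {
        "jsonrpc": "2.0",
        "id": 0,
        "method": "AuthCacheSvc.UpdateDB",
        "params": [{
            "specialUser": "@goxdcr-cbauth",
            "authCheckURL": "http://127.0.0.1:8091/_cbauth",
            "permissionCheckURL": "http://127.0.0.1:8091/_cbauth/checkPermission",
            "ldapEnabled": False,
            "permissionsVersion": 75615,
        }],
    }
    wire = json.dumps(request).encode()           # bytes on the revrpc socket

    reply = json.loads('{"id": 0, "result": true, "error": null}')
    assert reply["id"] == request["id"] and reply["error"] is None

The same payload is then pushed to each registered cbauth consumer (cbq-engine, index, projector), as the following entries show.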
[ns_server:debug,2016-10-19T09:55:14.862-07:00,ns_1@127.0.0.1:menelaus_cbauth<0.285.0>:menelaus_cbauth:handle_cast:87]Observed json rpc process {"cbq-engine-cbauth",<0.520.0>} started
[ns_server:debug,2016-10-19T09:55:14.862-07:00,ns_1@127.0.0.1:json_rpc_connection-cbq-engine-cbauth<0.520.0>:json_rpc_connection:handle_call:157]sending jsonrpc call:{[{jsonrpc,<<"2.0">>},
{id,0},
{method,<<"AuthCacheSvc.UpdateDB">>},
{params,
[{[{specialUser,<<"@cbq-engine-cbauth">>},
{nodes,
[{[{host,<<"127.0.0.1">>},
{user,<<"_admin">>},
{password,"*****"},
{ports,
[8091,18091,9100,9101,9102,9103,9104,9105,
18092,8092,11207,9999,11210,11211,8093,
18093]},
{local,true}]}]},
{buckets,
[{[{name,<<"test">>},{password,"*****"}]},
{[{name,<<"locked">>},{password,"*****"}]},
{[{name,<<"default">>},{password,"*****"}]}]},
{authCheckURL,<<"http://127.0.0.1:8091/_cbauth">>},
{permissionCheckURL,
<<"http://127.0.0.1:8091/_cbauth/checkPermission">>},
{ldapEnabled,false},
{permissionsVersion,75615},
{admin,
{[{user,<<"Administrator">>},
{salt,<<"QB3B+r6o6uk8Xpv/FO8fTQ==">>},
{mac,
<<"6S6aASAfFc8/OJxtT/k7eSxo1HU=">>}]}}]}]}]}
[ns_server:debug,2016-10-19T09:55:14.863-07:00,ns_1@127.0.0.1:json_rpc_connection-cbq-engine-cbauth<0.520.0>:json_rpc_connection:handle_info:93]got response: [{<<"id">>,0},{<<"result">>,true},{<<"error">>,null}]
[ns_server:debug,2016-10-19T09:55:14.863-07:00,ns_1@127.0.0.1:menelaus_cbauth<0.285.0>:menelaus_cbauth:handle_cast:87]Observed json rpc process {"index-cbauth",<0.524.0>} started
[ns_server:debug,2016-10-19T09:55:14.864-07:00,ns_1@127.0.0.1:json_rpc_connection-index-cbauth<0.524.0>:json_rpc_connection:handle_call:157]sending jsonrpc call:{[{jsonrpc,<<"2.0">>},
{id,0},
{method,<<"AuthCacheSvc.UpdateDB">>},
{params,
[{[{specialUser,<<"@index-cbauth">>},
{nodes,
[{[{host,<<"127.0.0.1">>},
{user,<<"_admin">>},
{password,"*****"},
{ports,
[8091,18091,9100,9101,9102,9103,9104,9105,
18092,8092,11207,9999,11210,11211,8093,
18093]},
{local,true}]}]},
{buckets,
[{[{name,<<"test">>},{password,"*****"}]},
{[{name,<<"locked">>},{password,"*****"}]},
{[{name,<<"default">>},{password,"*****"}]}]},
{authCheckURL,<<"http://127.0.0.1:8091/_cbauth">>},
{permissionCheckURL,
<<"http://127.0.0.1:8091/_cbauth/checkPermission">>},
{ldapEnabled,false},
{permissionsVersion,75615},
{admin,
{[{user,<<"Administrator">>},
{salt,<<"QB3B+r6o6uk8Xpv/FO8fTQ==">>},
{mac,
<<"6S6aASAfFc8/OJxtT/k7eSxo1HU=">>}]}}]}]}]}
[ns_server:debug,2016-10-19T09:55:14.864-07:00,ns_1@127.0.0.1:json_rpc_connection-index-cbauth<0.524.0>:json_rpc_connection:handle_info:93]got response: [{<<"id">>,0},{<<"result">>,true},{<<"error">>,null}]
[ns_server:debug,2016-10-19T09:55:14.864-07:00,ns_1@127.0.0.1:menelaus_cbauth<0.285.0>:menelaus_cbauth:handle_cast:87]Observed json rpc process {"projector-cbauth",<0.522.0>} started
[ns_server:debug,2016-10-19T09:55:14.865-07:00,ns_1@127.0.0.1:json_rpc_connection-projector-cbauth<0.522.0>:json_rpc_connection:handle_call:157]sending jsonrpc call:{[{jsonrpc,<<"2.0">>},
{id,0},
{method,<<"AuthCacheSvc.UpdateDB">>},
{params,
[{[{specialUser,<<"@projector-cbauth">>},
{nodes,
[{[{host,<<"127.0.0.1">>},
{user,<<"_admin">>},
{password,"*****"},
{ports,
[8091,18091,9100,9101,9102,9103,9104,9105,
18092,8092,11207,9999,11210,11211,8093,
18093]},
{local,true}]}]},
{buckets,
[{[{name,<<"test">>},{password,"*****"}]},
{[{name,<<"locked">>},{password,"*****"}]},
{[{name,<<"default">>},{password,"*****"}]}]},
{authCheckURL,<<"http://127.0.0.1:8091/_cbauth">>},
{permissionCheckURL,
<<"http://127.0.0.1:8091/_cbauth/checkPermission">>},
{ldapEnabled,false},
{permissionsVersion,75615},
{admin,
{[{user,<<"Administrator">>},
{salt,<<"QB3B+r6o6uk8Xpv/FO8fTQ==">>},
{mac,
<<"6S6aASAfFc8/OJxtT/k7eSxo1HU=">>}]}}]}]}]}
[ns_server:debug,2016-10-19T09:55:14.866-07:00,ns_1@127.0.0.1:json_rpc_connection-projector-cbauth<0.522.0>:json_rpc_connection:handle_info:93]got response: [{<<"id">>,0},{<<"result">>,true},{<<"error">>,null}]
[user:info,2016-10-19T09:55:15.361-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.447.0>:ns_memcached:handle_cast:718]Bucket "locked" loaded on node 'ns_1@127.0.0.1' in 0 seconds.
[user:info,2016-10-19T09:55:15.362-07:00,ns_1@127.0.0.1:ns_memcached-default<0.478.0>:ns_memcached:handle_cast:718]Bucket "default" loaded on node 'ns_1@127.0.0.1' in 0 seconds.
[ns_server:info,2016-10-19T09:55:15.548-07:00,ns_1@127.0.0.1:ns_memcached-test<0.381.0>:ns_memcached:ensure_bucket:1324]Created bucket "test" with config string "ht_size=3079;ht_locks=47;tap_noop_interval=20;max_size=524288000;tap_keepalive=300;dbname=/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/data/test;backend=couchdb;couch_bucket=test;max_vbuckets=64;alog_path=/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/data/test/access.log;data_traffic_enabled=false;max_num_workers=3;uuid=7f7d4a28ca84a805edf9c899521eb18c;item_eviction_policy=value_only;conflict_resolution_type=seqno;failpartialwarmup=false;"
[ns_server:info,2016-10-19T09:55:15.548-07:00,ns_1@127.0.0.1:ns_memcached-test<0.381.0>:ns_memcached:handle_cast:689]Main ns_memcached connection established: {ok,#Port<0.7030>}
[ns_server:debug,2016-10-19T09:55:15.550-07:00,ns_1@127.0.0.1:<0.410.0>:compaction_new_daemon:bucket_needs_compaction:972]`test` data size is 0, disk size is 0
[ns_server:info,2016-10-19T09:55:15.550-07:00,ns_1@127.0.0.1:<0.581.0>:compaction_new_daemon:spawn_scheduled_kv_compactor:471]Start compaction of vbuckets for bucket locked with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:15.550-07:00,ns_1@127.0.0.1:<0.583.0>:compaction_new_daemon:bucket_needs_compaction:972]`locked` data size is 11443, disk size is 273325
[ns_server:info,2016-10-19T09:55:15.551-07:00,ns_1@127.0.0.1:<0.584.0>:compaction_new_daemon:spawn_scheduled_kv_compactor:471]Start compaction of vbuckets for bucket default with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:15.551-07:00,ns_1@127.0.0.1:<0.586.0>:compaction_new_daemon:bucket_needs_compaction:972]`default` data size is 11278, disk size is 265088
[ns_server:debug,2016-10-19T09:55:15.551-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.402.0>:compaction_new_daemon:process_compactors_exit:1353]Finished compaction iteration.
[ns_server:debug,2016-10-19T09:55:15.551-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.402.0>:compaction_scheduler:schedule_next:60]Finished compaction for compact_kv too soon. Next run will be in 29s
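The compaction daemon's decision above is a fragmentation check against the {30,undefined} thresholds: roughly, compact when dead space on disk reaches 30% (the second element would be an absolute-size trigger, unset here). A hedged reading of that check, using the sizes logged for each bucket (the exact ns_server logic is not reproduced here):

    def needs_compaction(data_size: int, disk_size: int, pct: int = 30) -> bool:
        if disk_size <= 0:
            return False                          # bucket "test": 0 / 0
        garbage = disk_size - data_size
        return 100 * garbage / disk_size >= pct

    assert not needs_compaction(0, 0)             # "test"
    assert needs_compaction(11443, 273325)        # "locked": ~96% garbage
    assert needs_compaction(11278, 265088)        # "default": ~96% garbage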
[ns_server:error,2016-10-19T09:55:15.830-07:00,ns_1@127.0.0.1:index_stats_collector-index<0.383.0>:index_rest:get_json:42]Request to (indexer) http://127.0.0.1:9102/stats?async=true failed: {error,
{econnrefused,
[{lhttpc_client,
send_request,
1,
[{file,
"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
{line,
220}]},
{lhttpc_client,
execute,
9,
[{file,
"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
{line,
169}]},
{lhttpc_client,
request,
9,
[{file,
"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
{line,
92}]}]}}
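The indexer's stats endpoint (127.0.0.1:9102) is simply not listening yet, so lhttpc fails with econnrefused and the stats collector tries again on its next tick. A small illustrative poll along those lines (the URL is taken from the entry above; the retry and backoff policy are invented for the sketch):

    import time
    import urllib.error
    import urllib.request

    def fetch_indexer_stats(url: str = "http://127.0.0.1:9102/stats?async=true",
                            tries: int = 3):
        for attempt in range(tries):
            try:
                with urllib.request.urlopen(url, timeout=5) as resp:
                    return resp.read()
            except (urllib.error.URLError, OSError):
                time.sleep(2 ** attempt)          # refused: wait and retry
        return None                               # give up; caller just logs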
[ns_server:debug,2016-10-19T09:55:15.840-07:00,ns_1@127.0.0.1:<0.516.0>:janitor_agent:query_vbucket_states_loop_next_step:118]Waiting for "test" on 'ns_1@127.0.0.1'
[ns_server:debug,2016-10-19T09:55:16.841-07:00,ns_1@127.0.0.1:<0.516.0>:janitor_agent:query_vbucket_states_loop_next_step:118]Waiting for "test" on 'ns_1@127.0.0.1'
[user:info,2016-10-19T09:55:17.052-07:00,ns_1@127.0.0.1:ns_memcached-test<0.381.0>:ns_memcached:handle_cast:718]Bucket "test" loaded on node 'ns_1@127.0.0.1' in 1 seconds.
[ns_server:debug,2016-10-19T09:55:17.844-07:00,ns_1@127.0.0.1:janitor_agent-test<0.421.0>:dcp_sup:nuke:79]Nuking DCP replicators for bucket "test":
[]
[ns_server:debug,2016-10-19T09:55:17.852-07:00,ns_1@127.0.0.1:replication_manager-test<0.407.0>:replication_manager:handle_call:182]Change replication type from tap to dcp
[ns_server:info,2016-10-19T09:55:17.853-07:00,ns_1@127.0.0.1:ns_memcached-test<0.381.0>:ns_memcached:handle_call:287]Enabling traffic to bucket "test"
[ns_server:info,2016-10-19T09:55:17.853-07:00,ns_1@127.0.0.1:ns_memcached-test<0.381.0>:ns_memcached:handle_call:291]Bucket "test" marked as warmed in 2 seconds
[ns_server:debug,2016-10-19T09:55:17.854-07:00,ns_1@127.0.0.1:janitor_agent-locked<0.458.0>:dcp_sup:nuke:79]Nuking DCP replicators for bucket "locked":
[]
[ns_server:debug,2016-10-19T09:55:17.858-07:00,ns_1@127.0.0.1:replication_manager-locked<0.454.0>:replication_manager:handle_call:182]Change replication type from tap to dcp
[ns_server:info,2016-10-19T09:55:17.858-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.447.0>:ns_memcached:handle_call:287]Enabling traffic to bucket "locked"
[ns_server:info,2016-10-19T09:55:17.858-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.447.0>:ns_memcached:handle_call:291]Bucket "locked" marked as warmed in 3 seconds
[ns_server:debug,2016-10-19T09:55:17.859-07:00,ns_1@127.0.0.1:janitor_agent-default<0.489.0>:dcp_sup:nuke:79]Nuking DCP replicators for bucket "default":
[]
[ns_server:debug,2016-10-19T09:55:17.860-07:00,ns_1@127.0.0.1:replication_manager-default<0.485.0>:replication_manager:handle_call:182]Change replication type from tap to dcp
[ns_server:info,2016-10-19T09:55:17.860-07:00,ns_1@127.0.0.1:ns_memcached-default<0.478.0>:ns_memcached:handle_call:287]Enabling traffic to bucket "default"
[ns_server:info,2016-10-19T09:55:17.860-07:00,ns_1@127.0.0.1:ns_memcached-default<0.478.0>:ns_memcached:handle_call:291]Bucket "default" marked as warmed in 2 seconds
[ns_server:info,2016-10-19T09:55:19.220-07:00,ns_1@127.0.0.1:ns_doctor<0.242.0>:ns_doctor:update_status:314]The following buckets became ready on node 'ns_1@127.0.0.1': ["default",
"locked","test"]
[error_logger:error,2016-10-19T09:55:29.208-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]** Generic server memsup terminating
** Last message in was {'EXIT',<0.883.0>,
{enfile,
[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}}
** When Server state == [{data,[{"Timeout",60000}]},
{items,{"Memory Usage",
[{"Allocated",8735510528},
{"Total",15364091904}]}},
{items,{"Worst Memory User",
[{"Pid",<0.7.0>},{"Memory",602280}]}}]
** Reason for termination ==
** {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},[stream]],
[]},
{os,start_port_srv_handle,1,[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,[{file,"os.erl"},{line,294}]}]}
[error_logger:error,2016-10-19T09:55:29.209-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: memsup:init/1
pid: <0.80.0>
registered_name: memsup
exception exit: {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}
in function gen_server:terminate/6 (gen_server.erl, line 744)
ancestors: [os_mon_sup,<0.77.0>]
messages: []
links: [<0.78.0>]
dictionary: []
trap_exit: true
status: running
heap_size: 987
stack_size: 27
reductions: 21799
neighbours:
[error_logger:error,2016-10-19T09:55:29.209-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,ns_heart_sup}
Context: child_terminated
Reason: {{enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]},
{gen_server,call,[memsup,get_system_memory_data,infinity]}}
Offender: [{pid,<0.238.0>},
{name,ns_heart_slow_updater},
{mfargs,{ns_heart,start_link_slow_updater,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:error,2016-10-19T09:55:29.209-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,os_mon_sup}
Context: child_terminated
Reason: {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}
Offender: [{pid,<0.80.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:29.210-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_heart_sup}
started: [{pid,<0.885.0>},
{name,ns_heart_slow_updater},
{mfargs,{ns_heart,start_link_slow_updater,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:29.210-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,os_mon_sup}
started: [{pid,<0.886.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:error,2016-10-19T09:55:29.210-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]** Generic server memsup terminating
** Last message in was {'EXIT',<0.887.0>,
{enfile,
[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}}
** When Server state == {state,{unix,darwin},
false,undefined,undefined,false,60000,30000,
0.8,0.05,<0.887.0>,#Ref<0.0.0.6421>,undefined,
[reg],
[]}
** Reason for termination ==
** {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},[stream]],
[]},
{os,start_port_srv_handle,1,[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,[{file,"os.erl"},{line,294}]}]}
[error_logger:error,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: memsup:init/1
pid: <0.886.0>
registered_name: memsup
exception exit: {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}
in function gen_server:terminate/6 (gen_server.erl, line 744)
ancestors: [os_mon_sup,<0.77.0>]
messages: []
links: [<0.78.0>]
dictionary: []
trap_exit: true
status: running
heap_size: 610
stack_size: 27
reductions: 201
neighbours:
[ns_server:debug,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:<0.513.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {master_activity_events,<0.512.0>} exited with reason killed
[ns_server:info,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:mb_master<0.426.0>:mb_master:terminate:299]Synchronously shutting down child mb_master_sup
[error_logger:error,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,os_mon_sup}
Context: child_terminated
Reason: {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}
Offender: [{pid,<0.886.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:<0.418.0>:restartable:shutdown_child:120]Successfully terminated process <0.426.0>
[ns_server:debug,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:<0.427.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.426.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:<0.403.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.402.0>} exited with reason shutdown
[error_logger:info,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,os_mon_sup}
started: [{pid,<0.889.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:<0.378.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.377.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:<0.376.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_node_disco_events,<0.374.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:<0.375.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.374.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:<0.373.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_node_disco_events,<0.371.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:<0.372.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.371.0>} exited with reason shutdown
[error_logger:error,2016-10-19T09:55:29.211-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]** Generic server memsup terminating
** Last message in was {'EXIT',<0.890.0>,
{enfile,
[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}}
** When Server state == {state,{unix,darwin},
false,undefined,undefined,false,60000,30000,
0.8,0.05,<0.890.0>,#Ref<0.0.0.6426>,undefined,
[reg],
[]}
** Reason for termination ==
** {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},[stream]],
[]},
{os,start_port_srv_handle,1,[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,[{file,"os.erl"},{line,294}]}]}
[error_logger:error,2016-10-19T09:55:29.212-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: memsup:init/1
pid: <0.889.0>
registered_name: memsup
exception exit: {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}
in function gen_server:terminate/6 (gen_server.erl, line 744)
ancestors: [os_mon_sup,<0.77.0>]
messages: []
links: [<0.78.0>]
dictionary: []
trap_exit: true
status: running
heap_size: 610
stack_size: 27
reductions: 217
neighbours:
[error_logger:error,2016-10-19T09:55:29.212-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,os_mon_sup}
Context: child_terminated
Reason: {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}
Offender: [{pid,<0.889.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:29.212-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,os_mon_sup}
started: [{pid,<0.892.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:error,2016-10-19T09:55:29.212-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]** Generic server memsup terminating
** Last message in was {'EXIT',<0.893.0>,
{enfile,
[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}}
** When Server state == {state,{unix,darwin},
false,undefined,undefined,false,60000,30000,
0.8,0.05,<0.893.0>,#Ref<0.0.0.6431>,undefined,
[reg],
[]}
** Reason for termination ==
** {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},[stream]],
[]},
{os,start_port_srv_handle,1,[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,[{file,"os.erl"},{line,294}]}]}
[error_logger:error,2016-10-19T09:55:29.213-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: memsup:init/1
pid: <0.892.0>
registered_name: memsup
exception exit: {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}
in function gen_server:terminate/6 (gen_server.erl, line 744)
ancestors: [os_mon_sup,<0.77.0>]
messages: []
links: [<0.78.0>]
dictionary: []
trap_exit: true
status: running
heap_size: 610
stack_size: 27
reductions: 181
neighbours:
[error_logger:error,2016-10-19T09:55:29.213-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,os_mon_sup}
Context: child_terminated
Reason: {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}
Offender: [{pid,<0.892.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:29.213-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,os_mon_sup}
started: [{pid,<0.895.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:error,2016-10-19T09:55:29.214-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]** Generic server memsup terminating
** Last message in was {'EXIT',<0.896.0>,
{enfile,
[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}}
** When Server state == {state,{unix,darwin},
false,undefined,undefined,false,60000,30000,
0.8,0.05,<0.896.0>,#Ref<0.0.0.6439>,undefined,
[reg],
[]}
** Reason for termination ==
** {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},[stream]],
[]},
{os,start_port_srv_handle,1,[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,[{file,"os.erl"},{line,294}]}]}
[error_logger:error,2016-10-19T09:55:29.214-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: memsup:init/1
pid: <0.895.0>
registered_name: memsup
exception exit: {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}
in function gen_server:terminate/6 (gen_server.erl, line 744)
ancestors: [os_mon_sup,<0.77.0>]
messages: []
links: [<0.78.0>]
dictionary: []
trap_exit: true
status: running
heap_size: 610
stack_size: 27
reductions: 197
neighbours:
[error_logger:error,2016-10-19T09:55:29.214-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,os_mon_sup}
Context: child_terminated
Reason: {enfile,[{erlang,open_port,
[{spawn,"/bin/sh -s unix:cmd 2>&1"},
[stream]],
[]},
{os,start_port_srv_handle,1,
[{file,"os.erl"},{line,278}]},
{os,start_port_srv_loop,0,
[{file,"os.erl"},{line,294}]}]}
Offender: [{pid,<0.895.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:error,2016-10-19T09:55:29.215-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,os_mon_sup}
Context: shutdown
Reason: reached_max_restart_intensity
Offender: [{pid,<0.895.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:29.215-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
application: os_mon
exited: shutdown
type: permanent
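The crash loop above is descriptor exhaustion rather than a fault in memsup itself: each time memsup samples memory it calls open_port on "/bin/sh -s unix:cmd", open_port fails with enfile (no free entries in the system file table), the supervisor restarts memsup, and after enough failures inside the restart window os_mon_sup gives up with reached_max_restart_intensity and the os_mon application shuts down. A quick diagnostic for that condition (not part of Couchbase; assumes a Unix host):

    import resource
    import subprocess

    # How many descriptors may this process hold open?
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    print(f"RLIMIT_NOFILE: soft={soft} hard={hard}")

    # The same helper shell memsup spawns; if this raises EMFILE/ENFILE,
    # the host is out of descriptors and os_mon will crash-loop as logged.
    subprocess.run(["/bin/sh", "-c", "echo ok"], check=True)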
[ns_server:debug,2016-10-19T09:55:29.296-07:00,ns_1@127.0.0.1:<0.396.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.394.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.393-07:00,ns_1@127.0.0.1:<0.393.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.392.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.478-07:00,ns_1@127.0.0.1:<0.390.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.388.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.555-07:00,ns_1@127.0.0.1:<0.387.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.386.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.555-07:00,ns_1@127.0.0.1:<0.385.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.383.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.555-07:00,ns_1@127.0.0.1:<0.384.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.383.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.555-07:00,ns_1@127.0.0.1:<0.358.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.357.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.624-07:00,ns_1@127.0.0.1:<0.352.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.351.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.625-07:00,ns_1@127.0.0.1:<0.350.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.349.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:29.711-07:00,ns_1@127.0.0.1:<0.344.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.343.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.169-07:00,ns_1@127.0.0.1:<0.341.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.340.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.261-07:00,ns_1@127.0.0.1:<0.335.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.334.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.261-07:00,ns_1@127.0.0.1:<0.332.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ale_stats_events,<0.330.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.261-07:00,ns_1@127.0.0.1:<0.333.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.330.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.333-07:00,ns_1@127.0.0.1:<0.497.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.496.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.333-07:00,ns_1@127.0.0.1:<0.499.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.498.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.577-07:00,ns_1@127.0.0.1:<0.494.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.493.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.577-07:00,ns_1@127.0.0.1:<0.492.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.490.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.577-07:00,ns_1@127.0.0.1:<0.491.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.490.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.578-07:00,ns_1@127.0.0.1:replication_manager-default<0.485.0>:replication_manager:terminate:105]Replication manager died {shutdown,{state,"default",dcp,[],undefined}}
[ns_server:debug,2016-10-19T09:55:30.578-07:00,ns_1@127.0.0.1:<0.481.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {bucket_info_cache_invalidations,<0.480.0>} exited with reason shutdown
[user:info,2016-10-19T09:55:30.578-07:00,ns_1@127.0.0.1:ns_memcached-default<0.478.0>:ns_memcached:terminate:804]Shutting down bucket "default" on 'ns_1@127.0.0.1' for server shutdown
[ns_server:info,2016-10-19T09:55:30.578-07:00,ns_1@127.0.0.1:ns_memcached-default<0.478.0>:ns_memcached:terminate:816]This bucket shutdown is not due to bucket deletion or reconfiguration. Doing nothing
[ns_server:debug,2016-10-19T09:55:30.578-07:00,ns_1@127.0.0.1:ns_memcached-default<0.478.0>:ns_memcached:terminate:842]Terminated.
[ns_server:debug,2016-10-19T09:55:30.642-07:00,ns_1@127.0.0.1:<0.466.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.465.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.642-07:00,ns_1@127.0.0.1:<0.468.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.467.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.884-07:00,ns_1@127.0.0.1:<0.463.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.462.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.884-07:00,ns_1@127.0.0.1:<0.461.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.459.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.884-07:00,ns_1@127.0.0.1:<0.460.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.459.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.884-07:00,ns_1@127.0.0.1:replication_manager-locked<0.454.0>:replication_manager:terminate:105]Replication manager died {shutdown,{state,"locked",dcp,[],undefined}}
[ns_server:debug,2016-10-19T09:55:30.885-07:00,ns_1@127.0.0.1:<0.450.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {bucket_info_cache_invalidations,<0.449.0>} exited with reason shutdown
[user:info,2016-10-19T09:55:30.885-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.447.0>:ns_memcached:terminate:804]Shutting down bucket "locked" on 'ns_1@127.0.0.1' for server shutdown
[ns_server:info,2016-10-19T09:55:30.885-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.447.0>:ns_memcached:terminate:816]This bucket shutdown is not due to bucket deletion or reconfiguration. Doing nothing
[ns_server:debug,2016-10-19T09:55:30.885-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.447.0>:ns_memcached:terminate:842]Terminated.
[ns_server:debug,2016-10-19T09:55:30.958-07:00,ns_1@127.0.0.1:<0.437.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.436.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:30.958-07:00,ns_1@127.0.0.1:<0.435.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.434.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.221-07:00,ns_1@127.0.0.1:<0.432.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.431.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.221-07:00,ns_1@127.0.0.1:<0.430.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.428.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.221-07:00,ns_1@127.0.0.1:<0.429.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.428.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.221-07:00,ns_1@127.0.0.1:replication_manager-test<0.407.0>:replication_manager:terminate:105]Replication manager died {shutdown,{state,"test",dcp,[],undefined}}
[ns_server:debug,2016-10-19T09:55:31.221-07:00,ns_1@127.0.0.1:<0.391.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {bucket_info_cache_invalidations,<0.389.0>} exited with reason shutdown
[user:info,2016-10-19T09:55:31.221-07:00,ns_1@127.0.0.1:ns_memcached-test<0.381.0>:ns_memcached:terminate:804]Shutting down bucket "test" on 'ns_1@127.0.0.1' for server shutdown
[ns_server:info,2016-10-19T09:55:31.222-07:00,ns_1@127.0.0.1:ns_memcached-test<0.381.0>:ns_memcached:terminate:816]This bucket shutdown is not due to bucket deletion or reconfiguration. Doing nothing
[ns_server:debug,2016-10-19T09:55:31.222-07:00,ns_1@127.0.0.1:ns_memcached-test<0.381.0>:ns_memcached:terminate:842]Terminated.
[ns_server:debug,2016-10-19T09:55:31.222-07:00,ns_1@127.0.0.1:<0.329.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.328.0>} exited with reason shutdown
[error_logger:error,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,ns_bucket_sup}
Context: shutdown_error
Reason: normal
Offender: [{pid,<0.329.0>},
{name,buckets_observing_subscription},
{mfargs,{ns_bucket_sup,subscribe_on_config_events,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.321.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.320.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.365.0>:remote_monitors:handle_down:158]Caller of remote monitor <0.310.0> died with shutdown. Exiting
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.366.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.310.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.299.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.298.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.296.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.295.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.292.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.291.0>} exited with reason killed
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.360.0>:remote_monitors:handle_down:158]Caller of remote monitor <0.291.0> died with killed. Exiting
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.286.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {json_rpc_events,<0.285.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.288.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.285.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.287.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_node_disco_events,<0.285.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.553.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.279.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.543.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.274.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.554.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.348.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.223-07:00,ns_1@127.0.0.1:<0.551.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.346.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.224-07:00,ns_1@127.0.0.1:<0.541.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.273.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.224-07:00,ns_1@127.0.0.1:<0.552.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.280.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.224-07:00,ns_1@127.0.0.1:<0.259.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.258.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.224-07:00,ns_1@127.0.0.1:<0.550.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.313.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.224-07:00,ns_1@127.0.0.1:<0.256.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.255.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.225-07:00,ns_1@127.0.0.1:<0.243.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.242.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:31.225-07:00,ns_1@127.0.0.1:<0.239.0>:restartable:shutdown_child:120]Successfully terminated process <0.240.0>
[ns_server:debug,2016-10-19T09:55:31.227-07:00,ns_1@127.0.0.1:json_rpc_connection-goxdcr-cbauth<0.517.0>:json_rpc_connection:handle_info:128]Socket closed
[user:debug,2016-10-19T09:55:31.229-07:00,ns_1@127.0.0.1:<0.203.0>:ns_log:crash_consumption_loop:70]Service 'goxdcr' exited with status 0. Restarting. Messages: GlobalSettingChangeListener 2016-10-19T09:55:31.225-07:00 [INFO] Started MetakvChangeListener GlobalSettingChangeListener
ReplicationManager 2016-10-19T09:55:31.225-07:00 [INFO] Replication manager is exiting...
ReplicationSpecChangeListener 2016-10-19T09:55:31.225-07:00 [INFO] metakv.RunObserveChildren failed, err=Get http://127.0.0.1:8091/_metakv/replicationSpec/?feed=continuous: dial tcp 127.0.0.1:8091: getsockopt: connection refused
ReplicationSpecChangeListener 2016-10-19T09:55:31.225-07:00 [INFO] Started MetakvChangeListener ReplicationSpecChangeListener
ReplicationManager 2016-10-19T09:55:31.225-07:00 [INFO] Replication manager is already in the process of stopping, no-op on this stop request
[ns_server:debug,2016-10-19T09:55:32.227-07:00,ns_1@127.0.0.1:<0.236.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {buckets_events,<0.235.0>} exited with reason killed
[error_logger:error,2016-10-19T09:55:32.227-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,ns_heart_sup}
Context: shutdown_error
Reason: killed
Offender: [{pid,<0.235.0>},
{name,ns_heart},
{mfargs,{ns_heart,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:32.228-07:00,ns_1@127.0.0.1:<0.227.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.225.0>} exited with reason killed
[ns_server:debug,2016-10-19T09:55:32.228-07:00,ns_1@127.0.0.1:<0.224.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.223.0>} exited with reason killed
[ns_server:debug,2016-10-19T09:55:32.228-07:00,ns_1@127.0.0.1:<0.216.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events_local,<0.215.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:32.228-07:00,ns_1@127.0.0.1:<0.205.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.204.0>} exited with reason shutdown
[error_logger:error,2016-10-19T09:55:32.228-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: gen_event:init_it/6
pid: <0.226.0>
registered_name: bucket_info_cache_invalidations
exception exit: killed
in function gen_event:terminate_server/4 (gen_event.erl, line 320)
ancestors: [bucket_info_cache,ns_server_sup,ns_server_nodes_sup,
<0.155.0>,ns_server_cluster_sup,<0.88.0>]
messages: []
links: []
dictionary: []
trap_exit: true
status: running
heap_size: 610
stack_size: 27
reductions: 256
neighbours:
[ns_server:debug,2016-10-19T09:55:32.230-07:00,ns_1@127.0.0.1:ns_couchdb_port<0.182.0>:ns_port_server:terminate:182]Sending shutdown to port ns_couchdb
[ns_server:debug,2016-10-19T09:55:32.230-07:00,ns_1@127.0.0.1:<0.196.0>:remote_monitors:handle_down:158]Caller of remote monitor <0.183.0> died with shutdown. Exiting
[ns_server:debug,2016-10-19T09:55:32.241-07:00,ns_1@127.0.0.1:ns_couchdb_port<0.182.0>:ns_port_server:terminate:185]ns_couchdb has exited
[error_logger:info,2016-10-19T09:55:32.241-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{'EXIT',<0.189.0>,connection_closed}}
[ns_server:debug,2016-10-19T09:55:32.241-07:00,ns_1@127.0.0.1:<0.163.0>:restartable:shutdown_child:120]Successfully terminated process <0.165.0>
[ns_server:debug,2016-10-19T09:55:32.241-07:00,ns_1@127.0.0.1:<0.162.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.161.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:32.241-07:00,ns_1@127.0.0.1:<0.155.0>:restartable:shutdown_child:120]Successfully terminated process <0.156.0>
[ns_server:debug,2016-10-19T09:55:32.242-07:00,ns_1@127.0.0.1:<0.152.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.151.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:32.242-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:wait_saver:829]Done waiting for saver.
[ns_server:info,2016-10-19T09:55:34.822-07:00,nonode@nohost:<0.88.0>:ns_server:init_logging:151]Started & configured logging
[ns_server:info,2016-10-19T09:55:34.828-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]Static config terms:
[{error_logger_mf_dir,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{path_config_bindir,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/bin"},
{path_config_etcdir,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/couchbase"},
{path_config_libdir,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib"},
{path_config_datadir,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase"},
{path_config_tmpdir,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/tmp"},
{path_config_secdir,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/security"},
{nodefile,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.node"},
{loglevel_default,debug},
{loglevel_couchdb,info},
{loglevel_ns_server,debug},
{loglevel_error_logger,debug},
{loglevel_user,debug},
{loglevel_menelaus,debug},
{loglevel_ns_doctor,debug},
{loglevel_stats,debug},
{loglevel_rebalance,debug},
{loglevel_cluster,debug},
{loglevel_views,debug},
{loglevel_mapreduce_errors,debug},
{loglevel_xdcr,debug},
{loglevel_xdcr_trace,error},
{loglevel_access,info},
{disk_sink_opts,
[{rotation,
[{compress,true},
{size,41943040},
{num_files,10},
{buffer_size_max,52428800}]}]},
{disk_sink_opts_xdcr_trace,
[{rotation,[{compress,false},{size,83886080},{num_files,5}]}]},
{net_kernel_verbosity,10}]
[ns_server:warn,2016-10-19T09:55:34.828-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter error_logger_mf_dir, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.828-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_bindir, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.828-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_etcdir, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.828-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_libdir, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.828-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_datadir, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.828-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_tmpdir, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.828-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_secdir, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.829-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter nodefile, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.829-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_default, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.829-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_couchdb, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.829-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_ns_server, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.829-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_error_logger, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.829-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_user, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.829-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_menelaus, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.829-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_ns_doctor, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.829-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_stats, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.829-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_rebalance, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.829-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_cluster, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.829-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_views, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.829-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_mapreduce_errors, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.830-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_xdcr, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.830-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_xdcr_trace, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.830-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_access, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.830-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter disk_sink_opts, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.830-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter disk_sink_opts_xdcr_trace, which is given from command line
[ns_server:warn,2016-10-19T09:55:34.830-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter net_kernel_verbosity, which is given from command line
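The disk_sink_opts rotation settings above are plain byte counts; decoded under the usual binary units they come to 40 MiB per file (ten files, compressed) for the regular sinks and 80 MiB per file (five files, uncompressed) for the xdcr_trace sink, with a 50 MiB write buffer. The arithmetic, checkable directly:

    MiB = 1024 * 1024
    assert 41943040 == 40 * MiB        # disk sink rotation size
    assert 52428800 == 50 * MiB        # buffer_size_max
    assert 83886080 == 80 * MiB        # xdcr_trace sink rotation size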
[ns_server:warn,2016-10-19T09:55:34.832-07:00,nonode@nohost:<0.88.0>:ns_server:start:79]Could not lock myself into memory: {error,enotsup}. Ignoring.
[error_logger:info,2016-10-19T09:55:34.834-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.129.0>},
{name,local_tasks},
{mfargs,{local_tasks,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:34.838-07:00,nonode@nohost:ns_server_cluster_sup<0.128.0>:log_os_info:start_link:25]OS type: {unix,darwin} Version: {14,3,0}
Runtime info: [{otp_release,"R16B03-1"},
{erl_version,"5.10.4.0.0.1"},
{erl_version_long,
"Erlang R16B03-1 (erts-5.10.4.0.0.1) [source-00852da] [64-bit] [smp:8:8] [async-threads:16] [kernel-poll:true]\n"},
{system_arch_raw,"x86_64-apple-darwin13.4.0"},
{system_arch,"x86_64-apple-darwin13.4.0"},
{localtime,{{2016,10,19},{9,55,34}}},
{memory,
[{total,26094104},
{processes,10387720},
{processes_used,10386448},
{system,15706384},
{atom,331249},
{atom_used,309719},
{binary,62416},
{code,7684198},
{ets,2452584}]},
{loaded,
[ns_info,log_os_info,local_tasks,restartable,
ns_server_cluster_sup,mlockall,calendar,
ale_default_formatter,'ale_logger-metakv',
'ale_logger-rebalance','ale_logger-xdcr_trace',
'ale_logger-menelaus','ale_logger-stats',
'ale_logger-access','ale_logger-ns_server',
'ale_logger-user','ale_logger-ns_doctor',
'ale_logger-cluster','ale_logger-xdcr',otp_internal,
ale_stderr_sink,ns_log_sink,filelib,ale_disk_sink,misc,
couch_util,ns_server,io_lib_fread,cpu_sup,memsup,disksup,
os_mon,io,release_handler,overload,alarm_handler,sasl,
timer,tftp_sup,httpd_sup,httpc_handler_sup,httpc_cookie,
inets_trace,httpc_manager,httpc,httpc_profile_sup,
httpc_sup,ftp_sup,inets_sup,inets_app,ssl,lhttpc_manager,
lhttpc_sup,lhttpc,tls_connection_sup,ssl_session_cache,
ssl_pkix_db,ssl_manager,ssl_sup,ssl_app,crypto_server,
crypto_sup,crypto_app,ale_error_logger_handler,
'ale_logger-ale_logger','ale_logger-error_logger',
beam_opcodes,beam_dict,beam_asm,beam_validator,beam_z,
beam_flatten,beam_trim,beam_receive,beam_bsm,beam_peep,
beam_dead,beam_split,beam_type,beam_bool,beam_except,
beam_clean,beam_utils,beam_block,beam_jump,beam_a,
v3_codegen,v3_life,v3_kernel,sys_core_dsetel,erl_bifs,
sys_core_fold,cerl_trees,sys_core_inline,core_lib,cerl,
v3_core,erl_bits,erl_expand_records,sys_pre_expand,sofs,
erl_internal,sets,ordsets,erl_lint,compile,
dynamic_compile,ale_utils,io_lib_pretty,io_lib_format,
io_lib,ale_codegen,dict,ale,ale_dynamic_sup,ale_sup,
ale_app,epp,ns_bootstrap,child_erlang,file_io_server,
orddict,erl_eval,file,c,kernel_config,user_sup,
supervisor_bridge,standard_error,code_server,unicode,
hipe_unified_loader,gb_sets,ets,binary,code,file_server,
net_kernel,global_group,erl_distribution,filename,os,
inet_parse,inet,inet_udp,inet_config,inet_db,global,
gb_trees,rpc,supervisor,kernel,application_master,sys,
application,gen_server,erl_parse,proplists,erl_scan,lists,
application_controller,proc_lib,gen,gen_event,
error_logger,heart,error_handler,erts_internal,erlang,
erl_prim_loader,prim_zip,zlib,prim_file,prim_inet,
prim_eval,init,otp_ring0]},
{applications,
[{lhttpc,"Lightweight HTTP Client","1.3.0"},
{os_mon,"CPO CXC 138 46","2.2.14"},
{public_key,"Public key infrastructure","0.21"},
{asn1,"The Erlang ASN1 compiler version 2.0.4","2.0.4"},
{kernel,"ERTS CXC 138 10","2.16.4"},
{ale,"Another Logger for Erlang","4.6.0-3391-enterprise"},
{inets,"INETS CXC 138 49","5.9.8"},
{ns_server,"Couchbase server","4.6.0-3391-enterprise"},
{crypto,"CRYPTO version 2","3.2"},
{ssl,"Erlang/OTP SSL application","5.3.3"},
{sasl,"SASL CXC 138 11","2.3.4"},
{stdlib,"ERTS CXC 138 10","1.19.4"}]},
{pre_loaded,
[erts_internal,erlang,erl_prim_loader,prim_zip,zlib,
prim_file,prim_inet,prim_eval,init,otp_ring0]},
{process_count,94},
{node,nonode@nohost},
{nodes,[]},
{registered,
[lhttpc_manager,standard_error_sup,release_handler,
code_server,httpd_sup,ale_dynamic_sup,'sink-disk_metakv',
overload,application_controller,'sink-disk_access_int',
alarm_handler,'sink-disk_access',kernel_safe_sup,
'sink-xdcr_trace',standard_error,'sink-disk_reports',
error_logger,'sink-disk_stats',timer_server,
'sink-disk_xdcr_errors',crypto_server,sasl_safe_sup,
crypto_sup,'sink-disk_xdcr','sink-disk_debug',tftp_sup,
os_mon_sup,'sink-disk_error',tls_connection_sup,cpu_sup,
ssl_sup,memsup,'sink-disk_default',init,disksup,inet_db,
httpc_sup,rex,ssl_manager,kernel_sup,httpc_profile_sup,
global_name_server,httpc_manager,ns_server_cluster_sup,
httpc_handler_sup,file_server_2,os_cmd_port_creator,
global_group,ftp_sup,sasl_sup,'sink-stderr',
ale_stats_events,ale,erl_prim_loader,inets_sup,
'sink-ns_log',local_tasks,lhttpc_sup,ale_sup]},
{cookie,nocookie},
{wordsize,8},
{wall_clock,1}]
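NOTE: the runtime dump above is plain erlang:system_info/erlang:memory data, so it can be reproduced from a shell attached to the node; all of the following are stock BIFs or shell builtins:

    erlang:system_info(otp_release).          %% "R16B03-1"
    erlang:system_info(system_architecture).  %% "x86_64-apple-darwin13.4.0"
    erlang:memory().                          %% [{total,...},{processes,...},...]
    erlang:system_info(process_count).        %% 94 at the time of the dump
    registered().                             %% the registered-name list above
    erlang:get_cookie().                      %% nocookie: distribution not yet up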
[ns_server:info,2016-10-19T09:55:34.844-07:00,nonode@nohost:ns_server_cluster_sup<0.128.0>:log_os_info:start_link:27]Manifest:
["","",
" ",
" ",
" ",
" ",
" ",
" "," "," ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" "," ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
""]
[error_logger:info,2016-10-19T09:55:34.847-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.130.0>},
{name,timeout_diag_logger},
{mfargs,{timeout_diag_logger,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:34.848-07:00,nonode@nohost:dist_manager<0.131.0>:dist_manager:read_address_config_from_path:86]Reading ip config from "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ip_start"
[ns_server:info,2016-10-19T09:55:34.848-07:00,nonode@nohost:dist_manager<0.131.0>:dist_manager:read_address_config_from_path:86]Reading ip config from "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ip"
[ns_server:info,2016-10-19T09:55:34.848-07:00,nonode@nohost:dist_manager<0.131.0>:dist_manager:init:163]ip config not found. Looks like we're a brand-new node
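NOTE: dist_manager treats the absence of these ip files as the brand-new-node case. A minimal sketch of that probe, assuming the same file layout (read_ip_config/1 is an illustrative name, not the actual function):

    %% Illustrative: a missing ip file marks a fresh node.
    read_ip_config(Path) ->
        case file:read_file(Path) of
            {ok, Bin}       -> {ok, string:strip(binary_to_list(Bin), right, $\n)};
            {error, enoent} -> brand_new
        end.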
[error_logger:info,2016-10-19T09:55:34.851-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,inet_gethost_native_sup}
started: [{pid,<0.133.0>},{mfa,{inet_gethost_native,init,[[]]}}]
[error_logger:info,2016-10-19T09:55:34.851-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.132.0>},
{name,inet_gethost_native_sup},
{mfargs,{inet_gethost_native,start_link,[]}},
{restart_type,temporary},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:34.860-07:00,nonode@nohost:dist_manager<0.131.0>:dist_manager:bringup:214]Attempting to bring up net_kernel with name 'ns_1@127.0.0.1'
[error_logger:info,2016-10-19T09:55:34.864-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.135.0>},
{name,erl_epmd},
{mfargs,{erl_epmd,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:34.864-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.136.0>},
{name,auth},
{mfargs,{auth,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:34.865-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:configure_net_kernel:255]Set net_kernel verbosity to 10 -> 0
[error_logger:info,2016-10-19T09:55:34.865-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.137.0>},
{name,net_kernel},
{mfargs,
{net_kernel,start_link,
[['ns_1@127.0.0.1',longnames]]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:34.865-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_sup}
started: [{pid,<0.134.0>},
{name,net_sup_dynamic},
{mfargs,
{erl_distribution,start_link,
[['ns_1@127.0.0.1',longnames]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,supervisor}]
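NOTE: the net_sup reports above (erl_epmd, auth, net_kernel) are what distribution bringup looks like; done by hand from a shell it is roughly the following, using stock kernel APIs (net_kernel:verbose/1 matches the "verbosity to 10 -> 0" line, and the cookie value is the one from the otp key in the config dump further below):

    net_kernel:start(['ns_1@127.0.0.1', longnames]). %% spawns erl_epmd, auth, net_kernel
    net_kernel:verbose(10).                          %% returns the previous level (0 here)
    erlang:set_cookie(node(), oxqibayfkfbrogxo).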
[ns_server:info,2016-10-19T09:55:34.866-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:save_node:147]saving node to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.node"
[ns_server:debug,2016-10-19T09:55:34.876-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:bringup:228]Attempted to save node name to disk: ok
[ns_server:debug,2016-10-19T09:55:34.876-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:wait_for_node:235]Waiting for connection to node 'babysitter_of_ns_1@127.0.0.1' to be established
[error_logger:info,2016-10-19T09:55:34.876-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{connect,normal,'babysitter_of_ns_1@127.0.0.1'}}
[ns_server:debug,2016-10-19T09:55:34.879-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:wait_for_node:244]Observed node 'babysitter_of_ns_1@127.0.0.1' to come up
[error_logger:info,2016-10-19T09:55:34.882-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.131.0>},
{name,dist_manager},
{mfargs,{dist_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:34.883-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.142.0>},
{name,ns_cookie_manager},
{mfargs,{ns_cookie_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:34.883-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.143.0>},
{name,ns_cluster},
{mfargs,{ns_cluster,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:34.884-07:00,ns_1@127.0.0.1:ns_config_sup<0.144.0>:ns_config_sup:init:32]loading static ns_config from "/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/couchbase/config"
[error_logger:info,2016-10-19T09:55:34.884-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.145.0>},
{name,ns_config_events},
{mfargs,
{gen_event,start_link,[{local,ns_config_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:34.884-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.146.0>},
{name,ns_config_events_local},
{mfargs,
{gen_event,start_link,
[{local,ns_config_events_local}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:34.908-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:load_config:1070]Loading static config from "/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/couchbase/config"
[ns_server:info,2016-10-19T09:55:34.909-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:load_config:1084]Loading dynamic config from "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/config.dat"
[ns_server:debug,2016-10-19T09:55:34.913-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:load_config:1092]Here's the full dynamic config we loaded:
[[{buckets,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{18,63644101958}}]},
{configs,
[{"test",
[{repl_type,dcp},
{uuid,<<"7f7d4a28ca84a805edf9c899521eb18c">>},
{num_replicas,1},
{replica_index,false},
{ram_quota,524288000},
{auth_type,sasl},
{sasl_password,"*****"},
{autocompaction,false},
{purge_interval,undefined},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]},
{"locked",
[{eviction_policy,value_only},
{num_threads,3},
{flush_enabled,false},
{purge_interval,undefined},
{autocompaction,false},
{sasl_password,"*****"},
{auth_type,sasl},
{ram_quota,104857600},
{num_replicas,1},
{repl_type,dcp},
{uuid,<<"8515ae93e826e7c4389f3fd25fbb263e">>},
{replica_index,false},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]},
{"default",
[{repl_type,dcp},
{uuid,<<"b04d5897bd3c5329a82156f1b77c395d">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,104857600},
{auth_type,sasl},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]}]}]},
{vbucket_map_history,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012772}}]},
{[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]],
[{replication_topology,star},{tags,undefined},{max_slaves,10}]}]},
{alert_limits,
[{max_overhead_perc,50},{max_disk_used,90},{max_indexer_ram,75}]},
{audit,
[{auditd_enabled,false},
{rotate_interval,86400},
{rotate_size,20971520},
{disabled,[]},
{sync,[]},
{log_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"}]},
{auto_failover_cfg,[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{cert_and_pkey,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{<<"-----BEGIN CERTIFICATE-----\nMIIDAjCCAeqgAwIBAgIIFH6f01mhINIwDQYJKoZIhvcNAQELBQAwJDEiMCAGA1UE\nAxMZQ291Y2hiYXNlIFNlcnZlciAxMzBiNDVmMzAeFw0xMzAxMDEwMDAwMDBaFw00\nOTEyMzEyMzU5NTlaMCQxIjAgBgNVBAMTGUNvdWNoYmFzZSBTZXJ2ZXIgMTMwYjQ1\nZjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDINaiYz/zhTeD2h8Pa\nP015LetKMhey1yoE2L5H1wHK7pADeFRfqeoNunEvlxRWL/YjnqvRZPxrjdadlh7L\nVhZVke2blopHdhJjaHCvdI8R3BRBK4fLv5m4c0SzdE6bvk1QS+T3rZyzxUbMtB0g\nEq2ZPed8JdQFqO0Bo1JuXJx4/q9tjhvbHUVjRX9QHL3nClC3qVemVjTCKbNqZWv8\n5qZmH/X5DWkyNFKj6HzE20qFWYa8d9tmdeo9zaGVMzCFCOXKPGeHkW/GpJWxK3FM\n/BWdgq5nonb+y3ufSE1JBJjXCO6JipXf4OKRB54009m9hAmJJK9sPVeH9NMnVhS7\naEDXAgMBAAGjODA2MA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEFBQcD\nATAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCAdca3XDbl7heN\n6vk0VTcrrZCLHDY+PLTFcnGq2xv84APOrvwQJEH9qnCw0/czkn9UW+27Ix2wjkeP\nFbUdXKvFDpU0LQgpkdZ+BKXQlX0ezKG+StpUODxYdDnUDCLzRLJsg0GgEODysPAK\nwHiA3X5d+UvNE/Z7TP5ASyzXnypuR8jhXCdEQ0o8mLQMx4I4Xd2sHFz2x6qO9i8f\nMPEJ076QTj5+RyI4BDAgUeWns/ZTKX/bi+FXPkRZ8QWkxIrSkNSdmgvPmMBzFluv\nDhFwtFBMQovmICfkT5TYmtwYsqZgh32v5FZOLUlHOR29R1dKOOuyCbIyqlTjWCYZ\n1j3GlmIC\n-----END CERTIFICATE-----\n">>,
<<"*****">>}]},
{cluster_compat_version,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{6,63644012660}}]},
4,6]},
{drop_request_memory_threshold_mib,undefined},
{email_alerts,
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},{pass,"*****"},{host,"localhost"},{port,25},{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,
auto_failover_disabled,ip,disk,overhead,ep_oom_errors,
ep_item_commit_failed,audit_dropped_events,indexer_ram_max_usage]}]},
{fts_memory_quota,512},
{goxdcr_upgrade,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
'_deleted']},
{index_aware_rebalance_disabled,false},
{max_bucket_count,10},
{memcached,[]},
{memory_quota,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]}|
1024]},
{nodes_wanted,['ns_1@127.0.0.1']},
{otp,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]},
{cookie,oxqibayfkfbrogxo}]},
{read_only_user_creds,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
null]},
{remote_clusters,[]},
{replication,[{enabled,true}]},
{rest,[{port,8091}]},
{rest_creds,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012784}}]}|
{"Administrator",{password,"*****"}}]},
{roles_definitions,
[{admin,[],
[{name,<<"Admin">>},
{desc,<<"Can manage ALL cluster features including security.">>}],
[{[],all}]},
{ro_admin,[],
[{name,<<"Read Only Admin">>},
{desc,<<"Can view ALL cluster features.">>}],
[{[{bucket,any},password],none},
{[{bucket,any},data],none},
{[admin,security],[read]},
{[admin],none},
{[],[read]}]},
{cluster_admin,[],
[{name,<<"Cluster Admin">>},
{desc,<<"Can manage all cluster features EXCEPT security.">>}],
[{[admin],none},{[],all}]},
{bucket_admin,
[bucket_name],
[{name,<<"Bucket Admin">>},
{desc,
<<"Can manage ALL bucket features for specified buckets (incl. start/stop XDCR)">>}],
[{[{bucket,bucket_name},xdcr],[read,execute]},
{[{bucket,bucket_name}],all},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{bucket_sasl,
[bucket_name],
[],
[{[{bucket,bucket_name},data],all},
{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name}],[read,flush]},
{[pools],[read]}]},
{views_admin,
[bucket_name],
[{name,<<"Views Admin">>},
{desc,<<"Can manage views for specified buckets">>}],
[{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{replication_admin,[],
[{name,<<"Replication Admin">>},
{desc,<<"Can manage ONLY XDCR features (cluster AND bucket level)">>}],
[{[{bucket,any},xdcr],all},
{[{bucket,any},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],all},
{[admin],none},
{[],[read]}]}]},
{server_groups,
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{settings,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012777}}]},
{stats,[{send_stats,false}]}]},
{uuid,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012784}}]}|
<<"561b20f339d4184270a7de0b1c1de1b0">>]},
{{couchdb,max_parallel_indexers},4},
{{couchdb,max_parallel_replica_indexers},2},
{{local_changes_count,<<"eac84bf2ecf69c83ca0268ac5aac465d">>},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{35,63644101958}}]}]},
{{metakv,<<"/indexing/settings/config">>},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{4,63644012784}}]}|
<<"{\"indexer.settings.compaction.days_of_week\":\"Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday\",\"indexer.settings.compaction.interval\":\"00:00,00:00\",\"indexer.settings.compaction.compaction_mode\":\"circular\",\"indexer.settings.persisted_snapshot.interval\":5000,\"indexer.settings.log_level\":\"info\",\"indexer.settings.compaction.min_frag\":30,\"indexer.settings.inmemory_snapshot.interval\":200,\"indexer.settings.max_cpu_percent\":0,\"indexer.settings.storage_mode\":\"forestdb\",\"indexer.settings.recovery.max_rollbacks\":5,\"indexer.settings.memory_quota\":536870912,\"indexer.settings.compaction.abort_exceed_interval\":false}">>]},
{{request_limit,capi},undefined},
{{request_limit,rest},undefined},
{{service_map,fts},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}]},
{{service_map,index},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']},
{{service_map,n1ql},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']},
{{node,'ns_1@127.0.0.1',audit},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]},
{{node,'ns_1@127.0.0.1',capi_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8092]},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{check_interval,30},
{min_db_file_size,131072},
{min_view_file_size,20971520}]},
{{node,'ns_1@127.0.0.1',config_version},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{4,5}]},
{{node,'ns_1@127.0.0.1',fts_http_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8094]},
{{node,'ns_1@127.0.0.1',indexer_admin_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9100]},
{{node,'ns_1@127.0.0.1',indexer_http_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9102]},
{{node,'ns_1@127.0.0.1',indexer_scan_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9101]},
{{node,'ns_1@127.0.0.1',indexer_stcatchup_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9104]},
{{node,'ns_1@127.0.0.1',indexer_stinit_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9103]},
{{node,'ns_1@127.0.0.1',indexer_stmaint_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9105]},
{{node,'ns_1@127.0.0.1',is_enterprise},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
true]},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@127.0.0.1',ldap_enabled},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
false]},
{{node,'ns_1@127.0.0.1',membership},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
active]},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11210},
{dedicated_port,11209},
{ssl_port,11207},
{admin_user,"_admin"},
{admin_pass,"*****"},
{engines,
[{membase,
[{engine,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/ep.so"},
{static_config_string,"failpartialwarmup=false"}]},
{memcached,
[{engine,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{config_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached.json"},
{audit_file,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/audit.json"},
{log_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003}]},
{{node,'ns_1@127.0.0.1',memcached_config},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{[{interfaces,
{memcached_config_mgr,omit_missing_mcd_ports,
[{[{host,<<"*">>},{port,port},{maxconn,maxconn}]},
{[{host,<<"*">>},
{port,dedicated_port},
{maxconn,dedicated_port_maxconn}]},
{[{host,<<"*">>},
{port,ssl_port},
{maxconn,maxconn},
{ssl,
{[{key,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-key.pem">>},
{cert,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-cert.pem">>}]}}]}]}},
{ssl_cipher_list,{"~s",[ssl_cipher_list]}},
{ssl_minimum_protocol,{memcached_config_mgr,ssl_minimum_protocol,[]}},
{connection_idle_time,connection_idle_time},
{breakpad,
{[{enabled,breakpad_enabled},
{minidump_dir,{memcached_config_mgr,get_minidump_dir,[]}}]}},
{extensions,
[{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/stdin_term_handler.so">>},
{config,<<>>}]},
{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/file_logger.so">>},
{config,
{"cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]}}]}]},
{admin,{"~s",[admin_user]}},
{verbosity,verbosity},
{audit_file,{"~s",[audit_file]}},
{dedupe_nmvb_maps,dedupe_nmvb_maps}]}]},
{{node,'ns_1@127.0.0.1',memcached_defaults},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{maxconn,30000},
{dedicated_port_maxconn,5000},
{ssl_cipher_list,"HIGH"},
{connection_idle_time,0},
{verbosity,0},
{breakpad_enabled,true},
{breakpad_minidump_dir_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/crash"},
{dedupe_nmvb_maps,false}]},
{{node,'ns_1@127.0.0.1',moxi},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11211},
{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{filename,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ns_log"}]},
{{node,'ns_1@127.0.0.1',port_servers},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]},
{{node,'ns_1@127.0.0.1',projector_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9999]},
{{node,'ns_1@127.0.0.1',query_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8093]},
{{node,'ns_1@127.0.0.1',rest},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,8091},
{port_meta,global}]},
{{node,'ns_1@127.0.0.1',services},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]},
index,kv,n1ql]},
{{node,'ns_1@127.0.0.1',ssl_capi_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18092]},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
11214]},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
11215]},
{{node,'ns_1@127.0.0.1',ssl_query_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18093]},
{{node,'ns_1@127.0.0.1',ssl_rest_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18091]},
{{node,'ns_1@127.0.0.1',stop_xdcr},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012660}}]}|
'_deleted']},
{{node,'ns_1@127.0.0.1',uuid},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
<<"eac84bf2ecf69c83ca0268ac5aac465d">>]},
{{node,'ns_1@127.0.0.1',xdcr_rest_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9998]}]]
[ns_server:info,2016-10-19T09:55:34.920-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:load_config:1113]Here's the full dynamic config we loaded + static & default config:
[{{node,'ns_1@127.0.0.1',xdcr_rest_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9998]},
{{node,'ns_1@127.0.0.1',uuid},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
<<"eac84bf2ecf69c83ca0268ac5aac465d">>]},
{{node,'ns_1@127.0.0.1',stop_xdcr},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012660}}]}|
'_deleted']},
{{node,'ns_1@127.0.0.1',ssl_rest_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18091]},
{{node,'ns_1@127.0.0.1',ssl_query_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18093]},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
11215]},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
11214]},
{{node,'ns_1@127.0.0.1',ssl_capi_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18092]},
{{node,'ns_1@127.0.0.1',services},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]},
index,kv,n1ql]},
{{node,'ns_1@127.0.0.1',rest},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,8091},
{port_meta,global}]},
{{node,'ns_1@127.0.0.1',query_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8093]},
{{node,'ns_1@127.0.0.1',projector_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9999]},
{{node,'ns_1@127.0.0.1',port_servers},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{filename,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ns_log"}]},
{{node,'ns_1@127.0.0.1',moxi},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11211},
{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',memcached_defaults},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{maxconn,30000},
{dedicated_port_maxconn,5000},
{ssl_cipher_list,"HIGH"},
{connection_idle_time,0},
{verbosity,0},
{breakpad_enabled,true},
{breakpad_minidump_dir_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/crash"},
{dedupe_nmvb_maps,false}]},
{{node,'ns_1@127.0.0.1',memcached_config},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{[{interfaces,
{memcached_config_mgr,omit_missing_mcd_ports,
[{[{host,<<"*">>},{port,port},{maxconn,maxconn}]},
{[{host,<<"*">>},
{port,dedicated_port},
{maxconn,dedicated_port_maxconn}]},
{[{host,<<"*">>},
{port,ssl_port},
{maxconn,maxconn},
{ssl,
{[{key,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-key.pem">>},
{cert,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-cert.pem">>}]}}]}]}},
{ssl_cipher_list,{"~s",[ssl_cipher_list]}},
{ssl_minimum_protocol,{memcached_config_mgr,ssl_minimum_protocol,[]}},
{connection_idle_time,connection_idle_time},
{breakpad,
{[{enabled,breakpad_enabled},
{minidump_dir,{memcached_config_mgr,get_minidump_dir,[]}}]}},
{extensions,
[{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/stdin_term_handler.so">>},
{config,<<>>}]},
{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/file_logger.so">>},
{config,
{"cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]}}]}]},
{admin,{"~s",[admin_user]}},
{verbosity,verbosity},
{audit_file,{"~s",[audit_file]}},
{dedupe_nmvb_maps,dedupe_nmvb_maps}]}]},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11210},
{dedicated_port,11209},
{ssl_port,11207},
{admin_user,"_admin"},
{admin_pass,"*****"},
{engines,
[{membase,
[{engine,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/ep.so"},
{static_config_string,"failpartialwarmup=false"}]},
{memcached,
[{engine,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{config_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached.json"},
{audit_file,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/audit.json"},
{log_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003}]},
{{node,'ns_1@127.0.0.1',membership},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
active]},
{{node,'ns_1@127.0.0.1',ldap_enabled},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
false]},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@127.0.0.1',is_enterprise},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
true]},
{{node,'ns_1@127.0.0.1',indexer_stmaint_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9105]},
{{node,'ns_1@127.0.0.1',indexer_stinit_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9103]},
{{node,'ns_1@127.0.0.1',indexer_stcatchup_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9104]},
{{node,'ns_1@127.0.0.1',indexer_scan_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9101]},
{{node,'ns_1@127.0.0.1',indexer_http_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9102]},
{{node,'ns_1@127.0.0.1',indexer_admin_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9100]},
{{node,'ns_1@127.0.0.1',fts_http_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8094]},
{{node,'ns_1@127.0.0.1',config_version},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{4,5}]},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{check_interval,30},
{min_db_file_size,131072},
{min_view_file_size,20971520}]},
{{node,'ns_1@127.0.0.1',capi_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8092]},
{{node,'ns_1@127.0.0.1',audit},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]},
{{service_map,n1ql},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']},
{{service_map,index},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']},
{{service_map,fts},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}]},
{{request_limit,rest},undefined},
{{request_limit,capi},undefined},
{{metakv,<<"/indexing/settings/config">>},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{4,63644012784}}]}|
<<"{\"indexer.settings.compaction.days_of_week\":\"Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday\",\"indexer.settings.compaction.interval\":\"00:00,00:00\",\"indexer.settings.compaction.compaction_mode\":\"circular\",\"indexer.settings.persisted_snapshot.interval\":5000,\"indexer.settings.log_level\":\"info\",\"indexer.settings.compaction.min_frag\":30,\"indexer.settings.inmemory_snapshot.interval\":200,\"indexer.settings.max_cpu_percent\":0,\"indexer.settings.storage_mode\":\"forestdb\",\"indexer.settings.recovery.max_rollbacks\":5,\"indexer.settings.memory_quota\":536870912,\"indexer.settings.compaction.abort_exceed_interval\":false}">>]},
{{local_changes_count,<<"eac84bf2ecf69c83ca0268ac5aac465d">>},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{35,63644101958}}]}]},
{{couchdb,max_parallel_replica_indexers},2},
{{couchdb,max_parallel_indexers},4},
{uuid,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012784}}]}|
<<"561b20f339d4184270a7de0b1c1de1b0">>]},
{settings,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012777}}]},
{stats,[{send_stats,false}]}]},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{server_groups,
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]},
{roles_definitions,
[{admin,[],
[{name,<<"Admin">>},
{desc,<<"Can manage ALL cluster features including security.">>}],
[{[],all}]},
{ro_admin,[],
[{name,<<"Read Only Admin">>},{desc,<<"Can view ALL cluster features.">>}],
[{[{bucket,any},password],none},
{[{bucket,any},data],none},
{[admin,security],[read]},
{[admin],none},
{[],[read]}]},
{cluster_admin,[],
[{name,<<"Cluster Admin">>},
{desc,<<"Can manage all cluster features EXCEPT security.">>}],
[{[admin],none},{[],all}]},
{bucket_admin,
[bucket_name],
[{name,<<"Bucket Admin">>},
{desc,
<<"Can manage ALL bucket features for specified buckets (incl. start/stop XDCR)">>}],
[{[{bucket,bucket_name},xdcr],[read,execute]},
{[{bucket,bucket_name}],all},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{bucket_sasl,
[bucket_name],
[],
[{[{bucket,bucket_name},data],all},
{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name}],[read,flush]},
{[pools],[read]}]},
{views_admin,
[bucket_name],
[{name,<<"Views Admin">>},
{desc,<<"Can manage views for specified buckets">>}],
[{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{replication_admin,[],
[{name,<<"Replication Admin">>},
{desc,<<"Can manage ONLY XDCR features (cluster AND bucket level)">>}],
[{[{bucket,any},xdcr],all},
{[{bucket,any},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],all},
{[admin],none},
{[],[read]}]}]},
{rest_creds,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012784}}]}|
{"Administrator",{password,"*****"}}]},
{rest,[{port,8091}]},
{replication,[{enabled,true}]},
{remote_clusters,[]},
{read_only_user_creds,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
null]},
{otp,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]},
{cookie,oxqibayfkfbrogxo}]},
{nodes_wanted,['ns_1@127.0.0.1']},
{memory_quota,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]}|
1024]},
{memcached,[]},
{max_bucket_count,10},
{index_aware_rebalance_disabled,false},
{goxdcr_upgrade,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
'_deleted']},
{fts_memory_quota,512},
{email_alerts,
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},{pass,"*****"},{host,"localhost"},{port,25},{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,
auto_failover_disabled,ip,disk,overhead,ep_oom_errors,
ep_item_commit_failed,audit_dropped_events,indexer_ram_max_usage]}]},
{drop_request_memory_threshold_mib,undefined},
{cluster_compat_version,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{6,63644012660}}]},
4,6]},
{cert_and_pkey,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{<<"-----BEGIN CERTIFICATE-----\nMIIDAjCCAeqgAwIBAgIIFH6f01mhINIwDQYJKoZIhvcNAQELBQAwJDEiMCAGA1UE\nAxMZQ291Y2hiYXNlIFNlcnZlciAxMzBiNDVmMzAeFw0xMzAxMDEwMDAwMDBaFw00\nOTEyMzEyMzU5NTlaMCQxIjAgBgNVBAMTGUNvdWNoYmFzZSBTZXJ2ZXIgMTMwYjQ1\nZjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDINaiYz/zhTeD2h8Pa\nP015LetKMhey1yoE2L5H1wHK7pADeFRfqeoNunEvlxRWL/YjnqvRZPxrjdadlh7L\nVhZVke2blopHdhJjaHCvdI8R3BRBK4fLv5m4c0SzdE6bvk1QS+T3rZyzxUbMtB0g\nEq2ZPed8JdQFqO0Bo1JuXJx4/q9tjhvbHUVjRX9QHL3nClC3qVemVjTCKbNqZWv8\n5qZmH/X5DWkyNFKj6HzE20qFWYa8d9tmdeo9zaGVMzCFCOXKPGeHkW/GpJWxK3FM\n/BWdgq5nonb+y3ufSE1JBJjXCO6JipXf4OKRB54009m9hAmJJK9sPVeH9NMnVhS7\naEDXAgMBAAGjODA2MA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEFBQcD\nATAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCAdca3XDbl7heN\n6vk0VTcrrZCLHDY+PLTFcnGq2xv84APOrvwQJEH9qnCw0/czkn9UW+27Ix2wjkeP\nFbUdXKvFDpU0LQgpkdZ+BKXQlX0ezKG+StpUODxYdDnUDCLzRLJsg0GgEODysPAK\nwHiA3X5d+UvNE/Z7TP5ASyzXnypuR8jhXCdEQ0o8mLQMx4I4Xd2sHFz2x6qO9i8f\nMPEJ076QTj5+RyI4BDAgUeWns/ZTKX/bi+FXPkRZ8QWkxIrSkNSdmgvPmMBzFluv\nDhFwtFBMQovmICfkT5TYmtwYsqZgh32v5FZOLUlHOR29R1dKOOuyCbIyqlTjWCYZ\n1j3GlmIC\n-----END CERTIFICATE-----\n">>,
<<"*****">>}]},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{auto_failover_cfg,[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]},
{audit,
[{auditd_enabled,false},
{rotate_interval,86400},
{rotate_size,20971520},
{disabled,[]},
{sync,[]},
{log_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"}]},
{alert_limits,
[{max_overhead_perc,50},{max_disk_used,90},{max_indexer_ram,75}]},
{vbucket_map_history,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012772}}]},
{[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]],
[{replication_topology,star},{tags,undefined},{max_slaves,10}]}]},
{buckets,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{18,63644101958}}]},
{configs,
[{"test",
[{repl_type,dcp},
{uuid,<<"7f7d4a28ca84a805edf9c899521eb18c">>},
{num_replicas,1},
{replica_index,false},
{ram_quota,524288000},
{auth_type,sasl},
{sasl_password,"*****"},
{autocompaction,false},
{purge_interval,undefined},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]},
{"locked",
[{eviction_policy,value_only},
{num_threads,3},
{flush_enabled,false},
{purge_interval,undefined},
{autocompaction,false},
{sasl_password,"*****"},
{auth_type,sasl},
{ram_quota,104857600},
{num_replicas,1},
{repl_type,dcp},
{uuid,<<"8515ae93e826e7c4389f3fd25fbb263e">>},
{replica_index,false},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]},
{"default",
[{repl_type,dcp},
{uuid,<<"b04d5897bd3c5329a82156f1b77c395d">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,104857600},
{auth_type,sasl},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]}]}]}]
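NOTE: in the memcached_config entry above, values come in three shapes: a bare atom names a parameter to copy, {Fmt, ArgNames} is an io_lib:format template over parameter names, and an {M,F,A} triple is a call made at render time. The parameters are resolved against the {node,_,memcached} and memcached_defaults lists. A rough sketch of that substitution (expand/2 is illustrative, not memcached_config_mgr's actual code):

    %% Illustrative expansion of templated config fields.
    expand({M, F, A}, _Params) when is_atom(M), is_atom(F), is_list(A) ->
        erlang:apply(M, F, A);
    expand({Fmt, ArgNames}, Params) when is_list(Fmt) ->
        Args = [proplists:get_value(N, Params) || N <- ArgNames],
        lists:flatten(io_lib:format(Fmt, Args));
    expand(Name, Params) when is_atom(Name) ->
        proplists:get_value(Name, Params).

    %% expand({"~s", [admin_user]}, [{admin_user, "_admin"}]) =:= "_admin"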
[error_logger:info,2016-10-19T09:55:34.924-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.147.0>},
{name,ns_config},
{mfargs,
{ns_config,start_link,
["/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/couchbase/config",
ns_config_default]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:34.925-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.150.0>},
{name,ns_config_remote},
{mfargs,
{ns_config_replica,start_link,
[{local,ns_config_remote}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:34.927-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.151.0>},
{name,ns_config_log},
{mfargs,{ns_config_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:34.927-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.144.0>},
{name,ns_config_sup},
{mfargs,{ns_config_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:34.928-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.153.0>},
{name,vbucket_filter_changes_registry},
{mfargs,
{ns_process_registry,start_link,
[vbucket_filter_changes_registry,
[{terminate_command,shutdown}]]}},
{restart_type,permanent},
{shutdown,100},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:34.929-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.154.0>},
{name,json_rpc_connection_sup},
{mfargs,{json_rpc_connection_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:34.936-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.157.0>},
{name,remote_monitors},
{mfargs,{remote_monitors,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:34.937-07:00,ns_1@127.0.0.1:menelaus_barrier<0.158.0>:one_shot_barrier:barrier_body:58]Barrier menelaus_barrier has started
[error_logger:info,2016-10-19T09:55:34.937-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.158.0>},
{name,menelaus_barrier},
{mfargs,{menelaus_sup,barrier_start_link,[]}},
{restart_type,temporary},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:34.937-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.159.0>},
{name,rest_lhttpc_pool},
{mfargs,
{lhttpc_manager,start_link,
[[{name,rest_lhttpc_pool},
{connection_timeout,120000},
{pool_size,20}]]}},
{restart_type,{permanent,1}},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:34.947-07:00,ns_1@127.0.0.1:ns_ssl_services_setup<0.161.0>:ns_ssl_services_setup:init:370]Used ssl options:
[{keyfile,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/ssl-cert-key.pem"},
{certfile,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/ssl-cert-key.pem"},
{versions,[tlsv1,'tlsv1.1','tlsv1.2']},
{cacertfile,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/ssl-cert-key.pem-ca"},
{dh,<<48,130,1,8,2,130,1,1,0,152,202,99,248,92,201,35,238,246,5,77,93,120,10,
118,129,36,52,111,193,167,220,49,229,106,105,152,133,121,157,73,158,
232,153,197,197,21,171,140,30,207,52,165,45,8,221,162,21,199,183,66,
211,247,51,224,102,214,190,130,96,253,218,193,35,43,139,145,89,200,250,
145,92,50,80,134,135,188,205,254,148,122,136,237,220,186,147,187,104,
159,36,147,217,117,74,35,163,145,249,175,242,18,221,124,54,140,16,246,
169,84,252,45,47,99,136,30,60,189,203,61,86,225,117,255,4,91,46,110,
167,173,106,51,65,10,248,94,225,223,73,40,232,140,26,11,67,170,118,190,
67,31,127,233,39,68,88,132,171,224,62,187,207,160,189,209,101,74,8,205,
174,146,173,80,105,144,246,25,153,86,36,24,178,163,64,202,221,95,184,
110,244,32,226,217,34,55,188,230,55,16,216,247,173,246,139,76,187,66,
211,159,17,46,20,18,48,80,27,250,96,189,29,214,234,241,34,69,254,147,
103,220,133,40,164,84,8,44,241,61,164,151,9,135,41,60,75,4,202,133,173,
72,6,69,167,89,112,174,40,229,171,2,1,2>>},
{ciphers,[{dhe_rsa,aes_256_cbc,sha256},
{dhe_dss,aes_256_cbc,sha256},
{rsa,aes_256_cbc,sha256},
{dhe_rsa,aes_128_cbc,sha256},
{dhe_dss,aes_128_cbc,sha256},
{rsa,aes_128_cbc,sha256},
{dhe_rsa,aes_256_cbc,sha},
{dhe_dss,aes_256_cbc,sha},
{rsa,aes_256_cbc,sha},
{dhe_rsa,'3des_ede_cbc',sha},
{dhe_dss,'3des_ede_cbc',sha},
{rsa,'3des_ede_cbc',sha},
{dhe_rsa,aes_128_cbc,sha},
{dhe_dss,aes_128_cbc,sha},
{rsa,aes_128_cbc,sha}]}]
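The option list above is handed to the Erlang ssl application: keyfile, certfile and cacertfile all point into the node's self-signed cert-key bundle, dh pins a DER-encoded Diffie-Hellman group, and ciphers is an explicit allow-list (note the absence of RC4 and export suites). A hedged sketch of opening a TLS listener with options of this shape, assuming OTP's ssl module and placeholder paths:

    %% Minimal sketch only; not the actual ns_ssl_services_setup wiring.
    start_tls_listener(Port) ->
        ok = ssl:start(),
        Opts = [{keyfile,    "/path/to/ssl-cert-key.pem"},
                {certfile,   "/path/to/ssl-cert-key.pem"},
                {cacertfile, "/path/to/ssl-cert-key.pem-ca"},
                {versions,   [tlsv1, 'tlsv1.1', 'tlsv1.2']},
                %% {dh, DerBinary} would pin the DH group, as in the log above
                {reuseaddr,  true}],
        {ok, ListenSocket} = ssl:listen(Port, Opts),
        ListenSocket.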
[error_logger:info,2016-10-19T09:55:34.995-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.161.0>},
{name,ns_ssl_services_setup},
{mfargs,{ns_ssl_services_setup,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:35.010-07:00,ns_1@127.0.0.1:<0.163.0>:menelaus_pluggable_ui:validate_plugin_spec:117]Loaded pluggable UI specification for fts
[ns_server:info,2016-10-19T09:55:35.011-07:00,ns_1@127.0.0.1:<0.163.0>:menelaus_pluggable_ui:validate_plugin_spec:117]Loaded pluggable UI specification for n1ql
[ns_server:debug,2016-10-19T09:55:35.019-07:00,ns_1@127.0.0.1:<0.163.0>:restartable:start_child:98]Started child process <0.165.0>
MFA: {ns_ssl_services_setup,start_link_rest_service,[]}
[error_logger:info,2016-10-19T09:55:35.019-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.163.0>},
{name,ns_rest_ssl_service},
{mfargs,
{restartable,start_link,
[{ns_ssl_services_setup,
start_link_rest_service,[]},
1000]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:35.019-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.160.0>},
{name,ns_ssl_services_sup},
{mfargs,{ns_ssl_services_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:35.027-07:00,ns_1@127.0.0.1:wait_link_to_couchdb_node<0.183.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:126]Waiting for ns_couchdb node to start
[error_logger:info,2016-10-19T09:55:35.027-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.182.0>},
{name,start_couchdb_node},
{mfargs,{ns_server_nodes_sup,start_couchdb_node,[]}},
{restart_type,{permanent,5}},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:35.027-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{connect,normal,'couchdb_ns_1@127.0.0.1'}}
[ns_server:debug,2016-10-19T09:55:35.028-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: {badrpc,nodedown}
[error_logger:info,2016-10-19T09:55:35.028-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{'EXIT',<0.186.0>,shutdown}}
[error_logger:info,2016-10-19T09:55:35.028-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{net_kernel,875,nodedown,'couchdb_ns_1@127.0.0.1'}}
[error_logger:info,2016-10-19T09:55:35.229-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{connect,normal,'couchdb_ns_1@127.0.0.1'}}
[ns_server:debug,2016-10-19T09:55:35.266-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[ns_server:debug,2016-10-19T09:55:35.467-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[ns_server:debug,2016-10-19T09:55:35.668-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[ns_server:debug,2016-10-19T09:55:35.871-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[ns_server:debug,2016-10-19T09:55:36.072-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[ns_server:debug,2016-10-19T09:55:36.273-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
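The repeated "ns_couchdb is not ready" lines are a startup barrier: ns_server has just spawned the separate couchdb_ns_1@127.0.0.1 node and polls it roughly every 200 ms, first getting {badrpc,nodedown} while that VM is still booting, then false until the CouchDB side finishes initializing. A sketch of such a wait loop, with a hypothetical readiness probe standing in for the real check in ns_server_nodes_sup:do_wait_link_to_couchdb_node:

    %% Hedged sketch; the RPC target below is illustrative, not the real probe.
    wait_for_couchdb(Node) ->
        case rpc:call(Node, ns_couchdb_api, is_ready, []) of
            true ->
                ok;
            NotReady ->   % {badrpc,nodedown} or false, as in the log
                error_logger:info_msg("ns_couchdb is not ready: ~p~n", [NotReady]),
                timer:sleep(200),
                wait_for_couchdb(Node)
        end.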
[error_logger:info,2016-10-19T09:55:36.591-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.183.0>},
{name,wait_for_couchdb_node},
{mfargs,
{erlang,apply,
[#Fun,[]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.594-07:00,ns_1@127.0.0.1:ns_server_nodes_sup<0.156.0>:ns_storage_conf:setup_db_and_ix_paths:53]Initializing db_and_ix_paths variable with [{db_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/data"},
{index_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/data"}]
[error_logger:info,2016-10-19T09:55:36.598-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.199.0>},
{name,diag_handler_worker},
{mfargs,{work_queue,start_link,[diag_handler_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:36.599-07:00,ns_1@127.0.0.1:ns_server_sup<0.198.0>:dir_size:start_link:39]Starting quick version of dir_size with program name: godu
[error_logger:info,2016-10-19T09:55:36.600-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.200.0>},
{name,dir_size},
{mfargs,{dir_size,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.601-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.201.0>},
{name,request_throttler},
{mfargs,{request_throttler,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.603-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.203.0>},
{name,timer2_server},
{mfargs,{timer2,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.604-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.202.0>},
{name,ns_log},
{mfargs,{ns_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.604-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.204.0>},
{name,ns_crash_log_consumer},
{mfargs,{ns_log,start_link_crash_consumer,[]}},
{restart_type,{permanent,4}},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.607-07:00,ns_1@127.0.0.1:ns_config_isasl_sync<0.205.0>:ns_config_isasl_sync:init:63]isasl_sync init: ["/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw",
"_admin"]
[ns_server:debug,2016-10-19T09:55:36.607-07:00,ns_1@127.0.0.1:ns_config_isasl_sync<0.205.0>:ns_config_isasl_sync:init:71]isasl_sync init buckets: ["default","locked","test"]
[ns_server:debug,2016-10-19T09:55:36.608-07:00,ns_1@127.0.0.1:ns_config_isasl_sync<0.205.0>:ns_config_isasl_sync:writeSASLConf:143]Writing isasl passwd file: "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw"
[user:info,2016-10-19T09:55:36.609-07:00,ns_1@127.0.0.1:<0.204.0>:ns_log:crash_consumption_loop:70]Service 'ns_server' exited with status 1. Restarting. Messages: {"Kernel pid terminated",application_controller,"{application_terminated,os_mon,shutdown}"}
[ns_server:error,2016-10-19T09:55:36.609-07:00,ns_1@127.0.0.1:ns_log<0.202.0>:ns_log:handle_cast:209]unable to notify listeners because of badarg
[user:info,2016-10-19T09:55:36.621-07:00,ns_1@127.0.0.1:<0.204.0>:ns_log:crash_consumption_loop:70]Service 'indexer' exited with status 1. Restarting. Messages: runtime.goexit()
/Users/jenkins/.cbdepscache/exploded/x86_64/go-1.7.1/go/src/runtime/asm_amd64.s:2086 +0x1 fp=0xc420042fb0 sp=0xc420042fa8
created by net/http.(*Transport).dialConn
/Users/jenkins/.cbdepscache/exploded/x86_64/go-1.7.1/go/src/net/http/transport.go:1063 +0x50e
[goport] 2016/10/19 09:55:34 /Applications/Couchbase Server.app/Contents/Resources/couchbase-core/bin/indexer terminated: exit status 2
[ns_server:error,2016-10-19T09:55:36.621-07:00,ns_1@127.0.0.1:ns_log<0.202.0>:ns_log:handle_cast:209]unable to notify listeners because of badarg
[user:debug,2016-10-19T09:55:36.633-07:00,ns_1@127.0.0.1:<0.204.0>:ns_log:crash_consumption_loop:70]Service 'indexer' exited with status 0. Restarting. Messages: 2016-10-19T09:55:34.518-07:00 [Info] Indexer::NewIndexer Status Bootstrap
2016-10-19T09:55:34.519-07:00 [Error] GetSettingsConfig() failed: Get http://127.0.0.1:8091/_metakv/indexing/settings/config: CBAuth database is stale: last reason: dial tcp 127.0.0.1:8091: getsockopt: connection refused
2016-10-19T09:55:34.519-07:00 [Fatal] Indexer::NewIndexer settingsMgr Init Error Get http://127.0.0.1:8091/_metakv/indexing/settings/config: CBAuth database is stale: last reason: dial tcp 127.0.0.1:8091: getsockopt: connection refused
2016-10-19T09:55:34.519-07:00 [Warn] Indexer Failure to Init Get http://127.0.0.1:8091/_metakv/indexing/settings/config: CBAuth database is stale: last reason: dial tcp 127.0.0.1:8091: getsockopt: connection refused
2016-10-19T09:55:34.519-07:00 [Info] Indexer exiting normally
[error_logger:info,2016-10-19T09:55:36.659-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.205.0>},
{name,ns_config_isasl_sync},
{mfargs,{ns_config_isasl_sync,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.659-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.208.0>},
{name,ns_log_events},
{mfargs,{gen_event,start_link,[{local,ns_log_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.660-07:00,ns_1@127.0.0.1:ns_node_disco<0.211.0>:ns_node_disco:init:138]Initializing ns_node_disco with []
[ns_server:debug,2016-10-19T09:55:36.660-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[error_logger:info,2016-10-19T09:55:36.660-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.210.0>},
{name,ns_node_disco_events},
{mfargs,
{gen_event,start_link,
[{local,ns_node_disco_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[user:info,2016-10-19T09:55:36.661-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_sync:130]Node 'ns_1@127.0.0.1' synchronized otp cookie oxqibayfkfbrogxo from cluster
[ns_server:debug,2016-10-19T09:55:36.661-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2016-10-19T09:55:36.673-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
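The otp cookie (oxqibayfkfbrogxo here) is Erlang's shared secret for distributed-node authentication; ns_cookie_manager applies it to the live VM and persists it so the node can rejoin the cluster after a restart. A hedged sketch of that save step (the path is a placeholder; the real logic lives in ns_cookie_manager:do_cookie_sync and do_cookie_save):

    %% Sketch only: apply the cookie to this node, then persist it for the next boot.
    sync_and_save_cookie(Cookie, Path) ->
        true = erlang:set_cookie(node(), Cookie),
        ok = file:write_file(Path, io_lib:format("~p.~n", [Cookie])),
        ok.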
[ns_server:debug,2016-10-19T09:55:36.673-07:00,ns_1@127.0.0.1:<0.212.0>:ns_node_disco:do_nodes_wanted_updated_fun:224]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[ns_server:debug,2016-10-19T09:55:36.675-07:00,ns_1@127.0.0.1:<0.212.0>:ns_node_disco:do_nodes_wanted_updated_fun:230]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[error_logger:info,2016-10-19T09:55:36.675-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.211.0>},
{name,ns_node_disco},
{mfargs,{ns_node_disco,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.676-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.213.0>},
{name,ns_node_disco_log},
{mfargs,{ns_node_disco_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.677-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.214.0>},
{name,ns_node_disco_conf_events},
{mfargs,{ns_node_disco_conf_events,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.678-07:00,ns_1@127.0.0.1:ns_config_rep<0.216.0>:ns_config_rep:init:68]init pulling
[ns_server:debug,2016-10-19T09:55:36.678-07:00,ns_1@127.0.0.1:ns_config_rep<0.216.0>:ns_config_rep:init:70]init pushing
[error_logger:info,2016-10-19T09:55:36.678-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.215.0>},
{name,ns_config_rep_merger},
{mfargs,{ns_config_rep,start_link_merger,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.679-07:00,ns_1@127.0.0.1:ns_config_rep<0.216.0>:ns_config_rep:init:74]init reannouncing
[ns_server:debug,2016-10-19T09:55:36.679-07:00,ns_1@127.0.0.1:ns_config_events<0.145.0>:ns_node_disco_conf_events:handle_event:44]ns_node_disco_conf_events config on nodes_wanted
[ns_server:debug,2016-10-19T09:55:36.679-07:00,ns_1@127.0.0.1:ns_config_events<0.145.0>:ns_node_disco_conf_events:handle_event:50]ns_node_disco_conf_events config on otp
[ns_server:debug,2016-10-19T09:55:36.679-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2016-10-19T09:55:36.679-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2016-10-19T09:55:36.679-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
alert_limits ->
[{max_overhead_perc,50},{max_disk_used,90},{max_indexer_ram,75}]
[ns_server:debug,2016-10-19T09:55:36.679-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
audit ->
[{auditd_enabled,false},
{rotate_interval,86400},
{rotate_size,20971520},
{disabled,[]},
{sync,[]},
{log_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"}]
[ns_server:debug,2016-10-19T09:55:36.680-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
auto_failover_cfg ->
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]
[ns_server:debug,2016-10-19T09:55:36.680-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
autocompaction ->
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[error_logger:info,2016-10-19T09:55:36.680-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.216.0>},
{name,ns_config_rep},
{mfargs,{ns_config_rep,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.680-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.209.0>},
{name,ns_node_disco_sup},
{mfargs,{ns_node_disco_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:36.680-07:00,ns_1@127.0.0.1:ns_config_rep<0.216.0>:ns_config_rep:do_push_keys:321]Replicating some config keys ([alert_limits,audit,auto_failover_cfg,
autocompaction,buckets,cert_and_pkey,
cluster_compat_version,
drop_request_memory_threshold_mib,email_alerts,
fts_memory_quota,goxdcr_upgrade,
index_aware_rebalance_disabled,
max_bucket_count,memcached,memory_quota,
nodes_wanted,otp,read_only_user_creds,
remote_clusters,replication,rest,rest_creds,
roles_definitions,server_groups,
set_view_update_daemon,settings,uuid,
vbucket_map_history,
{couchdb,max_parallel_indexers},
{couchdb,max_parallel_replica_indexers},
{local_changes_count,
<<"eac84bf2ecf69c83ca0268ac5aac465d">>},
{metakv,<<"/indexing/settings/config">>},
{request_limit,capi},
{request_limit,rest},
{service_map,fts},
{service_map,index},
{service_map,n1ql},
{node,'ns_1@127.0.0.1',audit},
{node,'ns_1@127.0.0.1',capi_port},
{node,'ns_1@127.0.0.1',compaction_daemon},
{node,'ns_1@127.0.0.1',config_version},
{node,'ns_1@127.0.0.1',fts_http_port},
{node,'ns_1@127.0.0.1',indexer_admin_port},
{node,'ns_1@127.0.0.1',indexer_http_port},
{node,'ns_1@127.0.0.1',indexer_scan_port},
{node,'ns_1@127.0.0.1',indexer_stcatchup_port},
{node,'ns_1@127.0.0.1',indexer_stinit_port},
{node,'ns_1@127.0.0.1',indexer_stmaint_port},
{node,'ns_1@127.0.0.1',is_enterprise},
{node,'ns_1@127.0.0.1',isasl},
{node,'ns_1@127.0.0.1',ldap_enabled},
{node,'ns_1@127.0.0.1',membership},
{node,'ns_1@127.0.0.1',memcached},
{node,'ns_1@127.0.0.1',memcached_config},
{node,'ns_1@127.0.0.1',memcached_defaults},
{node,'ns_1@127.0.0.1',moxi},
{node,'ns_1@127.0.0.1',ns_log},
{node,'ns_1@127.0.0.1',port_servers},
{node,'ns_1@127.0.0.1',projector_port},
{node,'ns_1@127.0.0.1',query_port},
{node,'ns_1@127.0.0.1',rest},
{node,'ns_1@127.0.0.1',services},
{node,'ns_1@127.0.0.1',ssl_capi_port},
{node,'ns_1@127.0.0.1',
ssl_proxy_downstream_port}]..)
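ns_config_rep batches every locally changed key and pushes the batch to the other nodes in nodes_wanted; the ".." closing the list is the log's own truncation, and on this single-node cluster there are no peers to receive the push. A minimal sketch of that fan-out (the registered name appears in this log, but the message shape is an assumption):

    %% Hedged sketch of pushing changed config key/value pairs to peer nodes.
    push_keys(KVs) ->
        [gen_server:cast({ns_config_rep, N}, {merge_compressed, KVs})
         || N <- nodes()],    % empty list here: single-node cluster
        ok.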
[ns_server:debug,2016-10-19T09:55:36.681-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
buckets ->
[[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{18,63644101958}}],
{configs,[[{map,[{0,[],['ns_1@127.0.0.1',undefined]},
{1,[],['ns_1@127.0.0.1',undefined]},
{2,[],['ns_1@127.0.0.1',undefined]},
{3,[],['ns_1@127.0.0.1',undefined]},
{4,[],['ns_1@127.0.0.1',undefined]},
{5,[],['ns_1@127.0.0.1',undefined]},
{6,[],['ns_1@127.0.0.1',undefined]},
{7,[],['ns_1@127.0.0.1',undefined]},
{8,[],['ns_1@127.0.0.1',undefined]},
{9,[],['ns_1@127.0.0.1',undefined]},
{10,[],['ns_1@127.0.0.1',undefined]},
{11,[],['ns_1@127.0.0.1',undefined]},
{12,[],['ns_1@127.0.0.1',undefined]},
{13,[],['ns_1@127.0.0.1',undefined]},
{14,[],['ns_1@127.0.0.1',undefined]},
{15,[],['ns_1@127.0.0.1',undefined]},
{16,[],['ns_1@127.0.0.1',undefined]},
{17,[],['ns_1@127.0.0.1',undefined]},
{18,[],['ns_1@127.0.0.1',undefined]},
{19,[],['ns_1@127.0.0.1',undefined]},
{20,[],['ns_1@127.0.0.1',undefined]},
{21,[],['ns_1@127.0.0.1',undefined]},
{22,[],['ns_1@127.0.0.1',undefined]},
{23,[],['ns_1@127.0.0.1',undefined]},
{24,[],['ns_1@127.0.0.1',undefined]},
{25,[],['ns_1@127.0.0.1',undefined]},
{26,[],['ns_1@127.0.0.1',undefined]},
{27,[],['ns_1@127.0.0.1',undefined]},
{28,[],['ns_1@127.0.0.1',undefined]},
{29,[],['ns_1@127.0.0.1',undefined]},
{30,[],['ns_1@127.0.0.1',undefined]},
{31,[],['ns_1@127.0.0.1',undefined]},
{32,[],['ns_1@127.0.0.1',undefined]},
{33,[],['ns_1@127.0.0.1',undefined]},
{34,[],['ns_1@127.0.0.1',undefined]},
{35,[],['ns_1@127.0.0.1',undefined]},
{36,[],['ns_1@127.0.0.1',undefined]},
{37,[],['ns_1@127.0.0.1',undefined]},
{38,[],['ns_1@127.0.0.1',undefined]},
{39,[],['ns_1@127.0.0.1',undefined]},
{40,[],['ns_1@127.0.0.1',undefined]},
{41,[],['ns_1@127.0.0.1',undefined]},
{42,[],['ns_1@127.0.0.1',undefined]},
{43,[],['ns_1@127.0.0.1',undefined]},
{44,[],['ns_1@127.0.0.1',undefined]},
{45,[],['ns_1@127.0.0.1',undefined]},
{46,[],['ns_1@127.0.0.1',undefined]},
{47,[],['ns_1@127.0.0.1',undefined]},
{48,[],['ns_1@127.0.0.1',undefined]},
{49,[],['ns_1@127.0.0.1',undefined]},
{50,[],['ns_1@127.0.0.1',undefined]},
{51,[],['ns_1@127.0.0.1',undefined]},
{52,[],['ns_1@127.0.0.1',undefined]},
{53,[],['ns_1@127.0.0.1',undefined]},
{54,[],['ns_1@127.0.0.1',undefined]},
{55,[],['ns_1@127.0.0.1',undefined]},
{56,[],['ns_1@127.0.0.1',undefined]},
{57,[],['ns_1@127.0.0.1',undefined]},
{58,[],['ns_1@127.0.0.1',undefined]},
{59,[],['ns_1@127.0.0.1',undefined]},
{60,[],['ns_1@127.0.0.1',undefined]},
{61,[],['ns_1@127.0.0.1',undefined]},
{62,[],['ns_1@127.0.0.1',undefined]},
{63,[],['ns_1@127.0.0.1',undefined]}]},
{fastForwardMap,[]},
{repl_type,dcp},
{uuid,<<"b04d5897bd3c5329a82156f1b77c395d">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,104857600},
{auth_type,sasl},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map_opts_hash,133465355}],
[{map,[{0,[],['ns_1@127.0.0.1',undefined]},
{1,[],['ns_1@127.0.0.1',undefined]},
{2,[],['ns_1@127.0.0.1',undefined]},
{3,[],['ns_1@127.0.0.1',undefined]},
{4,[],['ns_1@127.0.0.1',undefined]},
{5,[],['ns_1@127.0.0.1',undefined]},
{6,[],['ns_1@127.0.0.1',undefined]},
{7,[],['ns_1@127.0.0.1',undefined]},
{8,[],['ns_1@127.0.0.1',undefined]},
{9,[],['ns_1@127.0.0.1',undefined]},
{10,[],['ns_1@127.0.0.1',undefined]},
{11,[],['ns_1@127.0.0.1',undefined]},
{12,[],['ns_1@127.0.0.1',undefined]},
{13,[],['ns_1@127.0.0.1',undefined]},
{14,[],['ns_1@127.0.0.1',undefined]},
{15,[],['ns_1@127.0.0.1',undefined]},
{16,[],['ns_1@127.0.0.1',undefined]},
{17,[],['ns_1@127.0.0.1',undefined]},
{18,[],['ns_1@127.0.0.1',undefined]},
{19,[],['ns_1@127.0.0.1',undefined]},
{20,[],['ns_1@127.0.0.1',undefined]},
{21,[],['ns_1@127.0.0.1',undefined]},
{22,[],['ns_1@127.0.0.1',undefined]},
{23,[],['ns_1@127.0.0.1',undefined]},
{24,[],['ns_1@127.0.0.1',undefined]},
{25,[],['ns_1@127.0.0.1',undefined]},
{26,[],['ns_1@127.0.0.1',undefined]},
{27,[],['ns_1@127.0.0.1',undefined]},
{28,[],['ns_1@127.0.0.1',undefined]},
{29,[],['ns_1@127.0.0.1',undefined]},
{30,[],['ns_1@127.0.0.1',undefined]},
{31,[],['ns_1@127.0.0.1',undefined]},
{32,[],['ns_1@127.0.0.1',undefined]},
{33,[],['ns_1@127.0.0.1',undefined]},
{34,[],['ns_1@127.0.0.1',undefined]},
{35,[],['ns_1@127.0.0.1',undefined]},
{36,[],['ns_1@127.0.0.1',undefined]},
{37,[],['ns_1@127.0.0.1',undefined]},
{38,[],['ns_1@127.0.0.1',undefined]},
{39,[],['ns_1@127.0.0.1',undefined]},
{40,[],['ns_1@127.0.0.1',undefined]},
{41,[],['ns_1@127.0.0.1',undefined]},
{42,[],['ns_1@127.0.0.1',undefined]},
{43,[],['ns_1@127.0.0.1',undefined]},
{44,[],['ns_1@127.0.0.1',undefined]},
{45,[],['ns_1@127.0.0.1',undefined]},
{46,[],['ns_1@127.0.0.1',undefined]},
{47,[],['ns_1@127.0.0.1',undefined]},
{48,[],['ns_1@127.0.0.1',undefined]},
{49,[],['ns_1@127.0.0.1',undefined]},
{50,[],['ns_1@127.0.0.1',undefined]},
{51,[],['ns_1@127.0.0.1',undefined]},
{52,[],['ns_1@127.0.0.1',undefined]},
{53,[],['ns_1@127.0.0.1',undefined]},
{54,[],['ns_1@127.0.0.1',undefined]},
{55,[],['ns_1@127.0.0.1',undefined]},
{56,[],['ns_1@127.0.0.1',undefined]},
{57,[],['ns_1@127.0.0.1',undefined]},
{58,[],['ns_1@127.0.0.1',undefined]},
{59,[],['ns_1@127.0.0.1',undefined]},
{60,[],['ns_1@127.0.0.1',undefined]},
{61,[],['ns_1@127.0.0.1',undefined]},
{62,[],['ns_1@127.0.0.1',undefined]},
{63,[],['ns_1@127.0.0.1',undefined]}]},
{fastForwardMap,[]},
{eviction_policy,value_only},
{num_threads,3},
{flush_enabled,false},
{purge_interval,undefined},
{autocompaction,false},
{sasl_password,"*****"},
{auth_type,sasl},
{ram_quota,104857600},
{num_replicas,1},
{repl_type,dcp},
{uuid,<<"8515ae93e826e7c4389f3fd25fbb263e">>},
{replica_index,false},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map_opts_hash,133465355}],
[{map,[{0,[],['ns_1@127.0.0.1',undefined]},
{1,[],['ns_1@127.0.0.1',undefined]},
{2,[],['ns_1@127.0.0.1',undefined]},
{3,[],['ns_1@127.0.0.1',undefined]},
{4,[],['ns_1@127.0.0.1',undefined]},
{5,[],['ns_1@127.0.0.1',undefined]},
{6,[],['ns_1@127.0.0.1',undefined]},
{7,[],['ns_1@127.0.0.1',undefined]},
{8,[],['ns_1@127.0.0.1',undefined]},
{9,[],['ns_1@127.0.0.1',undefined]},
{10,[],['ns_1@127.0.0.1',undefined]},
{11,[],['ns_1@127.0.0.1',undefined]},
{12,[],['ns_1@127.0.0.1',undefined]},
{13,[],['ns_1@127.0.0.1',undefined]},
{14,[],['ns_1@127.0.0.1',undefined]},
{15,[],['ns_1@127.0.0.1',undefined]},
{16,[],['ns_1@127.0.0.1',undefined]},
{17,[],['ns_1@127.0.0.1',undefined]},
{18,[],['ns_1@127.0.0.1',undefined]},
{19,[],['ns_1@127.0.0.1',undefined]},
{20,[],['ns_1@127.0.0.1',undefined]},
{21,[],['ns_1@127.0.0.1',undefined]},
{22,[],['ns_1@127.0.0.1',undefined]},
{23,[],['ns_1@127.0.0.1',undefined]},
{24,[],['ns_1@127.0.0.1',undefined]},
{25,[],['ns_1@127.0.0.1',undefined]},
{26,[],['ns_1@127.0.0.1',undefined]},
{27,[],['ns_1@127.0.0.1',undefined]},
{28,[],['ns_1@127.0.0.1',undefined]},
{29,[],['ns_1@127.0.0.1',undefined]},
{30,[],['ns_1@127.0.0.1',undefined]},
{31,[],['ns_1@127.0.0.1',undefined]},
{32,[],['ns_1@127.0.0.1',undefined]},
{33,[],['ns_1@127.0.0.1',undefined]},
{34,[],['ns_1@127.0.0.1',undefined]},
{35,[],['ns_1@127.0.0.1',undefined]},
{36,[],['ns_1@127.0.0.1',undefined]},
{37,[],['ns_1@127.0.0.1',undefined]},
{38,[],['ns_1@127.0.0.1',undefined]},
{39,[],['ns_1@127.0.0.1',undefined]},
{40,[],['ns_1@127.0.0.1',undefined]},
{41,[],['ns_1@127.0.0.1',undefined]},
{42,[],['ns_1@127.0.0.1',undefined]},
{43,[],['ns_1@127.0.0.1',undefined]},
{44,[],['ns_1@127.0.0.1',undefined]},
{45,[],['ns_1@127.0.0.1',undefined]},
{46,[],['ns_1@127.0.0.1',undefined]},
{47,[],['ns_1@127.0.0.1',undefined]},
{48,[],['ns_1@127.0.0.1',undefined]},
{49,[],['ns_1@127.0.0.1',undefined]},
{50,[],['ns_1@127.0.0.1',undefined]},
{51,[],['ns_1@127.0.0.1',undefined]},
{52,[],['ns_1@127.0.0.1',undefined]},
{53,[],['ns_1@127.0.0.1',undefined]},
{54,[],['ns_1@127.0.0.1',undefined]},
{55,[],['ns_1@127.0.0.1',undefined]},
{56,[],['ns_1@127.0.0.1',undefined]},
{57,[],['ns_1@127.0.0.1',undefined]},
{58,[],['ns_1@127.0.0.1',undefined]},
{59,[],['ns_1@127.0.0.1',undefined]},
{60,[],['ns_1@127.0.0.1',undefined]},
{61,[],['ns_1@127.0.0.1',undefined]},
{62,[],['ns_1@127.0.0.1',undefined]},
{63,[],['ns_1@127.0.0.1',undefined]}]},
{fastForwardMap,[]},
{repl_type,dcp},
{uuid,<<"7f7d4a28ca84a805edf9c899521eb18c">>},
{num_replicas,1},
{replica_index,false},
{ram_quota,524288000},
{auth_type,sasl},
{sasl_password,"*****"},
{autocompaction,false},
{purge_interval,undefined},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map_opts_hash,133465355}]]}]
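Each bucket config above carries a map of 64 entries pairing a vbucket id with its server chain [ActiveNode, ReplicaNode]; with one node and num_replicas 1, every replica slot is still undefined, and replication_topology star means replicas hang directly off the active copy. Clients route a key by hashing it onto a vbucket id; the sketch below uses plain CRC32 mod num_vbuckets purely for illustration (real Couchbase clients fold the CRC differently, so treat this as shape, not spec):

    %% Illustrative routing only; not the exact client hashing algorithm.
    active_node_for(Key, Map) when is_binary(Key) ->
        NumVBuckets = length(Map),                    % 64 in this config
        VB = erlang:crc32(Key) rem NumVBuckets,
        {VB, _, [Active | _Replicas]} = lists:nth(VB + 1, Map),
        Active.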
[ns_server:debug,2016-10-19T09:55:36.682-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
cert_and_pkey ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{<<"-----BEGIN CERTIFICATE-----\nMIIDAjCCAeqgAwIBAgIIFH6f01mhINIwDQYJKoZIhvcNAQELBQAwJDEiMCAGA1UE\nAxMZQ291Y2hiYXNlIFNlcnZlciAxMzBiNDVmMzAeFw0xMzAxMDEwMDAwMDBaFw00\nOTEyMzEyMzU5NTlaMCQxIjAgBgNVBAMTGUNvdWNoYmFzZSBTZXJ2ZXIgMTMwYjQ1\nZjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDINaiYz/zhTeD2h8Pa\nP015LetKMhey1yoE2L5H1wHK7pADeFRfqeoNunEvlxRWL/YjnqvRZPxrjdadlh7L\nVhZVke2blopHdhJjaHCvdI8R3BRBK4f"...>>,
<<"*****">>}]
[ns_server:debug,2016-10-19T09:55:36.682-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
cluster_compat_version ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{6,63644012660}}]},4,6]
[ns_server:debug,2016-10-19T09:55:36.682-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
drop_request_memory_threshold_mib ->
undefined
[error_logger:info,2016-10-19T09:55:36.682-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.224.0>},
{name,vbucket_map_mirror},
{mfargs,{vbucket_map_mirror,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.682-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
email_alerts ->
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,
auto_failover_disabled,ip,disk,overhead,ep_oom_errors,
ep_item_commit_failed,audit_dropped_events,indexer_ram_max_usage]}]
[ns_server:debug,2016-10-19T09:55:36.682-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
fts_memory_quota ->
512
[ns_server:debug,2016-10-19T09:55:36.682-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
goxdcr_upgrade ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
'_deleted']
[ns_server:debug,2016-10-19T09:55:36.682-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
index_aware_rebalance_disabled ->
false
[ns_server:debug,2016-10-19T09:55:36.682-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
max_bucket_count ->
10
[ns_server:debug,2016-10-19T09:55:36.682-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
memcached ->
[]
[ns_server:debug,2016-10-19T09:55:36.682-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
memory_quota ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]}|1024]
[ns_server:debug,2016-10-19T09:55:36.683-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
nodes_wanted ->
['ns_1@127.0.0.1']
[ns_server:debug,2016-10-19T09:55:36.683-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
otp ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]},
{cookie,oxqibayfkfbrogxo}]
[ns_server:debug,2016-10-19T09:55:36.683-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
read_only_user_creds ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|null]
[ns_server:debug,2016-10-19T09:55:36.683-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
remote_clusters ->
[]
[ns_server:debug,2016-10-19T09:55:36.683-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
replication ->
[{enabled,true}]
[ns_server:debug,2016-10-19T09:55:36.683-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
rest ->
[{port,8091}]
[ns_server:debug,2016-10-19T09:55:36.683-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
rest_creds ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012784}}]}|
{"Administrator",{password,"*****"}}]
[ns_server:debug,2016-10-19T09:55:36.683-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
roles_definitions ->
[{admin,[],
[{name,<<"Admin">>},
{desc,<<"Can manage ALL cluster features including security.">>}],
[{[],all}]},
{ro_admin,[],
[{name,<<"Read Only Admin">>},
{desc,<<"Can view ALL cluster features.">>}],
[{[{bucket,any},password],none},
{[{bucket,any},data],none},
{[admin,security],[read]},
{[admin],none},
{[],[read]}]},
{cluster_admin,[],
[{name,<<"Cluster Admin">>},
{desc,<<"Can manage all cluster features EXCEPT security.">>}],
[{[admin],none},{[],all}]},
{bucket_admin,[bucket_name],
[{name,<<"Bucket Admin">>},
{desc,<<"Can manage ALL bucket features for specified buckets (incl. start/stop XDCR)">>}],
[{[{bucket,bucket_name},xdcr],[read,execute]},
{[{bucket,bucket_name}],all},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{bucket_sasl,[bucket_name],
[],
[{[{bucket,bucket_name},data],all},
{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name}],[read,flush]},
{[pools],[read]}]},
{views_admin,[bucket_name],
[{name,<<"Views Admin">>},
{desc,<<"Can manage views for specified buckets">>}],
[{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{replication_admin,[],
[{name,<<"Replication Admin">>},
{desc,<<"Can manage ONLY XDCR features (cluster AND bucket level)">>}],
[{[{bucket,any},xdcr],all},
{[{bucket,any},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],all},
{[admin],none},
{[],[read]}]}]
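Each role above is a 4-tuple {Name, ParamNames, Metadata, Rules}, where Rules is an ordered list of {ObjectPath, AllowedOperations} and the first rule whose path is a prefix of the requested object decides the outcome (all and none are shorthands). A simplified sketch of evaluating such a rule list; the real menelaus_roles logic also binds parameters like bucket_name, which is omitted here:

    %% Hedged, simplified permission check over a role's rule list.
    is_allowed(_Path, _Op, []) -> false;
    is_allowed(Path, Op, [{RulePath, Allowed} | Rest]) ->
        case lists:prefix(RulePath, Path) of
            true when Allowed =:= all  -> true;
            true when Allowed =:= none -> false;
            true                       -> lists:member(Op, Allowed);
            false                      -> is_allowed(Path, Op, Rest)
        end.

For example, against the ro_admin rules above, is_allowed([admin, security], read, Rules) matches the {[admin,security],[read]} rule and returns true, while a write anywhere under [admin] falls through to {[admin],none} and is denied.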
[ns_server:debug,2016-10-19T09:55:36.683-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
server_groups ->
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]
[ns_server:debug,2016-10-19T09:55:36.683-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
set_view_update_daemon ->
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]
[ns_server:debug,2016-10-19T09:55:36.684-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
settings ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012777}}]},
{stats,[{send_stats,false}]}]
[ns_server:debug,2016-10-19T09:55:36.684-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
uuid ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012784}}]}|
<<"561b20f339d4184270a7de0b1c1de1b0">>]
[ns_server:debug,2016-10-19T09:55:36.684-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
vbucket_map_history ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012772}}]},
{[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]],
[{replication_topology,star},{tags,undefined},{max_slaves,10}]}]
[ns_server:debug,2016-10-19T09:55:36.684-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{couchdb,max_parallel_indexers} ->
4
[ns_server:debug,2016-10-19T09:55:36.685-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{couchdb,max_parallel_replica_indexers} ->
2
[ns_server:debug,2016-10-19T09:55:36.685-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{local_changes_count,<<"eac84bf2ecf69c83ca0268ac5aac465d">>} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{35,63644101958}}]}]
[ns_server:debug,2016-10-19T09:55:36.685-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{metakv,<<"/indexing/settings/config">>} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{4,63644012784}}]}|
<<"{\"indexer.settings.compaction.days_of_week\":\"Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday\",\"indexer.settings.compaction.interval\":\"00:00,00:00\",\"indexer.settings.compaction.compaction_mode\":\"circular\",\"indexer.settings.persisted_snapshot.interval\":5000,\"indexer.settings.log_level\":\"info\",\"indexer.settings.compaction.min_frag\":30,\"indexer.settings.inmemory_snapshot.interval\""...>>]
[ns_server:debug,2016-10-19T09:55:36.685-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{request_limit,capi} ->
undefined
[ns_server:debug,2016-10-19T09:55:36.685-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{request_limit,rest} ->
undefined
[ns_server:debug,2016-10-19T09:55:36.685-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{service_map,fts} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}]
[ns_server:debug,2016-10-19T09:55:36.685-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{service_map,index} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']
[ns_server:debug,2016-10-19T09:55:36.685-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{service_map,n1ql} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']
[ns_server:debug,2016-10-19T09:55:36.685-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',audit} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]
[ns_server:debug,2016-10-19T09:55:36.685-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',capi_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|8092]
[ns_server:debug,2016-10-19T09:55:36.685-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',compaction_daemon} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{check_interval,30},
{min_db_file_size,131072},
{min_view_file_size,20971520}]
[ns_server:debug,2016-10-19T09:55:36.685-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',config_version} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|{4,5}]
[ns_server:debug,2016-10-19T09:55:36.686-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',fts_http_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|8094]
[ns_server:debug,2016-10-19T09:55:36.686-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_admin_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9100]
[ns_server:debug,2016-10-19T09:55:36.686-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_http_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9102]
[ns_server:debug,2016-10-19T09:55:36.686-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_scan_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9101]
[ns_server:debug,2016-10-19T09:55:36.686-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_stcatchup_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9104]
[ns_server:debug,2016-10-19T09:55:36.686-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_stinit_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9103]
[ns_server:debug,2016-10-19T09:55:36.686-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_stmaint_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9105]
[ns_server:debug,2016-10-19T09:55:36.686-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',is_enterprise} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|true]
[ns_server:debug,2016-10-19T09:55:36.686-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',isasl} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2016-10-19T09:55:36.686-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ldap_enabled} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|false]
[ns_server:debug,2016-10-19T09:55:36.686-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',membership} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
active]
[ns_server:debug,2016-10-19T09:55:36.686-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',memcached} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11210},
{dedicated_port,11209},
{ssl_port,11207},
{admin_user,"_admin"},
{admin_pass,"*****"},
{engines,[{membase,[{engine,"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/ep.so"},
{static_config_string,"failpartialwarmup=false"}]},
{memcached,[{engine,"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{config_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached.json"},
{audit_file,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/audit.json"},
{log_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003}]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',memcached_config} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{[{interfaces,
{memcached_config_mgr,omit_missing_mcd_ports,
[{[{host,<<"*">>},{port,port},{maxconn,maxconn}]},
{[{host,<<"*">>},
{port,dedicated_port},
{maxconn,dedicated_port_maxconn}]},
{[{host,<<"*">>},
{port,ssl_port},
{maxconn,maxconn},
{ssl,
{[{key,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-key.pem">>},
{cert,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-cert.pem">>}]}}]}]}},
{ssl_cipher_list,{"~s",[ssl_cipher_list]}},
{ssl_minimum_protocol,{memcached_config_mgr,ssl_minimum_protocol,[]}},
{connection_idle_time,connection_idle_time},
{breakpad,
{[{enabled,breakpad_enabled},
{minidump_dir,{memcached_config_mgr,get_minidump_dir,[]}}]}},
{extensions,
[{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/stdin_term_handler.so">>},
{config,<<>>}]},
{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/file_logger.so">>},
{config,
{"cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]}}]}]},
{admin,{"~s",[admin_user]}},
{verbosity,verbosity},
{audit_file,{"~s",[audit_file]}},
{dedupe_nmvb_maps,dedupe_nmvb_maps}]}]
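memcached_config above is a template rather than final JSON: bare atoms such as maxconn or verbosity are looked up in memcached_defaults and the node's memcached settings, {Format, Args} pairs such as {"~s",[admin_user]} are rendered with io_lib:format, and {M,F,A} triples such as {memcached_config_mgr,get_minidump_dir,[]} are called at generation time, before memcached.json is written. A hedged sketch of the first two expansion rules (the real memcached_config_mgr resolver is richer):

    %% Simplified template expansion; {M,F,A} indirection intentionally omitted.
    expand({Fmt, Args}, Params) when is_list(Fmt), is_list(Args) ->
        iolist_to_binary(io_lib:format(Fmt, [expand(A, Params) || A <- Args]));
    expand(Key, Params) when is_atom(Key) ->
        case proplists:get_value(Key, Params) of
            undefined -> Key;        % leave unknown atoms untouched
            Value     -> Value
        end;
    expand(Other, _Params) ->
        Other.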
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',memcached_defaults} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{maxconn,30000},
{dedicated_port_maxconn,5000},
{ssl_cipher_list,"HIGH"},
{connection_idle_time,0},
{verbosity,0},
{breakpad_enabled,true},
{breakpad_minidump_dir_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/crash"},
{dedupe_nmvb_maps,false}]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',moxi} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11211},
{verbosity,[]}]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ns_log} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{filename,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',port_servers} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',projector_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9999]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',query_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|8093]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',rest} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,8091},
{port_meta,global}]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',services} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]},
index,kv,n1ql]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_capi_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|18092]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|11214]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|11215]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_query_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|18093]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_rest_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|18091]
[ns_server:debug,2016-10-19T09:55:36.687-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',stop_xdcr} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012660}}]}|
'_deleted']
[ns_server:debug,2016-10-19T09:55:36.688-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',uuid} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
<<"eac84bf2ecf69c83ca0268ac5aac465d">>]
[ns_server:debug,2016-10-19T09:55:36.688-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',xdcr_rest_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9998]
[ns_server:debug,2016-10-19T09:55:36.693-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2016-10-19T09:55:36.693-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2016-10-19T09:55:36.693-07:00,ns_1@127.0.0.1:<0.220.0>:ns_node_disco:do_nodes_wanted_updated_fun:224]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[ns_server:debug,2016-10-19T09:55:36.693-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2016-10-19T09:55:36.693-07:00,ns_1@127.0.0.1:<0.220.0>:ns_node_disco:do_nodes_wanted_updated_fun:230]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[error_logger:info,2016-10-19T09:55:36.694-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.226.0>},
{name,bucket_info_cache},
{mfargs,{bucket_info_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.694-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.229.0>},
{name,ns_tick_event},
{mfargs,{gen_event,start_link,[{local,ns_tick_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.694-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.230.0>},
{name,buckets_events},
{mfargs,
{gen_event,start_link,[{local,buckets_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
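ns_tick_event and buckets_events are bare gen_event managers: producers publish into them with gen_event:notify/2 and interested subsystems attach handler modules. A minimal sketch of a subscriber (the module name is made up; only the manager names come from the log):

    %% Hypothetical handler for the buckets_events manager started above.
    -module(my_buckets_listener).
    -behaviour(gen_event).
    -export([init/1, handle_event/2, handle_call/2, handle_info/2,
             terminate/2, code_change/3]).

    init([]) -> {ok, #{}}.

    handle_event(Event, State) ->
        error_logger:info_msg("bucket event: ~p~n", [Event]),
        {ok, State}.

    handle_call(_Req, State)  -> {ok, ok, State}.
    handle_info(_Info, State) -> {ok, State}.
    terminate(_Reason, _State) -> ok.
    code_change(_Old, State, _Extra) -> {ok, State}.

Attaching it is a one-liner: gen_event:add_handler(buckets_events, my_buckets_listener, []).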
[ns_server:debug,2016-10-19T09:55:36.706-07:00,ns_1@127.0.0.1:ns_log_events<0.208.0>:ns_mail_log:init:44]ns_mail_log started up
[ns_server:debug,2016-10-19T09:55:36.706-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[error_logger:info,2016-10-19T09:55:36.706-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_mail_sup}
started: [{pid,<0.232.0>},
{name,ns_mail_log},
{mfargs,{ns_mail_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.706-07:00,ns_1@127.0.0.1:<0.221.0>:ns_node_disco:do_nodes_wanted_updated_fun:224]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[ns_server:debug,2016-10-19T09:55:36.706-07:00,ns_1@127.0.0.1:<0.221.0>:ns_node_disco:do_nodes_wanted_updated_fun:230]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[error_logger:info,2016-10-19T09:55:36.706-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.231.0>},
{name,ns_mail_sup},
{mfargs,{ns_mail_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:36.706-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.233.0>},
{name,ns_stats_event},
{mfargs,
{gen_event,start_link,[{local,ns_stats_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.707-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.234.0>},
{name,samples_loader_tasks},
{mfargs,{samples_loader_tasks,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.710-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_heart_sup}
started: [{pid,<0.236.0>},
{name,ns_heart},
{mfargs,{ns_heart,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.710-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_heart_sup}
started: [{pid,<0.239.0>},
{name,ns_heart_slow_updater},
{mfargs,{ns_heart,start_link_slow_updater,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.710-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.235.0>},
{name,ns_heart_sup},
{mfargs,{ns_heart_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:36.711-07:00,ns_1@127.0.0.1:ns_heart<0.236.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "@system" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-@system-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]},
{ns_heart,handle_info,2,
[{file,"src/ns_heart.erl"},{line,118}]}]}}
[ns_server:debug,2016-10-19T09:55:36.712-07:00,ns_1@127.0.0.1:ns_heart<0.236.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "@system-processes" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-@system-processes-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]}]}}
[error_logger:info,2016-10-19T09:55:36.713-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_doctor_sup}
started: [{pid,<0.242.0>},
{name,ns_doctor_events},
{mfargs,
{gen_event,start_link,[{local,ns_doctor_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.717-07:00,ns_1@127.0.0.1:<0.240.0>:restartable:start_child:98]Started child process <0.241.0>
MFA: {ns_doctor_sup,start_link,[]}
[error_logger:info,2016-10-19T09:55:36.717-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_doctor_sup}
started: [{pid,<0.243.0>},
{name,ns_doctor},
{mfargs,{ns_doctor,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.717-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.240.0>},
{name,ns_doctor_sup},
{mfargs,
{restartable,start_link,
[{ns_doctor_sup,start_link,[]},infinity]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:36.729-07:00,ns_1@127.0.0.1:ns_heart<0.236.0>:ns_heart:grab_index_status:373]ignoring failure to get index status: {exit,
{noproc,
{gen_server,call,
['index_status_keeper-index',
get_status,2000]}}}
[{gen_server,call,3,[{file,"gen_server.erl"},{line,188}]},
{ns_heart,grab_index_status,0,[{file,"src/ns_heart.erl"},{line,370}]},
{ns_heart,current_status_slow_inner,0,[{file,"src/ns_heart.erl"},{line,280}]},
{ns_heart,current_status_slow,1,[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,[{file,"src/ns_heart.erl"},{line,186}]},
{ns_heart,handle_info,2,[{file,"src/ns_heart.erl"},{line,118}]},
{gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,604}]},
{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,239}]}]
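[Editor's note] The noproc exit above simply means the registered name 'index_status_keeper-index' is not alive yet when gen_server:call/3 runs, so the heartbeat treats it as "no index status". A sketch of tolerating that condition (the function name and fallback value are illustrative):

    %% gen_server:call/3 to a name that is not registered exits with
    %% {noproc, ...}; trap the exit and degrade instead of crashing.
    get_index_status() ->
        try
            gen_server:call('index_status_keeper-index', get_status, 2000)
        catch
            exit:{noproc, _} -> {error, not_started}
        end.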
[ns_server:debug,2016-10-19T09:55:36.730-07:00,ns_1@127.0.0.1:ns_heart<0.236.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "test" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-test-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]}]}}
[ns_server:debug,2016-10-19T09:55:36.730-07:00,ns_1@127.0.0.1:ns_heart<0.236.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "locked" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-locked-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]}]}}
[ns_server:debug,2016-10-19T09:55:36.730-07:00,ns_1@127.0.0.1:ns_heart<0.236.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "default" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-default-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]}]}}
[error_logger:info,2016-10-19T09:55:36.733-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.248.0>},
{name,disk_log_sup},
{mfargs,{disk_log_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:36.733-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.249.0>},
{name,disk_log_server},
{mfargs,{disk_log_server,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.737-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.246.0>},
{name,remote_clusters_info},
{mfargs,{remote_clusters_info,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.737-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.252.0>},
{name,master_activity_events},
{mfargs,
{gen_event,start_link,
[{local,master_activity_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.739-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.253.0>},
{name,xdcr_ckpt_store},
{mfargs,{simple_store,start_link,[xdcr_ckpt_data]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.739-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.254.0>},
{name,metakv_worker},
{mfargs,{work_queue,start_link,[metakv_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.739-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.255.0>},
{name,index_events},
{mfargs,{gen_event,start_link,[{local,index_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.741-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.256.0>},
{name,index_settings_manager},
{mfargs,{index_settings_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.743-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.259.0>},
{name,menelaus_ui_auth},
{mfargs,{menelaus_ui_auth,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.746-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.261.0>},
{name,menelaus_web_cache},
{mfargs,{menelaus_web_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.749-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.263.0>},
{name,menelaus_stats_gatherer},
{mfargs,{menelaus_stats_gatherer,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.749-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.264.0>},
{name,json_rpc_events},
{mfargs,
{gen_event,start_link,[{local,json_rpc_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:36.751-07:00,ns_1@127.0.0.1:menelaus_sup<0.258.0>:menelaus_pluggable_ui:validate_plugin_spec:117]Loaded pluggable UI specification for fts
[ns_server:info,2016-10-19T09:55:36.752-07:00,ns_1@127.0.0.1:menelaus_sup<0.258.0>:menelaus_pluggable_ui:validate_plugin_spec:117]Loaded pluggable UI specification for n1ql
[error_logger:info,2016-10-19T09:55:36.752-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.265.0>},
{name,menelaus_web},
{mfargs,{menelaus_web,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.753-07:00,ns_1@127.0.0.1:ns_heart<0.236.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily unavailable. Returning an empty list.
[error_logger:info,2016-10-19T09:55:36.754-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.282.0>},
{name,menelaus_event},
{mfargs,{menelaus_event,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.755-07:00,ns_1@127.0.0.1:ns_heart<0.236.0>:cluster_logs_collection_task:maybe_build_cluster_logs_task:43]Ignoring exception trying to read cluster_logs_collection_task_status table: error:badarg
[error_logger:info,2016-10-19T09:55:36.756-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.284.0>},
{name,hot_keys_keeper},
{mfargs,{hot_keys_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.758-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.285.0>},
{name,menelaus_web_alerts_srv},
{mfargs,{menelaus_web_alerts_srv,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.760-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.286.0>},
{name,menelaus_cbauth},
{mfargs,{menelaus_cbauth,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[user:info,2016-10-19T09:55:36.764-07:00,ns_1@127.0.0.1:ns_server_sup<0.198.0>:menelaus_sup:start_link:46]Couchbase Server has started on web port 8091 on node 'ns_1@127.0.0.1'. Version: "4.6.0-3391-enterprise".
[error_logger:info,2016-10-19T09:55:36.764-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.258.0>},
{name,menelaus},
{mfargs,{menelaus_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:36.764-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.290.0>},
{name,ns_ports_setup},
{mfargs,{ns_ports_setup,start,[]}},
{restart_type,{permanent,4}},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.768-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,service_agent_sup}
started: [{pid,<0.295.0>},
{name,service_agent_children_sup},
{mfargs,
{supervisor,start_link,
[{local,service_agent_children_sup},
service_agent_sup,child]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:36.768-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,service_agent_sup}
started: [{pid,<0.296.0>},
{name,service_agent_worker},
{mfargs,
{erlang,apply,
[#Fun,[]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.769-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.294.0>},
{name,service_agent_sup},
{mfargs,{service_agent_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:36.774-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.298.0>},
{name,ns_memcached_sockets_pool},
{mfargs,{ns_memcached_sockets_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.774-07:00,ns_1@127.0.0.1:ns_audit_cfg<0.299.0>:ns_audit_cfg:write_audit_json:158]Writing new content to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/audit.json" :
[{auditd_enabled,false},
 {disabled,[]},
 {log_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
 {rotate_interval,86400},
 {rotate_size,20971520},
 {sync,[]},
 {version,1},
 {descriptors_path,"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/security"}]
[ns_server:debug,2016-10-19T09:55:36.778-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.239.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "@system" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-@system-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]},
{proc_lib,init_p_do_apply,3,
[{file,"proc_lib.erl"},{line,239}]}]}}
[ns_server:debug,2016-10-19T09:55:36.778-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.239.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "@system-processes" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-@system-processes-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]}]}}
[ns_server:debug,2016-10-19T09:55:36.778-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.239.0>:ns_heart:grab_index_status:373]ignoring failure to get index status: {exit,
{noproc,
{gen_server,call,
['index_status_keeper-index',
get_status,2000]}}}
[{gen_server,call,3,[{file,"gen_server.erl"},{line,188}]},
{ns_heart,grab_index_status,0,[{file,"src/ns_heart.erl"},{line,370}]},
{ns_heart,current_status_slow_inner,0,[{file,"src/ns_heart.erl"},{line,280}]},
{ns_heart,current_status_slow,1,[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,[{file,"src/ns_heart.erl"},{line,243}]},
{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,239}]}]
[ns_server:debug,2016-10-19T09:55:36.778-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.239.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "test" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-test-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]}]}}
[ns_server:debug,2016-10-19T09:55:36.778-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.239.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "locked" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-locked-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]}]}}
[ns_server:debug,2016-10-19T09:55:36.779-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.239.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "default" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-default-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]}]}}
[ns_server:debug,2016-10-19T09:55:36.779-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.239.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily unavailable. Returning an empty list.
[ns_server:debug,2016-10-19T09:55:36.779-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.239.0>:cluster_logs_collection_task:maybe_build_cluster_logs_task:43]Ignoring exception trying to read cluster_logs_collection_task_status table: error:badarg
[ns_server:debug,2016-10-19T09:55:36.782-07:00,ns_1@127.0.0.1:ns_ports_setup<0.290.0>:ns_ports_manager:set_dynamic_children:54]Setting children [memcached,moxi,projector,indexer,query,saslauthd_port,
goxdcr,xdcr_proxy]
[ns_server:debug,2016-10-19T09:55:36.788-07:00,ns_1@127.0.0.1:ns_audit_cfg<0.299.0>:ns_audit_cfg:handle_info:107]Instruct memcached to reload audit config
[error_logger:info,2016-10-19T09:55:36.788-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.299.0>},
{name,ns_audit_cfg},
{mfargs,{ns_audit_cfg,start_link,[]}},
{restart_type,{permanent,4}},
{shutdown,1000},
{child_type,worker}]
[user:debug,2016-10-19T09:55:36.789-07:00,ns_1@127.0.0.1:<0.204.0>:ns_log:crash_consumption_loop:70]Service 'moxi' exited with status 0. Restarting. Messages: ERROR: could not contact REST server(s): http://127.0.0.1:8091/pools/default/saslBucketsStreaming
WARNING: curl error: Failed to connect to 127.0.0.1 port 8091: Connection refused from: http://127.0.0.1:8091/pools/default/saslBucketsStreaming
ERROR: could not contact REST server(s): http://127.0.0.1:8091/pools/default/saslBucketsStreaming
WARNING: curl error: Failed to connect to 127.0.0.1 port 8091: Connection refused from: http://127.0.0.1:8091/pools/default/saslBucketsStreaming
EOL on stdin. Exiting
[ns_server:debug,2016-10-19T09:55:36.790-07:00,ns_1@127.0.0.1:ns_ports_setup<0.290.0>:ns_ports_setup:set_children:72]Monitor ns_child_ports_sup <11624.75.0>
[ns_server:debug,2016-10-19T09:55:36.791-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.312.0>:memcached_config_mgr:init:44]waiting for completion of initial ns_ports_setup round
[error_logger:info,2016-10-19T09:55:36.791-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.312.0>},
{name,memcached_config_mgr},
{mfargs,{memcached_config_mgr,start_link,[]}},
{restart_type,{permanent,4}},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.791-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.312.0>:memcached_config_mgr:init:46]ns_ports_setup seems to be ready
[ns_server:info,2016-10-19T09:55:36.793-07:00,ns_1@127.0.0.1:<0.313.0>:ns_memcached_log_rotator:init:28]Starting log rotator on "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"/"memcached.log"* with an initial period of 39003ms
[error_logger:info,2016-10-19T09:55:36.793-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.313.0>},
{name,ns_memcached_log_rotator},
{mfargs,{ns_memcached_log_rotator,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.794-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.312.0>:memcached_config_mgr:find_port_pid_loop:119]Found memcached port <11624.81.0>
[ns_server:debug,2016-10-19T09:55:36.795-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.312.0>:memcached_config_mgr:do_read_current_memcached_config:251]Got enoent while trying to read active memcached config from /Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached.json.prev
[ns_server:debug,2016-10-19T09:55:36.795-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.312.0>:memcached_config_mgr:init:83]found memcached port to be already active
[error_logger:info,2016-10-19T09:55:36.796-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.316.0>},
{name,memcached_clients_pool},
{mfargs,{memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.799-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.317.0>},
{name,proxied_memcached_clients_pool},
{mfargs,{proxied_memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.799-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.318.0>},
{name,xdc_lhttpc_pool},
{mfargs,
{lhttpc_manager,start_link,
[[{name,xdc_lhttpc_pool},
{connection_timeout,120000},
{pool_size,200}]]}},
{restart_type,{permanent,1}},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.800-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.319.0>},
{name,ns_null_connection_pool},
{mfargs,
{ns_null_connection_pool,start_link,
[ns_null_connection_pool]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.803-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.320.0>,xdcr_sup}
started: [{pid,<0.321.0>},
{name,xdc_stats_holder},
{mfargs,
{proc_lib,start_link,
[xdcr_sup,link_stats_holder_body,[]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.805-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.320.0>,xdcr_sup}
started: [{pid,<0.323.0>},
{name,xdc_replication_sup},
{mfargs,{xdc_replication_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:36.808-07:00,ns_1@127.0.0.1:xdc_rep_manager<0.324.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:36.808-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.320.0>,xdcr_sup}
started: [{pid,<0.324.0>},
{name,xdc_rep_manager},
{mfargs,{xdc_rep_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,30000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.812-07:00,ns_1@127.0.0.1:xdcr_doc_replicator<0.326.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[ns_server:debug,2016-10-19T09:55:36.812-07:00,ns_1@127.0.0.1:xdc_rdoc_replication_srv<0.327.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:36.812-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.320.0>,xdcr_sup}
started: [{pid,<0.326.0>},
{name,xdc_rdoc_replicator},
{mfargs,{doc_replicator,start_link_xdcr,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.813-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.320.0>,xdcr_sup}
started: [{pid,<0.327.0>},
{name,xdc_rdoc_replication_srv},
{mfargs,{doc_replication_srv,start_link_xdcr,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.814-07:00,ns_1@127.0.0.1:<0.320.0>:xdc_rdoc_manager:start_link_remote:42]Starting xdc_rdoc_manager on 'couchdb_ns_1@127.0.0.1' with following links: [<0.326.0>,
<0.327.0>,
<0.324.0>]
[ns_server:debug,2016-10-19T09:55:36.816-07:00,ns_1@127.0.0.1:xdcr_doc_replicator<0.326.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.255.0>
[ns_server:debug,2016-10-19T09:55:36.816-07:00,ns_1@127.0.0.1:xdc_rdoc_replication_srv<0.327.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.255.0>
[ns_server:debug,2016-10-19T09:55:36.816-07:00,ns_1@127.0.0.1:xdc_rep_manager<0.324.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.255.0>
[error_logger:info,2016-10-19T09:55:36.816-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.320.0>,xdcr_sup}
started: [{pid,<11625.255.0>},
{name,xdc_rdoc_manager},
{mfargs,
{xdc_rdoc_manager,start_link_remote,
['couchdb_ns_1@127.0.0.1']}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.817-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.320.0>},
{name,xdcr_sup},
{mfargs,{xdcr_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:36.818-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.329.0>},
{name,xdcr_dcp_sockets_pool},
{mfargs,{xdcr_dcp_sockets_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.819-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.331.0>},
{name,ns_bucket_worker},
{mfargs,{work_queue,start_link,[ns_bucket_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.825-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.333.0>},
{name,buckets_observing_subscription},
{mfargs,{ns_bucket_sup,subscribe_on_config_events,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.825-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.332.0>},
{name,ns_bucket_sup},
{mfargs,{ns_bucket_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:36.825-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.331.0>:ns_bucket_sup:update_children:110]Starting new child: {{docs_sup,"test"},
{docs_sup,start_link,["test"]},
permanent,infinity,supervisor,
[docs_sup]}
[error_logger:info,2016-10-19T09:55:36.825-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.330.0>},
{name,ns_bucket_worker_sup},
{mfargs,{ns_bucket_worker_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:36.829-07:00,ns_1@127.0.0.1:xdcr_doc_replicator<0.326.0>:doc_replicator:loop:64]doing replicate_newnodes_docs
[error_logger:info,2016-10-19T09:55:36.830-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.334.0>},
{name,system_stats_collector},
{mfargs,{system_stats_collector,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.831-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.338.0>},
{name,{stats_archiver,"@system"}},
{mfargs,{stats_archiver,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.840-07:00,ns_1@127.0.0.1:capi_doc_replicator-test<0.341.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[ns_server:debug,2016-10-19T09:55:36.840-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-test<0.342.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:36.840-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.340.0>,docs_sup}
started: [{pid,<0.341.0>},
{name,doc_replicator},
{mfargs,{doc_replicator,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.840-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.340.0>,docs_sup}
started: [{pid,<0.342.0>},
{name,doc_replication_srv},
{mfargs,{doc_replication_srv,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.843-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.343.0>},
{name,{stats_reader,"@system"}},
{mfargs,{stats_reader,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.843-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.344.0>},
{name,{stats_archiver,"@system-processes"}},
{mfargs,
{stats_archiver,start_link,["@system-processes"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.843-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.346.0>},
{name,{stats_reader,"@system-processes"}},
{mfargs,
{stats_reader,start_link,["@system-processes"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.847-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.347.0>},
{name,{stats_archiver,"@query"}},
{mfargs,{stats_archiver,start_link,["@query"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.848-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.349.0>},
{name,{stats_reader,"@query"}},
{mfargs,{stats_reader,start_link,["@query"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.849-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-test'}
started: [{pid,<11625.263.0>},
{name,capi_ddoc_manager_events},
{mfargs,
{capi_ddoc_manager,start_link_event_manager,
["test"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.850-07:00,ns_1@127.0.0.1:capi_doc_replicator-test<0.341.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.264.0>
[ns_server:debug,2016-10-19T09:55:36.850-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-test<0.342.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.264.0>
[ns_server:debug,2016-10-19T09:55:36.850-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.331.0>:ns_bucket_sup:update_children:110]Starting new child: {{single_bucket_kv_sup,"test"},
{single_bucket_kv_sup,start_link,["test"]},
permanent,infinity,supervisor,
[single_bucket_kv_sup]}
[error_logger:info,2016-10-19T09:55:36.850-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.340.0>,docs_sup}
started: [{pid,<11625.262.0>},
{name,capi_ddoc_manager_sup},
{mfargs,
{capi_ddoc_manager_sup,start_link_remote,
['couchdb_ns_1@127.0.0.1',"test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:36.850-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.340.0>},
{name,{docs_sup,"test"}},
{mfargs,{docs_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:36.850-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-test'}
started: [{pid,<11625.264.0>},
{name,capi_ddoc_manager},
{mfargs,
{capi_ddoc_manager,start_link,
["test",<0.341.0>,<0.342.0>]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:36.863-07:00,ns_1@127.0.0.1:capi_doc_replicator-test<0.341.0>:doc_replicator:loop:64]doing replicate_newnodes_docs
[error_logger:info,2016-10-19T09:55:36.869-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.351.0>},
{name,query_stats_collector},
{mfargs,{query_stats_collector,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.869-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.353.0>},
{name,{stats_archiver,"@global"}},
{mfargs,{stats_archiver,start_link,["@global"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.869-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.355.0>},
{name,{stats_reader,"@global"}},
{mfargs,{stats_reader,start_link,["@global"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:36.986-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.357.0>},
{name,global_stats_collector},
{mfargs,{global_stats_collector,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.014-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.360.0>},
{name,goxdcr_status_keeper},
{mfargs,{goxdcr_status_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.014-07:00,ns_1@127.0.0.1:goxdcr_status_keeper<0.360.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily unavailable. Returning an empty list.
[ns_server:debug,2016-10-19T09:55:37.015-07:00,ns_1@127.0.0.1:goxdcr_status_keeper<0.360.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily unavailable. Returning an empty list.
[error_logger:info,2016-10-19T09:55:37.024-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_sup}
started: [{pid,<0.364.0>},
{name,index_stats_children_sup},
{mfargs,
{supervisor,start_link,
[{local,index_stats_children_sup},
index_stats_sup,child]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.024-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.359.0>,docs_kv_sup}
started: [{pid,<11625.272.0>},
{name,capi_set_view_manager},
{mfargs,
{capi_set_view_manager,start_link_remote,
['couchdb_ns_1@127.0.0.1',"test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.026-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_status_keeper_sup}
started: [{pid,<0.366.0>},
{name,index_status_keeper_worker},
{mfargs,
{work_queue,start_link,
[index_status_keeper_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.026-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_status_keeper_sup}
started: [{pid,<0.367.0>},
{name,index_status_keeper},
{mfargs,{indexer_gsi,start_keeper,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:error,2016-10-19T09:55:37.031-07:00,ns_1@127.0.0.1:index_status_keeper_worker<0.366.0>:index_rest:get_json:42]Request to (indexer) http://127.0.0.1:9102/getIndexStatus failed:
{error,{econnrefused,
        [{lhttpc_client,send_request,1,
          [{file,"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
           {line,220}]},
         {lhttpc_client,execute,9,
          [{file,"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
           {line,169}]},
         {lhttpc_client,request,9,
          [{file,"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
           {line,92}]}]}}
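[Editor's note] The econnrefused above is a plain TCP connect refusal: the GSI indexer's REST endpoint on port 9102 is not listening yet when the status keeper first polls it. A sketch of the kind of call that produces this error, assuming lhttpc's request(URL, Method, Headers, Timeout) form (the fallback atom is illustrative):

    %% Poll the indexer status endpoint; before the indexer is listening,
    %% the connect fails with econnrefused and the keeper retries later.
    case lhttpc:request("http://127.0.0.1:9102/getIndexStatus", "GET", [], 2000) of
        {ok, {{200, _Reason}, _Headers, Body}} -> {ok, Body};
        {error, econnrefused}                  -> {error, indexer_not_up}
    end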
[error_logger:info,2016-10-19T09:55:37.032-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_status_keeper_sup}
started: [{pid,<0.371.0>},
{name,index_status_keeper_fts},
{mfargs,{indexer_fts,start_keeper,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.033-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_sup}
started: [{pid,<0.365.0>},
{name,index_status_keeper_sup},
{mfargs,{index_status_keeper_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.033-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_sup}
started: [{pid,<0.374.0>},
{name,index_stats_worker},
{mfargs,
{erlang,apply,
[#Fun,[]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.033-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.363.0>},
{name,index_stats_sup},
{mfargs,{index_stats_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.036-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.359.0>,docs_kv_sup}
started: [{pid,<11625.275.0>},
{name,couch_stats_reader},
{mfargs,
{couch_stats_reader,start_link_remote,
['couchdb_ns_1@127.0.0.1',"test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.036-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.359.0>},
{name,{docs_kv_sup,"test"}},
{mfargs,{docs_kv_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.036-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.376.0>},
{name,{indexer_gsi,index_stats_collector}},
{mfargs,
{index_stats_collector,start_link,[indexer_gsi]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.036-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.379.0>},
{name,{indexer_gsi,stats_archiver,"@index"}},
{mfargs,{stats_archiver,start_link,["@index"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.037-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.381.0>},
{name,{indexer_gsi,stats_archiver,"default"}},
{mfargs,{stats_archiver,start_link,["@index-default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.041-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.384.0>},
{name,compaction_daemon},
{mfargs,{compaction_daemon,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.042-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.383.0>},
{name,{indexer_gsi,stats_archiver,"locked"}},
{mfargs,{stats_archiver,start_link,["@index-locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.047-07:00,ns_1@127.0.0.1:ns_memcached-test<0.388.0>:ns_memcached:init:167]Starting ns_memcached
[ns_server:debug,2016-10-19T09:55:37.047-07:00,ns_1@127.0.0.1:<0.389.0>:ns_memcached:run_connect_phase:190]Started 'connecting' phase of ns_memcached-test. Parent is <0.388.0>
[error_logger:info,2016-10-19T09:55:37.047-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.387.0>,ns_memcached_sup}
started: [{pid,<0.388.0>},
{name,{ns_memcached,"test"}},
{mfargs,{ns_memcached,start_link,["test"]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.049-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.386.0>},
{name,{indexer_gsi,stats_archiver,"test"}},
{mfargs,{stats_archiver,start_link,["@index-test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.049-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.391.0>},
{name,{indexer_gsi,stats_reader,"@index"}},
{mfargs,{stats_reader,start_link,["@index"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.049-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.392.0>},
{name,{indexer_gsi,stats_reader,"default"}},
{mfargs,{stats_reader,start_link,["@index-default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:37.049-07:00,ns_1@127.0.0.1:ns_memcached-test<0.388.0>:ns_memcached:handle_cast:689]Main ns_memcached connection established: {ok,#Port<0.6527>}
[error_logger:info,2016-10-19T09:55:37.049-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.393.0>},
{name,{indexer_gsi,stats_reader,"locked"}},
{mfargs,{stats_reader,start_link,["@index-locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.050-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.394.0>},
{name,{indexer_gsi,stats_reader,"test"}},
{mfargs,{stats_reader,start_link,["@index-test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[user:info,2016-10-19T09:55:37.050-07:00,ns_1@127.0.0.1:ns_memcached-test<0.388.0>:ns_memcached:handle_cast:718]Bucket "test" loaded on node 'ns_1@127.0.0.1' in 0 seconds.
[error_logger:info,2016-10-19T09:55:37.062-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.387.0>,ns_memcached_sup}
started: [{pid,<0.403.0>},
{name,{terse_bucket_info_uploader,"test"}},
{mfargs,
{terse_bucket_info_uploader,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.062-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.387.0>},
{name,{ns_memcached_sup,"test"}},
{mfargs,{ns_memcached_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:37.071-07:00,ns_1@127.0.0.1:<0.405.0>:new_concurrency_throttle:init:113]init concurrent throttle process, pid: <0.405.0>, type: kv_throttle, # of available tokens: 1
[error_logger:info,2016-10-19T09:55:37.073-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.406.0>},
{name,{ns_vbm_sup,"test"}},
{mfargs,{ns_vbm_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:37.075-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.401.0>:compaction_new_daemon:process_scheduler_message:1312]Starting compaction (compact_kv) for the following buckets:
[<<"test">>,<<"locked">>,<<"default">>]
[error_logger:info,2016-10-19T09:55:37.075-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.401.0>},
{name,compaction_new_daemon},
{mfargs,{compaction_new_daemon,start_link,[]}},
{restart_type,{permanent,4}},
{shutdown,86400000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.075-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.401.0>:compaction_new_daemon:process_scheduler_message:1312]Starting compaction (compact_views) for the following buckets:
[<<"test">>,<<"locked">>,<<"default">>]
[ns_server:info,2016-10-19T09:55:37.075-07:00,ns_1@127.0.0.1:<0.407.0>:compaction_new_daemon:spawn_scheduled_kv_compactor:471]Start compaction of vbuckets for bucket test with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:37.076-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.401.0>:compaction_new_daemon:process_scheduler_message:1312]Starting compaction (compact_master) for the following buckets:
[<<"test">>,<<"locked">>,<<"default">>]
[ns_server:info,2016-10-19T09:55:37.076-07:00,ns_1@127.0.0.1:<0.411.0>:compaction_new_daemon:spawn_master_db_compactor:850]Start compaction of master db for bucket test with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:37.076-07:00,ns_1@127.0.0.1:<0.409.0>:compaction_new_daemon:bucket_needs_compaction:972]`test` data size is 84485420, disk size is 89585344
[ns_server:info,2016-10-19T09:55:37.076-07:00,ns_1@127.0.0.1:<0.412.0>:compaction_new_daemon:spawn_scheduled_kv_compactor:471]Start compaction of vbuckets for bucket locked with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:37.076-07:00,ns_1@127.0.0.1:<0.414.0>:compaction_new_daemon:bucket_needs_compaction:978]memcached is not started for bucket <<"locked">> yet
[ns_server:info,2016-10-19T09:55:37.077-07:00,ns_1@127.0.0.1:<0.416.0>:compaction_new_daemon:spawn_scheduled_kv_compactor:471]Start compaction of vbuckets for bucket default with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[error_logger:info,2016-10-19T09:55:37.077-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.415.0>},
{name,{dcp_sup,"test"}},
{mfargs,{dcp_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2016-10-19T09:55:37.077-07:00,ns_1@127.0.0.1:<0.410.0>:compaction_new_daemon:spawn_scheduled_views_compactor:497]Start compaction of indexes for bucket test with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:37.077-07:00,ns_1@127.0.0.1:<0.418.0>:compaction_new_daemon:bucket_needs_compaction:978]memcached is not started for bucket <<"default">> yet
[ns_server:debug,2016-10-19T09:55:37.077-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.401.0>:compaction_new_daemon:process_compactors_exit:1353]Finished compaction iteration.
[ns_server:debug,2016-10-19T09:55:37.077-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.401.0>:compaction_scheduler:schedule_next:60]Finished compaction for compact_kv too soon. Next run will be in 30s
[error_logger:info,2016-10-19T09:55:37.087-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,cluster_logs_sup}
started: [{pid,<0.421.0>},
{name,ets_holder},
{mfargs,
{cluster_logs_collection_task,
start_link_ets_holder,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.087-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.420.0>},
{name,cluster_logs_sup},
{mfargs,{cluster_logs_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2016-10-19T09:55:37.092-07:00,ns_1@127.0.0.1:<0.422.0>:compaction_new_daemon:spawn_master_db_compactor:850]Start compaction of master db for bucket locked with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:info,2016-10-19T09:55:37.092-07:00,ns_1@127.0.0.1:<0.419.0>:compaction_new_daemon:spawn_scheduled_views_compactor:497]Start compaction of indexes for bucket locked with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:info,2016-10-19T09:55:37.092-07:00,ns_1@127.0.0.1:<0.423.0>:compaction_new_daemon:spawn_master_db_compactor:850]Start compaction of master db for bucket default with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[error_logger:info,2016-10-19T09:55:37.092-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.424.0>},
{name,{dcp_replication_manager,"test"}},
{mfargs,{dcp_replication_manager,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.092-07:00,ns_1@127.0.0.1:<0.419.0>:ns_couchdb_api:rpc_couchdb_node:165]RPC to couchdb node failed for {foreach_doc,<<"locked">>,
#Fun,infinity} with {badrpc,
{'EXIT',
{noproc,
{gen_server,
call,
['capi_ddoc_manager-locked',
{foreach_doc,
#Fun},
infinity]}}}}
Stack: [{ns_couchdb_api,rpc_couchdb_node,4,
[{file,"src/ns_couchdb_api.erl"},{line,164}]},
{capi_utils,foreach_live_ddoc_id,2,
[{file,"src/capi_utils.erl"},{line,151}]},
{capi_utils,fetch_ddoc_ids,1,[{file,"src/capi_utils.erl"},{line,144}]},
{compaction_new_daemon,'-spawn_scheduled_views_compactor/2-fun-0-',3,
[{file,"src/compaction_new_daemon.erl"},
{line,500}]},
{proc_lib,init_p,3,[{file,"proc_lib.erl"},{line,224}]}]
[ns_server:error,2016-10-19T09:55:37.093-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.401.0>:compaction_new_daemon:log_compactors_exit:1327]Compactor <0.419.0> exited unexpectedly: {error,
{badrpc,
{'EXIT',
{noproc,
{gen_server,call,
['capi_ddoc_manager-locked',
{foreach_doc,
#Fun},
infinity]}}}}}. Moving to the next bucket.
[error_logger:info,2016-10-19T09:55:37.093-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.425.0>},
{name,{replication_manager,"test"}},
{mfargs,{replication_manager,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:error,2016-10-19T09:55:37.100-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: compaction_new_daemon:-spawn_scheduled_views_compactor/2-fun-0-/0
pid: <0.419.0>
registered_name: []
exception exit: {error,
{badrpc,
{'EXIT',
{noproc,
{gen_server,call,
['capi_ddoc_manager-locked',
{foreach_doc,#Fun},
infinity]}}}}}
in function ns_couchdb_api:rpc_couchdb_node/4 (src/ns_couchdb_api.erl, line 166)
in call from capi_utils:foreach_live_ddoc_id/2 (src/capi_utils.erl, line 151)
in call from capi_utils:fetch_ddoc_ids/1 (src/capi_utils.erl, line 144)
in call from compaction_new_daemon:'-spawn_scheduled_views_compactor/2-fun-0-'/3 (src/compaction_new_daemon.erl, line 500)
ancestors: [compaction_new_daemon,ns_server_sup,ns_server_nodes_sup,
<0.155.0>,ns_server_cluster_sup,<0.88.0>]
messages: []
links: [<0.401.0>]
dictionary: []
trap_exit: false
status: running
heap_size: 4185
stack_size: 27
reductions: 6781
neighbours:
[error_logger:info,2016-10-19T09:55:37.100-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.427.0>},
{name,remote_api},
{mfargs,{remote_api,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:37.105-07:00,ns_1@127.0.0.1:<0.426.0>:compaction_new_daemon:spawn_scheduled_views_compactor:497]Start compaction of indexes for bucket default with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:37.105-07:00,ns_1@127.0.0.1:<0.428.0>:mb_master:check_master_takeover_needed:141]Sending master node question to the following nodes: []
[ns_server:debug,2016-10-19T09:55:37.105-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.401.0>:compaction_new_daemon:process_compactors_exit:1353]Finished compaction iteration.
[ns_server:debug,2016-10-19T09:55:37.105-07:00,ns_1@127.0.0.1:<0.428.0>:mb_master:check_master_takeover_needed:143]Got replies: []
[ns_server:debug,2016-10-19T09:55:37.105-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.401.0>:compaction_scheduler:schedule_next:60]Finished compaction for compact_master too soon. Next run will be in 3600s
[ns_server:debug,2016-10-19T09:55:37.105-07:00,ns_1@127.0.0.1:<0.428.0>:mb_master:check_master_takeover_needed:149]Was unable to discover master, not going to force mastership takeover
[user:info,2016-10-19T09:55:37.105-07:00,ns_1@127.0.0.1:mb_master<0.430.0>:mb_master:init:86]I'm the only node, so I'm the master.
[ns_server:debug,2016-10-19T09:55:37.105-07:00,ns_1@127.0.0.1:<0.426.0>:ns_couchdb_api:rpc_couchdb_node:165]RPC to couchdb node failed for {foreach_doc,<<"default">>,
#Fun,infinity} with {badrpc,
{'EXIT',
{noproc,
{gen_server,
call,
['capi_ddoc_manager-default',
{foreach_doc,
#Fun},
infinity]}}}}
Stack: [{ns_couchdb_api,rpc_couchdb_node,4,
[{file,"src/ns_couchdb_api.erl"},{line,164}]},
{capi_utils,foreach_live_ddoc_id,2,
[{file,"src/capi_utils.erl"},{line,151}]},
{capi_utils,fetch_ddoc_ids,1,[{file,"src/capi_utils.erl"},{line,144}]},
{compaction_new_daemon,'-spawn_scheduled_views_compactor/2-fun-0-',3,
[{file,"src/compaction_new_daemon.erl"},
{line,500}]},
{proc_lib,init_p,3,[{file,"proc_lib.erl"},{line,224}]}]
[ns_server:error,2016-10-19T09:55:37.106-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.401.0>:compaction_new_daemon:log_compactors_exit:1327]Compactor <0.426.0> exited unexpectedly: {error,
{badrpc,
{'EXIT',
{noproc,
{gen_server,call,
['capi_ddoc_manager-default',
{foreach_doc,
#Fun},
infinity]}}}}}. Moving to the next bucket.
[ns_server:debug,2016-10-19T09:55:37.106-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.401.0>:compaction_new_daemon:process_compactors_exit:1353]Finished compaction iteration.
[ns_server:debug,2016-10-19T09:55:37.106-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.401.0>:compaction_scheduler:schedule_next:60]Finished compaction for compact_views too soon. Next run will be in 30s
[error_logger:error,2016-10-19T09:55:37.106-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: compaction_new_daemon:-spawn_scheduled_views_compactor/2-fun-0-/0
pid: <0.426.0>
registered_name: []
exception exit: {error,
{badrpc,
{'EXIT',
{noproc,
{gen_server,call,
['capi_ddoc_manager-default',
{foreach_doc,#Fun},
infinity]}}}}}
in function ns_couchdb_api:rpc_couchdb_node/4 (src/ns_couchdb_api.erl, line 166)
in call from capi_utils:foreach_live_ddoc_id/2 (src/capi_utils.erl, line 151)
in call from capi_utils:fetch_ddoc_ids/1 (src/capi_utils.erl, line 144)
in call from compaction_new_daemon:'-spawn_scheduled_views_compactor/2-fun-0-'/3 (src/compaction_new_daemon.erl, line 500)
ancestors: [compaction_new_daemon,ns_server_sup,ns_server_nodes_sup,
<0.155.0>,ns_server_cluster_sup,<0.88.0>]
messages: []
links: [<0.401.0>]
dictionary: []
trap_exit: false
status: running
heap_size: 4185
stack_size: 27
reductions: 6619
neighbours:
[error_logger:info,2016-10-19T09:55:37.107-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.432.0>},
{name,{dcp_notifier,"test"}},
{mfargs,{dcp_notifier,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.109-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-test'}
started: [{pid,<0.434.0>},
{name,rebalance_subprocesses_registry},
{mfargs,
{ns_process_registry,start_link,
['rebalance_subprocesses_registry-test',
[{terminate_command,kill}]]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:37.109-07:00,ns_1@127.0.0.1:janitor_agent-test<0.435.0>:janitor_agent:read_flush_counter:1047]Loading flushseq failed: {error,enoent}. Assuming it's equal to global config.
[ns_server:info,2016-10-19T09:55:37.109-07:00,ns_1@127.0.0.1:janitor_agent-test<0.435.0>:janitor_agent:read_flush_counter_from_config:1054]Initialized flushseq 0 from bucket config
[error_logger:info,2016-10-19T09:55:37.110-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-test'}
started: [{pid,<0.435.0>},
{name,janitor_agent},
{mfargs,{janitor_agent,start_link,["test"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.110-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.433.0>},
{name,{janitor_agent_sup,"test"}},
{mfargs,{janitor_agent_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.114-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.437.0>},
{name,{stats_collector,"test"}},
{mfargs,{stats_collector,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.114-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.440.0>},
{name,{stats_archiver,"test"}},
{mfargs,{stats_archiver,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.114-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.442.0>},
{name,{stats_reader,"test"}},
{mfargs,{stats_reader,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.119-07:00,ns_1@127.0.0.1:mb_master_sup<0.436.0>:misc:start_singleton:1094]start_singleton(gen_server, ns_tick, [], []): started as <0.443.0> on 'ns_1@127.0.0.1'
[error_logger:info,2016-10-19T09:55:37.119-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.443.0>},
{name,ns_tick},
{mfargs,{ns_tick,start_link,[]}},
{restart_type,permanent},
{shutdown,10},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.134-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.444.0>},
{name,{goxdcr_stats_collector,"test"}},
{mfargs,{goxdcr_stats_collector,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.135-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.446.0>},
{name,{goxdcr_stats_archiver,"test"}},
{mfargs,{stats_archiver,start_link,["@xdcr-test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.135-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.331.0>:ns_bucket_sup:update_children:110]Starting new child: {{docs_sup,"locked"},
{docs_sup,start_link,["locked"]},
permanent,infinity,supervisor,
[docs_sup]}
[error_logger:info,2016-10-19T09:55:37.135-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.448.0>},
{name,{goxdcr_stats_reader,"test"}},
{mfargs,{stats_reader,start_link,["@xdcr-test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.135-07:00,ns_1@127.0.0.1:capi_doc_replicator-locked<0.451.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[ns_server:debug,2016-10-19T09:55:37.135-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-locked<0.452.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:37.135-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.449.0>},
{name,{failover_safeness_level,"test"}},
{mfargs,{failover_safeness_level,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.135-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.356.0>},
{name,{single_bucket_kv_sup,"test"}},
{mfargs,{single_bucket_kv_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.135-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.450.0>,docs_sup}
started: [{pid,<0.451.0>},
{name,doc_replicator},
{mfargs,{doc_replicator,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.136-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.450.0>,docs_sup}
started: [{pid,<0.452.0>},
{name,doc_replication_srv},
{mfargs,{doc_replication_srv,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.136-07:00,ns_1@127.0.0.1:capi_doc_replicator-locked<0.451.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.302.0>
[error_logger:info,2016-10-19T09:55:37.136-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-locked'}
started: [{pid,<11625.301.0>},
{name,capi_ddoc_manager_events},
{mfargs,
{capi_ddoc_manager,start_link_event_manager,
["locked"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.136-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-locked<0.452.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.302.0>
[ns_server:debug,2016-10-19T09:55:37.136-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.331.0>:ns_bucket_sup:update_children:110]Starting new child: {{single_bucket_kv_sup,"locked"},
{single_bucket_kv_sup,start_link,["locked"]},
permanent,infinity,supervisor,
[single_bucket_kv_sup]}
[error_logger:info,2016-10-19T09:55:37.136-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-locked'}
started: [{pid,<11625.302.0>},
{name,capi_ddoc_manager},
{mfargs,
{capi_ddoc_manager,start_link,
["locked",<0.451.0>,<0.452.0>]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.136-07:00,ns_1@127.0.0.1:capi_doc_replicator-locked<0.451.0>:doc_replicator:loop:64]doing replicate_newnodes_docs
[error_logger:info,2016-10-19T09:55:37.136-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.450.0>,docs_sup}
started: [{pid,<11625.300.0>},
{name,capi_ddoc_manager_sup},
{mfargs,
{capi_ddoc_manager_sup,start_link_remote,
['couchdb_ns_1@127.0.0.1',"locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.136-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.450.0>},
{name,{docs_sup,"locked"}},
{mfargs,{docs_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.137-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.455.0>,docs_kv_sup}
started: [{pid,<11625.304.0>},
{name,capi_set_view_manager},
{mfargs,
{capi_set_view_manager,start_link_remote,
['couchdb_ns_1@127.0.0.1',"locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.137-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.457.0>:ns_memcached:init:167]Starting ns_memcached
[error_logger:info,2016-10-19T09:55:37.137-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.455.0>,docs_kv_sup}
started: [{pid,<11625.307.0>},
{name,couch_stats_reader},
{mfargs,
{couch_stats_reader,start_link_remote,
['couchdb_ns_1@127.0.0.1',"locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.137-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.455.0>},
{name,{docs_kv_sup,"locked"}},
{mfargs,{docs_kv_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:37.137-07:00,ns_1@127.0.0.1:<0.458.0>:ns_memcached:run_connect_phase:190]Started 'connecting' phase of ns_memcached-locked. Parent is <0.457.0>
[error_logger:info,2016-10-19T09:55:37.137-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.456.0>,ns_memcached_sup}
started: [{pid,<0.457.0>},
{name,{ns_memcached,"locked"}},
{mfargs,{ns_memcached,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.137-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.456.0>,ns_memcached_sup}
started: [{pid,<0.459.0>},
{name,{terse_bucket_info_uploader,"locked"}},
{mfargs,
{terse_bucket_info_uploader,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.138-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.456.0>},
{name,{ns_memcached_sup,"locked"}},
{mfargs,{ns_memcached_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.138-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.461.0>},
{name,{ns_vbm_sup,"locked"}},
{mfargs,{ns_vbm_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2016-10-19T09:55:37.138-07:00,ns_1@127.0.0.1:janitor_agent-locked<0.468.0>:janitor_agent:read_flush_counter:1047]Loading flushseq failed: {error,enoent}. Assuming it's equal to global config.
[error_logger:info,2016-10-19T09:55:37.138-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.462.0>},
{name,{dcp_sup,"locked"}},
{mfargs,{dcp_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2016-10-19T09:55:37.138-07:00,ns_1@127.0.0.1:janitor_agent-locked<0.468.0>:janitor_agent:read_flush_counter_from_config:1054]Initialized flushseq 0 from bucket config
[error_logger:info,2016-10-19T09:55:37.138-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.463.0>},
{name,{dcp_replication_manager,"locked"}},
{mfargs,
{dcp_replication_manager,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.138-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.464.0>},
{name,{replication_manager,"locked"}},
{mfargs,{replication_manager,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.139-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.465.0>},
{name,{dcp_notifier,"locked"}},
{mfargs,{dcp_notifier,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.139-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-locked'}
started: [{pid,<0.467.0>},
{name,rebalance_subprocesses_registry},
{mfargs,
{ns_process_registry,start_link,
['rebalance_subprocesses_registry-locked',
[{terminate_command,kill}]]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.139-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-locked'}
started: [{pid,<0.468.0>},
{name,janitor_agent},
{mfargs,{janitor_agent,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.140-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.466.0>},
{name,{janitor_agent_sup,"locked"}},
{mfargs,{janitor_agent_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.140-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.469.0>},
{name,{stats_collector,"locked"}},
{mfargs,{stats_collector,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.140-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.472.0>},
{name,{stats_archiver,"locked"}},
{mfargs,{stats_archiver,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.140-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.474.0>},
{name,{stats_reader,"locked"}},
{mfargs,{stats_reader,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:37.140-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.457.0>:ns_memcached:handle_cast:689]Main ns_memcached connection established: {ok,#Port<0.6727>}
[error_logger:info,2016-10-19T09:55:37.141-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.475.0>},
{name,{goxdcr_stats_collector,"locked"}},
{mfargs,{goxdcr_stats_collector,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[user:info,2016-10-19T09:55:37.141-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.457.0>:ns_memcached:handle_cast:718]Bucket "locked" loaded on node 'ns_1@127.0.0.1' in 0 seconds.
[error_logger:info,2016-10-19T09:55:37.145-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.477.0>},
{name,{goxdcr_stats_archiver,"locked"}},
{mfargs,{stats_archiver,start_link,["@xdcr-locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.145-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.331.0>:ns_bucket_sup:update_children:110]Starting new child: {{docs_sup,"default"},
{docs_sup,start_link,["default"]},
permanent,infinity,supervisor,
[docs_sup]}
[error_logger:info,2016-10-19T09:55:37.146-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.485.0>},
{name,{goxdcr_stats_reader,"locked"}},
{mfargs,{stats_reader,start_link,["@xdcr-locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.146-07:00,ns_1@127.0.0.1:capi_doc_replicator-default<0.488.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[ns_server:debug,2016-10-19T09:55:37.146-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-default<0.489.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:37.146-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.486.0>},
{name,{failover_safeness_level,"locked"}},
{mfargs,
{failover_safeness_level,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.146-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.454.0>},
{name,{single_bucket_kv_sup,"locked"}},
{mfargs,{single_bucket_kv_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.146-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.487.0>,docs_sup}
started: [{pid,<0.488.0>},
{name,doc_replicator},
{mfargs,{doc_replicator,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.146-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.487.0>,docs_sup}
started: [{pid,<0.489.0>},
{name,doc_replication_srv},
{mfargs,{doc_replication_srv,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.147-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-default'}
started: [{pid,<11625.310.0>},
{name,capi_ddoc_manager_events},
{mfargs,
{capi_ddoc_manager,start_link_event_manager,
["default"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.147-07:00,ns_1@127.0.0.1:capi_doc_replicator-default<0.488.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.311.0>
[ns_server:debug,2016-10-19T09:55:37.147-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-default<0.489.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.311.0>
[ns_server:debug,2016-10-19T09:55:37.147-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.331.0>:ns_bucket_sup:update_children:110]Starting new child: {{single_bucket_kv_sup,"default"},
{single_bucket_kv_sup,start_link,["default"]},
permanent,infinity,supervisor,
[single_bucket_kv_sup]}
[error_logger:info,2016-10-19T09:55:37.147-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-default'}
started: [{pid,<11625.311.0>},
{name,capi_ddoc_manager},
{mfargs,
{capi_ddoc_manager,start_link,
["default",<0.488.0>,<0.489.0>]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.147-07:00,ns_1@127.0.0.1:capi_doc_replicator-default<0.488.0>:doc_replicator:loop:64]doing replicate_newnodes_docs
[error_logger:info,2016-10-19T09:55:37.147-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.487.0>,docs_sup}
started: [{pid,<11625.309.0>},
{name,capi_ddoc_manager_sup},
{mfargs,
{capi_ddoc_manager_sup,start_link_remote,
['couchdb_ns_1@127.0.0.1',"default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.147-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.487.0>},
{name,{docs_sup,"default"}},
{mfargs,{docs_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.148-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.492.0>,docs_kv_sup}
started: [{pid,<11625.313.0>},
{name,capi_set_view_manager},
{mfargs,
{capi_set_view_manager,start_link_remote,
['couchdb_ns_1@127.0.0.1',"default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.148-07:00,ns_1@127.0.0.1:ns_memcached-default<0.494.0>:ns_memcached:init:167]Starting ns_memcached
[error_logger:info,2016-10-19T09:55:37.148-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.492.0>,docs_kv_sup}
started: [{pid,<11625.316.0>},
{name,couch_stats_reader},
{mfargs,
{couch_stats_reader,start_link_remote,
['couchdb_ns_1@127.0.0.1',"default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.148-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.492.0>},
{name,{docs_kv_sup,"default"}},
{mfargs,{docs_kv_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:37.148-07:00,ns_1@127.0.0.1:<0.495.0>:ns_memcached:run_connect_phase:190]Started 'connecting' phase of ns_memcached-default. Parent is <0.494.0>
[error_logger:info,2016-10-19T09:55:37.148-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.493.0>,ns_memcached_sup}
started: [{pid,<0.494.0>},
{name,{ns_memcached,"default"}},
{mfargs,{ns_memcached,start_link,["default"]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.149-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.493.0>,ns_memcached_sup}
started: [{pid,<0.496.0>},
{name,{terse_bucket_info_uploader,"default"}},
{mfargs,
{terse_bucket_info_uploader,start_link,
["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.150-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.493.0>},
{name,{ns_memcached_sup,"default"}},
{mfargs,{ns_memcached_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.150-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.498.0>},
{name,{ns_vbm_sup,"default"}},
{mfargs,{ns_vbm_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.150-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.499.0>},
{name,{dcp_sup,"default"}},
{mfargs,{dcp_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2016-10-19T09:55:37.151-07:00,ns_1@127.0.0.1:janitor_agent-default<0.505.0>:janitor_agent:read_flush_counter:1047]Loading flushseq failed: {error,enoent}. Assuming it's equal to global config.
[error_logger:info,2016-10-19T09:55:37.151-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.500.0>},
{name,{dcp_replication_manager,"default"}},
{mfargs,
{dcp_replication_manager,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:37.151-07:00,ns_1@127.0.0.1:janitor_agent-default<0.505.0>:janitor_agent:read_flush_counter_from_config:1054]Initialized flushseq 0 from bucket config
[error_logger:info,2016-10-19T09:55:37.151-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.501.0>},
{name,{replication_manager,"default"}},
{mfargs,{replication_manager,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.151-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.502.0>},
{name,{dcp_notifier,"default"}},
{mfargs,{dcp_notifier,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.151-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-default'}
started: [{pid,<0.504.0>},
{name,rebalance_subprocesses_registry},
{mfargs,
{ns_process_registry,start_link,
['rebalance_subprocesses_registry-default',
[{terminate_command,kill}]]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.152-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-default'}
started: [{pid,<0.505.0>},
{name,janitor_agent},
{mfargs,{janitor_agent,start_link,["default"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.152-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.503.0>},
{name,{janitor_agent_sup,"default"}},
{mfargs,{janitor_agent_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.152-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.506.0>},
{name,{stats_collector,"default"}},
{mfargs,{stats_collector,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:37.152-07:00,ns_1@127.0.0.1:ns_memcached-default<0.494.0>:ns_memcached:handle_cast:689]Main ns_memcached connection established: {ok,#Port<0.6749>}
[error_logger:info,2016-10-19T09:55:37.152-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.509.0>},
{name,{stats_archiver,"default"}},
{mfargs,{stats_archiver,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.152-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.511.0>},
{name,{stats_reader,"default"}},
{mfargs,{stats_reader,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.153-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.512.0>},
{name,{goxdcr_stats_collector,"default"}},
{mfargs,
{goxdcr_stats_collector,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.153-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.514.0>},
{name,{goxdcr_stats_archiver,"default"}},
{mfargs,{stats_archiver,start_link,["@xdcr-default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[user:info,2016-10-19T09:55:37.153-07:00,ns_1@127.0.0.1:ns_memcached-default<0.494.0>:ns_memcached:handle_cast:718]Bucket "default" loaded on node 'ns_1@127.0.0.1' in 0 seconds.
[error_logger:info,2016-10-19T09:55:37.153-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.520.0>},
{name,{goxdcr_stats_reader,"default"}},
{mfargs,{stats_reader,start_link,["@xdcr-default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.154-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.521.0>},
{name,{failover_safeness_level,"default"}},
{mfargs,
{failover_safeness_level,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.154-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.491.0>},
{name,{single_bucket_kv_sup,"default"}},
{mfargs,{single_bucket_kv_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:37.255-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.239.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily not available. Returning empty list.
[ns_server:debug,2016-10-19T09:55:37.285-07:00,ns_1@127.0.0.1:ns_orchestrator_sup<0.534.0>:misc:start_singleton:1094]start_singleton(gen_fsm, ns_orchestrator, [], []): started as <0.535.0> on 'ns_1@127.0.0.1'
[error_logger:info,2016-10-19T09:55:37.285-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_orchestrator_sup}
started: [{pid,<0.535.0>},
{name,ns_orchestrator},
{mfargs,{ns_orchestrator,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.294-07:00,ns_1@127.0.0.1:<0.539.0>:auto_failover:init:147]init auto_failover.
[ns_server:debug,2016-10-19T09:55:37.294-07:00,ns_1@127.0.0.1:ns_orchestrator_sup<0.534.0>:misc:start_singleton:1094]start_singleton(gen_server, auto_failover, [], []): started as <0.539.0> on 'ns_1@127.0.0.1'
[ns_server:debug,2016-10-19T09:55:37.294-07:00,ns_1@127.0.0.1:<0.428.0>:restartable:start_child:98]Started child process <0.430.0>
MFA: {mb_master,start_link,[]}
[error_logger:info,2016-10-19T09:55:37.294-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_orchestrator_sup}
started: [{pid,<0.539.0>},
{name,auto_failover},
{mfargs,{auto_failover,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.294-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.534.0>},
{name,ns_orchestrator_sup},
{mfargs,{ns_orchestrator_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.294-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.428.0>},
{name,mb_master},
{mfargs,
{restartable,start_link,
[{mb_master,start_link,[]},infinity]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.295-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.540.0>},
{name,master_activity_events_ingress},
{mfargs,
{gen_event,start_link,
[{local,master_activity_events_ingress}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.295-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.541.0>},
{name,master_activity_events_timestamper},
{mfargs,
{master_activity_events,start_link_timestamper,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:37.298-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.543.0>},
{name,master_activity_events_pids_watcher},
{mfargs,
{master_activity_events_pids_watcher,start_link,
[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.303-07:00,ns_1@127.0.0.1:janitor_agent-test<0.435.0>:dcp_sup:nuke:79]Nuking DCP replicators for bucket "test":
[]
[ns_server:debug,2016-10-19T09:55:37.308-07:00,ns_1@127.0.0.1:ns_server_nodes_sup<0.156.0>:one_shot_barrier:notify:27]Notifying on barrier menelaus_barrier
[ns_server:debug,2016-10-19T09:55:37.308-07:00,ns_1@127.0.0.1:menelaus_barrier<0.158.0>:one_shot_barrier:barrier_body:62]Barrier menelaus_barrier got notification from <0.156.0>
[error_logger:info,2016-10-19T09:55:37.308-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.546.0>},
{name,master_activity_events_keeper},
{mfargs,{master_activity_events_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:37.308-07:00,ns_1@127.0.0.1:ns_server_nodes_sup<0.156.0>:one_shot_barrier:notify:32]Successfully notified on barrier menelaus_barrier
[error_logger:info,2016-10-19T09:55:37.308-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.198.0>},
{name,ns_server_sup},
{mfargs,{ns_server_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:37.308-07:00,ns_1@127.0.0.1:<0.155.0>:restartable:start_child:98]Started child process <0.156.0>
MFA: {ns_server_nodes_sup,start_link,[]}
[ns_server:debug,2016-10-19T09:55:37.308-07:00,ns_1@127.0.0.1:<0.2.0>:child_erlang:child_loop:115]67718: Entered child_loop
[error_logger:info,2016-10-19T09:55:37.308-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.155.0>},
{name,ns_server_nodes_sup},
{mfargs,
{restartable,start_link,
[{ns_server_nodes_sup,start_link,[]},
infinity]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:37.308-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
application: ns_server
started_at: 'ns_1@127.0.0.1'
[ns_server:debug,2016-10-19T09:55:37.309-07:00,ns_1@127.0.0.1:replication_manager-test<0.425.0>:replication_manager:handle_call:182]Change replication type from tap to dcp
[ns_server:info,2016-10-19T09:55:37.310-07:00,ns_1@127.0.0.1:ns_memcached-test<0.388.0>:ns_memcached:handle_call:287]Enabling traffic to bucket "test"
[ns_server:info,2016-10-19T09:55:37.310-07:00,ns_1@127.0.0.1:ns_memcached-test<0.388.0>:ns_memcached:handle_call:291]Bucket "test" marked as warmed in 0 seconds
[ns_server:debug,2016-10-19T09:55:37.311-07:00,ns_1@127.0.0.1:janitor_agent-locked<0.468.0>:dcp_sup:nuke:79]Nuking DCP replicators for bucket "locked":
[]
[ns_server:debug,2016-10-19T09:55:37.315-07:00,ns_1@127.0.0.1:replication_manager-locked<0.464.0>:replication_manager:handle_call:182]Change replication type from tap to dcp
[ns_server:info,2016-10-19T09:55:37.315-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.457.0>:ns_memcached:handle_call:287]Enabling traffic to bucket "locked"
[ns_server:info,2016-10-19T09:55:37.316-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.457.0>:ns_memcached:handle_call:291]Bucket "locked" marked as warmed in 0 seconds
[ns_server:debug,2016-10-19T09:55:37.316-07:00,ns_1@127.0.0.1:janitor_agent-default<0.505.0>:dcp_sup:nuke:79]Nuking DCP replicators for bucket "default":
[]
[ns_server:debug,2016-10-19T09:55:37.318-07:00,ns_1@127.0.0.1:replication_manager-default<0.501.0>:replication_manager:handle_call:182]Change replication type from tap to dcp
[ns_server:info,2016-10-19T09:55:37.318-07:00,ns_1@127.0.0.1:ns_memcached-default<0.494.0>:ns_memcached:handle_call:287]Enabling traffic to bucket "default"
[ns_server:info,2016-10-19T09:55:37.319-07:00,ns_1@127.0.0.1:ns_memcached-default<0.494.0>:ns_memcached:handle_call:291]Bucket "default" marked as warmed in 0 seconds
[ns_server:debug,2016-10-19T09:55:37.319-07:00,ns_1@127.0.0.1:json_rpc_connection-cbq-engine-cbauth<0.577.0>:json_rpc_connection:init:74]Observed revrpc connection: label "cbq-engine-cbauth", handling process <0.577.0>
[ns_server:debug,2016-10-19T09:55:37.319-07:00,ns_1@127.0.0.1:json_rpc_connection-saslauthd-saslauthd-port<0.578.0>:json_rpc_connection:init:74]Observed revrpc connection: label "saslauthd-saslauthd-port", handling process <0.578.0>
[ns_server:debug,2016-10-19T09:55:37.319-07:00,ns_1@127.0.0.1:json_rpc_connection-goxdcr-cbauth<0.579.0>:json_rpc_connection:init:74]Observed revrpc connection: label "goxdcr-cbauth", handling process <0.579.0>
[ns_server:debug,2016-10-19T09:55:37.319-07:00,ns_1@127.0.0.1:menelaus_cbauth<0.286.0>:menelaus_cbauth:handle_cast:87]Observed json rpc process {"cbq-engine-cbauth",<0.577.0>} started
[error_logger:error,2016-10-19T09:55:37.320-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.577.0>,{ok,<0.577.0>}}
[error_logger:error,2016-10-19T09:55:37.320-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.578.0>,{ok,<0.578.0>}}
[error_logger:error,2016-10-19T09:55:37.320-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.579.0>,{ok,<0.579.0>}}
[ns_server:debug,2016-10-19T09:55:37.322-07:00,ns_1@127.0.0.1:json_rpc_connection-cbq-engine-cbauth<0.577.0>:json_rpc_connection:handle_call:157]sending jsonrpc call:{[{jsonrpc,<<"2.0">>},
{id,0},
{method,<<"AuthCacheSvc.UpdateDB">>},
{params,
[{[{specialUser,<<"@cbq-engine-cbauth">>},
{nodes,
[{[{host,<<"127.0.0.1">>},
{user,<<"_admin">>},
{password,"*****"},
{ports,
[8091,18091,9100,9101,9102,9103,9104,9105,
18092,8092,11207,9999,11210,11211,8093,
18093]},
{local,true}]}]},
{buckets,
[{[{name,<<"test">>},{password,"*****"}]},
{[{name,<<"locked">>},{password,"*****"}]},
{[{name,<<"default">>},{password,"*****"}]}]},
{authCheckURL,<<"http://127.0.0.1:8091/_cbauth">>},
{permissionCheckURL,
<<"http://127.0.0.1:8091/_cbauth/checkPermission">>},
{ldapEnabled,false},
{permissionsVersion,75615},
{admin,
{[{user,<<"Administrator">>},
{salt,<<"QB3B+r6o6uk8Xpv/FO8fTQ==">>},
{mac,
<<"6S6aASAfFc8/OJxtT/k7eSxo1HU=">>}]}}]}]}]}
[ns_server:debug,2016-10-19T09:55:37.322-07:00,ns_1@127.0.0.1:json_rpc_connection-cbq-engine-cbauth<0.577.0>:json_rpc_connection:handle_info:93]got response: [{<<"id">>,0},{<<"result">>,true},{<<"error">>,null}]
[ns_server:debug,2016-10-19T09:55:37.322-07:00,ns_1@127.0.0.1:menelaus_cbauth<0.286.0>:menelaus_cbauth:handle_cast:87]Observed json rpc process {"goxdcr-cbauth",<0.579.0>} started
[ns_server:debug,2016-10-19T09:55:37.323-07:00,ns_1@127.0.0.1:json_rpc_connection-goxdcr-cbauth<0.579.0>:json_rpc_connection:handle_call:157]sending jsonrpc call:{[{jsonrpc,<<"2.0">>},
{id,0},
{method,<<"AuthCacheSvc.UpdateDB">>},
{params,
[{[{specialUser,<<"@goxdcr-cbauth">>},
{nodes,
[{[{host,<<"127.0.0.1">>},
{user,<<"_admin">>},
{password,"*****"},
{ports,
[8091,18091,9100,9101,9102,9103,9104,9105,
18092,8092,11207,9999,11210,11211,8093,
18093]},
{local,true}]}]},
{buckets,
[{[{name,<<"test">>},{password,"*****"}]},
{[{name,<<"locked">>},{password,"*****"}]},
{[{name,<<"default">>},{password,"*****"}]}]},
{authCheckURL,<<"http://127.0.0.1:8091/_cbauth">>},
{permissionCheckURL,
<<"http://127.0.0.1:8091/_cbauth/checkPermission">>},
{ldapEnabled,false},
{permissionsVersion,75615},
{admin,
{[{user,<<"Administrator">>},
{salt,<<"QB3B+r6o6uk8Xpv/FO8fTQ==">>},
{mac,
<<"6S6aASAfFc8/OJxtT/k7eSxo1HU=">>}]}}]}]}]}
[ns_server:debug,2016-10-19T09:55:37.323-07:00,ns_1@127.0.0.1:json_rpc_connection-goxdcr-cbauth<0.579.0>:json_rpc_connection:handle_info:93]got response: [{<<"id">>,0},{<<"result">>,true},{<<"error">>,null}]
[ns_server:debug,2016-10-19T09:55:37.435-07:00,ns_1@127.0.0.1:json_rpc_connection-projector-cbauth<0.584.0>:json_rpc_connection:init:74]Observed revrpc connection: label "projector-cbauth", handling process <0.584.0>
[ns_server:debug,2016-10-19T09:55:37.435-07:00,ns_1@127.0.0.1:menelaus_cbauth<0.286.0>:menelaus_cbauth:handle_cast:87]Observed json rpc process {"projector-cbauth",<0.584.0>} started
[error_logger:error,2016-10-19T09:55:37.435-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.584.0>,{ok,<0.584.0>}}
[ns_server:debug,2016-10-19T09:55:37.435-07:00,ns_1@127.0.0.1:json_rpc_connection-projector-cbauth<0.584.0>:json_rpc_connection:handle_call:157]sending jsonrpc call:{[{jsonrpc,<<"2.0">>},
{id,0},
{method,<<"AuthCacheSvc.UpdateDB">>},
{params,
[{[{specialUser,<<"@projector-cbauth">>},
{nodes,
[{[{host,<<"127.0.0.1">>},
{user,<<"_admin">>},
{password,"*****"},
{ports,
[8091,18091,9100,9101,9102,9103,9104,9105,
18092,8092,11207,9999,11210,11211,8093,
18093]},
{local,true}]}]},
{buckets,
[{[{name,<<"test">>},{password,"*****"}]},
{[{name,<<"locked">>},{password,"*****"}]},
{[{name,<<"default">>},{password,"*****"}]}]},
{authCheckURL,<<"http://127.0.0.1:8091/_cbauth">>},
{permissionCheckURL,
<<"http://127.0.0.1:8091/_cbauth/checkPermission">>},
{ldapEnabled,false},
{permissionsVersion,75615},
{admin,
{[{user,<<"Administrator">>},
{salt,<<"QB3B+r6o6uk8Xpv/FO8fTQ==">>},
{mac,
<<"6S6aASAfFc8/OJxtT/k7eSxo1HU=">>}]}}]}]}]}
[ns_server:debug,2016-10-19T09:55:37.435-07:00,ns_1@127.0.0.1:json_rpc_connection-projector-cbauth<0.584.0>:json_rpc_connection:handle_info:93]got response: [{<<"id">>,0},{<<"result">>,true},{<<"error">>,null}]
[ns_server:debug,2016-10-19T09:55:38.121-07:00,ns_1@127.0.0.1:<0.475.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily not available. Returning empty list.
[ns_server:debug,2016-10-19T09:55:38.121-07:00,ns_1@127.0.0.1:<0.444.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily not available. Returning empty list.
[ns_server:error,2016-10-19T09:55:38.121-07:00,ns_1@127.0.0.1:index_stats_collector-index<0.376.0>:index_rest:get_json:42]Request to (indexer) http://127.0.0.1:9102/stats?async=true failed: {error,
{econnrefused,
[{lhttpc_client,
send_request,
1,
[{file,
"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
{line,
220}]},
{lhttpc_client,
execute,
9,
[{file,
"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
{line,
169}]},
{lhttpc_client,
request,
9,
[{file,
"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
{line,
92}]}]}}
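[editorial note] Nothing is listening on the indexer port (9102) this early in startup, so lhttpc's TCP connect fails before any HTTP is spoken and index_rest logs econnrefused; the identical traces that follow are the same request retried on later stats ticks. A standalone repro sketch, assuming the lhttpc application (and its dependencies) can be started and the port is unbound:

    %% Sketch: the same request outside ns_server. With nothing bound
    %% to 127.0.0.1:9102, the connect fails with econnrefused.
    application:ensure_all_started(lhttpc),
    lhttpc:request("http://127.0.0.1:9102/stats?async=true",
                   "GET", [], 5000).
    %% => {error, econnrefused}     (matches the trace above)
    %% A successful call would instead return
    %% {ok, {{200, "OK"}, Headers, Body}}.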
[ns_server:debug,2016-10-19T09:55:38.121-07:00,ns_1@127.0.0.1:<0.512.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily not available. Returning empty list.
[ns_server:error,2016-10-19T09:55:39.122-07:00,ns_1@127.0.0.1:index_stats_collector-index<0.376.0>:index_rest:get_json:42]Request to (indexer) http://127.0.0.1:9102/stats?async=true failed: {error,
{econnrefused,
[{lhttpc_client,
send_request,
1,
[{file,
"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
{line,
220}]},
{lhttpc_client,
execute,
9,
[{file,
"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
{line,
169}]},
{lhttpc_client,
request,
9,
[{file,
"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
{line,
92}]}]}}
[ns_server:debug,2016-10-19T09:55:39.597-07:00,ns_1@127.0.0.1:json_rpc_connection-index-cbauth<0.618.0>:json_rpc_connection:init:74]Observed revrpc connection: label "index-cbauth", handling process <0.618.0>
[ns_server:debug,2016-10-19T09:55:39.597-07:00,ns_1@127.0.0.1:menelaus_cbauth<0.286.0>:menelaus_cbauth:handle_cast:87]Observed json rpc process {"index-cbauth",<0.618.0>} started
[error_logger:error,2016-10-19T09:55:39.597-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.618.0>,{ok,<0.618.0>}}
[ns_server:debug,2016-10-19T09:55:39.597-07:00,ns_1@127.0.0.1:json_rpc_connection-index-cbauth<0.618.0>:json_rpc_connection:handle_call:157]sending jsonrpc call:{[{jsonrpc,<<"2.0">>},
{id,0},
{method,<<"AuthCacheSvc.UpdateDB">>},
{params,
[{[{specialUser,<<"@index-cbauth">>},
{nodes,
[{[{host,<<"127.0.0.1">>},
{user,<<"_admin">>},
{password,"*****"},
{ports,
[8091,18091,9100,9101,9102,9103,9104,9105,
18092,8092,11207,9999,11210,11211,8093,
18093]},
{local,true}]}]},
{buckets,
[{[{name,<<"test">>},{password,"*****"}]},
{[{name,<<"locked">>},{password,"*****"}]},
{[{name,<<"default">>},{password,"*****"}]}]},
{authCheckURL,<<"http://127.0.0.1:8091/_cbauth">>},
{permissionCheckURL,
<<"http://127.0.0.1:8091/_cbauth/checkPermission">>},
{ldapEnabled,false},
{permissionsVersion,75615},
{admin,
{[{user,<<"Administrator">>},
{salt,<<"QB3B+r6o6uk8Xpv/FO8fTQ==">>},
{mac,
<<"6S6aASAfFc8/OJxtT/k7eSxo1HU=">>}]}}]}]}]}
[ns_server:debug,2016-10-19T09:55:39.617-07:00,ns_1@127.0.0.1:json_rpc_connection-index-cbauth<0.618.0>:json_rpc_connection:handle_info:93]got response: [{<<"id">>,0},{<<"result">>,true},{<<"error">>,null}]
[ns_server:error,2016-10-19T09:55:40.120-07:00,ns_1@127.0.0.1:index_stats_collector-index<0.376.0>:index_rest:get_json:42]Request to (indexer) http://127.0.0.1:9102/stats?async=true failed: {error,
{econnrefused,
[{lhttpc_client,
send_request,
1,
[{file,
"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
{line,
220}]},
{lhttpc_client,
execute,
9,
[{file,
"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
{line,
169}]},
{lhttpc_client,
request,
9,
[{file,
"/Users/jenkins/jenkins/workspace/watson-unix/couchdb/src/lhttpc/lhttpc_client.erl"},
{line,
92}]}]}}
[error_logger:error,2016-10-19T09:55:41.726-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Error in process <0.700.0> on node 'ns_1@127.0.0.1' with exit value: {{case_clause,{more,"Pages free:~d.",0,[]}},[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},{memsup,get_memory_usage,1,[{file,"memsup.erl"},{line,729}]},{memsup,'-handle_call/3-fun-1-',2,[{file,"memsup.erl"},{line,285}]}]}
[error_logger:error,2016-10-19T09:55:41.726-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]** Generic server memsup terminating
** Last message in was {'EXIT',<0.700.0>,
{{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,
[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_call/3-fun-1-',2,
[{file,"memsup.erl"},{line,285}]}]}}
** When Server state == [{data,[{"Timeout",60000}]},
{items,{"Memory Usage",
[{"Allocated",15089123328},
{"Total",15134228480}]}},
{items,{"Worst Memory User",
[{"Pid",<0.7.0>},{"Memory",602208}]}}]
** Reason for termination ==
** {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_call/3-fun-1-',2,[{file,"memsup.erl"},{line,285}]}]}
[error_logger:error,2016-10-19T09:55:41.727-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: memsup:init/1
pid: <0.80.0>
registered_name: memsup
exception exit: {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_call/3-fun-1-',2,
[{file,"memsup.erl"},{line,285}]}]}
in function gen_server:terminate/6 (gen_server.erl, line 744)
ancestors: [os_mon_sup,<0.77.0>]
messages: []
links: [<0.78.0>]
dictionary: [{system_memory_high_watermark,set}]
trap_exit: true
status: running
heap_size: 987
stack_size: 27
reductions: 21651
neighbours:
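[editorial note] The crash term itself identifies the bug. On darwin, memsup shells out to vm_stat and parses each line with io_lib:fread/2; {more,Format,0,[]} is exactly what fread returns for an empty or truncated line, and memsup:fread_value/2 (memsup.erl:759 in this OTP release) matches only the {ok,...} shape. This is generic io_lib behaviour, reproducible in any Erlang shell:

    %% Well-formed vm_stat line: fread succeeds.
    io_lib:fread("Pages free:~d.", "Pages free: 318509.").
    %% => {ok,[318509],[]}
    %% Empty line: fread asks for more input rather than erroring...
    io_lib:fread("Pages free:~d.", "").
    %% => {more,"Pages free:~d.",0,[]}
    %% ...and that {more,...} tuple is the unmatched value behind the
    %% case_clause in the crash report above.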
[error_logger:info,2016-10-19T09:55:41.727-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
alarm_handler: {clear,system_memory_high_watermark}
[error_logger:error,2016-10-19T09:55:41.727-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,os_mon_sup}
Context: child_terminated
Reason: {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_call/3-fun-1-',2,
[{file,"memsup.erl"},{line,285}]}]}
Offender: [{pid,<0.80.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:error,2016-10-19T09:55:41.727-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,ns_heart_sup}
Context: child_terminated
Reason: {{{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_call/3-fun-1-',2,
[{file,"memsup.erl"},{line,285}]}]},
{gen_server,call,[memsup,get_system_memory_data,infinity]}}
Offender: [{pid,<0.239.0>},
{name,ns_heart_slow_updater},
{mfargs,{ns_heart,start_link_slow_updater,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:41.727-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_heart_sup}
started: [{pid,<0.703.0>},
{name,ns_heart_slow_updater},
{mfargs,{ns_heart,start_link_slow_updater,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:41.727-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,os_mon_sup}
started: [{pid,<0.702.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:error,2016-10-19T09:55:41.734-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Error in process <0.704.0> on node 'ns_1@127.0.0.1' with exit value: {{case_clause,{more,"Pages free:~d.",0,[]}},[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},{memsup,get_memory_usage,1,[{file,"memsup.erl"},{line,729}]},{memsup,'-handle_info/2-fun-0-',2,[{file,"memsup.erl"},{line,342}]}]}
[error_logger:error,2016-10-19T09:55:41.734-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]** Generic server memsup terminating
** Last message in was {'EXIT',<0.704.0>,
{{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,
[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,
[{file,"memsup.erl"},{line,342}]}]}}
** When Server state == {state,{unix,darwin},
false,undefined,undefined,false,60000,30000,
0.8,0.05,<0.704.0>,#Ref<0.0.0.4391>,undefined,
[reg],
[]}
** Reason for termination ==
** {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,[{file,"memsup.erl"},{line,342}]}]}
[error_logger:error,2016-10-19T09:55:41.735-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: memsup:init/1
pid: <0.702.0>
registered_name: memsup
exception exit: {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,
[{file,"memsup.erl"},{line,342}]}]}
in function gen_server:terminate/6 (gen_server.erl, line 744)
ancestors: [os_mon_sup,<0.77.0>]
messages: []
links: [<0.78.0>]
dictionary: []
trap_exit: true
status: running
heap_size: 610
stack_size: 27
reductions: 212
neighbours:
[error_logger:error,2016-10-19T09:55:41.735-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,os_mon_sup}
Context: child_terminated
Reason: {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,
[{file,"memsup.erl"},{line,342}]}]}
Offender: [{pid,<0.702.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:41.736-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,os_mon_sup}
started: [{pid,<0.706.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:error,2016-10-19T09:55:41.742-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Error in process <0.707.0> on node 'ns_1@127.0.0.1' with exit value: {{case_clause,{more,"Pages free:~d.",0,[]}},[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},{memsup,get_memory_usage,1,[{file,"memsup.erl"},{line,729}]},{memsup,'-handle_info/2-fun-0-',2,[{file,"memsup.erl"},{line,342}]}]}
[error_logger:error,2016-10-19T09:55:41.743-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]** Generic server memsup terminating
** Last message in was {'EXIT',<0.707.0>,
{{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,
[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,
[{file,"memsup.erl"},{line,342}]}]}}
** When Server state == {state,{unix,darwin},
false,undefined,undefined,false,60000,30000,
0.8,0.05,<0.707.0>,#Ref<0.0.0.4407>,undefined,
[reg],
[]}
** Reason for termination ==
** {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,[{file,"memsup.erl"},{line,342}]}]}
[error_logger:error,2016-10-19T09:55:41.743-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: memsup:init/1
pid: <0.706.0>
registered_name: memsup
exception exit: {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,
[{file,"memsup.erl"},{line,342}]}]}
in function gen_server:terminate/6 (gen_server.erl, line 744)
ancestors: [os_mon_sup,<0.77.0>]
messages: []
links: [<0.78.0>]
dictionary: []
trap_exit: true
status: running
heap_size: 610
stack_size: 27
reductions: 216
neighbours:
[error_logger:error,2016-10-19T09:55:41.743-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,os_mon_sup}
Context: child_terminated
Reason: {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,
[{file,"memsup.erl"},{line,342}]}]}
Offender: [{pid,<0.706.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:41.744-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,os_mon_sup}
started: [{pid,<0.709.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:error,2016-10-19T09:55:41.750-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Error in process <0.710.0> on node 'ns_1@127.0.0.1' with exit value: {{case_clause,{more,"Pages free:~d.",0,[]}},[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},{memsup,get_memory_usage,1,[{file,"memsup.erl"},{line,729}]},{memsup,'-handle_info/2-fun-0-',2,[{file,"memsup.erl"},{line,342}]}]}
[error_logger:error,2016-10-19T09:55:41.750-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]** Generic server memsup terminating
** Last message in was {'EXIT',<0.710.0>,
{{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,
[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,
[{file,"memsup.erl"},{line,342}]}]}}
** When Server state == {state,{unix,darwin},
false,undefined,undefined,false,60000,30000,
0.8,0.05,<0.710.0>,#Ref<0.0.0.4416>,undefined,
[reg],
[]}
** Reason for termination ==
** {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,[{file,"memsup.erl"},{line,342}]}]}
[error_logger:error,2016-10-19T09:55:41.751-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: memsup:init/1
pid: <0.709.0>
registered_name: memsup
exception exit: {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,
[{file,"memsup.erl"},{line,342}]}]}
in function gen_server:terminate/6 (gen_server.erl, line 744)
ancestors: [os_mon_sup,<0.77.0>]
messages: []
links: [<0.78.0>]
dictionary: []
trap_exit: true
status: running
heap_size: 610
stack_size: 27
reductions: 216
neighbours:
[error_logger:error,2016-10-19T09:55:41.751-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,os_mon_sup}
Context: child_terminated
Reason: {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,
[{file,"memsup.erl"},{line,342}]}]}
Offender: [{pid,<0.709.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:41.751-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,os_mon_sup}
started: [{pid,<0.712.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:error,2016-10-19T09:55:41.758-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Error in process <0.713.0> on node 'ns_1@127.0.0.1' with exit value: {{case_clause,{more,"Pages free:~d.",0,[]}},[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},{memsup,get_memory_usage,1,[{file,"memsup.erl"},{line,729}]},{memsup,'-handle_info/2-fun-0-',2,[{file,"memsup.erl"},{line,342}]}]}
[error_logger:error,2016-10-19T09:55:41.758-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]** Generic server memsup terminating
** Last message in was {'EXIT',<0.713.0>,
{{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,
[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,
[{file,"memsup.erl"},{line,342}]}]}}
** When Server state == {state,{unix,darwin},
false,undefined,undefined,false,60000,30000,
0.8,0.05,<0.713.0>,#Ref<0.0.0.4427>,undefined,
[reg],
[]}
** Reason for termination ==
** {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,[{file,"memsup.erl"},{line,342}]}]}
[error_logger:error,2016-10-19T09:55:41.759-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: memsup:init/1
pid: <0.712.0>
registered_name: memsup
exception exit: {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,
[{file,"memsup.erl"},{line,342}]}]}
in function gen_server:terminate/6 (gen_server.erl, line 744)
ancestors: [os_mon_sup,<0.77.0>]
messages: []
links: [<0.78.0>]
dictionary: []
trap_exit: true
status: running
heap_size: 610
stack_size: 27
reductions: 212
neighbours:
[error_logger:error,2016-10-19T09:55:41.759-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,os_mon_sup}
Context: child_terminated
Reason: {{case_clause,{more,"Pages free:~d.",0,[]}},
[{memsup,fread_value,2,[{file,"memsup.erl"},{line,759}]},
{memsup,get_memory_usage,1,
[{file,"memsup.erl"},{line,729}]},
{memsup,'-handle_info/2-fun-0-',2,
[{file,"memsup.erl"},{line,342}]}]}
Offender: [{pid,<0.712.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:error,2016-10-19T09:55:41.759-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,os_mon_sup}
Context: shutdown
Reason: reached_max_restart_intensity
Offender: [{pid,<0.712.0>},
{name,memsup},
{mfargs,{memsup,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:41.760-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
application: os_mon
exited: shutdown
type: permanent
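[editorial note] This is the pivotal moment of the log: memsup crashed faster than os_mon_sup's restart budget allows, the supervisor gave up (reached_max_restart_intensity), and because os_mon runs as a permanent application its shutdown takes the whole node down. Everything below is the resulting orderly teardown, followed by the restart at 09:55:47. A sketch of the supervisor semantics with hypothetical limits (os_mon_sup's real values live in OTP's os_mon source):

    %% If a child needs more than MaxR restarts within MaxT seconds,
    %% the supervisor terminates itself instead of restarting again.
    -module(intensity_demo).
    -behaviour(supervisor).
    -export([start_link/0, init/1]).

    start_link() ->
        supervisor:start_link({local, ?MODULE}, ?MODULE, []).

    init([]) ->
        MaxR = 3, MaxT = 60,   %% hypothetical limits, for illustration
        {ok, {{one_for_one, MaxR, MaxT},
              [{memsup, {memsup, start_link, []},
                permanent, 2000, worker, [memsup]}]}}.
    %% One crash too many inside MaxT exits the supervisor with
    %% reached_max_restart_intensity; a *permanent* application dying
    %% this way terminates the node, as the teardown below shows.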
[ns_server:debug,2016-10-19T09:55:41.760-07:00,ns_1@127.0.0.1:<0.547.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {master_activity_events,<0.546.0>} exited with reason killed
[ns_server:info,2016-10-19T09:55:41.760-07:00,ns_1@127.0.0.1:mb_master<0.430.0>:mb_master:terminate:299]Synchronously shutting down child mb_master_sup
[ns_server:debug,2016-10-19T09:55:41.760-07:00,ns_1@127.0.0.1:<0.428.0>:restartable:shutdown_child:120]Successfully terminated process <0.430.0>
[ns_server:debug,2016-10-19T09:55:41.760-07:00,ns_1@127.0.0.1:<0.431.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.430.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:41.760-07:00,ns_1@127.0.0.1:<0.402.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.401.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:41.760-07:00,ns_1@127.0.0.1:<0.375.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.374.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:41.760-07:00,ns_1@127.0.0.1:<0.373.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_node_disco_events,<0.371.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:41.760-07:00,ns_1@127.0.0.1:<0.372.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.371.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:41.760-07:00,ns_1@127.0.0.1:<0.369.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_node_disco_events,<0.367.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:41.760-07:00,ns_1@127.0.0.1:<0.368.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.367.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:41.837-07:00,ns_1@127.0.0.1:<0.390.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.386.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:41.938-07:00,ns_1@127.0.0.1:<0.385.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.383.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:42.014-07:00,ns_1@127.0.0.1:<0.382.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.381.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:42.093-07:00,ns_1@127.0.0.1:<0.380.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.379.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:42.093-07:00,ns_1@127.0.0.1:<0.378.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.376.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:42.093-07:00,ns_1@127.0.0.1:<0.377.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.376.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:42.093-07:00,ns_1@127.0.0.1:<0.358.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.357.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:42.160-07:00,ns_1@127.0.0.1:<0.354.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.353.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:42.160-07:00,ns_1@127.0.0.1:<0.352.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.351.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:42.242-07:00,ns_1@127.0.0.1:<0.348.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.347.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:42.697-07:00,ns_1@127.0.0.1:<0.345.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.344.0>} exited with reason shutdown
[error_logger:error,2016-10-19T09:55:42.759-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,menelaus_sup}
Context: child_terminated
Reason: {noproc,
{gen_server,call,
[{'stats_reader-@global','ns_1@127.0.0.1'},
{latest,minute,1}]}}
Offender: [{pid,<0.285.0>},
{name,menelaus_web_alerts_srv},
{mfargs,{menelaus_web_alerts_srv,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
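[editorial note] This crash is a teardown race, not a new fault: menelaus_web_alerts_srv polled 'stats_reader-@global' after that process had already exited, and a gen_server:call to a missing registered name exits with noproc. Generic behaviour, sketched:

    %% Calling a registered name that no longer exists:
    catch gen_server:call({'stats_reader-@global', node()},
                          {latest, minute, 1}).
    %% => {'EXIT',{noproc,{gen_server,call,
    %%              [{'stats_reader-@global',Node},
    %%               {latest,minute,1}]}}}
    %% The supervisor records it as child_terminated and simply
    %% restarts the alerts server (progress report below).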
[error_logger:info,2016-10-19T09:55:42.759-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.718.0>},
{name,menelaus_web_alerts_srv},
{mfargs,{menelaus_web_alerts_srv,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:42.794-07:00,ns_1@127.0.0.1:<0.339.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.338.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:42.794-07:00,ns_1@127.0.0.1:<0.337.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.334.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:42.794-07:00,ns_1@127.0.0.1:<0.336.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ale_stats_events,<0.334.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:42.868-07:00,ns_1@127.0.0.1:<0.519.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.514.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:42.868-07:00,ns_1@127.0.0.1:<0.513.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.512.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.142-07:00,ns_1@127.0.0.1:<0.508.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.506.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.142-07:00,ns_1@127.0.0.1:<0.510.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.509.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.142-07:00,ns_1@127.0.0.1:<0.507.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.506.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.143-07:00,ns_1@127.0.0.1:replication_manager-default<0.501.0>:replication_manager:terminate:105]Replication manager died {shutdown,{state,"default",dcp,[],undefined}}
[ns_server:debug,2016-10-19T09:55:43.143-07:00,ns_1@127.0.0.1:<0.497.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {bucket_info_cache_invalidations,<0.496.0>} exited with reason shutdown
[user:info,2016-10-19T09:55:43.143-07:00,ns_1@127.0.0.1:ns_memcached-default<0.494.0>:ns_memcached:terminate:804]Shutting down bucket "default" on 'ns_1@127.0.0.1' for server shutdown
[ns_server:info,2016-10-19T09:55:43.143-07:00,ns_1@127.0.0.1:ns_memcached-default<0.494.0>:ns_memcached:terminate:816]This bucket shutdown is not due to bucket deletion or reconfiguration. Doing nothing
[ns_server:debug,2016-10-19T09:55:43.143-07:00,ns_1@127.0.0.1:ns_memcached-default<0.494.0>:ns_memcached:terminate:842]Terminated.
[ns_server:debug,2016-10-19T09:55:43.217-07:00,ns_1@127.0.0.1:<0.484.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.477.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.218-07:00,ns_1@127.0.0.1:<0.476.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.475.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.488-07:00,ns_1@127.0.0.1:<0.473.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.472.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.488-07:00,ns_1@127.0.0.1:<0.471.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.469.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.488-07:00,ns_1@127.0.0.1:<0.470.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.469.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.488-07:00,ns_1@127.0.0.1:replication_manager-locked<0.464.0>:replication_manager:terminate:105]Replication manager died {shutdown,{state,"locked",dcp,[],undefined}}
[ns_server:debug,2016-10-19T09:55:43.488-07:00,ns_1@127.0.0.1:<0.460.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {bucket_info_cache_invalidations,<0.459.0>} exited with reason shutdown
[user:info,2016-10-19T09:55:43.488-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.457.0>:ns_memcached:terminate:804]Shutting down bucket "locked" on 'ns_1@127.0.0.1' for server shutdown
[ns_server:info,2016-10-19T09:55:43.489-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.457.0>:ns_memcached:terminate:816]This bucket shutdown is not due to bucket deletion or reconfiguration. Doing nothing
[ns_server:debug,2016-10-19T09:55:43.489-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.457.0>:ns_memcached:terminate:842]Terminated.
[ns_server:debug,2016-10-19T09:55:43.563-07:00,ns_1@127.0.0.1:<0.447.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.446.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.563-07:00,ns_1@127.0.0.1:<0.445.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.444.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.829-07:00,ns_1@127.0.0.1:<0.441.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_stats_event,<0.440.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.829-07:00,ns_1@127.0.0.1:<0.439.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_tick_event,<0.437.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.829-07:00,ns_1@127.0.0.1:<0.438.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.437.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.829-07:00,ns_1@127.0.0.1:replication_manager-test<0.425.0>:replication_manager:terminate:105]Replication manager died {shutdown,{state,"test",dcp,[],undefined}}
[ns_server:debug,2016-10-19T09:55:43.829-07:00,ns_1@127.0.0.1:<0.404.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {bucket_info_cache_invalidations,<0.403.0>} exited with reason shutdown
[user:info,2016-10-19T09:55:43.830-07:00,ns_1@127.0.0.1:ns_memcached-test<0.388.0>:ns_memcached:terminate:804]Shutting down bucket "test" on 'ns_1@127.0.0.1' for server shutdown
[ns_server:info,2016-10-19T09:55:43.830-07:00,ns_1@127.0.0.1:ns_memcached-test<0.388.0>:ns_memcached:terminate:816]This bucket shutdown is not due to bucket deletion or reconfiguration. Doing nothing
[ns_server:debug,2016-10-19T09:55:43.830-07:00,ns_1@127.0.0.1:ns_memcached-test<0.388.0>:ns_memcached:terminate:842]Terminated.
[ns_server:debug,2016-10-19T09:55:43.831-07:00,ns_1@127.0.0.1:<0.333.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.332.0>} exited with reason shutdown
[error_logger:error,2016-10-19T09:55:43.831-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,ns_bucket_sup}
Context: shutdown_error
Reason: normal
Offender: [{pid,<0.333.0>},
{name,buckets_observing_subscription},
{mfargs,{ns_bucket_sup,subscribe_on_config_events,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:43.831-07:00,ns_1@127.0.0.1:<0.325.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.324.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.832-07:00,ns_1@127.0.0.1:<0.315.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.312.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.832-07:00,ns_1@127.0.0.1:<0.314.0>:remote_monitors:handle_down:158]Caller of remote monitor <0.312.0> died with shutdown. Exiting
[ns_server:debug,2016-10-19T09:55:43.832-07:00,ns_1@127.0.0.1:<0.300.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.299.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.832-07:00,ns_1@127.0.0.1:<0.297.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.296.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.832-07:00,ns_1@127.0.0.1:<0.311.0>:remote_monitors:handle_down:158]Caller of remote monitor <0.290.0> died with killed. Exiting
[ns_server:debug,2016-10-19T09:55:43.832-07:00,ns_1@127.0.0.1:<0.291.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.290.0>} exited with reason killed
[ns_server:debug,2016-10-19T09:55:43.832-07:00,ns_1@127.0.0.1:<0.287.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {json_rpc_events,<0.286.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.832-07:00,ns_1@127.0.0.1:<0.289.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.286.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.832-07:00,ns_1@127.0.0.1:<0.288.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_node_disco_events,<0.286.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.832-07:00,ns_1@127.0.0.1:<0.603.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.274.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.832-07:00,ns_1@127.0.0.1:<0.602.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.275.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.832-07:00,ns_1@127.0.0.1:<0.604.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.278.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.832-07:00,ns_1@127.0.0.1:<0.605.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.276.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.833-07:00,ns_1@127.0.0.1:<0.587.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.272.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.833-07:00,ns_1@127.0.0.1:<0.606.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.277.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.833-07:00,ns_1@127.0.0.1:<0.621.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.596.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.833-07:00,ns_1@127.0.0.1:<0.260.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.259.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.834-07:00,ns_1@127.0.0.1:<0.257.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.256.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.834-07:00,ns_1@127.0.0.1:<0.244.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.243.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:43.834-07:00,ns_1@127.0.0.1:<0.240.0>:restartable:shutdown_child:120]Successfully terminated process <0.241.0>
[ns_server:debug,2016-10-19T09:55:43.837-07:00,ns_1@127.0.0.1:json_rpc_connection-goxdcr-cbauth<0.579.0>:json_rpc_connection:handle_info:128]Socket closed
[user:debug,2016-10-19T09:55:43.839-07:00,ns_1@127.0.0.1:<0.204.0>:ns_log:crash_consumption_loop:70]Service 'goxdcr' exited with status 0. Restarting. Messages: BucketSettingsChangeListener 2016-10-19T09:55:43.834-07:00 [INFO] metakv.RunObserveChildren failed, err=Get http://127.0.0.1:8091/_metakv/bucketSettings/?feed=continuous: dial tcp 127.0.0.1:8091: getsockopt: connection refused
BucketSettingsChangeListener 2016-10-19T09:55:43.834-07:00 [INFO] Started MetakvChangeListener BucketSettingsChangeListener
ReplicationSpecChangeListener 2016-10-19T09:55:43.834-07:00 [INFO] metakv.RunObserveChildren failed, err=Get http://127.0.0.1:8091/_metakv/replicationSpec/?feed=continuous: dial tcp 127.0.0.1:8091: getsockopt: connection refused
ReplicationManager 2016-10-19T09:55:43.834-07:00 [INFO] Replication manager is exiting...
ReplicationManager 2016-10-19T09:55:43.834-07:00 [INFO] Replication manager is already in the process of stopping, no-op on this stop request
[ns_server:debug,2016-10-19T09:55:44.835-07:00,ns_1@127.0.0.1:<0.237.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {buckets_events,<0.236.0>} exited with reason killed
[error_logger:error,2016-10-19T09:55:44.835-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================SUPERVISOR REPORT=========================
Supervisor: {local,ns_heart_sup}
Context: shutdown_error
Reason: killed
Offender: [{pid,<0.236.0>},
{name,ns_heart},
{mfargs,{ns_heart,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:44.835-07:00,ns_1@127.0.0.1:<0.228.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.226.0>} exited with reason killed
[ns_server:debug,2016-10-19T09:55:44.835-07:00,ns_1@127.0.0.1:<0.225.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.224.0>} exited with reason killed
[ns_server:debug,2016-10-19T09:55:44.835-07:00,ns_1@127.0.0.1:<0.217.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events_local,<0.216.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:44.835-07:00,ns_1@127.0.0.1:<0.206.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.205.0>} exited with reason shutdown
[error_logger:error,2016-10-19T09:55:44.836-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: gen_event:init_it/6
pid: <0.227.0>
registered_name: bucket_info_cache_invalidations
exception exit: killed
in function gen_event:terminate_server/4 (gen_event.erl, line 320)
ancestors: [bucket_info_cache,ns_server_sup,ns_server_nodes_sup,
<0.155.0>,ns_server_cluster_sup,<0.88.0>]
messages: []
links: []
dictionary: []
trap_exit: true
status: running
heap_size: 610
stack_size: 27
reductions: 256
neighbours:
[ns_server:debug,2016-10-19T09:55:44.838-07:00,ns_1@127.0.0.1:ns_couchdb_port<0.182.0>:ns_port_server:terminate:182]Sending shutdown to port ns_couchdb
[ns_server:debug,2016-10-19T09:55:44.838-07:00,ns_1@127.0.0.1:<0.197.0>:remote_monitors:handle_down:158]Caller of remote monitor <0.183.0> died with shutdown. Exiting
[ns_server:debug,2016-10-19T09:55:44.843-07:00,ns_1@127.0.0.1:ns_couchdb_port<0.182.0>:ns_port_server:terminate:185]ns_couchdb has exited
[error_logger:info,2016-10-19T09:55:44.843-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{'EXIT',<0.189.0>,connection_closed}}
[ns_server:debug,2016-10-19T09:55:44.844-07:00,ns_1@127.0.0.1:<0.163.0>:restartable:shutdown_child:120]Successfully terminated process <0.165.0>
[ns_server:debug,2016-10-19T09:55:44.844-07:00,ns_1@127.0.0.1:<0.162.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.161.0>} exited with reason shutdown
[ns_server:debug,2016-10-19T09:55:44.844-07:00,ns_1@127.0.0.1:<0.155.0>:restartable:shutdown_child:120]Successfully terminated process <0.156.0>
[ns_server:debug,2016-10-19T09:55:44.844-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:wait_saver:829]Done waiting for saver.
[ns_server:debug,2016-10-19T09:55:44.844-07:00,ns_1@127.0.0.1:<0.152.0>:ns_pubsub:do_subscribe_link:145]Parent process of subscription {ns_config_events,<0.151.0>} exited with reason shutdown
[ns_server:info,2016-10-19T09:55:47.130-07:00,nonode@nohost:<0.88.0>:ns_server:init_logging:151]Started & configured logging
[ns_server:info,2016-10-19T09:55:47.135-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]Static config terms:
[{error_logger_mf_dir,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{path_config_bindir,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/bin"},
{path_config_etcdir,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/couchbase"},
{path_config_libdir,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib"},
{path_config_datadir,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase"},
{path_config_tmpdir,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/tmp"},
{path_config_secdir,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/security"},
{nodefile,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.node"},
{loglevel_default,debug},
{loglevel_couchdb,info},
{loglevel_ns_server,debug},
{loglevel_error_logger,debug},
{loglevel_user,debug},
{loglevel_menelaus,debug},
{loglevel_ns_doctor,debug},
{loglevel_stats,debug},
{loglevel_rebalance,debug},
{loglevel_cluster,debug},
{loglevel_views,debug},
{loglevel_mapreduce_errors,debug},
{loglevel_xdcr,debug},
{loglevel_xdcr_trace,error},
{loglevel_access,info},
{disk_sink_opts,
[{rotation,
[{compress,true},
{size,41943040},
{num_files,10},
{buffer_size_max,52428800}]}]},
{disk_sink_opts_xdcr_trace,
[{rotation,[{compress,false},{size,83886080},{num_files,5}]}]},
{net_kernel_verbosity,10}]
[ns_server:warn,2016-10-19T09:55:47.135-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter error_logger_mf_dir, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.135-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_bindir, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.135-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_etcdir, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.135-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_libdir, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.136-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_datadir, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.136-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_tmpdir, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.136-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter path_config_secdir, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.136-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter nodefile, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.136-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_default, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.136-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_couchdb, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.136-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_ns_server, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.136-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_error_logger, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.136-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_user, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.136-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_menelaus, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.136-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_ns_doctor, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.136-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_stats, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.136-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_rebalance, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.137-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_cluster, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.137-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_views, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.137-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_mapreduce_errors, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.137-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_xdcr, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.137-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_xdcr_trace, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.137-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter loglevel_access, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.137-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter disk_sink_opts, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.137-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter disk_sink_opts_xdcr_trace, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.137-07:00,nonode@nohost:<0.88.0>:ns_server:log_pending:32]not overriding parameter net_kernel_verbosity, which is given from command line
[ns_server:warn,2016-10-19T09:55:47.138-07:00,nonode@nohost:<0.88.0>:ns_server:start:79]Could not lock myself into memory: {error,enotsup}. Ignoring.
[error_logger:info,2016-10-19T09:55:47.140-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.129.0>},
{name,local_tasks},
{mfargs,{local_tasks,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:47.143-07:00,nonode@nohost:ns_server_cluster_sup<0.128.0>:log_os_info:start_link:25]OS type: {unix,darwin} Version: {14,3,0}
Runtime info: [{otp_release,"R16B03-1"},
{erl_version,"5.10.4.0.0.1"},
{erl_version_long,
"Erlang R16B03-1 (erts-5.10.4.0.0.1) [source-00852da] [64-bit] [smp:8:8] [async-threads:16] [kernel-poll:true]\n"},
{system_arch_raw,"x86_64-apple-darwin13.4.0"},
{system_arch,"x86_64-apple-darwin13.4.0"},
{localtime,{{2016,10,19},{9,55,47}}},
{memory,
[{total,26110784},
{processes,10406360},
{processes_used,10405272},
{system,15704424},
{atom,331249},
{atom_used,309719},
{binary,63040},
{code,7684198},
{ets,2452584}]},
{loaded,
[ns_info,log_os_info,local_tasks,restartable,
ns_server_cluster_sup,mlockall,calendar,
ale_default_formatter,'ale_logger-metakv',
'ale_logger-rebalance','ale_logger-xdcr_trace',
'ale_logger-menelaus','ale_logger-stats',
'ale_logger-access','ale_logger-ns_server',
'ale_logger-user','ale_logger-ns_doctor',
'ale_logger-cluster','ale_logger-xdcr',otp_internal,
ale_stderr_sink,ns_log_sink,filelib,ale_disk_sink,misc,
io_lib_fread,couch_util,ns_server,cpu_sup,memsup,disksup,
os_mon,io,release_handler,overload,alarm_handler,sasl,
timer,tftp_sup,httpd_sup,httpc_handler_sup,httpc_cookie,
inets_trace,httpc_manager,httpc,httpc_profile_sup,
httpc_sup,ftp_sup,inets_sup,inets_app,ssl,lhttpc_manager,
lhttpc_sup,lhttpc,tls_connection_sup,ssl_session_cache,
ssl_pkix_db,ssl_manager,ssl_sup,ssl_app,crypto_server,
crypto_sup,crypto_app,ale_error_logger_handler,
'ale_logger-ale_logger','ale_logger-error_logger',
beam_opcodes,beam_dict,beam_asm,beam_validator,beam_z,
beam_flatten,beam_trim,beam_receive,beam_bsm,beam_peep,
beam_dead,beam_split,beam_type,beam_bool,beam_except,
beam_clean,beam_utils,beam_block,beam_jump,beam_a,
v3_codegen,v3_life,v3_kernel,sys_core_dsetel,erl_bifs,
sys_core_fold,cerl_trees,sys_core_inline,core_lib,cerl,
v3_core,erl_bits,erl_expand_records,sys_pre_expand,sofs,
erl_internal,sets,ordsets,erl_lint,compile,
dynamic_compile,ale_utils,io_lib_pretty,io_lib_format,
io_lib,ale_codegen,dict,ale,ale_dynamic_sup,ale_sup,
ale_app,epp,ns_bootstrap,child_erlang,file_io_server,
orddict,erl_eval,file,c,kernel_config,user_sup,
supervisor_bridge,standard_error,code_server,unicode,
hipe_unified_loader,gb_sets,ets,binary,code,file_server,
net_kernel,global_group,erl_distribution,filename,os,
inet_parse,inet,inet_udp,inet_config,inet_db,global,
gb_trees,rpc,supervisor,kernel,application_master,sys,
application,gen_server,erl_parse,proplists,erl_scan,lists,
application_controller,proc_lib,gen,gen_event,
error_logger,heart,error_handler,erts_internal,erlang,
erl_prim_loader,prim_zip,zlib,prim_file,prim_inet,
prim_eval,init,otp_ring0]},
{applications,
[{lhttpc,"Lightweight HTTP Client","1.3.0"},
{os_mon,"CPO CXC 138 46","2.2.14"},
{public_key,"Public key infrastructure","0.21"},
{asn1,"The Erlang ASN1 compiler version 2.0.4","2.0.4"},
{kernel,"ERTS CXC 138 10","2.16.4"},
{ale,"Another Logger for Erlang","4.6.0-3391-enterprise"},
{inets,"INETS CXC 138 49","5.9.8"},
{ns_server,"Couchbase server","4.6.0-3391-enterprise"},
{crypto,"CRYPTO version 2","3.2"},
{ssl,"Erlang/OTP SSL application","5.3.3"},
{sasl,"SASL CXC 138 11","2.3.4"},
{stdlib,"ERTS CXC 138 10","1.19.4"}]},
{pre_loaded,
[erts_internal,erlang,erl_prim_loader,prim_zip,zlib,
prim_file,prim_inet,prim_eval,init,otp_ring0]},
{process_count,94},
{node,nonode@nohost},
{nodes,[]},
{registered,
[lhttpc_manager,standard_error_sup,release_handler,
code_server,httpd_sup,ale_dynamic_sup,'sink-disk_metakv',
overload,application_controller,'sink-disk_access_int',
alarm_handler,'sink-disk_access',kernel_safe_sup,
'sink-xdcr_trace',standard_error,'sink-disk_reports',
error_logger,'sink-disk_stats',timer_server,
'sink-disk_xdcr_errors',crypto_server,sasl_safe_sup,
crypto_sup,'sink-disk_xdcr','sink-disk_debug',tftp_sup,
os_mon_sup,'sink-disk_error',tls_connection_sup,cpu_sup,
ssl_sup,memsup,'sink-disk_default',init,disksup,inet_db,
httpc_sup,rex,ssl_manager,kernel_sup,httpc_profile_sup,
global_name_server,httpc_manager,ns_server_cluster_sup,
httpc_handler_sup,file_server_2,os_cmd_port_creator,
global_group,ftp_sup,sasl_sup,'sink-stderr',
ale_stats_events,ale,erl_prim_loader,inets_sup,
'sink-ns_log',local_tasks,lhttpc_sup,ale_sup]},
{cookie,nocookie},
{wordsize,8},
{wall_clock,0}]
[ns_server:info,2016-10-19T09:55:47.148-07:00,nonode@nohost:ns_server_cluster_sup<0.128.0>:log_os_info:start_link:27]Manifest:
["","",
" ",
" ",
" ",
" ",
" ",
" "," "," ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" "," ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
""]
[error_logger:info,2016-10-19T09:55:47.149-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.130.0>},
{name,timeout_diag_logger},
{mfargs,{timeout_diag_logger,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:47.150-07:00,nonode@nohost:dist_manager<0.131.0>:dist_manager:read_address_config_from_path:86]Reading ip config from "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ip_start"
[ns_server:info,2016-10-19T09:55:47.150-07:00,nonode@nohost:dist_manager<0.131.0>:dist_manager:read_address_config_from_path:86]Reading ip config from "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ip"
[ns_server:info,2016-10-19T09:55:47.150-07:00,nonode@nohost:dist_manager<0.131.0>:dist_manager:init:163]ip config not found. Looks like we're a brand new node
[error_logger:info,2016-10-19T09:55:47.152-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,inet_gethost_native_sup}
started: [{pid,<0.133.0>},{mfa,{inet_gethost_native,init,[[]]}}]
[error_logger:info,2016-10-19T09:55:47.152-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.132.0>},
{name,inet_gethost_native_sup},
{mfargs,{inet_gethost_native,start_link,[]}},
{restart_type,temporary},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:47.161-07:00,nonode@nohost:dist_manager<0.131.0>:dist_manager:bringup:214]Attempting to bring up net_kernel with name 'ns_1@127.0.0.1'
[error_logger:info,2016-10-19T09:55:47.164-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.135.0>},
{name,erl_epmd},
{mfargs,{erl_epmd,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:47.164-07:00,nonode@nohost:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.136.0>},
{name,auth},
{mfargs,{auth,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:47.165-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:configure_net_kernel:255]Set net_kernel verbosity to 10 -> 0
[error_logger:info,2016-10-19T09:55:47.165-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,net_sup}
started: [{pid,<0.137.0>},
{name,net_kernel},
{mfargs,
{net_kernel,start_link,
[['ns_1@127.0.0.1',longnames]]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:47.165-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_sup}
started: [{pid,<0.134.0>},
{name,net_sup_dynamic},
{mfargs,
{erl_distribution,start_link,
[['ns_1@127.0.0.1',longnames]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,supervisor}]
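The two reports above show distribution being brought up dynamically: erl_distribution/net_kernel are started under kernel_sup after boot, rather than via -name on the command line. A minimal sketch of the same bring-up (assuming epmd is already running; API as in the OTP release this log comes from):

    %% Dynamic distribution bring-up, as dist_manager does above.
    {ok, _NetKernelPid} = net_kernel:start(['ns_1@127.0.0.1', longnames]),
    %% From here the node has a name and can connect to peers:
    %% pong = net_adm:ping('babysitter_of_ns_1@127.0.0.1').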
[ns_server:info,2016-10-19T09:55:47.166-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:save_node:147]saving node to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.node"
[ns_server:debug,2016-10-19T09:55:47.179-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:bringup:228]Attempted to save node name to disk: ok
[ns_server:debug,2016-10-19T09:55:47.179-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:wait_for_node:235]Waiting for connection to node 'babysitter_of_ns_1@127.0.0.1' to be established
[error_logger:info,2016-10-19T09:55:47.180-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{connect,normal,'babysitter_of_ns_1@127.0.0.1'}}
[ns_server:debug,2016-10-19T09:55:47.183-07:00,ns_1@127.0.0.1:dist_manager<0.131.0>:dist_manager:wait_for_node:244]Observed node 'babysitter_of_ns_1@127.0.0.1' to come up
[error_logger:info,2016-10-19T09:55:47.186-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.131.0>},
{name,dist_manager},
{mfargs,{dist_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:47.187-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.142.0>},
{name,ns_cookie_manager},
{mfargs,{ns_cookie_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:47.187-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.143.0>},
{name,ns_cluster},
{mfargs,{ns_cluster,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:47.188-07:00,ns_1@127.0.0.1:ns_config_sup<0.144.0>:ns_config_sup:init:32]loading static ns_config from "/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/couchbase/config"
[error_logger:info,2016-10-19T09:55:47.189-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.145.0>},
{name,ns_config_events},
{mfargs,
{gen_event,start_link,[{local,ns_config_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:47.189-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.146.0>},
{name,ns_config_events_local},
{mfargs,
{gen_event,start_link,
[{local,ns_config_events_local}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:47.209-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:load_config:1070]Loading static config from "/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/couchbase/config"
[ns_server:info,2016-10-19T09:55:47.210-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:load_config:1084]Loading dynamic config from "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/config.dat"
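The static config is an ordinary Erlang-term file, so it can be read back with file:consult/1. The dynamic config.dat is a binary dump; assuming it holds a single term_to_binary/1 payload (an assumption, not something this log confirms), it could be inspected as sketched below:

    %% Sketch: inspecting the two config files named above.
    {ok, StaticTerms} =
        file:consult("/Applications/Couchbase Server.app/Contents/Resources/"
                     "couchbase-core/etc/couchbase/config"),
    {ok, Bin} =
        file:read_file("/Users/johanlarson/Library/Application Support/"
                       "Couchbase/var/lib/couchbase/config/config.dat"),
    DynamicTerms = binary_to_term(Bin).  %% assumes a single term_to_binary payload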
[ns_server:debug,2016-10-19T09:55:47.214-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:load_config:1092]Here's the full dynamic config we loaded:
[[{buckets,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{18,63644101958}}]},
{configs,
[{"test",
[{repl_type,dcp},
{uuid,<<"7f7d4a28ca84a805edf9c899521eb18c">>},
{num_replicas,1},
{replica_index,false},
{ram_quota,524288000},
{auth_type,sasl},
{sasl_password,"*****"},
{autocompaction,false},
{purge_interval,undefined},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
                    ... (62 identical ['ns_1@127.0.0.1',undefined] rows elided; 64 vbuckets total) ...
                    ['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]},
{"locked",
[{eviction_policy,value_only},
{num_threads,3},
{flush_enabled,false},
{purge_interval,undefined},
{autocompaction,false},
{sasl_password,"*****"},
{auth_type,sasl},
{ram_quota,104857600},
{num_replicas,1},
{repl_type,dcp},
{uuid,<<"8515ae93e826e7c4389f3fd25fbb263e">>},
{replica_index,false},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
                    ... (62 identical ['ns_1@127.0.0.1',undefined] rows elided; 64 vbuckets total) ...
                    ['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]},
{"default",
[{repl_type,dcp},
{uuid,<<"b04d5897bd3c5329a82156f1b77c395d">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,104857600},
{auth_type,sasl},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
                    ... (62 identical ['ns_1@127.0.0.1',undefined] rows elided; 64 vbuckets total) ...
                    ['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]}]}]},
{vbucket_map_history,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012772}}]},
{[['ns_1@127.0.0.1',undefined],
       ... (62 identical ['ns_1@127.0.0.1',undefined] rows elided; 64 vbuckets total) ...
       ['ns_1@127.0.0.1',undefined]],
[{replication_topology,star},{tags,undefined},{max_slaves,10}]}]},
{alert_limits,
[{max_overhead_perc,50},{max_disk_used,90},{max_indexer_ram,75}]},
{audit,
[{auditd_enabled,false},
{rotate_interval,86400},
{rotate_size,20971520},
{disabled,[]},
{sync,[]},
{log_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"}]},
{auto_failover_cfg,[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{cert_and_pkey,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{<<"-----BEGIN CERTIFICATE-----\nMIIDAjCCAeqgAwIBAgIIFH6f01mhINIwDQYJKoZIhvcNAQELBQAwJDEiMCAGA1UE\nAxMZQ291Y2hiYXNlIFNlcnZlciAxMzBiNDVmMzAeFw0xMzAxMDEwMDAwMDBaFw00\nOTEyMzEyMzU5NTlaMCQxIjAgBgNVBAMTGUNvdWNoYmFzZSBTZXJ2ZXIgMTMwYjQ1\nZjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDINaiYz/zhTeD2h8Pa\nP015LetKMhey1yoE2L5H1wHK7pADeFRfqeoNunEvlxRWL/YjnqvRZPxrjdadlh7L\nVhZVke2blopHdhJjaHCvdI8R3BRBK4fLv5m4c0SzdE6bvk1QS+T3rZyzxUbMtB0g\nEq2ZPed8JdQFqO0Bo1JuXJx4/q9tjhvbHUVjRX9QHL3nClC3qVemVjTCKbNqZWv8\n5qZmH/X5DWkyNFKj6HzE20qFWYa8d9tmdeo9zaGVMzCFCOXKPGeHkW/GpJWxK3FM\n/BWdgq5nonb+y3ufSE1JBJjXCO6JipXf4OKRB54009m9hAmJJK9sPVeH9NMnVhS7\naEDXAgMBAAGjODA2MA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEFBQcD\nATAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCAdca3XDbl7heN\n6vk0VTcrrZCLHDY+PLTFcnGq2xv84APOrvwQJEH9qnCw0/czkn9UW+27Ix2wjkeP\nFbUdXKvFDpU0LQgpkdZ+BKXQlX0ezKG+StpUODxYdDnUDCLzRLJsg0GgEODysPAK\nwHiA3X5d+UvNE/Z7TP5ASyzXnypuR8jhXCdEQ0o8mLQMx4I4Xd2sHFz2x6qO9i8f\nMPEJ076QTj5+RyI4BDAgUeWns/ZTKX/bi+FXPkRZ8QWkxIrSkNSdmgvPmMBzFluv\nDhFwtFBMQovmICfkT5TYmtwYsqZgh32v5FZOLUlHOR29R1dKOOuyCbIyqlTjWCYZ\n1j3GlmIC\n-----END CERTIFICATE-----\n">>,
<<"*****">>}]},
{cluster_compat_version,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{6,63644012660}}]},
4,6]},
{drop_request_memory_threshold_mib,undefined},
{email_alerts,
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},{pass,"*****"},{host,"localhost"},{port,25},{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,
auto_failover_disabled,ip,disk,overhead,ep_oom_errors,
ep_item_commit_failed,audit_dropped_events,indexer_ram_max_usage]}]},
{fts_memory_quota,512},
{goxdcr_upgrade,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
'_deleted']},
{index_aware_rebalance_disabled,false},
{max_bucket_count,10},
{memcached,[]},
{memory_quota,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]}|
1024]},
{nodes_wanted,['ns_1@127.0.0.1']},
{otp,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]},
{cookie,oxqibayfkfbrogxo}]},
{read_only_user_creds,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
null]},
{remote_clusters,[]},
{replication,[{enabled,true}]},
{rest,[{port,8091}]},
{rest_creds,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012784}}]}|
{"Administrator",{password,"*****"}}]},
{roles_definitions,
[{admin,[],
[{name,<<"Admin">>},
{desc,<<"Can manage ALL cluster features including security.">>}],
[{[],all}]},
{ro_admin,[],
[{name,<<"Read Only Admin">>},
{desc,<<"Can view ALL cluster features.">>}],
[{[{bucket,any},password],none},
{[{bucket,any},data],none},
{[admin,security],[read]},
{[admin],none},
{[],[read]}]},
{cluster_admin,[],
[{name,<<"Cluster Admin">>},
{desc,<<"Can manage all cluster features EXCEPT security.">>}],
[{[admin],none},{[],all}]},
{bucket_admin,
[bucket_name],
[{name,<<"Bucket Admin">>},
{desc,
<<"Can manage ALL bucket features for specified buckets (incl. start/stop XDCR)">>}],
[{[{bucket,bucket_name},xdcr],[read,execute]},
{[{bucket,bucket_name}],all},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{bucket_sasl,
[bucket_name],
[],
[{[{bucket,bucket_name},data],all},
{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name}],[read,flush]},
{[pools],[read]}]},
{views_admin,
[bucket_name],
[{name,<<"Views Admin">>},
{desc,<<"Can manage views for specified buckets">>}],
[{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{replication_admin,[],
[{name,<<"Replication Admin">>},
{desc,<<"Can manage ONLY XDCR features (cluster AND bucket level)">>}],
[{[{bucket,any},xdcr],all},
{[{bucket,any},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],all},
{[admin],none},
{[],[read]}]}]},
{server_groups,
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{settings,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012777}}]},
{stats,[{send_stats,false}]}]},
{uuid,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012784}}]}|
<<"561b20f339d4184270a7de0b1c1de1b0">>]},
{{couchdb,max_parallel_indexers},4},
{{couchdb,max_parallel_replica_indexers},2},
{{local_changes_count,<<"eac84bf2ecf69c83ca0268ac5aac465d">>},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{35,63644101958}}]}]},
{{metakv,<<"/indexing/settings/config">>},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{4,63644012784}}]}|
<<"{\"indexer.settings.compaction.days_of_week\":\"Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday\",\"indexer.settings.compaction.interval\":\"00:00,00:00\",\"indexer.settings.compaction.compaction_mode\":\"circular\",\"indexer.settings.persisted_snapshot.interval\":5000,\"indexer.settings.log_level\":\"info\",\"indexer.settings.compaction.min_frag\":30,\"indexer.settings.inmemory_snapshot.interval\":200,\"indexer.settings.max_cpu_percent\":0,\"indexer.settings.storage_mode\":\"forestdb\",\"indexer.settings.recovery.max_rollbacks\":5,\"indexer.settings.memory_quota\":536870912,\"indexer.settings.compaction.abort_exceed_interval\":false}">>]},
{{request_limit,capi},undefined},
{{request_limit,rest},undefined},
{{service_map,fts},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}]},
{{service_map,index},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']},
{{service_map,n1ql},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']},
{{node,'ns_1@127.0.0.1',audit},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]},
{{node,'ns_1@127.0.0.1',capi_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8092]},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{check_interval,30},
{min_db_file_size,131072},
{min_view_file_size,20971520}]},
{{node,'ns_1@127.0.0.1',config_version},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{4,5}]},
{{node,'ns_1@127.0.0.1',fts_http_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8094]},
{{node,'ns_1@127.0.0.1',indexer_admin_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9100]},
{{node,'ns_1@127.0.0.1',indexer_http_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9102]},
{{node,'ns_1@127.0.0.1',indexer_scan_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9101]},
{{node,'ns_1@127.0.0.1',indexer_stcatchup_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9104]},
{{node,'ns_1@127.0.0.1',indexer_stinit_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9103]},
{{node,'ns_1@127.0.0.1',indexer_stmaint_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9105]},
{{node,'ns_1@127.0.0.1',is_enterprise},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
true]},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@127.0.0.1',ldap_enabled},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
false]},
{{node,'ns_1@127.0.0.1',membership},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
active]},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11210},
{dedicated_port,11209},
{ssl_port,11207},
{admin_user,"_admin"},
{admin_pass,"*****"},
{engines,
[{membase,
[{engine,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/ep.so"},
{static_config_string,"failpartialwarmup=false"}]},
{memcached,
[{engine,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{config_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached.json"},
{audit_file,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/audit.json"},
{log_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003}]},
{{node,'ns_1@127.0.0.1',memcached_config},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{[{interfaces,
{memcached_config_mgr,omit_missing_mcd_ports,
[{[{host,<<"*">>},{port,port},{maxconn,maxconn}]},
{[{host,<<"*">>},
{port,dedicated_port},
{maxconn,dedicated_port_maxconn}]},
{[{host,<<"*">>},
{port,ssl_port},
{maxconn,maxconn},
{ssl,
{[{key,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-key.pem">>},
{cert,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-cert.pem">>}]}}]}]}},
{ssl_cipher_list,{"~s",[ssl_cipher_list]}},
{ssl_minimum_protocol,{memcached_config_mgr,ssl_minimum_protocol,[]}},
{connection_idle_time,connection_idle_time},
{breakpad,
{[{enabled,breakpad_enabled},
{minidump_dir,{memcached_config_mgr,get_minidump_dir,[]}}]}},
{extensions,
[{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/stdin_term_handler.so">>},
{config,<<>>}]},
{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/file_logger.so">>},
{config,
{"cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]}}]}]},
{admin,{"~s",[admin_user]}},
{verbosity,verbosity},
{audit_file,{"~s",[audit_file]}},
{dedupe_nmvb_maps,dedupe_nmvb_maps}]}]},
{{node,'ns_1@127.0.0.1',memcached_defaults},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{maxconn,30000},
{dedicated_port_maxconn,5000},
{ssl_cipher_list,"HIGH"},
{connection_idle_time,0},
{verbosity,0},
{breakpad_enabled,true},
{breakpad_minidump_dir_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/crash"},
{dedupe_nmvb_maps,false}]},
{{node,'ns_1@127.0.0.1',moxi},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11211},
{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{filename,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ns_log"}]},
{{node,'ns_1@127.0.0.1',port_servers},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]},
{{node,'ns_1@127.0.0.1',projector_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9999]},
{{node,'ns_1@127.0.0.1',query_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8093]},
{{node,'ns_1@127.0.0.1',rest},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,8091},
{port_meta,global}]},
{{node,'ns_1@127.0.0.1',services},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]},
index,kv,n1ql]},
{{node,'ns_1@127.0.0.1',ssl_capi_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18092]},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
11214]},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
11215]},
{{node,'ns_1@127.0.0.1',ssl_query_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18093]},
{{node,'ns_1@127.0.0.1',ssl_rest_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18091]},
{{node,'ns_1@127.0.0.1',stop_xdcr},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012660}}]}|
'_deleted']},
{{node,'ns_1@127.0.0.1',uuid},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
<<"eac84bf2ecf69c83ca0268ac5aac465d">>]},
{{node,'ns_1@127.0.0.1',xdcr_rest_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9998]}]]
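A note on reading the dump above: most keys carry a '_vclock' metadata entry of the form {NodeUUID, {Counter, Timestamp}}, and '_deleted' marks tombstoned keys. The timestamps look like Gregorian seconds (seconds since year 0), which OTP's calendar module can decode; for example, in an Erlang shell:

    1> calendar:gregorian_seconds_to_datetime(63644012657).
    {{2016,10,18},{12,24,17}}   %% UTC; the day before this log was captured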
[ns_server:info,2016-10-19T09:55:47.220-07:00,ns_1@127.0.0.1:ns_config<0.147.0>:ns_config:load_config:1113]Here's the full dynamic config we loaded, plus the static & default config:
[{{node,'ns_1@127.0.0.1',xdcr_rest_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9998]},
{{node,'ns_1@127.0.0.1',uuid},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
<<"eac84bf2ecf69c83ca0268ac5aac465d">>]},
{{node,'ns_1@127.0.0.1',stop_xdcr},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012660}}]}|
'_deleted']},
{{node,'ns_1@127.0.0.1',ssl_rest_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18091]},
{{node,'ns_1@127.0.0.1',ssl_query_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18093]},
{{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
11215]},
{{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
11214]},
{{node,'ns_1@127.0.0.1',ssl_capi_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
18092]},
{{node,'ns_1@127.0.0.1',services},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]},
index,kv,n1ql]},
{{node,'ns_1@127.0.0.1',rest},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,8091},
{port_meta,global}]},
{{node,'ns_1@127.0.0.1',query_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8093]},
{{node,'ns_1@127.0.0.1',projector_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9999]},
{{node,'ns_1@127.0.0.1',port_servers},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]},
{{node,'ns_1@127.0.0.1',ns_log},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{filename,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ns_log"}]},
{{node,'ns_1@127.0.0.1',moxi},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11211},
{verbosity,[]}]},
{{node,'ns_1@127.0.0.1',memcached_defaults},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{maxconn,30000},
{dedicated_port_maxconn,5000},
{ssl_cipher_list,"HIGH"},
{connection_idle_time,0},
{verbosity,0},
{breakpad_enabled,true},
{breakpad_minidump_dir_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/crash"},
{dedupe_nmvb_maps,false}]},
{{node,'ns_1@127.0.0.1',memcached_config},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{[{interfaces,
{memcached_config_mgr,omit_missing_mcd_ports,
[{[{host,<<"*">>},{port,port},{maxconn,maxconn}]},
{[{host,<<"*">>},
{port,dedicated_port},
{maxconn,dedicated_port_maxconn}]},
{[{host,<<"*">>},
{port,ssl_port},
{maxconn,maxconn},
{ssl,
{[{key,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-key.pem">>},
{cert,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-cert.pem">>}]}}]}]}},
{ssl_cipher_list,{"~s",[ssl_cipher_list]}},
{ssl_minimum_protocol,{memcached_config_mgr,ssl_minimum_protocol,[]}},
{connection_idle_time,connection_idle_time},
{breakpad,
{[{enabled,breakpad_enabled},
{minidump_dir,{memcached_config_mgr,get_minidump_dir,[]}}]}},
{extensions,
[{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/stdin_term_handler.so">>},
{config,<<>>}]},
{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/file_logger.so">>},
{config,
{"cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]}}]}]},
{admin,{"~s",[admin_user]}},
{verbosity,verbosity},
{audit_file,{"~s",[audit_file]}},
{dedupe_nmvb_maps,dedupe_nmvb_maps}]}]},
{{node,'ns_1@127.0.0.1',memcached},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11210},
{dedicated_port,11209},
{ssl_port,11207},
{admin_user,"_admin"},
{admin_pass,"*****"},
{engines,
[{membase,
[{engine,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/ep.so"},
{static_config_string,"failpartialwarmup=false"}]},
{memcached,
[{engine,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{config_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached.json"},
{audit_file,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/audit.json"},
{log_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003}]},
{{node,'ns_1@127.0.0.1',membership},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
active]},
{{node,'ns_1@127.0.0.1',ldap_enabled},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
false]},
{{node,'ns_1@127.0.0.1',isasl},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw"}]},
{{node,'ns_1@127.0.0.1',is_enterprise},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
true]},
{{node,'ns_1@127.0.0.1',indexer_stmaint_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9105]},
{{node,'ns_1@127.0.0.1',indexer_stinit_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9103]},
{{node,'ns_1@127.0.0.1',indexer_stcatchup_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9104]},
{{node,'ns_1@127.0.0.1',indexer_scan_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9101]},
{{node,'ns_1@127.0.0.1',indexer_http_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9102]},
{{node,'ns_1@127.0.0.1',indexer_admin_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
9100]},
{{node,'ns_1@127.0.0.1',fts_http_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8094]},
{{node,'ns_1@127.0.0.1',config_version},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{4,5}]},
{{node,'ns_1@127.0.0.1',compaction_daemon},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{check_interval,30},
{min_db_file_size,131072},
{min_view_file_size,20971520}]},
{{node,'ns_1@127.0.0.1',capi_port},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
8092]},
{{node,'ns_1@127.0.0.1',audit},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]},
{{service_map,n1ql},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']},
{{service_map,index},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']},
{{service_map,fts},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}]},
{{request_limit,rest},undefined},
{{request_limit,capi},undefined},
{{metakv,<<"/indexing/settings/config">>},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{4,63644012784}}]}|
<<"{\"indexer.settings.compaction.days_of_week\":\"Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday\",\"indexer.settings.compaction.interval\":\"00:00,00:00\",\"indexer.settings.compaction.compaction_mode\":\"circular\",\"indexer.settings.persisted_snapshot.interval\":5000,\"indexer.settings.log_level\":\"info\",\"indexer.settings.compaction.min_frag\":30,\"indexer.settings.inmemory_snapshot.interval\":200,\"indexer.settings.max_cpu_percent\":0,\"indexer.settings.storage_mode\":\"forestdb\",\"indexer.settings.recovery.max_rollbacks\":5,\"indexer.settings.memory_quota\":536870912,\"indexer.settings.compaction.abort_exceed_interval\":false}">>]},
{{local_changes_count,<<"eac84bf2ecf69c83ca0268ac5aac465d">>},
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{35,63644101958}}]}]},
{{couchdb,max_parallel_replica_indexers},2},
{{couchdb,max_parallel_indexers},4},
{uuid,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012784}}]}|
<<"561b20f339d4184270a7de0b1c1de1b0">>]},
{settings,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012777}}]},
{stats,[{send_stats,false}]}]},
{set_view_update_daemon,
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]},
{server_groups,
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]},
{roles_definitions,
[{admin,[],
[{name,<<"Admin">>},
{desc,<<"Can manage ALL cluster features including security.">>}],
[{[],all}]},
{ro_admin,[],
[{name,<<"Read Only Admin">>},{desc,<<"Can view ALL cluster features.">>}],
[{[{bucket,any},password],none},
{[{bucket,any},data],none},
{[admin,security],[read]},
{[admin],none},
{[],[read]}]},
{cluster_admin,[],
[{name,<<"Cluster Admin">>},
{desc,<<"Can manage all cluster features EXCEPT security.">>}],
[{[admin],none},{[],all}]},
{bucket_admin,
[bucket_name],
[{name,<<"Bucket Admin">>},
{desc,
<<"Can manage ALL bucket features for specified buckets (incl. start/stop XDCR)">>}],
[{[{bucket,bucket_name},xdcr],[read,execute]},
{[{bucket,bucket_name}],all},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{bucket_sasl,
[bucket_name],
[],
[{[{bucket,bucket_name},data],all},
{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name}],[read,flush]},
{[pools],[read]}]},
{views_admin,
[bucket_name],
[{name,<<"Views Admin">>},
{desc,<<"Can manage views for specified buckets">>}],
[{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{replication_admin,[],
[{name,<<"Replication Admin">>},
{desc,<<"Can manage ONLY XDCR features (cluster AND bucket level)">>}],
[{[{bucket,any},xdcr],all},
{[{bucket,any},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],all},
{[admin],none},
{[],[read]}]}]},
{rest_creds,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012784}}]}|
{"Administrator",{password,"*****"}}]},
{rest,[{port,8091}]},
{replication,[{enabled,true}]},
{remote_clusters,[]},
{read_only_user_creds,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
null]},
{otp,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]},
{cookie,oxqibayfkfbrogxo}]},
{nodes_wanted,['ns_1@127.0.0.1']},
{memory_quota,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]}|
1024]},
{memcached,[]},
{max_bucket_count,10},
{index_aware_rebalance_disabled,false},
{goxdcr_upgrade,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
'_deleted']},
{fts_memory_quota,512},
{email_alerts,
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,
[{user,[]},{pass,"*****"},{host,"localhost"},{port,25},{encrypt,false}]},
{alerts,
[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,
auto_failover_disabled,ip,disk,overhead,ep_oom_errors,
ep_item_commit_failed,audit_dropped_events,indexer_ram_max_usage]}]},
{drop_request_memory_threshold_mib,undefined},
{cluster_compat_version,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{6,63644012660}}]},
4,6]},
{cert_and_pkey,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{<<"-----BEGIN CERTIFICATE-----\nMIIDAjCCAeqgAwIBAgIIFH6f01mhINIwDQYJKoZIhvcNAQELBQAwJDEiMCAGA1UE\nAxMZQ291Y2hiYXNlIFNlcnZlciAxMzBiNDVmMzAeFw0xMzAxMDEwMDAwMDBaFw00\nOTEyMzEyMzU5NTlaMCQxIjAgBgNVBAMTGUNvdWNoYmFzZSBTZXJ2ZXIgMTMwYjQ1\nZjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDINaiYz/zhTeD2h8Pa\nP015LetKMhey1yoE2L5H1wHK7pADeFRfqeoNunEvlxRWL/YjnqvRZPxrjdadlh7L\nVhZVke2blopHdhJjaHCvdI8R3BRBK4fLv5m4c0SzdE6bvk1QS+T3rZyzxUbMtB0g\nEq2ZPed8JdQFqO0Bo1JuXJx4/q9tjhvbHUVjRX9QHL3nClC3qVemVjTCKbNqZWv8\n5qZmH/X5DWkyNFKj6HzE20qFWYa8d9tmdeo9zaGVMzCFCOXKPGeHkW/GpJWxK3FM\n/BWdgq5nonb+y3ufSE1JBJjXCO6JipXf4OKRB54009m9hAmJJK9sPVeH9NMnVhS7\naEDXAgMBAAGjODA2MA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEFBQcD\nATAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCAdca3XDbl7heN\n6vk0VTcrrZCLHDY+PLTFcnGq2xv84APOrvwQJEH9qnCw0/czkn9UW+27Ix2wjkeP\nFbUdXKvFDpU0LQgpkdZ+BKXQlX0ezKG+StpUODxYdDnUDCLzRLJsg0GgEODysPAK\nwHiA3X5d+UvNE/Z7TP5ASyzXnypuR8jhXCdEQ0o8mLQMx4I4Xd2sHFz2x6qO9i8f\nMPEJ076QTj5+RyI4BDAgUeWns/ZTKX/bi+FXPkRZ8QWkxIrSkNSdmgvPmMBzFluv\nDhFwtFBMQovmICfkT5TYmtwYsqZgh32v5FZOLUlHOR29R1dKOOuyCbIyqlTjWCYZ\n1j3GlmIC\n-----END CERTIFICATE-----\n">>,
<<"*****">>}]},
{autocompaction,
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]},
{auto_failover_cfg,[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]},
{audit,
[{auditd_enabled,false},
{rotate_interval,86400},
{rotate_size,20971520},
{disabled,[]},
{sync,[]},
{log_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"}]},
{alert_limits,
[{max_overhead_perc,50},{max_disk_used,90},{max_indexer_ram,75}]},
{vbucket_map_history,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012772}}]},
{[['ns_1@127.0.0.1',undefined],
    ... (62 identical ['ns_1@127.0.0.1',undefined] rows elided; 64 vbuckets total) ...
    ['ns_1@127.0.0.1',undefined]],
[{replication_topology,star},{tags,undefined},{max_slaves,10}]}]},
{buckets,
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{18,63644101958}}]},
{configs,
[{"test",
[{repl_type,dcp},
{uuid,<<"7f7d4a28ca84a805edf9c899521eb18c">>},
{num_replicas,1},
{replica_index,false},
{ram_quota,524288000},
{auth_type,sasl},
{sasl_password,"*****"},
{autocompaction,false},
{purge_interval,undefined},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
         ... (62 identical ['ns_1@127.0.0.1',undefined] rows elided; 64 vbuckets total) ...
         ['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]},
{"locked",
[{eviction_policy,value_only},
{num_threads,3},
{flush_enabled,false},
{purge_interval,undefined},
{autocompaction,false},
{sasl_password,"*****"},
{auth_type,sasl},
{ram_quota,104857600},
{num_replicas,1},
{repl_type,dcp},
{uuid,<<"8515ae93e826e7c4389f3fd25fbb263e">>},
{replica_index,false},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
         ... (62 identical ['ns_1@127.0.0.1',undefined] rows elided; 64 vbuckets total) ...
         ['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]},
{"default",
[{repl_type,dcp},
{uuid,<<"b04d5897bd3c5329a82156f1b77c395d">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,104857600},
{auth_type,sasl},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map,
[['ns_1@127.0.0.1',undefined],
         ... (62 identical ['ns_1@127.0.0.1',undefined] rows elided; 64 vbuckets total) ...
         ['ns_1@127.0.0.1',undefined]]},
{map_opts_hash,133465355}]}]}]}]
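Each bucket map above has exactly num_vbuckets = 64 rows of [ActiveNode, ReplicaNode] (the replica slot is undefined here because a single node cannot host its own replicas). Clients pick the row by hashing the key; a sketch of the commonly documented libvbucket-style CRC32 mapping (treat the exact formula as an assumption, not something this log confirms):

    %% Key -> vbucket id; the id indexes a [Active, Replica] row in the map.
    vbucket_for_key(Key, NumVBuckets) when is_binary(Key) ->
        ((erlang:crc32(Key) bsr 16) band 16#7fff) rem NumVBuckets.

    %% vbucket_for_key(<<"user::123">>, 64) -> an integer in 0..63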
[error_logger:info,2016-10-19T09:55:47.224-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.147.0>},
{name,ns_config},
{mfargs,
{ns_config,start_link,
["/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/couchbase/config",
ns_config_default]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:47.225-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.150.0>},
{name,ns_config_remote},
{mfargs,
{ns_config_replica,start_link,
[{local,ns_config_remote}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:47.226-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_config_sup}
started: [{pid,<0.151.0>},
{name,ns_config_log},
{mfargs,{ns_config_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:47.226-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.144.0>},
{name,ns_config_sup},
{mfargs,{ns_config_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:47.227-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.153.0>},
{name,vbucket_filter_changes_registry},
{mfargs,
{ns_process_registry,start_link,
[vbucket_filter_changes_registry,
[{terminate_command,shutdown}]]}},
{restart_type,permanent},
{shutdown,100},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:47.228-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.154.0>},
{name,json_rpc_connection_sup},
{mfargs,{json_rpc_connection_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:47.234-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.157.0>},
{name,remote_monitors},
{mfargs,{remote_monitors,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:47.235-07:00,ns_1@127.0.0.1:menelaus_barrier<0.158.0>:one_shot_barrier:barrier_body:58]Barrier menelaus_barrier has started
[error_logger:info,2016-10-19T09:55:47.235-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.158.0>},
{name,menelaus_barrier},
{mfargs,{menelaus_sup,barrier_start_link,[]}},
{restart_type,temporary},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:47.235-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.159.0>},
{name,rest_lhttpc_pool},
{mfargs,
{lhttpc_manager,start_link,
[[{name,rest_lhttpc_pool},
{connection_timeout,120000},
{pool_size,20}]]}},
{restart_type,{permanent,1}},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:47.242-07:00,ns_1@127.0.0.1:ns_ssl_services_setup<0.161.0>:ns_ssl_services_setup:init:370]Used ssl options:
[{keyfile,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/ssl-cert-key.pem"},
{certfile,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/ssl-cert-key.pem"},
{versions,[tlsv1,'tlsv1.1','tlsv1.2']},
{cacertfile,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/ssl-cert-key.pem-ca"},
{dh,<<48,130,1,8,2,130,1,1,0,152,202,99,248,92,201,35,238,246,5,77,93,120,10,
118,129,36,52,111,193,167,220,49,229,106,105,152,133,121,157,73,158,
232,153,197,197,21,171,140,30,207,52,165,45,8,221,162,21,199,183,66,
211,247,51,224,102,214,190,130,96,253,218,193,35,43,139,145,89,200,250,
145,92,50,80,134,135,188,205,254,148,122,136,237,220,186,147,187,104,
159,36,147,217,117,74,35,163,145,249,175,242,18,221,124,54,140,16,246,
169,84,252,45,47,99,136,30,60,189,203,61,86,225,117,255,4,91,46,110,
167,173,106,51,65,10,248,94,225,223,73,40,232,140,26,11,67,170,118,190,
67,31,127,233,39,68,88,132,171,224,62,187,207,160,189,209,101,74,8,205,
174,146,173,80,105,144,246,25,153,86,36,24,178,163,64,202,221,95,184,
110,244,32,226,217,34,55,188,230,55,16,216,247,173,246,139,76,187,66,
211,159,17,46,20,18,48,80,27,250,96,189,29,214,234,241,34,69,254,147,
103,220,133,40,164,84,8,44,241,61,164,151,9,135,41,60,75,4,202,133,173,
72,6,69,167,89,112,174,40,229,171,2,1,2>>},
{ciphers,[{dhe_rsa,aes_256_cbc,sha256},
{dhe_dss,aes_256_cbc,sha256},
{rsa,aes_256_cbc,sha256},
{dhe_rsa,aes_128_cbc,sha256},
{dhe_dss,aes_128_cbc,sha256},
{rsa,aes_128_cbc,sha256},
{dhe_rsa,aes_256_cbc,sha},
{dhe_dss,aes_256_cbc,sha},
{rsa,aes_256_cbc,sha},
{dhe_rsa,'3des_ede_cbc',sha},
{dhe_dss,'3des_ede_cbc',sha},
{rsa,'3des_ede_cbc',sha},
{dhe_rsa,aes_128_cbc,sha},
{dhe_dss,aes_128_cbc,sha},
{rsa,aes_128_cbc,sha}]}]
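These are standard OTP ssl options: keyfile/certfile/cacertfile, an explicit TLS version list, a fixed DH parameter, and a cipher-suite list. A minimal sketch of opening a TLS listener with options of the same shape, here on the ssl_rest_port (18091) from the config above (illustrative, not ns_server's code):

    ok = ssl:start(),
    Pem = "/Users/johanlarson/Library/Application Support/Couchbase/"
          "var/lib/couchbase/config/ssl-cert-key.pem",
    {ok, ListenSock} =
        ssl:listen(18091, [{keyfile, Pem},
                           {certfile, Pem},
                           {versions, [tlsv1, 'tlsv1.1', 'tlsv1.2']}]).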
[error_logger:info,2016-10-19T09:55:47.293-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.161.0>},
{name,ns_ssl_services_setup},
{mfargs,{ns_ssl_services_setup,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:47.309-07:00,ns_1@127.0.0.1:<0.163.0>:menelaus_pluggable_ui:validate_plugin_spec:117]Loaded pluggable UI specification for fts
[ns_server:info,2016-10-19T09:55:47.309-07:00,ns_1@127.0.0.1:<0.163.0>:menelaus_pluggable_ui:validate_plugin_spec:117]Loaded pluggable UI specification for n1ql
[ns_server:debug,2016-10-19T09:55:47.315-07:00,ns_1@127.0.0.1:<0.163.0>:restartable:start_child:98]Started child process <0.165.0>
MFA: {ns_ssl_services_setup,start_link_rest_service,[]}
[error_logger:info,2016-10-19T09:55:47.315-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_ssl_services_sup}
started: [{pid,<0.163.0>},
{name,ns_rest_ssl_service},
{mfargs,
{restartable,start_link,
[{ns_ssl_services_setup,
start_link_rest_service,[]},
1000]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:47.315-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.160.0>},
{name,ns_ssl_services_sup},
{mfargs,{ns_ssl_services_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:47.322-07:00,ns_1@127.0.0.1:wait_link_to_couchdb_node<0.183.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:126]Waiting for ns_couchdb node to start
[error_logger:info,2016-10-19T09:55:47.322-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.182.0>},
{name,start_couchdb_node},
{mfargs,{ns_server_nodes_sup,start_couchdb_node,[]}},
{restart_type,{permanent,5}},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:47.322-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{connect,normal,'couchdb_ns_1@127.0.0.1'}}
[error_logger:info,2016-10-19T09:55:47.322-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{'EXIT',<0.186.0>,shutdown}}
[ns_server:debug,2016-10-19T09:55:47.322-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: {badrpc,nodedown}
[error_logger:info,2016-10-19T09:55:47.322-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{net_kernel,875,nodedown,'couchdb_ns_1@127.0.0.1'}}
[error_logger:info,2016-10-19T09:55:47.525-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================INFO REPORT=========================
{net_kernel,{connect,normal,'couchdb_ns_1@127.0.0.1'}}
[ns_server:debug,2016-10-19T09:55:47.530-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[ns_server:debug,2016-10-19T09:55:47.734-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[ns_server:debug,2016-10-19T09:55:47.938-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[ns_server:debug,2016-10-19T09:55:48.140-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
[ns_server:debug,2016-10-19T09:55:48.344-07:00,ns_1@127.0.0.1:<0.184.0>:ns_server_nodes_sup:do_wait_link_to_couchdb_node:140]ns_couchdb is not ready: false
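Annotation: the repeating "ns_couchdb is not ready" lines above are a roughly 200 ms poll loop: first the remote node is down entirely ({badrpc,nodedown}), then it answers but its readiness predicate still returns false. A generic sketch of that pattern using rpc:call/4; erlang:is_alive/0 stands in here for whatever predicate ns_server actually evaluates on the couchdb node:

-module(wait_node_sketch).  %% hypothetical
-export([wait_ready/1]).

wait_ready(Node) ->
    case rpc:call(Node, erlang, is_alive, []) of
        true ->
            ok;
        NotReady ->  %% false | {badrpc, nodedown} | ...
            io:format("~p is not ready: ~p~n", [Node, NotReady]),
            timer:sleep(200),
            wait_ready(Node)
    end.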
[error_logger:info,2016-10-19T09:55:48.660-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.183.0>},
{name,wait_for_couchdb_node},
{mfargs,
{erlang,apply,
[#Fun<...>,[]]}},

{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.663-07:00,ns_1@127.0.0.1:ns_server_nodes_sup<0.156.0>:ns_storage_conf:setup_db_and_ix_paths:53]Initialize db_and_ix_paths variable with [{db_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/data"},
{index_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/data"}]
[error_logger:info,2016-10-19T09:55:48.666-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.198.0>},
{name,diag_handler_worker},
{mfargs,{work_queue,start_link,[diag_handler_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:48.667-07:00,ns_1@127.0.0.1:ns_server_sup<0.197.0>:dir_size:start_link:39]Starting quick version of dir_size with program name: godu
[error_logger:info,2016-10-19T09:55:48.668-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.199.0>},
{name,dir_size},
{mfargs,{dir_size,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.669-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.200.0>},
{name,request_throttler},
{mfargs,{request_throttler,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.671-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.202.0>},
{name,timer2_server},
{mfargs,{timer2,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.672-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.201.0>},
{name,ns_log},
{mfargs,{ns_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.672-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.203.0>},
{name,ns_crash_log_consumer},
{mfargs,{ns_log,start_link_crash_consumer,[]}},
{restart_type,{permanent,4}},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.674-07:00,ns_1@127.0.0.1:ns_config_isasl_sync<0.204.0>:ns_config_isasl_sync:init:63]isasl_sync init: ["/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw",
"_admin"]
[ns_server:debug,2016-10-19T09:55:48.674-07:00,ns_1@127.0.0.1:ns_config_isasl_sync<0.204.0>:ns_config_isasl_sync:init:71]isasl_sync init buckets: ["default","locked","test"]
[ns_server:debug,2016-10-19T09:55:48.674-07:00,ns_1@127.0.0.1:ns_config_isasl_sync<0.204.0>:ns_config_isasl_sync:writeSASLConf:143]Writing isasl passwd file: "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw"
[user:info,2016-10-19T09:55:48.675-07:00,ns_1@127.0.0.1:<0.203.0>:ns_log:crash_consumption_loop:70]Service 'ns_server' exited with status 1. Restarting. Messages: {"Kernel pid terminated",application_controller,"{application_terminated,os_mon,shutdown}"}
[ns_server:error,2016-10-19T09:55:48.676-07:00,ns_1@127.0.0.1:ns_log<0.201.0>:ns_log:handle_cast:209]unable to notify listeners because of badarg
[error_logger:info,2016-10-19T09:55:48.719-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.204.0>},
{name,ns_config_isasl_sync},
{mfargs,{ns_config_isasl_sync,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.719-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.207.0>},
{name,ns_log_events},
{mfargs,{gen_event,start_link,[{local,ns_log_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.720-07:00,ns_1@127.0.0.1:ns_node_disco<0.210.0>:ns_node_disco:init:138]Initting ns_node_disco with []
[ns_server:debug,2016-10-19T09:55:48.720-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[error_logger:info,2016-10-19T09:55:48.720-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.209.0>},
{name,ns_node_disco_events},
{mfargs,
{gen_event,start_link,
[{local,ns_node_disco_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[user:info,2016-10-19T09:55:48.721-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_sync:130]Node 'ns_1@127.0.0.1' synchronized otp cookie oxqibayfkfbrogxo from cluster
[ns_server:debug,2016-10-19T09:55:48.721-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2016-10-19T09:55:48.734-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2016-10-19T09:55:48.734-07:00,ns_1@127.0.0.1:<0.211.0>:ns_node_disco:do_nodes_wanted_updated_fun:224]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[ns_server:debug,2016-10-19T09:55:48.735-07:00,ns_1@127.0.0.1:<0.211.0>:ns_node_disco:do_nodes_wanted_updated_fun:230]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
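Annotation: the ns_cookie_manager / ns_node_disco exchange above follows a set-save-ping pattern: install the Erlang distribution cookie, persist it to disk, then ping every wanted node and expect pong. A condensed sketch with illustrative arguments:

-module(cookie_sync_sketch).  %% hypothetical
-export([sync/3]).

sync(CookieFile, Cookie, WantedNodes) when is_atom(Cookie) ->
    true = erlang:set_cookie(node(), Cookie),
    ok = file:write_file(CookieFile, atom_to_list(Cookie)),
    %% pong per node confirms distribution works with this cookie
    [{N, net_adm:ping(N)} || N <- WantedNodes].

%% Example: sync("/tmp/cookie", oxqibayfkfbrogxo, ['ns_1@127.0.0.1']).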
[error_logger:info,2016-10-19T09:55:48.735-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.210.0>},
{name,ns_node_disco},
{mfargs,{ns_node_disco,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.736-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.212.0>},
{name,ns_node_disco_log},
{mfargs,{ns_node_disco_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.737-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.213.0>},
{name,ns_node_disco_conf_events},
{mfargs,{ns_node_disco_conf_events,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.739-07:00,ns_1@127.0.0.1:ns_config_rep<0.215.0>:ns_config_rep:init:68]init pulling
[ns_server:debug,2016-10-19T09:55:48.739-07:00,ns_1@127.0.0.1:ns_config_rep<0.215.0>:ns_config_rep:init:70]init pushing
[error_logger:info,2016-10-19T09:55:48.739-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.214.0>},
{name,ns_config_rep_merger},
{mfargs,{ns_config_rep,start_link_merger,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.740-07:00,ns_1@127.0.0.1:ns_config_rep<0.215.0>:ns_config_rep:init:74]init reannouncing
[ns_server:debug,2016-10-19T09:55:48.740-07:00,ns_1@127.0.0.1:ns_config_events<0.145.0>:ns_node_disco_conf_events:handle_event:44]ns_node_disco_conf_events config on nodes_wanted
[ns_server:debug,2016-10-19T09:55:48.740-07:00,ns_1@127.0.0.1:ns_config_events<0.145.0>:ns_node_disco_conf_events:handle_event:50]ns_node_disco_conf_events config on otp
[ns_server:debug,2016-10-19T09:55:48.740-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2016-10-19T09:55:48.740-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2016-10-19T09:55:48.740-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
alert_limits ->
[{max_overhead_perc,50},{max_disk_used,90},{max_indexer_ram,75}]
[ns_server:debug,2016-10-19T09:55:48.741-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
audit ->
[{auditd_enabled,false},
{rotate_interval,86400},
{rotate_size,20971520},
{disabled,[]},
{sync,[]},
{log_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"}]
[ns_server:debug,2016-10-19T09:55:48.741-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
auto_failover_cfg ->
[{enabled,false},{timeout,120},{max_nodes,1},{count,0}]
[ns_server:debug,2016-10-19T09:55:48.741-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
autocompaction ->
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[error_logger:info,2016-10-19T09:55:48.741-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_node_disco_sup}
started: [{pid,<0.215.0>},
{name,ns_config_rep},
{mfargs,{ns_config_rep,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.741-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.208.0>},
{name,ns_node_disco_sup},
{mfargs,{ns_node_disco_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:48.741-07:00,ns_1@127.0.0.1:ns_config_rep<0.215.0>:ns_config_rep:do_push_keys:321]Replicating some config keys ([alert_limits,audit,auto_failover_cfg,
autocompaction,buckets,cert_and_pkey,
cluster_compat_version,
drop_request_memory_threshold_mib,email_alerts,
fts_memory_quota,goxdcr_upgrade,
index_aware_rebalance_disabled,
max_bucket_count,memcached,memory_quota,
nodes_wanted,otp,read_only_user_creds,
remote_clusters,replication,rest,rest_creds,
roles_definitions,server_groups,
set_view_update_daemon,settings,uuid,
vbucket_map_history,
{couchdb,max_parallel_indexers},
{couchdb,max_parallel_replica_indexers},
{local_changes_count,
<<"eac84bf2ecf69c83ca0268ac5aac465d">>},
{metakv,<<"/indexing/settings/config">>},
{request_limit,capi},
{request_limit,rest},
{service_map,fts},
{service_map,index},
{service_map,n1ql},
{node,'ns_1@127.0.0.1',audit},
{node,'ns_1@127.0.0.1',capi_port},
{node,'ns_1@127.0.0.1',compaction_daemon},
{node,'ns_1@127.0.0.1',config_version},
{node,'ns_1@127.0.0.1',fts_http_port},
{node,'ns_1@127.0.0.1',indexer_admin_port},
{node,'ns_1@127.0.0.1',indexer_http_port},
{node,'ns_1@127.0.0.1',indexer_scan_port},
{node,'ns_1@127.0.0.1',indexer_stcatchup_port},
{node,'ns_1@127.0.0.1',indexer_stinit_port},
{node,'ns_1@127.0.0.1',indexer_stmaint_port},
{node,'ns_1@127.0.0.1',is_enterprise},
{node,'ns_1@127.0.0.1',isasl},
{node,'ns_1@127.0.0.1',ldap_enabled},
{node,'ns_1@127.0.0.1',membership},
{node,'ns_1@127.0.0.1',memcached},
{node,'ns_1@127.0.0.1',memcached_config},
{node,'ns_1@127.0.0.1',memcached_defaults},
{node,'ns_1@127.0.0.1',moxi},
{node,'ns_1@127.0.0.1',ns_log},
{node,'ns_1@127.0.0.1',port_servers},
{node,'ns_1@127.0.0.1',projector_port},
{node,'ns_1@127.0.0.1',query_port},
{node,'ns_1@127.0.0.1',rest},
{node,'ns_1@127.0.0.1',services},
{node,'ns_1@127.0.0.1',ssl_capi_port},
{node,'ns_1@127.0.0.1',
ssl_proxy_downstream_port}]..)
[ns_server:debug,2016-10-19T09:55:48.742-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
buckets ->
[[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{18,63644101958}}],
{configs,[[{map,[{0,[],['ns_1@127.0.0.1',undefined]},
{1,[],['ns_1@127.0.0.1',undefined]},
{2,[],['ns_1@127.0.0.1',undefined]},
{3,[],['ns_1@127.0.0.1',undefined]},
{4,[],['ns_1@127.0.0.1',undefined]},
{5,[],['ns_1@127.0.0.1',undefined]},
{6,[],['ns_1@127.0.0.1',undefined]},
{7,[],['ns_1@127.0.0.1',undefined]},
{8,[],['ns_1@127.0.0.1',undefined]},
{9,[],['ns_1@127.0.0.1',undefined]},
{10,[],['ns_1@127.0.0.1',undefined]},
{11,[],['ns_1@127.0.0.1',undefined]},
{12,[],['ns_1@127.0.0.1',undefined]},
{13,[],['ns_1@127.0.0.1',undefined]},
{14,[],['ns_1@127.0.0.1',undefined]},
{15,[],['ns_1@127.0.0.1',undefined]},
{16,[],['ns_1@127.0.0.1',undefined]},
{17,[],['ns_1@127.0.0.1',undefined]},
{18,[],['ns_1@127.0.0.1',undefined]},
{19,[],['ns_1@127.0.0.1',undefined]},
{20,[],['ns_1@127.0.0.1',undefined]},
{21,[],['ns_1@127.0.0.1',undefined]},
{22,[],['ns_1@127.0.0.1',undefined]},
{23,[],['ns_1@127.0.0.1',undefined]},
{24,[],['ns_1@127.0.0.1',undefined]},
{25,[],['ns_1@127.0.0.1',undefined]},
{26,[],['ns_1@127.0.0.1',undefined]},
{27,[],['ns_1@127.0.0.1',undefined]},
{28,[],['ns_1@127.0.0.1',undefined]},
{29,[],['ns_1@127.0.0.1',undefined]},
{30,[],['ns_1@127.0.0.1',undefined]},
{31,[],['ns_1@127.0.0.1',undefined]},
{32,[],['ns_1@127.0.0.1',undefined]},
{33,[],['ns_1@127.0.0.1',undefined]},
{34,[],['ns_1@127.0.0.1',undefined]},
{35,[],['ns_1@127.0.0.1',undefined]},
{36,[],['ns_1@127.0.0.1',undefined]},
{37,[],['ns_1@127.0.0.1',undefined]},
{38,[],['ns_1@127.0.0.1',undefined]},
{39,[],['ns_1@127.0.0.1',undefined]},
{40,[],['ns_1@127.0.0.1',undefined]},
{41,[],['ns_1@127.0.0.1',undefined]},
{42,[],['ns_1@127.0.0.1',undefined]},
{43,[],['ns_1@127.0.0.1',undefined]},
{44,[],['ns_1@127.0.0.1',undefined]},
{45,[],['ns_1@127.0.0.1',undefined]},
{46,[],['ns_1@127.0.0.1',undefined]},
{47,[],['ns_1@127.0.0.1',undefined]},
{48,[],['ns_1@127.0.0.1',undefined]},
{49,[],['ns_1@127.0.0.1',undefined]},
{50,[],['ns_1@127.0.0.1',undefined]},
{51,[],['ns_1@127.0.0.1',undefined]},
{52,[],['ns_1@127.0.0.1',undefined]},
{53,[],['ns_1@127.0.0.1',undefined]},
{54,[],['ns_1@127.0.0.1',undefined]},
{55,[],['ns_1@127.0.0.1',undefined]},
{56,[],['ns_1@127.0.0.1',undefined]},
{57,[],['ns_1@127.0.0.1',undefined]},
{58,[],['ns_1@127.0.0.1',undefined]},
{59,[],['ns_1@127.0.0.1',undefined]},
{60,[],['ns_1@127.0.0.1',undefined]},
{61,[],['ns_1@127.0.0.1',undefined]},
{62,[],['ns_1@127.0.0.1',undefined]},
{63,[],['ns_1@127.0.0.1',undefined]}]},
{fastForwardMap,[]},
{repl_type,dcp},
{uuid,<<"b04d5897bd3c5329a82156f1b77c395d">>},
{sasl_password,"*****"},
{num_replicas,1},
{replica_index,false},
{ram_quota,104857600},
{auth_type,sasl},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map_opts_hash,133465355}],
[{map,[{0,[],['ns_1@127.0.0.1',undefined]},
{1,[],['ns_1@127.0.0.1',undefined]},
{2,[],['ns_1@127.0.0.1',undefined]},
{3,[],['ns_1@127.0.0.1',undefined]},
{4,[],['ns_1@127.0.0.1',undefined]},
{5,[],['ns_1@127.0.0.1',undefined]},
{6,[],['ns_1@127.0.0.1',undefined]},
{7,[],['ns_1@127.0.0.1',undefined]},
{8,[],['ns_1@127.0.0.1',undefined]},
{9,[],['ns_1@127.0.0.1',undefined]},
{10,[],['ns_1@127.0.0.1',undefined]},
{11,[],['ns_1@127.0.0.1',undefined]},
{12,[],['ns_1@127.0.0.1',undefined]},
{13,[],['ns_1@127.0.0.1',undefined]},
{14,[],['ns_1@127.0.0.1',undefined]},
{15,[],['ns_1@127.0.0.1',undefined]},
{16,[],['ns_1@127.0.0.1',undefined]},
{17,[],['ns_1@127.0.0.1',undefined]},
{18,[],['ns_1@127.0.0.1',undefined]},
{19,[],['ns_1@127.0.0.1',undefined]},
{20,[],['ns_1@127.0.0.1',undefined]},
{21,[],['ns_1@127.0.0.1',undefined]},
{22,[],['ns_1@127.0.0.1',undefined]},
{23,[],['ns_1@127.0.0.1',undefined]},
{24,[],['ns_1@127.0.0.1',undefined]},
{25,[],['ns_1@127.0.0.1',undefined]},
{26,[],['ns_1@127.0.0.1',undefined]},
{27,[],['ns_1@127.0.0.1',undefined]},
{28,[],['ns_1@127.0.0.1',undefined]},
{29,[],['ns_1@127.0.0.1',undefined]},
{30,[],['ns_1@127.0.0.1',undefined]},
{31,[],['ns_1@127.0.0.1',undefined]},
{32,[],['ns_1@127.0.0.1',undefined]},
{33,[],['ns_1@127.0.0.1',undefined]},
{34,[],['ns_1@127.0.0.1',undefined]},
{35,[],['ns_1@127.0.0.1',undefined]},
{36,[],['ns_1@127.0.0.1',undefined]},
{37,[],['ns_1@127.0.0.1',undefined]},
{38,[],['ns_1@127.0.0.1',undefined]},
{39,[],['ns_1@127.0.0.1',undefined]},
{40,[],['ns_1@127.0.0.1',undefined]},
{41,[],['ns_1@127.0.0.1',undefined]},
{42,[],['ns_1@127.0.0.1',undefined]},
{43,[],['ns_1@127.0.0.1',undefined]},
{44,[],['ns_1@127.0.0.1',undefined]},
{45,[],['ns_1@127.0.0.1',undefined]},
{46,[],['ns_1@127.0.0.1',undefined]},
{47,[],['ns_1@127.0.0.1',undefined]},
{48,[],['ns_1@127.0.0.1',undefined]},
{49,[],['ns_1@127.0.0.1',undefined]},
{50,[],['ns_1@127.0.0.1',undefined]},
{51,[],['ns_1@127.0.0.1',undefined]},
{52,[],['ns_1@127.0.0.1',undefined]},
{53,[],['ns_1@127.0.0.1',undefined]},
{54,[],['ns_1@127.0.0.1',undefined]},
{55,[],['ns_1@127.0.0.1',undefined]},
{56,[],['ns_1@127.0.0.1',undefined]},
{57,[],['ns_1@127.0.0.1',undefined]},
{58,[],['ns_1@127.0.0.1',undefined]},
{59,[],['ns_1@127.0.0.1',undefined]},
{60,[],['ns_1@127.0.0.1',undefined]},
{61,[],['ns_1@127.0.0.1',undefined]},
{62,[],['ns_1@127.0.0.1',undefined]},
{63,[],['ns_1@127.0.0.1',undefined]}]},
{fastForwardMap,[]},
{eviction_policy,value_only},
{num_threads,3},
{flush_enabled,false},
{purge_interval,undefined},
{autocompaction,false},
{sasl_password,"*****"},
{auth_type,sasl},
{ram_quota,104857600},
{num_replicas,1},
{repl_type,dcp},
{uuid,<<"8515ae93e826e7c4389f3fd25fbb263e">>},
{replica_index,false},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map_opts_hash,133465355}],
[{map,[{0,[],['ns_1@127.0.0.1',undefined]},
{1,[],['ns_1@127.0.0.1',undefined]},
{2,[],['ns_1@127.0.0.1',undefined]},
{3,[],['ns_1@127.0.0.1',undefined]},
{4,[],['ns_1@127.0.0.1',undefined]},
{5,[],['ns_1@127.0.0.1',undefined]},
{6,[],['ns_1@127.0.0.1',undefined]},
{7,[],['ns_1@127.0.0.1',undefined]},
{8,[],['ns_1@127.0.0.1',undefined]},
{9,[],['ns_1@127.0.0.1',undefined]},
{10,[],['ns_1@127.0.0.1',undefined]},
{11,[],['ns_1@127.0.0.1',undefined]},
{12,[],['ns_1@127.0.0.1',undefined]},
{13,[],['ns_1@127.0.0.1',undefined]},
{14,[],['ns_1@127.0.0.1',undefined]},
{15,[],['ns_1@127.0.0.1',undefined]},
{16,[],['ns_1@127.0.0.1',undefined]},
{17,[],['ns_1@127.0.0.1',undefined]},
{18,[],['ns_1@127.0.0.1',undefined]},
{19,[],['ns_1@127.0.0.1',undefined]},
{20,[],['ns_1@127.0.0.1',undefined]},
{21,[],['ns_1@127.0.0.1',undefined]},
{22,[],['ns_1@127.0.0.1',undefined]},
{23,[],['ns_1@127.0.0.1',undefined]},
{24,[],['ns_1@127.0.0.1',undefined]},
{25,[],['ns_1@127.0.0.1',undefined]},
{26,[],['ns_1@127.0.0.1',undefined]},
{27,[],['ns_1@127.0.0.1',undefined]},
{28,[],['ns_1@127.0.0.1',undefined]},
{29,[],['ns_1@127.0.0.1',undefined]},
{30,[],['ns_1@127.0.0.1',undefined]},
{31,[],['ns_1@127.0.0.1',undefined]},
{32,[],['ns_1@127.0.0.1',undefined]},
{33,[],['ns_1@127.0.0.1',undefined]},
{34,[],['ns_1@127.0.0.1',undefined]},
{35,[],['ns_1@127.0.0.1',undefined]},
{36,[],['ns_1@127.0.0.1',undefined]},
{37,[],['ns_1@127.0.0.1',undefined]},
{38,[],['ns_1@127.0.0.1',undefined]},
{39,[],['ns_1@127.0.0.1',undefined]},
{40,[],['ns_1@127.0.0.1',undefined]},
{41,[],['ns_1@127.0.0.1',undefined]},
{42,[],['ns_1@127.0.0.1',undefined]},
{43,[],['ns_1@127.0.0.1',undefined]},
{44,[],['ns_1@127.0.0.1',undefined]},
{45,[],['ns_1@127.0.0.1',undefined]},
{46,[],['ns_1@127.0.0.1',undefined]},
{47,[],['ns_1@127.0.0.1',undefined]},
{48,[],['ns_1@127.0.0.1',undefined]},
{49,[],['ns_1@127.0.0.1',undefined]},
{50,[],['ns_1@127.0.0.1',undefined]},
{51,[],['ns_1@127.0.0.1',undefined]},
{52,[],['ns_1@127.0.0.1',undefined]},
{53,[],['ns_1@127.0.0.1',undefined]},
{54,[],['ns_1@127.0.0.1',undefined]},
{55,[],['ns_1@127.0.0.1',undefined]},
{56,[],['ns_1@127.0.0.1',undefined]},
{57,[],['ns_1@127.0.0.1',undefined]},
{58,[],['ns_1@127.0.0.1',undefined]},
{59,[],['ns_1@127.0.0.1',undefined]},
{60,[],['ns_1@127.0.0.1',undefined]},
{61,[],['ns_1@127.0.0.1',undefined]},
{62,[],['ns_1@127.0.0.1',undefined]},
{63,[],['ns_1@127.0.0.1',undefined]}]},
{fastForwardMap,[]},
{repl_type,dcp},
{uuid,<<"7f7d4a28ca84a805edf9c899521eb18c">>},
{num_replicas,1},
{replica_index,false},
{ram_quota,524288000},
{auth_type,sasl},
{sasl_password,"*****"},
{autocompaction,false},
{purge_interval,undefined},
{flush_enabled,false},
{num_threads,3},
{eviction_policy,value_only},
{conflict_resolution_type,seqno},
{type,membase},
{num_vbuckets,64},
{replication_topology,star},
{servers,['ns_1@127.0.0.1']},
{map_opts_hash,133465355}]]}]
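Annotation: each bucket's map above is a list of per-vbucket entries of the shape {VBucketId, _, [ActiveNode | ReplicaNodes]}; on this single-node cluster the replica slot is 'undefined' even though num_replicas is 1, and the middle element is empty throughout. A lookup sketch over that structure as it appears in the log:

-module(vbmap_sketch).  %% hypothetical
-export([active_node/2]).

active_node(VBucketId, Map) ->
    %% lists:keyfind/3 returns false when the id is absent; this asserts presence.
    {VBucketId, _, [Active | _Replicas]} = lists:keyfind(VBucketId, 1, Map),
    Active.

%% Example:
%% Map = [{0, [], ['ns_1@127.0.0.1', undefined]}],
%% 'ns_1@127.0.0.1' = vbmap_sketch:active_node(0, Map).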
[ns_server:debug,2016-10-19T09:55:48.743-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
cert_and_pkey ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{<<"-----BEGIN CERTIFICATE-----\nMIIDAjCCAeqgAwIBAgIIFH6f01mhINIwDQYJKoZIhvcNAQELBQAwJDEiMCAGA1UE\nAxMZQ291Y2hiYXNlIFNlcnZlciAxMzBiNDVmMzAeFw0xMzAxMDEwMDAwMDBaFw00\nOTEyMzEyMzU5NTlaMCQxIjAgBgNVBAMTGUNvdWNoYmFzZSBTZXJ2ZXIgMTMwYjQ1\nZjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDINaiYz/zhTeD2h8Pa\nP015LetKMhey1yoE2L5H1wHK7pADeFRfqeoNunEvlxRWL/YjnqvRZPxrjdadlh7L\nVhZVke2blopHdhJjaHCvdI8R3BRBK4f"...>>,
<<"*****">>}]
[ns_server:debug,2016-10-19T09:55:48.743-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
cluster_compat_version ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{6,63644012660}}]},4,6]
[ns_server:debug,2016-10-19T09:55:48.743-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
drop_request_memory_threshold_mib ->
undefined
[error_logger:info,2016-10-19T09:55:48.743-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.223.0>},
{name,vbucket_map_mirror},
{mfargs,{vbucket_map_mirror,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.743-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
email_alerts ->
[{recipients,["root@localhost"]},
{sender,"couchbase@localhost"},
{enabled,false},
{email_server,[{user,[]},
{pass,"*****"},
{host,"localhost"},
{port,25},
{encrypt,false}]},
{alerts,[auto_failover_node,auto_failover_maximum_reached,
auto_failover_other_nodes_down,auto_failover_cluster_too_small,
auto_failover_disabled,ip,disk,overhead,ep_oom_errors,
ep_item_commit_failed,audit_dropped_events,indexer_ram_max_usage]}]
[ns_server:debug,2016-10-19T09:55:48.743-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
fts_memory_quota ->
512
[ns_server:debug,2016-10-19T09:55:48.743-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
goxdcr_upgrade ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|
'_deleted']
[ns_server:debug,2016-10-19T09:55:48.743-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
index_aware_rebalance_disabled ->
false
[ns_server:debug,2016-10-19T09:55:48.743-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
max_bucket_count ->
10
[ns_server:debug,2016-10-19T09:55:48.743-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
memcached ->
[]
[ns_server:debug,2016-10-19T09:55:48.743-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
memory_quota ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]}|1024]
[ns_server:debug,2016-10-19T09:55:48.743-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
nodes_wanted ->
['ns_1@127.0.0.1']
[ns_server:debug,2016-10-19T09:55:48.743-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
otp ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]},
{cookie,oxqibayfkfbrogxo}]
[ns_server:debug,2016-10-19T09:55:48.744-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
read_only_user_creds ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}|null]
[ns_server:debug,2016-10-19T09:55:48.744-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
remote_clusters ->
[]
[ns_server:debug,2016-10-19T09:55:48.744-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
replication ->
[{enabled,true}]
[ns_server:debug,2016-10-19T09:55:48.744-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
rest ->
[{port,8091}]
[ns_server:debug,2016-10-19T09:55:48.744-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
rest_creds ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012784}}]}|
{"Administrator",{password,"*****"}}]
[ns_server:debug,2016-10-19T09:55:48.744-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
roles_definitions ->
[{admin,[],
[{name,<<"Admin">>},
{desc,<<"Can manage ALL cluster features including security.">>}],
[{[],all}]},
{ro_admin,[],
[{name,<<"Read Only Admin">>},
{desc,<<"Can view ALL cluster features.">>}],
[{[{bucket,any},password],none},
{[{bucket,any},data],none},
{[admin,security],[read]},
{[admin],none},
{[],[read]}]},
{cluster_admin,[],
[{name,<<"Cluster Admin">>},
{desc,<<"Can manage all cluster features EXCEPT security.">>}],
[{[admin],none},{[],all}]},
{bucket_admin,[bucket_name],
[{name,<<"Bucket Admin">>},
{desc,<<"Can manage ALL bucket features for specified buckets (incl. start/stop XDCR)">>}],
[{[{bucket,bucket_name},xdcr],[read,execute]},
{[{bucket,bucket_name}],all},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{bucket_sasl,[bucket_name],
[],
[{[{bucket,bucket_name},data],all},
{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name}],[read,flush]},
{[pools],[read]}]},
{views_admin,[bucket_name],
[{name,<<"Views Admin">>},
{desc,<<"Can manage views for specified buckets">>}],
[{[{bucket,bucket_name},views],all},
{[{bucket,bucket_name},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],none},
{[admin],none},
{[],[read]}]},
{replication_admin,[],
[{name,<<"Replication Admin">>},
{desc,<<"Can manage ONLY XDCR features (cluster AND bucket level)">>}],
[{[{bucket,any},xdcr],all},
{[{bucket,any},data],[read]},
{[{bucket,any},settings],[read]},
{[{bucket,any}],none},
{[xdcr],all},
{[admin],none},
{[],[read]}]}]
[ns_server:debug,2016-10-19T09:55:48.744-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
server_groups ->
[[{uuid,<<"0">>},{name,<<"Group 1">>},{nodes,['ns_1@127.0.0.1']}]]
[ns_server:debug,2016-10-19T09:55:48.744-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
set_view_update_daemon ->
[{update_interval,5000},
{update_min_changes,5000},
{replica_update_min_changes,5000}]
[ns_server:debug,2016-10-19T09:55:48.744-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
settings ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012777}}]},
{stats,[{send_stats,false}]}]
[ns_server:debug,2016-10-19T09:55:48.744-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
uuid ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012784}}]}|
<<"561b20f339d4184270a7de0b1c1de1b0">>]
[error_logger:info,2016-10-19T09:55:48.744-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.225.0>},
{name,bucket_info_cache},
{mfargs,{bucket_info_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.744-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.228.0>},
{name,ns_tick_event},
{mfargs,{gen_event,start_link,[{local,ns_tick_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.745-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.229.0>},
{name,buckets_events},
{mfargs,
{gen_event,start_link,[{local,buckets_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.745-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
vbucket_map_history ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012772}}]},
{[['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined],
['ns_1@127.0.0.1',undefined]],
[{replication_topology,star},{tags,undefined},{max_slaves,10}]}]
[ns_server:debug,2016-10-19T09:55:48.745-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{couchdb,max_parallel_indexers} ->
4
[ns_server:debug,2016-10-19T09:55:48.745-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{couchdb,max_parallel_replica_indexers} ->
2
[ns_server:debug,2016-10-19T09:55:48.745-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{local_changes_count,<<"eac84bf2ecf69c83ca0268ac5aac465d">>} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{35,63644101958}}]}]
[ns_server:debug,2016-10-19T09:55:48.745-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{metakv,<<"/indexing/settings/config">>} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{4,63644012784}}]}|
<<"{\"indexer.settings.compaction.days_of_week\":\"Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday\",\"indexer.settings.compaction.interval\":\"00:00,00:00\",\"indexer.settings.compaction.compaction_mode\":\"circular\",\"indexer.settings.persisted_snapshot.interval\":5000,\"indexer.settings.log_level\":\"info\",\"indexer.settings.compaction.min_frag\":30,\"indexer.settings.inmemory_snapshot.interval\""...>>]
[ns_server:debug,2016-10-19T09:55:48.745-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{request_limit,capi} ->
undefined
[ns_server:debug,2016-10-19T09:55:48.745-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{request_limit,rest} ->
undefined
[ns_server:debug,2016-10-19T09:55:48.745-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{service_map,fts} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012660}}]}]
[ns_server:debug,2016-10-19T09:55:48.745-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{service_map,index} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']
[ns_server:debug,2016-10-19T09:55:48.745-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{service_map,n1ql} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012791}}]},
'ns_1@127.0.0.1']
[ns_server:debug,2016-10-19T09:55:48.745-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',audit} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]
[ns_server:debug,2016-10-19T09:55:48.745-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',capi_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|8092]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',compaction_daemon} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{check_interval,30},
{min_db_file_size,131072},
{min_view_file_size,20971520}]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',config_version} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|{4,5}]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',fts_http_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|8094]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_admin_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9100]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_http_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9102]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_scan_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9101]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_stcatchup_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9104]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_stinit_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9103]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',indexer_stmaint_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9105]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',is_enterprise} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|true]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',isasl} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/isasl.pw"}]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ldap_enabled} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|false]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',membership} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
active]
[ns_server:debug,2016-10-19T09:55:48.746-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',memcached} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11210},
{dedicated_port,11209},
{ssl_port,11207},
{admin_user,"_admin"},
{admin_pass,"*****"},
{engines,[{membase,[{engine,"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/ep.so"},
{static_config_string,"failpartialwarmup=false"}]},
{memcached,[{engine,"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/default_engine.so"},
{static_config_string,"vb0=true"}]}]},
{config_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached.json"},
{audit_file,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/audit.json"},
{log_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{log_prefix,"memcached.log"},
{log_generations,20},
{log_cyclesize,10485760},
{log_sleeptime,19},
{log_rotation_period,39003}]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',memcached_config} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
{[{interfaces,
{memcached_config_mgr,omit_missing_mcd_ports,
[{[{host,<<"*">>},{port,port},{maxconn,maxconn}]},
{[{host,<<"*">>},
{port,dedicated_port},
{maxconn,dedicated_port_maxconn}]},
{[{host,<<"*">>},
{port,ssl_port},
{maxconn,maxconn},
{ssl,
{[{key,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-key.pem">>},
{cert,
<<"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached-cert.pem">>}]}}]}]}},
{ssl_cipher_list,{"~s",[ssl_cipher_list]}},
{ssl_minimum_protocol,{memcached_config_mgr,ssl_minimum_protocol,[]}},
{connection_idle_time,connection_idle_time},
{breakpad,
{[{enabled,breakpad_enabled},
{minidump_dir,{memcached_config_mgr,get_minidump_dir,[]}}]}},
{extensions,
[{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/stdin_term_handler.so">>},
{config,<<>>}]},
{[{module,
<<"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/file_logger.so">>},
{config,
{"cyclesize=~B;sleeptime=~B;filename=~s/~s",
[log_cyclesize,log_sleeptime,log_path,log_prefix]}}]}]},
{admin,{"~s",[admin_user]}},
{verbosity,verbosity},
{audit_file,{"~s",[audit_file]}},
{dedupe_nmvb_maps,dedupe_nmvb_maps}]}]
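Annotation: the memcached_config value above is evidently a template rather than final JSON: leaf values are atoms naming parameters from the memcached / memcached_defaults entries (e.g. maxconn, verbosity), {Format, [ParamNames]} pairs expanded with io_lib:format (e.g. {"~s",[admin_user]}), or {M,F,A} tuples evaluated at render time (e.g. memcached_config_mgr:get_minidump_dir/0). A sketch of the format-pair expansion step only, with an illustrative parameter proplist:

-module(mcd_template_sketch).  %% hypothetical
-export([render/2]).

render({Fmt, ParamNames}, Params) when is_list(Fmt) ->
    Args = [proplists:get_value(P, Params) || P <- ParamNames],
    lists:flatten(io_lib:format(Fmt, Args)).

%% Example:
%% "_admin" = mcd_template_sketch:render({"~s", [admin_user]},
%%                                       [{admin_user, "_admin"}]).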
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',memcached_defaults} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{maxconn,30000},
{dedicated_port_maxconn,5000},
{ssl_cipher_list,"HIGH"},
{connection_idle_time,0},
{verbosity,0},
{breakpad_enabled,true},
{breakpad_minidump_dir_path,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/crash"},
{dedupe_nmvb_maps,false}]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',moxi} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,11211},
{verbosity,[]}]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ns_log} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{filename,"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/ns_log"}]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',port_servers} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',projector_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9999]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',query_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|8093]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',rest} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]},
{port,8091},
{port_meta,global}]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',services} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012751}}]},
index,kv,n1ql]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_capi_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|18092]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_downstream_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|11214]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_proxy_upstream_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|11215]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_query_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|18093]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',ssl_rest_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|18091]
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',stop_xdcr} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{2,63644012660}}]}|
'_deleted']
[ns_server:debug,2016-10-19T09:55:48.747-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',uuid} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|
<<"eac84bf2ecf69c83ca0268ac5aac465d">>]
[ns_server:debug,2016-10-19T09:55:48.748-07:00,ns_1@127.0.0.1:ns_config_log<0.151.0>:ns_config_log:log_common:143]config change:
{node,'ns_1@127.0.0.1',xdcr_rest_port} ->
[{'_vclock',[{<<"eac84bf2ecf69c83ca0268ac5aac465d">>,{1,63644012657}}]}|9998]
[ns_server:debug,2016-10-19T09:55:48.752-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2016-10-19T09:55:48.752-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_sync:110]ns_cookie_manager do_cookie_sync
[ns_server:debug,2016-10-19T09:55:48.752-07:00,ns_1@127.0.0.1:<0.219.0>:ns_node_disco:do_nodes_wanted_updated_fun:224]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[ns_server:debug,2016-10-19T09:55:48.752-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:147]saving cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server"
[ns_server:debug,2016-10-19T09:55:48.752-07:00,ns_1@127.0.0.1:<0.219.0>:ns_node_disco:do_nodes_wanted_updated_fun:230]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[ns_server:debug,2016-10-19T09:55:48.754-07:00,ns_1@127.0.0.1:ns_log_events<0.207.0>:ns_mail_log:init:44]ns_mail_log started up
[error_logger:info,2016-10-19T09:55:48.754-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_mail_sup}
started: [{pid,<0.231.0>},
{name,ns_mail_log},
{mfargs,{ns_mail_log,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.754-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.230.0>},
{name,ns_mail_sup},
{mfargs,{ns_mail_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:48.754-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.232.0>},
{name,ns_stats_event},
{mfargs,
{gen_event,start_link,[{local,ns_stats_event}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.755-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.233.0>},
{name,samples_loader_tasks},
{mfargs,{samples_loader_tasks,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.764-07:00,ns_1@127.0.0.1:ns_cookie_manager<0.142.0>:ns_cookie_manager:do_cookie_save:149]attempted to save cookie to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/couchbase-server.cookie-ns-server": ok
[ns_server:debug,2016-10-19T09:55:48.764-07:00,ns_1@127.0.0.1:<0.220.0>:ns_node_disco:do_nodes_wanted_updated_fun:224]ns_node_disco: nodes_wanted updated: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[ns_server:debug,2016-10-19T09:55:48.764-07:00,ns_1@127.0.0.1:<0.220.0>:ns_node_disco:do_nodes_wanted_updated_fun:230]ns_node_disco: nodes_wanted pong: ['ns_1@127.0.0.1'], with cookie: oxqibayfkfbrogxo
[error_logger:info,2016-10-19T09:55:48.767-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_heart_sup}
started: [{pid,<0.235.0>},
{name,ns_heart},
{mfargs,{ns_heart,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.767-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_heart_sup}
started: [{pid,<0.237.0>},
{name,ns_heart_slow_updater},
{mfargs,{ns_heart,start_link_slow_updater,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.767-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.234.0>},
{name,ns_heart_sup},
{mfargs,{ns_heart_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:48.768-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_doctor_sup}
started: [{pid,<0.241.0>},
{name,ns_doctor_events},
{mfargs,
{gen_event,start_link,[{local,ns_doctor_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.770-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "@system" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-@system-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]},
{ns_heart,handle_info,2,
[{file,"src/ns_heart.erl"},{line,118}]}]}}
[ns_server:debug,2016-10-19T09:55:48.770-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "@system-processes" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-@system-processes-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]}]}}
[ns_server:debug,2016-10-19T09:55:48.772-07:00,ns_1@127.0.0.1:<0.238.0>:restartable:start_child:98]Started child process <0.240.0>
MFA: {ns_doctor_sup,start_link,[]}
[error_logger:info,2016-10-19T09:55:48.772-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_doctor_sup}
started: [{pid,<0.242.0>},
{name,ns_doctor},
{mfargs,{ns_doctor,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.773-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.238.0>},
{name,ns_doctor_sup},
{mfargs,
{restartable,start_link,
[{ns_doctor_sup,start_link,[]},infinity]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:48.786-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:ns_heart:grab_index_status:373]ignoring failure to get index status: {exit,
{noproc,
{gen_server,call,
['index_status_keeper-index',
get_status,2000]}}}
[{gen_server,call,3,[{file,"gen_server.erl"},{line,188}]},
{ns_heart,grab_index_status,0,[{file,"src/ns_heart.erl"},{line,370}]},
{ns_heart,current_status_slow_inner,0,[{file,"src/ns_heart.erl"},{line,280}]},
{ns_heart,current_status_slow,1,[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,[{file,"src/ns_heart.erl"},{line,186}]},
{ns_heart,handle_info,2,[{file,"src/ns_heart.erl"},{line,118}]},
{gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,604}]},
{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,239}]}]
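Here the failure mode differs: 'index_status_keeper-index' is simply not registered yet, so gen_server:call/3 exits with noproc rather than raising badarg. A sketch of the same trap, with get_status and the 2000 ms timeout taken from the log (function name illustrative):

%% gen_server:call/3 to an unregistered name exits with
%% {noproc,{gen_server,call,[Name,Request,Timeout]}}; trapping the exit
%% lets the heartbeat carry on without index status.
safe_index_status(Name) ->
    try gen_server:call(Name, get_status, 2000)
    catch
        exit:{noproc, _} -> {error, not_running}
    end.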
[ns_server:debug,2016-10-19T09:55:48.786-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "test" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-test-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]}]}}
[ns_server:debug,2016-10-19T09:55:48.786-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "locked" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-locked-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]}]}}
[ns_server:debug,2016-10-19T09:55:48.786-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "default" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-default-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,update_current_status,1,
[{file,"src/ns_heart.erl"},{line,186}]}]}}
[error_logger:info,2016-10-19T09:55:48.787-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.247.0>},
{name,disk_log_sup},
{mfargs,{disk_log_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:48.787-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,kernel_safe_sup}
started: [{pid,<0.248.0>},
{name,disk_log_server},
{mfargs,{disk_log_server,start_link,[]}},
{restart_type,permanent},
{shutdown,2000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.789-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.245.0>},
{name,remote_clusters_info},
{mfargs,{remote_clusters_info,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.789-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.251.0>},
{name,master_activity_events},
{mfargs,
{gen_event,start_link,
[{local,master_activity_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.791-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.252.0>},
{name,xdcr_ckpt_store},
{mfargs,{simple_store,start_link,[xdcr_ckpt_data]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.792-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.253.0>},
{name,metakv_worker},
{mfargs,{work_queue,start_link,[metakv_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.792-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.254.0>},
{name,index_events},
{mfargs,{gen_event,start_link,[{local,index_events}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.794-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.255.0>},
{name,index_settings_manager},
{mfargs,{index_settings_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.795-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.258.0>},
{name,menelaus_ui_auth},
{mfargs,{menelaus_ui_auth,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.797-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.260.0>},
{name,menelaus_web_cache},
{mfargs,{menelaus_web_cache,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.798-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.261.0>},
{name,menelaus_stats_gatherer},
{mfargs,{menelaus_stats_gatherer,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.798-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.262.0>},
{name,json_rpc_events},
{mfargs,
{gen_event,start_link,[{local,json_rpc_events}]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:48.800-07:00,ns_1@127.0.0.1:menelaus_sup<0.257.0>:menelaus_pluggable_ui:validate_plugin_spec:117]Loaded pluggable UI specification for fts
[ns_server:info,2016-10-19T09:55:48.800-07:00,ns_1@127.0.0.1:menelaus_sup<0.257.0>:menelaus_pluggable_ui:validate_plugin_spec:117]Loaded pluggable UI specification for n1ql
[error_logger:info,2016-10-19T09:55:48.800-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.263.0>},
{name,menelaus_web},
{mfargs,{menelaus_web,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.802-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.281.0>},
{name,menelaus_event},
{mfargs,{menelaus_event,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.804-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.282.0>},
{name,hot_keys_keeper},
{mfargs,{hot_keys_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.806-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily not available. Returning empty list.
[error_logger:info,2016-10-19T09:55:48.807-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.283.0>},
{name,menelaus_web_alerts_srv},
{mfargs,{menelaus_web_alerts_srv,start_link,[]}},
{restart_type,permanent},
{shutdown,5000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.808-07:00,ns_1@127.0.0.1:ns_heart<0.235.0>:cluster_logs_collection_task:maybe_build_cluster_logs_task:43]Ignoring exception trying to read cluster_logs_collection_task_status table: error:badarg
[error_logger:info,2016-10-19T09:55:48.809-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,menelaus_sup}
started: [{pid,<0.285.0>},
{name,menelaus_cbauth},
{mfargs,{menelaus_cbauth,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[user:info,2016-10-19T09:55:48.810-07:00,ns_1@127.0.0.1:ns_server_sup<0.197.0>:menelaus_sup:start_link:46]Couchbase Server has started on web port 8091 on node 'ns_1@127.0.0.1'. Version: "4.6.0-3391-enterprise".
[error_logger:info,2016-10-19T09:55:48.810-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.257.0>},
{name,menelaus},
{mfargs,{menelaus_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:48.810-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.289.0>},
{name,ns_ports_setup},
{mfargs,{ns_ports_setup,start,[]}},
{restart_type,{permanent,4}},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.813-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,service_agent_sup}
started: [{pid,<0.292.0>},
{name,service_agent_children_sup},
{mfargs,
{supervisor,start_link,
[{local,service_agent_children_sup},
service_agent_sup,child]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:48.814-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,service_agent_sup}
started: [{pid,<0.293.0>},
{name,service_agent_worker},
{mfargs,
{erlang,apply,
[#Fun,[]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.814-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.291.0>},
{name,service_agent_sup},
{mfargs,{service_agent_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:48.819-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.297.0>},
{name,ns_memcached_sockets_pool},
{mfargs,{ns_memcached_sockets_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.819-07:00,ns_1@127.0.0.1:ns_audit_cfg<0.298.0>:ns_audit_cfg:write_audit_json:158]Writing new content to "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/audit.json" : [{auditd_enabled,
false},
{disabled,
[]},
{log_path,
"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"},
{rotate_interval,
86400},
{rotate_size,
20971520},
{sync,
[]},
{version,
1},
{descriptors_path,
"/Applications/Couchbase Server.app/Contents/Resources/couchbase-core/etc/security"}]
[ns_server:debug,2016-10-19T09:55:48.823-07:00,ns_1@127.0.0.1:ns_ports_setup<0.289.0>:ns_ports_manager:set_dynamic_children:54]Setting children [memcached,moxi,projector,indexer,query,saslauthd_port,
goxdcr,xdcr_proxy]
[user:debug,2016-10-19T09:55:48.825-07:00,ns_1@127.0.0.1:<0.203.0>:ns_log:crash_consumption_loop:70]Service 'moxi' exited with status 0. Restarting. Messages: ERROR: could not contact REST server(s): http://127.0.0.1:8091/pools/default/saslBucketsStreaming
WARNING: curl error: Failed to connect to 127.0.0.1 port 8091: Connection refused from: http://127.0.0.1:8091/pools/default/saslBucketsStreaming
ERROR: could not contact REST server(s): http://127.0.0.1:8091/pools/default/saslBucketsStreaming
WARNING: curl error: Failed to connect to 127.0.0.1 port 8091: Connection refused from: http://127.0.0.1:8091/pools/default/saslBucketsStreaming
EOL on stdin. Exiting
[ns_server:debug,2016-10-19T09:55:48.826-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.237.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "@system" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-@system-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]},
{proc_lib,init_p_do_apply,3,
[{file,"proc_lib.erl"},{line,239}]}]}}
[ns_server:debug,2016-10-19T09:55:48.826-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.237.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "@system-processes" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-@system-processes-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,'-current_status_slow_inner/0-lc$^0/1-0-',1,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,276}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]}]}}
[ns_server:debug,2016-10-19T09:55:48.827-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.237.0>:ns_heart:grab_index_status:373]ignoring failure to get index status: {exit,
{noproc,
{gen_server,call,
['index_status_keeper-index',
get_status,2000]}}}
[{gen_server,call,3,[{file,"gen_server.erl"},{line,188}]},
{ns_heart,grab_index_status,0,[{file,"src/ns_heart.erl"},{line,370}]},
{ns_heart,current_status_slow_inner,0,[{file,"src/ns_heart.erl"},{line,280}]},
{ns_heart,current_status_slow,1,[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,[{file,"src/ns_heart.erl"},{line,243}]},
{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,239}]}]
[ns_server:debug,2016-10-19T09:55:48.827-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.237.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "test" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-test-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]}]}}
[ns_server:debug,2016-10-19T09:55:48.827-07:00,ns_1@127.0.0.1:ns_ports_setup<0.289.0>:ns_ports_setup:set_children:72]Monitor ns_child_ports_sup <11624.75.0>
[ns_server:debug,2016-10-19T09:55:48.827-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.237.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "locked" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-locked-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]}]}}
[ns_server:debug,2016-10-19T09:55:48.827-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.237.0>:ns_heart:grab_latest_stats:259]Ignoring failure to grab "default" stats:
{'EXIT',{badarg,[{ets,last,['stats_archiver-default-minute'],[]},
{stats_archiver,latest_sample,2,
[{file,"src/stats_archiver.erl"},{line,116}]},
{ns_heart,grab_latest_stats,1,
[{file,"src/ns_heart.erl"},{line,255}]},
{ns_heart,'-current_status_slow_inner/0-fun-1-',3,
[{file,"src/ns_heart.erl"},{line,286}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1248}]},
{ns_heart,current_status_slow_inner,0,
[{file,"src/ns_heart.erl"},{line,285}]},
{ns_heart,current_status_slow,1,
[{file,"src/ns_heart.erl"},{line,249}]},
{ns_heart,slow_updater_loop,0,
[{file,"src/ns_heart.erl"},{line,243}]}]}}
[ns_server:debug,2016-10-19T09:55:48.828-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.237.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily not available. Returning empty list.
[ns_server:debug,2016-10-19T09:55:48.828-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.237.0>:cluster_logs_collection_task:maybe_build_cluster_logs_task:43]Ignoring exception trying to read cluster_logs_collection_task_status table: error:badarg
[ns_server:debug,2016-10-19T09:55:48.830-07:00,ns_1@127.0.0.1:ns_audit_cfg<0.298.0>:ns_audit_cfg:handle_info:107]Instruct memcached to reload audit config
[error_logger:info,2016-10-19T09:55:48.831-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.298.0>},
{name,ns_audit_cfg},
{mfargs,{ns_audit_cfg,start_link,[]}},
{restart_type,{permanent,4}},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.832-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.311.0>:memcached_config_mgr:init:44]waiting for completion of initial ns_ports_setup round
[ns_server:debug,2016-10-19T09:55:48.832-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.311.0>:memcached_config_mgr:init:46]ns_ports_setup seems to be ready
[error_logger:info,2016-10-19T09:55:48.832-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.311.0>},
{name,memcached_config_mgr},
{mfargs,{memcached_config_mgr,start_link,[]}},
{restart_type,{permanent,4}},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:48.834-07:00,ns_1@127.0.0.1:<0.312.0>:ns_memcached_log_rotator:init:28]Starting log rotator on "/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs"/"memcached.log"* with an initial period of 39003ms
[error_logger:info,2016-10-19T09:55:48.834-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.312.0>},
{name,ns_memcached_log_rotator},
{mfargs,{ns_memcached_log_rotator,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.836-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.311.0>:memcached_config_mgr:find_port_pid_loop:119]Found memcached port <11624.81.0>
[error_logger:info,2016-10-19T09:55:48.836-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.313.0>},
{name,memcached_clients_pool},
{mfargs,{memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.837-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.311.0>:memcached_config_mgr:do_read_current_memcached_config:251]Got enoent while trying to read active memcached config from /Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/config/memcached.json.prev
[ns_server:debug,2016-10-19T09:55:48.837-07:00,ns_1@127.0.0.1:memcached_config_mgr<0.311.0>:memcached_config_mgr:init:83]found memcached port to be already active
[error_logger:info,2016-10-19T09:55:48.838-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.317.0>},
{name,proxied_memcached_clients_pool},
{mfargs,{proxied_memcached_clients_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.838-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.318.0>},
{name,xdc_lhttpc_pool},
{mfargs,
{lhttpc_manager,start_link,
[[{name,xdc_lhttpc_pool},
{connection_timeout,120000},
{pool_size,200}]]}},
{restart_type,{permanent,1}},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.840-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.320.0>},
{name,ns_null_connection_pool},
{mfargs,
{ns_null_connection_pool,start_link,
[ns_null_connection_pool]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.848-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.321.0>,xdcr_sup}
started: [{pid,<0.322.0>},
{name,xdc_stats_holder},
{mfargs,
{proc_lib,start_link,
[xdcr_sup,link_stats_holder_body,[]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.849-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.321.0>,xdcr_sup}
started: [{pid,<0.326.0>},
{name,xdc_replication_sup},
{mfargs,{xdc_replication_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:48.850-07:00,ns_1@127.0.0.1:xdc_rep_manager<0.327.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:48.850-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.321.0>,xdcr_sup}
started: [{pid,<0.327.0>},
{name,xdc_rep_manager},
{mfargs,{xdc_rep_manager,start_link,[]}},
{restart_type,permanent},
{shutdown,30000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.852-07:00,ns_1@127.0.0.1:xdcr_doc_replicator<0.329.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[ns_server:debug,2016-10-19T09:55:48.853-07:00,ns_1@127.0.0.1:xdc_rdoc_replication_srv<0.330.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:48.853-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.321.0>,xdcr_sup}
started: [{pid,<0.329.0>},
{name,xdc_rdoc_replicator},
{mfargs,{doc_replicator,start_link_xdcr,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.853-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.321.0>,xdcr_sup}
started: [{pid,<0.330.0>},
{name,xdc_rdoc_replication_srv},
{mfargs,{doc_replication_srv,start_link_xdcr,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.854-07:00,ns_1@127.0.0.1:<0.321.0>:xdc_rdoc_manager:start_link_remote:42]Starting xdc_rdoc_manager on 'couchdb_ns_1@127.0.0.1' with the following links: [<0.329.0>,
<0.330.0>,
<0.327.0>]
[ns_server:debug,2016-10-19T09:55:48.855-07:00,ns_1@127.0.0.1:xdcr_doc_replicator<0.329.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.254.0>
[ns_server:debug,2016-10-19T09:55:48.856-07:00,ns_1@127.0.0.1:xdc_rdoc_replication_srv<0.330.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.254.0>
[ns_server:debug,2016-10-19T09:55:48.856-07:00,ns_1@127.0.0.1:xdc_rep_manager<0.327.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.254.0>
[error_logger:info,2016-10-19T09:55:48.856-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.321.0>,xdcr_sup}
started: [{pid,<11625.254.0>},
{name,xdc_rdoc_manager},
{mfargs,
{xdc_rdoc_manager,start_link_remote,
['couchdb_ns_1@127.0.0.1']}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.856-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.321.0>},
{name,xdcr_sup},
{mfargs,{xdcr_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:48.857-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.332.0>},
{name,xdcr_dcp_sockets_pool},
{mfargs,{xdcr_dcp_sockets_pool,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.858-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.334.0>},
{name,ns_bucket_worker},
{mfargs,{work_queue,start_link,[ns_bucket_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.859-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.336.0>},
{name,buckets_observing_subscription},
{mfargs,{ns_bucket_sup,subscribe_on_config_events,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.859-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_worker_sup}
started: [{pid,<0.335.0>},
{name,ns_bucket_sup},
{mfargs,{ns_bucket_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:48.859-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.334.0>:ns_bucket_sup:update_children:110]Starting new child: {{docs_sup,"test"},
{docs_sup,start_link,["test"]},
permanent,infinity,supervisor,
[docs_sup]}
[error_logger:info,2016-10-19T09:55:48.859-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.333.0>},
{name,ns_bucket_worker_sup},
{mfargs,{ns_bucket_worker_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:48.861-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.337.0>},
{name,system_stats_collector},
{mfargs,{system_stats_collector,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.861-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.341.0>},
{name,{stats_archiver,"@system"}},
{mfargs,{stats_archiver,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.862-07:00,ns_1@127.0.0.1:capi_doc_replicator-test<0.344.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[ns_server:debug,2016-10-19T09:55:48.862-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-test<0.345.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:48.862-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.343.0>,docs_sup}
started: [{pid,<0.344.0>},
{name,doc_replicator},
{mfargs,{doc_replicator,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.862-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.343.0>,docs_sup}
started: [{pid,<0.345.0>},
{name,doc_replication_srv},
{mfargs,{doc_replication_srv,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.868-07:00,ns_1@127.0.0.1:xdcr_doc_replicator<0.329.0>:doc_replicator:loop:64]doing replicate_newnodes_docs
[error_logger:info,2016-10-19T09:55:48.871-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.346.0>},
{name,{stats_reader,"@system"}},
{mfargs,{stats_reader,start_link,["@system"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.871-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.347.0>},
{name,{stats_archiver,"@system-processes"}},
{mfargs,
{stats_archiver,start_link,["@system-processes"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.872-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.349.0>},
{name,{stats_reader,"@system-processes"}},
{mfargs,
{stats_reader,start_link,["@system-processes"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.872-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.350.0>},
{name,{stats_archiver,"@query"}},
{mfargs,{stats_archiver,start_link,["@query"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.872-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.352.0>},
{name,{stats_reader,"@query"}},
{mfargs,{stats_reader,start_link,["@query"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.896-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-test'}
started: [{pid,<11625.262.0>},
{name,capi_ddoc_manager_events},
{mfargs,
{capi_ddoc_manager,start_link_event_manager,
["test"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:48.896-07:00,ns_1@127.0.0.1:capi_doc_replicator-test<0.344.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.263.0>
[ns_server:debug,2016-10-19T09:55:48.896-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-test<0.345.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.263.0>
[ns_server:debug,2016-10-19T09:55:48.897-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.334.0>:ns_bucket_sup:update_children:110]Starting new child: {{single_bucket_kv_sup,"test"},
{single_bucket_kv_sup,start_link,["test"]},
permanent,infinity,supervisor,
[single_bucket_kv_sup]}
[error_logger:info,2016-10-19T09:55:48.897-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-test'}
started: [{pid,<11625.263.0>},
{name,capi_ddoc_manager},
{mfargs,
{capi_ddoc_manager,start_link,
["test",<0.344.0>,<0.345.0>]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.897-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.343.0>,docs_sup}
started: [{pid,<11625.261.0>},
{name,capi_ddoc_manager_sup},
{mfargs,
{capi_ddoc_manager_sup,start_link_remote,
['couchdb_ns_1@127.0.0.1',"test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:48.897-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.343.0>},
{name,{docs_sup,"test"}},
{mfargs,{docs_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:48.908-07:00,ns_1@127.0.0.1:capi_doc_replicator-test<0.344.0>:doc_replicator:loop:64]doing replicate_newnodes_docs
[error_logger:info,2016-10-19T09:55:48.953-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.354.0>},
{name,query_stats_collector},
{mfargs,{query_stats_collector,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.954-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.356.0>},
{name,{stats_archiver,"@global"}},
{mfargs,{stats_archiver,start_link,["@global"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:48.954-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.358.0>},
{name,{stats_reader,"@global"}},
{mfargs,{stats_reader,start_link,["@global"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.016-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.360.0>},
{name,global_stats_collector},
{mfargs,{global_stats_collector,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.019-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.363.0>},
{name,goxdcr_status_keeper},
{mfargs,{goxdcr_status_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.019-07:00,ns_1@127.0.0.1:goxdcr_status_keeper<0.363.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily not available. Returning empty list.
[ns_server:debug,2016-10-19T09:55:49.020-07:00,ns_1@127.0.0.1:goxdcr_status_keeper<0.363.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily not available. Returning empty list.
[error_logger:info,2016-10-19T09:55:49.023-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_sup}
started: [{pid,<0.367.0>},
{name,index_stats_children_sup},
{mfargs,
{supervisor,start_link,
[{local,index_stats_children_sup},
index_stats_sup,child]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.024-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.362.0>,docs_kv_sup}
started: [{pid,<11625.271.0>},
{name,capi_set_view_manager},
{mfargs,
{capi_set_view_manager,start_link_remote,
['couchdb_ns_1@127.0.0.1',"test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.025-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_status_keeper_sup}
started: [{pid,<0.369.0>},
{name,index_status_keeper_worker},
{mfargs,
{work_queue,start_link,
[index_status_keeper_worker]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.025-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_status_keeper_sup}
started: [{pid,<0.370.0>},
{name,index_status_keeper},
{mfargs,{indexer_gsi,start_keeper,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.027-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_status_keeper_sup}
started: [{pid,<0.373.0>},
{name,index_status_keeper_fts},
{mfargs,{indexer_fts,start_keeper,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.028-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_sup}
started: [{pid,<0.368.0>},
{name,index_status_keeper_sup},
{mfargs,{index_status_keeper_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.028-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_sup}
started: [{pid,<0.376.0>},
{name,index_stats_worker},
{mfargs,
{erlang,apply,
[#Fun,[]]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.028-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.366.0>},
{name,index_stats_sup},
{mfargs,{index_stats_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.032-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.380.0>},
{name,{indexer_gsi,index_stats_collector}},
{mfargs,
{index_stats_collector,start_link,[indexer_gsi]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.032-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.383.0>},
{name,{indexer_gsi,stats_archiver,"@index"}},
{mfargs,{stats_archiver,start_link,["@index"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.033-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.385.0>},
{name,{indexer_gsi,stats_archiver,"default"}},
{mfargs,{stats_archiver,start_link,["@index-default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.034-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.362.0>,docs_kv_sup}
started: [{pid,<11625.272.0>},
{name,couch_stats_reader},
{mfargs,
{couch_stats_reader,start_link_remote,
['couchdb_ns_1@127.0.0.1',"test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.034-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.362.0>},
{name,{docs_kv_sup,"test"}},
{mfargs,{docs_kv_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.038-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.389.0>},
{name,compaction_daemon},
{mfargs,{compaction_daemon,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.040-07:00,ns_1@127.0.0.1:ns_memcached-test<0.391.0>:ns_memcached:init:167]Starting ns_memcached
[error_logger:info,2016-10-19T09:55:49.040-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.387.0>},
{name,{indexer_gsi,stats_archiver,"locked"}},
{mfargs,{stats_archiver,start_link,["@index-locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.040-07:00,ns_1@127.0.0.1:<0.394.0>:ns_memcached:run_connect_phase:190]Started 'connecting' phase of ns_memcached-test. Parent is <0.391.0>
[error_logger:info,2016-10-19T09:55:49.040-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.390.0>,ns_memcached_sup}
started: [{pid,<0.391.0>},
{name,{ns_memcached,"test"}},
{mfargs,{ns_memcached,start_link,["test"]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.042-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.393.0>},
{name,{indexer_gsi,stats_archiver,"test"}},
{mfargs,{stats_archiver,start_link,["@index-test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.042-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.396.0>},
{name,{indexer_gsi,stats_reader,"@index"}},
{mfargs,{stats_reader,start_link,["@index"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.042-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.397.0>},
{name,{indexer_gsi,stats_reader,"default"}},
{mfargs,{stats_reader,start_link,["@index-default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:49.042-07:00,ns_1@127.0.0.1:ns_memcached-test<0.391.0>:ns_memcached:handle_cast:689]Main ns_memcached connection established: {ok,#Port<0.6541>}
[error_logger:info,2016-10-19T09:55:49.042-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.398.0>},
{name,{indexer_gsi,stats_reader,"locked"}},
{mfargs,{stats_reader,start_link,["@index-locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.043-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,index_stats_children_sup}
started: [{pid,<0.399.0>},
{name,{indexer_gsi,stats_reader,"test"}},
{mfargs,{stats_reader,start_link,["@index-test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[user:info,2016-10-19T09:55:49.043-07:00,ns_1@127.0.0.1:ns_memcached-test<0.391.0>:ns_memcached:handle_cast:718]Bucket "test" loaded on node 'ns_1@127.0.0.1' in 0 seconds.
[error_logger:info,2016-10-19T09:55:49.055-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.390.0>,ns_memcached_sup}
started: [{pid,<0.408.0>},
{name,{terse_bucket_info_uploader,"test"}},
{mfargs,
{terse_bucket_info_uploader,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.055-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.390.0>},
{name,{ns_memcached_sup,"test"}},
{mfargs,{ns_memcached_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:49.057-07:00,ns_1@127.0.0.1:<0.410.0>:new_concurrency_throttle:init:113]init concurrent throttle process, pid: <0.410.0>, type: kv_throttle
# of available tokens: 1
[error_logger:info,2016-10-19T09:55:49.058-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.411.0>},
{name,{ns_vbm_sup,"test"}},
{mfargs,{ns_vbm_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:49.061-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.406.0>:compaction_new_daemon:process_scheduler_message:1312]Starting compaction (compact_kv) for the following buckets:
[<<"test">>,<<"locked">>,<<"default">>]
[error_logger:info,2016-10-19T09:55:49.061-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.406.0>},
{name,compaction_new_daemon},
{mfargs,{compaction_new_daemon,start_link,[]}},
{restart_type,{permanent,4}},
{shutdown,86400000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.061-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.406.0>:compaction_new_daemon:process_scheduler_message:1312]Starting compaction (compact_views) for the following buckets:
[<<"test">>,<<"locked">>,<<"default">>]
[ns_server:info,2016-10-19T09:55:49.061-07:00,ns_1@127.0.0.1:<0.412.0>:compaction_new_daemon:spawn_scheduled_kv_compactor:471]Start compaction of vbuckets for bucket test with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:49.061-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.406.0>:compaction_new_daemon:process_scheduler_message:1312]Starting compaction (compact_master) for the following buckets:
[<<"test">>,<<"locked">>,<<"default">>]
[ns_server:info,2016-10-19T09:55:49.061-07:00,ns_1@127.0.0.1:<0.416.0>:compaction_new_daemon:spawn_master_db_compactor:850]Start compaction of master db for bucket test with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:49.061-07:00,ns_1@127.0.0.1:<0.415.0>:compaction_new_daemon:bucket_needs_compaction:972]`test` data size is 84485420, disk size is 89585344
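With the thresholds above, the decision for bucket test is plain arithmetic: fragmentation is (89585344 - 84485420) / 89585344, about 5.7%, well under the 30% database_fragmentation_threshold, so no vbucket compaction is started this round. A sketch of the check (assumed formula, not the compaction_new_daemon source):

%% Fragmentation as a percentage of disk size; compaction is warranted
%% once it crosses the configured threshold (30 here).
needs_compaction(DataSize, DiskSize, ThresholdPct) ->
    Frag = (DiskSize - DataSize) * 100 / DiskSize,
    Frag >= ThresholdPct.
%% needs_compaction(84485420, 89585344, 30) -> false (~5.7% fragmented)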
[ns_server:info,2016-10-19T09:55:49.062-07:00,ns_1@127.0.0.1:<0.417.0>:compaction_new_daemon:spawn_scheduled_kv_compactor:471]Start compaction of vbuckets for bucket locked with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:49.062-07:00,ns_1@127.0.0.1:<0.419.0>:compaction_new_daemon:bucket_needs_compaction:978]memcached is not started for bucket <<"locked">> yet
[ns_server:info,2016-10-19T09:55:49.062-07:00,ns_1@127.0.0.1:<0.413.0>:compaction_new_daemon:spawn_scheduled_views_compactor:497]Start compaction of indexes for bucket test with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[error_logger:info,2016-10-19T09:55:49.062-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.420.0>},
{name,{dcp_sup,"test"}},
{mfargs,{dcp_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2016-10-19T09:55:49.062-07:00,ns_1@127.0.0.1:<0.421.0>:compaction_new_daemon:spawn_scheduled_kv_compactor:471]Start compaction of vbuckets for bucket default with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:49.063-07:00,ns_1@127.0.0.1:<0.423.0>:compaction_new_daemon:bucket_needs_compaction:978]memcached is not started for bucket <<"default">> yet
[ns_server:debug,2016-10-19T09:55:49.063-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.406.0>:compaction_new_daemon:process_compactors_exit:1353]Finished compaction iteration.
[ns_server:debug,2016-10-19T09:55:49.063-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.406.0>:compaction_scheduler:schedule_next:60]Finished compaction for compact_kv too soon. Next run will be in 30s
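The scheduler line above implies a simple rule: when a pass completes before the 30 s check interval has elapsed, the daemon waits out the remainder instead of compacting in a tight loop. A hedged sketch of that delay computation (assumed logic; erlang:monotonic_time/1 stands in for whatever clock the daemon actually uses):

%% Milliseconds to wait before the next pass: the unspent part of the
%% interval, floored at zero so overlong passes restart immediately.
ms_until_next_run(StartMs, IntervalMs) ->
    Elapsed = erlang:monotonic_time(millisecond) - StartMs,
    max(IntervalMs - Elapsed, 0).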
[error_logger:info,2016-10-19T09:55:49.066-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,cluster_logs_sup}
started: [{pid,<0.426.0>},
{name,ets_holder},
{mfargs,
{cluster_logs_collection_task,
start_link_ets_holder,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.067-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.425.0>},
{name,cluster_logs_sup},
{mfargs,{cluster_logs_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.067-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.427.0>},
{name,{dcp_replication_manager,"test"}},
{mfargs,{dcp_replication_manager,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.068-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.428.0>},
{name,{replication_manager,"test"}},
{mfargs,{replication_manager,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.068-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.429.0>},
{name,remote_api},
{mfargs,{remote_api,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.071-07:00,ns_1@127.0.0.1:<0.430.0>:mb_master:check_master_takeover_needed:141]Sending master node question to the following nodes: []
[ns_server:debug,2016-10-19T09:55:49.071-07:00,ns_1@127.0.0.1:<0.430.0>:mb_master:check_master_takeover_needed:143]Got replies: []
[ns_server:debug,2016-10-19T09:55:49.071-07:00,ns_1@127.0.0.1:<0.430.0>:mb_master:check_master_takeover_needed:149]Was unable to discover master, not going to force mastership takeover
[user:info,2016-10-19T09:55:49.071-07:00,ns_1@127.0.0.1:mb_master<0.432.0>:mb_master:init:86]I'm the only node, so I'm the master.
[error_logger:info,2016-10-19T09:55:49.072-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.434.0>},
{name,{dcp_notifier,"test"}},
{mfargs,{dcp_notifier,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:49.074-07:00,ns_1@127.0.0.1:janitor_agent-test<0.437.0>:janitor_agent:read_flush_counter:1047]Loading flushseq failed: {error,enoent}. Assuming it's equal to global config.
[error_logger:info,2016-10-19T09:55:49.074-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-test'}
started: [{pid,<0.436.0>},
{name,rebalance_subprocesses_registry},
{mfargs,
{ns_process_registry,start_link,
['rebalance_subprocesses_registry-test',
[{terminate_command,kill}]]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:49.075-07:00,ns_1@127.0.0.1:janitor_agent-test<0.437.0>:janitor_agent:read_flush_counter_from_config:1054]Initialized flushseq 0 from bucket config
[ns_server:info,2016-10-19T09:55:49.075-07:00,ns_1@127.0.0.1:<0.438.0>:compaction_new_daemon:spawn_master_db_compactor:850]Start compaction of master db for bucket locked with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[error_logger:info,2016-10-19T09:55:49.075-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-test'}
started: [{pid,<0.437.0>},
{name,janitor_agent},
{mfargs,{janitor_agent,start_link,["test"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:49.075-07:00,ns_1@127.0.0.1:<0.424.0>:compaction_new_daemon:spawn_scheduled_views_compactor:497]Start compaction of indexes for bucket locked with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[error_logger:info,2016-10-19T09:55:49.075-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.435.0>},
{name,{janitor_agent_sup,"test"}},
{mfargs,{janitor_agent_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:49.075-07:00,ns_1@127.0.0.1:<0.439.0>:compaction_new_daemon:spawn_master_db_compactor:850]Start compaction of master db for bucket default with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:49.075-07:00,ns_1@127.0.0.1:<0.424.0>:ns_couchdb_api:rpc_couchdb_node:165]RPC to couchdb node failed for {foreach_doc,<<"locked">>,
#Fun,infinity} with {badrpc,
{'EXIT',
{noproc,
{gen_server,
call,
['capi_ddoc_manager-locked',
{foreach_doc,
#Fun},
infinity]}}}}
Stack: [{ns_couchdb_api,rpc_couchdb_node,4,
[{file,"src/ns_couchdb_api.erl"},{line,164}]},
{capi_utils,foreach_live_ddoc_id,2,
[{file,"src/capi_utils.erl"},{line,151}]},
{capi_utils,fetch_ddoc_ids,1,[{file,"src/capi_utils.erl"},{line,144}]},
{compaction_new_daemon,'-spawn_scheduled_views_compactor/2-fun-0-',3,
[{file,"src/compaction_new_daemon.erl"},
{line,500}]},
{proc_lib,init_p,3,[{file,"proc_lib.erl"},{line,224}]}]
[ns_server:error,2016-10-19T09:55:49.075-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.406.0>:compaction_new_daemon:log_compactors_exit:1327]Compactor <0.424.0> exited unexpectedly: {error,
{badrpc,
{'EXIT',
{noproc,
{gen_server,call,
['capi_ddoc_manager-locked',
{foreach_doc,
#Fun},
infinity]}}}}}. Moving to the next bucket.
[error_logger:error,2016-10-19T09:55:49.084-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: compaction_new_daemon:-spawn_scheduled_views_compactor/2-fun-0-/0
pid: <0.424.0>
registered_name: []
exception exit: {error,
{badrpc,
{'EXIT',
{noproc,
{gen_server,call,
['capi_ddoc_manager-locked',
{foreach_doc,#Fun},
infinity]}}}}}
in function ns_couchdb_api:rpc_couchdb_node/4 (src/ns_couchdb_api.erl, line 166)
in call from capi_utils:foreach_live_ddoc_id/2 (src/capi_utils.erl, line 151)
in call from capi_utils:fetch_ddoc_ids/1 (src/capi_utils.erl, line 144)
in call from compaction_new_daemon:'-spawn_scheduled_views_compactor/2-fun-0-'/3 (src/compaction_new_daemon.erl, line 500)
ancestors: [compaction_new_daemon,ns_server_sup,ns_server_nodes_sup,
<0.155.0>,ns_server_cluster_sup,<0.88.0>]
messages: []
links: [<0.406.0>]
dictionary: []
trap_exit: false
status: running
heap_size: 4185
stack_size: 27
reductions: 6781
neighbours:
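The badrpc/noproc chain above means the gen_server:call issued over the RPC targeted a registered name ('capi_ddoc_manager-locked') that did not exist yet on the couchdb node: the views compactor raced ahead of the ddoc manager, which only registers at 09:55:49.165 further down. The exit shape is reproducible in any Erlang shell (output abridged; the fun tag varies):

    1> catch gen_server:call('capi_ddoc_manager-locked',
                             {foreach_doc, fun(_D) -> ok end}, infinity).
    {'EXIT',{noproc,{gen_server,call,
                     ['capi_ddoc_manager-locked',
                      {foreach_doc,#Fun<...>},
                      infinity]}}}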
[error_logger:info,2016-10-19T09:55:49.084-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.442.0>},
{name,{stats_collector,"test"}},
{mfargs,{stats_collector,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.084-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.445.0>},
{name,{stats_archiver,"test"}},
{mfargs,{stats_archiver,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.085-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.447.0>},
{name,{stats_reader,"test"}},
{mfargs,{stats_reader,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.085-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.406.0>:compaction_new_daemon:process_compactors_exit:1353]Finished compaction iteration.
[ns_server:debug,2016-10-19T09:55:49.085-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.406.0>:compaction_scheduler:schedule_next:60]Finished compaction for compact_master too soon. Next run will be in 3600s
[ns_server:info,2016-10-19T09:55:49.085-07:00,ns_1@127.0.0.1:<0.440.0>:compaction_new_daemon:spawn_scheduled_views_compactor:497]Start compaction of indexes for bucket default with config:
[{database_fragmentation_threshold,{30,undefined}},
{view_fragmentation_threshold,{30,undefined}}]
[ns_server:debug,2016-10-19T09:55:49.086-07:00,ns_1@127.0.0.1:<0.440.0>:ns_couchdb_api:rpc_couchdb_node:165]RPC to couchdb node failed for {foreach_doc,<<"default">>,
#Fun,infinity} with {badrpc,
{'EXIT',
{noproc,
{gen_server,
call,
['capi_ddoc_manager-default',
{foreach_doc,
#Fun},
infinity]}}}}
Stack: [{ns_couchdb_api,rpc_couchdb_node,4,
[{file,"src/ns_couchdb_api.erl"},{line,164}]},
{capi_utils,foreach_live_ddoc_id,2,
[{file,"src/capi_utils.erl"},{line,151}]},
{capi_utils,fetch_ddoc_ids,1,[{file,"src/capi_utils.erl"},{line,144}]},
{compaction_new_daemon,'-spawn_scheduled_views_compactor/2-fun-0-',3,
[{file,"src/compaction_new_daemon.erl"},
{line,500}]},
{proc_lib,init_p,3,[{file,"proc_lib.erl"},{line,224}]}]
[ns_server:error,2016-10-19T09:55:49.086-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.406.0>:compaction_new_daemon:log_compactors_exit:1327]Compactor <0.440.0> exited unexpectedly: {error,
{badrpc,
{'EXIT',
{noproc,
{gen_server,call,
['capi_ddoc_manager-default',
{foreach_doc,
#Fun},
infinity]}}}}}. Moving to the next bucket.
[ns_server:debug,2016-10-19T09:55:49.086-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.406.0>:compaction_new_daemon:process_compactors_exit:1353]Finished compaction iteration.
[ns_server:debug,2016-10-19T09:55:49.086-07:00,ns_1@127.0.0.1:compaction_new_daemon<0.406.0>:compaction_scheduler:schedule_next:60]Finished compaction for compact_views too soon. Next run will be in 30s
[error_logger:error,2016-10-19T09:55:49.086-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================CRASH REPORT=========================
crasher:
initial call: compaction_new_daemon:-spawn_scheduled_views_compactor/2-fun-0-/0
pid: <0.440.0>
registered_name: []
exception exit: {error,
{badrpc,
{'EXIT',
{noproc,
{gen_server,call,
['capi_ddoc_manager-default',
{foreach_doc,#Fun},
infinity]}}}}}
in function ns_couchdb_api:rpc_couchdb_node/4 (src/ns_couchdb_api.erl, line 166)
in call from capi_utils:foreach_live_ddoc_id/2 (src/capi_utils.erl, line 151)
in call from capi_utils:fetch_ddoc_ids/1 (src/capi_utils.erl, line 144)
in call from compaction_new_daemon:'-spawn_scheduled_views_compactor/2-fun-0-'/3 (src/compaction_new_daemon.erl, line 500)
ancestors: [compaction_new_daemon,ns_server_sup,ns_server_nodes_sup,
<0.155.0>,ns_server_cluster_sup,<0.88.0>]
messages: []
links: [<0.406.0>]
dictionary: []
trap_exit: false
status: running
heap_size: 4185
stack_size: 27
reductions: 6617
neighbours:
[ns_server:debug,2016-10-19T09:55:49.098-07:00,ns_1@127.0.0.1:mb_master_sup<0.441.0>:misc:start_singleton:1094]start_singleton(gen_server, ns_tick, [], []): started as <0.448.0> on 'ns_1@127.0.0.1'
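The start_singleton line above reflects that at most one ns_tick may run per cluster. A minimal sketch of the usual singleton pattern via global name registration; the real misc implementation is not shown in this log and may differ:

    %% Register the server globally; if another node already did, reuse it.
    start_singleton(Module, Args) ->
        case gen_server:start_link({global, Module}, Module, Args, []) of
            {ok, Pid}                       -> {ok, Pid};
            {error, {already_started, Pid}} -> {ok, Pid}
        end.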
[error_logger:info,2016-10-19T09:55:49.099-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.448.0>},
{name,ns_tick},
{mfargs,{ns_tick,start_link,[]}},
{restart_type,permanent},
{shutdown,10},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.163-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.449.0>},
{name,{goxdcr_stats_collector,"test"}},
{mfargs,{goxdcr_stats_collector,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.164-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.334.0>:ns_bucket_sup:update_children:110]Starting new child: {{docs_sup,"locked"},
{docs_sup,start_link,["locked"]},
permanent,infinity,supervisor,
[docs_sup]}
[error_logger:info,2016-10-19T09:55:49.164-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.451.0>},
{name,{goxdcr_stats_archiver,"test"}},
{mfargs,{stats_archiver,start_link,["@xdcr-test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.164-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.453.0>},
{name,{goxdcr_stats_reader,"test"}},
{mfargs,{stats_reader,start_link,["@xdcr-test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.164-07:00,ns_1@127.0.0.1:capi_doc_replicator-locked<0.456.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[ns_server:debug,2016-10-19T09:55:49.164-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-locked<0.457.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:49.164-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-test'}
started: [{pid,<0.454.0>},
{name,{failover_safeness_level,"test"}},
{mfargs,{failover_safeness_level,start_link,["test"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.164-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.359.0>},
{name,{single_bucket_kv_sup,"test"}},
{mfargs,{single_bucket_kv_sup,start_link,["test"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.164-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.455.0>,docs_sup}
started: [{pid,<0.456.0>},
{name,doc_replicator},
{mfargs,{doc_replicator,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.164-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.455.0>,docs_sup}
started: [{pid,<0.457.0>},
{name,doc_replication_srv},
{mfargs,{doc_replication_srv,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.165-07:00,ns_1@127.0.0.1:capi_doc_replicator-locked<0.456.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.301.0>
[error_logger:info,2016-10-19T09:55:49.165-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-locked'}
started: [{pid,<11625.300.0>},
{name,capi_ddoc_manager_events},
{mfargs,
{capi_ddoc_manager,start_link_event_manager,
["locked"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.165-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-locked<0.457.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.301.0>
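The "Start waiting for doc manager" / "Received doc manager registration" pairs are a startup rendezvous: the replicator processes on the ns_server node block until capi_ddoc_manager on the couchdb node (pid <11625.301.0>) announces itself. A sketch of such a rendezvous, with the message shape assumed:

    %% Waiter side (replicator): block until the manager announces its pid.
    wait_for_doc_manager() ->
        receive
            {doc_manager, Pid} -> Pid
        end.

    %% Manager side: announce our pid to each registered waiter.
    announce(Waiters) ->
        [W ! {doc_manager, self()} || W <- Waiters],
        ok.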
[ns_server:debug,2016-10-19T09:55:49.165-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.334.0>:ns_bucket_sup:update_children:110]Starting new child: {{single_bucket_kv_sup,"locked"},
{single_bucket_kv_sup,start_link,["locked"]},
permanent,infinity,supervisor,
[single_bucket_kv_sup]}
[error_logger:info,2016-10-19T09:55:49.165-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-locked'}
started: [{pid,<11625.301.0>},
{name,capi_ddoc_manager},
{mfargs,
{capi_ddoc_manager,start_link,
["locked",<0.456.0>,<0.457.0>]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.165-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.455.0>,docs_sup}
started: [{pid,<11625.299.0>},
{name,capi_ddoc_manager_sup},
{mfargs,
{capi_ddoc_manager_sup,start_link_remote,
['couchdb_ns_1@127.0.0.1',"locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:49.165-07:00,ns_1@127.0.0.1:capi_doc_replicator-locked<0.456.0>:doc_replicator:loop:64]doing replicate_newnodes_docs
[error_logger:info,2016-10-19T09:55:49.165-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.455.0>},
{name,{docs_sup,"locked"}},
{mfargs,{docs_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.165-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.460.0>,docs_kv_sup}
started: [{pid,<11625.303.0>},
{name,capi_set_view_manager},
{mfargs,
{capi_set_view_manager,start_link_remote,
['couchdb_ns_1@127.0.0.1',"locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.166-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.460.0>,docs_kv_sup}
started: [{pid,<11625.306.0>},
{name,couch_stats_reader},
{mfargs,
{couch_stats_reader,start_link_remote,
['couchdb_ns_1@127.0.0.1',"locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.166-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.462.0>:ns_memcached:init:167]Starting ns_memcached
[error_logger:info,2016-10-19T09:55:49.166-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.460.0>},
{name,{docs_kv_sup,"locked"}},
{mfargs,{docs_kv_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:49.166-07:00,ns_1@127.0.0.1:<0.463.0>:ns_memcached:run_connect_phase:190]Started 'connecting' phase of ns_memcached-locked. Parent is <0.462.0>
[error_logger:info,2016-10-19T09:55:49.166-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.461.0>,ns_memcached_sup}
started: [{pid,<0.462.0>},
{name,{ns_memcached,"locked"}},
{mfargs,{ns_memcached,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.167-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.461.0>,ns_memcached_sup}
started: [{pid,<0.464.0>},
{name,{terse_bucket_info_uploader,"locked"}},
{mfargs,
{terse_bucket_info_uploader,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.167-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.461.0>},
{name,{ns_memcached_sup,"locked"}},
{mfargs,{ns_memcached_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.167-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.466.0>},
{name,{ns_vbm_sup,"locked"}},
{mfargs,{ns_vbm_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2016-10-19T09:55:49.167-07:00,ns_1@127.0.0.1:janitor_agent-locked<0.473.0>:janitor_agent:read_flush_counter:1047]Loading flushseq failed: {error,enoent}. Assuming it's equal to global config.
[error_logger:info,2016-10-19T09:55:49.167-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.467.0>},
{name,{dcp_sup,"locked"}},
{mfargs,{dcp_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:info,2016-10-19T09:55:49.167-07:00,ns_1@127.0.0.1:janitor_agent-locked<0.473.0>:janitor_agent:read_flush_counter_from_config:1054]Initialized flushseq 0 from bucket config
[error_logger:info,2016-10-19T09:55:49.167-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.468.0>},
{name,{dcp_replication_manager,"locked"}},
{mfargs,
{dcp_replication_manager,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.168-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.469.0>},
{name,{replication_manager,"locked"}},
{mfargs,{replication_manager,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.168-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.470.0>},
{name,{dcp_notifier,"locked"}},
{mfargs,{dcp_notifier,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.168-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-locked'}
started: [{pid,<0.472.0>},
{name,rebalance_subprocesses_registry},
{mfargs,
{ns_process_registry,start_link,
['rebalance_subprocesses_registry-locked',
[{terminate_command,kill}]]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.168-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-locked'}
started: [{pid,<0.473.0>},
{name,janitor_agent},
{mfargs,{janitor_agent,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.168-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.471.0>},
{name,{janitor_agent_sup,"locked"}},
{mfargs,{janitor_agent_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.169-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.474.0>},
{name,{stats_collector,"locked"}},
{mfargs,{stats_collector,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.169-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.334.0>:ns_bucket_sup:update_children:110]Starting new child: {{docs_sup,"default"},
{docs_sup,start_link,["default"]},
permanent,infinity,supervisor,
[docs_sup]}
[error_logger:info,2016-10-19T09:55:49.169-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.477.0>},
{name,{stats_archiver,"locked"}},
{mfargs,{stats_archiver,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.169-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.479.0>},
{name,{stats_reader,"locked"}},
{mfargs,{stats_reader,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.169-07:00,ns_1@127.0.0.1:capi_doc_replicator-default<0.487.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[ns_server:info,2016-10-19T09:55:49.169-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.462.0>:ns_memcached:handle_cast:689]Main ns_memcached connection established: {ok,#Port<0.6735>}
[ns_server:debug,2016-10-19T09:55:49.169-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-default<0.488.0>:ns_couchdb_api:wait_for_doc_manager:291]Start waiting for doc manager
[error_logger:info,2016-10-19T09:55:49.169-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.480.0>},
{name,{goxdcr_stats_collector,"locked"}},
{mfargs,{goxdcr_stats_collector,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.169-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.482.0>},
{name,{goxdcr_stats_archiver,"locked"}},
{mfargs,{stats_archiver,start_link,["@xdcr-locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.170-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.484.0>},
{name,{goxdcr_stats_reader,"locked"}},
{mfargs,{stats_reader,start_link,["@xdcr-locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.170-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-locked'}
started: [{pid,<0.485.0>},
{name,{failover_safeness_level,"locked"}},
{mfargs,
{failover_safeness_level,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.170-07:00,ns_1@127.0.0.1:capi_doc_replicator-default<0.487.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.310.0>
[ns_server:debug,2016-10-19T09:55:49.170-07:00,ns_1@127.0.0.1:capi_ddoc_replication_srv-default<0.488.0>:ns_couchdb_api:wait_for_doc_manager:294]Received doc manager registration from <11625.310.0>
[error_logger:info,2016-10-19T09:55:49.170-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.459.0>},
{name,{single_bucket_kv_sup,"locked"}},
{mfargs,{single_bucket_kv_sup,start_link,["locked"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[user:info,2016-10-19T09:55:49.170-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.462.0>:ns_memcached:handle_cast:718]Bucket "locked" loaded on node 'ns_1@127.0.0.1' in 0 seconds.
[ns_server:debug,2016-10-19T09:55:49.170-07:00,ns_1@127.0.0.1:ns_bucket_worker<0.334.0>:ns_bucket_sup:update_children:110]Starting new child: {{single_bucket_kv_sup,"default"},
{single_bucket_kv_sup,start_link,["default"]},
permanent,infinity,supervisor,
[single_bucket_kv_sup]}
[error_logger:info,2016-10-19T09:55:49.170-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.486.0>,docs_sup}
started: [{pid,<0.487.0>},
{name,doc_replicator},
{mfargs,{doc_replicator,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.170-07:00,ns_1@127.0.0.1:capi_doc_replicator-default<0.487.0>:doc_replicator:loop:64]doing replicate_newnodes_docs
[error_logger:info,2016-10-19T09:55:49.171-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.486.0>,docs_sup}
started: [{pid,<0.488.0>},
{name,doc_replication_srv},
{mfargs,{doc_replication_srv,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.171-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-default'}
started: [{pid,<11625.309.0>},
{name,capi_ddoc_manager_events},
{mfargs,
{capi_ddoc_manager,start_link_event_manager,
["default"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.171-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'capi_ddoc_manager_sup-default'}
started: [{pid,<11625.310.0>},
{name,capi_ddoc_manager},
{mfargs,
{capi_ddoc_manager,start_link,
["default",<0.487.0>,<0.488.0>]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.171-07:00,ns_1@127.0.0.1:ns_memcached-default<0.499.0>:ns_memcached:init:167]Starting ns_memcached
[error_logger:info,2016-10-19T09:55:49.171-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.486.0>,docs_sup}
started: [{pid,<11625.308.0>},
{name,capi_ddoc_manager_sup},
{mfargs,
{capi_ddoc_manager_sup,start_link_remote,
['couchdb_ns_1@127.0.0.1',"default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.171-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.486.0>},
{name,{docs_sup,"default"}},
{mfargs,{docs_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:49.171-07:00,ns_1@127.0.0.1:<0.500.0>:ns_memcached:run_connect_phase:190]Started 'connecting' phase of ns_memcached-default. Parent is <0.499.0>
[error_logger:info,2016-10-19T09:55:49.172-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.497.0>,docs_kv_sup}
started: [{pid,<11625.312.0>},
{name,capi_set_view_manager},
{mfargs,
{capi_set_view_manager,start_link_remote,
['couchdb_ns_1@127.0.0.1',"default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.172-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.497.0>,docs_kv_sup}
started: [{pid,<11625.315.0>},
{name,couch_stats_reader},
{mfargs,
{couch_stats_reader,start_link_remote,
['couchdb_ns_1@127.0.0.1',"default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.172-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.497.0>},
{name,{docs_kv_sup,"default"}},
{mfargs,{docs_kv_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.172-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.498.0>,ns_memcached_sup}
started: [{pid,<0.499.0>},
{name,{ns_memcached,"default"}},
{mfargs,{ns_memcached,start_link,["default"]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:49.172-07:00,ns_1@127.0.0.1:janitor_agent-default<0.510.0>:janitor_agent:read_flush_counter:1047]Loading flushseq failed: {error,enoent}. Assuming it's equal to global config.
[error_logger:info,2016-10-19T09:55:49.172-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {<0.498.0>,ns_memcached_sup}
started: [{pid,<0.501.0>},
{name,{terse_bucket_info_uploader,"default"}},
{mfargs,
{terse_bucket_info_uploader,start_link,
["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:49.173-07:00,ns_1@127.0.0.1:janitor_agent-default<0.510.0>:janitor_agent:read_flush_counter_from_config:1054]Initialized flushseq 0 from bucket config
[error_logger:info,2016-10-19T09:55:49.173-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.498.0>},
{name,{ns_memcached_sup,"default"}},
{mfargs,{ns_memcached_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.173-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.503.0>},
{name,{ns_vbm_sup,"default"}},
{mfargs,{ns_vbm_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.173-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.504.0>},
{name,{dcp_sup,"default"}},
{mfargs,{dcp_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.173-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.505.0>},
{name,{dcp_replication_manager,"default"}},
{mfargs,
{dcp_replication_manager,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.173-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.506.0>},
{name,{replication_manager,"default"}},
{mfargs,{replication_manager,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.173-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.507.0>},
{name,{dcp_notifier,"default"}},
{mfargs,{dcp_notifier,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.174-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-default'}
started: [{pid,<0.509.0>},
{name,rebalance_subprocesses_registry},
{mfargs,
{ns_process_registry,start_link,
['rebalance_subprocesses_registry-default',
[{terminate_command,kill}]]}},
{restart_type,permanent},
{shutdown,86400000},
{child_type,worker}]
[ns_server:info,2016-10-19T09:55:49.174-07:00,ns_1@127.0.0.1:ns_memcached-default<0.499.0>:ns_memcached:handle_cast:689]Main ns_memcached connection established: {ok,#Port<0.6755>}
[error_logger:info,2016-10-19T09:55:49.174-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'janitor_agent_sup-default'}
started: [{pid,<0.510.0>},
{name,janitor_agent},
{mfargs,{janitor_agent,start_link,["default"]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.174-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.508.0>},
{name,{janitor_agent_sup,"default"}},
{mfargs,{janitor_agent_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,10000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.174-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.511.0>},
{name,{stats_collector,"default"}},
{mfargs,{stats_collector,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.175-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.514.0>},
{name,{stats_archiver,"default"}},
{mfargs,{stats_archiver,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[user:info,2016-10-19T09:55:49.175-07:00,ns_1@127.0.0.1:ns_memcached-default<0.499.0>:ns_memcached:handle_cast:718]Bucket "default" loaded on node 'ns_1@127.0.0.1' in 0 seconds.
[error_logger:info,2016-10-19T09:55:49.175-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.516.0>},
{name,{stats_reader,"default"}},
{mfargs,{stats_reader,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.175-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.517.0>},
{name,{goxdcr_stats_collector,"default"}},
{mfargs,
{goxdcr_stats_collector,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.175-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.519.0>},
{name,{goxdcr_stats_archiver,"default"}},
{mfargs,{stats_archiver,start_link,["@xdcr-default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.175-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.521.0>},
{name,{goxdcr_stats_reader,"default"}},
{mfargs,{stats_reader,start_link,["@xdcr-default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.176-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,'single_bucket_kv_sup-default'}
started: [{pid,<0.522.0>},
{name,{failover_safeness_level,"default"}},
{mfargs,
{failover_safeness_level,start_link,["default"]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.176-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_bucket_sup}
started: [{pid,<0.496.0>},
{name,{single_bucket_kv_sup,"default"}},
{mfargs,{single_bucket_kv_sup,start_link,["default"]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:49.244-07:00,ns_1@127.0.0.1:ns_heart_slow_status_updater<0.237.0>:goxdcr_rest:get_from_goxdcr:163]Goxdcr is temporarily not available. Returning empty list.
[ns_server:debug,2016-10-19T09:55:49.265-07:00,ns_1@127.0.0.1:ns_orchestrator_sup<0.536.0>:misc:start_singleton:1094]start_singleton(gen_fsm, ns_orchestrator, [], []): started as <0.537.0> on 'ns_1@127.0.0.1'
[error_logger:info,2016-10-19T09:55:49.265-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_orchestrator_sup}
started: [{pid,<0.537.0>},
{name,ns_orchestrator},
{mfargs,{ns_orchestrator,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.289-07:00,ns_1@127.0.0.1:<0.540.0>:auto_failover:init:147]init auto_failover.
[ns_server:debug,2016-10-19T09:55:49.289-07:00,ns_1@127.0.0.1:ns_orchestrator_sup<0.536.0>:misc:start_singleton:1094]start_singleton(gen_server, auto_failover, [], []): started as <0.540.0> on 'ns_1@127.0.0.1'
[ns_server:debug,2016-10-19T09:55:49.289-07:00,ns_1@127.0.0.1:<0.430.0>:restartable:start_child:98]Started child process <0.432.0>
MFA: {mb_master,start_link,[]}
[error_logger:info,2016-10-19T09:55:49.289-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_orchestrator_sup}
started: [{pid,<0.540.0>},
{name,auto_failover},
{mfargs,{auto_failover,start_link,[]}},
{restart_type,permanent},
{shutdown,1000},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.289-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,mb_master_sup}
started: [{pid,<0.536.0>},
{name,ns_orchestrator_sup},
{mfargs,{ns_orchestrator_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.289-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.430.0>},
{name,mb_master},
{mfargs,
{restartable,start_link,
[{mb_master,start_link,[]},infinity]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.290-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.542.0>},
{name,master_activity_events_ingress},
{mfargs,
{gen_event,start_link,
[{local,master_activity_events_ingress}]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.290-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.543.0>},
{name,master_activity_events_timestamper},
{mfargs,
{master_activity_events,start_link_timestamper,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[error_logger:info,2016-10-19T09:55:49.296-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.545.0>},
{name,master_activity_events_pids_watcher},
{mfargs,
{master_activity_events_pids_watcher,start_link,
[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.301-07:00,ns_1@127.0.0.1:janitor_agent-test<0.437.0>:dcp_sup:nuke:79]Nuking DCP replicators for bucket "test":
[]
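At janitor time each bucket's stale DCP replicators are torn down; the trailing [] is the list that was found, empty on a fresh single-node start. A hedged sketch of such a nuke pass (the per-bucket supervisor naming is an assumption):

    %% Terminate every child of the bucket's DCP supervisor and return the
    %% list that was found, which is what the "Nuking ..." entry logs.
    nuke(Bucket) ->
        Sup = list_to_atom("dcp_sup-" ++ Bucket),
        Children = supervisor:which_children(Sup),
        [supervisor:terminate_child(Sup, Id)
         || {Id, _Pid, _Type, _Mods} <- Children],
        Children.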
[ns_server:debug,2016-10-19T09:55:49.305-07:00,ns_1@127.0.0.1:ns_server_nodes_sup<0.156.0>:one_shot_barrier:notify:27]Notifying on barrier menelaus_barrier
[ns_server:debug,2016-10-19T09:55:49.305-07:00,ns_1@127.0.0.1:menelaus_barrier<0.158.0>:one_shot_barrier:barrier_body:62]Barrier menelaus_barrier got notification from <0.156.0>
[error_logger:info,2016-10-19T09:55:49.305-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_sup}
started: [{pid,<0.546.0>},
{name,master_activity_events_keeper},
{mfargs,{master_activity_events_keeper,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]
[ns_server:debug,2016-10-19T09:55:49.305-07:00,ns_1@127.0.0.1:ns_server_nodes_sup<0.156.0>:one_shot_barrier:notify:32]Successfully notified on barrier menelaus_barrier
[ns_server:debug,2016-10-19T09:55:49.305-07:00,ns_1@127.0.0.1:<0.155.0>:restartable:start_child:98]Started child process <0.156.0>
MFA: {ns_server_nodes_sup,start_link,[]}
[error_logger:info,2016-10-19T09:55:49.305-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_nodes_sup}
started: [{pid,<0.197.0>},
{name,ns_server_sup},
{mfargs,{ns_server_sup,start_link,[]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[ns_server:debug,2016-10-19T09:55:49.306-07:00,ns_1@127.0.0.1:<0.2.0>:child_erlang:child_loop:115]67769: Entered child_loop
[error_logger:info,2016-10-19T09:55:49.306-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
supervisor: {local,ns_server_cluster_sup}
started: [{pid,<0.155.0>},
{name,ns_server_nodes_sup},
{mfargs,
{restartable,start_link,
[{ns_server_nodes_sup,start_link,[]},
infinity]}},
{restart_type,permanent},
{shutdown,infinity},
{child_type,supervisor}]
[error_logger:info,2016-10-19T09:55:49.306-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]
=========================PROGRESS REPORT=========================
application: ns_server
started_at: 'ns_1@127.0.0.1'
[ns_server:debug,2016-10-19T09:55:49.306-07:00,ns_1@127.0.0.1:replication_manager-test<0.428.0>:replication_manager:handle_call:182]Change replication type from tap to dcp
[ns_server:info,2016-10-19T09:55:49.307-07:00,ns_1@127.0.0.1:ns_memcached-test<0.391.0>:ns_memcached:handle_call:287]Enabling traffic to bucket "test"
[ns_server:info,2016-10-19T09:55:49.307-07:00,ns_1@127.0.0.1:ns_memcached-test<0.391.0>:ns_memcached:handle_call:291]Bucket "test" marked as warmed in 0 seconds
[ns_server:debug,2016-10-19T09:55:49.308-07:00,ns_1@127.0.0.1:janitor_agent-locked<0.473.0>:dcp_sup:nuke:79]Nuking DCP replicators for bucket "locked":
[]
[ns_server:debug,2016-10-19T09:55:49.312-07:00,ns_1@127.0.0.1:replication_manager-locked<0.469.0>:replication_manager:handle_call:182]Change replication type from tap to dcp
[ns_server:info,2016-10-19T09:55:49.314-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.462.0>:ns_memcached:handle_call:287]Enabling traffic to bucket "locked"
[ns_server:info,2016-10-19T09:55:49.314-07:00,ns_1@127.0.0.1:ns_memcached-locked<0.462.0>:ns_memcached:handle_call:291]Bucket "locked" marked as warmed in 0 seconds
[ns_server:debug,2016-10-19T09:55:49.315-07:00,ns_1@127.0.0.1:janitor_agent-default<0.510.0>:dcp_sup:nuke:79]Nuking DCP replicators for bucket "default":
[]
[ns_server:debug,2016-10-19T09:55:49.316-07:00,ns_1@127.0.0.1:replication_manager-default<0.506.0>:replication_manager:handle_call:182]Change replication type from tap to dcp
[ns_server:info,2016-10-19T09:55:49.317-07:00,ns_1@127.0.0.1:ns_memcached-default<0.499.0>:ns_memcached:handle_call:287]Enabling traffic to bucket "default"
[ns_server:debug,2016-10-19T09:55:49.317-07:00,ns_1@127.0.0.1:json_rpc_connection-goxdcr-cbauth<0.579.0>:json_rpc_connection:init:74]Observed revrpc connection: label "goxdcr-cbauth", handling process <0.579.0>
[ns_server:debug,2016-10-19T09:55:49.317-07:00,ns_1@127.0.0.1:json_rpc_connection-cbq-engine-cbauth<0.581.0>:json_rpc_connection:init:74]Observed revrpc connection: label "cbq-engine-cbauth", handling process <0.581.0>
[ns_server:info,2016-10-19T09:55:49.317-07:00,ns_1@127.0.0.1:ns_memcached-default<0.499.0>:ns_memcached:handle_call:291]Bucket "default" marked as warmed in 0 seconds
[ns_server:debug,2016-10-19T09:55:49.317-07:00,ns_1@127.0.0.1:json_rpc_connection-index-cbauth<0.583.0>:json_rpc_connection:init:74]Observed revrpc connection: label "index-cbauth", handling process <0.583.0>
[ns_server:debug,2016-10-19T09:55:49.317-07:00,ns_1@127.0.0.1:menelaus_cbauth<0.285.0>:menelaus_cbauth:handle_cast:87]Observed json rpc process {"goxdcr-cbauth",<0.579.0>} started
[error_logger:error,2016-10-19T09:55:49.317-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.579.0>,{ok,<0.579.0>}}
[ns_server:debug,2016-10-19T09:55:49.317-07:00,ns_1@127.0.0.1:json_rpc_connection-projector-cbauth<0.584.0>:json_rpc_connection:init:74]Observed revrpc connection: label "projector-cbauth", handling process <0.584.0>
[ns_server:debug,2016-10-19T09:55:49.317-07:00,ns_1@127.0.0.1:json_rpc_connection-saslauthd-saslauthd-port<0.587.0>:json_rpc_connection:init:74]Observed revrpc connection: label "saslauthd-saslauthd-port", handling process <0.587.0>
[error_logger:error,2016-10-19T09:55:49.317-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.581.0>,{ok,<0.581.0>}}
[error_logger:error,2016-10-19T09:55:49.318-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.583.0>,{ok,<0.583.0>}}
[error_logger:error,2016-10-19T09:55:49.318-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.584.0>,{ok,<0.584.0>}}
[error_logger:error,2016-10-19T09:55:49.318-07:00,ns_1@127.0.0.1:error_logger<0.6.0>:ale_error_logger_handler:do_log:203]Supervisor received unexpected message: {ack,<0.587.0>,{ok,<0.587.0>}}
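The five "Supervisor received unexpected message: {ack,Pid,{ok,Pid}}" errors above are the proc_lib start-acknowledgement protocol landing in a mailbox that did not itself call proc_lib:start_link; it appears harmless here, since each connection is also logged as observed and proceeds normally. The two halves of that protocol, as a sketch:

    %% Starter side: blocks until the child acknowledges.
    start() ->
        proc_lib:start_link(?MODULE, init, [self()]).

    %% Child side: init_ack/2 sends {ack, self(), Return} to the starter,
    %% which is exactly the tuple shape seen in the errors above.
    init(Parent) ->
        proc_lib:init_ack(Parent, {ok, self()}),
        loop().

    loop() ->
        receive _ -> loop() end.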
[ns_server:debug,2016-10-19T09:55:49.319-07:00,ns_1@127.0.0.1:json_rpc_connection-goxdcr-cbauth<0.579.0>:json_rpc_connection:handle_call:157]sending jsonrpc call:{[{jsonrpc,<<"2.0">>},
{id,0},
{method,<<"AuthCacheSvc.UpdateDB">>},
{params,
[{[{specialUser,<<"@goxdcr-cbauth">>},
{nodes,
[{[{host,<<"127.0.0.1">>},
{user,<<"_admin">>},
{password,"*****"},
{ports,
[8091,18091,9100,9101,9102,9103,9104,9105,
18092,8092,11207,9999,11210,11211,8093,
18093]},
{local,true}]}]},
{buckets,
[{[{name,<<"test">>},{password,"*****"}]},
{[{name,<<"locked">>},{password,"*****"}]},
{[{name,<<"default">>},{password,"*****"}]}]},
{authCheckURL,<<"http://127.0.0.1:8091/_cbauth">>},
{permissionCheckURL,
<<"http://127.0.0.1:8091/_cbauth/checkPermission">>},
{ldapEnabled,false},
{permissionsVersion,75615},
{admin,
{[{user,<<"Administrator">>},
{salt,<<"QB3B+r6o6uk8Xpv/FO8fTQ==">>},
{mac,
<<"6S6aASAfFc8/OJxtT/k7eSxo1HU=">>}]}}]}]}]}
[ns_server:debug,2016-10-19T09:55:49.320-07:00,ns_1@127.0.0.1:json_rpc_connection-goxdcr-cbauth<0.579.0>:json_rpc_connection:handle_info:93]got response: [{<<"id">>,0},{<<"result">>,true},{<<"error">>,null}]
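The request term above is the EJSON form ({[{Key,Value},...]}, proplists wrapped in a tuple) of a JSON-RPC 2.0 call, answered at id 0 with result true. A sketch of assembling the same envelope; the helper is an assumption, while the method name and fields come from the log:

    %% Build a JSON-RPC 2.0 request in EJSON form, ready for a JSON encoder.
    call_envelope(Id, Method, Params) ->
        {[{jsonrpc, <<"2.0">>},
          {id, Id},
          {method, Method},
          {params, Params}]}.

    %% e.g. call_envelope(0, <<"AuthCacheSvc.UpdateDB">>, [Db])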
[ns_server:debug,2016-10-19T09:55:49.320-07:00,ns_1@127.0.0.1:menelaus_cbauth<0.285.0>:menelaus_cbauth:handle_cast:87]Observed json rpc process {"cbq-engine-cbauth",<0.581.0>} started
[ns_server:debug,2016-10-19T09:55:49.320-07:00,ns_1@127.0.0.1:json_rpc_connection-cbq-engine-cbauth<0.581.0>:json_rpc_connection:handle_call:157]sending jsonrpc call:{[{jsonrpc,<<"2.0">>},
{id,0},
{method,<<"AuthCacheSvc.UpdateDB">>},
{params,
[{[{specialUser,<<"@cbq-engine-cbauth">>},
{nodes,
[{[{host,<<"127.0.0.1">>},
{user,<<"_admin">>},
{password,"*****"},
{ports,
[8091,18091,9100,9101,9102,9103,9104,9105,
18092,8092,11207,9999,11210,11211,8093,
18093]},
{local,true}]}]},
{buckets,
[{[{name,<<"test">>},{password,"*****"}]},
{[{name,<<"locked">>},{password,"*****"}]},
{[{name,<<"default">>},{password,"*****"}]}]},
{authCheckURL,<<"http://127.0.0.1:8091/_cbauth">>},
{permissionCheckURL,
<<"http://127.0.0.1:8091/_cbauth/checkPermission">>},
{ldapEnabled,false},
{permissionsVersion,75615},
{admin,
{[{user,<<"Administrator">>},
{salt,<<"QB3B+r6o6uk8Xpv/FO8fTQ==">>},
{mac,
<<"6S6aASAfFc8/OJxtT/k7eSxo1HU=">>}]}}]}]}]}
[ns_server:debug,2016-10-19T09:55:49.321-07:00,ns_1@127.0.0.1:json_rpc_connection-cbq-engine-cbauth<0.581.0>:json_rpc_connection:handle_info:93]got response: [{<<"id">>,0},{<<"result">>,true},{<<"error">>,null}]
[ns_server:debug,2016-10-19T09:55:49.321-07:00,ns_1@127.0.0.1:menelaus_cbauth<0.285.0>:menelaus_cbauth:handle_cast:87]Observed json rpc process {"index-cbauth",<0.583.0>} started
[ns_server:debug,2016-10-19T09:55:49.321-07:00,ns_1@127.0.0.1:json_rpc_connection-index-cbauth<0.583.0>:json_rpc_connection:handle_call:157]sending jsonrpc call:{[{jsonrpc,<<"2.0">>},
{id,0},
{method,<<"AuthCacheSvc.UpdateDB">>},
{params,
[{[{specialUser,<<"@index-cbauth">>},
{nodes,
[{[{host,<<"127.0.0.1">>},
{user,<<"_admin">>},
{password,"*****"},
{ports,
[8091,18091,9100,9101,9102,9103,9104,9105,
18092,8092,11207,9999,11210,11211,8093,
18093]},
{local,true}]}]},
{buckets,
[{[{name,<<"test">>},{password,"*****"}]},
{[{name,<<"locked">>},{password,"*****"}]},
{[{name,<<"default">>},{password,"*****"}]}]},
{authCheckURL,<<"http://127.0.0.1:8091/_cbauth">>},
{permissionCheckURL,
<<"http://127.0.0.1:8091/_cbauth/checkPermission">>},
{ldapEnabled,false},
{permissionsVersion,75615},
{admin,
{[{user,<<"Administrator">>},
{salt,<<"QB3B+r6o6uk8Xpv/FO8fTQ==">>},
{mac,
<<"6S6aASAfFc8/OJxtT/k7eSxo1HU=">>}]}}]}]}]}
[ns_server:debug,2016-10-19T09:55:49.322-07:00,ns_1@127.0.0.1:json_rpc_connection-index-cbauth<0.583.0>:json_rpc_connection:handle_info:93]got response: [{<<"id">>,0},{<<"result">>,true},{<<"error">>,null}]
[ns_server:debug,2016-10-19T09:55:49.322-07:00,ns_1@127.0.0.1:menelaus_cbauth<0.285.0>:menelaus_cbauth:handle_cast:87]Observed json rpc process {"projector-cbauth",<0.584.0>} started
[ns_server:debug,2016-10-19T09:55:49.322-07:00,ns_1@127.0.0.1:json_rpc_connection-projector-cbauth<0.584.0>:json_rpc_connection:handle_call:157]sending jsonrpc call:{[{jsonrpc,<<"2.0">>},
{id,0},
{method,<<"AuthCacheSvc.UpdateDB">>},
{params,
[{[{specialUser,<<"@projector-cbauth">>},
{nodes,
[{[{host,<<"127.0.0.1">>},
{user,<<"_admin">>},
{password,"*****"},
{ports,
[8091,18091,9100,9101,9102,9103,9104,9105,
18092,8092,11207,9999,11210,11211,8093,
18093]},
{local,true}]}]},
{buckets,
[{[{name,<<"test">>},{password,"*****"}]},
{[{name,<<"locked">>},{password,"*****"}]},
{[{name,<<"default">>},{password,"*****"}]}]},
{authCheckURL,<<"http://127.0.0.1:8091/_cbauth">>},
{permissionCheckURL,
<<"http://127.0.0.1:8091/_cbauth/checkPermission">>},
{ldapEnabled,false},
{permissionsVersion,75615},
{admin,
{[{user,<<"Administrator">>},
{salt,<<"QB3B+r6o6uk8Xpv/FO8fTQ==">>},
{mac,
<<"6S6aASAfFc8/OJxtT/k7eSxo1HU=">>}]}}]}]}]}
[ns_server:debug,2016-10-19T09:55:49.322-07:00,ns_1@127.0.0.1:json_rpc_connection-projector-cbauth<0.584.0>:json_rpc_connection:handle_info:93]got response: [{<<"id">>,0},{<<"result">>,true},{<<"error">>,null}]
[ns_server:info,2016-10-19T09:55:53.785-07:00,ns_1@127.0.0.1:ns_doctor<0.242.0>:ns_doctor:update_status:314]The following buckets became ready on node 'ns_1@127.0.0.1': ["default",
"locked","test"]